Dataset columns (each row below lists these fields in this order):

  query            string, length 9 to 3.4k
  document         string, length 9 to 87.4k
  metadata         dict
  negatives        sequence of 4 to 101 items
  negative_scores  sequence of 4 to 101 items
  document_score   string, length 3 to 10
  document_rank    string, 102 distinct values
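The column summary above is the kind of schema the Hugging Face datasets viewer reports for this data. A minimal loading sketch, assuming the rows are available locally as JSON Lines; the file name is hypothetical, while the load_dataset call is the standard datasets API:

    from datasets import load_dataset

    # Hypothetical local export of the rows shown below, one JSON object per line.
    ds = load_dataset("json", data_files="code_retrieval_triplets.jsonl", split="train")

    row = ds[0]
    print(row["query"])                                  # docstring-style query, 9 to 3.4k chars
    print(len(row["negatives"]))                         # 4 to 101 mined hard negatives
    print(row["document_score"], row["document_rank"])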
Returns a market segment ID embedded in a JSON structure.
def json_market_builder(self, customerID, marketID) :
    json_result = '{\n'
    json_result += '\t "_results":[\n'
    json_result += '\t\t{ "customerID": "' + str(customerID)
    json_result += ', "marketID": "' + str(marketID)
    json_result += '}\n'
    json_result += '\n\t]\n}'
    return json_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_segment_dictionary(segment):\n return {\n \"encoded_name\": segment.encoded_name(),\n \"id\": segment.pk,\n \"timestamp\": int(time.time()),\n \"persistent\": segment.persistent\n }", "def getSerpentId(self):\n raise NotImplementedError", "def _make_segment_dict(obj):\n #NOTE(jrichard) drop change in next rebase.\n return {'id': obj.id,\n NETWORK_TYPE: obj.network_type,\n PHYSICAL_NETWORK: obj.physical_network,\n SEGMENTATION_ID: obj.segmentation_id,\n NETWORK_ID: getattr(obj, 'network_id', None)}", "def getSerpentId(self):\n symbol = self.element.symbol.capitalize()\n return \"{}-{}{}\".format(symbol, self.a, \"m\" if self.state else \"\")", "def id(self, param):\n data = self._http_get(\"cve\", query=param)\n return data.json()", "def dump_segment(arg_tuple):\n idx, year_month_day, segment = arg_tuple\n out_name = 'parsed_out/%s-%d.json' % (year_month_day, idx)\n json.dump(segment, open(out_name, 'w'), sort_keys=True, cls=CardEncoder, skipkeys=True)", "def stringifyID(point: dict, uid: Union[int, str]) -> str:\n # ordinal time for begin (b) and end (e)\n b = dt.datetime.fromisoformat(point['TBTimestamp']).strftime('%s')\n e = dt.datetime.fromisoformat(point['TETimestamp']).strftime('%s')\n # string concat of all sensor labels\n values = \"-\".join([str(sens[\"Scaled\"]) for sens in point[\"Channels\"]])\n\n idString = f\"{uid}-{b}-{e}_{values}\" # actual id string\n return idString", "def get_customer_marketSegment(self, df_invoice_line_customer):\n #-------------------------------------------------------------------------\n # Building data model \n #-------------------------------------------------------------------------\n self.data_transform(df_invoice_line_customer)\n\n #-------------------------------------------------------------------------\n # Customer features are built thanks to transformers.\n #-------------------------------------------------------------------------\n self.df_customers_features_build()\n \n #-------------------------------------------------------------------------\n # Customer market segment is predicted\n #-------------------------------------------------------------------------\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n segmentID = y_pred[0]\n \n return segmentID", "def to_json(self, obj):\n _dict = obj._to_dict()\n if ID not in _dict or _dict[ID] is None:\n _dict[ID] = str(uuid.uuid4())\n json_str = json.dumps(_dict, indent=4)\n return json_str", "def _format_market_id(self, marketId):\n return \"{}:{}\".format(\"LON\", marketId.split(\"-\")[0])", "def get_market_segments(self):\r\n return self._market_segments", "def standard_id(self):\n return self.get(\"standard_id\", decode=True)", "def as_json(self, args=None):\n seg_data = _dict()\n seg_data[u'id'] = int(self.id)\n seg_data[u'parentID'] = int(self.parent_id)\n if self.biological_annotation is not None:\n seg_data[u'biologicalAnnotation'] = self.biological_annotation.as_json(args=args)\n if self.complexes_and_macromolecules:\n complexes = list()\n for _complex in self.complexes_and_macromolecules.complexes:\n complexes.append(_complex)\n macromolecules = list()\n for macromolecule in self.complexes_and_macromolecules.macromolecules:\n macromolecules.append(macromolecule)\n seg_data[u'complexesAndMacromolecules'] = {\n u'complexes': complexes,\n u'macromolecules': macromolecules,\n }\n seg_data.update(self.colour.as_json(args=args))\n # seg_data[u'colour'] = tuple(map(float, self.colour.value))\n if self.meshes:\n seg_data[u'meshList'] = 
len(self.meshes)\n if self.shapes:\n seg_data[u'shapePrimitiveList'] = len(self.shapes)\n return seg_data", "def get_id():\n try:\n regd_no = request.form['regd_no']\n query_society_id = queries['get_society_id']\n query = query_society_id.format(regd_no)\n \n with dbm.dbManager() as manager:\n result = manager.getDataFrame(query)\n\n return jsonify(result.to_dict(orient='records'))\n except psycopg2.DatabaseError as error:\n errors = {'registeration': False, 'error': (error) }\n return str(errors)", "def get_sector(self):\n _FUNCTION_KEY = \"SECTOR\"\n # The keys for the json output\n _DATA_KEYS = [\"Rank A: Real-Time Performance\",\n \"Rank B: 1 Day Performance\",\n \"Rank C: 5 Day Performance\",\n \"Rank D: 1 Month Performance\",\n \"Rank E: 3 Month Performance\",\n \"Rank F: Year-to-Date (YTD) Performance\",\n \"Rank G: 1 Year Performance\",\n \"Rank H: 3 Year Performance\",\n \"Rank I: 5 Year Performance\",\n \"Rank J: 10 Year Performance\"]\n return _FUNCTION_KEY, _DATA_KEYS, 'Meta Data'", "def to_json(self):\n def convert(o):\n if type(o) is Card:\n return o.ident\n else:\n return o.__dict__\n\n return json.dumps(self, default=convert)", "def to_json(self):\n def convert(o):\n if type(o) is Card:\n return o.ident\n else:\n return o.__dict__\n\n return json.dumps(self, default=convert)", "def vat_id(self) -> str:\n return self._vat_id", "def get_item_sets_id():\n with open(SETS_ID_FILE) as sets_file:\n sets = sets_file.read()\n return json.loads(sets)", "def get_genomic_data(lineage, segment, session):\n LOG.debug(f\"Exporting genomic data for lineage <{lineage}> and segment <{segment}>\")\n\n sequences = datastore.fetch_genomic_sequences(session, lineage, segment)\n\n return Response((row[0] + '\\n' for row in sequences), mimetype=\"application/x-ndjson\")", "def segment_number(self):\n if hasattr(self, '_m_segment_number'):\n return self._m_segment_number if hasattr(self, '_m_segment_number') else None\n\n self._m_segment_number = self.segment_number_raw.value\n return self._m_segment_number if hasattr(self, '_m_segment_number') else None", "def getSerpentId(self):\n return \"{}-nat\".format(self.element.symbol.capitalize())", "def generate_subsegment_id():\n return uuid.uuid4().hex[:16]", "def serialize(self):\n return {\n 'oid' : self.oid,\n 'sid' : self.sid,\n 'stopaddress' : self.stopaddress,\n 'cargosize' : self.cargosize,\n 'totalfee' : self.totalfee,\n # 'status' : self.status,\n 'grade' : self.grade,\n 'comment' : self.comment,\n 'stopaddr_lat' : self.stopaddr_lat,\n 'stopaddr_lng' : self.stopaddr_lng\n }", "def to_json(self):\n\t\t\n\t\treturn { \"total_sentences\": self.total_sentences }", "def to_json(self, category):\r\n return \"{{\\\"id\\\": {0}, \" \\\r\n \"\\\"name\\\": \\\"{1}\\\", \" \\\r\n \"\\\"abbreviation\\\": \\\"{2}\\\", \" \\\r\n \"\\\"category\\\": {3}, \" \\\r\n \"\\\"rank\\\": {4}}}\".format(self.id, self.name, self.abbreviation, category.id, self.rank)", "def to_string(self):\n\thistory_items = self.parse()\n\tvalue_counter = history_items[\"data\"]\n\tresp = { \"slots\": history_items[\"slots\"], \"num_data\": history_items[\"num_data\"], \"top_num_values\": [] }\n\n\tfor i in range(0, min(len(value_counter), 10)):\n\t resp[\"top_num_values\"].append({ \"itemid\": value_counter[i][0], \"num\": value_counter[i][1]})\n\t\n return json.dumps(self.parse(), indent=4)", "def getSegment(self):\n return self.segment", "async def process_get_did(self) -> str:\n\n return json.dumps(self.did)", "def json_out(self):\n temp_json = json.dumps(self.ecat_info, 
indent=4)\n print(temp_json)", "def getAAAZZZSId(self):\n if self.element.symbol == \"C\":\n return \"120060\"\n elif self.element.symbol == \"V\":\n return \"510230\"\n else:\n return None", "def serialize(self):\n return {\n \"id\": self.id,\n \"sid\": self.sid,\n \"sku\": self.sku,\n \"name\": self.name,\n \"price\": self.price,\n \"amount\": self.amount,\n \"create_time\": self.create_time,\n \"update_time\": self.update_time\n }", "def make_series_key(key, tags, attributes):\n\n return json.dumps({'key': key, 'tags': tags, 'attributes': attributes})", "def encode_span(self, span):\n\n json_span = {\n \"traceId\": span.trace_id,\n \"id\": span.span_id,\n }\n\n if span.name:\n json_span[\"name\"] = span.name\n if span.parent_id:\n json_span[\"parentId\"] = span.parent_id\n if span.timestamp:\n json_span[\"timestamp\"] = int(span.timestamp * 1000000)\n if span.duration:\n json_span[\"duration\"] = int(span.duration * 1000000)\n if span.shared is True:\n json_span[\"shared\"] = True\n if span.kind and span.kind.value is not None:\n json_span[\"kind\"] = span.kind.value\n if span.local_endpoint:\n json_span[\"localEndpoint\"] = self._create_json_endpoint(\n span.local_endpoint,\n False,\n )\n if span.remote_endpoint:\n json_span[\"remoteEndpoint\"] = self._create_json_endpoint(\n span.remote_endpoint,\n False,\n )\n if span.tags and len(span.tags) > 0:\n # Ensure that tags are all strings\n json_span[\"tags\"] = {\n str(key): str(value) for key, value in span.tags.items()\n }\n\n if span.annotations:\n json_span[\"annotations\"] = [\n {\"timestamp\": int(timestamp * 1000000), \"value\": key}\n for key, timestamp in span.annotations.items()\n ]\n\n encoded_span = json.dumps(json_span)\n\n return encoded_span", "def get_id(disk):\n\n #TODO\n return \"Unknown\"", "def get_seg(self):\n self.seg = self.render()[4]\n return self.seg", "def serialize(self):\n return {\n 'district': self.district,\n 'hadquartes': self.headquartes,\n 'revenue_division': self.revenue_division,\n 'mandals': self.mandals,\n 'population': self.population,\n 'area': self.area,\n 'density': self.density,\n 'id': self.id\n }", "def serialized_data(self):\n return {\n 'id': self.id,\n 'start_time': str(self.start_time),\n 'venue_id': self.venue_id,\n 'venue_name': self.venue.name,\n 'venue_image_link': self.venue.image_link,\n 'artist_id': self.artist_id,\n 'artist_name': self.artist.name,\n 'artist_image_link': self.artist.image_link\n }", "def get_id(self):\n return freeze_dict(self._get_and_check_id_params())", "def GetCalibrationAssetId(self, swivel_manifest, suffix):\r\n datestr = GetCompactDateString()\r\n if not swivel_manifest.HasField(\"scan_station\"):\r\n raise errors.ConfigError(\"Missing required scan_station metadata\")\r\n ss_id = swivel_manifest.scan_station.id\r\n # Additional 'K' precedes the Katamari ID in case the specification adds\r\n # more fields.\r\n return \"%s-%s-K-%s-%s\" % (datestr, ss_id,\r\n swivel_manifest.katamari_id,\r\n suffix)", "def serialize(self):\n return {\n 'id' : self.id,\n 'sid' : self.sid,\n 'status' : self.status,\n 'duration' : self.duration,\n 'recurl' : self.recurl,\n 'dateTime' : self.dateTime\n }", "def get_segment(self):\n return self.segment", "def get_segment(self):\n return self.segment", "def json(self):\r\n return {\"id\": self.id, \"code\": self.code, \"description\": self.description, \"xCoor\": self.x_coor, \"yCoor\": self.y_coor, \"latitude\": self.latitude,\r\n \"longitude\": self.longitude, \"waterschapId\": self.waterschap_id, \"watertypeId\": 
self.watertype_id, \"watertypeKrwId\": self.watertype_krw_id}", "def catalogItemJSON(sport_id, item_id):\n\n catalogItem = session.query(Item).filter_by(id=item_id).one()\n return jsonify(Item=catalogItem.serialize)", "def GetSwivelAssetId(self, swivel_manifest):\r\n datestr = GetCompactDateString()\r\n if not swivel_manifest.HasField(\"scan_station\"):\r\n raise errors.ConfigError(\"Missing required scan_station metadata\")\r\n ss_id = swivel_manifest.scan_station.id\r\n # Additional 'K' precedes the Katamari ID in case the specification adds\r\n # more fields.\r\n return \"%s-%s-K-%s-2d\" % (datestr, ss_id, swivel_manifest.katamari_id)", "def bookJSON(book_id):\n book = db_session.query(Book).filter_by(id=book_id).one()\n return jsonify(book=book.serialize)", "def get_idn(self):\n # not all IVVI racks support the version command, so return a dummy\n return -1\n\n idparts = ['QuTech', 'IVVI', 'None', self.version()]\n\n return dict(zip(('vendor', 'model', 'serial', 'firmware'), idparts))", "def json(self):\n return {\n \"instance_id\" : self.instance_id,\n \"library_id\" : self.library_id,\n \"symbol_index\" : self.symbol_index,\n \"symbol_attributes\":[s.json() for s in self.symbol_attributes],\n \"attributes\" : stringify_attributes(self.attributes)\n }", "def as_json(self):\n result = super().as_json()\n result[\"generator\"].update({\n \"block\": self.vein.with_purity(100).as_json(),\n \"cluster-size\": self.cluster_size,\n \"type\": \"cluster\",\n })\n return result", "def to_json_string(self):\n\t\treturn json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def json_format_by_id(id):\n fmt = Format.query.filter(Format.id==id).first()\n if fmt is None:\n abort(404)\n return jsonify(fmt.get_public_dict())", "def to_json_dict(self):\n json_dict = {}\n json_dict['event_status'] = self.event_status\n json_dict['event_status_list'] = self.event_status_list\n json_dict['spaces'] = [s.get_id() for s in self.spaces]\n json_dict['characters'] = [c.get_id() for c in self.characters]\n json_dict['exits'] = [e.get_id() for e in self.exits]\n json_dict['items'] = [i.get_id() for i in self.items]\n return json_dict", "def get_station(station_id):\n station = db.session.query(DublinBike) \\\n .filter(DublinBike.number == station_id) \\\n .order_by(DublinBike.scraping_time.desc()) \\\n .first()\n\n return jsonify({\n 'data': station.serialize\n })", "def imgaug_json_id(aug):\n import imgaug\n if isinstance(aug, tuple):\n return [imgaug_json_id(item) for item in aug]\n elif isinstance(aug, imgaug.parameters.StochasticParameter):\n return str(aug)\n else:\n try:\n info = OrderedDict()\n info['__class__'] = aug.__class__.__name__\n params = aug.get_parameters()\n if params:\n info['params'] = [imgaug_json_id(p) for p in params]\n if isinstance(aug, list):\n children = aug[:]\n children = [imgaug_json_id(c) for c in children]\n info['children'] = children\n return info\n except Exception:\n # imgaug is weird and buggy\n return str(aug)", "def price_chart_json(request, prod_id):\n prices = Product.objects.get(prod_id=prod_id).prices\n labels = []\n data = []\n for price in prices:\n labels.append(price.updated_at.strftime('%Y-%m-%d %H:%M'))\n data.append(price.price_discounted)\n output = {\n 'labels': labels,\n 'datasets': [\n {\n 'data': data,\n 'label': prod_id,\n 'name': prod_id,\n }\n ]\n }\n return JsonResponse(output)", "def _get_network_id(self):\n pubnet = self.conn.network.find_network('public')\n net = self.conn.network.find_network(self.net_conf['net_name'])\n subnet = 
self.conn.network.find_subnet(self.net_conf['subnet_name'])\n # TODO: Add support for security group\n\n self.network_id = {\n 'public': pubnet.id,\n 'net': net.id,\n 'subnet': subnet.id\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'description': self.description,\n 'id': self.id,\n 'race_cat': self.race_cat_id,\n 'utmb_points': self.utmb_points,\n 'wser_qualifier': self.wser_qualifier,\n 'race_website': self.race_website,\n 'state': self.state_id,\n 'month': self.month_id\n }", "def to_json(self):\n\n tcluster = {\"clusters\": [], \"matchings\": None}\n if self.matching is not None:\n tcluster[\"matchings\"] = self.matching\n elif self.matched is not None:\n tcluster[\"matchings\"] = self.matched\n\n for tid in self.get_observation_ids():\n ct = self.get_clustering_at(tid)\n partition = {\n \"tid\": tid,\n \"communities\": ct.named_communities,\n \"algorithm\": ct.method_name,\n \"params\": ct.method_parameters,\n \"overlap\": ct.overlap,\n \"coverage\": ct.node_coverage,\n }\n tcluster[\"clusters\"].append(partition)\n\n return json.dumps(tcluster)", "def _jsonify(self):\n return self.experiment_record.to_ddb_record()", "def biv_id(uri):\n print(biv.URI(uri).biv_id)", "def get_region(rid):\n region = Region.query.get_or_404(rid)\n return jsonify(region.to_long_json())", "def get_idn(self):\n try:\n idstr = '' # in case self.ask fails\n idstr = self._get_version().split()\n # form is supposed to be comma-separated, but we've seen\n # other separators occasionally\n idparts = [idstr[3] + ' ' + idstr[4], idstr[0], idstr[5],\n idstr[1] + ' ' + idstr[2]]\n # in case parts at the end are missing, fill in None\n if len(idparts) < 4:\n idparts += [None] * (4 - len(idparts))\n except:\n logging.warn('Error getting or interpreting *IDN?: ' + repr(idstr))\n idparts = [None, None, None, None]\n\n return dict(zip(('vendor', 'model', 'serial', 'firmware'), idparts))", "def get_json_info_sticker(self, st_id):\n remake = remake_url(self.get_json_cursor)\n json_url = remake.remake_sticker_url(st_id)\n response = get_responses(json_url)\n return self.return_response(response, st_id)", "def sid(self):\n return self.data[''].sid", "def json(self):\n if self.valid:\n return {\n 'articleID': self._id,\n 'ticker_symbol': self.ticker,\n 'published_date': self.pub_date,\n 'author_name': self.author,\n 'title': self.title,\n 'text': self.text,\n 'num_likes': 0,\n 'includes_symbols': self.includes\n }\n\n return {}", "def sign_contract(cid):\r\n with engine.with_session() as ss:\r\n is_part = ss.query(LxContractParticipation).filter_by(\r\n contract_id=cid, user_id=current_user.id\r\n ).first()\r\n has_sign_auth = ss.query(LxContractAuthorization).filter_by(\r\n contract_id=cid, user_id=current_user.id,\r\n ).first()\r\n if not (is_part or has_sign_auth):\r\n return jsonify({'success': False, 'errorMsg': constants.ERROR_CODE[\r\n 'NO_SIGN_AUTH']})\r\n if is_part:\r\n is_part.update({'stage': constants.CONTRACT_PART_STAGE['SIGN']})\r\n elif has_sign_auth:\r\n present_part = ss.query(LxContractParticipation).filter_by(\r\n contract_id=cid, user_id=has_sign_auth.auth_own_user.id\r\n ).first()\r\n if not present_part:\r\n return jsonify(\r\n {'success': False,\r\n 'errorMsg': constants.ERROR_CODE['CONTRACT_AUTH_ERROR']})\r\n present_part.update(\r\n {'stage': constants.CONTRACT_PART_STAGE['SIGN']})\r\n cur_contract = ss.query(LxContract).get(cid)\r\n cur_sign_num = ss.query(\r\n func.count(LxContractParticipation.id)\r\n ).filter_by(\r\n contract_id=cid, stage=constants.CONTRACT_PART_STAGE['SIGN']\r\n 
).one()\r\n if cur_contract.part_num == cur_sign_num:\r\n cur_contract.update({'stage': constants.CONTRACT_STAGE['SIGN']})\r\n return jsonify({'success': True, 'data': cur_sign_num})", "def showModelInfoJSON(style_id, model_id):\n model = session.query(Model).filter_by(id=model_id).one()\n return jsonify(model=model.serialize)", "def getid(data):\n return int(data.split('/')[-1])", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def serialize(self):\n\t\treturn {\n\t\t\t'id': self.id,\n\t\t\t'title': self.title,\n\t\t\t'year': self.year,\n\t\t\t'artist': self.artist_id,\n\t\t\t'user': self.user_id\n\t\t}", "def getSentenceId(self):\n return( int(self.id.split('.')[1]) )", "def serialize_to_json(self):\n d = self.__dict__\n x = d.pop('epicenter_x')\n y = d.pop('epicenter_y')\n z = d.pop('epicenter_z')\n d['epicenter'] = [x, y, z]\n\n return dict_to_safe_for_json(d)", "def getID():", "def to_id_dict(self):\n return self._id, dict(self.__data)", "def id(*, alphabet=None, min_size=1, average_size=None, max_size=None):\n if min_size is None or min_size < 1:\n raise ValueError('Document ID must not be empty')\n return json.strings(alphabet=alphabet,\n min_size=min_size,\n average_size=average_size,\n max_size=max_size)", "def get_key_id(self):\n jwk_data = {\n \"crv\": \"P-256\",\n \"kty\": \"EC\",\n \"x\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().x.to_bytes(32, \"big\")).decode().replace(\"=\", \"\"),\n \"y\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().y.to_bytes(32, \"big\")).decode().replace(\"=\", \"\")\n }\n jwk = json.dumps(jwk_data, separators=(',', ':'))\n return hashlib.sha256(jwk.encode()).digest()", "def serialize(self):\r\n return {\r\n 'rateID': self.rateID,\r\n 'souceCode': self.sourceCode,\r\n 'currencyCode':self.currencyCode,\r\n 'sellPrice':self.sellPrice,\r\n 'buyPrice':self.buyPrice,\r\n 'date':'{}-{}-{}'.format(self.date.year, self.date.month, self.date.day)\r\n }", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def 
to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def part_id(self):\n ...", "def to_json(self):\n return [\"population\", self.species_index, self.card_trade_index]" ]
[ "0.5654718", "0.5459859", "0.5401625", "0.5299592", "0.5264468", "0.5212608", "0.5205277", "0.5189763", "0.51112807", "0.5020024", "0.4996774", "0.49531874", "0.49421018", "0.49232537", "0.49065843", "0.48627487", "0.48627487", "0.48185506", "0.4808638", "0.47962487", "0.47957668", "0.475528", "0.47458923", "0.47438788", "0.47269607", "0.47195283", "0.4701069", "0.46954682", "0.4694243", "0.46937808", "0.46860483", "0.46778703", "0.4671877", "0.4656893", "0.4636202", "0.46329886", "0.46257353", "0.4621697", "0.46144766", "0.46141428", "0.46129254", "0.46103728", "0.46103728", "0.4602417", "0.45816568", "0.45675722", "0.45653886", "0.45636204", "0.45620054", "0.4556115", "0.45528242", "0.4552215", "0.45516187", "0.454794", "0.4547729", "0.45471492", "0.45463365", "0.45379534", "0.45351353", "0.45327592", "0.45221955", "0.4520303", "0.45170528", "0.45151168", "0.4513464", "0.45124257", "0.4511042", "0.45094177", "0.45034266", "0.45021102", "0.45021102", "0.45021102", "0.45021102", "0.45021102", "0.45021102", "0.45021102", "0.45021102", "0.45021102", "0.45021102", "0.44987282", "0.44947076", "0.44823888", "0.44819698", "0.44807276", "0.44769257", "0.44754422", "0.44753197", "0.4474281", "0.4474281", "0.4474281", "0.4474281", "0.4474281", "0.4474281", "0.4474281", "0.4474281", "0.4474281", "0.4474281", "0.4474281", "0.4468345", "0.44660586" ]
0.54065216
2
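The two bare values just above are the first row's document_score (0.54065216) and document_rank (2). Exactly two of the negative_scores exceed the document's score (0.5654718 and 0.5459859), which is consistent with document_rank counting how many negatives outrank the positive document. A minimal check under that assumption, reusing row from the loading sketch:

    doc_score = float(row["document_score"])
    higher = sum(float(s) > doc_score for s in row["negative_scores"])
    assert higher == int(row["document_rank"])   # 2 for the first row shown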
Returns the RFM score from the dataframe given as a parameter. The RFM score is computed from the local RFM threshold matrix.
def get_rfm(self, df):
    df_tmp, df_RFM, df_RFM_threshold, day_now \
        = p5_util.p5_df_rfm_build(df, df_RFM_threshold=self.df_RFM_quantiles
                                  , day_now = self._day_now)
    RFM = df_RFM.RFM.iloc[0]
    return str(RFM)
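The positive document above delegates to a project helper, p5_util.p5_df_rfm_build, whose body is not part of this row. For orientation only, a generic RFM (Recency/Frequency/Monetary) scoring sketch in pandas; the invoice column names (InvoiceNo, Total) and the quartile binning are assumptions, not the project's implementation:

    import pandas as pd

    def rfm_score(df_invoice, day_now):
        # Aggregate per customer: days since last purchase, number of invoices, total spend.
        agg = df_invoice.groupby("CustomerID").agg(
            recency=("InvoiceDate", lambda s: (day_now - s.max()).days),
            frequency=("InvoiceNo", "nunique"),
            monetary=("Total", "sum"),
        )
        # Quartile-rank each dimension; lower recency is better, so its labels are reversed.
        agg["R"] = pd.qcut(agg["recency"].rank(method="first"), 4, labels=[4, 3, 2, 1])
        agg["F"] = pd.qcut(agg["frequency"].rank(method="first"), 4, labels=[1, 2, 3, 4])
        agg["M"] = pd.qcut(agg["monetary"].rank(method="first"), 4, labels=[1, 2, 3, 4])
        return agg["R"].astype(str) + agg["F"].astype(str) + agg["M"].astype(str)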
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_rfm_score(dataframe):\n\n dataframe[\"recency_score\"] = pd.qcut(dataframe['recency'].rank(method=\"first\"), 5, labels=[5, 4, 3, 2, 1])\n dataframe[\"frequency_score\"] = pd.cut(dataframe['frequency'], bins=[0, 4, 8, 13, 17, 20], labels=[1, 2, 3, 4, 5])\n dataframe[\"RFM_SCORE\"] = (dataframe['recency_score'].astype(str) +\n dataframe['frequency_score'].astype(str))\n\n return dataframe", "def score(self, df: pd.DataFrame, label_column: str) -> float:\n assert label_column not in self.feature_columns, 'Label column is in the feature list.'\n assert label_column in df.columns, 'Label column is not in the dataframe.'\n\n rounded_preds = self.predict(df).round()\n return f1_score(df[label_column].values, rounded_preds)", "def PP_SPF_AVG_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVG_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVG_PFL'])\n Feature_DF.loc[:,'PP_SPF_AVG_PFL_TRS'] = Feature_DF.loc[:,'PP_SPF_AVG_PFL'].apply(lambda x : (1+x-min_value)**(9/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVG_PFL_TRS']]\n\n return Feature_DF", "def compute_FRR(self, genuine_score, thresholds=0.01):\r\n print('Computing FRR')\r\n condition = lambda score, thr: score <= thr\r\n return self._F_performance(genuine_score, thresholds, condition)", "def RC_PFL_JPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_PFL_JPRE']]\n min_value = min(Feature_DF.loc[:,'RC_PFL_JPRE'])\n Feature_DF.loc[:,'RC_PFL_JPRE_TRS'] = Feature_DF.loc[:,'RC_PFL_JPRE'].apply(lambda x : (1+x-min_value)**(6/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_PFL_JPRE_TRS']]\n\n return Feature_DF", "def RC_PFL_HPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_PFL_HPRE']]\n min_value = min(Feature_DF.loc[:,'RC_PFL_HPRE'])\n Feature_DF.loc[:,'RC_PFL_HPRE_TRS'] = Feature_DF.loc[:,'RC_PFL_HPRE'].apply(lambda x : (1+x-min_value)**(7/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_PFL_HPRE_TRS']]\n\n return Feature_DF", "def PP_SPF_KNN_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_KNN_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_KNN_PFL'])\n Feature_DF.loc[:,'PP_SPF_KNN_PFL_TRS'] = Feature_DF.loc[:,'PP_SPF_KNN_PFL'].apply(lambda x : (1+x-min_value)**(5/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_KNN_PFL_TRS']]\n\n return Feature_DF", "def PP_PAF_STL_AVG_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_STL_AVG_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_STL_AVG_PFL'])\n Feature_DF.loc[:,'PP_PAF_STL_AVG_PFL_TRS'] = Feature_DF.loc[:,'PP_PAF_STL_AVG_PFL'].apply(lambda x : (1+x-min_value)**(9/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_STL_AVG_PFL_TRS']]\n\n return Feature_DF", "def PP_PAF_FP_AVG_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_FP_AVG']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_FP_AVG'])\n Feature_DF.loc[:,'PP_PAF_FP_AVG_TRS'] = Feature_DF.loc[:,'PP_PAF_FP_AVG'].apply(lambda x : (1+x-min_value)**(5/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_FP_AVG_TRS']]\n\n return Feature_DF", "def PP_SPF_AVG_SUR_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVG_SUR']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVG_SUR'])\n Feature_DF.loc[:,'PP_SPF_AVG_SUR_TRS'] = Feature_DF.loc[:,'PP_SPF_AVG_SUR'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVG_SUR_TRS']]\n\n return Feature_DF", "def fScore(cMatrix):\n if precision(cMatrix) + recall(cMatrix) == 0:\n return precision(cMatrix)\n else:\n return 2 * 
precision(cMatrix) * recall(cMatrix) / (precision(cMatrix) + recall(cMatrix))", "def PP_SPF_AVGRW_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVGRW_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVGRW_PFL'])\n Feature_DF.loc[:,'PP_SPF_AVGRW_PFL_TRS'] = Feature_DF.loc[:,'PP_SPF_AVGRW_PFL'].apply(lambda x : (1+x-min_value)**(9/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVGRW_PFL_TRS']]\n\n return Feature_DF", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def score(df, tmo, label):\n\n\tif str(type(tmo)) != \"<class 'sklearn.ensemble._forest.RandomForestRegressor'>\":\n\t\traise TypeError('Wrong model type!')\n\n\tX_test = df.loc[:, df.columns != label]\n\t\n\t# predict on test data\n\ty_pred = tmo.predict(X_test)\n\tdf['predict'] = y_pred\n\n\treturn df", "def PP_SPF_AVG_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVG']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVG'])\n Feature_DF.loc[:,'PP_SPF_AVG_TRS'] = Feature_DF.loc[:,'PP_SPF_AVG'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVG_TRS']]\n\n return Feature_DF", "def PP_FH_FP_AVG_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_AVG']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_AVG'])\n Feature_DF.loc[:,'PP_FH_FP_AVG_TRS'] = Feature_DF.loc[:,'PP_FH_FP_AVG'].apply(lambda x : (1+x-min_value)**(-1/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_AVG_TRS']]\n\n return Feature_DF", "def PP_SPF_AVG_SIM_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVG_SIM_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVG_SIM_DIST'])\n Feature_DF.loc[:,'PP_SPF_AVG_SIM_DIST_TRS'] = Feature_DF.loc[:,'PP_SPF_AVG_SIM_DIST'].apply(lambda x : (1+x-min_value)**(5/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVG_SIM_DIST_TRS']]\n\n return Feature_DF", "def PP_FH_FP_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_PFL'])\n Feature_DF.loc[:,'PP_FH_FP_PFL_TRS'] = Feature_DF.loc[:,'PP_FH_FP_PFL'].apply(lambda x : (1+x-min_value)**(3/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_PFL_TRS']]\n\n return Feature_DF", "def computeFScores(self, targetLabels, actualLabels):\r\n if self.prMeasures is None:\r\n self.prMeasures = self.computePRMeasures(targetLabels, actualLabels)\r\n if self.prMeasures[0] == 0:\r\n return 0\r\n self.f1score = 2 * self.prMeasures[0] * self.prMeasures[1] / (0.0 + self.prMeasures[0] + self.prMeasures[1])\r\n return self.f1score", "def PP_PAF_BEST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_BEST']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_BEST'])\n Feature_DF.loc[:,'PP_PAF_BEST_TRS'] = Feature_DF.loc[:,'PP_PAF_BEST'].apply(lambda x : (1+x-min_value)**(9/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_BEST_TRS']]\n\n return Feature_DF", "def PP_SPF_TOP_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_TOP_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_TOP_PFL'])\n Feature_DF.loc[:,'PP_SPF_TOP_PFL_TRS'] = Feature_DF.loc[:,'PP_SPF_TOP_PFL'].apply(lambda x : (1+x-min_value)**(5/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_TOP_PFL_TRS']]\n\n return Feature_DF", "def raw_score(self,X,Y):\n return self.rf.score(X,Y)", "def OD_PR_LPAVG_TRS(Dataframe):\n\n Feature_DF = 
Dataframe.loc[:,['HNAME','OD_PR_LPAVG']]\n min_value = min(Feature_DF.loc[:,'OD_PR_LPAVG'])\n Feature_DF.loc[:,'OD_PR_LPAVG_TRS'] = Feature_DF.loc[:,'OD_PR_LPAVG'].apply(lambda x : (1+x-min_value)**(9/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','OD_PR_LPAVG_TRS']]\n\n return Feature_DF", "def RC_SUR_SPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_SUR_SPRE']]\n min_value = min(Feature_DF.loc[:,'RC_SUR_SPRE'])\n Feature_DF.loc[:,'RC_SUR_SPRE_TRS'] = Feature_DF.loc[:,'RC_SUR_SPRE'].apply(lambda x : (1+x-min_value)**(6/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_SUR_SPRE_TRS']]\n\n return Feature_DF", "def RC_SUR_JPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_SUR_JPRE']]\n min_value = min(Feature_DF.loc[:,'RC_SUR_JPRE'])\n Feature_DF.loc[:,'RC_SUR_JPRE_TRS'] = Feature_DF.loc[:,'RC_SUR_JPRE'].apply(lambda x : (1+x-min_value)**(4/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_SUR_JPRE_TRS']]\n\n return Feature_DF", "def PP_SPF_AVG_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVG_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVG_DIST'])\n Feature_DF.loc[:,'PP_SPF_AVG_DIST_TRS'] = Feature_DF.loc[:,'PP_SPF_AVG_DIST'].apply(lambda x : (1+x-min_value)**(5/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVG_DIST_TRS']]\n\n return Feature_DF", "def evaluate(self, predicted_df):\n logging.info(\"Evaluating model: {}\".format(self.model_type))\n y_true = predicted_df[\"user_label\"].as_matrix()\n y_pred = predicted_df[\"label\"].as_matrix()\n\n scores_cols = [col for col in predicted_df.columns if col.startswith(\"scores_\")]\n print(\"scores_cols: {}\".format(scores_cols))\n\n y_pred_scores = predicted_df[scores_cols].copy().fillna(value=0).as_matrix()\n print(\"predicted scores: {}\".format(y_pred_scores))\n y_true_scores = []\n for lab in predicted_df[\"user_label\"]:\n trues = [0 for _ in range(len(scores_cols))]\n if \"scores_\"+lab in scores_cols:\n trues[scores_cols.index(\"scores_\"+lab)] = 1\n y_true_scores.append(trues)\n print(\"true scores: {}\".format(y_true_scores))\n y_true_scores = np.array(y_true_scores)\n\n performance = {\"model\": self.model_type, \"description\": self.description}\n if 'categorical_accuracy' in self.metrics:\n logging.info(\"Calculating categorical accuracy for {}\".format(self))\n performance['categorical_accuracy'] = sklearn.metrics.accuracy_score(y_true,\n y_pred) # np.mean(y_pred == y_true)\n if 'fmeasure' in self.metrics:\n logging.info(\"Calculating fmeasure for {}\".format(self))\n performance['fmeasure'] = sklearn.metrics.f1_score(y_true, y_pred, average=self.metrics_average)\n if 'MRR' in self.metrics:\n logging.info(\"Calculating MRR for {}\".format(self))\n performance['MRR'] = sklearn.metrics.label_ranking_average_precision_score(y_true_scores, y_pred_scores)\n logging.info(\"Calculated performance: {}\".format(performance))\n print(performance)\n return pd.DataFrame(performance, index=[0])", "def get_score(predictions_df: pd.DataFrame,\n real_df: pd.DataFrame,\n k: int = 5) -> float:\n y_pred = predictions_df.genres.apply(lambda x: x.split(\" \"))\n y_real = real_df.genres.apply(lambda x: x.split(\" \"))\n return mapk(y_real, y_pred, k)", "def PP_BL_AVGF_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_BL_AVGF']]\n min_value = min(Feature_DF.loc[:,'PP_BL_AVGF'])\n Feature_DF.loc[:,'PP_BL_AVGF_TRS'] = Feature_DF.loc[:,'PP_BL_AVGF'].apply(lambda x : (1+x-min_value)**(7/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_BL_AVGF_TRS']]\n\n return Feature_DF", "def 
PP_PAF_AP_AVG_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_AP_AVG']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_AP_AVG'])\n Feature_DF.loc[:,'PP_PAF_AP_AVG_TRS'] = Feature_DF.loc[:,'PP_PAF_AP_AVG'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_AP_AVG_TRS']]\n\n return Feature_DF", "def RC_DIST_SPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_DIST_SPRE']]\n min_value = min(Feature_DF.loc[:,'RC_DIST_SPRE'])\n Feature_DF.loc[:,'RC_DIST_SPRE_TRS'] = Feature_DF.loc[:,'RC_DIST_SPRE'].apply(lambda x : (1+x-min_value)**(2/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_DIST_SPRE_TRS']]\n\n return Feature_DF", "def RC_SUR_HPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_SUR_HPRE']]\n min_value = min(Feature_DF.loc[:,'RC_SUR_HPRE'])\n Feature_DF.loc[:,'RC_SUR_HPRE_TRS'] = Feature_DF.loc[:,'RC_SUR_HPRE'].apply(lambda x : (1+x-min_value)**(5/6))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_SUR_HPRE_TRS']]\n\n return Feature_DF", "def PP_PAF_SP_AVG_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_SP_AVG']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_SP_AVG'])\n Feature_DF.loc[:,'PP_PAF_SP_AVG_TRS'] = Feature_DF.loc[:,'PP_PAF_SP_AVG'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_SP_AVG_TRS']]\n\n return Feature_DF", "def PP_SPF_AVG_GO_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVG_GO']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVG_GO'])\n Feature_DF.loc[:,'PP_SPF_AVG_GO_TRS'] = Feature_DF.loc[:,'PP_SPF_AVG_GO'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVG_GO_TRS']]\n\n return Feature_DF", "def run_score(df, tmo_path, score_path, label):\n\n\ttry:\n\t\t# load the model\n\t\twith open(tmo_path, 'rb') as f:\n\t\t\ttmo = pickle.load(f)\n\texcept OSError:\n\t\tlogger.error(\"Cannot open %s\", tmo_path)\n\texcept Exception as e:\n\t\tlogger.error(e)\n\n\tlogger.info(\"Scoring the trained model...\")\n\n\tdata = score(df, tmo, label)\n\n\t# write score results\n\tdata.to_csv(score_path, index=False)\n\tlogger.info('Model scoring results saved to %s', score_path)", "def RC_GO_JPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_GO_JPRE']]\n min_value = min(Feature_DF.loc[:,'RC_GO_JPRE'])\n Feature_DF.loc[:,'RC_GO_JPRE_TRS'] = Feature_DF.loc[:,'RC_GO_JPRE'].apply(lambda x : (1+x-min_value)**(9/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_GO_JPRE_TRS']]\n\n return Feature_DF", "def PP_BL_AVG_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_BL_AVG']]\n min_value = min(Feature_DF.loc[:,'PP_BL_AVG'])\n Feature_DF.loc[:,'PP_BL_AVG_TRS'] = Feature_DF.loc[:,'PP_BL_AVG'].apply(lambda x : (1+x-min_value)**(-3/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_BL_AVG_TRS']]\n\n return Feature_DF", "def get_r_score(self):\n return self.r_score", "def PP_PAF_STL_B_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_STL_B_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_STL_B_PFL'])\n Feature_DF.loc[:,'PP_PAF_STL_B_PFL_TRS'] = Feature_DF.loc[:,'PP_PAF_STL_B_PFL'].apply(lambda x : (1+x-min_value)**(9/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_STL_B_PFL_TRS']]\n\n return Feature_DF", "def RC_LOC_JPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_LOC_JPRE']]\n min_value = min(Feature_DF.loc[:,'RC_LOC_JPRE'])\n Feature_DF.loc[:,'RC_LOC_JPRE_TRS'] = Feature_DF.loc[:,'RC_LOC_JPRE'].apply(lambda x : (1+x-min_value)**(5/6))\n 
Feature_DF = Feature_DF.loc[:,['HNAME','RC_LOC_JPRE_TRS']]\n\n return Feature_DF", "def RC_LOC_SPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_LOC_SPRE']]\n min_value = min(Feature_DF.loc[:,'RC_LOC_SPRE'])\n Feature_DF.loc[:,'RC_LOC_SPRE_TRS'] = Feature_DF.loc[:,'RC_LOC_SPRE'].apply(lambda x : (1+x-min_value)**(2/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_LOC_SPRE_TRS']]\n\n return Feature_DF", "def PP_BL_AVGF_SUR_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_BL_AVGF_SUR']]\n min_value = min(Feature_DF.loc[:,'PP_BL_AVGF_SUR'])\n Feature_DF.loc[:,'PP_BL_AVGF_SUR_TRS'] = Feature_DF.loc[:,'PP_BL_AVGF_SUR'].apply(lambda x : (1+x-min_value)**(-9/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_BL_AVGF_SUR_TRS']]\n\n return Feature_DF", "def RC_DIST_JPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_DIST_JPRE']]\n min_value = min(Feature_DF.loc[:,'RC_DIST_JPRE'])\n Feature_DF.loc[:,'RC_DIST_JPRE_TRS'] = Feature_DF.loc[:,'RC_DIST_JPRE'].apply(lambda x : (1+x-min_value)**(1/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_DIST_JPRE_TRS']]\n\n return Feature_DF", "def PP_PAF_FP_AVGRW_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_FP_AVGRW']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_FP_AVGRW'])\n Feature_DF.loc[:,'PP_PAF_FP_AVGRW_TRS'] = Feature_DF.loc[:,'PP_PAF_FP_AVGRW'].apply(lambda x : (1+x-min_value)**(5/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_FP_AVGRW_TRS']]\n\n return Feature_DF", "def RC_DIST_HPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_DIST_HPRE']]\n min_value = min(Feature_DF.loc[:,'RC_DIST_HPRE'])\n Feature_DF.loc[:,'RC_DIST_HPRE_TRS'] = Feature_DF.loc[:,'RC_DIST_HPRE'].apply(lambda x : (1+x-min_value)**(9/10))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_DIST_HPRE_TRS']]\n\n return Feature_DF", "def RC_GO_SPRE_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_GO_SPRE']]\n min_value = min(Feature_DF.loc[:,'RC_GO_SPRE'])\n Feature_DF.loc[:,'RC_GO_SPRE_TRS'] = Feature_DF.loc[:,'RC_GO_SPRE'].apply(lambda x : (1+x-min_value)**(9/10))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_GO_SPRE_TRS']]\n\n return Feature_DF", "def PredictRunwayRMSE(DF, predictor, input_func, name=None):\r\n if name:\r\n print 'Predicted win for %s:' % name\r\n weighted1 = 0\r\n weighted2 = 0\r\n total_weight = 0\r\n for df in DF:\r\n series_in = input_func(df)\r\n assert all(len(s) == len(series_in[0]) for s in series_in)\r\n X, _ = Transform(series_in, [])\r\n df['prediction'] = predictor.predict(X)\r\n assert len(df.prediction) == len(series_in[0]), (len(df.prediction), len(series_in[0]))\r\n\r\n filter_runway = (df.actual_runway_arrival < df.actual_gate_arrival)\r\n golden_runway = df.actual_runway_arrival[filter_runway]\r\n r1 = util.RMSE(golden_runway, df.last_era_update[filter_runway])\r\n r2 = util.RMSE(golden_runway, (df.last_era_update + df.prediction)[filter_runway])\r\n w = len(df.last_era_update[filter_runway])\r\n weighted1 += r1 * w\r\n weighted2 += r2 * w\r\n total_weight += w\r\n #print 'Runway: %.2f' % (r1 - r2)\r\n\r\n weighted_score = ((weighted1 - weighted2) / total_weight)\r\n print 'Weighted: %.4f' % weighted_score\r\n return weighted_score", "def PP_FH_FP_BIN_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_BIN']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_BIN'])\n Feature_DF.loc[:,'PP_FH_FP_BIN_TRS'] = Feature_DF.loc[:,'PP_FH_FP_BIN'].apply(lambda x : (1+x-min_value)**(3/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_BIN_TRS']]\n\n return 
Feature_DF", "def PP_FH_FP_AVGRW_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_AVGRW']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_AVGRW'])\n Feature_DF.loc[:,'PP_FH_FP_AVGRW_TRS'] = Feature_DF.loc[:,'PP_FH_FP_AVGRW'].apply(lambda x : (1+x-min_value)**(9/10))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_AVGRW_TRS']]\n\n return Feature_DF", "def RC_SUR_AVG_SUR_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_SUR_AVG_SUR']]\n Feature_DF.loc[:,'RC_SUR_AVG_SUR_TRS'] = Feature_DF.loc[:,'RC_SUR_AVG_SUR'].pow(4)\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_SUR_AVG_SUR_TRS']]\n\n return Feature_DF", "def RC_GO_AVG_GO_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_GO_AVG_GO']]\n min_value = min(Feature_DF.loc[:,'RC_GO_AVG_GO'])\n Feature_DF.loc[:,'RC_GO_AVG_GO_TRS'] = Feature_DF.loc[:,'RC_GO_AVG_GO'].apply(lambda x : (1+x-min_value)**(5/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_GO_AVG_GO_TRS']]\n\n return Feature_DF", "def PP_SPF_TOP_SUR_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_TOP_SUR']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_TOP_SUR'])\n Feature_DF.loc[:,'PP_SPF_TOP_SUR_TRS'] = Feature_DF.loc[:,'PP_SPF_TOP_SUR'].apply(lambda x : (1+x-min_value)**(9/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_TOP_SUR_TRS']]\n\n return Feature_DF", "def JS_S_FPRW_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','JS_S_FPRW']]\n min_value = min(Feature_DF.loc[:,'JS_S_FPRW'])\n Feature_DF.loc[:,'JS_S_FPRW_TRS'] = Feature_DF.loc[:,'JS_S_FPRW'].apply(lambda x : (1+x-min_value)**(3/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','JS_S_FPRW_TRS']]\n\n return Feature_DF", "def PP_SPF_AVGRW_SUR_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVGRW_SUR']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVGRW_SUR'])\n Feature_DF.loc[:,'PP_SPF_AVGRW_SUR_TRS'] = Feature_DF.loc[:,'PP_SPF_AVGRW_SUR'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVGRW_SUR_TRS']]\n\n return Feature_DF", "def JS_J_FPRW_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','JS_J_FPRW']]\n min_value = min(Feature_DF.loc[:,'JS_J_FPRW'])\n Feature_DF.loc[:,'JS_J_FPRW_TRS'] = Feature_DF.loc[:,'JS_J_FPRW'].apply(lambda x : (1+x-min_value)**(5/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','JS_J_FPRW_TRS']]\n\n return Feature_DF", "def PP_FH_FP_SUR_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_SUR']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_SUR'])\n Feature_DF.loc[:,'PP_FH_FP_SUR_TRS'] = Feature_DF.loc[:,'PP_FH_FP_SUR'].apply(lambda x : (1+x-min_value)**(-2/9))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_SUR_TRS']]\n\n return Feature_DF", "def fit_rf_model(df, X_train, y_train, X_test, y_test, mask_test):\n print (\"**** RANDOM FOREST Grid Search ****\")\n random_forest_grid = {'max_depth': [3, None],\n 'max_features': ['sqrt', 'log2', round(X_train.shape[1]/3), None],\n 'min_samples_split': [2, 4],\n 'min_samples_leaf': [1, 2, 4],\n 'bootstrap': [True, False],\n 'n_estimators': [100,300,500],\n 'random_state': [10]}\n\n rf_gridsearch = GridSearchCV(RandomForestRegressor(),\n random_forest_grid,\n n_jobs=-1,\n verbose=True,\n scoring='neg_mean_squared_error')\n rf_gridsearch.fit(X_train, y_train)\n print(\"Best Parameters:\", rf_gridsearch.best_params_)\n print(' ')\n\n best_rf_model = rf_gridsearch.best_estimator_\n\n feature_importance = {}\n for label, importance in zip(X_train.columns, best_rf_model.feature_importances_):\n feature_importance[label] = 
importance\n print(\"Sorted Feature Importance:\")\n sorted_feature_imp = sorted(feature_importance.items(), key=lambda x: (-x[1]))\n for e in sorted_feature_imp:\n print(e)\n\n y_pred_test = best_rf_model.predict(X_test)\n df_test = pd.concat([df[mask_test][['player','wkts','year1_wkts_pm']].reset_index(),\n pd.DataFrame(y_pred_test).reset_index()],axis=1,)\n df_test = df_test.drop('index',axis=1)\n df_test.columns = ['player','wkts','wkts_baseline','wkts_exp']\n\n df_by_player = df_test.groupby('player').sum()\n\n print(' ')\n print('Explained Variance (RF model): ' + str(explained_variance_score(df_by_player.wkts,df_by_player.wkts_exp)))\n print('Explained Variance (Baseline): ' + str(explained_variance_score(df_by_player.wkts,df_by_player.wkts_baseline)))\n print('----')\n print('Mean Squared Error (RF model): ' + str(mean_squared_error(df_by_player.wkts,df_by_player.wkts_exp)))\n print('Mean Squared Error (Baseline): ' + str(mean_squared_error(df_by_player.wkts,df_by_player.wkts_baseline)))\n print('----')\n print(' ')", "def PP_PAF_EDL_PFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_EDL_PFL']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_EDL_PFL'])\n Feature_DF.loc[:,'PP_PAF_EDL_PFL_TRS'] = Feature_DF.loc[:,'PP_PAF_EDL_PFL'].apply(lambda x : (1+x-min_value)**(-7/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_EDL_PFL_TRS']]\n\n return Feature_DF", "def PP_SPF_L2_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_L2']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_L2'])\n Feature_DF.loc[:,'PP_SPF_L2_TRS'] = Feature_DF.loc[:,'PP_SPF_L2'].apply(lambda x : (1+x-min_value)**(7/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_L2_TRS']]\n\n return Feature_DF", "def PP_SPF_TOP_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_TOP']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_TOP'])\n Feature_DF.loc[:,'PP_SPF_TOP_TRS'] = Feature_DF.loc[:,'PP_SPF_TOP'].apply(lambda x : (1+x-min_value)**(9/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_TOP_TRS']]\n\n return Feature_DF", "def RC_PP_JPRE_JPFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_PP_JPRE_JPFL']]\n Feature_DF.loc[:,'RC_PP_JPRE_JPFL_TRS'] = Feature_DF.loc[:,'RC_PP_JPRE_JPFL'].apply(lambda x : (1+x)**(-1/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_PP_JPRE_JPFL_TRS']]\n\n return Feature_DF", "def evaluate(self):\n self.df['Score'] = self.df[self.review_col].apply(self.analyzer)\n\n return self.df", "def PP_SPF_AVGRW_SIM_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVGRW_SIM_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVGRW_SIM_DIST'])\n Feature_DF.loc[:,'PP_SPF_AVGRW_SIM_DIST_TRS'] = Feature_DF.loc[:,'PP_SPF_AVGRW_SIM_DIST'].apply(lambda x : (1+x-min_value)**(7/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVGRW_SIM_DIST_TRS']]\n\n return Feature_DF", "def performance_matrix(df):\r\n total = df.count()\r\n nP = df.filter((F.col('prediction') == 1)).count()\r\n nN = df.filter((F.col('prediction') == 0)).count()\r\n TP = df.filter((F.col('prediction') == 1) & (F.col('label') == 1)).count()\r\n FP = df.filter((F.col('prediction') == 1) & (F.col('label') == 0)).count()\r\n FN = df.filter((F.col('prediction') == 0) & (F.col('label') == 1)).count()\r\n TN = df.filter((F.col('prediction') == 0) & (F.col('label') == 0)).count()\r\n \r\n print('num positive: {}'.format(nP))\r\n print('num negative: {}'.format(nN))\r\n print(\"True Positives:\", TP)\r\n print(\"True Negatives:\", TN)\r\n print(\"False Positives:\", FP)\r\n 
print(\"False Negatives:\", FN)\r\n print('accuracy: {}'.format((TP + TN) / total))\r\n \r\n if TP == 0:\r\n print(\"Precision: 0\")\r\n print(\"Recall: 0\")\r\n \r\n else:\r\n print('recall: {}'.format(TP / (TP + FN)))\r\n print('precision: {}'.format(TP / (TP + FP)))", "def evalBaseline(self, df = None):\n \n if (df is None):\n self.r_b = self.df.merge(self.df_user[[\"user ind\", \"b_u\"]], on = \"user ind\")\n self.r_b = self.r_b.merge(self.df_item[[\"item ind\", \"b_i\"]], on = \"item ind\")\n self.r_b[\"baseline\"] = self.r_mean + self.r_b[\"b_u\"] + self.r_b[\"b_i\"]\n \n \n return self.r_b[[\"user id\", \"item id\", \"baseline\"]]\n \n else:\n df = df.merge(self.df_user, on = \"user id\").merge(self.df_item, on = \"item id\")\n df[\"baseline\"] = self.r_mean + df[\"b_u\"] + df[\"b_i\"]\n \n # clip the score to the interval [1, 5]\n df[\"baseline\"] = np.minimum(np.maximum(df[\"baseline\"], 1), 5)\n \n return df[[\"user id\", \"item id\", \"baseline\"]]", "def _F_performance(self, score, thresholds, condition):\r\n if type(thresholds) is float:\r\n thresholds = self._compute_thresholds(thresholds)\r\n F = np.zeros(shape=(1, len(thresholds)))\r\n impostors = 0\r\n L = len(score)\r\n for count, thr in enumerate(thresholds):\r\n N = 0\r\n for idx in range(0, L):\r\n N += condition(score[idx], thr)\r\n F[0, count] = N / L\r\n return F[0]", "def RC_DIST_AVG_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_DIST_AVG_DIST']]\n Feature_DF.loc[:,'RC_DIST_AVG_DIST_TRS'] = Feature_DF.loc[:,'RC_DIST_AVG_DIST'].apply(lambda x : (1+x)**(-5/6))\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_DIST_AVG_DIST_TRS']]\n\n return Feature_DF", "def scoreRsrc( self, rr ):\r\n result = 0.0\r\n for tt in self.getSched( )[rr.getid( )]:\r\n for se in tt:\r\n result += 1\r\n print( \"INFO: Value for %s: %s \" % ( rr, result ) )\r\n return( result )", "def PP_SPF_D_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_D']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_D'])\n Feature_DF.loc[:,'PP_SPF_D_TRS'] = Feature_DF.loc[:,'PP_SPF_D'].apply(lambda x : (1+x-min_value)**(1/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_D_TRS']]\n\n return Feature_DF", "def PP_SPF_AVGRW_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVGRW']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVGRW'])\n Feature_DF.loc[:,'PP_SPF_AVGRW_TRS'] = Feature_DF.loc[:,'PP_SPF_AVGRW'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVGRW_TRS']]\n\n return Feature_DF", "def _chrf_score_compute(total_preds_char_n_grams: Dict[int, Tensor], total_preds_word_n_grams: Dict[int, Tensor], total_target_char_n_grams: Dict[int, Tensor], total_target_word_n_grams: Dict[int, Tensor], total_matching_char_n_grams: Dict[int, Tensor], total_matching_word_n_grams: Dict[int, Tensor], n_order: float, beta: float) ->Tensor:\n chrf_f_score = _calculate_fscore(total_matching_char_n_grams, total_matching_word_n_grams, total_preds_char_n_grams, total_preds_word_n_grams, total_target_char_n_grams, total_target_word_n_grams, n_order, beta)\n return chrf_f_score", "def score_sc1(self, prediction_file):\n fh = TempFile()\n gs1, _ = self.download_gs()\n script = self.classpath + os.sep + \"DREAM_Olfaction_scoring_Q1.pl\"\n cmd = \"perl %s %s %s %s\"\n cmd = cmd % (script, prediction_file, fh.name, gs1)\n shellcmd(cmd)\n df = pd.read_csv(fh.name, sep='\\t', index_col=None).ix[0]\n fh.delete()\n return df\n\n\n # score sub1 = (zint +zple +zdec)/3\n # sigma_int = 0.0787\n # sigma_ple = 0.176\n 
# signa_dec = 0.0042\n\n # final is average of zscores", "def RC_PP_PFLA_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_PP_PFLA']]\n Feature_DF.loc[:,'RC_PP_PFLA_TRS'] = Feature_DF.loc[:,'RC_PP_PFLA'].pow(1/10)\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_PP_PFLA_TRS']]\n\n return Feature_DF", "def PP_PAF_AP_AVGRW_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_AP_AVGRW']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_AP_AVGRW'])\n Feature_DF.loc[:,'PP_PAF_AP_AVGRW_TRS'] = Feature_DF.loc[:,'PP_PAF_AP_AVGRW'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_AP_AVGRW_TRS']]\n\n return Feature_DF", "def PP_FH_FP_SIM_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_SIM_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_SIM_DIST'])\n Feature_DF.loc[:,'PP_FH_FP_SIM_DIST_TRS'] = Feature_DF.loc[:,'PP_FH_FP_SIM_DIST'].apply(lambda x : (1+x-min_value)**(4/9))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_SIM_DIST_TRS']]\n\n return Feature_DF", "def PP_PAF_BEST_GOPFL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_BEST_GOPFL']]\n Feature_DF.loc[:,'PP_PAF_BEST_GOPFL_TRS'] = Feature_DF.loc[:,'PP_PAF_BEST_GOPFL'].pow(4/5)\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_BEST_GOPFL_TRS']]\n\n return Feature_DF", "def PP_PAF_SP_AVGRW_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_PAF_SP_AVGRW']]\n min_value = min(Feature_DF.loc[:,'PP_PAF_SP_AVGRW'])\n Feature_DF.loc[:,'PP_PAF_SP_AVGRW_TRS'] = Feature_DF.loc[:,'PP_PAF_SP_AVGRW'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_PAF_SP_AVGRW_TRS']]\n\n return Feature_DF", "def PP_SPF_L1_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_L1']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_L1'])\n Feature_DF.loc[:,'PP_SPF_L1_TRS'] = Feature_DF.loc[:,'PP_SPF_L1'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_L1_TRS']]\n\n return Feature_DF", "def PP_SPF_TOP_SIM_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_TOP_SIM_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_TOP_SIM_DIST'])\n Feature_DF.loc[:,'PP_SPF_TOP_SIM_DIST_TRS'] = Feature_DF.loc[:,'PP_SPF_TOP_SIM_DIST'].apply(lambda x : (1+x-min_value)**(9/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_TOP_SIM_DIST_TRS']]\n\n return Feature_DF", "def score(self, ref_im):\n \n if isinstance(self.res, type(None)):\n raise Exception('Result is not yet aviable.')\n \n shift = register_translation(ref_im, self.res)[0]\n shifted_res = fourier_shift(np.fft.fft2(self.res), shift)\n shifted_res = np.real(np.fft.ifft2(shifted_res))\n \n mse = np.linalg.norm(shifted_res - ref_im)\n drange = np.max(shifted_res) - np.min(shifted_res)\n ssim = compare_ssim(ref_im, shifted_res, data_range=drange)\n \n return mse, ssim", "def get_tuned_f1(results_df):\n df = results_df\n scores = []\n f1s = []\n lambdas = np.arange(.5, 5, .05)\n\n def add_weighted(df, lam):\n \"\"\" Calculates different weighted PMI values after already having mut_inf scores \"\"\"\n df['mut_inf_weighted'] = df.mut_inf + (lam - 1) * (df.head_conditional + df.tail_conditional) / 2.\n\n for lam in lambdas:\n ss = StandardScaler()\n add_weighted(df, lam=lam)\n model = GaussianMixture(2, n_init=1)\n dat = ss.fit_transform(df[['mut_inf_weighted']])\n pred = model.fit_predict(dat)\n score = model.aic(dat)\n f1 = f1_score((model.means_.argmax() == df.label), pred)\n scores.append(score)\n f1s.append(f1)\n\n scores = 
np.array(scores)\n f1s = np.array(f1s)\n lam = lambdas[scores.argmax()]\n\n optimal_lambda = lambdas[scores.argmax()]\n optimal_f1 = f1s[scores.argmax()]\n return optimal_f1, optimal_lambda", "def PP_SPF_AVGRW_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVGRW_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVGRW_DIST'])\n Feature_DF.loc[:,'PP_SPF_AVGRW_DIST_TRS'] = Feature_DF.loc[:,'PP_SPF_AVGRW_DIST'].apply(lambda x : (1+x-min_value)**(5/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVGRW_DIST_TRS']]\n\n return Feature_DF", "def PP_SPF_SEC_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_SEC']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_SEC'])\n Feature_DF.loc[:,'PP_SPF_SEC_TRS'] = Feature_DF.loc[:,'PP_SPF_SEC'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_SEC_TRS']]\n\n return Feature_DF", "def PP_SPF_TOP_GO_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_TOP_GO']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_TOP_GO'])\n Feature_DF.loc[:,'PP_SPF_TOP_GO_TRS'] = Feature_DF.loc[:,'PP_SPF_TOP_GO'].apply(lambda x : (1+x-min_value)**(5/2))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_TOP_GO_TRS']]\n\n return Feature_DF", "def PP_SPF_AVGRW_GO_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_AVGRW_GO']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_AVGRW_GO'])\n Feature_DF.loc[:,'PP_SPF_AVGRW_GO_TRS'] = Feature_DF.loc[:,'PP_SPF_AVGRW_GO'].apply(lambda x : (1+x-min_value)**(8/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_AVGRW_GO_TRS']]\n\n return Feature_DF", "def PP_FH_FP_GO_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_GO']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_GO'])\n Feature_DF.loc[:,'PP_FH_FP_GO_TRS'] = Feature_DF.loc[:,'PP_FH_FP_GO'].apply(lambda x : (1+x-min_value)**(-2/9))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_GO_TRS']]\n\n return Feature_DF", "def PP_SPF_D1_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_D1']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_D1'])\n Feature_DF.loc[:,'PP_SPF_D1_TRS'] = Feature_DF.loc[:,'PP_SPF_D1'].apply(lambda x : (1+x-min_value)**(7/6))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_D1_TRS']]\n\n return Feature_DF", "def PP_FH_FP_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_FH_FP_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_FH_FP_DIST'])\n Feature_DF.loc[:,'PP_FH_FP_DIST_TRS'] = Feature_DF.loc[:,'PP_FH_FP_DIST'].apply(lambda x : (1+x-min_value)**(1/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_FH_FP_DIST_TRS']]\n\n return Feature_DF", "def f1_score(model_id, test_set_id, rubric_id):\n result = {'true_positive': 0, 'false_positive': 0, 'true_negative': 0, 'false_negative': 0}\n # right answers\n answers = db.get_rubric_answers(test_set_id, rubric_id)\n # rubrication results\n rubrication_result = db.get_rubrication_result(model_id, test_set_id, rubric_id)\n\n for key in rubrication_result:\n if rubrication_result[key] == answers[key]:\n if rubrication_result[key] == 1:\n result['true_positive'] += 1\n else:\n result['true_negative'] += 1\n else:\n if rubrication_result[key] == 1:\n result['false_positive'] += 1\n else:\n result['false_negative'] += 1\n if (result['true_positive'] + result['false_positive']) > 0:\n result['precision'] = result['true_positive'] / (result['true_positive'] + result['false_positive'])\n else:\n result['precision'] = 0\n if (result['true_positive'] + result['false_negative']) > 0:\n result['recall'] = 
result['true_positive'] / (result['true_positive'] + result['false_negative'])\n else:\n result['recall'] = 0\n if (result['precision'] + result['recall']) > 0:\n result['f1'] = 2 * result['precision'] * result['recall'] / (result['precision'] + result['recall'])\n else:\n result['f1'] = 0\n return result", "def JS_J_HJ_SPAVG_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','JS_J_HJ_SPAVG']]\n min_value = min(Feature_DF.loc[:,'JS_J_HJ_SPAVG'])\n Feature_DF.loc[:,'JS_J_HJ_SPAVG_TRS'] = Feature_DF.loc[:,'JS_J_HJ_SPAVG'].apply(lambda x : (1+x-min_value)**(-9/5))\n Feature_DF = Feature_DF.loc[:,['HNAME','JS_J_HJ_SPAVG_TRS']]\n\n return Feature_DF", "def relative_quality(self, id, scores, neighbors_list_highdim):\n neighbors_highdim = neighbors_list_highdim[id]\n score = scores[id]\n avg_score = np.mean([scores[i] for i in neighbors_highdim])\n relative_quality_score = score / avg_score\n return relative_quality_score", "def f1_score(y_true, y_pred, threshold, macro = False, eps = 1e-9):\n\n y_pred = torch.ge(y_pred.float(), threshold).float()\n\n y_true = y_true.float()\n\n tp_l = (y_pred * y_true).sum(0).float()\n\n fp_l = (y_pred * (1 - y_true)).sum(0).float()\n\n fn_l = ((1 - y_pred) * y_true).sum(0).float()\n\n precision_label = tp_l.div(tp_l + fp_l + eps)\n\n recall_label = tp_l.div(tp_l + fn_l + eps)\n\n if macro:\n\n f1_macro = torch.mean((precision_label * recall_label).div(precision_label + recall_label + eps) * 2)\n\n return f1_macro.item(), torch.mean(precision_label).item(), torch.mean(recall_label).item()\n\n else: \n\n tp = tp_l.sum()\n\n fp = fp_l.sum()\n\n fn = fn_l.sum()\n\n precision = tp / (tp + fp + eps)\n\n recall = tp / (tp + fn + eps)\n\n f1_micro = (precision * recall).div(precision + recall + eps) * 2\n\n return f1_micro.item(), precision.item(), recall.item()", "def dataset_quality_score(data_matrix, threshold=0.2, good_days=None,\n use_advanced=True):\n if good_days is None:\n if use_advanced:\n good_days = daily_missing_data_advanced(data_matrix, threshold=threshold)\n else:\n good_days = daily_missing_data_simple(data_matrix, threshold=threshold)\n score = np.sum(good_days) / data_matrix.shape[1]\n return score", "def RC_PP_PFLEP_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_PP_PFLEP']]\n Feature_DF.loc[:,'RC_PP_PFLEP_TRS'] = Feature_DF.loc[:,'RC_PP_PFLEP'].pow(6/7)\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_PP_PFLEP_TRS']]\n\n return Feature_DF", "def PP_SPF_TOP_DIST_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_SPF_TOP_DIST']]\n min_value = min(Feature_DF.loc[:,'PP_SPF_TOP_DIST'])\n Feature_DF.loc[:,'PP_SPF_TOP_DIST_TRS'] = Feature_DF.loc[:,'PP_SPF_TOP_DIST'].apply(lambda x : (1+x-min_value)**(9/4))\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_SPF_TOP_DIST_TRS']]\n\n return Feature_DF", "def f1_score(prediction, ground_truth):\n return precision_recall_f1(prediction, ground_truth)[2]", "def CC_REC_NUM_LT3_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','CC_REC_NUM_LT3']]\n min_value = min(Feature_DF.loc[:,'CC_REC_NUM_LT3'])\n Feature_DF.loc[:,'CC_REC_NUM_LT3_TRS'] = Feature_DF.loc[:,'CC_REC_NUM_LT3'].apply(lambda x : (1+x-min_value)**(-3/8))\n Feature_DF = Feature_DF.loc[:,['HNAME','CC_REC_NUM_LT3_TRS']]\n\n return Feature_DF", "def data_transform_rfm(self) :\n \n is_built_step = False\n if self._encoder_rfm is None:\n is_built_step = True \n \n #-------------------------------------------------------------------------\n # RFM feature is built\n 
#-------------------------------------------------------------------------\n ser_invoice_date = self._df_invoice_line.InvoiceDate\n \n self.df_invoice_line, df_RFM, self.df_RFM_quantiles, self._day_now \\\n = p5_util.p5_df_rfm_build(self.df_invoice_line, day_now = self._day_now\\\n , df_RFM_threshold=self.df_RFM_quantiles)\n \n self._df_invoice_line.InvoiceDate = ser_invoice_date\n \n #-------------------------------------------------------------------------\n # RFM score is added to dataframe\n #-------------------------------------------------------------------------\n df_merged = pd.merge(self.df_invoice_line\\\n , df_RFM[['CustomerID','RFM']], how='left', on=['CustomerID'])\n\n self._df_invoice_line \\\n = pd.DataFrame(df_merged.values, index = self._df_invoice_line.index\\\n , columns=df_merged.columns)\n \n\n #self._df_invoice_line \\\n #= pd.concat([ self.df_invoice_line,df_RFM[['CustomerID','RFM']] ], axis=1\\\n #,join='inner')\n \n \n #-------------------------------------------------------------------------\n # RFM encoding\n #-------------------------------------------------------------------------\n self._encoder_rfm, df_RFM_encoded \\\n = p5_util.df_rfm_one_hot_encode(df_RFM,'RFM', encoder=self._encoder_rfm)\n\n #-------------------------------------------------------------------------\n # Encoded RFM features are renamed\n #-------------------------------------------------------------------------\n df_customers_rfm, list_col_unchanged \\\n = p5_util.df_rename_columns(df_RFM_encoded, df_RFM_encoded.columns\\\n , 'w_rfm_')\n \n self.strprint(\"df_customers_rfm =\" +str(df_customers_rfm.shape))\n\n #-------------------------------------------------------------------------\n # dataframe with RFM encoded values per customer is dumped\n #-------------------------------------------------------------------------\n if is_built_step is True:\n p5_util.object_dump(df_customers_rfm, self.df_customers_rfm_fileName)\n else :\n self._df_customers_rfm = df_customers_rfm.copy()\n return", "def f1_score(self):", "def get_score(self, solution: np.array) -> float:\n pass" ]
[ "0.6958095", "0.61751866", "0.60585284", "0.5860262", "0.58369166", "0.5765395", "0.57476515", "0.5725581", "0.5717504", "0.5708911", "0.5702308", "0.5689124", "0.5686297", "0.5685354", "0.56756556", "0.5671112", "0.5645583", "0.5639483", "0.56138635", "0.56126195", "0.5591871", "0.558586", "0.55645734", "0.5559015", "0.5542081", "0.55401313", "0.55349123", "0.55247056", "0.5522429", "0.55209035", "0.5513474", "0.5512501", "0.55113596", "0.5506145", "0.55003357", "0.54916275", "0.54895705", "0.54887617", "0.5483196", "0.5479682", "0.54772335", "0.5476744", "0.545181", "0.5450318", "0.54459655", "0.5445379", "0.5439702", "0.54340345", "0.54265815", "0.54244334", "0.5421819", "0.54209435", "0.5419816", "0.5410021", "0.5408646", "0.53993857", "0.5393935", "0.5392057", "0.5384322", "0.53817165", "0.53804994", "0.5378055", "0.53694624", "0.5368241", "0.53576493", "0.53504324", "0.53478706", "0.53416604", "0.53337806", "0.533023", "0.53275967", "0.53220594", "0.53214765", "0.5307145", "0.5295316", "0.5294045", "0.5285411", "0.5274034", "0.52721965", "0.52652884", "0.52558047", "0.52520627", "0.5250812", "0.5248488", "0.52436244", "0.52350974", "0.5232461", "0.5231751", "0.5229915", "0.5214895", "0.52136236", "0.52123487", "0.5193365", "0.51923805", "0.5185531", "0.5183491", "0.5181619", "0.5171984", "0.51695067", "0.51690114" ]
0.60182154
3
This function is used for the validation process. It returns a list of stockCode items and a list of quantities for each item.
def get_order_lists(self, n_items, n_quantities):
    arr_stock_code = self._df_invoice_original.StockCode.unique()
    arr_stock_code = np.random.choice(arr_stock_code, n_items)
    list_stockCode = list(arr_stock_code)
    list_quantities = np.ones(arr_stock_code.shape[0])
    list_quantities *= n_quantities
    return list_stockCode, list_quantities
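A minimal usage sketch of the document function above, not part of the original dataset row: it assumes an object `model` that exposes get_order_lists() as defined here; the instance name and argument values are illustrative only.

# Hypothetical usage; `model` is an assumed instance of the class defining the
# method, and the argument values are arbitrary examples.
list_stockCode, list_quantities = model.get_order_lists(n_items=5, n_quantities=2)
for stock_code, qty in zip(list_stockCode, list_quantities):
    print(stock_code, int(qty))  # each stock code paired with its requested quantity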
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_items(self):\n\n items = []\n\n params = self.request.query_params\n\n if 'items[]' in params:\n items = params.getlist('items[]', [])\n elif 'item' in params:\n items = [params.get('item', None)]\n\n if type(items) not in [list, tuple]:\n items = [items]\n\n valid_ids = []\n\n for item in items:\n try:\n valid_ids.append(int(item))\n except (ValueError):\n pass\n\n # List of StockItems which match provided values\n valid_items = StockItem.objects.filter(pk__in=valid_ids)\n\n return valid_items", "def parse_quantities(quantities):\n codes = []; names = []\n\n for q in quantities:\n c, n = parse_quantity(q)\n codes.append(c)\n names.append(n)\n\n return codes, names", "def test_find_stock_items(self):\n pass", "def quantities_available(quantities):\n available = []\n for q in quantities:\n available.append(quantity_available(q))\n return available", "def getUnitPriceList(self, list_stockCode):\n df = self._df_invoice_original\n\n list_unitPrice = list()\n \n for stockCode in list_stockCode:\n unitPrice = df[df.StockCode==stockCode].UnitPrice.unique()[0]\n list_unitPrice.append(unitPrice)\n return list_unitPrice", "def get_items(self):\n return [item for item in self.items if item.quantity > 0]", "def fill_item_list(self):\n return_list = []\n with Transaction().start(DBNAME, 1):\n self.productlist = self.Product.search([('description', '=', 'Stock'), ('type', '=', 'goods')])\n for i in self.productlist:\n return_list.append(i.template.name)\n return return_list", "def get_item_variants(self, item_id, item_name, start):\n\n item_url = f\"https://www.supremenewyork.com/shop/{item_id}.json\"\n\n item_variants = rq.get(item_url, headers=self.headers, proxies=self.proxy).json()\n\n for stylename in item_variants[\"styles\"]:\n for itemsize in stylename[\"sizes\"]:\n item = [item_name, stylename[\"name\"], itemsize['name'], item_variants[\"description\"], 'https:' + stylename[\"image_url\"], item_url.split('.json')[0]]\n if itemsize[\"stock_level\"] != 0:\n # Checks if it already exists in our instock\n if self.checker(item):\n pass\n else:\n # Add to instock dict\n self.instock.append(item)\n \n # Send a notification to the discord webhook with the in-stock product\n if start == 0:\n print('Sending new Notification')\n self.discord_webhook(item)\n logging.info(msg='Successfully sent Discord notification')\n\n else:\n if self.checker(item):\n self.instock.remove(item)", "def load_stock(self):\n lines = []\n with Transaction().start(DBNAME, 1):\n stock_lines = self.Inventory.search([('state', '=', 'done'), ('location', '=', self.location.id)])\n if stock_lines:\n for i in stock_lines:\n batch = i.batch_number\n for j in i.lines:\n if j.quantity <= 0:\n continue\n dictionary = {}\n dictionary['code'] = j.product.code\n dictionary['item'] = j.product.template.name\n dictionary[\n 'category'] = j.product.template.category.name if j.product.template.category else None\n dictionary['quantity'] = Decimal(j.quantity).quantize(Decimal('0.11')).to_eng()\n dictionary['batch_number'] = batch\n dictionary['supplier'] = j.supplier.name if j.supplier else None\n dictionary['expiry_date'] = j.expiry_date.strftime('%d-%m-%Y') if j.expiry_date else None\n lines.append(dictionary)\n return lines", "def clean(self):\n cleaned_data = super().clean()\n variant = cleaned_data.get('variant')\n quantity = cleaned_data.get('quantity')\n if variant and quantity is not None:\n try:\n variant.check_quantity(quantity)\n except InsufficientStock as e:\n error = forms.ValidationError(\n pgettext_lazy(\n 'Add item 
form error',\n 'Could not add item. '\n 'Only %(remaining)d remaining in stock.' %\n {'remaining': e.item.quantity_available}))\n self.add_error('quantity', error)\n return cleaned_data", "def getItemList(self):\r\n raise AbstractError\r\n return []", "def validate(self, attrs):\n exception_body = []\n for orderline in attrs.get('orderlines', []):\n product = orderline['product']\n\n # If orderline has less units than available, all good.\n if orderline['units'] <= product.units:\n continue\n\n # else error is accumulated\n if product.units > 0:\n exception_body.append({product.name: 'Only {0} units available.'.format(str(product.units))})\n else:\n exception_body.append({product.name: 'Out of stock'})\n\n # If any orderline has problem, reject order.\n if exception_body:\n raise exceptions.PermissionDenied({'errors': exception_body})\n\n return attrs", "def test_CalculateStockItemOrders(self):\n symbol = \"XXXX\"\n\n # Create ActiveStockItem\n activeStockItem = ActiveStockItem(symbol=symbol)\n quantity = 2\n buyStepSize = 1\n activeStockItem.SellStepSize = 2\n activeStockItem.SellStepType = SellDeltaType.FIXED\n activeStockItem.StartPrice = 20.55\n activeStockItem.QuantityMultiplier = 1\n activeStockItem.MaxActiveBuy = 2\n priceCoordinates:List[PriceCoordinate] = []\n priceCoordinates.append(PriceCoordinate(startPrice=0,quantity=quantity, \n buyDeltaType=BuyDeltaType.FIXED, fixedBuyDelta=buyStepSize))\n activeStockItem.PriceCoordinates = priceCoordinates\n\n # Create PortfolioPosition\n portfolioPosition = PortfolioPosition(symbol=symbol)\n portfolioPosition.Quantity = 9\n \n expectedLimitOrders:List[OrderInfo] = [\n OrderInfo(Settings.NewOrderId, symbol, 22.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 21.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 20.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 19.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 18.55, 1, True, True),\n OrderInfo(Settings.NewOrderId, symbol, 16.55, 1, True, False),\n OrderInfo(Settings.NewOrderId, symbol, 15.55, 2, False, False)\n ]\n\n possibleLimitOrders:List[OrderInfo] = self.manageOrdersHelpers.GeneratePossibleLimitOrders(activeStockItem, portfolioPosition.Quantity)\n\n self.assertSequenceEqual(expectedLimitOrders, possibleLimitOrders)\n\n placeOrders, cancelOrders = self.moneyMaker.CalculateStockItemOrders(activeStockItem, [], portfolioPosition)\n\n print(placeOrders)\n\n print(cancelOrders)\n\n for activeStockItem in ActiveStockItems:\n print(activeStockItem.Symbol)", "def stock_availability():\n\tdef update_reserved_qty(bin_data, updates):\n\t\tfor k, v in updates.items():\n\t\t\tif k in bin_data:\n\t\t\t\told_reserved = bin_data[k][\"reserved\"]\n\t\t\t\tnew_reserved = old_reserved + v\n\t\t\t\tbin_data[k][\"reserved\"] = new_reserved\n\t\treturn bin_data\n\n\ttry:\n\t\tstock_for_so = []\n\t\tquery = \"\"\"\n\t\t\tselect so.name, so.customer, soi.item_code, (soi.qty - soi.delivered_qty) as qty\n\t\t\tfrom `tabSales Order` so left join `tabSales Order Item` soi\n\t\t\ton so.name = soi.parent\n\t\t\twhere so.status not in ('Closed', 'Stopped') and so.docstatus = 1\n\t\t\tgroup by so.name, soi.item_code order by so.creation\n\t\t\"\"\"\n\t\tso_data = frappe.db.sql(query, as_dict=True)\n\n\t\t# formatting: sales_data => {\"sales_order\": [{\"item_code\": \"qty\"}]}\n\t\tsales_data = {}\n\t\tfor so in so_data:\n\t\t\tif so.get(\"name\") not in sales_data:\n\t\t\t\tsales_data[so.name] = [{so.item_code: so.qty}]\n\t\t\telse:\n\t\t\t\texisting = 
sales_data[so.name]\n\t\t\t\texisting.append({so.item_code:so.qty})\n\t\t\t\tsales_data[so.name] = existing\n\n\t\t# available stock\n\t\tbin_data = frappe.db.sql(\"\"\"select item_code, sum(actual_qty) as actual_qty\n\t\t\tfrom `tabBin` group by item_code\"\"\")\n\n\t\t# {\"item_code\": {\"bin_qty\", \"reserved\"}}\n\t\tbin_qty = { b[0]:{\"qty\": b[1], \"reserved\": 0} for b in bin_data if b[1] > 0}\n\n\t\t# check sales order wise availability\n\t\tfor so, items in sales_data.items():\n\t\t\tif not frappe.db.get_value(\"Sales Order\", so, \"stock_availability_mail\"):\n\t\t\t\titem_qty = {}\n\t\t\t\tis_stock_available = True\n\t\t\t\tfor item in items:\n\t\t\t\t\titem_code, qty = item.keys()[0], item.values()[0]\n\t\t\t\t\tif item_code in bin_qty:\n\t\t\t\t\t\tif qty <= bin_qty[item_code][\"qty\"] - bin_qty[item_code][\"reserved\"]:\n\t\t\t\t\t\t\titem_qty[item_code] = qty\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tis_stock_available = False\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tis_stock_available = False\n\t\t\t\t\t\tbreak\n\t\t\t\tif is_stock_available:\n\t\t\t\t\t# update_bit_qty_reserved\n\t\t\t\t\tbin_qty = update_reserved_qty(bin_qty, item_qty)\n\t\t\t\t\tstock_for_so.append(so)\n\t\tif len(stock_for_so):\n\t\t\tstock_availability_mail(stock_for_so)\n\texcept Exception as e:\n\t\tfrappe.log_error(message=frappe.get_traceback(), title=\"Stock availability Scheduler failed\")", "def _get_stock_item_ids(cls, *skus):\n return linnapi.inventory.get_stock_item_ids_by_sku(*skus)", "def getStockCodeList(self, list_description=None):\n list_stockCode = list()\n df = self._df_invoice_original\n \n if list_description is None:\n list_stockCode = list(df.StockCode.unique())\n else :\n for description in list_description:\n stockCode = df[df.Description==description].StockCode.unique()[0]\n list_stockCode.append(stockCode)\n return list_stockCode", "def stocks(self):\n return self.quantity - self.reserved", "def test_gather_success(self):\n gathered_items = self.Quant._gather(self.apple, self.test_stock_location_01)\n # Check the number of apple quants returned is correct\n self.assertEqual(len(gathered_items), 3)\n # Check that the products are all of expected type\n self.assertEqual(gathered_items.product_id, self.apple)\n\n # Unfold the returned quants\n _q1, second_quant, _q2 = gathered_items\n # Check when quant_ids is set in the context\n gathered_items_subset = self.Quant.with_context(quant_ids=[second_quant.id])._gather(\n self.apple, self.test_stock_location_01\n )\n self.assertEqual(len(gathered_items_subset), 1)\n self.assertEqual(gathered_items_subset.product_id, self.apple)\n self.assertEqual(gathered_items_subset, second_quant)", "def _validate_qty(values: dict):\n\n if not (quantity := values.get('quantity')):\n raise ValueError(\"Quantity attribute is required.\")\n\n if not (symbol := values.get('symbol')):\n raise ValueError(\"Symbol attribute is required.\")\n\n filter = symbol.filters.lot_size_filter\n # if ONE :=1 and not filter.min_qty <= quantity <= filter.max_qty:\n # ValueError(\"The quantity is not in valid range.\")\n\n if filter.step_size and not is_valid_significant_digits(\n quantity,\n symbol.qty_decimal_precision\n ):\n raise ValueError(\"The quantity precision is not valid.\")\n\n return values", "def test_shopping_cart_has_items(self):\n list_items = self.get_list_of_items()\n\n self.assertTrue(len(self.expected_contents) == len(list_items))\n\n for expected_item, list_item in zip(\n self.expected_contents, list_items):\n item_dict = 
self.get_item_dict(list_item)\n for key in expected_item:\n try:\n list_value = item_dict[key].text\n except AttributeError:\n list_value = item_dict[key]\n self.assertEqual(str(expected_item[key]), list_value)\n self.assertEqual(\n str(self.client.session['cart_cost']),\n self.browser.find_element_by_id('food-cost').text\n )", "def compute_items(self):\n rule = self.rule\n # self.items is the sub-objects, as a list\n if rule.is_terminal():\n self.the_items = [rule]\n elif rule.is_symbol_name():\n self.the_items = [rule]\n elif rule.is_empty():\n self.the_items = []\n elif isinstance(rule, Seq):\n self.the_items = [i for i in rule]\n else:\n raise RuntimeError(\"invalid item object: {}\".format(str(rule)))\n return self.the_items", "def clean_items(self):\n items = self.cleaned_data['items']\n if len(items) < 1:\n v_err('no_items')\n return items", "def _generate_native_quantity_list(self):\n\n return set(self._schema).union(self._native_filter_quantities)", "async def _get_stock_data(self, stocks: list):\n\t\tapi_url = 'https://sandbox.tradier.com/v1/markets/quotes'\n\t\tstocks = ','.join(stocks)\n\t\tif not stocks:\n\t\t\treturn []\n\t\ttoken = await self.bot.get_shared_api_tokens('stocks')\n\t\ttoken = token.get('key', None)\n\t\tif not token:\n\t\t\traise ValueError(\n\t\t\t\t'You need to set an API key!\\n'\n\t\t\t\t'Follow this guide for instructions on how to get one:\\n'\n\t\t\t\t'<https://github.com/Flame442/FlameCogs/blob/master/stocks/setup.md>'\n\t\t\t)\n\t\tparams = {'symbols': stocks}\n\t\theaders = {'Authorization': f'Bearer {token}', 'Accept': 'application/json'}\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(api_url, params=params, headers=headers) as r:\n\t\t\t\ttry:\n\t\t\t\t\tr = await r.json()\n\t\t\t\texcept aiohttp.client_exceptions.ContentTypeError:\n\t\t\t\t\t#This might happen when being rate limited, but IDK for sure...\n\t\t\t\t\traise ValueError('Could not get stock data. 
The API key entered is most likely not valid.')\n\t\tr = r['quotes']\n\t\tif 'quote' not in r:\n\t\t\treturn []\n\t\tr = r['quote']\n\t\tif not isinstance(r, list):\n\t\t\tr = [r]\n\t\tstock = {\n\t\t\tx['symbol']: {\n\t\t\t\t'price': max(1, int(x['last'] * 100)),\n\t\t\t\t#New API does not give this info.\n\t\t\t\t'total_count': None, #int(x['marketCap'] / x['last']) if x['marketCap'] else None\n\t\t\t} for x in r if 'last' in x and x['last'] is not None\n\t\t}\n\t\treturn stock", "def calc_multi_special(self):\r\n\r\n for special_type in self.specials_data_list:\r\n for num in range(special_type.get('num_to_apply', 0)):\r\n discounted_amount = self.basket_item['product_price'] - (self.basket_item['product_price'] *\r\n (1 - special_type['special_discount_rate']))\r\n\r\n try:\r\n self.processed_basket_item_list[num]['specials'].append({'special_code': special_type['special_code'],\r\n 'special_discount': '{0:.2f}'.format(discounted_amount)})\r\n except IndexError:\r\n self.processed_basket_item_list.append({'product_code': self.basket_item['product_code'],\r\n 'product_price': self.basket_item['product_price'],\r\n 'specials': [{'special_code': special_type['special_code'],\r\n 'special_discount': '{0:.2f}'.format(discounted_amount)}]})\r\n\r\n for item in self.processed_basket_item_list:\r\n yield item", "def get_data_of_stocks(self):\n\n indexes_to_remove = []\n # Request data for each stock\n for index, stock in enumerate(self.stock_list):\n stock.get_soups()\n stock.find_data()\n stock.print_report()\n self.print_progress(index)", "def getQuantitys(self, quantityNames):\n selectedQuantities = []\n for quantityName in quantityNames:\n foundQuantities = [q for q in self.quantityList if q.name == quantityName]\n if len(foundQuantities) > 0:\n selectedQuantities.append(foundQuantities[0])\n return selectedQuantities", "def portfolio_checkinput(stock_ticker_list):\n if not isinstance(stock_ticker_list, list):\n raise InvalidTickerlist\n return 0", "def parts_demand(request):\n critical_part = []\n quantity = None\n bom_name = None\n if request.method == 'POST':\n form = PartsDemandForm(request.POST)\n if form.is_valid():\n bom_name = form.cleaned_data['bom']\n quantity = int(form.cleaned_data['quantity'])\n warehouse = form.cleaned_data['warehouse']\n warehouse_obj = Warehouse.objects.get(warehouse_name=warehouse)\n stock = calculate_stock(warehouse_obj)\n parts = get_bom_parts(bom_name)\n print(stock)\n for part in parts:\n part_qty = float(part['Qty'])\n part_name = part['PartName']\n part_number = part['PartNumber']\n if stock.get(part_name):\n av_stock = stock.get(part_name)['total_usable_stock']\n # print(av_stock, quantity, part_qty, quantity * part_qty)\n else:\n av_stock = 0\n critical = int(av_stock) - int(quantity * part_qty)\n if critical <= 0:\n test = {\n \"critical_qty\": critical,\n \"part_number\": part_number,\n \"part_name\": part_name\n }\n critical_part.append(test)\n else:\n form = PartsDemandForm()\n context = {\n 'form': form,\n 'critical_part': critical_part,\n 'quantity': quantity,\n 'bom': bom_name,\n }\n\n return render(request, 'inventory/parts_demand.html', context)", "def __init__(self):\n # note: We could have implemented the list as a dictionary, with\n # the barcode as the key, however if the barcode for the item\n # changes we might have problems.\n self.stocklist = [] # a list of stock items", "def validate(self, value):\n\n current_values = dict(self.queryset.values_list('id', 'quantity'))\n for product_id in self.product_fields.keys():\n 
self.product_fields[product_id]['quantity'] = current_values[product_id]\n\n errors = []\n for (product_id, product_data), chosen_value in zip(self.product_fields.items(), value):\n name = product_data['name']\n int_chosen_val = int(chosen_value)\n if product_data['quantity'] == 0:\n errors.append(\n ValidationError(self.error_messages['out_of_stock'].format(name))\n )\n continue\n if int_chosen_val <= 0:\n errors.append(\n ValidationError(self.error_messages['incorrect_quantity'].format(name))\n )\n continue\n\n if product_data['quantity'] < int_chosen_val:\n errors.append(\n ValidationError(self.error_messages['less_quantity'].format(product_data['quantity'], name))\n )\n continue\n\n if len(errors) > 0:\n raise ValidationError(errors)", "def stock_processor(id, price, title, remaining, totalPackCount, preorder, start, proxy, headers):\n\n r = request_pack_stock(proxy, headers)\n packs = r['data']['searchPackListings']['data']['searchSummary']['data']['data']\n\n for pack in packs:\n item = [pack['id'], pack['title'], pack['price'], pack['remaining'], pack['totalPackCount'], pack['preorder']]\n #print(f'\\n\\nITEM:{item}\\n\\n')\n if pack['remaining'] == remaining: #change back to !=\n # Checks if it already exists in our instock\n if checker(item):\n pass\n else:\n # Add to instock dict\n INSTOCK.append(item)\n print(f'\\n\\nINSTOCK:{INSTOCK}\\n\\n')\n # Send a notification to the discord webhook with the in-stock product\n if start == 0:\n print('Sending new Notification')\n print(item)\n discord_webhook(item)\n logging.info(msg='Successfully sent Discord notification')\n\n else:\n if checker(item):\n INSTOCK.remove(item)", "def clean_stock(self):\n stock = self.cleaned_data.get('stock')\n if stock == 0:\n raise forms.ValidationError(u'Please insert product quantity')\n return stock", "def __parseQuantifiers(self):\n line = self.__nextLine()\n \n while line[0] in (\"e\", \"a\"): \n parts = line.split()\n if len(parts) > 2:\n typ = parts[0]\n if self.__lastQType == None:\n self.__lastQType = typ\n \n elif self.__lastQType == typ:\n self.__log(\"Not changing quantifiers\", \"SEVERE\")\n return False \n else:\n self.__lastQType = typ\n if parts[-1] == \"0\":\n variables = [Variable(x) for x in parts[1:-1] ]\n q = QuantifierList(typ, variables)\n self.__quantifierList.append(q)\n else:\n self.__log(\"Quantifier line not terminated with 0\",\"SEVERE\")\n return False\n else:\n self.__log(\"Quantifier line too short: %s\" % line, \"SEVERE\")\n return False\n \n line = self.__nextLine()\n self.__pushBackLine()\n if self.__lastQType == \"e\":\n return True\n else:\n self.__log(\"Not ending with e quantifier\")\n return False", "def calculate_prices(self, good=None):\n\n stock = self.calculate_init_stock(good)\n buy = self.buying_price()\n\n if stock == 0:\n sell = 0\n buy = buy + (buy * 0.5)\n\n elif stock < 500:\n # mild bug: stock, without selling price\n sell = self.selling_price()\n elif stock >= 500:\n # higher production, lower prices\n sell = self.selling_price() / 2\n buy = buy - (buy * 0.5)\n\n return [buy, sell, stock]", "def req_items_for_store(inventory_store_id, quantity_type):\r\n \r\n req_items = db( ( db.logs_req.inventory_store_id == inventory_store_id ) & \\\r\n ( db.logs_req.id == db.logs_req_item.logs_req_id) & \\\r\n ( db.logs_req_item.item_packet_id == db.logs_req_item.item_packet_id) & \\\r\n ( db.logs_req_item[\"quantity_%s\" % quantity_type] < db.logs_req_item.quantity) & \\\r\n ( db.logs_req_item.deleted == False ) \r\n ).select(db.logs_req_item.id,\r\n 
db.logs_req_item.logs_req_id,\r\n db.logs_req_item.item_id,\r\n db.logs_req_item.quantity,\r\n db.logs_req_item[\"quantity_%s\" % quantity_type],\r\n db.logs_req_item.item_packet_id,\r\n orderby = db.logs_req.date_required | db.logs_req.date, \r\n #groupby = db.logs_req_item.item_id\r\n ) \r\n \r\n # Because groupby doesn't follow the orderby remove any duplicate req_item \r\n # req_items = req_items.as_dict( key = \"logs_req_item.item_id\") <- doensn't work \r\n # @todo: Rows.as_dict function could be extended to enable this functionality instead \r\n req_item_ids = []\r\n unique_req_items = Storage()\r\n for req_item in req_items:\r\n if req_item.item_id not in req_item_ids:\r\n #This item is not already in the dict \r\n unique_req_items[req_item.item_id] = Storage( req_item.as_dict() )\r\n req_item_ids.append(req_item.item_id) \r\n \r\n return unique_req_items", "def create_order_items(self, order_items_list):\n\n item_obj_list =[]\n\n new_item = {}\n \n for item in order_items_list: # loop through items in the list\n \n new_item['item_uid'] = item[\"Order Item Item Uid\"]\n\n # cast quantity to integer and assign\n item_quantity_str = item[\"Order Item Quantity\"]\n if str.isdigit(item_quantity_str):\n new_item['item_quantity'] = int(item_quantity_str)\n else:\n new_item['item_quantity'] = 0\n \n new_item['item_product_id'] = item[\"Order Item Product Id\"]\n new_item['item_product_type'] = item[\"Order Item Product Type\"]\n new_item['item_product_title'] = item[\"Order Item Product Title\"]\n \n # cast return_days to integer and assign\n item_return_days_str = item[\"Order Item Return Days\"]\n if str.isdigit(item_return_days_str):\n new_item['item_return_days'] = int(item_return_days_str)\n else:\n new_item['item_return_days'] = 0\n\n # cast exchnage_days to integer and assign\n item_exchange_days_str = item[\"Order Item Exchange Days\"]\n if str.isdigit(item_exchange_days_str):\n new_item['item_exchange_days'] = int(item_exchange_days_str)\n else:\n new_item['item_exchange_days'] = 0\n\n # item product price\n try:\n new_item['item_product_price'] = Decimal(item['Order Item Product Price'])\n except:\n new_item['item_product_price'] = 0.0\n\n # item basic price\n try:\n new_item['item_basic_price'] = Decimal(item['Order Item Basic Price'])\n except:\n new_item['item_basic_price'] = 0.0\n \n # discount amount\n try:\n new_item['item_discount_amount'] = Decimal(item['Order Item Discount Amount'])\n except:\n new_item['item_discount_amount'] = 0.0\n\n # tax amount\n try:\n new_item['item_tax_amount'] = Decimal(item['Order Item Tax Amount'])\n except:\n new_item['item_tax_amount'] = 0.0\n\n try:\n new_item['item_sub_total'] = Decimal(item['Order Item Sub Total'])\n except:\n new_item['item_sub_total'] = 0.0\n\n #********\n new_item['seller'] = item['seller']\n \n\n item_obj_list.append(copy.deepcopy(new_item))\n new_item.clear()\n\n return item_obj_list", "def pars_input_for_eval(self, input_params:str)->List[str]:\n # buy sell composed eval code\n output: List[str] = [\"\", \"\"]\n bs_params: List[str] = [\"\", \"\"]\n\n # split buy sell logic \n buy_sell_params = input_params.split(\",\")\n\n # set code parts\n method_preposition = \"CheckIndicators.check_\"\n \n bs_params[0] = \"(stocks=self.bt_stocks.iloc[0:val],buy=True{params})\"\n bs_params[1] = bs_params[0].replace(\"True\", \"False\")\n\n # split commands like OR AND and checker names\n valid_eval_buy:List[str] = buy_sell_params[0].split(\"-\")\n valid_eval_sell: List[str] = buy_sell_params[1].split(\"-\") if 
len(buy_sell_params)>1 else []\n\n # spliter for each function parameters\n param_spliter = \"?\"\n # buy and sell aplittedd args iteration\n for i in range(len(buy_sell_params)):\n valid_eval_bs:List[str] = buy_sell_params[i].split(\"-\")\n # iterate over buy and sell variants\n for item in valid_eval_bs:\n if item.lower() == \"or\" or item.lower() == \"and\":\n output[i] = f\"{output[i]} {item} \"\n else:\n fce_fields = item.split(param_spliter)\n # check if input params are numeric or not\n if len(fce_fields) > 1:\n fce_fields[1] = fce_fields[1] if fce_fields[1].isnumeric() else f\"'{fce_fields[1]}'\"\n params = f\", params={fce_fields[1]}\" if len(fce_fields)>1 else \"\"\n bs_params[i] = bs_params[i].format(params=params)\n # check if its number or string\n output[i] = f\"{output[i]}{method_preposition}{fce_fields[0]}{bs_params[i]}\"\n \n # print(output) \n if len(buy_sell_params) < 2:\n output[1] = output[0].replace(\"True\", \"False\")\n\n return output", "def input_materials(type_id: int, quantity: int, me: int = 0, prod_type: str = 'manufacturing', prices: bool = False) -> pd.DataFrame:\n actID = {'manufacturing': 1, 'reaction': 11}\n mats = pd.DataFrame(columns=['type_id', 'type_name', 'quantity'])\n if type_id in quantities.loc[(quantities['activityID'] == actID[prod_type]) & (quantities['productTypeID'] == type_id)].values: # item can be manufactured\n\n if prod_type == 'reaction':\n bpid = productToFormula(type_id)\n\n elif prod_type == 'manufacturing':\n bpid = productToBP(type_id)\n \n else: \n raise ValueError(\"thats not a valid manufacturing type. options are 'reaction', 'manufacturing'\")\n\n qPerRun = quantPerRun(bpid)\n runs = quantity // qPerRun\n\n if runs > 0:\n for _, row in materials.loc[(materials['activityID'] == actID[prod_type]) & (materials['typeID'] == bpid)].iterrows():\n\n if prod_type == 'reaction':\n quant = row['quantity'] * runs\n\n elif prod_type == 'manufacturing':\n quant = me_formula(row['quantity'],me) * runs\n\n mats = mats.append({'type_id': row['materialTypeID'], 'type_name': emt.typeIDToName(row['materialTypeID']), 'quantity': quant}, ignore_index=True)\n \n \n # buys the product instead of manufacturing more of it than needed\n if int(runs * qPerRun) < int(quantity):\n mats = mats.append({'type_id': type_id, 'type_name': emt.typeIDToName(type_id), 'quantity': quantity - (runs * qPerRun)},ignore_index=True)\n \n mats = mats.groupby(['type_id', 'type_name']).sum().reset_index()\n mats = mats.astype({\"type_id\": int, \"quantity\": int})\n\n if prices:\n mats = emt.add_price(mats)\n\n return mats", "def test_add_stock_item(self):\n pass", "def get_basket_items_pricedrop(self, offer_info, actual_volume, product_prices):\n prod_code = offer_info.base_prod_code\n base_prod_vol = actual_volume.get(prod_code.lower())\n\n pricedrop_basket = []\n\n if base_prod_vol >= offer_info.min_vol:\n offer_on_prod = offer_info.offer_on\n if actual_volume.get(offer_on_prod.lower()):\n print(\n f\"Base product volume is greater than minimum required volume & product on offer is also available \"\n f\"in cart..\")\n if offer_info.is_limited:\n print(f\"Limited offer..\")\n if prod_code == offer_on_prod:\n # total_allowed_items_on_offer = Limit Volume of base product * (Offer Product Max Volume/Minimum volume of base product)\n total_allowed_items_on_offer = offer_info.limit_vol * (\n offer_info.offer_prod_volume / offer_info.min_vol)\n max_limit = 1\n for i in range(0, base_prod_vol):\n base_prod_actual_price = product_prices.get(prod_code.lower()).get('price')\n 
pricedrop_basket.append((prod_code, base_prod_actual_price))\n while max_limit <= total_allowed_items_on_offer:\n new_price = (base_prod_actual_price - (offer_info.new_price)) * -1\n pricedrop_basket.append((offer_info.offer_code, new_price))\n max_limit += 1\n else:\n total_allowed_items_on_offer = offer_info.limit_vol * (\n offer_info.offer_prod_volume / offer_info.min_vol)\n for i in range(0, base_prod_vol):\n base_prod_actual_price = product_prices.get(prod_code.lower()).get('price')\n pricedrop_basket.append((prod_code, base_prod_actual_price))\n max_limit = 1\n while max_limit <= total_allowed_items_on_offer:\n offer_onprod_actual_price = product_prices.get(offer_on_prod.lower()).get('price')\n new_price = (base_prod_actual_price - (offer_info.new_price)) * -1\n for j in range(0, actual_volume.get(offer_on_prod).lower()):\n pricedrop_basket.append((offer_on_prod, offer_onprod_actual_price))\n pricedrop_basket.append((offer_info.offer_code, new_price))\n max_limit += 1\n else:\n print(f\"Unlimited offer..\")\n if prod_code == offer_on_prod:\n for i in range(0, base_prod_vol):\n base_prod_actual_price = product_prices.get(prod_code.lower()).get('price')\n pricedrop_basket.append((prod_code, base_prod_actual_price))\n new_price = (base_prod_actual_price - (offer_info.new_price))*-1\n pricedrop_basket.append((offer_info.offer_code, new_price))\n else:\n for i in range(0, base_prod_vol):\n base_prod_actual_price = product_prices.get(prod_code.lower()).get('price')\n pricedrop_basket.append((prod_code, base_prod_actual_price))\n\n offer_onprod_actual_price = product_prices.get(offer_on_prod.lower()).get('price')\n new_price = (offer_onprod_actual_price - (offer_info.new_price)) * -1\n\n for j in range(0, actual_volume.get(offer_on_prod).lower()):\n pricedrop_basket.append((offer_on_prod, offer_onprod_actual_price))\n pricedrop_basket.append((offer_info.offer_code, new_price))\n\n return pricedrop_basket", "def get_validated_pairs_data(self):\n validated_pairs_prices = []\n for pair in self.__validated_pairs:\n stock1 = pair[0]\n stock2 = pair[1]\n validated_pairs_prices.append(self.__price_data[stock1])\n validated_pairs_prices.append(self.__price_data[stock2])\n\n validated_pairs_prices = pd.DataFrame(validated_pairs_prices).T\n\n return validated_pairs_prices", "def getQuantityList(self, infoFilePath):\n quList = []\n with open(infoFilePath, 'r')as infoFile:\n line = infoFile.readline()\n while line != '':\n if 'File.At.' 
in line:\n name = line.split(' ', 2)[2].rstrip()\n type = infoFile.readline().split(' ', 2)[2].rstrip()\n quList.append(Quantity(name, type))\n line = infoFile.readline()\n return quList", "def _get_consumption_issues(self):\n issues = []\n if self.env.context.get('skip_consumption', False) or self.env.context.get('skip_immediate', False):\n return issues\n for order in self:\n if order.consumption == 'flexible' or not order.bom_id or not order.bom_id.bom_line_ids:\n continue\n expected_move_values = order._get_moves_raw_values()\n expected_qty_by_product = defaultdict(float)\n for move_values in expected_move_values:\n move_product = self.env['product.product'].browse(move_values['product_id'])\n move_uom = self.env['uom.uom'].browse(move_values['product_uom'])\n move_product_qty = move_uom._compute_quantity(move_values['product_uom_qty'], move_product.uom_id)\n expected_qty_by_product[move_product] += move_product_qty * order.qty_producing / order.product_qty\n\n done_qty_by_product = defaultdict(float)\n for move in order.move_raw_ids:\n qty_done = move.product_uom._compute_quantity(move.quantity_done, move.product_id.uom_id)\n rounding = move.product_id.uom_id.rounding\n if not (move.product_id in expected_qty_by_product or float_is_zero(qty_done, precision_rounding=rounding)):\n issues.append((order, move.product_id, qty_done, 0.0))\n continue\n done_qty_by_product[move.product_id] += qty_done\n\n for product, qty_to_consume in expected_qty_by_product.items():\n qty_done = done_qty_by_product.get(product, 0.0)\n if float_compare(qty_to_consume, qty_done, precision_rounding=product.uom_id.rounding) != 0:\n issues.append((order, product, qty_done, qty_to_consume))\n\n return issues", "def __init__(self):\n self.quantityList = []", "def item_to_ids(items, user):\r\n sizes = [\"10\", \"12\", \"14\", \"16\", \"25\", \"30\", \"35\", \"40\"]\r\n if not items:\r\n return []\r\n ids = []\r\n names_to_id_product = get_names_preconfigured(user)\r\n for item in items:\r\n for name, product_id in names_to_id_product.items():\r\n # CLEAN TO REMOVE SMALL MEDIUM LARGE, AND STRIP\r\n item = item.strip()\r\n for size in sizes:\r\n if size in item:\r\n if size == \"10\" or size == \"25\":\r\n replace = \"Small\"\r\n elif size == \"12\" or size == \"30\":\r\n replace = \"Medium\"\r\n elif size == \"14\" or size == \"35\":\r\n replace = \"Large\"\r\n elif size == \"16\" or size == \"40\":\r\n replace = \"X-Large\"\r\n item = item.replace(size + '\"', replace).replace(size + \"'\", replace)\r\n # print(item, \" | \", name, editDistanceDP(item, name, len(item), len(name)) / (len(name)))\r\n if edit_distance_dp(item, name, len(item), len(name)) / (len(name)) < .3 or edit_distance_dp(\r\n item.replace(\"Pizza\", \"\"), name.replace(\"Dipping \", \"\"), len(item.replace(\"Pizza\", \"\")),\r\n len(name.replace(\"Dipping \", \"\"))) / (len(name)) < .1:\r\n ids.append(product_id)\r\n break\r\n final_ids = []\r\n for id in ids:\r\n if \"F_\" in id:\r\n variants = ids_to_variants(user)\r\n replace = variants[id][0]\r\n if replace == \"STJUDE\":\r\n replace = \"STJUDE10\"\r\n final_ids.append(replace)\r\n else:\r\n final_ids.append(id)\r\n return final_ids\r\n # order.add_item('P12IPAZA') # add a 12-inch pan pizza\r\n # order.add_item('MARINARA') # with an extra marinara cup\r\n # order.add_item('20BCOKE') # and a 20oz bottle of coke\r\n return ['P12IPAZA', 'MARINARA', '20BCOKE']", "def choose_inventory() -> list:\r\n print(\"What weapon would you like to start with? 
Enter the corresponding number\\n(1) Blaster Pistol\\n\"\r\n \"(2) Blaster Rifle\\n(3) Assault Cannon\\n(4) Sniper Rifle\\n\")\r\n item_list = [\"Blaster Pistol\", \"Blaster Rifle\", \"Assault Cannon\", \"Sniper Rifle\"]\r\n user_input = str(input())\r\n if user_input == \"1\":\r\n return [item_list[0]]\r\n elif user_input == \"2\":\r\n return [item_list[1]]\r\n elif user_input == \"3\":\r\n return [item_list[2]]\r\n elif user_input == \"4\":\r\n return [item_list[3]]\r\n else:\r\n print(\"Please enter a valid item number\")\r\n choose_inventory()", "def __init__(self, item_id=None, product_id=None, stock_id=None, qty=None, is_in_stock=None, is_qty_decimal=None, show_default_notification_message=None, use_config_min_qty=None, min_qty=None, use_config_min_sale_qty=None, min_sale_qty=None, use_config_max_sale_qty=None, max_sale_qty=None, use_config_backorders=None, backorders=None, use_config_notify_stock_qty=None, notify_stock_qty=None, use_config_qty_increments=None, qty_increments=None, use_config_enable_qty_inc=None, enable_qty_increments=None, use_config_manage_stock=None, manage_stock=None, low_stock_date=None, is_decimal_divided=None, stock_status_changed_auto=None):\n self.swagger_types = {\n 'item_id': 'int',\n 'product_id': 'int',\n 'stock_id': 'int',\n 'qty': 'float',\n 'is_in_stock': 'bool',\n 'is_qty_decimal': 'bool',\n 'show_default_notification_message': 'bool',\n 'use_config_min_qty': 'bool',\n 'min_qty': 'float',\n 'use_config_min_sale_qty': 'int',\n 'min_sale_qty': 'float',\n 'use_config_max_sale_qty': 'bool',\n 'max_sale_qty': 'float',\n 'use_config_backorders': 'bool',\n 'backorders': 'int',\n 'use_config_notify_stock_qty': 'bool',\n 'notify_stock_qty': 'float',\n 'use_config_qty_increments': 'bool',\n 'qty_increments': 'float',\n 'use_config_enable_qty_inc': 'bool',\n 'enable_qty_increments': 'bool',\n 'use_config_manage_stock': 'bool',\n 'manage_stock': 'bool',\n 'low_stock_date': 'str',\n 'is_decimal_divided': 'bool',\n 'stock_status_changed_auto': 'int'\n }\n\n self.attribute_map = {\n 'item_id': 'item_id',\n 'product_id': 'product_id',\n 'stock_id': 'stock_id',\n 'qty': 'qty',\n 'is_in_stock': 'is_in_stock',\n 'is_qty_decimal': 'is_qty_decimal',\n 'show_default_notification_message': 'show_default_notification_message',\n 'use_config_min_qty': 'use_config_min_qty',\n 'min_qty': 'min_qty',\n 'use_config_min_sale_qty': 'use_config_min_sale_qty',\n 'min_sale_qty': 'min_sale_qty',\n 'use_config_max_sale_qty': 'use_config_max_sale_qty',\n 'max_sale_qty': 'max_sale_qty',\n 'use_config_backorders': 'use_config_backorders',\n 'backorders': 'backorders',\n 'use_config_notify_stock_qty': 'use_config_notify_stock_qty',\n 'notify_stock_qty': 'notify_stock_qty',\n 'use_config_qty_increments': 'use_config_qty_increments',\n 'qty_increments': 'qty_increments',\n 'use_config_enable_qty_inc': 'use_config_enable_qty_inc',\n 'enable_qty_increments': 'enable_qty_increments',\n 'use_config_manage_stock': 'use_config_manage_stock',\n 'manage_stock': 'manage_stock',\n 'low_stock_date': 'low_stock_date',\n 'is_decimal_divided': 'is_decimal_divided',\n 'stock_status_changed_auto': 'stock_status_changed_auto'\n }\n\n self._item_id = item_id\n self._product_id = product_id\n self._stock_id = stock_id\n self._qty = qty\n self._is_in_stock = is_in_stock\n self._is_qty_decimal = is_qty_decimal\n self._show_default_notification_message = show_default_notification_message\n self._use_config_min_qty = use_config_min_qty\n self._min_qty = min_qty\n self._use_config_min_sale_qty = 
use_config_min_sale_qty\n self._min_sale_qty = min_sale_qty\n self._use_config_max_sale_qty = use_config_max_sale_qty\n self._max_sale_qty = max_sale_qty\n self._use_config_backorders = use_config_backorders\n self._backorders = backorders\n self._use_config_notify_stock_qty = use_config_notify_stock_qty\n self._notify_stock_qty = notify_stock_qty\n self._use_config_qty_increments = use_config_qty_increments\n self._qty_increments = qty_increments\n self._use_config_enable_qty_inc = use_config_enable_qty_inc\n self._enable_qty_increments = enable_qty_increments\n self._use_config_manage_stock = use_config_manage_stock\n self._manage_stock = manage_stock\n self._low_stock_date = low_stock_date\n self._is_decimal_divided = is_decimal_divided\n self._stock_status_changed_auto = stock_status_changed_auto", "def update(self):\n inventoryJson = self.__agent__.getInventoryJson()\n itemsLeft = len(inventoryJson) != 0\n itemTypesInObservation = []\n itemsAdded = []\n itemsDeleted = []\n\n # Loop over all item types in the observation\n while (itemsLeft):\n itemType = inventoryJson[0][\"type\"]\n itemTypesInObservation.append(itemType)\n numOfItemInObs = inventoryJson[0][\"quantity\"]\n\n if itemType not in self.__inventory__: # Add an array of ids for this item type if it was never discovered\n self.__inventory__[itemType] = []\n numOfItemInInv = len(self.__inventory__[itemType])\n\n for i in range(1, len(inventoryJson)): # Loop over remaining items, and for each item of matching type, add to counter\n if inventoryJson[i][\"type\"] == itemType:\n numOfItemInObs += inventoryJson[i][\"quantity\"]\n inventoryJson = [item for item in inventoryJson if item[\"type\"] != itemType] # Remove all of those inventory items\n \n if numOfItemInObs > numOfItemInInv: # Add more items with unique id of this type to inventory\n for i in range(numOfItemInInv, numOfItemInObs):\n newItem = self.addItem(itemType)\n itemsAdded.append(newItem)\n elif numOfItemInObs < numOfItemInInv: # Remove some items of this type from inventory\n for i in range(numOfItemInObs, numOfItemInInv):\n if len(self.__inventory__[itemType]) > 0:\n lostItem = self.__inventory__[itemType].pop(0)\n itemsDeleted.append(lostItem)\n\n # Only perform another iteration if there are more items of different types that we have not yet checked\n if len(inventoryJson) == 0:\n itemsLeft = False\n \n # For any items in the inventory that was not in the observation, set the quantity to 0\n for itemType in self.__inventory__:\n if itemType not in itemTypesInObservation:\n self.__inventory__[itemType].clear()\n\n return (itemsAdded, itemsDeleted)", "def getDescriptionList(self, list_stockCode=None):\n df = self._df_invoice_original\n\n list_description = list()\n if list_stockCode is None :\n list_description = list(df.Description.unique())\n else:\n for stockCode in list_stockCode:\n description = df[df.StockCode==stockCode].Description.unique()[0]\n list_description.append(description)\n \n return list_description", "def filter_my_data(stock_status):\n try:\n data = get_data()\n filtered_data = [item for item in data if item[0] == stock_status]\n return filtered_data\n except Exception:\n return []", "def get_all_stocks():\n url = r\"https://brapi.ga/api/quote/list\"\n response = requests.get(url)\n return [stock[\"stock\"] for stock in response.json()[\"stocks\"]]", "def checkQAPIreq(alist):\n\n blist = []\n for entry in alist:\n blist.append(entry)\n for i in range(len(alist)):\n fitsKeyWords = ['RAWPIREQ', 'RAWGEMQA']\n headerList = getFitsHeader(alist[i], 
fitsKeyWords)\n rawPIreq = headerList[1]\n rawGemQA = headerList[2]\n if rawPIreq in [\"YES\",\"UNKNOWN\"] and rawGemQA in [\"USABLE\",\"UNKNOWN\"]:\n logging.info(alist[i]+' added for processing')\n else:\n logging.info(alist[i]+' excluded, set to USABLE/FAIL')\n blist.remove(alist[i])\n\n return blist", "def inp_item_price(self) -> List[str]:\n \n return [str(input(\"Enter desired price for item: \"))]", "def __init__(self, basket_item, specials_data_list):\r\n\r\n self.basket_item = basket_item\r\n self.specials_data_list = specials_data_list\r\n\r\n self.processed_basket_item_list = []", "def get_basket_items_discount(self, offer_info, actual_volume, product_prices):\n prod_code = offer_info.base_prod_code\n base_prod_vol = actual_volume.get(prod_code.lower())\n\n discount_basket = []\n\n if base_prod_vol >= offer_info.min_vol:\n offer_on_prod = offer_info.offer_on\n if actual_volume.get(offer_on_prod.lower()):\n print(f\"Base product volume is greater than minimum required volume & product on offer is also available \"\n f\"in cart..\")\n if offer_info.is_limited:\n print(f\"Limited offer..\")\n if prod_code == offer_on_prod:\n # total_allowed_items_on_offer = Limit Volume of base product * (Offer Product Max Volume/Minimum volume of base product)\n total_allowed_items_on_offer = offer_info.limit_vol * (offer_info.offer_prod_volume/offer_info.min_vol)\n max_limit = 1\n for i in range(0, base_prod_vol):\n base_prod_actual_price = product_prices.get(prod_code.lower()).get('price')\n discount_basket.append((prod_code, base_prod_actual_price))\n while max_limit <= total_allowed_items_on_offer:\n discounted_price = (base_prod_actual_price *(offer_info.discount_perc/100))*-1\n discount_basket.append((offer_info.offer_code, discounted_price))\n max_limit += 1\n else:\n total_allowed_items_on_offer = offer_info.limit_vol * (offer_info.offer_prod_volume / offer_info.min_vol)\n for i in range(0, base_prod_vol):\n base_prod_actual_price = product_prices.get(prod_code.lower()).get('price')\n discount_basket.append((prod_code, base_prod_actual_price))\n max_limit = 1\n while max_limit <= total_allowed_items_on_offer:\n offer_onprod_actual_price = product_prices.get(offer_on_prod.lower()).get('price')\n discounted_price = (offer_onprod_actual_price *(offer_info.discount_perc/100))*-1\n for j in range(0, actual_volume.get(offer_on_prod.lower())):\n discount_basket.append((offer_on_prod, offer_onprod_actual_price))\n discount_basket.append((offer_info.offer_code, discounted_price))\n max_limit += 1\n else:\n print(f\"Unlimited offer..\")\n if prod_code == offer_on_prod:\n if base_prod_vol > offer_info.min_vol:\n for i in range(0, base_prod_vol):\n base_prod_actual_price = product_prices.get(prod_code.lower()).get('price')\n discount_basket.append((prod_code, base_prod_actual_price))\n if i%2 != 0:\n discounted_price = (base_prod_actual_price *(offer_info.discount_perc/100))*-1\n discount_basket.append((offer_info.offer_code, discounted_price))\n else:\n for i in range(0, base_prod_vol):\n base_prod_actual_price = product_prices.get(prod_code.lower()).get('price')\n discount_basket.append((prod_code, base_prod_actual_price))\n else:\n for i in range(0, base_prod_vol):\n base_prod_actual_price = product_prices.get(prod_code.lower()).get('price')\n discount_basket.append((prod_code, base_prod_actual_price))\n\n offer_onprod_actual_price = product_prices.get(offer_on_prod.lower()).get('price')\n discounted_price = (offer_onprod_actual_price * (offer_info.discount_perc / 100))*-1\n\n for j in range(0, 
actual_volume.get(offer_on_prod.lower())):\n discount_basket.append((offer_on_prod, offer_onprod_actual_price))\n discount_basket.append((offer_info.offer_code, discounted_price))\n\n\n return discount_basket", "def get_com_data_fr_all_stocks(self):\n full_list = self.replace_special_characters_in_list(self.full_stocklist_to_retrieve)\n chunk_of_list = self.break_list_to_sub_list(self.full_stocklist_to_retrieve)\n \n self.temp_full_data_df = None\n for n in chunk_of_list:\n # print the progress\n sys.stdout.write('.')\n\n # set the small chunk of list\n self.set_target_stocks_list(n)\n self.get_com_data()\n\n # convert to dataframe\n self.com_data_allstock_df = pandas.DataFrame(self.com_data_allstock_list)\n self.com_data_allstock_df.rename(columns ={'symbol':'SYMBOL'}, inplace=True)\n \n print 'Done\\n'", "def position_list_query(self, strcode='', stocktype='', pl_ratio_min='',\n pl_ratio_max='', envtype=0):\n if not TRADE_CN.check_envtype_cn(envtype):\n error_str = ERROR_STR_PREFIX + \"the type of environment param is wrong \"\n return RET_ERROR, error_str\n\n stock_code = ''\n if strcode != '':\n ret_code, content = split_stock_str(str(strcode))\n if ret_code == RET_ERROR:\n return RET_ERROR, content\n _, stock_code = content\n\n query_processor = self._get_sync_query_processor(PositionListQueryCN.cn_pack_req,\n PositionListQueryCN.cn_unpack_rsp)\n\n # the keys of kargs should be corresponding to the actual function arguments\n kargs = {'cookie': str(self.cookie),\n 'strcode': str(stock_code),\n 'stocktype': str(stocktype),\n 'pl_ratio_min': str(pl_ratio_min),\n 'pl_ratio_max': str(pl_ratio_max),\n 'envtype': str(envtype)}\n ret_code, msg, position_list = query_processor(**kargs)\n\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n col_list = [\"code\", \"stock_name\", \"qty\", \"can_sell_qty\", \"cost_price\",\n \"cost_price_valid\", \"market_val\", \"nominal_price\", \"pl_ratio\",\n \"pl_ratio_valid\", \"pl_val\", \"pl_val_valid\", \"today_buy_qty\",\n \"today_buy_val\", \"today_pl_val\", \"today_sell_qty\", \"today_sell_val\"]\n\n position_list_table = pd.DataFrame(position_list, columns=col_list)\n\n return RET_OK, position_list_table", "def _get_package_items(self):\r\n mask = \"mask[description,capacity,prices.id,categories[name,id]]\"\r\n package = self.client['Product_Package']\r\n return package.getItems(id=46, mask=mask)", "def check_stock_type(self, instance, product_ids):\n prod_obj = self.env['product.product']\n warehouse = instance.woo_warehouse_id\n products_stock = False\n if product_ids:\n if instance.woo_stock_field.name == 'free_qty':\n products_stock = prod_obj.get_qty_on_hand_ept(warehouse, product_ids.ids)\n elif instance.woo_stock_field.name == 'virtual_available':\n products_stock = prod_obj.get_forecasted_qty_ept(warehouse, product_ids.ids)\n return products_stock", "def get_inputs(self, scope: str) -> List[q.Quantity]:\n\n inputs: List[q.Quantity] = []\n log.debug(\"Getting inputs for {}\".format(self.name))\n for subproducer in self.producers[scope]:\n log.debug(\" --> {} {}\".format(subproducer, subproducer.get_inputs(scope)))\n inputs.extend(subproducer.get_inputs(scope))\n return inputs", "def populate_discard(self):\n with Transaction().start(DBNAME, 1):\n inventory = self.Inventory.search([])\n today = date.today()\n lines = []\n for j in inventory:\n for i in j.lines:\n expiry = i.expiry_date\n if expiry:\n if expiry < today:\n if i.quantity <= 0:\n continue\n dictionary = {}\n dictionary['code'] = i.product.code\n dictionary['item'] = 
i.product.template.name\n dictionary['category'] = i.product.template.category.name\n dictionary['unit'] = i.uom.name\n dictionary['quantity'] = i.quantity\n dictionary['batch_number'] = j.batch_number\n lines.append(dictionary)\n return lines", "def parse_US_Large_Cap():\n stocks = set()\n industries = defaultdict(set)\n with open('US_Large_Cap.txt') as f:\n\n for line in f:\n\n industry_match = re.match(r'.*--\\s*(.*)', line)\n\n if industry_match:\n ind = industry_match.group(1)\n\n stock_match = re.match(r'.*(WIKI/\\S*)', line)\n\n if stock_match:\n s = stock_match.group(1)\n industries[ind].add(s)\n stocks.add(s)\n\n print(\"Distinct Industries = {}\".format(len(industries)))\n print(\"Distinct Stocks = {}\".format(len(stocks)))\n\n quandl_wiki_stocks = set()\n with open('WIKI-datasets-codes.csv') as f:\n for line in f:\n stock_match = re.match(r'.*(WIKI/[^,]*)', line)\n if stock_match:\n s = stock_match.group(1)\n quandl_wiki_stocks.add(s)\n print(\"Distinct Quandl Wiki Stocks = {}\".format(len(quandl_wiki_stocks)))\n\n # remove stocks not in quandl_wiki_stocks\n for v in industries.values():\n v &= quandl_wiki_stocks\n\n return industries", "def prepare_optimization(items,schedule,df_pred):\n itemblocks_to_produce = schedule[itemnames()].sum(0).to_dict()\n blocks_available = schedule.blockid.unique()\n block_order = pd.unique(schedule.blockid)\n forecasted_block_prices = df_pred['forecasted_price'].to_dict()\n actual_block_prices = df_pred['price'].to_dict()\n item_consumptions = items.set_index('item').consumption.to_dict()\n return(itemblocks_to_produce,blocks_available,forecasted_block_prices,\n actual_block_prices,item_consumptions,block_order)", "def validate_product_quantity(item, qty):\n return True", "def get_basket_with_discounts(self, actual_volume, applicable_discounts, residual):\n\n # product_vs_min_volume, product_offer = self.get_product_vs_offer_and_volume(self.discounts)\n pr = Product('items')\n product_prices = pr.product_meta\n\n basket = []\n\n for discount in applicable_discounts:\n offer_info = Offer('discount', discount)\n\n if offer_info.offer_type.lower() == \"discount\":\n temp_basket = self.get_basket_items_discount(offer_info, actual_volume, product_prices)\n for item in temp_basket:\n basket.append(item)\n elif offer_info.offer_type.lower() == \"pricedrop\":\n temp_basket = self.get_basket_items_pricedrop(offer_info, actual_volume, product_prices)\n for item in temp_basket:\n basket.append(item)\n\n if residual:\n for product, volume in residual.items():\n for i in range(0, volume):\n basket.append((product, product_prices.get(product.lower()).get('price')))\n\n return basket", "def getStockData():\n pass", "def _execute(self) -> Tuple[List[PopularItemsOutput]]:\n # Get order table joined with customer table\n order_customer_query = (\n Order.select(\n Order.id.alias(\"order_id\"),\n Order.district_id.alias(\"district_id\"),\n Order.warehouse_id.alias(\"warehouse_id\"),\n Order.entry_date.alias(\"entry_date\"),\n Customer.middle_name.alias(\"middle_name\"),\n Customer.first_name.alias(\"first_name\"),\n Customer.last_name.alias(\"last_name\"),\n )\n .join(\n Customer,\n on=(\n (Order.warehouse_id == Customer.warehouse_id)\n & (Order.district_id == Customer.district_id)\n & (Order.customer_id == Customer.id)\n ),\n )\n .where(\n (Order.warehouse_id == self.warehouse_id)\n & (Order.district_id == self.district_id)\n )\n .order_by(Order.entry_date.desc())\n .limit(self.orders_to_examine)\n .cte(\"order_customer_query\")\n )\n\n # Get order lines with 
maximum quantity, joined with item table\n OrderLineInner: OrderLine = OrderLine.alias()\n order_line_sum_qty_query = (\n OrderLineInner.select(\n OrderLineInner.warehouse_id.alias(\"warehouse_id\"),\n OrderLineInner.district_id.alias(\"district_id\"),\n OrderLineInner.order_id.alias(\"order_id\"),\n fn.SUM(OrderLineInner.quantity).alias(\"sum_qty\"),\n )\n .where(\n (OrderLineInner.warehouse_id == self.warehouse_id)\n & (OrderLineInner.district_id == self.district_id)\n )\n .group_by(\n OrderLineInner.warehouse_id,\n OrderLineInner.district_id,\n OrderLineInner.order_id,\n OrderLineInner.item_id,\n )\n .cte(\"order_line_sum_qty_query\")\n )\n order_line_max_qty_query = (\n order_line_sum_qty_query.select(\n order_line_sum_qty_query.c.order_id,\n fn.MAX(order_line_sum_qty_query.c.sum_qty),\n )\n .group_by(\n order_line_sum_qty_query.c.warehouse_id,\n order_line_sum_qty_query.c.district_id,\n order_line_sum_qty_query.c.order_id,\n )\n .with_cte(order_line_sum_qty_query)\n )\n\n customer_name_field = Case(\n None,\n (\n (\n order_customer_query.c.middle_name.is_null(),\n fn.CONCAT(\n order_customer_query.c.first_name,\n \" \",\n order_customer_query.c.last_name,\n ),\n ),\n ),\n fn.CONCAT(\n order_customer_query.c.first_name,\n order_customer_query.c.middle_name,\n order_customer_query.c.last_name,\n ),\n ).alias(\"customer_name\")\n\n popular_items_query = (\n OrderLine.select(\n order_customer_query.c.order_id,\n order_customer_query.c.entry_date,\n customer_name_field,\n fn.SUM(OrderLine.quantity).alias(\"quantity\"),\n Item.id.alias(\"item_id\"),\n Item.name.alias(\"item_name\"),\n )\n .join(\n order_customer_query,\n on=(\n (\n OrderLine.warehouse_id\n == order_customer_query.c.warehouse_id\n )\n & (\n OrderLine.district_id\n == order_customer_query.c.district_id\n )\n & (OrderLine.order_id == order_customer_query.c.order_id)\n ),\n )\n .join(Item, on=(OrderLine.item_id == Item.id))\n .group_by(\n order_customer_query.c.order_id,\n order_customer_query.c.entry_date,\n customer_name_field,\n Item.id,\n Item.name,\n )\n .having(\n DBTuple(\n order_customer_query.c.order_id, fn.SUM(OrderLine.quantity)\n ).in_(order_line_max_qty_query)\n )\n .order_by(\n order_customer_query.c.order_id.desc(),\n fn.SUM(OrderLine.quantity).desc(),\n )\n .with_cte(order_customer_query)\n )\n\n # Process query output\n return ([result for result in popular_items_query.dicts()],)", "def validate(self, item):\n def _contains_number(text):\n \"\"\"\n Check if it has any number\n\n :param text: string\n :returns: True if there is any number in text\n \"\"\"\n return any((True for n in xrange(10) if str(n) in text))\n\n attempt, pkg_analyzer, journal_and_issue_data = item[:3]\n\n xml_tree = pkg_analyzer.xml\n\n funding_nodes = xml_tree.findall('.//funding-group')\n\n status, description = [models.Status.ok, etree.tostring(funding_nodes[0])] if funding_nodes != [] else [models.Status.warning, 'Missing data: funding-group']\n if status == models.Status.warning:\n ack_node = xml_tree.findall('.//ack')\n ack_text = etree.tostring(ack_node[0]) if ack_node != [] else ''\n\n if ack_text == '':\n description = 'Missing data: funding-group, ack'\n elif _contains_number(ack_text):\n description = '%s has numbers. If it is a contract number, it must be identified in funding-group.' 
% ack_text\n else:\n description = ack_text\n status = models.Status.ok\n\n return [status, description]", "def get_cur_quotes_fr_list(self):\n\n ## full list with special characters take care\n full_list = self.replace_special_characters_in_list(self.full_stocklist_to_retrieve)\n chunk_of_list = self.break_list_to_sub_list(self.full_stocklist_to_retrieve)\n self.temp_full_data_df = None\n for n in chunk_of_list:\n # print the progress\n sys.stdout.write('.')\n\n # set the small chunk of list\n self.set_target_stocks_list(n)\n self.get_cur_quotes()\n \n ## need take care of cases where the return is empty -- will return Missing symbols list\n if not len(self.cur_quotes_df.columns) < len(self.cur_quotes_parm_headers):\n self.store_individual_set_df.append(self.cur_quotes_df)\n if self.temp_full_data_df is None:\n self.temp_full_data_df = self.cur_quotes_df\n else:\n self.temp_full_data_df = self.temp_full_data_df.append(self.cur_quotes_df)\n\n ## Remove the % symbol fr self.temp_full_data_df columns\n self.rm_percent_symbol_fr_cols()\n\n print 'Done\\n'", "def process_quantities(quantities, quantities_values):\n\n # Trivial case when no quantities are defined\n if len(quantities) == 0:\n return dict(), list(), total_derivative\n\n quantity_subs = [(q, q_val) for q, q_val in zip(quantities, quantities_values)]\n quantity_sym, quantity_expr = zip(*quantity_subs)\n quantity_expr = [qty_expr.subs(quantity_subs) for qty_expr in quantity_expr]\n\n # Use substituted expressions to recreate quantity expressions\n quantity_subs = [(str(qty_var), qty_expr) for qty_var, qty_expr in zip(quantity_sym, quantity_expr)]\n # Dictionary for substitution\n quantity_vars = dict(quantity_subs)\n\n # Dictionary for use with mustache templating library\n quantity_list = [{'name': str(qty_var), 'expr': str(qty_expr)}\n for qty_var, qty_expr in zip(quantity_sym, quantity_expr)]\n\n # Function partial that takes derivative while considering quantities\n derivative_fn = ft.partial(total_derivative, dependent_vars=quantity_vars)\n return quantity_vars, quantity_list, derivative_fn", "def stock_market(no_profiles: int) -> tuple:\n all_companies = []\n Stocks = namedtuple(\"Stocks\", 'name symbol open high close company_weight')\n MkValue_ = random.uniform(1000, 50000, 100)\n wts_ = random.uniform(0, 1, 100)\n wts_ = wts_/sum(wts_)\n\n for _ in range(100):\n name = fake.company()\n open_ = round(MkValue_[_]*wts_[_],2)\n close = round(open_ * random.uniform(0.7, 1.15), 2)\n high = round(open_ * random.uniform(0.85, 1.15), 2)\n if high < open_:\n high = open_\n if high < close:\n high = close\n\n all_companies.append(\n Stocks(name=name, symbol=symbol(name), open=open_, high=round(high, 2), close=round(close, 2), company_weight=round(wts_[_], 4)))\n\n stock_index = round(\n sum(x.open * x.company_weight for x in all_companies), 4)\n highest_for_day = round(\n sum(x.high * x.company_weight for x in all_companies), 2)\n lowest_close_for_day = round(\n sum(x.close * x.company_weight for x in all_companies), 2)\n\n # print(f\"\\n------------------------------------Top 100 listed companies on Fake Stock Exchange------------------------------------\")\n # [print(x) for x in sorted(all_companies, key=lambda x:x.symbol)]\n # print(f\"\\n--------------Main details on {date.today()}--------------\")\n # print(f\"\\nStart of the day: {stock_index}\")\n # print(f\"Highest for the day: {highest_for_day}\")\n # print(f\"Lowest close for the day: {lowest_close_for_day}\")\n return sorted(all_companies, key=lambda x: x.symbol), 
stock_index, highest_for_day, lowest_close_for_day", "def prepare_data_with_warehouse(self,from_date,to_date,warehouses,all_products):\n data_dict = {}\n stock_quant_obj=self.env['stock.quant']\n for warehouse in warehouses:\n all_locations = self.get_all_locations(warehouse)\n if not all_locations:\n continue\n \n #here we are finding the opening stock for these we are using base query\n #of inventory at date v10\n result = self.get_product_qty(all_locations,from_date)\n qty_dict = dict((x,y) for x, y in result)\n \n for product in all_products:\n last_sales = ''\n qty_purchase_in_duration = 0\n qty_sales_in_duration = 0\n last_purchase_date = ''\n scrap_location_qty = 0\n adjusted_qty_in_duration = 0\n warehouse_out_qty = 0\n warehouse_in_qty = 0\n# here from result of inventory at date we are seaching for specific product.\n opening_product_qty = qty_dict.get(product.id)\n\n #finding last sales qty\n last_sales = self.find_last_sales_qty(from_date,to_date,warehouse,all_locations,product)\n #finding last purchase date of product\n last_purchase_date = self.find_last_purchase_date(from_date,to_date,all_locations,product)\n #fiding date purchase qty in duration for specific product\n qty_purchase_in_duration = self.find_purchase_qty_in_duration(from_date,to_date,all_locations,product)\n #fiding scrap qty of precific product\n scrap_location_qty = self.find_scap_location_qty(from_date,to_date,product,all_locations)\n #finding sales qty in duration\n qty_sales_in_duration = self.find_sale_qty_in_duration(from_date,to_date,warehouse,all_locations,product)\n #fidning adjusted qty in duration\n adjusted_qty_in_duration = self.find_adjusted_qty_in_duration(from_date, to_date, product, all_locations)\n \n dest_location_lst = self.get_other_wahouse_locations(warehouse)\n \n if any(all_locations) and any(dest_location_lst):\n #fidning warehouse in qty \n warehouse_in_qty = self.find_warehouse_transer_in_qty(product, all_locations, dest_location_lst,from_date,to_date)\n #fidning warehouse out qty for specific product.\n warehouse_out_qty = self.find_warehouse_transer_out_qty(product, all_locations, dest_location_lst,from_date,to_date)\n \n if warehouse_out_qty:\n warehouse_out_qty = warehouse_out_qty and warehouse_out_qty[0][0] or ''\n if warehouse_in_qty:\n warehouse_in_qty = warehouse_in_qty and warehouse_in_qty[0][0] or ''\n \n if adjusted_qty_in_duration:\n adjusted_qty_in_duration = adjusted_qty_in_duration and adjusted_qty_in_duration[0][0] or '' \n if scrap_location_qty:\n scrap_location_qty = scrap_location_qty and scrap_location_qty[0][0] or ''\n \n # if qty_sales_in_duration:\n # qty_sales_in_duration = qty_sales_in_duration and qty_sales_in_duration[0][0] or ''\n # if qty_purchase_in_duration:\n # qty_purchase_in_duration = qty_purchase_in_duration[0][0] or ''\n if last_sales:\n last_sales = datetime.strptime(last_sales and last_sales[0][0], '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y') or ''\n \n if last_purchase_date:\n last_purchase_date = datetime.strptime(last_purchase_date and last_purchase_date[0][0], '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y') or ''\n \n if data_dict.has_key(warehouse.id):\n data_lst=data_dict.get(warehouse.id)\n data_lst.append({'product':product,'sku':product.default_code or '','name':product.name,\n 'Cost':product.standard_price or '','sales_price':product.lst_price or '',\n 'opening_qty':opening_product_qty or 0,'last_sales':last_sales or '',\n 'last_purchase_date':last_purchase_date or '','qty_purchase_in_duration':qty_purchase_in_duration or 0,\n 
'qty_sales_in_duration': qty_sales_in_duration or 0,'scrap_location_qty':scrap_location_qty or 0,\n 'adjusted_qty_in_duration':adjusted_qty_in_duration or 0\n ,'warehouse_in_qty':warehouse_in_qty or 0,\n 'warehouse_out_qty':warehouse_out_qty or 0 \n })\n data_dict.update({warehouse.id:data_lst})\n continue\n data_dict.update({warehouse.id:[{'product':product,'sku':product.default_code or '','name':product.name,\n 'Cost':product.standard_price or '','sales_price':product.lst_price or '',\n 'opening_qty':opening_product_qty or 0,\n 'last_sales':last_sales or '','last_purchase_date':last_purchase_date or '',\n 'qty_purchase_in_duration':qty_purchase_in_duration or 0,\n 'qty_sales_in_duration': qty_sales_in_duration or 0,\n 'scrap_location_qty':scrap_location_qty or 0,\n 'adjusted_qty_in_duration':adjusted_qty_in_duration or 0,\n 'warehouse_in_qty':warehouse_in_qty or 0,\n 'warehouse_out_qty':warehouse_out_qty or 0\n }]})\n return data_dict", "def test_acceptance_sku_item_defined_on_checkout(self):\r\n pattern = re.compile(r\"items: \\[\\{sku: 'sku_\\w{14}', quantity: \\d{1}\\}\\]\",\r\n re.I | re.M)\r\n res = re.search(pattern, self.dom_str)\r\n self.assertTrue(hasattr(res, 'group'),\r\n msg=\"You didn't add the SKU code in the items list.\")", "def _validate_items_existence(elements: List[OrderElementModel]) -> List[int]:\n invalid_elements = []\n with db.session.no_autoflush:\n for elem in elements:\n if not ItemModel.find_by_id(elem.item_id):\n invalid_elements.append(elem.item_id)\n return invalid_elements", "def get_available_items(table):\n\n list_of_items = []\n\n for i in range(len(table)):\n\n table[i][3] = int(table[i][3])\n table[i][4] = int(table[i][4])\n expiration_date = table[i][3] + table[i][4]\n durability = 2017 - expiration_date\n\n if durability <= 0:\n list_of_items.append(table[i])\n\n return list_of_items", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def check_products(self, adi):\r\n results = []\r\n products = self.get_products(adi)\r\n for product in products[\"data\"][\"products\"]:\r\n print(\"Checking product '{}'... 
\".format(product[\"name\"]), end='')\r\n detail = self.get_product_detail(adi, product_id=product[\"productId\"], product_name=product[\"name\"])\r\n if self.rf.valid_product_detail(detail):\r\n print(\"Valid.\")\r\n result = \"Available\"\r\n else:\r\n print(\"INVALID.\")\r\n result = \"Not available\"\r\n results.append([product[\"name\"], result])\r\n return results", "def add_stock_flow(stock_list):\n run = 0\n while run == 0:\n stock_name = None\n stock_ticker = None\n stock_date = None\n stock_quantity = None\n while stock_name is None:\n stock_name = prompt.shortcuts.input_dialog(\n title=\"Stock Name\", text=\"Please type the stock name:\"\n ).run()\n while stock_ticker is None:\n stock_ticker = prompt.shortcuts.input_dialog(\n title=\"Stock Ticker\", text=\"Please type the stock ticker in all caps:\"\n ).run()\n while stock_date is None:\n stock_date = prompt.shortcuts.input_dialog(\n title=\"Stock Purchase Date\",\n text=\"Please type the date you purchased the stock in the form (YYYY,MM,DD) weekends do not work:\"\n ).run()\n year, month, day = map(int, stock_date.split(','))\n stock_date = datetime.date(year, month, day)\n while stock_quantity is None:\n stock_quantity = prompt.shortcuts.input_dialog(\n title=\"Stock Quantity Purchased\", text=\"Please type the quantity of the stock you purchased:\"\n ).run()\n stock_list.append(Stock(stock_name, stock_ticker, stock_quantity, stock_date))\n run = prompt.shortcuts.button_dialog(\n title=\"Add Another Stock\",\n text=\"Would you like to add another stock?\",\n buttons=[(\"Yes\", 0), (\"No\", 1)],\n ).run()\n return stock_list", "def validate_requirement_items(self, key: str, values: t.Sequence) -> t.Sequence:\n request = self.request\n user_id = str(request.user.id) if request else None\n current_value = list(self.requirement_items) if self.requirement_items else []\n if values:\n for item in values:\n if not item.get('created_by') and user_id:\n item['created_by'] = user_id\n if not item.get('created_at'):\n item['created_at'] = datetime_utcnow().isoformat()\n\n if values or current_value:\n requirements_schema = RequirementItems()\n try:\n values = requirements_schema.deserialize(values)\n except colander.Invalid as exc:\n raise ValidationError(message='Invalid payload for requirement_items', name=key)\n\n return values", "def _do_generate_webclient_stocklist(self) -> dict:\n # NOTE: as we want dicts and not Location instances, we go directly to\n # the 'SQL level' (session.execute() and not the 'ORM level' (session.query())\n # of sqlquery.\n loclst = self.get_location_list()\n itmlst = self.get_reagent_item_list()\n itmstat = self.get_reagent_item_status_list()\n\n # create a Dict[locationid, List[reagentitem]] and a Dict[RFID, reagentitem]\n d_d: typing.Dict[typing.Optional[int], typing.List[dict]] = {}\n # rfid_reagitem_dct = ff = {}\n f_f: typing.Dict[str, dict] = {}\n for reag_item in itmlst:\n loc_id = reag_item.get('qcs_location_id', None)\n # we will keep a list of items with None locations... 
should not happen, but does\n # then we add these to the UNKNOWN list later on\n d_d.setdefault(loc_id, []).append(reag_item)\n # if loc_id is not None:\n # else:\n # raise RuntimeError(\"found None location {}\".format(reag_item))\n #\n rfidstr = reag_item.get('rfid', None)\n if rfidstr is not None:\n if rfidstr != 'REPLACE ME':\n f_f.setdefault(rfidstr, reag_item)\n else:\n raise RuntimeError(\"found None location {}\".format(reag_item))\n # unmangling for None...\n # find loc_id for 'UNKNOWN'...\n if None in d_d:\n none_lst = d_d[None]\n del d_d[None]\n flst = [loc for loc in loclst if loc['name'] == 'UNKNOWN']\n assert len(flst) == 1, \"cannot determine 'UNKNOWN' location\"\n unknown_lst = d_d.setdefault(flst[0]['id'], [])\n unknown_lst.extend(none_lst)\n #\n # NOW, create a Dict[locationid, Tuple[locrecord, List[reagentitem]]]\n # which we send to the client\n r_r: typing.Dict[int, typing.Tuple[dict, typing.List[dict]]] = {}\n locid_reagitem_dct = r_r\n for location in loclst:\n loc_id = location.get('id', None)\n r_r[loc_id] = (location, d_d.get(loc_id, []))\n assert len(r_r) == len(loclst), \"problem with location ids!\"\n #\n # collect the state records for each reagent item...\n z_z: typing.Dict[int, list] = {}\n for state in itmstat:\n reag_item_id = state['qcs_reag_item_id']\n # we want to replace the occurred timedate entry with a simple date\n # to present to the user, i.e.\n # 'occurred': '2011-04-20T00:00:00Z' -> '2011-04-20'\n dstr = state['occurred']\n state['occurred'] = dstr.split('T')[0]\n z_z.setdefault(reag_item_id, []).append(state)\n # and evaluate the 'final state' for each reagent item\n ritemdct = {}\n for reag_item in itmlst:\n reag_item_id = reag_item['id']\n state_lst = z_z.get(reag_item_id, None)\n if state_lst is None:\n state_info = None\n else:\n state_info = self.calc_final_state(state_lst)\n # print(\"BLAAA {} {}\".format(reag_item_id, state_info))\n # we eliminate any reagent item that has a state of 'USED_UP'.\n dct, ismissing, hasexpired = state_info\n state_info = None if dct['status'] == 'USED_UP' else state_info\n if state_info is not None:\n ritemdct[reag_item_id] = (reag_item, state_info)\n # else:\n # print(\"skipping {}\".format(reag_item))\n # create a Dict[reagentid, reagent]\n rl = self.get_reagent_list()\n rg = {}\n for reagent in rl:\n # delete the legacy location field in reagents...\n reagent.pop('location', None)\n reagent_id = reagent.get('id', None)\n if reagent_id is not None:\n rg[reagent_id] = reagent\n else:\n raise RuntimeError(\"reagent ID is None\")\n assert len(rg) == len(rl), \"problem with reagent ids!\"\n # \"itmstatlst\": itmstat,\n # finally, sort the loclst according to a hierarchy\n loclst = sortloclist(loclst)\n # , \"rfiddct\": rfid_reagitem_dct}\n return {\"loclst\": loclst, \"locdct\": locid_reagitem_dct,\n \"ritemdct\": ritemdct, \"reagentdct\": rg}", "def processMarketOrders(self):\n try:\n nextRound = self.currentRound+1\n resultsList = []\n master = {}\n self.genMarketStat()\n myMarketStat = self.marketStats[str(self.currentRound)]\n \n # sorted lists of market orders\n master['buyAL'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'AL', 'min':0})\n master['buyEC'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'EC', 'min':0})\n master['buyIA'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'IA', 'min':0})\n master['sellAL'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, 
{'value':'AL', 'max':0})\n master['sellEC'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, {'value':'EC', 'max':0})\n master['sellIA'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, {'value':'IA', 'max':0})\n \n for res in ['AL', 'EC', 'IA']:\n for sellOrder in master['sell%s' % res]:\n # min sell order gets first chance to sell its product\n if sellOrder.amountUsed == sellOrder.amount:\n pass # seller has sold all he wants with this order\n else:\n i = 0\n for buyOrder in master['buy%s' % res]:\n # determine price, allow for bidding on price\n try:\n nextBuyOrder = master['buy%s' % res][i+1]\n if nextBuyOrder.max < buyOrder.max and (nextBuyOrder.max+1) >= sellOrder.min:\n price = nextBuyOrder.max + 1\n else:\n price = buyOrder.max\n except IndexError:\n price = buyOrder.max\n # max buy order gets first chance to buy sellers product\n resultsList.append(self.processMarketTransaction(buyOrder, sellOrder, price))\n i += 1\n \n # set the average market prices for this round\n if getattr(myMarketStat, 'volSold%s' % res) > 0:\n setattr(myMarketStat, 'avgSold%s' % res, (getattr(myMarketStat, 'sumSold%s' % res) / \n getattr(myMarketStat, 'volSold%s' % res)))\n \n # clean up market orders for next round\n for orderID in self.marketOrders.keys():\n myMarketOrder = self.marketOrders[orderID]\n myMarketOrder.cleanUp()\n if myMarketOrder.amount == 0:\n resultsList.append('cancel market Order=%s' % orderID)\n self.cancelMarketOrder(orderID)\n \n return str(resultsList)\n except:\n return 'galaxy->processMarketOrders error'", "def data_convert(items):\n for item in items:\n converted_item = item.copy()\n if 'quantity_available' in item: # convert columns\n converted_item['quantity_available'] =\\\n int(item['quantity_available'])\n\n yield converted_item", "def getFSNDataDict(self):\n \"\"\"Checks all the FSN details and returns a dictionary which can be added to the Piggy bank.\"\"\"\n self.fsnDataList = []\n self.valid = True\n try:\n self.fsn = str(self.lineEditFSN.text()).replace(\",\",\";\").replace(\"'\",\"\").strip()\n if len(self.fsn) == 0:\n self.valid = False\n self.alertMessage(\"User Error\",\"Please enter the FSN.\")\n return []\n except:\n self.alertMessage(\"Runtime Error\",\"Please enter the FSN.\")\n self.lineEditFSN.setFocus()\n self.valid = False\n return []\n try:\n self.type = str(self.comboBoxType.currentText()).replace(\",\",\";\").replace(\"'\",\"\").strip()\n if len(self.type) == 0:\n self.valid = False\n self.alertMessage(\"User Error\",\"Please select a type.\")\n return []\n except:\n self.alertMessage(\"Runtime Error\",\"Please select a type.\")\n self.comboBoxType.setFocus()\n self.valid = False\n return []\n try:\n self.source = str(self.comboBoxSource.currentText()).replace(\",\",\";\").replace(\"'\",\"\").strip()\n if len(self.source) == 0:\n self.valid = False\n self.alertMessage(\"User Error\",\"Please select a type.\")\n return []\n except:\n self.alertMessage(\"Runtime Error\",\"Please select a source.\")\n self.comboBoxSource.setFocus()\n self.valid = False\n return []\n try:\n self.bu = str(self.comboBoxBU.currentText()).replace(\",\",\";\").replace(\"'\",\"\").strip()\n if len(self.bu) == 0:\n self.valid = False\n self.alertMessage(\"User Error\",\"Please select the BU.\")\n return []\n except:\n self.alertMessage(\"Runtime Error\",\"Please select the BU.\")\n self.comboBoxBU.setFocus()\n return []\n try:\n self.supercategory = 
str(self.comboBoxSuperCategory.currentText()).replace(\",\",\";\").replace(\"'\",\"\").strip()\n if len(self.supercategory) == 0:\n self.valid = False\n self.alertMessage(\"User Error\",\"Please select the super-category.\")\n return []\n except:\n self.alertMessage(\"Runtime Error\",\"Please select the super-category.\")\n self.comboBoxSuperCategory.setFocus()\n return []\n try:\n self.category = str(self.comboBoxCategory.currentText()).replace(\",\",\";\").replace(\"'\",\"\").strip()\n if len(self.category) == 0:\n self.valid = False\n self.alertMessage(\"User Error\",\"Please select the category.\")\n return []\n except:\n self.alertMessage(\"Runtime Error\",\"Please select the category.\")\n self.comboBoxCategory.setFocus()\n self.valid = False\n return []\n try:\n self.subcategory = str(self.comboBoxSubCategory.currentText()).replace(\",\",\";\").replace(\"'\",\"\").strip()\n if len(self.subcategory) == 0:\n self.valid = False\n self.alertMessage(\"User Error\",\"Please select the sub-category.\")\n return []\n except:\n self.alertMessage(\"Runtime Error\",\"Please select the sub-category.\")\n self.comboBoxSubCategory.setFocus()\n self.valid = False\n return []\n try:\n self.vertical = str(self.comboBoxVertical.currentText()).replace(\",\",\";\").replace(\"'\",\"\").strip()\n if len(self.vertical) == 0:\n self.valid = False\n self.alertMessage(\"User Error\",\"Please select the vertical.\")\n return []\n except:\n self.alertMessage(\"Runtime Error\",\"Please select the vertical.\")\n self.comboBoxVertical.setFocus()\n self.valid = False\n return []\n try:\n self.brand = str(self.lineEditBrand.text()).replace(\",\",\";\").replace(\"'\",\"\").strip()\n if len(self.brand) == 0:\n self.valid = False\n self.alertMessage(\"User Error\",\"Please enter the brand.\")\n return []\n except:\n self.alertMessage(\"Runtime Error\",\"Please enter the brand.\")\n self.lineEditBrand.setFocus()\n self.valid = False\n return []\n try:\n self.wordcount = int(self.spinBoxWordCount.value())\n if self.wordcount <= 50:\n answer = QtGui.QMessageBox.question(self, \"\"\"Are you sure that's the right word count?\"\"\", \"\"\"Are you sure you'd like to report only %d words for this article?\"\"\" % self.wordcount, QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)\n if answer == QtGui.QMessageBox.No:\n self.valid = False\n except:\n self.alertMessage(\"Runtime Error\",\"Please enter the word count. 
Word count must be an integer.\")\n self.spinBoxWordCount.setFocus()\n self.valid = False\n return []\n try:\n self.uploadlink = str(self.lineEditUploadLink.text()).replace(\",\",\";\").replace(\"'\",\"\").strip()\n if len(self.uploadlink) == 0:\n self.valid = False\n self.alertMessage(\"User Error\",\"Please enter the upload link.\")\n return []\n except:\n self.alertMessage(\"Runtime Error\",\"Please enter the upload link.\")\n self.lineEditUploadLink.setFocus()\n self.valid = False\n return []\n try:\n self.referencelink = str(self.lineEditRefLink.text()).replace(\",\",\";\").replace(\"'\",\"\").strip()\n if len(self.referencelink) == 0:\n self.valid = False\n self.alertMessage(\"User Error\",\"Please enter the reference link.\")\n return []\n except:\n self.alertMessage(\"Runtime Error\",\"Please enter the reference link.\")\n self.lineEditRefLink.setFocus()\n self.valid = False\n return []\n #Success!\n if self.valid:\n writer_name = MOSES.getEmpName(self.user_id)\n writer_email = MOSES.getEmpEmailID(self.user_id)\n active_date = self.getActiveDate()\n self.fsnDataList = {\n \"Article Date\": active_date,\n \"WriterID\": self.user_id,\n \"Writer Name\": writer_name,\n \"Writer Email ID\": writer_email,\n \"FSN\": self.fsn,\n \"Description Type\": self.type,\n \"Source\": self.source,\n \"BU\" : self.bu,\n \"Super-Category\": self.supercategory,\n \"Category\": self.category,\n \"Sub-Category\": self.subcategory,\n \"Vertical\": self.vertical,\n \"Brand\": self.brand,\n \"Word Count\": self.wordcount,\n \"Upload Link\": self.uploadlink,\n \"Reference Link\": self.referencelink,\n \"Rewrite Ticket\": 0,\n \"End Time\": datetime.datetime.now(),\n \"PC User Name\": getpass.getuser(),\n \"Job Ticket\": self.getJobTicket(active_date, self.user_id, self.fsn)\n }\n query_dict = {\n \"Source\":self.source,\n \"Description Type\":self.type,\n \"BU\":self.bu,\n \"Super-Category\":self.supercategory,\n \"Category\": self.category,\n \"Sub-Category\": self.subcategory,\n \"Vertical\": self.vertical\n }\n target = MOSES.getTargetFor(self.user_id, self.password, query_dict, self.getActiveDate(), self.category_tree)\n self.fsnDataList.update({\"Target\": target})\n return self.fsnDataList", "def vertical_production(type_id: int, quantity: int, me: int = 0, prod_type: str = 'manufacturing' , prices: bool = False) -> pd.DataFrame:\n actID = {'manufacturing': 1, 'reaction': 11}\n mats = pd.DataFrame(columns=['type_id', 'type_name', 'quantity'])\n\n if type_id in quantities.loc[(quantities['activityID'] == actID[prod_type]) & (quantities['productTypeID'] == type_id)].values: # item can be manufactured\n\n if prod_type == 'reaction':\n bpid = productToFormula(type_id)\n\n elif prod_type == 'manufacturing':\n bpid = productToBP(type_id)\n \n else: \n raise ValueError(\"thats not a valid manufacturing type. 
options are 'reaction', 'manufacturing'\")\n\n qPerRun = quantPerRun(bpid)\n runs = quantity // qPerRun\n\n if runs > 0:\n\n for _, row in materials.loc[(materials['activityID'] == actID[prod_type]) & (materials['typeID'] == bpid)].iterrows():\n\n if prod_type == 'reaction':\n quant = row['quantity'] * runs\n\n elif prod_type == 'manufacturing':\n quant = me_formula(row['quantity'],me) * runs\n\n mats = mats.append(vertical_production(row['materialTypeID'], quant, prod_type = prod_type) ,ignore_index=True)\n \n \n # buys the product instead of manufacturing more of it than needed\n if int(runs * qPerRun) < int(quantity):\n mats = mats.append({'type_id': type_id, 'type_name': emt.typeIDToName(type_id), 'quantity': quantity - (runs * qPerRun)},ignore_index=True)\n\n else: # item cannot be manufactured\n mats = mats.append({'type_id': type_id, 'type_name': emt.typeIDToName(type_id), 'quantity': quantity}, ignore_index=True)\n\n mats = mats.groupby(['type_id', 'type_name']).sum().reset_index()\n mats = mats.astype({\"type_id\": int, \"quantity\": int})\n\n if prices:\n mats = emt.add_price(mats)\n\n return mats", "def inventory_report(prod_list):\n prod_list = list(set(prod_list))\n x = 0\n price = 0\n weight = 0\n flammability = 0\n stealability = 0\n for item in prod_list:\n x += 1\n price += item.price\n weight += item.weight\n flammability += item.flammability\n if stealability != 'Not so stealable...':\n stealability += 1\n\n avg_price = price / x\n avg_weight = weight / x\n avg_flammability = flammability / x\n print(f'There are {x} unique products in this list. The average price is {avg_price}, '\n f'average weight is {avg_weight},'\n f'and the average flammability is {avg_flammability}.')\n if stealability >= len(prod_list) / 2:\n print('Many of these items are highly stealable!')\n return avg_price, avg_weight, avg_flammability", "def _validate_basket_contents(basket):\n # A basket is expected to have a one item (which in turn will create one Line)\n basket_items = basket.basketitems.all()\n if len(basket_items) == 0:\n raise ValidationError(\n {\"items\": \"No items in basket. Cannot complete checkout.\"}\n )\n if len(basket_items) > 1:\n log.error(\n \"User %s is checking out %d items in their basket. Baskets should only have one BasketItem.\",\n basket.user.email,\n len(basket_items),\n )\n raise ValidationError(\n {\n \"items\": \"Something went wrong with the items being purchased. Please contact support.\"\n }\n )\n basket_item = basket_items[0]\n product = basket_item.product\n if product.is_active is False or product.content_object.live is False:\n log.error(\n \"User %s is checking out with a product in their basket that was not live (%s).\",\n basket.user.email,\n product.content_object.text_id,\n )\n raise ValidationError(\n {\"items\": \"This item cannot be purchased. 
Please contact support.\"}\n )\n product_version = latest_product_version(basket_item.product)\n return basket_item, product, product_version", "def check_stock(self):\n if self.quantity > self.item.quantity:\n return \"%s Please adjust your cart.\" % CartItem.get_insufficient_stock_msg(self.item.quantity)\n return None", "def attribute_validation(cls, values: dict) -> dict:\n if not (total := values.get('total')):\n raise ValueError(\"Total attribute is required.\")\n \n if not (quantity := values.get('quantity')):\n raise ValueError(\"Quantity attribute is required.\")\n \n if not (symbol := values.get('symbol')):\n raise ValueError(\"Symbol attribute is required.\")\n\n filter = symbol.filters.market_lot_size_filter\n # if ONE :=1 and not filter.min_qty <= total <= filter.max_qty:\n # raise ValueError(\"The quantity is not in valid range.\")\n\n if filter.step_size and not is_valid_significant_digits(\n total,\n symbol.qty_decimal_precision\n ):\n raise ValueError(\"The quantity precision is not valid.\")\n\n return values", "def clean(self):\n\n cleaned_data = self.cleaned_data\n if cleaned_data.get('order') and cleaned_data.get('stock') \\\n and cleaned_data.get('volume') and cleaned_data.get('price'):\n t = cleaned_data['trader']\n if cleaned_data['order'] == 'B': # buy order\n open_orders = Order.objects.filter(trader=t,\n order='B', completed=False)\n open_order_value = float(sum([o.volume * o.price for o in open_orders]))\n open_order_value += int(cleaned_data['volume']) * float(cleaned_data['price'])\n\n if open_order_value > t.cash:\n raise ValidationError(\"You don't have enough cash!\")\n\n elif cleaned_data['order'] == 'S': # sell order!\n open_orders = sum(Order.objects.filter(trader=t, order='S',\n stock=cleaned_data['stock'],\n completed=False).values_list('volume', flat=True))\n open_orders += cleaned_data['volume']\n\n if open_orders > t.holding_set.get(stock=cleaned_data['stock']).shares:\n raise ValidationError(\"You don't have enough shares!\")\n return cleaned_data", "def calculate_stock_batch(warehouse=None, safety_stock=None):\n if not warehouse:\n inwards = (\n Inward.objects.values(\n \"part_number__part_name\",\n \"batch_number\"\n ).order_by(\"part_number__part_name\")\n .annotate(\n inw_received_qt_total=Sum(\"received_quantity\"),\n defect_qt_total=Sum(\"defected_quantity\"),\n )\n )\n\n outwards = (\n Outward.objects.values(\n \"part_number__part_name\",\n \"batch_number\"\n ).order_by(\"part_number__part_name\")\n .annotate(outward_qt_total=Sum(\"quantity\"))\n )\n\n else:\n inwards = (\n Inward.objects.filter(to_warehouse_name=warehouse)\n .values(\n \"part_number__part_name\",\n \"batch_number\"\n ).order_by(\"part_number__part_name\")\n .annotate(\n inw_received_qt_total=Sum(\"received_quantity\"),\n defect_qt_total=Sum(\"defected_quantity\"),\n )\n )\n outwards = (\n Outward.objects.filter(from_warehouse_name=warehouse)\n .values(\n \"part_number__part_name\",\n \"batch_number\"\n ).order_by(\"part_number__part_name\")\n .annotate(outward_qt_total=Sum(\"quantity\"))\n )\n\n partnames = []\n\n for inward in inwards:\n partnames.append({\n 'part_name': inward['part_number__part_name'],\n 'inward': inward['inw_received_qt_total'],\n 'defect': inward['defect_qt_total'],\n 'outward': 0,\n 'batch_number': inward['batch_number'],\n 'total_usable_stock': inward['inw_received_qt_total'] - inward['defect_qt_total'],\n 'total_stock': inward['inw_received_qt_total'],\n })\n for outward in outwards:\n outward_partname = outward['part_number__part_name']\n 
outward_batch_number = outward['batch_number']\n for partname in partnames:\n if outward_partname in partname and outward_batch_number in partname:\n partname[outward_partname]['outward'] = \\\n outward['outward_qt_total']\n partname[outward_partname]['total_stock'] -= \\\n partname[outward_partname]['outward']\n\n partname[outward_partname]['total_usable_stock'] -= \\\n partname[outward_partname]['outward']\n\n return (partnames)", "def get_stock(self, name: str=\"all\") -> List[QTableWidgetItem]:\n try:\n if name != \"all\":\n log.debug(f\"Getting the stock for {name.upper()}.\")\n with DBCursor(self.host) as cursor:\n cursor.execute(\"SELECT rowid, name, units, last_buy, cost_price, sell_price FROM items WHERE name = ?\", (name.lower(), ))\n result = cursor.fetchone()\n if result:\n log.debug(\"There was a product named like soo, returning a StoredProduct for it.\")\n return StoredProduct(*result).to_table()\n else:\n raise ProductNotFound(\"There was no product named like so.\")\n else:\n log.debug(\"Getting the stock for all products.\")\n with DBCursor(self.host) as cursor:\n cursor.execute(\"SELECT rowid, name, units, last_buy, cost_price, sell_price FROM items\")\n results = cursor.fetchall()\n if not results:\n log.error(\"There were no products to show at all.\")\n raise ProductsNotFound(\"There are no products to show.\")\n product_list = []\n for product in results:\n product_list.append(StoredProduct(*product).to_table())\n except Exception:\n log.critical(\"An exception was raised.\")\n raise\n else:\n log.debug(\"A StoredProduct list was consumated.\")\n return product_list", "def GetListedEquities(status_func : Optional[Callable[[str], None]] = None,\n progress_func : Optional[Callable[[int, int, dt.datetime], None]] = None) -> List['EquityListing']:\n all_equities = []\n \n page_size = 200\n stocks_url = 'https://www.nasdaq.com/api/v1/screener?page={}&pageSize={}'\n etfs_url = 'https://api.nasdaq.com/api/screener/etf?offset={}'\n \n # We use this variable for HTTP requests to NASDAQ because they do not\n # allow automated HTTP requests. 
This header will simulate a real user\n # accessing their API.\n fake_header = { \n 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'\n }\n\n # --- SECTION: Get basic info for stocks ---i\n if status_func:\n status_func(\"Scraping web for listed stocks\")\n start_time = dt.datetime.now()\n\n # Retrieve the total count of pages to access\n first_page_text = requests.get(stocks_url.format(1, 1), headers=fake_header).text\n total_page_count = int(js.loads(first_page_text)['count'] / page_size + 0.9)\n\n for page_index in range(1, total_page_count):\n # Download JSON file from NASDAQ\n current_page_text = requests.get(stocks_url.format(page_index, page_size), headers=fake_header).text\n current_page_json = js.loads(current_page_text)\n\n # Add all stocks into all_securities\n all_equities.extend([EquityListing(stock['ticker'], stock['company']) for stock in current_page_json['data']])\n\n if progress_func:\n progress_func(page_index, total_page_count - 1, start_time)\n # --- END SECTION ---\n\n # --- SECTION: Get basic info for etfs ---\n if status_func:\n status_func(\"Scraping web for listed ETFs\")\n start_time = dt.datetime.now()\n\n # Retrieve the total count of pages to access\n first_page_text = requests.get(etfs_url.format(0), headers=fake_header).text\n total_page_count = int(js.loads(first_page_text)['data']['records']['totalrecords'] / 50 + 0.9)\n\n for page_index in range(1, total_page_count):\n # Download JSON file from NASDAQ\n current_page_text = requests.get(etfs_url.format((page_index - 1) * 50), headers=fake_header).text\n current_page_json = js.loads(current_page_text)\n\n # Add all ETFs into all_securities\n all_equities.extend([EquityListing(etf['symbol'], etf['companyName'] if etf['companyName'] != None else \"N/A\") for etf in current_page_json['data']['records']['data']['rows']])\n\n if progress_func:\n progress_func(page_index, total_page_count - 1, start_time)\n # --- END SECTION --- \n \n return all_equities", "def validate(self, name: str, expansion: str) -> List:", "def get_put_data(stock_name, expire_time, strike_price):\n date = time.mktime(datetime.datetime.strptime(expire_time, \"%d/%m/%Y\").timetuple())+(16*3600)\n url = 'https://finance.yahoo.com/quote/'+stock_name+'/options?date='+str(int(date))+'&p='+stock_name\n print(url)\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n values = soup.findAll(\"table\")[1].findAll(\"td\")\n\n for i in range(2,len(values),11):\n x = float(str(values[i].contents[0].contents[0]))\n if x == float(strike_price):\n option_link = 'https://finance.yahoo.com/'+str(values[i-2].contents[0])[61:109]\n bid = float(values[i+2].contents[0])\n ask = float(values[i+3].contents[0])\n return bid, ask", "def collect_batch(self, item, quantity, kitchen=None):\n try:\n with Transaction().start(DBNAME, 1):\n item = item\n quantity = quantity\n if kitchen:\n inventory_list = self.Inventory.search([('location', '=', self.kitchen.id)]\n , order=[('batch_number', 'ASC')])\n else:\n inventory_list = self.Inventory.search([('location', '=', self.inventory.id)]\n , order=[('batch_number', 'ASC')])\n batch_list = []\n today = date.today()\n for i in inventory_list:\n lines = i.lines\n for j in lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n if Decimal(j.quantity) >= Decimal(quantity):\n batch_list.append([str(i.batch_number), str(quantity)])\n print 
[str(i.batch_number), str(quantity)]\n return batch_list\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n batch_list.append([str(i.batch_number), str(j.quantity)])\n print [str(i.batch_number), str(j.quantity)]\n return False\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, outputsize='full')\n info.append(i)\n symbols.append(m['2. Symbol'])\n counter += 1\n\n return info, symbols", "def get_stock_data():\n if not os.path.exists('./catalog/stock_data'):\n os.mkdir('./catalog/stock_data')\n \n inventory_data = {}\n inventory_file = './catalog/stock_data/inventory-bro.txt'\n \n download_data = True\n if os.path.exists(inventory_file):\n # Check that inventory file is no more than 1 day old\n filestat = os.stat(inventory_file)\n tm = datetime.datetime.fromtimestamp(filestat.st_mtime)\n today = datetime.datetime.now()\n dt = today - tm\n if dt.days < 1:\n download_data = False\n \n if download_data:\n # Get inventory data from ftp site\n from ftplib import FTP_TLS\n print 'Downloading inventory-bro.txt ....'\n ftps = FTP_TLS('ftp.appareldownload.com')\n ftps.login('Br0d3r', 'Br0d3r2oll')\n ftps.prot_p()\n #ftps.retrlines('LIST')\n ftps.retrbinary('RETR inventory-bro.txt', open(inventory_file, 'wb').write)\n ftps.quit()\n \n print \"Parse inventory-bro.txt ... \"\n first_row = None\n for row in csv.reader(open(inventory_file, 'rb')):\n itemRef = row[4].lower()\n if itemRef == 'style number':\n # save first row to be used as column header\n first_row = row\n continue\n \n source_attribs = [{'attribute_type': 'source', 'attribute_value': 'broderbros'}]\n \n inventory_data.setdefault(itemRef, [])\n \n color = row[8].lower()\n size = row[10].lower()\n \n # Warehouses starts at column 13\n for i in range(13, len(first_row)):\n wh_name = first_row[i]\n options = [\n {'option_type': 'color', 'option_value': color, 'attributes': []},\n {'option_type': 'size', 'option_value': size, 'attributes': []},\n {'option_type': 'warehouse', 'option_value': wh_name, 'attributes': source_attribs, 'shared': True},\n {'option_type': 'vendor', 'option_value': 'broderbros', 'attributes': source_attribs, 'shared': True},\n ]\n inventory_data[itemRef].append({'options': options, 'inventory': row[i]})\n \n # Pricing data\n pricing_tarfile = \"./catalog/stock_data/bro-AllStyles_R06.tar.gz\"\n download_data = True\n if os.path.exists(pricing_tarfile):\n # Check that file is no more than 1 day old\n filestat = os.stat(pricing_tarfile)\n tm = datetime.datetime.fromtimestamp(filestat.st_mtime)\n today = datetime.datetime.now()\n dt = today - tm\n if dt.days < 1:\n download_data = False\n \n if download_data:\n print 'Downloading items.csv for price data ....'\n br = utils.create_browser(1, 2)\n br.open(\"https://www.broderbros.com/cgi-bin/online/webbro/bro-index.w\")\n try:\n # Fill login form\n br.select_form(name = 'frmLogin')\n frm = br.form\n \n ctrl = frm.find_control('userName')\n ctrl.value = USERNAME\n ctrl = frm.find_control('password')\n ctrl.value = PASSWORD\n \n # Submit login form\n if TESTRUN: print 'Submit Login Form'\n \n br.select_form(name = 'frmLogin')\n br.submit()\n except:\n print \"Login form does not exist, please check URL, downloaded html or site is down\"\n return None\n try:\n tar_url = 
\"https://www.broderbros.com/cgi-bin/download/webshr/prod-info-view.w?f=bro-AllStyles_R06.tar.gz\"\n br.retrieve(tar_url, pricing_tarfile)\n except:\n print \"Error when downloading pricing file\"\n return None\n \n try:\n tar = tarfile.open(pricing_tarfile)\n for member in tar.getmembers():\n member.name = member.name.split('/')[-1] # strip directory from filename\n tar.extractall('catalog/stock_data/bro-AllStyles_R06')\n tar.close()\n except:\n print \"Error when extracting items.csv\"\n return None\n \n f_object = open('./catalog/stock_data/bro-AllStyles_R06/items_R06.csv', 'rb')\n #~ f_object = open('items_R06.csv', 'rb')\n \n print \"Parse items_R06.csv ... \"\n for row in csv.reader(f_object):\n itemRef = row[7].lower()\n if itemRef == 'style code':\n continue\n \n size = row[8].lower()\n color = row[11].lower()\n price = row[18]\n \n item_data = inventory_data.get(itemRef)\n if not item_data:\n continue\n # Find data with same size and color\n for var_dict in item_data:\n options = var_dict['options']\n opt_dict = {}\n for opt in options:\n opt_type = opt['option_type']\n opt_value = opt['option_value']\n if opt_type == 'size':\n opt_dict['size'] = opt_value\n elif opt_type == 'color':\n opt_dict['color'] = opt_value\n if opt_dict['size'] == size and opt_dict['color'] == color:\n var_dict['price'] = [{'price_type': 'retail_price', 'price': price}]\n \n f_object.close()\n \n try:\n shutil.rmtree(\"./catalog/stock_data/bro-AllStyles_R06\")\n #~ os.remove(\"./catalog/stock_data/bro-AllStyles_R06.tar.gz\")\n except:\n pass\n \n return inventory_data", "def availableSquares(self):\n List2=[]\n for item in self.all:\n if item.retrieve()==\"\":\n List2.append(item.name())\n return List2", "def convert_items(items):\n for idx in range(len(items)):\n item_name, item_sell_in, item_quality = items[idx].name, items[idx].sell_in, items[idx].quality,\n comp_name = item_name.lower() # the name with which we compare by\n\n new_item = items[idx]\n if 'aged brie' in comp_name:\n new_item = AgedItem(item_name, item_sell_in, item_quality)\n elif 'sulfuras' in comp_name:\n new_item = LegendaryItem(item_name, item_sell_in, item_quality)\n elif 'conjured' in comp_name:\n new_item = ConjuredItem(item_name, item_sell_in, item_quality)\n elif 'backstage passes' in comp_name:\n new_item = BackstagePass(item_name, item_sell_in, item_quality)\n items[idx] = new_item\n\n return items" ]
[ "0.7183774", "0.62451553", "0.6027354", "0.5956186", "0.57162726", "0.5646305", "0.56151676", "0.55878735", "0.55574286", "0.54344946", "0.5425899", "0.54236686", "0.5393303", "0.539217", "0.5386628", "0.53802747", "0.5373666", "0.5363041", "0.53269744", "0.52845365", "0.52759457", "0.5270009", "0.5245161", "0.5220751", "0.5185271", "0.51629645", "0.5150221", "0.514785", "0.51308143", "0.51299", "0.5121119", "0.5108609", "0.5089291", "0.50865686", "0.50828725", "0.5068581", "0.5061538", "0.5060487", "0.50565636", "0.50415516", "0.50349474", "0.5024639", "0.50193", "0.50006807", "0.4999842", "0.49934915", "0.4979841", "0.49756283", "0.49653658", "0.4960353", "0.49590796", "0.49530256", "0.4947917", "0.49452874", "0.49411315", "0.49387658", "0.4929551", "0.49262124", "0.4909425", "0.49043176", "0.48960757", "0.4894772", "0.4893832", "0.48937464", "0.4893406", "0.48930946", "0.48886907", "0.4888156", "0.4872545", "0.48698473", "0.48667163", "0.48602387", "0.48556516", "0.48485705", "0.48446625", "0.48439932", "0.48426735", "0.48362237", "0.48326263", "0.48261026", "0.48194087", "0.48178264", "0.48046905", "0.47994277", "0.4798078", "0.47969702", "0.4796134", "0.479212", "0.4790139", "0.47871917", "0.47843215", "0.4780694", "0.47628358", "0.47621214", "0.4752176", "0.47504598", "0.47496194", "0.47471213", "0.47468454", "0.47461233" ]
0.6709346
1
Builds a projection block as described in Deep Residual Learning for Image Recognition (2015). You can assume the input data will have shape (224, 224, 3). All convolutions inside and outside the blocks should be followed by batch normalization along the channels axis and a rectified linear activation (ReLU), respectively. All weights should use he normal initialization.
def resnet50():
    initializer = K.initializers.he_normal(seed=None)
    X = K.Input(shape=(224, 224, 3))
    # conv1
    layer = K.layers.Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2),
                            padding='same',
                            kernel_initializer=initializer,
                            )(X)
    layer = K.layers.BatchNormalization(axis=3)(layer)
    layer = K.layers.Activation('relu')(layer)
    # conv2_x
    layer = K.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2),
                               padding='same')(layer)
    layer = projection_block(layer, [64, 64, 256], 1)
    for _ in range(2):
        layer = identity_block(layer, [64, 64, 256])
    # conv3_x
    layer = projection_block(layer, [128, 128, 512])
    for _ in range(3):
        layer = identity_block(layer, [128, 128, 512])
    # conv4_x
    layer = projection_block(layer, [256, 256, 1024])
    for _ in range(5):
        layer = identity_block(layer, [256, 256, 1024])
    # conv5_x
    layer = projection_block(layer, [512, 512, 2048])
    for _ in range(2):
        layer = identity_block(layer, [512, 512, 2048])
    layer = K.layers.AveragePooling2D(pool_size=(7, 7),
                                      padding='same')(layer)
    layer = K.layers.Dense(units=1000, activation='softmax',
                           kernel_initializer=initializer,
                           )(layer)
    model = K.models.Model(inputs=X, outputs=layer)
    return model
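# resnet50() above calls projection_block() and identity_block() helpers that are
# not defined in this record. Below is a minimal sketch of projection_block,
# consistent with the spec in the query (1x1 stride-s -> 3x3 -> 1x1 main path plus
# a 1x1 stride-s shortcut, each conv followed by batch normalization on the
# channels axis and ReLU, he_normal weights). The names, signature, and exact
# layout here are illustrative assumptions, and K is assumed to be the same
# Keras alias used by resnet50() above.
def projection_block(A_prev, filters, s=2):
    """Sketch of a ResNet projection block (assumed helper)."""
    F11, F3, F12 = filters
    init = K.initializers.he_normal(seed=None)
    # Main path: 1x1 conv (stride s) -> 3x3 conv -> 1x1 conv, BN after each conv
    X = K.layers.Conv2D(F11, (1, 1), strides=(s, s), padding='same',
                        kernel_initializer=init)(A_prev)
    X = K.layers.BatchNormalization(axis=3)(X)
    X = K.layers.Activation('relu')(X)
    X = K.layers.Conv2D(F3, (3, 3), padding='same',
                        kernel_initializer=init)(X)
    X = K.layers.BatchNormalization(axis=3)(X)
    X = K.layers.Activation('relu')(X)
    X = K.layers.Conv2D(F12, (1, 1), padding='same',
                        kernel_initializer=init)(X)
    X = K.layers.BatchNormalization(axis=3)(X)
    # Shortcut path: 1x1 conv (stride s) + BN so shapes match for the addition
    shortcut = K.layers.Conv2D(F12, (1, 1), strides=(s, s), padding='same',
                               kernel_initializer=init)(A_prev)
    shortcut = K.layers.BatchNormalization(axis=3)(shortcut)
    # Add the two paths, then apply the final ReLU
    X = K.layers.Add()([X, shortcut])
    return K.layers.Activation('relu')(X)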
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _building_block_v1(inputs, filters, training, projection_shortcut, strides,\n data_format):\n shortcut = inputs\n\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n shortcut = batch_norm(inputs=shortcut, training=training,\n data_format=data_format)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=strides,\n data_format=data_format)\n inputs = batch_norm(inputs, training, data_format)\n inputs = tf.nn.relu(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=1,\n data_format=data_format)\n inputs = batch_norm(inputs, training, data_format)\n inputs += shortcut\n inputs = tf.nn.relu(inputs)\n\n return inputs", "def building_block_v2(self, inputs, block_params, training, projection_shortcut,\n half_layer=None, initial_layer=False, no_prenorm=False):\n filters = block_params['filters']\n kernels = block_params['kernels']\n strides = block_params['strides']\n pad_stride1 = block_params['pad_stride1']\n\n shortcut = inputs\n if (not initial_layer) and (not no_prenorm):\n inputs = self.batch_norm_act(inputs, training)\n if projection_shortcut == 'FirstResUnit':\n # For pointnet, projection shortcut is not needed at the First ResUnit.\n # However, BN and Activation is still required at the First ResUnit for\n # pre-activation.\n shortcut = inputs\n projection_shortcut = None\n if self.IsShowModel: self.log(\n 'shortcut after activation identity for pointnet first res unit')\n if half_layer:\n projection_shortcut = None\n\n # The projection shortcut should come after the first batch norm and ReLU\n # since it performs a 1x1 convolution.\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n with tf.variable_scope('c0'):\n inputs = self.conv1d2d3d(inputs, filters, kernels, strides, pad_stride1)\n self.log_tensor_c(inputs, kernels, strides, pad_stride1,\n tf.get_variable_scope().name)\n if half_layer: return inputs\n inputs = self.batch_norm_act(inputs, training)\n\n with tf.variable_scope('c1'):\n inputs = self.conv1d2d3d(inputs, filters, kernels, 1, 's')\n self.log_tensor_c(inputs, kernels, 1, 's',\n tf.get_variable_scope().name)\n\n if self.residual and (not initial_layer):\n assert inputs.shape == shortcut.shape\n if self.IsShowModel: self.log('Add shortcut*%0.1f'%(self.res_scale))\n return inputs * self.res_scale + shortcut\n else:\n return inputs", "def _building_block_v2(inputs, filters, training, \n projection_shortcut, strides,\n data_format):\n shortcut = inputs\n inputs = batch_norm(inputs, training, data_format)\n inputs = tf.nn.relu(inputs)\n ENDING_POINTS.append(inputs)\n\n # The projection shortcut should come after the first batch norm and ReLU\n # since it performs a 1x1 convolution.\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n inputs = conv3d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, \n time_kernel_size=3, strides=strides,\n data_format=data_format, time_stride=strides)\n\n inputs = batch_norm(inputs, training, data_format)\n inputs = tf.nn.relu(inputs)\n inputs = conv3d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, \n time_kernel_size=3, strides=1,\n data_format=data_format)\n\n return inputs + shortcut", "def building_block(inputs, filters, is_training, projection_shortcut, strides, data_format):\n\n\n shortcut = inputs\n inputs = batch_norm_relu(inputs, is_training, data_format)\n\n\n # The projection shortcut should come after the first batch norm 
and ReLU\n # since it performs a 1x1 convolution.\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=strides,\n data_format=data_format)\n\n inputs = batch_norm_relu(inputs, is_training, data_format)\n\n inputs = tf.layers.dropout(inputs = inputs, rate = _DROPOUT_RATE)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=1,\n data_format=data_format)\n\n return inputs + shortcut", "def resnet50():\n\n X = K.Input(shape=(224, 224, 3))\n init = K.initializers.he_normal(seed=None)\n\n conv1 = K.layers.Conv2D(\n filters=64,\n kernel_size=(\n 7,\n 7),\n padding='same',\n strides=2,\n kernel_initializer=init)(X)\n\n bn1 = K.layers.BatchNormalization(axis=3)(conv1)\n\n activation1 = K.layers.Activation('relu')(bn1)\n\n maxpool1 = K.layers.MaxPooling2D(\n pool_size=(\n 3, 3), strides=(\n 2, 2), padding='same',)(activation1)\n\n Projection1 = projection_block(maxpool1, [64, 64, 256], s=1)\n IdenBlock1 = identity_block(Projection1, [64, 64, 256])\n IdenBlock2 = identity_block(IdenBlock1, [64, 64, 256])\n\n Projection2 = projection_block(IdenBlock2, [128, 128, 512])\n IdenBlock3 = identity_block(Projection2, [128, 128, 512])\n IdenBlock4 = identity_block(IdenBlock3, [128, 128, 512])\n IdenBlock5 = identity_block(IdenBlock4, [128, 128, 512])\n\n Projection3 = projection_block(IdenBlock5, [256, 256, 1024])\n IdenBlock6 = identity_block(Projection3, [256, 256, 1024])\n IdenBlock7 = identity_block(IdenBlock6, [256, 256, 1024])\n IdenBlock8 = identity_block(IdenBlock7, [256, 256, 1024])\n IdenBlock9 = identity_block(IdenBlock8, [256, 256, 1024])\n IdenBlock10 = identity_block(IdenBlock9, [256, 256, 1024])\n\n Projection4 = projection_block(IdenBlock10, [512, 512, 2048])\n IdenBlock11 = identity_block(Projection4, [512, 512, 2048])\n IdenBlock12 = identity_block(IdenBlock11, [512, 512, 2048])\n\n avgpool = K.layers.AveragePooling2D(\n pool_size=(\n 1, 1), strides=(\n 7, 7), padding='same',)(IdenBlock12)\n\n SoftMax = K.layers.Dense(\n units=1000,\n kernel_initializer=init,\n activation='softmax',\n )(avgpool)\n\n Keras = K.Model(inputs=X, outputs=SoftMax)\n\n return Keras", "def projection_block(A_prev, filters, s=2):\n X = K.layers.Conv2D(filters=filters[0],\n kernel_size=1,\n padding='same',\n strides=(s, s),\n kernel_initializer='he_normal')(A_prev)\n\n X = K.layers.BatchNormalization(axis=3)(X)\n\n X = K.layers.Activation('relu')(X)\n\n X = K.layers.Conv2D(filters=filters[1],\n kernel_size=3,\n padding='same',\n # strides=(s, s),\n kernel_initializer='he_normal')(X)\n\n X = K.layers.BatchNormalization()(X)\n\n X = K.layers.Activation('relu')(X)\n\n X = K.layers.Conv2D(filters=filters[2],\n kernel_size=1,\n padding='same',\n kernel_initializer='he_normal')(X)\n\n X = K.layers.BatchNormalization()(X)\n\n shortcut = K.layers.Conv2D(filters=filters[2],\n kernel_size=1,\n padding='same',\n strides=(s, s),\n kernel_initializer='he_normal')(A_prev)\n\n shortcut = K.layers.BatchNormalization()(shortcut)\n\n adding = K.layers.Add()([X, shortcut])\n\n output = K.layers.Activation('relu')(adding)\n\n return output", "def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\r\n super(ResnetBlock, self).__init__()\r\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)", "def build(width, height, depth, classes, stages, filters, include_top, pooling,\n reg=1e-3, bnEps=2e-5, bnMom=0.0):\n inputShape = 
(height, width, depth)\n chanDim = -1\n\n if K.image_data_format() == \"channels_first\":\n inputShape = (depth, height, width)\n chanDim = 1\n\n inputs = Input(shape=inputShape)\n\n\n # block 1 (initial conv block)\n x = ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(inputs)\n x = Conv2D(64, (7,7), use_bias=False, strides=(2,2),\n kernel_initializer=\"he_normal\", kernel_regularizer=l2(reg))(x)\n x = BatchNormalization(axis=chanDim, name=\"bn_conv1\")(x)\n x = Activation(\"relu\")(x)\n x = ZeroPadding2D(padding=((1,1), (1,1)), name=\"pool1_pad\")(x)\n x = MaxPooling2D(3, strides=2)(x)\n\n for i in range(0, len(stages)):\n stride = (1,1) if i == 0 else (2,2) # block 2 (projection block) w stride(1,1)\n\n print(\"Stage {}, Stride={}\".format(i, stride))\n x = SEResNet.residual_module(x, filters[i+1], stride,\n chanDim=chanDim, red=True, bnEps=bnEps, bnMom=bnMom)\n for j in range(0, stages[i] + 1): #stacking res block to each depth layer\n x = SEResNet.residual_module(x, filters[i+1], stride=(1,1),\n chanDim=chanDim, bnEps=bnEps,\n bnMom=bnMom)\n x = BatchNormalization(axis=chanDim, epsilon=bnEps,\n momentum=bnMom)(x)\n x = Activation(\"relu\")(x)\n\n if include_top:\n x = GlobalAveragePooling2D()(x)\n x = Dense(classes, use_bias=False, kernel_regularizer=l2(reg),\n activation='softmax')(x)\n else:\n if pooling == 'avg':\n print(\"Adding average pool\")\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n model = Model(inputs=inputs, outputs=x, name=\"SEResNet\")\n return model", "def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)", "def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)", "def build_resnet(self):\r\n\r\n # INPUTS\r\n inputs_data = Input((self.data_rows, self.data_cols, 1),name='inputs_data')\r\n\r\n\r\n def residual_block(input, output_channels=64, kernel_size=(3, 3), stride=(1, 1)):\r\n x = Conv2D(output_channels, kernel_size, padding='same', strides=stride)(input)\r\n x = BatchNormalization()(x)\r\n x = Activation('relu')(x)\r\n\r\n x = Conv2D(output_channels, kernel_size, padding='same', strides=stride)(x)\r\n x = BatchNormalization()(x)\r\n x = Activation('relu')(x)\r\n\r\n x = Add()([x, input])\r\n\r\n residual_block.counter += 1\r\n return x\r\n\r\n residual_block.counter = 0\r\n\r\n conv1=Conv2D(64,(3,3),strides=(1,1),padding='same',activation='relu')(inputs_data)\r\n res_block1=residual_block(conv1,output_channels=64)\r\n res_block2 =residual_block(res_block1, output_channels=64)\r\n res_block3 =residual_block(res_block2, output_channels=64)\r\n conv2=Conv2D(1,(3,3),strides=(1,1),padding='same')(res_block3)\r\n outputs=Add()([conv2,inputs_data])\r\n\r\n\r\n model = Model(inputs=inputs_data, outputs=outputs)\r\n\r\n\r\n return model", "def build_model(self):\n\n input_placeholder = Input(shape = self.input_shape)\n x = ZeroPadding2D((3, 3))(input_placeholder)\n\n # Stage 1\n x = self.main_path_block(x, 64, (7, 7), 'valid', 'conv1', 'bn_conv1', 'relu', (2, 2))\n x = MaxPooling2D((3, 3), strides = (2, 2))(x)\n\n # Stage 2\n x = self.convolutional_block(x, 3, [64, 64, 256], 2, 'a', 1)\n x = self.identity_block(x, 3, [64, 64, 256], 2, 'b')\n x = self.identity_block(x, 3, [64, 64, 256], 2, 'c')\n\n # Stage 3\n x = 
self.convolutional_block(x, 3, [128, 128, 512], 3, 'a', 2)\n x = self.identity_block(x, 3, [128, 128, 512], 3, 'b')\n x = self.identity_block(x, 3, [128, 128, 512], 3, 'c')\n x = self.identity_block(x, 3, [128, 128, 512], 3, 'd')\n\n # Stage 4\n x = self.convolutional_block(x, 3, [256, 256, 1024], 4, 'a', 2)\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'b')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'c')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'd')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'e')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'f')\n\n # Stage 5\n x = self.convolutional_block(x, 3, [512, 512, 2048], 5, 'a', 2)\n x = self.identity_block(x, 3, [512, 512, 2048], 5, 'b')\n x = self.identity_block(x, 3, [512, 512, 2048], 5, 'c')\n \n # Average Pooling Layer\n x = AveragePooling2D((2, 2), name = 'avg_pool')(x)\n \n # Fully Connected Layer\n x = Flatten()(x)\n x = Dense(\n self.classes,\n activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet50')", "def __init__(self, indim, outdim, ksize=3, stride=1, activation=nn.ReLU):\n\n # Run initialization for super class\n super(ConvBlock, self).__init__()\n\n # Check ksize, stride requirements\n assert (ksize % 2) == 1\n assert stride == 1\n assert indim == outdim\n\n # Store proper activation function depending on configuration\n self.activ = activation\n\n # Compute padding according to `ksize`. Make sure\n # that this will not cause image width and height to change.\n padding = ksize // 2\n\n # We will follow the architecture in slide 76 of lecture 21, but with\n # our `_conv` function as our conv ``block''. We'll also use\n # nn.Sequential() and its `add_module' function. 
Note that the 64 and\n # 256 in that slide are just examples, and you should instead use indim\n # and outdim.\n #\n # Also note that we are creating these layers with support for\n # different `ksize`, `stride`, `padding`, unlike previous assignment.\n self.layers = nn.Sequential()\n self.layers.add_module(\"conv_1\", self._conv(indim, indim, 1, 1, 0))\n self.layers.add_module(\"conv_2\", self._conv(\n indim, indim, ksize, 1, padding))\n self.layers.add_module(\"conv_3\", self._conv(indim, outdim, 1, 1, 0))", "def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False):\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, last)", "def build(input_shape, num_outputs, repetitions, mid_f = [64, 128, 256, 512], output_f=[256, 512, 1024, 2048], block_fn='resnet'):\n if len(input_shape) != 3:\n raise Exception(\"Input shape should be a tuple (nb_channels, nb_rows, nb_cols)\")\n\n # Permute dimension order if necessary\n #if K.image_dim_ordering() == 'tf':\n # input_shape = (input_shape[1], input_shape[2], input_shape[0])\n\n # Load function from str if needed.\n if block_fn == 'xresnet':\n id_block = xresneXt_identity_block\n conv_block = xresneXt_convolution_block\n elif block_fn == 'dresnet':\n id_block = dresneXt_identity_block\n conv_block = dresneXt_convolution_block\n else:\n id_block = resnet_identity_block\n conv_block = resnet_convolution_block\n\n print('the input shape: {}'.format(input_shape))\n input = Input(shape=input_shape)\n # initial building block\n conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(input)\n #pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding=\"same\")(conv1)\n\n block = conv1\n #filters = 64\n print('input before reisdual', block.shape)\n for i, r in enumerate(repetitions):\n if i == 0:\n block = _residual_block(block, id_block=id_block, conv_block=conv_block, stage=i,\n mid_f=mid_f[i], output_f=output_f[i], repetitions=r, is_first_layer=True)\n else:\n block = _residual_block(block, id_block=id_block, conv_block=conv_block, stage=i,\n mid_f=mid_f[i], output_f=output_f[i], repetitions=r, is_first_layer=False)\n\n\n # Last activation\n block = _bn_relu(block)\n\n # Classifier block\n block_shape = K.int_shape(block)\n\n pool2 = AveragePooling2D(pool_size=(block_shape[1], block_shape[2]),\n strides=(1, 1))(block)\n flatten1 = Flatten()(pool2)\n dense = Dense(num_outputs)(flatten1)\n dense = Activation('softmax', name='Softmax')(dense)\n\n model = Model(inputs=input, outputs=dense)\n return model", "def layer_construction(self, in_channel, out_channel, stride, num_blocks):\n layer = [ResBlock(in_channel,out_channel,stride)]\n for i in range(0, num_blocks-1):\n layer.append(ResBlock(out_channel * 4, out_channel))\n\n return nn.Sequential(*layer)", "def __init__(self, image_shape, z_dim, num_blocks, dropout=False,\n subsampling=True, embedding=128):\n super().__init__()\n\n self.image_shape = image_shape\n self.z_dim = z_dim\n self.num_blocks = num_blocks\n\n self.layers = nn.ModuleList()\n\n channels = self.image_shape[2]\n shape_x = self.image_shape[0]\n shape_y = self.image_shape[1]\n\n if subsampling:\n assert shape_x % (2 ** num_blocks) == 0, \\\n 'Image is not evenly divisible by max pooling layer'\n assert shape_y % (2 ** num_blocks) == 0, \\\n 'Image is not evenly divisible by max pooling layer'\n\n for i in range(num_blocks):\n self.layers.append(\n nn.Conv2d(channels, channels * 4, 3, padding=1))\n 
self.layers.append(nn.ReLU())\n self.layers.append(nn.MaxPool2d(2, 2))\n\n channels = channels * 4\n shape_x = int(shape_x / 2)\n shape_y = int(shape_y / 2)\n\n self.linear_input = channels * shape_x * shape_y\n self.linear = nn.Linear(channels * shape_x * shape_y, z_dim)\n\n else:\n block_shape = [8, 4, 3]\n block_strides = [4, 2, 1]\n filters = [16, 32, 64]\n for i in range(num_blocks):\n self.layers.append(\n nn.Conv2d(channels, filters[i], block_shape[i],\n stride=block_strides[i]))\n self.layers.append(nn.ReLU())\n\n channels = filters[i]\n # calculation taken from https://pytorch.org/docs/stable\n # nn.html#torch.nn.Conv2d\n shape_x = int(((shape_x - (block_shape[i] - 1) - 1) /\n block_strides[i]) + 1)\n shape_y = int(((shape_y - (block_shape[i] - 1) - 1) /\n block_strides[i]) + 1)\n\n self.linear_input = int(channels * shape_x * shape_y)\n self.linear = nn.Linear(self.linear_input, embedding)", "def _make_res_layer(self,\n block,\n planes,\n blocks,\n stride=1,\n norm_kwargs=None,\n layer_name=''):\n downsample = None\n\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.SequentialCell([\n nn.Conv3d(in_channels=self.inplanes,\n out_channels=planes * block.expansion,\n kernel_size=1,\n stride=(stride, stride, stride),\n has_bias=False),\n nn.BatchNorm3d(num_features=planes * block.expansion,\n **({} if norm_kwargs is None else norm_kwargs))])\n\n layers = []\n layers.append(block(inplanes=self.inplanes,\n planes=planes,\n stride=stride,\n downsample=downsample))\n\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(inplanes=self.inplanes, planes=planes))\n\n return nn.SequentialCell(layers)", "def __init__(self, block, layers, num_classes=1000, width=64):\n\n torch.nn.Module.__init__(self) # Skip the parent constructor. 
This replaces it.\n self._norm_layer = torch.nn.BatchNorm2d\n self.inplanes = width\n self.dilation = 1\n self.groups = 1\n self.base_width = 64\n\n # The initial convolutional layer.\n self.conv1 = torch.nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = self._norm_layer(self.inplanes)\n self.relu = torch.nn.ReLU(inplace=True)\n self.maxpool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n # The subsequent blocks.\n self.layer1 = self._make_layer(block, width, layers[0])\n self.layer2 = self._make_layer(block, width*2, layers[1], stride=2, dilate=False)\n self.layer3 = self._make_layer(block, width*4, layers[2], stride=2, dilate=False)\n self.layer4 = self._make_layer(block, width*8, layers[3], stride=2, dilate=False)\n\n # The last layers.\n self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))\n self.fc = torch.nn.Linear(width*8*block.expansion, num_classes)\n\n # Default init.\n for m in self.modules():\n if isinstance(m, torch.nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, torch.nn.BatchNorm2d):\n torch.nn.init.constant_(m.weight, 1)\n torch.nn.init.constant_(m.bias, 0)", "def resnet101_base(freeze_blocks=[1,2,3], weight_regularizer=None, bias_regularizer=None):\n img_input = Input(shape=(None, None, 3))\n bn_axis = 3\n train1 = 1 not in freeze_blocks\n x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', trainable=train1, use_bias=False,\n kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(img_input)\n x = BatchNormalization(axis=bn_axis, name='bn_conv1', trainable=False)(x, training=False)\n x = Scale(axis=bn_axis, name='scale_conv1', trainable=False)(x)\n x = Activation('relu')(x)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n train2 = 2 not in freeze_blocks\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n\n train3 = 3 not in freeze_blocks\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n for i in range(1, 4):\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b' + str(i), trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n\n train4 = 4 not in freeze_blocks\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n for i in range(1, 23):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b' + str(i), trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n\n base_model = Model(img_input, x, name='resnet101')\n\n return base_model", "def __init__(self, ):\n super().__init__()\n channels = 3\n\n # Initial 
convolution block\n out_features = 64\n # encoder\n self.input = nn.Sequential(\n nn.ReflectionPad2d(channels),\n nn.Conv2d(3, out_features, (7, 7)),\n nn.InstanceNorm2d(out_features),\n nn.MaxPool2d(2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(out_features, out_features * 2, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(out_features * 2, out_features * 4, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.in0 = nn.InstanceNorm2d(256)\n self.block0 = blocks()\n self.block1 = blocks()\n self.block2 = blocks()\n self.block3 = blocks()\n self.block4 = blocks()\n self.block5 = blocks()\n self.block6 = blocks()\n self.block7 = blocks()\n\n self.out = nn.Sequential(\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 4, out_features * 2, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 2, out_features, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.ReflectionPad2d(channels),\n nn.Conv2d(out_features, channels, 7),\n nn.Tanh(),\n )", "def __init__(self,\r\n in_channels_r, out_channels_r, in_channels_u, out_channels,\r\n kernel_size_in, kernel_size_out,\r\n up_stride_in, stride_out,\r\n up_padding_in, padding_out, output_padding=0,\r\n activation_in='lrelu', activation_out='lrelu',\r\n norm_in='bn', norm_out='none'):\r\n super(RecovecyBlock, self).__init__()\r\n\r\n self.in_upconv = upconv_block(\r\n in_channels=in_channels_r,\r\n out_channels=out_channels_r,\r\n kernel_size=kernel_size_in,\r\n stride=up_stride_in,\r\n padding=up_padding_in,\r\n output_padding=output_padding,\r\n norm=norm_in,\r\n activation=activation_in\r\n )\r\n\r\n self.out_conv = conv_block(\r\n in_channels=out_channels_r + in_channels_u,\r\n out_channels=out_channels,\r\n kernel_size=kernel_size_out,\r\n stride=stride_out,\r\n padding=padding_out,\r\n norm=norm_out,\r\n activation=activation_out\r\n )", "def residual_block(inputs, filters, is_training, strides,\n use_projection=False, data_format='channels_first'):\n shortcut = inputs\n if use_projection:\n # Projection shortcut in first layer to match filters and strides\n shortcut = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=1,\n strides=strides,\n data_format=data_format)\n shortcut = batch_norm_relu(shortcut, is_training, relu=False,\n data_format=data_format)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=3,\n strides=strides,\n data_format=data_format)\n inputs = batch_norm_relu(inputs, is_training, data_format=data_format)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=3,\n strides=1,\n data_format=data_format)\n inputs = batch_norm_relu(inputs, is_training, relu=False, init_zero=True,\n data_format=data_format)\n\n return tf.nn.relu(inputs + shortcut)", "def __init__(self, kernel_size, filters, stage, block):\n super().__init__(name='identity' + str(stage) + block)\n filters1, filters2, filters3 = filters\n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n self.conv1 = layers.Conv2D(\n filters1, (1, 1),\n kernel_initializer='he_normal',\n name=conv_name_base + '2a')\n self.bn1 = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')\n 
self.act1 = layers.Activation('relu')\n\n self.conv2 = layers.Conv2D(\n filters2,\n kernel_size,\n padding='same',\n kernel_initializer='he_normal',\n name=conv_name_base + '2b')\n self.bn2 = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')\n self.act2 = layers.Activation('relu')\n\n self.conv3 = layers.Conv2D(\n filters3, (1, 1),\n kernel_initializer='he_normal',\n name=conv_name_base + '2c')\n self.bn3 = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')\n\n self.add = layers.Add()\n self.act = layers.Activation('relu')", "def resnet50_base(freeze_blocks=[1,2,3], weight_regularizer=None, bias_regularizer=None):\n img_input = Input(shape=(None, None, 3))\n bn_axis = 3\n train1 = 1 not in freeze_blocks\n x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', trainable=train1,\n kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(img_input)\n x = BatchNormalization(axis=bn_axis, name='bn_conv1', trainable=False)(x, training=False)\n x = Activation('relu')(x)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n train2 = 2 not in freeze_blocks\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n train3 = 3 not in freeze_blocks\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n train4 = 4 not in freeze_blocks\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n base_model = Model(img_input, x, name='resnet50')\n\n return base_model", "def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = 
(2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def conv_block(input, filters, phase=phase):\r\n\r\n conv_block = tf.layers.conv2d(\r\n inputs=input,\r\n filters=filters,\r\n kernel_size=3,\r\n padding=\"SAME\",\r\n activation=tf.nn.relu)\r\n\r\n #conv_block = tf.contrib.layers.batch_norm(\r\n # conv_block, \r\n # center=True, scale=True, \r\n # is_training=phase)\r\n\r\n #conv_block = tf.nn.leaky_relu(\r\n # features=conv_block,\r\n # alpha=0.2)\r\n #conv_block = tf.nn.relu(conv_block)\r\n\r\n return conv_block", "def _build_model(self, x: tf.Tensor) -> tf.Tensor:\n x = ResNet._first_conv(x)\n x = ResNet._v2_block(x, 'block1', base_depth=64, num_units=3, stride=2)\n x = ResNet._v2_block(x, 'block2', base_depth=128, num_units=4, stride=2)\n x = ResNet._v2_block(x, 'block3', base_depth=256, num_units=6, stride=2)\n x = ResNet._v2_block(x, 'block4', base_depth=512, num_units=3, stride=1)\n x = ResNet.batch_norm(x)\n return self.global_avg_pooling(x)", "def __init__(self,\r\n in_channels_1, in_channels_2, out_channels,\r\n kernel_size_1, kernel_size_2,\r\n stride_1, up_stride_2,\r\n padding_1, up_padding_2, output_padding=0,\r\n activation_in='relu', activation_out='lrelu',\r\n norm_in='bn', norm_out='none'):\r\n\r\n super(RefinementBlock, self).__init__()\r\n\r\n self.conv_1 = conv_block(\r\n in_channels=in_channels_1,\r\n out_channels=out_channels,\r\n kernel_size=kernel_size_1,\r\n stride=stride_1,\r\n padding=padding_1,\r\n norm='none',\r\n activation=activation_in\r\n )\r\n\r\n self.upconv_2 = upconv_block(\r\n in_channels=in_channels_2,\r\n out_channels=out_channels,\r\n kernel_size=kernel_size_2,\r\n stride=up_stride_2,\r\n padding=up_padding_2,\r\n output_padding=output_padding,\r\n norm=norm_in,\r\n activation='none'\r\n )\r\n\r\n self.conv_3 = conv_block(\r\n in_channels=out_channels,\r\n out_channels=out_channels,\r\n kernel_size=3,\r\n stride=1,\r\n padding=1,\r\n norm='none',\r\n activation='none'\r\n )\r\n\r\n self.conv_4 = conv_block(\r\n in_channels=out_channels,\r\n out_channels=out_channels,\r\n kernel_size=3,\r\n stride=1,\r\n padding=1,\r\n norm='none',\r\n activation='none'\r\n )\r\n\r\n self.out_act = _activation(act_type=activation_out)\r\n self.out_norm = _norm(norm_type=norm_out, channels=out_channels)", "def conv_block(\n input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):\n filters0, filters1, filters2 = filters\n conv_name_base = 'res' + str(stage) + block\n bn_name_base = 'bn' + str(stage) + block\n add_name = 'add' + str(stage) + \"_\" + block\n relu_name = 'relu' + str(stage) + \"_\" + block\n\n # sg.Tensors\n 
input_tensor_chans = input_tensor.dims(\n 3) if input_tensor.shape.layout == sg.NHWC else input_tensor.dims(1)\n conv0_tensor = sg.Tensor(\n data_layout=sg.NHWC, tensor_data=generate_random_data(\n (filters0, 1, 1, input_tensor_chans)))\n bn0_mean_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n bn0_var_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n bn0_gamma_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n bn0_beta_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n conv1_tensor = sg.Tensor(\n data_layout=sg.NHWC, tensor_data=generate_random_data(\n (filters1, kernel_size, kernel_size, filters0)))\n bn1_mean_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n bn1_var_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n bn1_gamma_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n bn1_beta_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n conv2_tensor = sg.Tensor(\n data_layout=sg.NHWC, tensor_data=generate_random_data(\n (filters2, 1, 1, filters1)))\n bn2_mean_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn2_var_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn2_gamma_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn2_beta_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n conv3_tensor = sg.Tensor(\n data_layout=sg.NHWC, tensor_data=generate_random_data(\n (filters2, 1, 1, input_tensor_chans)))\n bn3_mean_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn3_var_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn3_gamma_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn3_beta_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n\n x = sg.nn.convolution(\n input_tensor, conv0_tensor, stride=[1, 1], padding=\"same\",\n name=conv_name_base + '_2a')\n x = sg.nn.batch_norm(\n x, bn0_mean_tensor, bn0_var_tensor, bn0_gamma_tensor, bn0_beta_tensor,\n activation=\"relu\")\n x = sg.nn.convolution(\n x, conv1_tensor, stride=strides, padding=\"same\",\n name=conv_name_base + '_2b')\n x = sg.nn.batch_norm(\n x, bn1_mean_tensor, bn1_var_tensor, bn1_gamma_tensor, bn1_beta_tensor,\n activation=\"relu\", name=bn_name_base + '_2b')\n x = sg.nn.convolution(\n x, conv2_tensor, stride=[1, 1], padding=\"same\",\n name=conv_name_base + '_2c')\n x = sg.nn.batch_norm(\n x, bn2_mean_tensor, bn2_var_tensor, bn2_gamma_tensor, bn2_beta_tensor,\n name=bn_name_base + '_2c')\n shortcut = sg.nn.convolution(\n input_tensor, conv3_tensor, stride=strides, padding=\"same\",\n name=conv_name_base + '_1')\n shortcut = sg.nn.batch_norm(\n shortcut, bn3_mean_tensor, bn3_var_tensor, bn3_gamma_tensor,\n bn3_beta_tensor, name=bn_name_base + '_1')\n x = sg.math.add(x, shortcut, name=add_name)\n x = sg.nn.relu(x, name=relu_name)\n return x", "def ResBlock(input_tensor, filters):\n \n conv_1 = Conv2D(filters = filters, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal') \n conv_1a = conv_1(input_tensor) # Shared weights conv layer\n batch_1 = 
BatchNormalization()(conv_1a)\n relu_1 = Activation(\"relu\")(batch_1)\n drop_1 = Dropout(drop)(relu_1)\n conv_1b = conv_1(drop_1) # Shared weights conv layer\n batch_1 = BatchNormalization()(conv_1b)\n return batch_1", "def __init__(self, n_filters = 64,\n n_kernels = 3,\n n_outputs = 10,\n inp_shape = (28,28),\n residual=True,\n regularizer = None,\n intializer = None,\n use_pool= False,\n use_dropout = False,\n use_batchnorm = False\n ):\n super(CNNModel, self).__init__()\n self.conv_dim = len(inp_shape)-1\n self.n_filters = n_filters\n self.initializer = intializer\n self.n_kernels = n_kernels\n self.projection = 3\n self.n_outputs = n_outputs\n self.num_layers = 1\n self.inp_shape = inp_shape\n self.regularizer = regularizer\n self.use_pool = use_pool\n self.residual = residual\n self.use_dropout = use_dropout\n self.use_batchnorm = use_batchnorm\n\n kernel_initializer = initializers.RandomNormal(mean=0.0, stddev=0.05)\n\n if self.conv_dim == 1:\n self.input_layer = layers.Conv1D(self.n_filters, (self.projection),\n activation = \"linear\",\n input_shape = self.inp_shape,\n name ='cnn_input',\n padding = 'same',\n kernel_regularizer = self.regularizer,\n bias_regularizer = self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n self.output_layer = layers.Conv1D(self.n_kernels, (self.projection),\n activation=\"linear\",\n input_shape=(None, self.inp_shape[0], self.n_filters),\n name='cnn_output',\n padding = 'same',\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n if self.use_pool:\n self.pool = layers.MaxPool1D()\n elif self.conv_dim == 2:\n self.input_layer = layers.Conv2D(self.n_filters, (self.projection,self.projection),\n activation=\"linear\",\n input_shape=self.inp_shape,\n name='cnn_input',\n padding = 'same',\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n self.output_layer = layers.Conv2D(self.n_kernels, (self.projection, self.projection),\n activation= \"linear\",\n input_shape=(None, self.inp_shape[0],self.inp_shape[1], self.n_filters),\n name=\"cnn_output\",\n padding = 'same',\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n if self.use_pool:\n self.pool = layers.MaxPool2D()\n self.list_cnn = [self.input_layer]\n self.flatten = layers.Flatten()\n\n #compute input shape after flatten for the dense layer\n if not self.use_pool:\n self.class_inp = np.prod(self.inp_shape[:-1])*self.n_kernels\n else:\n self.class_inp = np.prod(self.inp_shape[:-1])*self.n_kernels//(2**self.conv_dim)\n # self.classify = MyDenseLayer(\n # self.n_outputs,shape = (None,self.class_inp),\n # layer_name = 'classify',\n # initializer = \"RandomNormal\")\n self.classify = layers.Dense(units = self.n_outputs,\n activation = 'softmax', use_bias = True,\n input_shape = self.class_inp,\n kernel_initializer = kernel_initializer, bias_initializer=initializers.get(\"zeros\"),\n name = 'classification_layer')", "def _load_projection(self):\n input_dim = self.filter_dims\n self.projection = nn.Linear(input_dim, self.char_cnn_output_dim, bias=True)\n weight = self.npz_weights['W_proj']\n bias = self.npz_weights['b_proj']\n 
self.projection.weight.data.copy_(torch.div(torch.FloatTensor(np.transpose(weight)), 10.0))\n self.projection.bias.data.copy_(torch.div(torch.FloatTensor(np.transpose(bias)), 10.0))\n self.projection.weight.requires_grad = self._finetune_pretrained_weights\n self.projection.bias.requires_grad = self._finetune_pretrained_weights", "def _conv_block(inputs: \"Layer\",\n filters: int,\n kernel: int or Tuple[int, int],\n strides: int or Tuple[int, int]) -> \"Layer\":\n\n layer = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n layer = BatchNormalization()(layer)\n layer = relu6(layer)\n return layer", "def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):\n filters1, filters2, filters3 = filters\n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = ConvGRU2D(filters1, (1, 1), strides=strides, padding='same', return_sequences=True, name=conv_name_base + '2a')(input_tensor)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\n #x = Activation('relu')(x)\n\n x = ConvGRU2D(filters2, kernel_size, padding='same', return_sequences=True, name=conv_name_base + '2b')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\n #x = Activation('relu')(x)\n\n x = ConvGRU2D(filters3, (1, 1), padding='same', return_sequences=True, name=conv_name_base + '2c')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\n\n shortcut = ConvGRU2D(filters3, (1, 1), strides=strides, padding='same', return_sequences=True, name=conv_name_base + '1')(input_tensor)\n shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)\n\n x = layers.add([x, shortcut])\n #x = Activation('relu')(x)\n return x", "def residual_block(layer_input, filters):\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)\n d = Activation('relu')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Add()([d, layer_input])\n return d", "def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = 
Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net", "def __init__(self):\n super(Encoder3, self).__init__()\n self.lblocks = nn.ModuleList(\n [\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n ]\n )\n\n self.blocks = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n ]\n )", "def _make_layer(self, block, planes, blocks, stride=1):\n\n if blocks == 0:\n return nn.Sequential(nn.Identity())\n norm_layer = self._norm_layer\n upsample = None\n if stride != 1:\n upsample = nn.Sequential(\n nn.UpsamplingNearest2d(scale_factor=2),\n SpectralNorm(conv1x1(self.inplanes, planes * block.expansion)),\n norm_layer(planes 
* block.expansion),\n )\n elif self.inplanes != planes * block.expansion:\n upsample = nn.Sequential(\n SpectralNorm(conv1x1(self.inplanes, planes * block.expansion)),\n norm_layer(planes * block.expansion),\n )\n\n layers = [block(self.inplanes, planes, stride, upsample, norm_layer, self.large_kernel)]\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, norm_layer=norm_layer, large_kernel=self.large_kernel))\n\n return nn.Sequential(*layers)", "def _block(self, filters, inp):\r\n layer_1 = BatchNormalization()(inp)\r\n act_1 = Activation('relu')(layer_1)\r\n conv_1 = Conv2D(filters, (3, 3),\r\n padding='same',\r\n kernel_initializer=self.initializer)(act_1)\r\n layer_2 = BatchNormalization()(conv_1)\r\n act_2 = Activation('relu')(layer_2)\r\n conv_2 = Conv2D(filters, (3, 3),\r\n padding='same',\r\n kernel_initializer=self.initializer)(act_2)\r\n return (conv_2)", "def res_net(*inputs, **kwargs):\n inp_shapes = kwargs['inp_shapes']\n out_shapes = kwargs['out_shapes']\n params = kwargs['params']\n layer_width = kwargs['layer_width']\n nblocks = kwargs['nblocks']\n block_size = kwargs['block_size']\n output_args = kwargs['output_args']\n ninputs = len(inp_shapes)\n noutputs = len(out_shapes)\n\n input_width = np.sum([in_shape[1] for in_shape in inp_shapes])\n flat_output_shapes = [np.prod(out_shape[1:]) for out_shape in out_shapes]\n output_width = np.sum(flat_output_shapes)\n print(\"Building resnet with: %s residual blocks of size %s inner width: %s from: %s inputs to %s outputs\" %\n (nblocks, block_size, layer_width, input_width, output_width))\n input_layers = [InputLayer(inp_shapes[i], input_var = inputs[i]) for i in range(len(inputs))]\n\n ## Flatten the input\n reshaped = [ReshapeLayer(inp, ([0], -1)) for inp in input_layers]\n\n net = {}\n net['concat'] = prev_layer = ConcatLayer(reshaped)\n # Projet inner layer down/up to hidden layer width only if necessary\n if layer_width != input_width:\n print(\"Input projection, layer_width: %s input_width: %s\" % (layer_width, input_width))\n wx_sfx = 'wxinpproj'\n wx = batch_norm_params(DenseLayer(prev_layer, layer_width, nonlinearity = rectify,\n W=params['W_%s' % wx_sfx, HeNormal(gain='relu')],\n b=params['b_%s' % wx_sfx, Constant(0)]), wx_sfx, params)\n else:\n print(\"Skipping input weight projection, layer_width: %s input_width: %s\" % (layer_width, input_width))\n wx = prev_layer\n\n ## Residual Blocks\n for j in range(nblocks):\n for i in range(block_size):\n sfx = \"%s_%s\" % (j,i)\n net['res2d%s_%s' % (j,i)] = prev_layer = batch_norm_params(\n DenseLayer(prev_layer, layer_width, nonlinearity = rectify,\n W=params['W_%s' % sfx, HeNormal(gain='relu')],\n b=params['b_%s' % sfx, Constant(0)]), sfx, params)\n net['block%s' % j] = prev_layer = wx = lasagne.layers.ElemwiseSumLayer([prev_layer, wx])\n\n ## Project output to correct width\n if layer_width != output_width:\n print(\"Output projection, layer_width: %s output_width: %s\" % (layer_width, output_width))\n wx_sfx = 'wxoutproj'\n net['output'] = wx = batch_norm_params(DenseLayer(prev_layer, output_width, nonlinearity = rectify,\n W=params['W_%s' % wx_sfx, HeNormal(gain='relu')],\n b=params['b_%s' % wx_sfx, Constant(0)]), wx_sfx, params)\n else:\n print(\"Skipping output projection, layer_width: %s output_width: %s\" % (layer_width, output_width))\n net['output'] = prev_layer\n\n # Split up the final layer into necessary parts and reshape\n output_product = lasagne.layers.get_output(net['output'], **output_args)\n 
outputs = []\n lb = 0\n for i in range(noutputs):\n ub = lb + flat_output_shapes[i]\n out = output_product[:, lb:ub]\n rout = out.reshape((out.shape[0],) + (out_shapes[i][1:]))\n outputs.append(rout)\n lb = ub\n\n params.add_tagged_params(get_layer_params(lasagne.layers.get_all_layers(net['output'])))\n params.check(lasagne.layers.get_all_params(prev_layer))\n return outputs, params", "def Unet4(shape, nb_filters=32, exp=1, kernel_size=3, initialization=\"glorot_uniform\", activation=\"relu\", sigma_noise=0, output_channels=1, drop=0.0, regularization=None):\n \n \n input_layer = Input(shape=shape)\n\n conv1 = ConvBlock(input_layer, nb_filters=nb_filters, kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n if drop > 0.0: pool1 = Dropout(drop)(pool1)\n\n conv2 = ConvBlock(pool1, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n if drop > 0.0: pool2 = Dropout(drop)(pool2)\n\n conv3 = ConvBlock(pool2, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n if drop > 0.0: pool3 = Dropout(drop)(pool3)\n\n conv4 = ConvBlock(pool3, nb_filters=nb_filters * 2 **(3 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n if drop > 0.0: pool4 = Dropout(drop)(pool4)\n\n deconv5 = DeconvBlock(conv4, residual=conv3, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv5 = Dropout(drop)(deconv5)\n\n deconv6 = DeconvBlock(deconv5, residual=conv2, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv6 = Dropout(drop)(deconv6)\n\n deconv7 = DeconvBlock(deconv6, residual=conv1, nb_filters=nb_filters, kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv7 = Dropout(drop)(deconv7)\n\n if sigma_noise > 0:\n deconv7 = GaussianNoise(sigma_noise)(deconv7)\n\n output_layer = Conv2D(filters=output_channels, kernel_size=(1, 1))(deconv7)\n output_layer = BatchNormalization()(output_layer)\n output_layer = Activation('softmax')(output_layer)\n\n model = Model(inputs=input_layer, outputs=output_layer, name='Unet')\n return model", "def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\n\n return 
nn.Sequential(*conv_block)", "def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\n\n return nn.Sequential(*conv_block)", "def build_unet(input_layer = Input((128,128,3)), start_depth=64, activation='relu', initializer='he_normal'):\n\n # 128 -> 64\n conv1 = Conv2D_BN(input_layer, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n conv1 = Conv2D_BN(conv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n pool1 = MaxPooling2D((2, 2))(conv1)\n\n # 64 -> 32\n conv2 = Conv2D_BN(pool1, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n conv2 = Conv2D_BN(conv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n pool2 = MaxPooling2D((2, 2))(conv2)\n\n # 32 -> 16\n conv3 = Conv2D_BN(pool2, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n conv3 = Conv2D_BN(conv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n pool3 = MaxPooling2D((2, 2))(conv3)\n\n # 16 -> 8\n conv4 = Conv2D_BN(pool3, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n conv4 = Conv2D_BN(conv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n pool4 = MaxPooling2D((2, 2))(conv4)\n\n # Middle\n convm=cbam_block(pool4)\n\n # 8 -> 16\n deconv4 = Conv2DTranspose(convm, start_depth * 8, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv4 = concatenate([deconv4, conv4])\n uconv4 = Conv2D_BN(uconv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv4 = Conv2D_BN(uconv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 16 -> 32\n deconv3 = Conv2DTranspose(uconv4, start_depth * 4, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv3 = concatenate([deconv3, conv3])\n uconv3 = Conv2D_BN(uconv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv3 = Conv2D_BN(uconv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 32 -> 64\n deconv2 = Conv2DTranspose(uconv3, start_depth * 2, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv2 = concatenate([deconv2, conv2])\n uconv2 = Conv2D_BN(uconv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv2 = Conv2D_BN(uconv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 64 -> 128\n deconv1 = Conv2DTranspose(uconv2, start_depth * 1, (3, 3), strides=(2, 2), activation=activation, 
kernel_initializer=initializer)\n uconv1 = concatenate([deconv1, conv1])\n uconv1 = Conv2D_BN(uconv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv1 = Conv2D_BN(uconv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n\n output_layer = Conv2D(1, (1,1), padding=\"same\", activation=\"sigmoid\")(uconv1)\n\n return output_layer", "def __init__(self,\n num_class=2,\n layer_nums=(3, 5, 5),\n layer_strides=(2, 2, 2),\n num_filters=(128, 128, 256),\n upsample_strides=(1, 2, 4),\n num_upsample_filters=(256, 256, 256),\n num_input_features=128,\n num_anchor_per_loc=2,\n use_groupnorm=False,\n num_groups=32,\n box_code_size=7,\n num_direction_bins=2):\n super(RPN, self).__init__()\n self._num_anchor_per_loc = num_anchor_per_loc\n self._box_code_size=box_code_size\n self._num_class=num_class\n self._num_direction_bins=num_direction_bins\n assert len(layer_nums) == 3\n assert len(layer_strides) == len(layer_nums)\n assert len(num_filters) == len(layer_nums)\n assert len(upsample_strides) == len(layer_nums)\n assert len(num_upsample_filters) == len(layer_nums)\n upsample_strides=[int(i) for i in upsample_strides]\n\n factors = []\n for i in range(len(layer_nums)):\n assert int(np.prod(\n layer_strides[:i + 1])) % upsample_strides[i] == 0\n factors.append(\n np.prod(layer_strides[:i + 1]) // upsample_strides[i])\n assert all([x == factors[0] for x in factors])\n\n # note that when stride > 1, conv2d with same padding isn't\n # equal to pad-conv2d. we should use pad-conv2d.\n block2_input_filters = num_filters[0]\n if use_groupnorm:\n BatchNorm2d = change_default_args(\n num_groups=num_groups, eps=1e-3)(GroupNorm)\n else:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n\n self.block1 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(num_input_features, num_filters[0], 3,\n stride=layer_strides[0],bias=False),\n BatchNorm2d(num_filters[0]),\n nn.ReLU(),)\n for i in range(layer_nums[0]):\n self.block1.add(\n nn.Conv2d(num_filters[0], num_filters[0], 3,padding=1,bias=False))\n self.block1.add(BatchNorm2d(num_filters[0]))\n self.block1.add(nn.ReLU())\n self.deconv1 = Sequential(\n nn.ConvTranspose2d(num_filters[0],num_upsample_filters[0],\n upsample_strides[0],stride=upsample_strides[0],bias=False),\n BatchNorm2d(num_upsample_filters[0]),\n nn.ReLU(),)\n self.block2 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(block2_input_filters,num_filters[1],3,\n stride=layer_strides[1],bias=False),\n BatchNorm2d(num_filters[1]),\n nn.ReLU(),)\n for i in range(layer_nums[1]):\n self.block2.add(\n nn.Conv2d(num_filters[1], num_filters[1], 3, padding=1,bias=False))\n self.block2.add(BatchNorm2d(num_filters[1]))\n self.block2.add(nn.ReLU())\n self.deconv2 = Sequential(\n nn.ConvTranspose2d(num_filters[1],num_upsample_filters[1],\n upsample_strides[1],stride=upsample_strides[1],bias=False),\n BatchNorm2d(num_upsample_filters[1]),\n nn.ReLU(),)\n self.block3 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(num_filters[1], num_filters[2], 3, stride=layer_strides[2],bias=False),\n BatchNorm2d(num_filters[2]),\n nn.ReLU(),)\n for i in range(layer_nums[2]):\n self.block3.add(nn.Conv2d(num_filters[2], num_filters[2], 3, padding=1,bias=False))\n self.block3.add(BatchNorm2d(num_filters[2]))\n self.block3.add(nn.ReLU())\n self.deconv3 = Sequential(\n nn.ConvTranspose2d(\n num_filters[2],num_upsample_filters[2],\n upsample_strides[2],stride=upsample_strides[2],bias=False),\n BatchNorm2d(num_upsample_filters[2]),\n nn.ReLU(),)\n\n num_cls 
= num_anchor_per_loc * num_class\n self.conv_cls = nn.Conv2d(sum(num_upsample_filters), num_cls, 1)\n self.conv_box = nn.Conv2d(sum(num_upsample_filters), num_anchor_per_loc * box_code_size, 1)\n self.conv_dir_cls = nn.Conv2d(sum(num_upsample_filters),num_anchor_per_loc * num_direction_bins, 1)", "def resnet_block(input_tensor, size, kernel_size, filters, stage, conv_strides=(2, 2), training=None):\n\n x = conv_building_block(input_tensor, kernel_size, filters, stage=stage,\n strides=conv_strides, block='block_0',\n training=training)\n for i in range(size - 1):\n x = identity_building_block(x, kernel_size, filters, stage=stage,\n block='block_%d' % (i + 1), training=training)\n return x", "def __call__(self, **kwargs):\n segname = 'block_{}_expand_relu'\n blocks = [13, 6, 3, 1]\n skips = [self._backbone.get_layer(segname.format(i)) for i in blocks]\n backbone_out = self._backbone.get_layer('block_16_project')\n\n p5 = self._fpn_block(backbone_out.output, skips[0].output)\n p4 = self._fpn_block(p5, skips[1].output)\n p3 = self._fpn_block(p4, skips[2].output)\n p2 = self._fpn_block(p3, skips[3].output)\n\n s5 = self._conv_block(p5, 128)\n s4 = self._conv_block(p4, 128)\n s3 = self._conv_block(p3, 128)\n s2 = self._conv_block(p2, 128)\n\n s5 = tf.keras.layers.UpSampling2D(\n size=(8, 8),\n interpolation='nearest'\n )(s5)\n\n s4 = tf.keras.layers.UpSampling2D(\n size=(4, 4),\n interpolation='nearest'\n )(s4)\n\n s3 = tf.keras.layers.UpSampling2D(\n size=(2, 2),\n interpolation='nearest'\n )(s3)\n\n concat = [s5, s4, s3, s2]\n x = tf.keras.layers.Concatenate()(concat)\n x = tf.keras.layers.Conv2D(\n 64,\n kernel_size=3,\n padding='same',\n kernel_initializer='he_uniform'\n )(x)\n\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Activation('relu')(x)\n x = tf.keras.layers.UpSampling2D((2, 2))(x)\n\n x = tf.keras.layers.Conv2D(\n 1,\n kernel_size=3,\n padding='same',\n kernel_initializer='he_uniform'\n )(x)\n\n out = tf.keras.layers.Activation('sigmoid')(x)\n model = tf.keras.models.Model(\n inputs=self._backbone.input,\n outputs=out\n )\n\n return model", "def residual_block(layer_input, filters):\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)\n d = Activation('relu')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Add()([d, layer_input])\n return d", "def __init__(self, block, conv_makers, layers,\n stem, num_classes=256, zero_init_residual=False):\n super(VideoResNet, self).__init__()\n self.inplanes = 64\n\n self.stem = stem()\n\n self.layer1 = self._make_layer(block, conv_makers[0], 64, layers[0], stride=1)\n self.layer2 = self._make_layer(block, conv_makers[1], 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, conv_makers[2], 256, layers[2], stride=1) # original stride: 2\n self.layer4 = self._make_layer(block, conv_makers[3], 512, layers[3], stride=2)\n self.maxpool = nn.MaxPool3d((2, 3, 3)) # else (3, 3, 3)\n self.avgpool = nn.AvgPool3d((2, 3, 3))\n self.adaptive_avgpool = nn.AdaptiveAvgPool3d((None, 1, 1))\n self.adaptive_maxpool = nn.AdaptiveMaxPool3d((None, 1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n # init weights\n self._initialize_weights()\n\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)", "def __init__(self, kernel_size, filters, stage, block, strides=(2, 2)):\n super().__init__(name='conv_block' + str(stage) 
+ block)\n filters1, filters2, filters3 = filters\n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n self.conv1 = layers.Conv2D(\n filters1, (1, 1),\n strides=strides,\n kernel_initializer='he_normal',\n name=conv_name_base + '2a')\n self.bn1 = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')\n self.act1 = layers.Activation('relu')\n\n self.conv2 = layers.Conv2D(\n filters2,\n kernel_size,\n padding='same',\n kernel_initializer='he_normal',\n name=conv_name_base + '2b')\n self.bn2 = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')\n self.act2 = layers.Activation('relu')\n\n self.conv3 = layers.Conv2D(\n filters3, (1, 1),\n kernel_initializer='he_normal',\n name=conv_name_base + '2c')\n self.bn3 = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')\n\n self.shortcut_conv = layers.Conv2D(\n filters3, (1, 1),\n strides=strides,\n kernel_initializer='he_normal',\n name=conv_name_base + '1')\n self.shortcut_bn = layers.BatchNormalization(\n axis=bn_axis, name=bn_name_base + '1')\n\n self.add = layers.Add()\n self.act = layers.Activation('relu')", "def __init__(self, config, input_shp):\n\n # Run initialization for super class\n super(MyNetwork, self).__init__()\n\n # Store configuration\n self.config = config\n\n # Placeholder for layers\n self.layers = {}\n indim = input_shp[0]\n\n # Retrieve Conv, Act, Pool functions from configurations. We'll use\n # these for our code below.\n if config.conv2d == \"torch\":\n self.Conv2d = nn.Conv2d\n elif config.conv2d == \"custom\":\n self.Conv2d = ConvBlock\n self.Activation = getattr(nn, config.activation)\n self.Pool2d = getattr(nn, config.pool2d)\n self.Linear = nn.Linear\n\n # Resnet Blocks, similar to slide 73 of lecture 21. However, for\n # simplicity, we'll make is slightly different. Note that we used\n # nn.Sequential this time.\n self.convs = nn.Sequential()\n cur_h, cur_w = input_shp[-2:]\n for _i in range(config.num_conv_outer):\n #\n # NOTE THE NEW LAYER ON THESE LINES!\n #\n # We have a dedicated 1x1 layer to get more channels. Note also\n # that this is a pure linear convolution layer.\n outdim = config.nchannel_base * 2 ** _i\n self.convs.add_module(\n \"conv_{}_base\".format(_i), nn.Conv2d(indim, outdim, 1, 1, 0))\n indim = outdim\n for _j in range(config.num_conv_inner):\n # We now use our selected convolution layer. Note that our\n # resnet implementation will have a different call style to\n # vanilla conv2d of torch, so we'll just do an ugly if-else\n # here.\n if config.conv2d == \"torch\":\n self.convs.add_module(\n \"conv_{}_{}\".format(_i, _j),\n self.Conv2d(indim, outdim, config.ksize, 1, 1))\n self.convs.add_module(\n \"act_{}_{}\".format(_i, _j),\n self.Activation())\n cur_h = cur_h - (config.ksize - 1)\n cur_w = cur_w - (config.ksize - 1)\n elif config.conv2d == \"custom\":\n self.convs.add_module(\n \"conv_{}_{}\".format(_i, _j),\n self.Conv2d(indim, outdim, config.ksize, 1, self.Activation))\n self.convs.add_module(\n \"conv_{}_pool\".format(_i), self.Pool2d(2, 2))\n cur_h = cur_h // 2\n cur_w = cur_w // 2\n\n # Final output layer. 
We'll assume that conv layer outputs are global\n # average pooled\n self.output = nn.Linear(indim, config.num_class)\n\n print(self)", "def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\r\n conv_block = []\r\n p = 0\r\n if padding_type == 'reflect':\r\n conv_block += [nn.ReflectionPad2d(1)]\r\n elif padding_type == 'replicate':\r\n conv_block += [nn.ReplicationPad2d(1)]\r\n elif padding_type == 'zero':\r\n p = 1\r\n else:\r\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\r\n\r\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\r\n if use_dropout:\r\n conv_block += [nn.Dropout(0.5)]\r\n\r\n p = 0\r\n if padding_type == 'reflect':\r\n conv_block += [nn.ReflectionPad2d(1)]\r\n elif padding_type == 'replicate':\r\n conv_block += [nn.ReplicationPad2d(1)]\r\n elif padding_type == 'zero':\r\n p = 1\r\n else:\r\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\r\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\r\n\r\n return nn.Sequential(*conv_block)", "def _construct_block(self, block_info):\n layer_name = block_info[0]\n if layer_name=='Conv2d':\n in_channels, out_channels, kernel_size = block_info[1:]\n return nn.Conv2d(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size)\n elif layer_name=='ReLU':\n return nn.ReLU(inplace=True)\n elif layer_name=='MaxPool2d':\n kernel_size, stride = block_info[1:]\n return nn.MaxPool2d(kernel_size=kernel_size,\n stride=stride)\n elif layer_name=='BatchNorm2d':\n num_features = block_info[1]\n return nn.BatchNorm2d(num_features=num_features)\n elif layer_name=='Linear':\n in_features, out_features = block_info[1:]\n return nn.Linear(in_features=in_features,\n out_features=out_features)\n else:\n raise Exception(\"_construct_block cannot construct block\")", "def build_transformation_network(n_styles, depthwise_separable_conv):\n\n image_input = Input((None, None, 3), name=\"image\")\n style_weights = Input((n_styles, ), name=\"style_weights\")\n\n net = conv_block(image_input,\n style_weights,\n filters=32,\n kernel_size=(9, 9),\n strides=(1, 1),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=64,\n kernel_size=(3, 3),\n strides=(2, 2),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(2, 2),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = upsampling_block(net,\n style_weights,\n interpolation_factor=2,\n filters=64,\n kernel_size=(3, 3),\n 
strides=(1, 1),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = upsampling_block(net,\n style_weights,\n interpolation_factor=2,\n filters=32,\n kernel_size=(3, 3),\n strides=(1, 1),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=3,\n kernel_size=(9, 9),\n strides=(1, 1),\n activation=\"sigmoid\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = Lambda(lambda t: t * 255.0, name=\"output\")(net)\n\n return Model([image_input, style_weights], net, name=\"transform_net\")", "def _contracting_block(self, in_channels, out_channels, kernel_size=3):\n block = nn.Sequential(\n nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels, out_channels=out_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels),\n nn.Conv2d(kernel_size=kernel_size, in_channels=out_channels, out_channels=out_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels),\n )\n return block", "def _contracting_block(self, in_channels, out_channels, kernel_size=3):\n block = nn.Sequential(\n nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels, out_channels=out_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels),\n nn.Conv2d(kernel_size=kernel_size, in_channels=out_channels, out_channels=out_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels),\n )\n return block", "def make_stage(num_blocks, input_channels, output_channels, stride, expand_ratio, norm, activation):\n blocks = []\n blocks.append(\n InvertedResBlock(input_channels, output_channels, stride=stride, expand_ratio=expand_ratio,\n norm=norm, activation=activation, use_shortcut=False)\n )\n for i in range(num_blocks - 1):\n blocks.append(\n InvertedResBlock(output_channels, output_channels, stride=1, expand_ratio=expand_ratio,\n norm=norm, activation=activation)\n )\n\n return blocks", "def __init__(\n self,\n image_size: tuple,\n out_channels: int,\n num_channel_initial: int,\n extract_levels: List[int],\n out_kernel_initializer: str,\n out_activation: str,\n name: str = \"LocalNet\",\n **kwargs,\n ):\n super().__init__(\n image_size=image_size,\n out_channels=out_channels,\n num_channel_initial=num_channel_initial,\n out_kernel_initializer=out_kernel_initializer,\n out_activation=out_activation,\n name=name,\n **kwargs,\n )\n\n # save parameters\n self._extract_levels = extract_levels\n self._extract_max_level = max(self._extract_levels) # E\n self._extract_min_level = min(self._extract_levels) # D\n\n # init layer variables\n num_channels = [\n num_channel_initial * (2 ** level)\n for level in range(self._extract_max_level + 1)\n ] # level 0 to E\n self._downsample_blocks = [\n layer.DownSampleResnetBlock(\n filters=num_channels[i], kernel_size=7 if i == 0 else 3\n )\n for i in range(self._extract_max_level)\n ] # level 0 to E-1\n self._conv3d_block = layer.Conv3dBlock(filters=num_channels[-1]) # level E\n\n self._upsample_blocks = [\n layer.LocalNetUpSampleResnetBlock(num_channels[level])\n for level in range(\n self._extract_max_level - 1, self._extract_min_level - 1, -1\n )\n ] # level D to E-1\n\n self._extract_layers = [\n # if kernels are not initialized by zeros, with init NN, extract may be too large\n layer.Conv3dWithResize(\n output_shape=image_size,\n filters=out_channels,\n kernel_initializer=out_kernel_initializer,\n activation=out_activation,\n )\n for _ in self._extract_levels\n ]", "def __init__(self, dropout=0, input_dim=(3, 32, 32), num_filters=32, 
filter_size=7,\r\n hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0, \r\n use_batch_norm=False, dtype=np.float32):\r\n self.use_dropout = dropout > 0\r\n self.use_batch_norm = use_batch_norm\r\n self.params = {}\r\n self.reg = reg\r\n self.num_layers = 3\r\n self.dtype = dtype\r\n self.pool_height = 2\r\n self.pool_width = 2\r\n self.pool_stride = 2\r\n\r\n ############################################################################\r\n # TODO: Initialize weights and biases for the three-layer convolutional #\r\n # network. Weights should be initialized from a Gaussian with standard #\r\n # deviation equal to weight_scale; biases should be initialized to zero. #\r\n # All weights and biases should be stored in the dictionary self.params. #\r\n # Store weights and biases for the convolutional layer using the keys 'W1' #\r\n # and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #\r\n # hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #\r\n # of the output affine layer. #\r\n ############################################################################\r\n # NUmber of channels\r\n C, H, W = input_dim\r\n self.params['W1'] = np.random.randn(num_filters, C, filter_size, filter_size) * weight_scale\r\n self.params['b1'] = np.zeros(num_filters)\r\n H_pool = (H - self.pool_height) / 2 + 1\r\n W_pool = (W - self.pool_width) / 2 + 1\r\n self.params['W2'] = np.random.randn(np.prod((num_filters, H_pool, W_pool)), hidden_dim) * weight_scale\r\n self.params['b2'] = np.zeros(hidden_dim)\r\n self.params['W3'] = np.random.randn(hidden_dim, num_classes) * weight_scale\r\n self.params['b3'] = np.zeros(num_classes)\r\n\r\n # Initialize the parameters for batch normalization if necessary\r\n if self.use_batch_norm:\r\n self.params['gamma1'] = np.ones(num_filters) \r\n self.params['beta1'] = np.zeros(num_filters)\r\n self.params['gamma2'] = np.ones(hidden_dim)\r\n self.params['beta2'] = np.zeros(hidden_dim)\r\n\r\n # Set dropout parameters if necessary\r\n self.dropout_param={}\r\n if self.use_dropout:\r\n self.dropout_param ={'mode':'train', 'p':dropout}\r\n\r\n self.bn_params = []\r\n if self.use_batch_norm:\r\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\r\n\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n for k, v in self.params.items():\r\n self.params[k] = v.astype(dtype)", "def __init__(self, in_dim, dim, out_dim, nblocks = 8):\n\t\tsuper(Residual_Module, self).__init__()\n\t\tself.nblocks = nblocks\n\t\tassert self.nblocks > 0\n\t\tself.in_block = nn.utils.weight_norm(\n\t\t\tnn.Conv2d(in_dim, dim, (3, 3), stride=1, padding=1, bias=True)\n\t\t)\n\t\tself.core_blocks = nn.ModuleList(\n\t\t\t[BasicBlock(dim, dim) for _ in range(nblocks)]\n\t\t)\n\t\tself.out_block = nn.utils.weight_norm(\n\t\t\tnn.Conv2d(dim, out_dim, (1, 1), stride=1, padding=0, bias=True),\n\t\t)", "def build_resnet50(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats 
= self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128)\n res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, use_batch_norm, 128)\n res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, use_batch_norm, 128)\n res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, use_batch_norm, 128)\n\n res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, use_batch_norm, 256)\n res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, use_batch_norm, 256)\n res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, use_batch_norm, 256)\n res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, use_batch_norm, 256)\n res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, use_batch_norm, 256)\n\n res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def build(classes):\n # data input\n data = mx.sym.Variable(\"data\")\n\n # Block #1: first CONV => RELU => POOL layer set\n conv1_1 = mx.sym.Convolution(data=data, kernel=(11, 11), stride=(4, 4), num_filter=96)\n act1_1 = mx.sym.LeakyReLU(data=conv1_1, act_type=\"elu\")\n bn1_1 = mx.sym.BatchNorm(data=act1_1)\n pool1 = mx.sym.Pooling(data=bn1_1, pool_type=\"max\", kernel=(3, 3), stride=(2, 2))\n do1 = mx.sym.Dropout(data=pool1, p=0.25)\n\n # Block #2: second CONV => RELU => POOL layer set\n conv2_1 = mx.sym.Convolution(data=do1, kernel=(5, 5), pad=(2, 2), num_filter=256)\n act2_1 = mx.sym.LeakyReLU(data=conv2_1, act_type=\"elu\")\n bn2_1 = mx.sym.BatchNorm(data=act2_1)\n pool2 = mx.sym.Pooling(data=bn2_1, pool_type=\"max\", kernel=(3, 3), stride=(2, 2))\n do2 = mx.sym.Dropout(data=pool2, p=0.25)\n\n # Block #3: (CONV => RELU) * 3 => POOL\n conv3_1 = mx.sym.Convolution(data=do2, kernel=(3, 3), pad=(1, 1), num_filter=384)\n act3_1 = mx.sym.LeakyReLU(data=conv3_1, act_type=\"elu\")\n bn3_1 = mx.sym.BatchNorm(data=act3_1)\n conv3_2 = mx.sym.Convolution(data=bn3_1, kernel=(3, 3), pad=(1, 1), num_filter=384)\n act3_2 = mx.sym.LeakyReLU(data=conv3_2, act_type=\"elu\")\n bn3_2 = mx.sym.BatchNorm(data=act3_2)\n conv3_3 = mx.sym.Convolution(data=bn3_2, kernel=(3, 3), pad=(1, 1), num_filter=256)\n act3_3 = mx.sym.LeakyReLU(data=conv3_3, act_type=\"elu\")\n bn3_3 = mx.sym.BatchNorm(data=act3_3)\n pool3 = mx.sym.Pooling(data=bn3_3, pool_type=\"max\", kernel=(3, 3), stride=(2, 2))\n do3 = mx.sym.Dropout(data=pool3, p=0.25)\n\n # Block #4: first set of FC => RELU layers\n flatten = mx.sym.Flatten(data=do3)\n fc1 = mx.sym.FullyConnected(data=flatten, num_hidden=4096)\n act4_1 = mx.sym.LeakyReLU(data=fc1, act_type=\"elu\")\n bn4_1 = mx.sym.BatchNorm(data=act4_1)\n do4 = mx.sym.Dropout(data=bn4_1, p=0.5)\n\n # Block #5: second set of FC => RELU layers\n fc2 = 
mx.sym.FullyConnected(data=do4, num_hidden=4096)\n act5_1 = mx.sym.LeakyReLU(data=fc2, act_type=\"elu\")\n bn5_1 = mx.sym.BatchNorm(data=act5_1)\n do5 = mx.sym.Dropout(data=bn5_1, p=0.5)\n\n # softmax classifier\n fc3 = mx.sym.FullyConnected(data=do5, num_hidden=classes)\n model = mx.sym.SoftmaxOutput(data=fc3, name=\"softmax\")\n\n # return the network architecture\n return model", "def _final_block(self, in_channels, mid_channels, out_channels, kernel_size=3):\n block = nn.Sequential(\n nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels, out_channels=mid_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(mid_channels),\n nn.Conv2d(kernel_size=kernel_size, in_channels=mid_channels, out_channels=mid_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(mid_channels),\n nn.Conv2d(kernel_size=kernel_size, in_channels=mid_channels, out_channels=out_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels)\n )\n return block", "def _final_block(self, in_channels, mid_channels, out_channels, kernel_size=3):\n block = nn.Sequential(\n nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels, out_channels=mid_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(mid_channels),\n nn.Conv2d(kernel_size=kernel_size, in_channels=mid_channels, out_channels=mid_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(mid_channels),\n nn.Conv2d(kernel_size=kernel_size, in_channels=mid_channels, out_channels=out_channels, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels)\n )\n return block", "def _conv_block( inputs, filters, kernel, strides, nl):\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n x = BatchNormalization(axis=channel_axis)(x)\n return _return_activation(x, nl)", "def conv_building_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2), training=None):\n filters1, filters2 = filters\n if tf.keras.backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = tf.keras.layers.Conv2D(filters1, kernel_size, strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n bias_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '2a')(input_tensor)\n x = tf.keras.layers.BatchNormalization(axis=bn_axis,\n name=bn_name_base + '2a',\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(\n x, training=training)\n x = tf.keras.layers.Activation('relu')(x)\n\n x = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n bias_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '2b')(x)\n x = tf.keras.layers.BatchNormalization(axis=bn_axis,\n name=bn_name_base + '2b',\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(\n x, training=training)\n\n shortcut = tf.keras.layers.Conv2D(filters2, (1, 1), strides=strides,\n kernel_initializer='he_normal',\n kernel_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n bias_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '1')(input_tensor)\n shortcut = tf.keras.layers.BatchNormalization(\n axis=bn_axis, name=bn_name_base + '1',\n momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON)(\n shortcut, 
training=training)\n\n x = tf.keras.layers.add([x, shortcut])\n x = tf.keras.layers.Activation('relu')(x)\n return x", "def mobilenetv2 (inputs, k, alpha = 1.0, train_bn = False):\n\n x = conv_block(inputs, 32, alpha, (3, 3), strides=(2, 2), block_id=0, train_bn=train_bn) # Input Res: 1\n\n x = inverted_residual_block(x, 16, (3, 3), t=1, strides=1, n=1, alpha=1.0, block_id=1, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 24, (3, 3), t=6, strides=2, n=2, alpha=1.0, block_id=2, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 32, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=4, train_bn=train_bn)\t# Input Res: 1/4\n x = inverted_residual_block(x, 64, (3, 3), t=6, strides=2, n=4, alpha=1.0, block_id=7, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 96, (3, 3), t=6, strides=1, n=3, alpha=1.0, block_id=11, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 160, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=14, train_bn=train_bn)\t# Input Res: 1/16\n x = inverted_residual_block(x, 320, (3, 3), t=6, strides=1, n=1, alpha=1.0, block_id=17, train_bn=train_bn)\t# Input Res: 1/32\n\n x = conv_block(x, 1280, alpha, (1, 1), strides=(1, 1), block_id=18, train_bn=train_bn) # Input Res: 1/32\n\n x = KL.GlobalAveragePooling2D()(x)\n x = KL.Reshape((1, 1, 1280))(x)\n x = KL.Dropout(0.3, name='Dropout')(x)\n x = KL.Conv2D(k, (1, 1), padding='same')(x)\n\n x = KL.Activation('softmax', name='softmax')(x)\n output = KL.Reshape((k,))(x)\n\n model = KM.Model(inputs, output)\n plot_model(model, to_file='MobileNetv2.png', show_shapes=True)\n\n return model", "def build_resnet101(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128) \n temp = res3a_feats\n for i in range(1, 4):\n temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, use_batch_norm, 128)\n res3b3_feats = temp\n \n res4a_feats = self.basic_block(res3b3_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n temp = res4a_feats\n for i in range(1, 23):\n temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, use_batch_norm, 256)\n res4b22_feats = temp\n\n res5a_feats = self.basic_block(res4b22_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):\r\n filters1, filters2, filters3 = filters\r\n if 
backend.image_data_format() == 'channels_last':\r\n bn_axis = 3\r\n else:\r\n bn_axis = 1\r\n conv_name_base = 'res' + str(stage) + block + '_branch'\r\n bn_name_base = 'bn' + str(stage) + block + '_branch'\r\n\r\n x = layers.Conv2D(filters1, (1, 1), strides=strides,\r\n kernel_initializer='he_normal',\r\n name=conv_name_base + '2a')(input_tensor)\r\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\r\n x = layers.Activation('relu')(x)\r\n\r\n x = layers.Conv2D(filters2, kernel_size, padding='same',\r\n kernel_initializer='he_normal',\r\n name=conv_name_base + '2b')(x)\r\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\r\n x = layers.Activation('relu')(x)\r\n\r\n x = layers.Conv2D(filters3, (1, 1),\r\n kernel_initializer='he_normal',\r\n name=conv_name_base + '2c')(x)\r\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\r\n\r\n shortcut = layers.Conv2D(filters3, (1, 1), strides=strides,\r\n kernel_initializer='he_normal',\r\n name=conv_name_base + '1')(input_tensor)\r\n shortcut = layers.BatchNormalization(\r\n axis=bn_axis, name=bn_name_base + '1')(shortcut)\r\n\r\n x = layers.add([x, shortcut])\r\n x = layers.Activation('relu')(x)\r\n return x", "def non_residual_block(\n inputs,\n starting_features,\n conv_kernel=(7, 7, 3),\n strides=(2, 2, 2),\n padding=3,\n max_pool_size=(3, 3, 2),\n activation=\"relu\",\n use_bias=False,\n bn_epsilon=1e-5,\n pooling_padding=\"same\",\n axis=3,\n):\n\n x = ZeroPadding3D(padding=padding, name=\"conv1_padding\")(inputs)\n x = Conv3D(\n starting_features,\n conv_kernel,\n strides=strides,\n use_bias=use_bias,\n name=\"conv1\",\n )(x)\n x = BatchNormalization(axis=axis, epsilon=bn_epsilon, name=\"conv1_bn\")(x)\n x = Activation(activation, name=\"conv1_activation\")(x)\n x = MaxPooling3D(\n max_pool_size,\n strides=strides,\n padding=pooling_padding,\n name=\"max_pool\",\n )(x)\n\n return x", "def build_resnet_generator3D(self, model_shape, filters=32, k_size=3, last_act='tanh', summary=False, model_file=None, name='gan_g_'):\n if (model_file):\n \"\"\"\n Load pretreined model\n \"\"\"\n model = self.utils.build_pretrained_model(model_file)\n if (summary):\n model.summary()\n return model\n else:\n init = RandomNormal(stddev=0.02)\n n_rows = model_shape[0]\n n_cols = model_shape[1]\n n_layers = model_shape[2]\n in_c_dims = model_shape[3]\n out_c_dims = model_shape[4]\n \n n_rows_e1, n_rows_e2, n_rows_e4, n_rows_e8 = n_rows//1, n_rows//2, n_rows//4, n_rows//8\n rows_matching = np.equal([2*n_rows_e2, 2*n_rows_e4, 2*n_rows_e8], [n_rows_e1, n_rows_e2, n_rows_e4])\n index_rows = np.where(np.logical_not(rows_matching))[0]\n \n n_cols_e1, n_cols_e2, n_cols_e4, n_cols_e8 = n_cols//1, n_cols//2, n_cols//4, n_cols//8\n cols_matching = np.equal([2*n_cols_e2, 2*n_cols_e4, 2*n_cols_e8], [n_cols_e1, n_cols_e2, n_cols_e4])\n index_cols = np.where(np.logical_not(cols_matching))[0]\n \n input_shape = (n_rows, n_cols, in_c_dims)\n input_layer = Input(shape=input_shape, name=name+'_input')\n \n e1 = self.Conv3D_Block(input_layer, n_kernels=filters, k_size=7, strides=1, bn=False,name=name+'e1') # rows, cols\n e2 = self.Conv3D_Block(e1, 2*filters, k_size=k_size, bn_training=True, name=name+'e2') # rows/2, cols/2\n e3 = self.Conv3D_Block(e2, 4*filters, k_size=k_size, bn_training=True, name=name+'e3') # rows/4, cols/4\n e4 = self.Conv3D_Block(e3, 8*filters, k_size=k_size, bn=False, name=name+'e4') # rows/8, cols/8\n\n rb1 = self.Residual3D_Block(e4, n_kernels=8*filters, k_size=k_size, bn_training=True, 
name=name+'1_')\n rb2 = self.Residual3D_Block(rb1, n_kernels=8*filters, k_size=k_size, bn_training=True, name=name+'2_')\n rb3 = self.Residual3D_Block(rb2, n_kernels=8*filters, k_size=k_size, bn_training=True, name=name+'3_')\n rb3 = Dropout(rate=0.5, name=name+'drop_1')(rb3, training=True)\n \n rb4 = self.Residual3D_Block(rb3, n_kernels=8*filters, k_size=k_size, bn_training=True, name=name+'4_')\n rb4 = Dropout(rate=0.5, name=name+'drop_2')(rb4, training=True) \n \n rb5 = self.Residual3D_Block(rb4, n_kernels=8*filters, k_size=k_size, bn_training=True, name=name+'5_')\n rb5 = Dropout(rate=0.5, name=name+'drop_3')(rb5, training=True) \n \n d1 = self.Conv3DTranspose_Block(rb5, 4*filters, k_size=k_size, activation='linear', name=name+'d1') # rows/4, cols/4\n if index_rows==2 or index_cols==2:\n d1 = BilinearUpsampling(output_size=(n_rows//4, n_cols//4), name=name+'_bilinear')(d1)\n d1 = Concatenate(name=name+'conc_1')([d1, e3])\n d1 = Activation('relu', name=name+'_act_1')(d1)\n \n d2 = self.Conv3DTranspose_Block(d1, 2*filters, k_size=k_size, activation='linear', name=name+'d2') # rows/2, cols/2\n if index_rows==1 or index_cols==1:\n d2 = BilinearUpsampling(output_size=(n_rows//2, n_cols//2), name=name+'_bilinear')(d2)\n d2 = Concatenate(name=name+'conc_2')([d2, e2])\n d2 = Activation('relu', name=name+'_act_2')(d2)\n \n d3 = self.Conv3DTranspose_Block(d2, 1*filters, k_size=k_size, activation='linear', name=name+'d3') # rows, cols\n if index_rows==0 or index_cols==0:\n d3 = BilinearUpsampling(output_size=(n_rows, n_cols), name=name+'_bilinear')(d2)\n d3 = Concatenate(name=name+'conc_3')([d3, e1])\n d3 = Activation('relu', name=name+'act_3')(d3)\n\n output = Conv3DTranspose(out_c_dims, 7, strides=1, padding='same', kernel_initializer=init, name=name+'d_out')(d3) # rows, cols\n output = Activation(last_act, name=name+last_act)(output)\n\n model = Model(inputs=[input_layer], outputs=[output], name='Generator'+name[-3:])\n if (summary):\n model.summary()\n return model", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.rpn_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.rpn_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.rpn_cls = nn.Conv2d(\n self.feat_channels, self.num_anchors * 1, 3, padding=1)\n self.rpn_reg = nn.Conv2d(\n self.feat_channels, self.num_anchors * 4 * (self.reg_max + 1), 3, padding=1)\n self.rpn_iou = nn.Conv2d(\n self.feat_channels, self.num_anchors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.anchor_generator.strides])\n\n ##############V2################\n conf_vector = [nn.Conv2d(self.num_anchors * 4 * self.total_dim, self.num_anchors * self.reg_channels, 1)]\n conf_vector += [self.relu]\n conf_vector += [nn.Conv2d(self.num_anchors * self.reg_channels, self.num_anchors, 1), nn.Sigmoid()]\n\n self.reg_conf = nn.Sequential(*conf_vector)\n ##############V2################", "def conv_block(self,\r\n\t\t\t\t input_tensor,\r\n\t\t\t\t kernel_size,\r\n\t\t\t\t filters,\r\n\t\t\t\t stage,\r\n\t\t\t\t block,\r\n\t\t\t\t strides=(2, 2)):\r\n\t\t# number of filters\r\n\t\tfilters1, filters2, filters3 = filters\r\n\r\n\t\t# number of classification categories\r\n\t\tbn_axis = self.channel_last\r\n \r\n\t\t# names for the layers\r\n\t\tconv_name_base = 'res' + str(stage) + block + '_branch'\r\n\t\tbn_name_base = 'bn' + str(stage) + block + 
'_branch'\r\n\r\n\t\t# simple convolutional layers\r\n\t\tx = Conv2D(filters1, (1, 1), strides=strides,\r\n\t\t\t\t\t\t kernel_initializer='he_normal',\r\n\t\t\t\t\t\t name=conv_name_base + '2a')(input_tensor)\r\n\t\tx = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\r\n\t\tx = Activation('relu')(x)\r\n\r\n\t\tx = Conv2D(filters2, kernel_size, padding='same',\r\n\t\t\t\t\t\t kernel_initializer='he_normal',\r\n\t\t\t\t\t\t name=conv_name_base + '2b')(x)\r\n\t\tx = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\r\n\t\tx = Activation('relu')(x)\r\n\r\n\t\tx = Conv2D(filters3, (1, 1),\r\n\t\t\t\t\t\t kernel_initializer='he_normal',\r\n\t\t\t\t\t\t name=conv_name_base + '2c')(x)\r\n\t\tx = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\r\n\r\n\t\t# define a shortcut, which is an output for convolutional layer obtained by directly applying theninput tensor to filter 3\r\n\t\tshortcut = Conv2D(filters3, (1, 1), strides=strides,\r\n\t\t\t\t\t\t\t\t kernel_initializer='he_normal',\r\n\t\t\t\t\t\t\t\t name=conv_name_base + '1')(input_tensor)\r\n \r\n\t\tshortcut = BatchNormalization(\r\n\t\t\taxis=bn_axis, name=bn_name_base + '1')(shortcut)\r\n\r\n\t\t# adding stepwise output x to shortcut output\r\n\t\tx = add([x, shortcut])\r\n\t\tx = Activation('relu')(x)\r\n \r\n\t\treturn x", "def _project(self):\n ghosts_w = self.input_field.topology.ghosts()\n self.input_field.data[0], self.input_field.data[1], \\\n self.input_field.data[2] = \\\n fftw2py.projection_om_3d(self.input_field.data[0],\n self.input_field.data[1],\n self.input_field.data[2], ghosts_w)", "def inference(image,norm = True,phase_train = True):\n batch_size = image.shape[0]\n r,g,b = tf.split(axis = 3,num_or_size_splits = 3,value = image)\n p_image = tf.concat([r - 123.68,\n g - 116.78,\n b - 103.94],axis = 3)\n with tf.variable_scope('vgg_16'):\n with tf.variable_scope('conv1'):\n conv1_1 = layer.conv_layer('conv1_1',p_image,[3,3,3,64])\n conv1_2 = layer.conv_layer('conv1_2',conv1_1,[3,3,64,64])\n pool1 = layer.pool_layer('pool1',conv1_2)\n with tf.variable_scope('conv2'):\n conv2_1 = layer.conv_layer('conv2_1',pool1,[3,3,64,128])\n conv2_2 = layer.conv_layer('conv2_2',conv2_1,[3,3,128,128])\n pool2 = layer.pool_layer('pool2',conv2_2)\n with tf.variable_scope('conv3'):\n conv3_1 = layer.conv_layer('conv3_1',pool2,[3,3,128,256])\n conv3_2 = layer.conv_layer('conv3_2',conv3_1,[3,3,256,256])\n conv3_3 = layer.conv_layer('conv3_3',conv3_2,[3,3,256,256])\n pool3 = layer.pool_layer('pool3',conv3_3)\n with tf.variable_scope('conv4'):\n conv4_1 = layer.conv_layer('conv4_1',pool3,[3,3,256,512])\n conv4_2 = layer.conv_layer('conv4_2',conv4_1,[3,3,512,512])\n conv4_3 = layer.conv_layer('conv4_3',conv4_2,[3,3,512,512])\n pool4 = layer.pool_layer('pool4',conv4_3)\n with tf.variable_scope('conv5'):\n conv5_1 = layer.conv_layer('conv5_1',pool4,[3,3,512,512])\n conv5_2 = layer.conv_layer('conv5_2',conv5_1,[3,3,512,512])\n conv5_3 = layer.conv_layer('conv5_3',conv5_2,[3,3,512,512])\n pool5 = layer.pool_layer('pool5',conv5_3,ksize = [1,3,3,1],strides = [1,1,1,1])\n with tf.variable_scope('ssd'):\n conv6 = layer.atrous_conv('conv6',pool5,[3,3,512,1024],rate = 6,\n batch_normalization = norm,phase_train = phase_train)\n conv7 = layer.conv_layer('conv7',conv6,[1,1,1024,1024],\n batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv8'):\n conv8_1 = layer.conv_layer('conv8_1',conv7,[1,1,1024,256],\n batch_normalization = norm,phase_train = phase_train)\n conv8_2 = 
layer.conv_layer('conv8_2',conv8_1,[3,3,256,512],\n stride = [1,2,2,1],batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv9'):\n conv9_1 = layer.conv_layer('conv9_1',conv8_2,[1,1,512,128],\n batch_normalization = norm,phase_train = phase_train)\n conv9_2 = layer.conv_layer('conv9_2',conv9_1,[3,3,128,256],\n stride = [1,2,2,1],batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv10'):\n conv10_1 = layer.conv_layer('conv10_1',conv9_2,[1,1,256,128],\n batch_normalization = norm,phase_train = phase_train)\n conv10_2 = layer.conv_layer('conv10_2',conv10_1,[3,3,128,256],\n padding = 'VALID',batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv11'):\n conv11_1 = layer.conv_layer('conv11_1',conv10_2,[1,1,256,128],\n batch_normalization = norm,phase_train = phase_train)\n conv11_2 = layer.conv_layer('conv11_2',conv11_1,[3,3,128,256],\n padding = 'VALID',batch_normalization = norm,phase_train = phase_train)#vgg300\n with tf.variable_scope('multibox'):\n\n l2_conv4_3 = layer.l2_normalization('l2_normalization',conv4_3,scaling = True)\n cls4 = layer.conv_layer('cls4',l2_conv4_3,[3,3,512,84],activation = None)\n loc4 = layer.conv_layer('loc4',l2_conv4_3,[3,3,512,16],activation = None)\n\n cls4_reshape = tf.reshape(cls4,[batch_size,-1,21])\n loc4_reshape = tf.reshape(loc4,[batch_size,-1,4])\n\n\n cls7 = layer.conv_layer('cls7',conv7,[3,3,1024,126],activation = None)\n loc7 = layer.conv_layer('loc7',conv7,[3,3,1024,24],activation = None)\n\n cls7_reshape = tf.reshape(cls7,[batch_size,-1,21])\n loc7_reshape = tf.reshape(loc7,[batch_size,-1,4])\n\n cls8 = layer.conv_layer('cls8',conv8_2,[3,3,512,126],activation = None)\n loc8 = layer.conv_layer('loc8',conv8_2,[3,3,512,24],activation = None)\n\n cls8_reshape = tf.reshape(cls8,[batch_size,-1,21])\n loc8_reshape = tf.reshape(loc8,[batch_size,-1,4])\n\n cls9 = layer.conv_layer('cls9',conv9_2,[3,3,256,126],activation = None)\n loc9 = layer.conv_layer('loc9',conv9_2,[3,3,256,24],activation = None)\n\n cls9_reshape = tf.reshape(cls9,[batch_size,-1,21])\n loc9_reshape = tf.reshape(loc9,[batch_size,-1,4])\n\n cls10 = layer.conv_layer('cls10',conv10_2,[3,3,256,84],activation = None)\n loc10 = layer.conv_layer('loc10',conv10_2,[3,3,256,16],activation = None)\n\n cls10_reshape = tf.reshape(cls10,[batch_size,-1,21])\n loc10_reshape = tf.reshape(loc10,[batch_size,-1,4])\n\n cls11 = layer.conv_layer('cls11',conv11_2,[1,1,256,84],activation = None)\n loc11 = layer.conv_layer('loc11',conv11_2,[1,1,256,16],activation = None)\n\n cls11_reshape = tf.reshape(cls11,[batch_size,-1,21])\n loc11_reshape = tf.reshape(loc11,[batch_size,-1,4])\n\n cls_logit = tf.concat([\n cls4_reshape,\n cls7_reshape,\n cls8_reshape,\n cls9_reshape,\n cls10_reshape,\n cls11_reshape\n ],1)\n loc_logit = tf.concat([\n loc4_reshape,\n loc7_reshape,\n loc8_reshape,\n loc9_reshape,\n loc10_reshape,\n loc11_reshape\n ],1)\n \n return cls_logit,loc_logit", "def resnet(num_blocks, classes=10, training=None):\n\n input_shape = (32, 32, 3)\n img_input = layers.Input(shape=input_shape)\n\n if backend.image_data_format() == 'channels_first':\n x = layers.Lambda(lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)),\n name='transpose')(img_input)\n bn_axis = 1\n else: # channel_last\n x = img_input\n bn_axis = 3\n\n x = tf.keras.layers.ZeroPadding2D(padding=(1, 1), name='conv1_pad')(x)\n x = tf.keras.layers.Conv2D(16, (3, 3),\n strides=(1, 1),\n padding='valid',\n kernel_initializer='he_normal',\n kernel_regularizer=\n 
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n bias_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n name='conv1')(x)\n x = tf.keras.layers.BatchNormalization(axis=bn_axis, name='bn_conv1',\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(\n x, training=training)\n x = tf.keras.layers.Activation('relu')(x)\n\n x = resnet_block(x, size=num_blocks, kernel_size=3, filters=[16, 16],\n stage=2, conv_strides=(1, 1), training=training)\n\n x = resnet_block(x, size=num_blocks, kernel_size=3, filters=[32, 32],\n stage=3, conv_strides=(2, 2), training=training)\n\n x = resnet_block(x, size=num_blocks, kernel_size=3, filters=[64, 64],\n stage=4, conv_strides=(2, 2), training=training)\n\n x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)\n x = tf.keras.layers.Dense(classes, activation='softmax',\n kernel_initializer='he_normal',\n kernel_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n bias_regularizer=\n tf.keras.regularizers.l2(L2_WEIGHT_DECAY),\n name='fc10')(x)\n\n inputs = img_input\n # Create model.\n model = tf.keras.models.Model(inputs, x, name='resnet56')\n\n return model", "def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=3,\n conv_layers=1, use_batchnorm=False, hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,\n dtype=np.float32):\n self.params = {}\n self.reg = reg\n self.dtype = dtype\n self.conv_layers = conv_layers\n self.num_layers = conv_layers + 2 # Currently conv + affine + softmax\n self.use_batchnorm = use_batchnorm\n\n if self.use_batchnorm:\n self.bn_params = []\n self.bn_params = [{'mode': 'train'} for i in xrange(self.num_layers + 1)]\n\n ############################################################################\n # TODO: Initialize weights and biases for the three-layer convolutional #\n # network. Weights should be initialized from a Gaussian with standard #\n # deviation equal to weight_scale; biases should be initialized to zero. #\n # All weights and biases should be stored in the dictionary self.params. #\n # Store weights and biases for the convolutional layer using the keys 'W1' #\n # and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #\n # hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #\n # of the output affine layer. 
#\n ############################################################################\n C, H, W = input_dim\n F = num_filters\n HH = filter_size\n WW = filter_size\n\n layer_dim = (F, C, HH, WW)\n\n # Conv - relu - pool weights\n for l in xrange(1, self.conv_layers + 1):\n self.params['W%d' % l] = np.random.normal(loc=0.0, scale=weight_scale, size=layer_dim)\n self.params['b%d' % l] = np.zeros(F)\n if self.use_batchnorm:\n self.params['gamma%d' % l] = np.ones(F)\n self.params['beta%d' % l] = np.zeros(F)\n layer_dim = (F, F, HH, WW)\n\n # Affine - Relu layer\n l = self.conv_layers + 1\n h_shape = ((num_filters * np.prod(input_dim[1:]) / 4**self.conv_layers), hidden_dim)\n self.params['W%d' % l] = np.random.normal(loc=0.0, scale=weight_scale, size=h_shape)\n self.params['b%d' % l] = np.zeros(hidden_dim)\n if self.use_batchnorm:\n self.params['gamma%d' % l] = np.ones(hidden_dim)\n self.params['beta%d' % l] = np.zeros(hidden_dim)\n\n # Final affine layer (hidden layers -> classes)\n l = l + 1\n a_shape = (hidden_dim, num_classes)\n self.params['W%d' % l] = np.random.normal(loc=0.0, scale=weight_scale, size=a_shape)\n self.params['b%d' % l] = np.zeros(num_classes)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def _conv_block(self,\n input_tensor,\n kernel_size,\n filters,\n stage,\n block,\n strides=(2, 2),\n dilation=1):\n filters1, filters2, filters3 = filters\n if backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n strides = (1, 1) if dilation > 1 else strides\n\n x = layers.Conv2D(filters1, (1, 1), strides=strides,\n kernel_initializer='he_normal',\n name=conv_name_base + '2a')(input_tensor)\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters2, kernel_size, padding='same',\n kernel_initializer='he_normal',\n name=conv_name_base + '2b',\n dilation_rate=dilation)(x)\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters3, (1, 1),\n kernel_initializer='he_normal',\n name=conv_name_base + '2c')(x)\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\n\n shortcut = layers.Conv2D(filters3, (1, 1), strides=strides,\n kernel_initializer='he_normal',\n name=conv_name_base + '1')(input_tensor)\n shortcut = layers.BatchNormalization(\n axis=bn_axis, name=bn_name_base + '1')(shortcut)\n\n x = layers.add([x, shortcut])\n x = layers.Activation('relu')(x)\n return x", "def __init__(self):\n super(Backbone, self).__init__()\n\n # input size: (128, 282, 282)\n # Block 1:\n # relu + 4 conv + bn\n self.conv1 = torch.nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=0)\n self.conv2 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0)\n self.conv3 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0)\n self.conv4 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0)\n\n self.bn1 = torch.nn.BatchNorm2d(64)\n self.bn2 = torch.nn.BatchNorm2d(64)\n self.bn3 = torch.nn.BatchNorm2d(64)\n self.bn4 = torch.nn.BatchNorm2d(64)\n\n # Block 2:\n # relu + 6 conv + stride 2 + bn\n self.conv5 = torch.nn.Conv2d(64, 128, kernel_size=3, stride=2, 
padding=0)\n self.conv6 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=0)\n self.conv7 = torch.nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=0)\n self.conv8 = torch.nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=0)\n self.conv9 = torch.nn.Conv2d(32, 16, kernel_size=3, stride=1, padding=0)\n self.conv10 = torch.nn.Conv2d(16, 8, kernel_size=3, stride=1, padding=0)\n\n self.bn5 = torch.nn.BatchNorm2d(128)\n self.bn6 = torch.nn.BatchNorm2d(128)\n self.bn7 = torch.nn.BatchNorm2d(64)\n self.bn8 = torch.nn.BatchNorm2d(32)\n self.bn9 = torch.nn.BatchNorm2d(16)\n self.bn10 = torch.nn.BatchNorm2d(8)\n\n # Block 3:\n # 2 fully connected with drop out.\n\n self.fc1 = torch.nn.Linear( 8 * 59 * 59, 32)\n self.fc1_bn = torch.nn.BatchNorm1d(32)\n self.fc_out = torch.nn.Linear(32, 3)", "def inception_block_v2(self, inputs, block_params, training,\n projection_shortcut, half_layer=None, no_prenorm=False):\n shortcut = inputs\n if not no_prenorm:\n inputs = self.batch_norm_act(inputs, training)\n\n # The projection shortcut should come after the first batch norm and ReLU\n # since it performs a 1x1 convolution.\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n icp_block_ops = block_params['icp_block_ops'](\n self.get_feature_shape(inputs)[0], self.get_feature_channels(inputs),\n block_params['filters'])\n inputs_branches = []\n for b, ops_branch in enumerate(icp_block_ops):\n for l,op in enumerate(ops_branch):\n with tf.variable_scope('b%d_l%d'%(b,l)):\n pre_indent = ' ' if l==len(ops_branch)-1 else ' '\n if l==0:\n inputs_b = inputs\n else:\n inputs_b = self.batch_norm_act(inputs_b, training)\n inputs_b = self.operation(op, inputs_b, pre_indent)\n inputs_branches.append(inputs_b)\n self._inception_block_layer += max([len(ops_branch) for ops_branch in icp_block_ops])\n\n c_axis = -1 if self.data_format == 'channels_last' else 1\n inputs = tf.concat(inputs_branches, c_axis)\n layer_name = '/'.join(tf.get_variable_scope().name.split('/')[2:])\n self.log_tensor_p(inputs, 'inception concat', layer_name)\n\n inputs = self.conv1d2d3d(inputs, block_params['filters'], 1, 1, 's')\n self.log_tensor_c(inputs, 1, 1, 's', tf.get_variable_scope().name)\n\n if self.residual and (not initial_layer):\n if not inputs.shape == shortcut.shape:\n if NoRes_InceptionReduction:\n return inputs\n else:\n import pdb; pdb.set_trace() # XXX BREAKPOINT\n else:\n if self.IsShowModel: self.log('Add shortcut*%0.1f'%(self.res_scale))\n return inputs * self.res_scale + shortcut\n else:\n return inputs", "def _identity_block(self, input_tensor, kernel_size, filters, stage, block, dilation=1):\n filters1, filters2, filters3 = filters\n if backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n\n if block > 'z':\n block = chr(ord(block) - ord('z') + ord('A') - 1)\n\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = layers.Conv2D(filters1, (1, 1),\n kernel_initializer='he_normal',\n name=conv_name_base + '2a')(input_tensor)\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters2, kernel_size,\n padding='same',\n kernel_initializer='he_normal',\n name=conv_name_base + '2b',\n dilation_rate=dilation)(x)\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters3, (1, 1),\n kernel_initializer='he_normal',\n name=conv_name_base + 
'2c')(x)\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\n\n x = layers.add([x, input_tensor])\n x = layers.Activation('relu')(x)\n return x", "def __init__(self, conv_block_args, deconv_block_args, flat_channels,\n flat_kernel_size):\n super().__init__()\n\n # Perform a number of steps validating the input arguments\n self._validate_parameters(conv_block_args, deconv_block_args,\n flat_channels, flat_kernel_size)\n\n # Create lists of conv and deconv blocks from the configurations\n # passed as arguments to this function\n self.conv_blocks = nn.ModuleList([\n ConvBlock(**args)\n for args in conv_block_args\n ])\n\n self.deconv_blocks = nn.ModuleList([\n DeconvBlock(**args)\n for args in deconv_block_args\n ])\n\n # The input and output from the flat channels must be compatible\n # with the configurations for the conv and deconv blocks\n flat_in_channels = conv_block_args[-1]['out_channels']\n flat_out_channels = deconv_block_args[0]['in_channels']\n\n # Setup the flat layers\n self.flat = nn.Conv2d(flat_in_channels, flat_channels,\n flat_kernel_size)\n self.flat2 = nn.Conv2d(flat_channels, flat_channels, 1)\n self.unflatten = nn.ConvTranspose2d(flat_channels, flat_out_channels,\n flat_kernel_size)", "def conv_block(input_tensor, kernel_size, filters, stage, block,use_bias=True, strides=(2, 2),dilation_rate=(1, 1), train_bn=None):\n filters1, filters2, filters3 = filters\n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = Conv2D(filters1, (1, 1), strides=strides,use_bias=use_bias, \n name=conv_name_base + '2a')(input_tensor)\n x = BatchNorm(axis=bn_axis, name=bn_name_base + '2a')(x, training=train_bn)\n x = Activation('relu')(x)\n\n x = Conv2D(filters2, kernel_size, dilation_rate=dilation_rate, padding='same',use_bias=use_bias, \n name=conv_name_base + '2b')(x)\n x = BatchNorm(axis=bn_axis, name=bn_name_base + '2b')(x, training=train_bn)\n x = Activation('relu')(x)\n\n x = Conv2D(filters3, (1, 1), use_bias=use_bias, name=conv_name_base + '2c')(x)\n #x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\n x = BatchNorm(axis=bn_axis, name=bn_name_base + '2c')(x, training=train_bn)\n shortcut = Conv2D(filters3, (1, 1), strides=strides,use_bias=use_bias, \n name=conv_name_base + '1')(input_tensor)\n shortcut = BatchNorm(axis=bn_axis, name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = layers.add([x, shortcut])\n x = Activation('relu')(x)\n return x", "def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,\n hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,\n dtype=np.float32):\n self.params = {}\n self.reg = reg\n self.dtype = dtype\n \n ############################################################################\n # TODO: Initialize weights and biases for the three-layer convolutional #\n # network. Weights should be initialized from a Gaussian with standard #\n # deviation equal to weight_scale; biases should be initialized to zero. #\n # All weights and biases should be stored in the dictionary self.params. #\n # Store weights and biases for the convolutional layer using the keys 'W1' #\n # and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #\n # hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #\n # of the output affine layer. 
#\n ############################################################################ \n C, H, W = input_dim;\n\n # Dimensions of data output by convolutional layer\n S = 1; pad = (filter_size - 1) / 2; # Stride and image padding\n hconv = (H - filter_size + 2*pad)/S + 1;\n wconv = (W - filter_size + 2*pad)/S + 1;\n\n # Get dimensions of 2x2 max-pool output\n hmp = hconv / 2;\n wmp = wconv / 2;\n\n # Get dimensions of vector fed into affine layer\n # Convert maxpool output by using np.reshape(v1,(N,-1))\n # Recover by using np.reshape(dv1,v1.shape)\n laff = hmp*wmp*num_filters;\n\n # Determine starting weight and bias matrices\n self.params['W1'] = weight_scale * np.random.randn(num_filters, C, filter_size, filter_size);\n self.params['b1'] = np.zeros(num_filters);\n self.params['W2'] = weight_scale * np.random.randn(laff, hidden_dim);\n self.params['b2'] = np.zeros(hidden_dim);\n self.params['W3'] = weight_scale * np.random.rand(hidden_dim,num_classes);\n self.params['b3'] = np.zeros(num_classes);\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n if last:\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias)]\n else:\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\n\n return nn.Sequential(*conv_block)", "def block3(\n x,\n filters,\n kernel_size=3,\n stride=1,\n groups=32,\n conv_shortcut=True,\n name='',\n norm_use=\"bn\",\n):\n if conv_shortcut is True:\n shortcut = layers.Conv2D(\n (64 // groups) * filters,\n 1,\n strides=stride,\n use_bias=False,\n name=name + '_0_conv',\n )(x)\n shortcut = normalize_layer(shortcut, norm_use=norm_use, name=name + '_0_')\n else:\n shortcut = x\n\n x = layers.Conv2D(\n filters,\n 1,\n use_bias=False,\n name=name + '_1_conv',\n kernel_initializer='he_normal',\n )(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_1_')\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n c = filters // groups\n x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)\n x = layers.DepthwiseConv2D(\n kernel_size,\n strides=stride,\n depth_multiplier=c,\n use_bias=False,\n name=name + '_2_conv',\n kernel_initializer='he_normal',\n )(x)\n x_shape = backend.int_shape(x)[1:-1]\n x = layers.Reshape(x_shape + (groups, c, c))(x)\n output_shape = x_shape + (groups, c) if backend.backend() == 'theano' else None\n x = layers.Lambda(\n lambda x: sum([x[:, :, :, :, i] for i in range(c)]),\n output_shape=output_shape,\n name=name + '_2_reduce',\n 
)(x)\n x = layers.Reshape(x_shape + (filters, ))(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_2_')\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D((64 // groups) * filters, 1, kernel_initializer='he_normal',\n use_bias=False, name=name + '_3_conv')(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_3_')\n\n x = layers.Add(name=name + '_add')([shortcut, x])\n x = layers.Activation('relu', name=name + '_out')(x)\n return x", "def generator_block(self, name):\n # generator same dim as input and output if multiple of 4\n inputs = Input(shape=(1, self.image_row, self.image_column, self.image_depth))\n\n # Representation\n gennet = ReflectPadding3D(padding=3)(inputs)\n gennet = Conv3D(self.generator_kernel, 7, strides=1, kernel_initializer=gen_initializer,\n use_bias=False,\n name=name + '_gen_conv1',\n data_format='channels_first')(gennet)\n gennet = InstanceNormalization3D(name=name + '_gen_isnorm_conv1')(gennet)\n gennet = Activation('relu')(gennet)\n\n # Downsampling 1\n gennet = ReflectPadding3D(padding=1)(gennet)\n gennet = Conv3D(self.generator_kernel * 2, 3, strides=2, kernel_initializer=gen_initializer,\n use_bias=False,\n name=name + '_gen_conv2',\n data_format='channels_first')(gennet)\n gennet = InstanceNormalization3D(name=name + '_gen_isnorm_conv2')(gennet)\n gennet = Activation('relu')(gennet)\n\n # Downsampling 2\n gennet = ReflectPadding3D(padding=1)(gennet)\n gennet = Conv3D(self.generator_kernel * 4, 3, strides=2, kernel_initializer=gen_initializer,\n use_bias=False,\n name=name + '_gen_conv3',\n data_format='channels_first')(gennet)\n gennet = InstanceNormalization3D(name=name + '_gen_isnorm_conv3')(gennet)\n gennet = Activation('relu')(gennet)\n\n # Resnet blocks : 6, 8*4 = 32\n gennet = resnet_blocks(gennet, self.generator_kernel * 4, name=name + '_gen_block1')\n gennet = resnet_blocks(gennet, self.generator_kernel * 4, name=name + '_gen_block2')\n gennet = resnet_blocks(gennet, self.generator_kernel * 4, name=name + '_gen_block3')\n gennet = resnet_blocks(gennet, self.generator_kernel * 4, name=name + '_gen_block4')\n gennet = resnet_blocks(gennet, self.generator_kernel * 4, name=name + '_gen_block5')\n gennet = resnet_blocks(gennet, self.generator_kernel * 4, name=name + '_gen_block6')\n\n # Upsampling 1\n gennet = UpSampling3D(size=(2, 2, 2),\n data_format='channels_first')(gennet)\n gennet = ReflectPadding3D(padding=1)(gennet)\n gennet = Conv3D(self.generator_kernel * 2, 3, strides=1, kernel_initializer=gen_initializer,\n use_bias=False,\n name=name + '_gen_deconv1',\n data_format='channels_first')(gennet)\n gennet = InstanceNormalization3D(name=name + '_gen_isnorm_deconv1')(gennet)\n gennet = Activation('relu')(gennet)\n\n # Upsampling 2\n gennet = UpSampling3D(size=(2, 2, 2),\n data_format='channels_first')(gennet)\n gennet = ReflectPadding3D(padding=1)(gennet)\n gennet = Conv3D(self.generator_kernel, 3, strides=1, kernel_initializer=gen_initializer,\n use_bias=False,\n name=name + '_gen_deconv2',\n data_format='channels_first')(gennet)\n gennet = InstanceNormalization3D(name=name + '_gen_isnorm_deconv2')(gennet)\n gennet = Activation('relu')(gennet)\n\n # Reconstruction\n gennet = ReflectPadding3D(padding=3)(gennet)\n if self.fit_mask : \n gennet = Conv3D(2+self.nb_classe_mask, 7, strides=1, kernel_initializer=gen_initializer,\n use_bias=False,\n name=name + '_gen_1conv',\n data_format='channels_first')(gennet)\n else : \n gennet = Conv3D(2, 7, strides=1, kernel_initializer=gen_initializer,\n use_bias=False,\n 
name=name + '_gen_1conv',\n data_format='channels_first')(gennet)\n\n predictions = gennet\n predictions = activation_SegSRGAN(is_residual=self.is_residual,nb_classe_mask=self.nb_classe_mask,fit_mask=self.fit_mask)(\n [predictions, inputs]) # sigmoid proba + add input and pred SR\n\n model = Model(inputs=inputs, outputs=predictions, name=name)\n return model", "def __init__(self, dropout_rate=0.0, in_channels=3):\n\n super(MaskNet, self).__init__()\n\n self.prep_block_1 = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(32),\n nn.Dropout(dropout_rate),\n )\n self.prep_block_2 = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(32),\n nn.Dropout(dropout_rate),\n )\n\n self.convblock1 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=32, kernel_size=1, padding=0),\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Dropout(dropout_rate),\n )\n\n self.convblock2 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Dropout(dropout_rate),\n )\n\n self.convblock3 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=1, kernel_size=1, padding=0),\n )", "def conv_block(\n data,\n name,\n channels,\n kernel_size=(3, 3),\n strides=(1, 1),\n padding=(1, 1),\n epsilon=1e-5,\n layout=\"NCHW\",\n):\n # convolution + bn + relu\n conv = layers.conv2d(\n data=data,\n channels=channels,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_layout=layout,\n kernel_layout=layers.conv_kernel_layout(layout),\n name=name + \"_conv\",\n )\n bn = layers.batch_norm_infer(data=conv, epsilon=epsilon, name=name + \"_bn\")\n act = relay.nn.relu(data=bn)\n return act", "def forward(self, input_tensor):\n last = input_tensor\n for module in self.projection:\n projection = module(last)\n last = torch.cat((last, projection), -1)\n projection = last\n\n intermediate = self.seed(projection)\n intermediate = intermediate.view((-1, 512, 3, 3))\n\n projection_2d = projection.view((-1, self.projection_dim, 1, 1))\n projection_2d = self.projection_upscaler(projection_2d)\n\n for i, (conv, upscaling) in enumerate(zip(self.conv, self.upscaling)):\n if i + 1 != len(self.upscaling):\n if i > 0:\n intermediate = torch.cat((intermediate, projection_2d), 1)\n intermediate = torch.nn.functional.pixel_shuffle(intermediate, 2)\n intermediate = conv(intermediate)\n projection_2d = upscaling(projection_2d)\n\n r_space = self.colourspace_r(projection)\n r_space = r_space.view((-1, 16, 1, 1))\n r_space = self.colourspace_upscaler(r_space)\n r_space = intermediate * r_space\n r_space = torch.sum(r_space, dim=1, keepdim=True)\n\n g_space = self.colourspace_g(projection)\n g_space = g_space.view((-1, 16, 1, 1))\n g_space = self.colourspace_upscaler(g_space)\n g_space = intermediate * g_space\n g_space = torch.sum(g_space, dim=1, keepdim=True)\n\n b_space = self.colourspace_b(projection)\n b_space = b_space.view((-1, 16, 1, 1))\n b_space = self.colourspace_upscaler(b_space)\n b_space = intermediate * b_space\n b_space = torch.sum(b_space, dim=1, keepdim=True)\n\n output = torch.cat((r_space, g_space, b_space), dim=1)\n\n return output", "def identity_block(input_tensor, kernel_size, filters, stage, block):\n filters1, filters2, filters3 = filters\n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n 
conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = ConvGRU2D(filters1, (1, 1), padding='same', return_sequences=True, name=conv_name_base + '2a')(input_tensor)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\n #x = Activation('relu')(x)\n\n x = ConvGRU2D(filters2, kernel_size, padding='same', return_sequences=True, name=conv_name_base + '2b')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\n #x = Activation('relu')(x)\n\n x = ConvGRU2D(filters2,(1, 1), padding='same', return_sequences=True, name=conv_name_base + '2c')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\n\n x = layers.add([x, input_tensor])\n #x = Activation('relu')(x)\n return x", "def build_generator(self):\n\n def _make_divisible(v, divisor, min_value=None):\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n def residual_block(inputs, filters, block_id, expansion=6, stride=1, alpha=1.0):\n \n channel_axis = 1 if tf.keras.backend.image_data_format() == 'channels_first' else -1\n\n in_channels = tf.keras.backend.int_shape(inputs)[channel_axis]\n pointwise_conv_filters = int(filters * alpha)\n pointwise_filters = _make_divisible(pointwise_conv_filters, 8)\n x = inputs\n prefix = 'block_{}_'.format(block_id)\n\n if block_id:\n # Expand\n x = tf.keras.layers.Conv2D(expansion * in_channels,\n kernel_size=1,\n padding='same',\n use_bias=True,\n activation=None,\n name=prefix + 'expand')(x)\n x = tf.keras.layers.BatchNormalization(axis=channel_axis,\n epsilon=1e-3,\n momentum=0.999,\n name=prefix + 'expand_BN')(x)\n x = tf.keras.layers.Activation('relu', name=prefix + 'expand_relu')(x)\n else:\n prefix = 'expanded_conv_'\n\n # Depthwise\n x = tf.keras.layers.DepthwiseConv2D(kernel_size=3,\n strides=stride,\n activation=None,\n use_bias=True,\n padding='same' if stride == 1 else 'valid',\n name=prefix + 'depthwise')(x)\n x = tf.keras.layers.BatchNormalization(axis=channel_axis,\n epsilon=1e-3,\n momentum=0.999,\n name=prefix + 'depthwise_BN')(x)\n\n x = tf.keras.layers.Activation('relu', name=prefix + 'depthwise_relu')(x)\n\n # Project\n x = tf.keras.layers.Conv2D(pointwise_filters,\n kernel_size=1,\n padding='same',\n use_bias=True,\n activation=None,\n name=prefix + 'project')(x)\n x = tf.keras.layers.BatchNormalization(axis=channel_axis,\n epsilon=1e-3,\n momentum=0.999,\n name=prefix + 'project_BN')(x)\n\n if in_channels == pointwise_filters and stride == 1:\n return tf.keras.layers.Add(name=prefix + 'add')([inputs, x])\n return x\n\n def deconv2d(layer_input):\n\n u = tf.keras.layers.UpSampling2D(size=2, interpolation='bilinear')(layer_input)\n u = tf.keras.layers.Conv2D(self.gf, kernel_size=3, strides=1, padding='same')(u)\n u = tf.keras.layers.PReLU(shared_axes=[1, 2])(u)\n return u\n\n # Low resolution image input\n img_lr = tf.keras.Input(shape=self.lr_shape)\n\n # Pre-residual block\n c1 = tf.keras.layers.Conv2D(self.gf, kernel_size=3, strides=1, padding='same')(img_lr)\n c1 = tf.keras.layers.BatchNormalization()(c1)\n c1 = tf.keras.layers.PReLU(shared_axes=[1, 2])(c1)\n\n # Propogate through residual blocks\n r = residual_block(c1, self.gf, 0)\n for idx in range(1, self.n_residual_blocks):\n r = residual_block(r, self.gf, idx)\n\n # Post-residual block\n c2 = tf.keras.layers.Conv2D(self.gf, kernel_size=3, 
strides=1, padding='same')(r)\n c2 = tf.keras.layers.BatchNormalization()(c2)\n c2 = tf.keras.layers.Add()([c2, c1])\n \n # Upsampling\n u1 = deconv2d(c2)\n u2 = deconv2d(u1)\n\n # Generate high resolution output\n gen_hr = tf.keras.layers.Conv2D(3, kernel_size=3, strides=1, padding='same', activation='tanh')(u2)\n\n return tf.keras.models.Model(img_lr, gen_hr)", "def __init__(self, channel_in, channel_out, kernel_size, stride, padding, residual=False):\n super().__init__()\n self.block = nn.Sequential(\n nn.Conv2d(channel_in, channel_out, kernel_size, stride, padding),\n nn.BatchNorm2d(channel_out)\n )\n self.act = nn.ReLU()\n self.residual = residual", "def __init__(self, \n input_dim=(3, 32, 32), \n num_filters = (32, 64), filter_sizes = (7, 7), conv_param = {\"stride\": 1, \"pad\": 3},\n hidden_dim= 100, num_classes=10, weight_scale=1e-3, reg=0.0,\n dtype=np.float32\n ):\n self.params = {}\n self.reg = reg\n self.dtype = dtype\n self.conv_param = conv_param\n self.filter_sizes = filter_sizes\n self.num_layers = 4\n ############################################################################\n # TODO: Initialize weights and biases for the three-layer convolutional #\n # network. Weights should be initialized from a Gaussian with standard #\n # deviation equal to weight_scale; biases should be initialized to zero. #\n # All weights and biases should be stored in the dictionary self.params. #\n ############################################################################\n \n C, H, W = input_dim\n filter_size1, filter_size2 = filter_sizes\n num_filters1, num_filters2 = num_filters\n\n # conv layer 1: (N, C, H, W) -> (N, num_filters1, H, W)\n self.params['W1'] = np.random.normal(0, weight_scale, [num_filters1, C, filter_size1, filter_size1]) # square filter\n self.params['b1'] = np.zeros((num_filters1, ))\n self.params[\"sbnGamma1\"] = np.ones((num_filters1, )) # scale parameter one for each color channel during spatial batch norm\n self.params[\"sbnBeta1\"] = np.zeros((num_filters1, )) # shift parameter one for each color channel during spatial batch norm\n\n # conv layer 2: (N, num_filters1, H, W) -> (N, num_filters2, H, W)\n self.params['W2'] = np.random.normal(0, weight_scale, [num_filters2, num_filters1, filter_size2, filter_size2]) # square filter\n self.params['b2'] = np.zeros((num_filters2, ))\n self.params[\"sbnGamma2\"] = np.ones((num_filters2, ))\n self.params[\"sbnBeta2\"] = np.zeros((num_filters2, ))\n\n # (2, 2, 2) maxpool: (N, num_filters2, H, W) -> (N, num_filters2, H/2. W/2)\n # maxpool layer contributes nothing to self.params that need to be updated.\n self.maxpool_params = {\"pool_height\": 2, \"pool_width\": 2, \"stride\": 2}\n\n # affine layer 3: (N, num_filters2, H/2. 
W/2) -> (N, hidden_dim)\n self.params['W3'] = np.random.normal(0, weight_scale, [num_filters2 * (H / 2) * (W / 2), hidden_dim])\n self.params['b3'] = np.zeros((hidden_dim, ))\n self.params[\"bnGamma3\"] = np.ones((hidden_dim, ))\n self.params[\"bnBeta3\"] = np.zeros((hidden_dim, ))\n\n # output affine - sfmx layer 4: (N, hidden_dim) -> (N, num_classes)\n self.params['W4'] = np.random.normal(0, weight_scale, [hidden_dim, num_classes])\n self.params['b4'] = np.zeros((num_classes, ))\n\n self.bn_params = [{\"mode\": \"train\"} for _ in range(self.num_layers)]\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def __call__(self, inputs, output_stages='c5', **kwargs):\n if backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n\n dilation = self.dilation\n\n x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(inputs)\n x = layers.Conv2D(64, (7, 7),\n strides=(2, 2),\n padding='valid',\n kernel_initializer='he_normal',\n name='conv1')(x)\n x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)\n x = layers.Activation('relu')(x)\n x = layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)\n x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)\n c1 = x\n\n x = self._conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))\n for i in range(self.params[0]):\n x = self._identity_block(x, 3, [64, 64, 256], stage=2, block=chr(ord('b') + i))\n c2 = x\n\n x = self._conv_block(x, 3, [128, 128, 512], stage=3, block='a')\n for i in range(self.params[1]):\n x = self._identity_block(x, 3, [128, 128, 512], stage=3, block=chr(ord('b') + i))\n c3 = x\n\n x = self._conv_block(x, 3, [256, 256, 1024], stage=4, block='a', dilation=dilation[0])\n for i in range(self.params[2]):\n x = self._identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(ord('b') + i), dilation=dilation[0])\n c4 = x\n\n x = self._conv_block(x, 3, [512, 512, 2048], stage=5, block='a', dilation=dilation[1])\n for i in range(self.params[3]):\n x = self._identity_block(x, 3, [512, 512, 2048], stage=5, block=chr(ord('b') + i), dilation=dilation[1])\n c5 = x\n\n self.outputs = {'c1': c1,\n 'c2': c2,\n 'c3': c3,\n 'c4': c4,\n 'c5': c5}\n\n if type(output_stages) is not list:\n return self.outputs[output_stages]\n else:\n return [self.outputs[ci] for ci in output_stages]", "def conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1),block_id=1, train_bn=False):\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n filters = int(filters * alpha)\n x = KL.Conv2D(filters, kernel,\n padding='same',\n use_bias=False,\n strides=strides,\n name='conv{}'.format(block_id))(inputs)\n x = BatchNorm(axis=channel_axis, name='conv{}_bn'.format(block_id))(x, training = train_bn)\n return KL.Activation(relu6, name='conv{}_relu'.format(block_id))(x)", "def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.input_dim, f'input_dim is {p.input_dim}'\n assert p.hidden_dim, f'hidden_dim is {p.hidden_dim}'\n assert p.num_heads > 0, f'num_heads is {p.num_heads}'\n # if proj_tpl does not have dim_per_head set, set it\n if p.proj_tpl.dim_per_head == 0:\n p.proj_tpl.dim_per_head = self.dim_per_head\n\n if p.device_mesh is not None:\n assert p.weight_split_dims_mapping is not None\n assert p.activation_split_dims_mapping is not None\n\n if 
isinstance(p.weight_split_dims_mapping, dict):\n qkv_weight_split_dims_mapping = p.weight_split_dims_mapping['qkv']\n post_weight_split_dims_mapping = p.weight_split_dims_mapping['post']\n else:\n qkv_weight_split_dims_mapping = p.weight_split_dims_mapping\n post_weight_split_dims_mapping = p.weight_split_dims_mapping\n\n def ProjectInput(input_dim):\n return p.proj_tpl.Copy().Set(\n input_dim=input_dim,\n num_heads=p.num_heads,\n use_bias=p.use_bias,\n device_mesh=p.device_mesh,\n weight_split_dims_mapping=qkv_weight_split_dims_mapping,\n make_output_proj_no_op=False)\n\n def ProjectInputOneStep(input_dim):\n return p.proj_tpl.Copy().Set(\n input_dim=input_dim,\n num_heads=p.num_heads,\n dim_per_head=self.dim_per_head * 3,\n use_bias=p.use_bias,\n device_mesh=p.device_mesh,\n weight_split_dims_mapping=qkv_weight_split_dims_mapping,\n make_output_proj_no_op=False,\n )\n\n if isinstance(p.input_dim, dict):\n key_input_dim = p.input_dim['key']\n value_input_dim = p.input_dim['value']\n query_input_dim = p.input_dim['query']\n assert key_input_dim, f'key_input_dim is {key_input_dim}'\n assert query_input_dim, f'query_input_dim is {query_input_dim}'\n else:\n key_input_dim = p.input_dim\n value_input_dim = p.input_dim\n query_input_dim = p.input_dim\n\n if p.enable_value_proj and p.enable_qkv_proj_in_onestep:\n self.CreateChild('qkv', ProjectInputOneStep(key_input_dim))\n else:\n self.CreateChild('key', ProjectInput(key_input_dim))\n self.CreateChild('query', ProjectInput(query_input_dim))\n if p.enable_value_proj:\n assert value_input_dim, f'value_input_dim is {value_input_dim}'\n self.CreateChild('value', ProjectInput(value_input_dim))\n if p.enable_query_scale and p.enable_per_dim_scale:\n self.CreateChild(\n 'per_dim_scale',\n PerDimScaleLayer.Params().Set(dim=p.proj_tpl.dim_per_head))\n self.CreateChild('atten_dropout',\n p.dropout_tpl.Set(keep_prob=1.0 - p.atten_dropout_prob))\n # Setting is_output_projection=True to set the projection direction\n # from hidden dim to input dim. 
Output projection follows query_input_dim.\n self.CreateChild(\n 'post',\n p.proj_tpl.Copy().Set(\n input_dim=p.output_dim or query_input_dim,\n num_heads=p.num_heads,\n is_output_projection=True,\n use_bias=p.use_bias,\n device_mesh=p.device_mesh,\n weight_split_dims_mapping=post_weight_split_dims_mapping))\n\n if p.rope_tpl:\n assert issubclass(p.rope_tpl.cls, layers.RotaryPositionalEmbeddingLayer)\n rope_p = p.rope_tpl.Copy()\n if rope_p.embedding_dim == 0:\n rope_p.embedding_dim = self.dim_per_head\n self.CreateChild('rope', rope_p)\n\n if p.attn_add_memory:\n assert p.memory_tpl is not None\n self.CreateChild(\n 'lsh_mem',\n p.memory_tpl.Copy().Set(\n input_dim=self.dim_per_head,\n output_dim=self.dim_per_head,\n name='attn_lsh_mem'))\n if p.use_scale_invariant_atten:\n assert not (p.enable_scaling_code_motion or p.atten_extra_logit)", "def construct(input_placeholder):\n\t\t###############################\n\t\t# MODEL ARCHITECTURE #\n\t\t###############################\n\t\t# First block of convolutions\n\t\twith tf.variable_scope(\"conv_1\"):\n\t\t\tconv_1_1 = conv2d(input_placeholder,\n\t\t\t\tinput_channels=1,\n\t\t\t\toutput_channels=64,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_1_2 = conv2d(conv_1_1,\n\t\t\t\tinput_channels=64,\n\t\t\t\toutput_channels=64,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=2)\n\t\t\t# TODO batchn\n\t\t\tbn_1 = conv_1_2\n\n\t\t# Second block of convolutions.\n\t\twith tf.variable_scope(\"conv2\"):\n\t\t\tconv_2_1 = conv2d(bn_1,\n\t\t\t\tinput_channels=64,\n\t\t\t\toutput_channels=128,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_2_2 = conv2d(conv_2_1,\n\t\t\t\tinput_channels=128,\n\t\t\t\toutput_channels=128,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=2)\n\n\t\t\t# TODO batchn\n\t\t\tbn_2 = conv_2_2\n\n\t\twith tf.variable_scope(\"conv3\"):\n\t\t\tconv_3_1 = conv2d(bn_2,\n\t\t\t\tinput_channels=128,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_3_2 = conv2d(conv_3_1,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_3_3 = conv2d(conv_3_2,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=2)\n\t\t\t# TODO batchn\n\t\t\tbn_3 = conv_3_3\n\n\n\t\t# DILATED LAYERS:\n\t\twith tf.variable_scope(\"conv4\"):\n\t\t\tconv_4_1 = conv2d(bn_3,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_4_2 = conv2d(conv_4_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_4_3 = conv2d(conv_4_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\t# TODO batchn\n\t\t\tbn_4 = conv_4_3\n\n\t\twith tf.variable_scope(\"conv5\"):\n\t\t\tconv_5_1 = conv2d(bn_4,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_5_2 = conv2d(conv_5_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_5_3 = conv2d(conv_5_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\t# TODO batchn\n\t\t\tbn_5 = conv_5_3\n\n\t\twith tf.variable_scope(\"conv6\"):\n\t\t\tconv_6_1 = 
conv2d(bn_5,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_6_2 = conv2d(conv_6_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_6_3 = conv2d(conv_6_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\t# TODO batchn\n\t\t\tbn_6 = conv_6_3\n\n\n\t\twith tf.variable_scope(\"conv7\"):\n\t\t\tconv_7_1 = conv2d(bn_6,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_7_2 = conv2d(conv_7_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_7_3 = conv2d(conv_7_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\t# TODO batchn\n\t\t\tbn_7 = conv_7_3\n\n\n\t\twith tf.variable_scope(\"conv8\"):\n\t\t\tconv_8_1 = deconv2d(bn_7,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_size=[None, 64, 64, 256],\n\t\t\t\tkernel_size=4,\n\t\t\t\tstride=2,\n\t\t\t\tpad=1)\n\t\t\tconv_8_2 = conv2d(conv_8_1,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_8_3 = conv2d(conv_8_2,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=1)\n\t\t\tconv_8_313 = conv2d(conv_8_3,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=313,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=1)\n\n\n\t\treturn conv_8_313", "def _make_layer(self, block, outputs, blocks, stride=1):\n downsample = None\n \n downsample = nn.Sequential(\n nn.Conv2d(self.inputs, outputs * 4,\n kernel_size=1, stride=stride, bias=False,\n dilation=self.dilation),\n nn.BatchNorm2d(outputs * 4),\n )\n\n layers = []\n layers.append(block(self.inputs, outputs, stride, downsample, self.dilation))\n self.inputs = outputs * 4\n for i in range(1, blocks):\n layers.append(block(self.inputs, outputs))\n\n layer = nn.Sequential(*layers)\n\n self.channels.append(outputs * 4)\n self.layers.append(layer)\n\n return layer" ]
[ "0.6879483", "0.6712965", "0.6681837", "0.6617764", "0.65615404", "0.65488005", "0.6432119", "0.64201134", "0.6342974", "0.6342974", "0.63249046", "0.63225436", "0.62239516", "0.6200089", "0.6196667", "0.6185265", "0.61718935", "0.6138998", "0.61151755", "0.6111726", "0.60973215", "0.60953975", "0.60940576", "0.6069791", "0.6069683", "0.6067775", "0.6055852", "0.60353184", "0.6033513", "0.6030778", "0.60306835", "0.60150295", "0.60119873", "0.60009843", "0.5998713", "0.59752196", "0.5974212", "0.5969725", "0.59689784", "0.5954446", "0.5945263", "0.5938959", "0.5936477", "0.5936477", "0.59293234", "0.5919008", "0.5909816", "0.5875439", "0.58728266", "0.58718044", "0.58694845", "0.58641374", "0.5861078", "0.5856386", "0.5856215", "0.5851727", "0.5851727", "0.58512896", "0.58508277", "0.5848076", "0.58477694", "0.5846335", "0.5836978", "0.58304745", "0.58304745", "0.58215207", "0.581361", "0.5811278", "0.58088255", "0.5806012", "0.58033645", "0.5799912", "0.57995296", "0.57973206", "0.57860965", "0.57707506", "0.57703036", "0.57671255", "0.5764219", "0.5760496", "0.5755248", "0.57539177", "0.5741019", "0.57391536", "0.57349133", "0.5730176", "0.57298785", "0.5723238", "0.57188094", "0.57157564", "0.57157475", "0.57143784", "0.5709602", "0.57082564", "0.5707335", "0.5701418", "0.56984", "0.56848776", "0.5684323", "0.56786656" ]
0.57862914
74
Sentence generator for an entire corpus directory.
def sentences_for_dir(path='./', separate=True, gzipped=True):
    for filename in cowfiles(path):
        for metadata, data in sentence_generator(filename, separate, gzipped):
            yield metadata, data
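A minimal usage sketch for the generator above, assuming `cowfiles` and `sentence_generator` are importable from the same corpus-reader module as `sentences_for_dir`; the module name `cow_reader` and the corpus path are assumptions, not part of the dataset entry.

```python
# Hypothetical driver for sentences_for_dir; "cow_reader" and the
# "./corpus/" directory are placeholders, not confirmed by the source.
from cow_reader import sentences_for_dir

count = 0
for metadata, data in sentences_for_dir(path='./corpus/', separate=True, gzipped=True):
    count += 1           # each yielded pair is one sentence plus its metadata
    if count <= 3:
        print(metadata)  # inspect the first few sentence headers
print('total sentences:', count)
```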
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data_sentences(dirname):\n sentence_list = []\n for fname in os.listdir(dirname):\n with open(os.path.join(dirname, fname)) as file:\n #sentence_list.append(gensim.models.word2vec.LineSentence(file))\n sentence_list.append(file)\n return sentence_list", "def sents(self, fileids=None, categories=None):\n for paragraph in self.paras(fileids, categories):\n for sentence in sent_tokenize(paragraph, language='russian'):\n yield sentence", "def process_docs(directory, vocab):\n for filename in listdir(directory):\n if not filename.endswith('.txt'):\n continue\n path = directory + '/' + filename\n add_doc_to_vocab(path, vocab)", "def generate_words(text='', train_path=None, case_sensitive=True, epochs=20, classifier=nlup.BinaryAveragedPerceptron, **kwargs):\n if train_path:\n generate_sentences.detector = Detector(slurp(train_path), epochs=epochs, nocase=not case_sensitive)\n # generate_sentences.detector = SentenceDetector(text=text, nocase=not case_sensitive, epochs=epochs, classifier=classifier)\n return iter(generate_sentences.detector.segments(text))", "def iter_documents(top_directory):\n for root, dirs, files in os.walk(top_directory):\n for file in filter(lambda file: file.endswith('.txt'), files):\n document = open(os.path.join(root, file)).read() # read the entire document, as one big string\n yield utils.tokenize(document, lower=True) # or whatever tokenization suits you", "def genSent(self, toTree, num):\n sentences = []\n\n\n while num > 0:\n words = []\n root = self.weightedRandomChoice(\"ROOT\")\n for w in root:\n subSentence = self.findSubSentence(toTree, w)\n if toTree:\n words.append((w, subSentence))\n else:\n words.extend(subSentence)\n if toTree:\n sentence = (\"ROOT\", tuple(words))\n else:\n sentence = \" \".join(words)\n\n if sentence not in sentences: # prevent storing duplicated sentence\n sentences.append(sentence)\n num -= 1\n\n # print all the generated sentences\n if toTree:\n for sentence in sentences:\n self.printTree(sentence, 0, 0)\n print \"\"\n # CFG.treeToSentence(sentence)\n # print \"\"\n else:\n for sentence in sentences:\n print sentence", "def generate_corpus(self, text):\n if isinstance(text, str):\n sentences = self.sentence_split(text)\n else:\n sentences = []\n for line in text:\n sentences += self.sentence_split(line)\n passing = filter(self.test_sentence_input, sentences)\n runs = map(self.word_split, passing)\n return runs", "def import_spontaneous_speech_corpus(corpus_name, directory, **kwargs):\n\n dialect = kwargs.pop('dialect', 'textgrid')\n stop_check = kwargs.pop('stop_check', None)\n call_back = kwargs.pop('call_back', None)\n speaker_source = kwargs.pop('speaker_source', None)\n delimiter = kwargs.pop('delimiter', None)\n\n corpus = SpontaneousSpeechCorpus(corpus_name,directory)\n\n words = []\n phones = []\n textgrids = []\n wavs = []\n if call_back is not None:\n call_back('Finding files...')\n call_back(0,1)\n cur = 0\n for root, subdirs, files in os.walk(directory):\n if stop_check is not None and stop_check():\n return\n for f in files:\n if dialect == 'textgrid' and f.lower().endswith('.textgrid'):\n textgrids.append(os.path.join(root,f))\n elif dialect == 'buckeye' and f.endswith('.words'):\n words.append(os.path.join(root,f))\n elif dialect == 'buckeye' and f.endswith('.phones'):\n phones.append(os.path.join(root,f))\n elif dialect == 'timit' and f.endswith('.wrd'):\n words.append(os.path.join(root,f))\n elif dialect == 'timit' and f.endswith('.phn'):\n phones.append(os.path.join(root,f))\n elif f.endswith('.wav'):\n 
wavs.append(os.path.join(root,f))\n if dialect == 'textgrid':\n word_tier_name = kwargs.pop('word_tier_name', None)\n phone_tier_name = kwargs.pop('phone_tier_name', None)\n dialogs = align_textgrid_info(textgrids, wavs, speaker_source, stop_check, call_back)\n else:\n dialogs = align_dialog_info(words, phones, wavs, speaker_source, stop_check, call_back)\n if call_back is not None:\n call_back('Processing discourses...')\n call_back(0,len(dialogs))\n cur = 0\n\n for d, v in dialogs.items():\n if stop_check is not None and stop_check():\n return\n if call_back is not None:\n cur += 1\n call_back(cur)\n discourse_info = {'name':d}\n if dialect == 'textgrid':\n if 'textgrid' not in v:\n continue\n data = textgrids_to_data(v['textgrid'], word_tier_name,\n phone_tier_name,\n v['speaker'], delimiter)\n else:\n if 'words' not in v:\n continue\n if 'phones' not in v:\n continue\n data = files_to_data(v['words'], v['phones'], dialect)\n discourse_info['speaker'] = Speaker(v['speaker'])\n\n if 'wav' in v:\n discourse_info['wav_path'] = v['wav']\n corpus.add_discourse(data, discourse_info,delimiter=delimiter)\n return corpus", "def process_docs(directory, vocab):\n for file_name in listdir(directory):\n file_path = directory + '/' + file_name\n add_doc_to_vocab(file_path, vocab)", "def word_runner(self):\n with open(self.filename) as doc:\n text = doc.readlines()\n for line in text:\n for word in line.split():\n yield word", "def get_corpus():\n all_text = []\n\n for _, _, files in os.walk(DATA_DIRECTORY):\n for f in files:\n with open(os.path.join(DATA_DIRECTORY, f), 'r') as article:\n # Quotation marks rarely come out as pairs in finished chains.\n # So we remove them before adding the article text:\n all_text.append(re.sub(r'[„“]', '', article.read()))\n\n return markovify.Text(\"\".join(all_text), state_size=2)", "def generate_sentences(text='', train_path=None, case_sensitive=True, epochs=20, classifier=nlup.BinaryAveragedPerceptron, **kwargs):\n if train_path:\n generate_sentences.detector = Detector(slurp(train_path), epochs=epochs, nocase=not case_sensitive)\n # generate_sentences.detector = SentenceDetector(text=text, nocase=not case_sensitive, epochs=epochs, classifier=classifier)\n return iter(generate_sentences.detector.segments(text))", "def make_sentences(self):\n\n if self.document == None:\n return\n\n sent = sent_tokenize(self.document) # contains raw sentences\n\n\n # Create parameters for NER and Dependency Parsing a\n # and pass it to the sentence objcet\n\n # set config file\n config = CP.RawConfigParser()\n config = config\n config.read('config.py')\n\n # Server for dependency parsing\n\n server = ServerProxy(JsonRpc20(),TransportTcpIp(addr=(\"127.0.0.1\", 8080), timeout=200.0))\n\n # Parameters for Named entitye recognition\n\n # get the classifier and tagger location from config file\n tagger = config.get('NER','tagger') # gets the path of the stanford tagger\n classifier = config.get('NER','classifier') # gets the path of the stanford classifier\n st = StanfordNERTagger(classifier,tagger)\n for i in range(len(sent)):\n s = Sentence(sent[i],i,server, st, 'test')\n self.sentences.append(s)", "def build_corpus(self):\n print(\"Inside the build_corpus >>>>>\")\n documentsCount = 0\n documents = self.documents\n\t\t\n with open(self.documents_path) as file:\n for documents in file.readlines():\n documents = documents.rstrip('}\\n ').strip('0\\t').strip('1\\t').split(' ')\n documentsCount = documentsCount +1\n self.documents.append(documents)\n\t\t\t\n self.number_of_documents = 
documentsCount", "def process_corpus(self):\n sentences = []\n sentence = []\n with open(str(self.file), encoding=self.encoding) as f:\n\n line = f.readline()\n\n while line:\n\n if line.startswith(\"#\"):\n line = f.readline()\n continue\n\n if line.strip().replace(\"\", \"\") == \"\":\n if len(sentence) > 0:\n self.infer_space_after(sentence)\n if self.tagging_scheme is not None:\n self.convert_tag_scheme(\n sentence, target_scheme=\"iobes\"\n )\n\n sentences.append(sentence)\n sentence = []\n\n else:\n fields = re.split(r\"\\s+\", line)\n token = fields[0] # text column\n token_tags = {\n v: fields[k]\n for k, v in self.columns.items()\n if v != \"text\"\n }\n sentence.append({\"name\": token, \"tags\": token_tags})\n\n line = f.readline()\n\n return sentences", "def process_docs_2(directory, vocab):\n lines = []\n for filename in listdir(directory):\n if not filename.endswith('.txt'):\n continue\n path = directory + '/' + filename\n line = doc_to_line(path, vocab)\n lines.append(line)\n return lines", "def _doc2vec_doc_stream(paths, n, tokenizer=word_tokenize, sentences=True):\n i = 0\n p = Progress()\n for path in paths:\n with open(path, 'r') as f:\n for line in f:\n i += 1\n p.print_progress(i/n)\n\n # We do minimal pre-processing here so the model can learn\n # punctuation\n line = line.lower()\n\n if sentences:\n for sent in sent_tokenize(line):\n tokens = tokenizer(sent)\n yield LabeledSentence(tokens, ['SENT_{}'.format(i)])\n else:\n tokens = tokenizer(line)\n yield LabeledSentence(tokens, ['SENT_{}'.format(i)])", "def generate(self, handle, occurrence_threshold=None): # silently creates other pieces of data\n print colors.yellow(\"generating corpus for {}...\\n\".format(handle))\n if occurrence_threshold: # if one was given, set it\n self.threshold = occurrence_threshold\n self.handle = handle\n self.path = \"bot_files/{0}/{0}\".format(handle)\n self.process_tweets()\n self.generate_vocab()\n self.generate_corpus()", "def sents(self):\n\n text = str()\n for file in os.listdir(self.path):\n # checks if the given path contains a text file and opens it\n if file.endswith(\".txt\"):\n with open(self.path + \"/\" + file) as connection:\n text += connection.read()\n\n # tokenizes the text to sentences and tokenizes the tokenized sentences to words\n sentences_list = nltk.sent_tokenize(text)\n word_list = [nltk.word_tokenize(sent) for sent in sentences_list]\n\n return word_list", "def sent_to_words(self, sentences):\n\n for sentence in sentences:\n yield(gensim.utils.simple_preprocess(str(sentence)))", "def load_sentences(path, lower, zeros=True):\n sentences = []\n sentence = []\n for line in codecs.open(path, 'r', 'utf8'):\n line = zero_digits(line.rstrip()) if zeros else line.rstrip()\n if not line:\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n sentence = []\n else:\n word = line.split()\n assert len(word) >= 2\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n return sentences", "def peoples_speech(\n corpus_dir: Pathlike,\n output_dir: Pathlike,\n):\n prepare_peoples_speech(\n corpus_dir,\n output_dir=output_dir,\n )", "def generate_corpus():\n data = load_data()\n questions = [s.split(' ', 1)[1].lower() for s in data]\n return questions", "def word_iterator(folder):\n for filename in glob.glob(os.path.join(folder, \"*.txt\")):\n with codecs.open(filename, \"r\", \"utf8\") as file:\n for line in file.readlines():\n for word in 
WORD_SPLIT_PATTERN.split(line.strip()):\n if word == \"\":\n continue\n yield slugify.slugify(word.lower())", "def generate_sentence():\n markov_chain = makeMarkovDict(\"text.txt\")\n\n # Pick a random word to begin with.\n first_word = random.choice(markov_chain.keys()) # Illegall\n\n # print first_word\n # random_choice = random.randint(0, len(markov_chain.keys()))\n # index = 0\n # first_word = \"\"\n # for word in markov_chain:\n # print word\n # if index == random_choice:\n # first_word = word\n # break\n # index += 1\n\n # Based on that word, call function to chose the next word.\n # print markov_chain[first_word]\n # print word_selection(markov_chain[first_word])\n\n lenght_of_sentence = 10\n sentence = [first_word] # First word already in there\n for i in range(lenght_of_sentence):\n sentence.append(word_selection(markov_chain[sentence[i]]))\n # Sentence after loop: ['fish', 'red', 'fish', 'two', 'fish', 'red', 'fish', 'red', 'fish', 'two', 'fish']\n\n # Cap with letter and add period at the end.\n final_sentece = \" \".join(sentence) + \".\"\n return final_sentece.capitalize()", "def read_corpus(dir):\n corpus = {}\n file_names = glob.glob(f\"{dir}/*\")\n for file_name in file_names:\n name = os.path.splitext(os.path.basename(file_name))[0]\n text = \" \".join(open(file_name, \"rt\").readlines())\n text = text.replace(\"\\n \\n\", \" \")\n text = text.replace(\"\\n\", \"\")\n text = text.replace(\" \", \" \")\n corpus[os.path.splitext(name)[0]] = text\n return corpus", "def generate_docs(root_dir, session):\n ...", "def load_sentences(path, zeros):\n sentences = []\n sentence = []\n for line in codecs.open(path, 'r', 'utf8'):\n line = zero_digits(line.rstrip()) if zeros else line.rstrip()\n if not line:\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n sentence = []\n else:\n word = line.split()\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n return sentences", "def write_file(tweets):\n with open((folderlink + \"markov_sentences.txt\"), \"w\") as text_file:\n for tweet in tweets:\n text_file.write (tweet + '\\n')\n with file ((folderlink + \"markov_sentences.txt\"), 'r') as f:\n text = f.read()\n text_model = markovify.NewlineText(text)\n print \"model successful \\n\\n\\n\\n\"\n for i in range(5):\n print(text_model.make_short_sentence(140, tries=100))\n text_file.close()", "def write_doc(self, file=sys.stdout, tm=False):\n for sentence in self.sentences:\n if tm:\n print(\"<tu><tuv><seg>\", file=file)\n print(\"{}\".format(sentence.raw), file=file)\n if tm:\n print(\"</seg></tuv><tuv><seg>\", file=file)\n print(\"{}\".format(sentence.translation), file=file)\n if tm:\n print(\"</seg></tuv></tu>\", file=file)", "def tokenize(self, path):\n assert os.path.exists(path)\n with open(path, 'r') as f:\n sentences = []\n for sentence in tqdm(f, desc='Processing file: {}'.format(path)):\n sentences.append(sentence.split())\n self.data = sentences", "def create_corpus_for_genre(genre):\n corpus = \"\"\n if genre in os.listdir(DATA_DIR):\n #iterate through artists\n for artist in os.listdir(DATA_DIR + \"/\" + genre + \"/\"):\n for filename in os.listdir(DATA_DIR + \"/\" + genre + \"/\" + artist + \"/\"):\n with open(DATA_DIR + \"/\" + genre + \"/\" + artist + \"/\" + filename) as f:\n corpus += f.read()\n return corpus", "def sentence_generator(filename, length=10):\n random.seed(1) # Set the seed for the random generator - do not remove\n # Enter your code here\n\n ret = 
learn(filename) # stores learn function in var\n sort = sorted(ret[1]) # Sort the dictionary\n curr = random.choice(sort) # selects first word\n sent = [curr]\n sent_len = len(sent) # length\n end_loop = False\n\n while not end_loop: # infinite loop\n curr = sent[-1] # Word to start with\n sent_len += 1\n get_word = next_word(ret[1][curr], ret[0])\n\n sent.append(get_word) # puts words in sent\n if sent_len == length: # checks length\n sentence = ' '.join(sent) # joins sentence\n return sentence", "def sents_bow_generator(filepath):\n \n for sents in LineSentence(filepath):\n yield sents_dictionary.doc2bow(sents)", "def collect_article_text(directory: str,\n sentences: List[Dict[str, str]],\n keyword: str,\n collect_all: bool,\n ) -> List[Dict[str, str]]:\n articles = (os.path.join(directory, article)\n for article in os.listdir(directory)\n if article.endswith(\".txt\"))\n for article in articles:\n sentences = process_article(sentences, article, keyword, collect_all)\n return sentences", "def create_corpus(source):\n\treturn \" \".join([file.read() for file in source])", "def update_corpus(sentences):\n \n corNeg = None\n corPos = None\n corNeu = None\n try:\n corNeg = open('corpus\\\\neg.txt', 'ab')\n corPos = open('corpus\\\\pos.txt', 'ab')\n corNeu = open('corpus\\\\neu.txt', 'ab')\n except:\n print(\"Error: Loading Corpus\")\n return\n for sent_d in sentences:\n sent = sent_d[\"sentence_txt\"]\n tagged = sent_d[\"tag_id\"]\n # update corpus\n if tagged == tag.neg:\n corNeg.write('\\n'+sent)\n if tagged == tag.pos:\n corPos.write('\\n'+sent)\n if tagged == tag.neu:\n corNeu.write('\\n'+sent)\n corNeg.close()\n corPos.close()\n corNeu.close()", "def load_sentences(path):\n sentences = []\n sentence = []\n num = 0\n with codecs.open(path, 'r', 'utf8') as fread:\n # n_lines = len(fread)\n print(\"Read from {:s}\".format(path))\n # pbar = progressbar.ProgressBar(max_value=n_lines)\n for line_idx, line in enumerate(fread):\n assert line_idx==num,'ER'\n num += 1\n\n line = line.rstrip()\n # print(list(line))\n if not line: #Update: only deal with space between sentences\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:# remove the DOCstart\n sentences.append(sentence)\n sentence = []\n else:\n if line[0] == \" \":#Update: this part is never used in Chinese ner!\n line = \"$\" + line[1:]\n word = line.split()\n # word[0] = \" \"\n else:\n word= line.split()\n assert len(word) >= 2, ([word[0]])\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n\n return sentences", "def train(self, corpus): \n\n # Generate all possible n-grams\n # for every sentence in the corpus\n for sentence in corpus:\n\n #for every possible gram-length in the sentence\n for gramlength in xrange(1,len(sentence)):\n\n #iterate through all possible grams of that gramlength\n for i in xrange(len(sentence) - gramlength):\n\n #generate tuple\n key = ();\n for index in xrange(gramlength):\n key += (sentence[i + index],);\n\n if(gramlength == 2):\n self.continuationProb[key[1]].add(key[0]);\n\n self.ngramCounts[key] += 1;\n\n self.total = len(set(map(lambda tup: tup[0], self.ngramCounts)));", "def read_file(path, tok=False):\n with open_file(path) as f:\n for line in f.readlines():\n words = split_sentence(line.strip(), tok)\n yield words", "def words(self, fileids=None, categories=None):\n for sentence in self.sents(fileids, categories):\n for token in wordpunct_tokenize(sentence):\n yield token", "def generate_sentence_seed(self, seed):\n if 
self.word_to_index is None:\n self.log.error(\"Need to load a model or data before this step.\")\n return []\n\n if seed is None:\n self.log.error(\"There is not sentence seed.\")\n return []\n\n # Start sentence with the start token\n sentence = [self.word_to_index[self.sentence_start_token]]\n sentence.extend(seed.lower().split(' '))\n\n # Predict next word until end token is received\n while not sentence[-1] == self.word_to_index[self.sentence_end_token]:\n next_word_probs = self.forward_propagate(sentence)\n sampled_word = self.word_to_index[self.unknown_token]\n # We don't want the unknown token to appear in the sentence\n while sampled_word == self.word_to_index[self.unknown_token]:\n samples = np.random.multinomial(1, next_word_probs[-1])\n sampled_word = np.argmax(samples)\n sentence.append(sampled_word)\n sentence_str = [self.index_to_word[word] for word in sentence[1:-1]]\n return sentence_str", "def get_parses(corpus_sents, corpus_name, target_dir):\n print('Generating the parse files for the {:s} corpus ...'.format(corpus_name))\n # Define POS tag inventory separated along the open/ closed class axis;\n # exclude tags not associated with either class (such as 'filler' words), due to their relatively low frequency\n # and low relevance for the contrastive analysis of the two ID-variant corpora\n open_class_tags = ['FW', 'GW', 'JJ', 'JJR', 'JJS', 'NN', 'NNP', 'NNPS', 'NNS', 'PDT', 'RB', 'RBR', 'RBS', 'UH',\n 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'WRB']\n closed_class_tags = ['AFX', 'BES', 'CC', 'CD', 'DT', 'EX', 'HVS', 'IN', 'MD', 'POS', 'PRP', 'PRP$', 'RP', 'SYM',\n 'TO', 'WDT', 'WP', 'WP$']\n all_tags = open_class_tags + closed_class_tags\n\n # Parse corpus contents with SpaCy\n model = sc.load('en')\n parses = [model(sent) for sent in corpus_sents]\n # Obtain tag counts for the specified tag inventory\n flat_tags = [parse.tag_ for parsed_sent in parses for parse in parsed_sent]\n unique_tags = set(flat_tags)\n tag_counts = sorted([(tag, flat_tags.count(tag)) for tag in unique_tags if tag in all_tags],\n reverse=True, key=lambda x: x[1])\n\n # Calculate open class fraction (total and top 50%), to determine whether open classes are distributed differently\n # in sentences of varying ID; intuitively one may expect high-ID sentences to contain a greater portion of open\n # class words, as they exhibit greater variation and are therefore less predictable in sentential context\n top_open = [tag_tpl[1] for tag_tpl in tag_counts[: len(tag_counts) // 2] if tag_tpl[0] in open_class_tags]\n top_closed = [tag_tpl[1] for tag_tpl in tag_counts[: len(tag_counts) // 2] if tag_tpl[0] in closed_class_tags]\n top_all = [tag_tpl[1] for tag_tpl in tag_counts[: len(tag_counts) // 2]]\n top_open_fraction = sum(top_open) / sum(top_all)\n top_closed_fraction = sum(top_closed) / sum(top_all)\n\n full_open = [tag_tpl[1] for tag_tpl in tag_counts if tag_tpl[0] in open_class_tags]\n full_closed = [tag_tpl[1] for tag_tpl in tag_counts if tag_tpl[0] in closed_class_tags]\n full_all = [tag_tpl[1] for tag_tpl in tag_counts]\n full_open_fraction = sum(full_open) / sum(full_all)\n full_closed_fraction = sum(full_closed) / sum(full_all)\n\n # Write tag counts to file\n tag_log_path = os.path.join(target_dir, '{:s}_tag_counts.txt'.format(corpus_name))\n with codecs.open(tag_log_path, 'w', encoding='utf8') as tag_file:\n for i in range(len(tag_counts)):\n tag_file.write('{:s}\\t{:d}\\n'.format(tag_counts[i][0], tag_counts[i][1]))\n # Calculate corpus statistics\n tag_file.write('=' * 10 + '\\n')\n 
tag_file.write('Open class fraction of most frequent 50% POS tags: {:.4f}\\n'.format(top_open_fraction))\n tag_file.write('Closed class fraction of most frequent 50% POS tags: {:.4f}\\n'.format(top_closed_fraction))\n tag_file.write('Open class fraction of all identified POS tags: {:.4f}\\n'.format(full_open_fraction))\n tag_file.write('Closed class fraction of all identified POS tags: {:.4f}'.format(full_closed_fraction))\n print('Done with POS-tagging.')\n\n # Perform dependency parsing related analysis\n def _get_dlt(_parent, _children):\n \"\"\" Computes the integration cost at the head of dependency relations identified within the input sentence,\n according to the Dependency Locality Theory. \"\"\"\n dlt_cost = 0\n for child in _children:\n # Determine the span length between the child and parent node\n left = min(_parent.i, child.i)\n right = max(_parent.i, child.i)\n for j in range(left + 1, right):\n # Identify discourse referents present within the determined span\n if 'NN' in parse[j].tag_ or 'VB' in parse[j].tag_:\n dlt_cost += 1\n # Check if the parent node is also occupied by a new discourse referent\n if 'NN' in _parent.tag_ or 'VB' in _parent.tag_:\n dlt_cost += 1\n return dlt_cost\n\n corpus_spans = list()\n corpus_costs = list()\n # Compute the mean dependency arc length and DLT integration cost for each sentence within the corpus\n for parse in parses:\n sent_spans = list()\n sent_costs = list()\n for parent in parse:\n children = [w for w in parent.lefts] + [w for w in parent.rights]\n if len(children) == 0:\n continue\n parent_spans = [abs(parent.i - child.i) for child in children]\n sent_spans += parent_spans\n sent_costs += [_get_dlt(parent, children)]\n # Collect means\n corpus_spans += [np.mean(sent_spans)]\n corpus_costs += [np.mean(sent_costs)]\n\n # Calculate SVO fraction (ultimately did not yield any interesting insights)\n clause_triples = list()\n svo_count = 0\n other_count = 0\n for parse in parses:\n # Identify subjects, predicates, and objects\n subjects = [[word.i, word.head.i] for word in parse if 'subj' in word.dep_ and word.head.pos_ == 'VERB']\n objects = [[word.head.i, word.i] for word in parse if 'obj' in word.dep_]\n for subj_list in subjects:\n for obj_list in objects:\n if subj_list[-1] == obj_list[0]:\n clause_triple = subj_list + obj_list[-1:]\n clause_triples.append(clause_triple)\n # Check if isolated triples are in the SVO order, increment counter if so\n if clause_triple[0] < clause_triple[1] < clause_triple[2]:\n svo_count += 1\n else:\n other_count += 1\n # Compute word order fractions\n svo_fraction = svo_count / len(clause_triples)\n other_fraction = other_count / len(clause_triples)\n\n # Write mean sentence-wise dependency arc lengths and DLT integration costs to file\n parse_log_path = os.path.join(target_dir, '{:s}_parse_stats.txt'.format(corpus_name))\n with codecs.open(parse_log_path, 'w', encoding='utf8') as parse_file:\n # Document mean sentence dependency arc length and mean sentence DLT integration cost\n for i in range(len(corpus_spans)):\n parse_file.write('{:.4f}\\t{:.4f}\\n'.format(corpus_spans[i], corpus_costs[i]))\n # Calculate corpus statistics\n parse_file.write('=' * 10 + '\\n')\n parse_file.write('Span length max: {:.4f}\\n'.format(np.max(corpus_spans)))\n parse_file.write('Span length min: {:.4f}\\n'.format(np.min(corpus_spans)))\n parse_file.write('Span length mean: {:.4f}\\n'.format(np.mean(corpus_spans)))\n parse_file.write('Span length standard deviation: {:.4f}\\n'.format(np.std(corpus_spans)))\n 
parse_file.write('=' * 10 + '\\n')\n parse_file.write('DLT cost max: {:.4f}\\n'.format(np.max(corpus_costs)))\n parse_file.write('DLT cost min: {:.4f}\\n'.format(np.min(corpus_costs)))\n parse_file.write('DLT cost mean: {:.4f}\\n'.format(np.mean(corpus_costs)))\n parse_file.write('DLT cost standard deviation: {:.4f}\\n'.format(np.std(corpus_costs)))\n # Document word order distribution\n parse_file.write('=' * 10 + '\\n')\n parse_file.write('SVO clauses count: {:d}\\n'.format(svo_count))\n parse_file.write('SVO clauses fraction: {:.4f}\\n'.format(svo_fraction))\n parse_file.write('Other clauses count: {:d}\\n'.format(other_count))\n parse_file.write('Other clauses fraction: {:.4f}'.format(other_fraction))\n print('Done with dependency parsing.')", "def generate_corpus(series, documents):\r\n all_words = []\r\n for data_chunk in pd.read_csv(documents, chunksize=10000):\r\n for record in data_chunk.itertuples(index=True, name='Pandas'):\r\n text = getattr(record, 'text')\r\n lang = getattr(record, 'language')\r\n words = text.split(\" \")\r\n\r\n if lang == 'en':\r\n if series != None:\r\n show = getattr(record, 'category')\r\n if show == series:\r\n all_words.append(words)\r\n if series == None:\r\n all_words.append(words)\r\n \r\n return all_words", "def generate_sentence(self):\n if self.word_to_index is None:\n self.log.error(\"Need to load a model or data before this step.\")\n return []\n # Start sentence with the start token\n sentence = [self.word_to_index[self.sentence_start_token]]\n # Predict next word until end token is received\n while not sentence[-1] == self.word_to_index[self.sentence_end_token]:\n next_word_probs = self.forward_propagate(sentence)\n sampled_word = self.word_to_index[self.unknown_token]\n # We don't want the unknown token to appear in the sentence\n while sampled_word == self.word_to_index[self.unknown_token]:\n samples = np.random.multinomial(1, next_word_probs[-1])\n sampled_word = np.argmax(samples)\n sentence.append(sampled_word)\n sentence_str = [self.index_to_word[word] for word in sentence[1:-1]]\n return sentence_str", "def process_corpus(data_dir, feature_source):\n raw_dir = os.path.join(data_dir, 'text/', feature_source)\n stem_dir = os.path.join(data_dir, 'text/stemmed_{0}/'.format(feature_source))\n proc_dir = os.path.join(data_dir, 'text/cleaned_{0}/'.format(feature_source))\n spell_file = os.path.join('/home/data/nbc/athena/athena-data/misc/english_spellings.csv')\n spell_df = pd.read_csv(spell_file, index_col='UK')\n spell_dict = spell_df['US'].to_dict()\n\n # Defines functions that are from the NLTK, I assume?\n stemmer = EnglishStemmer()\n test_stemmer = PorterStemmer()\n\n # Cycles through each raw .txt file\n for file_ in glob(os.path.join(raw_dir, '*.txt')):\n filename = os.path.basename(file_)\n print('Processing {0}'.format(filename))\n with open(file_, 'rb') as fo:\n text = fo.read()\n\n text = text.decode('utf8', 'ignore').encode('ascii', 'ignore')\n\n # Clean text\n text = abbr.clean_str(text)\n\n # Detect and expand abbreviations\n text = abbr.expandall(text)\n\n # Remove periods (for abbreviations)\n text = text.replace('.', '')\n\n # Replace British words with American words.\n pattern = re.compile(r'\\b(' + '|'.join(spell_dict.keys()) + r')\\b')\n text = pattern.sub(lambda x: spell_dict[x.group()], text)\n\n # Defines stem_list which will be a list of all the words in the file,\n # not including spaces\n stem_list = []\n for word in text.split():\n # Use Porter stemmer to test for string unicode encoding, then use\n # English 
stemmer to perform stemming\n try:\n ' '.join(['kdkd', test_stemmer.stem(word), 'kdkd'])\n except:\n word = word.decode('utf8', 'ignore').encode('ascii', 'ignore')\n stem_list.append(stemmer.stem(word))\n\n # Writes the stem_list\n with open(os.path.join(stem_dir, filename), 'wb') as fo:\n fo.write(' '.join(stem_list))\n\n # Writes the processed text\n with open(os.path.join(proc_dir, filename), 'wb') as fo:\n fo.write(text)", "def main(infile, outdir):\n outfname = Path(infile).stem + '.txt'\n outdir = Path(outdir)\n outdir.mkdir(parents=True, exist_ok=True)\n outfile = outdir / outfname\n out_path = extract_sentences_to_file(infile, outfile)\n\n return out_path", "def train(self, corpus):\n\n\n temp = \"\"\n for sentence in corpus.corpus:\n\n i = 0\n for datum in sentence.data:\n # print str(sentence.data)\n self.total=self.total+1\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n if (i == 0):\n temp = datum.word\n i = i + 1\n continue\n\n i = i + 1\n\n key = temp + \",\" + token\n self.bigramCounts[key] = self.bigramCounts[key] + 1\n # print token\n temp = token\n\n pass", "def extract_all_sentences_from_files(source_directory):\n\n start_time = time.time()\n\n sentences = list()\n files = os.listdir(source_directory)\n\n print(\"FilesCount={}, Message=\\\"Starting to extract sentences from files\\\"\".format(len(files)))\n\n for file in files:\n if not file.endswith(\".txt\"):\n print(\"FileName={}, Message=\\\"Ignoring file with invalid structure. File extension needs to be .txt for sentences to be loaded\\\"\".format(file))\n continue\n\n file_path = os.path.join(source_directory, file)\n\n sentences_file = extract_sentences(file_path)\n sentences.extend(sentences_file)\n\n print(\"ElapsedTime={}, FilesCount={}, TotalSentencesCount={}, \"\n \"Message=\\\"Finished extracting sentences from files\\\"\".format(get_elapsed_time(start_time), len(files), len(sentences)))\n\n return sentences", "def tokenize(self, path):\n assert os.path.exists(path)\n # add the start of sentence token\n sentence_sep = [BOS]\n with open(path, 'r') as f:\n sentences = [BOS]\n for sentence in tqdm(f, desc='Processing file: {}'.format(path)):\n sentences += sentence.split() + sentence_sep\n # split into list of tokens\n self.data = sentences", "def train(self, corpus):\n for sentence in corpus.corpus:\n cleanSentence = sentence.cleanSentence()\n for datum in cleanSentence.data:\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n self.total += 1\n\n i = 0\n while i < len(sentence.data) - 1:\n token = str(cleanSentence.get(i))\n self.followingWords[token].add(str(cleanSentence.get(i+1)))\n i += 1\n\n i = 1\n while i < len(sentence.data):\n bigram = str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.bigramCounts[bigram] = self.bigramCounts[bigram] + 1\n\n self.precedingWords[str(cleanSentence.get(i))].add(str(cleanSentence.get(i-1)))\n i += 1\n self.precedingWordsTotal = sum(map(lambda x: len(x), self.precedingWords.values()))\n\n i = 2\n while i < len(sentence.data):\n trigram = str(cleanSentence.get(i-2)) + \" \" + str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.trigramCounts[trigram] = self.trigramCounts[trigram] + 1\n i += 1\n\n #print('precedingWords')\n #print(self.precedingWords)\n #print('followingWords')\n #print(self.followingWords)\n #print('unigrams')\n #print(self.unigramCounts)\n #print('bigrams')\n #print(self.bigramCounts)\n\n #self.discount(self.trigramCounts)\n 
#self.discount(self.bigramCounts)\n #self.discount(self.unigramCounts)", "def read_corpus(file_path, source):\n data = []\n for line in open(file_path):\n sent = line.strip().split(' ')\n # only append <s> and </s> to the target sentence\n if source == 'tgt':\n sent = ['<s>'] + sent + ['</s>'] #TODO: Change\n data.append(sent)\n return data", "def sent_to_words(sentences):\n for sentence in sentences:\n yield(gensim.utils.simple_preprocess(str(sentence), deacc=True))", "def generate_corpus(model, sample):\r\n \r\n dl_corpus = []\r\n for word in sample:\r\n if word in model:\r\n dl_corpus.append(model[word])\r\n else:\r\n dl_corpus.append([0]*VECTOR_DIM)\r\n\r\n return [dl_corpus]", "def createTXT(self):\n now = dt.datetime.now().strftime(\"%m-%d %H-%M\")\n self.filename = \"bwcca_tags \" + now\n try:\n if \"/\" in self.dir_lbl[\"text\"]:\n desired_list = self.phraseMaker()\n with open(f\"{self.folder}/{self.filename}.txt\", \"w\") as f:\n for i in desired_list:\n f.write(f\"{i}\\n\")\n self.stat_lbl[\"text\"] = f\"/{self.filename} created!\"\n else:\n self.dir_lbl[\"text\"] = \"Select a folder!\"\n self.dir_btn.focus()\n except Exception as e:\n self.dir_lbl[\"text\"] = e", "def sent_to_words(sentences):\r\n for sentence in sentences:\r\n yield(gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuations\r", "def corpus_iter_words(corpus):\n\n corpus_file = open(corpus[\"path\"], mode='r', encoding=corpus[\"encoding\"])\n\n if corpus[\"word_tokenization\"] == DEFAULT:\n \"\"\"\n This is the regex that we use to perform the default word tokenization.\n Use http://regexper.com to visualize it.\n\n Examples of words that we parse in their entirety:\n - M&Ms\n - 123.45-point\n - magnitude-7.0\n - they're\n\n Examples of words that are broken up due to ambiguities that are\n difficult to resolve:\n - 'tis (becomes [\"'\", \"tis\"])\n - U.S. (becomes [\"U\", \".\", \"S\", \".\"])\n \"\"\"\n regex = r\"(?:\\d+(?:\\.\\d+)*|[\\w<>]+)(?:[-'&]?(?:\\d+(?:\\.\\d+)*|[\\w<>]+))*|--|[^\\w\\s]\"\n delimiter = corpus[\"delimiter\"]\n if delimiter != \"\":\n regex = regex + \"|\" + delimiter\n\n text = corpus_file.read()\n for token in re.findall(regex, text, re.UNICODE):\n yield token\n else:\n for line in corpus_file.readlines():\n for token in line.strip(\" \").split(\" \"):\n yield token", "def iter_sents(just_some=False):\n for i, text in enumerate(iter_texts()):\n if just_some and i == 1000: break\n doc = nlp(text)\n for sent in doc.sents:\n yield [t.text.lower() for t in sent if not t.is_stop and t.is_alpha and len(t.text) > 1]", "def sentences(self, tag=False, tag_method=None):\n self.__set_text_node(self.root_)\n sentence_nodes = filter(lambda n: n.nodeType == n.ELEMENT_NODE and n.tagName == 's',\n list(self.text_node.childNodes))\n sentences = []\n for s in sentence_nodes:\n current = []\n TimeMLDoc.__get_text(s, current, False)\n #print(current)\n if not tag:\n sentences.append(''.join([ c[0] for c in current]))\n else:\n sentences.append(tag_method(current))\n return sentences", "def train(self, corpus):\n for sentence in corpus.corpus:\n for datum in sentence.data: \n self.unigramCounts[datum.word] += 1\n self.totalCount += 1", "def anlText(self, inputFile):\n strBuf = \"\"\n splitter = re.compile(self._stcSeps)\n for rawLine in inputFile:\n line = rawLine.replace(\"\\n\", \"\")\n if (not splitter.search(line)): # Don't have a full sentence yet\n strBuf += \" \" + line\n else: # Found a sentence end. 
Get and process the full sentence.\n tempText = strBuf + line\n while splitter.search(tempText):\n stcList = splitter.split(tempText, 1)\n self.anlSentence(stcList[0])\n tempText = stcList[1] # Store what's left for the next\n strBuf = tempText\n if len(strBuf): # Process whatever is left at the end.\n self.anlSentence(strBuf)", "def create_lyrics_corpus(artists, lang_model):\n complete_lyrics = []\n indices = []\n for i, artist in enumerate(artists):\n directory = f\"lyrics/{artist.lower().replace(' ', '-')}-lyrics\"\n allfiles = os.listdir(directory)\n all_lyrics = []\n for file in allfiles:\n with open(directory + \"/\" + file, \"r\", encoding=\"utf-8\") as f:\n song_lyrics = f.read()\n all_lyrics.append(clean_my_song(song_lyrics, lang_model))\n indices += [i] * len(all_lyrics)\n print(artist, len(all_lyrics))\n complete_lyrics += all_lyrics\n return complete_lyrics, indices", "def read_corpus(file_path, source):\n data = []\n for line in open(file_path, encoding='utf-8'):\n sent = line.strip().split(' ')\n # only append <s> and </s> to the target sentence\n if source == 'tgt':\n sent = ['<s>'] + sent + ['</s>']\n data.append(sent)\n\n return data", "def read_data(self, dirname):\n # NOTE: We cache stemmed documents for speed\n # (i.e. write to files in new 'stemmed/' dir).\n\n print(\"Reading in documents...\")\n # dict mapping file names to list of \"words\" (tokens)\n filenames = os.listdir(dirname)\n subdirs = os.listdir(dirname)\n if 'stemmed' in subdirs:\n titles, docs = self.__read_stemmed_data(dirname)\n else:\n titles, docs = self.__read_raw_data(dirname)\n\n # Sort document alphabetically by title to ensure we have the proper\n # document indices when referring to them.\n ordering = [idx for idx, title in sorted(enumerate(titles),\n key = lambda xx : xx[1])]\n\n self.titles = []\n self.docs = []\n numdocs = len(docs)\n for d in range(numdocs):\n self.titles.append(titles[ordering[d]])\n self.docs.append(docs[ordering[d]])\n\n # Get the vocabulary.\n self.vocab = [xx for xx in self.get_uniq_words()]", "def extract_sentences_from_dir(root_path: str, constraint: Optional[str]) -> list:\n sentences = list()\n for root, _, files in os.walk(root_path, topdown=False):\n for file in files:\n if constraint and constraint not in file:\n continue\n path = os.path.join(root, file)\n sentences.extend(extract_sentences_from_file(path))\n return unique(sentences)", "def train(self, corpus): \n # TODO your code here\n # Tip: To get words from the corpus, try\n # for sentence in corpus.corpus:\n # for datum in sentence.data: \n # word = datum.word\n for sentence in corpus:\n prevWord = \"\"\n prevPrevWord = \"\"\n for word in sentence:\n word = word.strip(STRIP_CHARS)\n word = word.lower()\n currentWord = word\n self.unigramCounts[currentWord] += 1\n self.total += 1\n if prevWord != \"\":\n if prevPrevWord != \"\":\n trigram = (prevPrevWord, prevWord, currentWord)\n if trigram not in self.trigramCounts:\n self.continuationCounts[currentWord] += 1\n self.followingCounts[(prevPrevWord, prevWord)] += 1\n self.trigramCounts[trigram] += 1\n self.bigramCounts[(prevWord, currentWord)] += 1\n self.totalBigramCounts += 1\n else:\n self.bigramCounts[(prevWord, currentWord)] += 1\n self.totalBigramCounts += 1\n prevPrevWord = prevWord\n prevWord = currentWord\n else:\n prevWord = currentWord\n self.total += len(self.unigramCounts)", "def prepare_corpus(self, final_training):\n if final_training:\n df_seq = self.data_processor.create_user_click_sequence()\n else:\n df_seq = 
self.data_processor.create_user_click_sequence(\n end_date=self.config[\"test_split_date\"]\n )\n sentences = df_seq[\"merchant_seq\"].values.tolist()\n sentences = [list(map(str, sent)) for sent in sentences]\n return sentences", "def generate_sentence_embeddings():\n generate_embeddings_sentence(\"Data/en-train.json\", \"Data_Sent_Embds/en_sent.pkl\")\n generate_embeddings_sentence(\"Data/es-train.json\", \"Data_Sent_Embds/es_sent.pkl\")\n generate_embeddings_sentence(\"Data/pr-train.json\", \"Data_Sent_Embds/pr_sent.pkl\")", "def train(self, corpus): \n # TODO your code here\n \n for sentence in corpus.corpus:\n for i,dotum in enumerate(sentence.data[1:]):\n self.vocab[dotum.word][sentence.data[i].word] +=1\n self.word_counts[sentence.data[i].word] +=1\n self.total +=1\n self.v = len(self.vocab.keys())", "def generate_samples(self, data_dir, tmp_dir, dataset_split):\r\n train = dataset_split == problem.DatasetSplit.TRAIN\r\n dataset_path = (\"train.tok.clean.bpe.32000\"\r\n if train else \"newstest2013.tok.bpe.32000\") # da controllare\r\n train_path = _get_wmt_enit_bpe_dataset(tmp_dir, dataset_path)\r\n\r\n # Vocab\r\n token_path = os.path.join(data_dir, self.vocab_filename)\r\n if not tf.gfile.Exists(token_path):\r\n token_tmp_path = os.path.join(tmp_dir, self.vocab_filename)\r\n tf.gfile.Copy(token_tmp_path, token_path)\r\n with tf.gfile.GFile(token_path, mode=\"r\") as f:\r\n vocab_data = \"<pad>\\n<EOS>\\n\" + f.read() + \"UNK\\n\"\r\n with tf.gfile.GFile(token_path, mode=\"w\") as f:\r\n f.write(vocab_data)\r\n\r\n return text_problems.text2text_txt_iterator(train_path + \".en\",\r\n train_path + \".it\")", "def generate_text(seed, numlines, gen_file, wseed=False):\n generated = ''\n gprinted = ''\n sentence = seed\n generated += sentence\n\n nlines = 0\n for i in range(1000):\n x = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x[0, t, char_indices[char]] = 1.\n\n predictions = model.predict(x, verbose=0)[0]\n next_index = sample(predictions, diversity)\n next_char = indices_char[next_index]\n generated += next_char\n gprinted += next_char\n\n sentence = sentence[1:] + next_char\n # Count the number of lines generated\n if next_char == '\\n':\n nlines += 1\n if nlines > numlines:\n break\n\n if wseed:\n gen_file.write(seed + gprinted)\n else:\n gen_file.write(gprinted)\n\n gen_file.write('\\n')\n gen_file.flush()", "def generate_sentences(self, count=5):\n\n with self.open_text_data() as f:\n result = self.read_sentences(f, count=count)\n return result", "def generate_markov_text(self, file, size=15, sent=7000):\n\t\tseed = random.randint(0, self.word_size-3)\n\t\tend = \"</f>\"\n\t\t# print(seed)\n\t\tseed_word, next_word = self.words[seed], self.words[seed+1]\n\t\tw1, w2 = seed_word, next_word\n\t\trestart = 0\n\n\t\twith open (file, 'a') as output: # 'append' instead of 'w'\n\t\t\tindex = 18001 # the previous 18k sentences are already written down in file \n\t\t\tfor i in range(sent):\n\t\t\t\tgen_words = [] # record one sentence\t\n\t\t\t\tfor j in range(1, size):\n\t\t\t\t\tgen_words.append(w1)\n\t\t\t\t\t# when comes to the end of words, restart with a new random seed number for w1 and w2\n\t\t\t\t\tif w2 == end:\n\t\t\t\t\t\trestart += 1 # record the restarting number\n\t\t\t\t\t\tseed = random.randint(0, self.word_size-3)\n\t\t\t\t\t\tw1, w2 = self.words[seed], self.words[seed+1]\n\t\t\t\t\tw1, w2 = w2, random.choice(self.cache[(w1, w2)])\n\t\t\t\tgen_words.append(w2)\n\t\t\t\t# print(str(i+1) + '. 
' + ' '.join(gen_words))\n\t\t\t\tsentence = ' '.join(gen_words)\n\t\t\t\toutput.write(str(index)+'\\t0000000\\t'+str(sentence)+'\\tnegatif\\n')\n\t\t\t\tindex += 1\n\t\toutput.close()\n\t\t# print(restart)", "def save_in_sentence_form(self, dataset):\n big_dict = []\n\n # iterating through directories\n for file_path in self.list_news_path:\n with open(file_path, 'r') as f:\n # loading news content into labelled sections\n doc = json.load(f)\n if doc[\"title\"] == \"\" or doc[\"text\"] == \"\":\n pass\n else:\n big_dict.append(\n {\"title\": remove_emoji(doc[\"title\"]),\n \"body\": remove_emoji(doc[\"text\"]),\n \"label\": str(file_path.split('/')[-3])})\n # write contents of dictionary to file\n print(len(big_dict))\n outfilepath = \"-\".join(dataset)+ \"_data.csv\"\n pd.DataFrame(big_dict).to_csv(outfilepath, index=False)\n return outfilepath\n # with open(\"data.json\", 'w+') as file:\n # json.dump(big_dict, file)", "def word_gen(path, skiplines=0):\n\n with open(path) as f:\n for _ in range(skiplines):\n next(f)\n for line in f:\n stripped_line = line.translate(str.maketrans('', '', string.punctuation+string.digits))\n for word in stripped_line.split():\n yield word.lower()", "def generate_sentence(session, model, config, *args, **kwargs):\n return generate_text(session, model, config, *args, stop_tokens=['<eos>'], **kwargs)", "def semcor2run(args):\r\n input_files = list_files(*args.input_files)\r\n output_dir = Path(args.output_dir)\r\n if not output_dir.is_dir():\r\n try:\r\n output_dir.mkdir()\r\n except:\r\n print('Invalid output directory name. Files will be stored in default directory.', file = stderr)\r\n output_dir = output_default / 'running_text'\r\n output_dir.mkdir()\r\n multiword = args.multiword\r\n for input_file in input_files:\r\n corpus_file = CorpusFile(input_file)\r\n filename = corpus_file.shortname + '.txt'\r\n dirname = output_dir / corpus_file.concordance\r\n if not dirname.exists():\r\n dirname.mkdir()\r\n output_file_name = dirname / filename\r\n with output_file_name.open('w') as output_file:\r\n for paragraph in corpus_file.text.find_all('p'):\r\n for word in paragraph.find_all(['wf', 'punc']):\r\n if word.name == 'punc':\r\n output_file.write(word.string)\r\n elif not multiword:\r\n for token in Token.from_tag(word).get_components():\r\n output_file.write(' {}/{}'.format(token.wordform, token.pos))\r\n else:\r\n token = Token.from_tag(word)\r\n output_file.write(' {}/{}'.format(token.wordform, token.pos))\r\n output_file.write('\\n')", "def generate_sentence(self,t=20):\n return result", "def generate_samples(self, data_dir, tmp_dir, dataset_split):\n train = dataset_split == problem.DatasetSplit.TRAIN\n dataset_path = (\"train.tok.clean.bpe.32000\"\n if train else \"newstest2013.tok.bpe.32000\")\n train_path = _get_wmt_ende_bpe_dataset(tmp_dir, dataset_path)\n\n # Vocab\n token_path = os.path.join(data_dir, self.vocab_filename)\n if not tf.gfile.Exists(token_path):\n token_tmp_path = os.path.join(tmp_dir, self.vocab_filename)\n tf.gfile.Copy(token_tmp_path, token_path)\n with tf.gfile.GFile(token_path, mode=\"r\") as f:\n vocab_data = \"<pad>\\n<EOS>\\n\" + f.read() + \"UNK\\n\"\n with tf.gfile.GFile(token_path, mode=\"w\") as f:\n f.write(vocab_data)\n\n return text_problems.text2text_txt_iterator(train_path + \".en\",\n train_path + \".de\")", "def readcorpus(index):\n for docid in range(0, 1001):\n file = open(PATH + \"Document-\" + str(docid) + \".txt\", 'r', encoding=\"utf-8\")\n allcontent = file.readlines()\n stoplist = getstopwords()\n corpus = 
[]\n stemmer = PorterStemmer()\n getatitle(allcontent, corpus)\n getmetakeywords(allcontent, corpus)\n getcontent(allcontent, corpus)\n flagfordate = 0\n for i in range(0, len(corpus)):\n if flagfordate == 1:\n flagfordate = 0\n continue\n word = corpus[i]\n if word in MONTH:\n if expressionfordateb(corpus, i) is True or expressionfordatef(corpus, i) is True:\n word = constructdate_expression(corpus, i, index)\n increasecount(index[0], word, docid)\n if word in stoplist:\n continue\n increasecount(index[1], word, docid)\n increasecount(index[2], processword(stemmer.stem(word)), docid)", "def splitSentencesSingleDocument(documentPath, targets, numLeadingWords, numTrailingWords, spanTargetPunctuation=None):\n\n pyConTextInput = PyConTextInput()\n\n sentenceTuples = _splitSentencesSingleDocumentInternal(documentPath, targets, numLeadingWords, numTrailingWords, spanTargetPunctuation)\n\n if sentenceTuples[0][0] == None:\n pyConTextInput.addDocumentPlaceholder(sentenceTuples[0][1])\n return pyConTextInput\n else:\n for sentenceTuple in sentenceTuples:\n pyConTextInput.addSentence(*sentenceTuple)\n return pyConTextInput", "def train(self, corpus):\n self.tokens = []\n self.tags = []\n sentences = corpus.split(NEW_LINE)\n for sentence in sentences:\n start = START_SIGHT + SLASH + START_SIGHT + SPACE + START_SIGHT + SLASH + START_SIGHT + SPACE\n end = SPACE + END + SLASH + END\n sentence = start + sentence + end \n tokens = sentence.split(SPACE)\n for t in tokens:\n token = t.rsplit(SLASH, 1)\n if (len(token) > 1):\n self.tokens.append(token) \n self.tags.append(token[TAG_INDEX])\n \n nonsense_cases = set([(END, START_SIGHT), (START_SIGHT, END),\n (START_SIGHT, START_SIGHT, END),\n (END, START_SIGHT, START_SIGHT)])\n self.bigram_tags = [b for b in zip(self.tags[:-1], self.tags[1:]) if b not in nonsense_cases]\n self.trigram_tags = [t for t in zip(self.tags[:-1], self.tags[1:], self.tags[2:])\\\n if not (t[WORD_INDEX], t[TAG_INDEX]) in nonsense_cases and\\\n not (t[WORD_INDEX], t[TAG_INDEX]) in nonsense_cases]", "def iter_texts():\n dirs = 'comm_use_subset noncomm_use_subset pmc_custom_license biorxiv_medrxiv'.split()\n for dir in dirs:\n fnames = (DATA_PATH / dir / dir).glob('*')\n for fname in fnames:\n with fname.open() as f:\n content = json.load(f)\n \n for key in 'abstract body_text'.split():\n for row in content[key]:\n yield row['text']", "def main():\n filepath = input(\"Enter the Source File: \")\n with open(filepath, encoding=\"utf-8\") as f:\n sentences = f.readlines()\n sentences = \" \".join(sentences)\n\n summary = summarize_sentences(sentences)\n\n filepath_index = filepath.find(\".txt\")\n outputpath = filepath[:filepath_index] + \"_lexRank.txt\"\n\n with open(outputpath, \"w\") as w:\n for sentence in summary:\n w.write(str(sentence) + \"\\n\")", "def splitSentencesMultipleDocuments(directoryList, targets, numLeadingWords, numTrailingWords,\n spanTargetPunctuation=None):\n if type(directoryList) != list:\n directoryList = [directoryList]\n\n cleanList = utilities.cleanDirectoryList(directoryList)\n\n fileList = [filename for directory in cleanList for filename in glob.glob(directory)]\n\n sentenceTuples = []\n for index, filepath in enumerate(fileList):\n sys.stdout.write(\"\\rSplitting document %i of %i. 
(%.2f%%)\"% (index + 1, len(fileList) + 1, float(index + 1)/float(len(fileList) + 1) * 100.))\n sys.stdout.flush()\n tupleList = _splitSentencesSingleDocumentInternal(filepath, targets, numLeadingWords, numTrailingWords,spanTargetPunctuation)\n #If the splitter returns (None, <docname>) instead of a list then we should append the tuple rather than\n # extending the list with the contents of the tuple.\n if isinstance(tupleList, list):\n sentenceTuples.extend(tupleList)\n else:\n sentenceTuples.append(tupleList)\n print \"\"\n\n pyConTextInput = PyConTextInput(numDocs=len(fileList))\n for sentenceTuple in sentenceTuples:\n if sentenceTuple[0] == None:\n pyConTextInput.addDocumentPlaceholder(sentenceTuple[1])\n else:\n pyConTextInput.addSentence(*sentenceTuple)\n\n if not pyConTextInput.containsExpectedNumberOfDocKeys():\n raise RuntimeError(\"The PyConTextInput object produced by PyConTextBuiltinSplitter does not contain the expected number of documents. Expected: %i, Contains: %i\" % (pyConTextInput.numDocs, len(pyConTextInput.keys())))\n\n return pyConTextInput", "def __iter__(self):\n for tokens in iter_folder(self.top_dir, self.ngrams):\n # transform tokens (strings) into a sparse vector, one at a time\n yield self.dictionary.doc2bow(tokens)", "def iter_sentence_words(self, lower=True, stem=False):\n for sentence in self.iter_sentences():\n words = self.word_tokenizer.tokenize(sentence)\n if lower:\n words = [word.lower() for word in words]\n if stem:\n words = [self.stemmer.stem(word) for word in words]\n\n yield words", "def train(self, corpus):\n lastToken = \"#\"\n for sentence in corpus.corpus:\n for datum in sentence.data:\n token = datum.word\n self.reverseBigramCount[token][lastToken] += 1\n self.bigramCount[lastToken][token] += 1\n self.unigramCount[token] += 1\n self.total += 1\n lastToken = token", "def extract_corpus(corpus_dir = \"articles\"):\n corpus = {}\n num_documents = 0\n for filename in os.listdir(corpus_dir):\n with open(os.path.join(corpus_dir, filename)) as f:\n corpus[filename] = re.sub(\"[^\\w]\", \" \", f.read()).split()\n return corpus", "def get_sentences(self):\n for tree in self.tree_generator():\n yield tree[\"title\"] + \" \" + tree[\"selftext\"]\n for _, comment in tree[\"comments\"].items():\n yield comment[\"body\"]", "def generate(size, data_dim=5, n_phrase_labels=4, n_words=3,\n n_phrase_words=3, n_phrases=5, label_noise=0.,\n min_sent_len=5, max_sent_len=5, tag_end=True):\n assert n_words < 256\n assert max_sent_len >= n_phrase_words\n global dictionary, phrases\n\n # generate dictionary\n dictionary = uniform(-1.0, 1.0, size=(n_words, data_dim))\n\n # generate n_phrases unique word sequences of length n_phrase_words\n print \"Generating %d phrases\" % n_phrases\n phrases = []\n phrase_labels = []\n while len(phrases) != n_phrases:\n phrases = np.unique(np.array([\"\".join(map(chr, randint(n_words, size=n_phrase_words)))\n for i in xrange(n_phrases)], dtype=np.object))\n assert np.unique(map(len, phrases)) == n_phrase_words\n phrase_labels = 1+randint(n_phrase_labels-1, size=n_phrases)\n\n # generate 'sentences'\n print \"Generating %d sentences\" % sum(size)\n Xind = []\n Y = []\n for i in xrange(sum(size)):\n while True:\n sent_len = randint(min_sent_len, max_sent_len+1)\n sent = \"\".join(map(chr, randint(n_words, size=sent_len)))\n if contains_any_phrase(sent, phrases):\n print \".\",\n break\n Y.append(np.zeros(sent_len,dtype=np.int))\n Xind.append(sent)\n\n # generate labels for dataset\n print \"Generating labels for the sentences...\"\n for 
phrase, plabel in zip(phrases, phrase_labels):\n for idx, sent in enumerate(Xind):\n start = 0\n while True:\n sidx = sent.find(phrase, start)\n if sidx < 0:\n break\n if tag_end:\n Y[idx][sidx+len(phrase)-1] = plabel\n else:\n Y[idx][sidx] = plabel\n start += 1\n\n print \"Trafo...\"\n # transform dataset to code\n if data_dim > 1:\n X = [[dictionary[ord(c)] for c in sent] for sent in Xind]\n else:\n X = [[ord(c) for c in sent] for sent in Xind]\n\n Xtrain, Xtest = X[:size[0]], X[size[0]:]\n Ytrain, Ytest = Y[:size[0]], Y[size[0]:]\n\n # training label noise\n for sent in Ytrain:\n mask = uniform(size=sent.size) < label_noise\n sent[mask] = randint(n_phrase_labels, size=mask.sum())\n print \"Done.\"\n\n return Xtrain, Xtest, Ytrain, Ytest", "def tokenize(self, fileid):\n for paragraph in self.corpus.paras(fileids=fileid):\n sents = []\n for sent in sent_tokenize(paragraph, language='russian'):\n words = []\n for word in wordpunct_tokenize(sent):\n token = self.lemmatize(word)\n if not self.is_punct(token) and not self.is_stopword(token):\n\n words.append((token, str(self.morph.parse(word)[0].tag.POS)))\n\n sents.append(words)\n yield sents\n # yield [\n # (word, morph.parse(word)[0].tag.POS)\n # # pos_tag(wordpunct_tokenize(sent), lang='rus')\n # for sent in sent_tokenize(paragraph, language='russian')\n # for word in wordpunct_tokenize(sent)\n # ]\n # yield [\n # pos_tag(wordpunct_tokenize(sent), lang='rus')\n # for sent in sent_tokenize(paragraph, language='russian')\n # ]", "def create_corpus(crawled_lyrics_file, save=False):\n\n # generating cleaned lyrics corpus from crawled data\n clean_lyrics(crawled_lyrics_file) # the corpus is one sequence of characters per line\n subprocess.call('kytea < ./data/cleaned_lyrics.txt > ./data/kytea_out.txt', shell=True) # processing with kytea\n logger.info(\" Done kytea processing! 
\")\n\n pron = []\n unk_pat = re.compile(u\"/補助記号/UNK\")\n slash_pat = re.compile(ur\"\\\\\")\n\n with codecs.open(\"data/kytea_out.txt\", 'UTF-8') as f:\n for line in f:\n line = line.decode(encoding=\"utf-8\").strip()\n line = unk_pat.sub(u\"\", line)\n line = slash_pat.sub(u\"\", line)\n\n triplets = line.split(u\" \") # take a look at Kytea output: https://github.com/chezou/Mykytea-python\n seq = []\n for item in triplets:\n try:\n # hir = item.split(u\"/\")[2]\n # if hir != \"UNK\":\n hir = item.split(u\"/\")[0]\n if hir != \"\\\\\":\n seq.append(hir)\n except IndexError:\n continue\n\n candidate_line = unicodedata.normalize(\"NFKC\", u\" \".join(seq))\n candidate_line = re.sub(u\"[A-Za-z]\", u\"\", candidate_line)\n candidate_line = re.sub(u\"\\s+\", u\"\", candidate_line)\n candidate_line = re.sub(u\"\\d+\", u\"5\", candidate_line)\n\n if len(candidate_line) > 10:\n pron.append(candidate_line)\n\n\n NN_input = u\"\\n\".join(pron)\n return NN_input", "def sentence_iterator(corpus_iterator):\r\n current_sentence = [] #Buffer for the current sentence\r\n for l in corpus_iterator: \r\n if l is None:\r\n if current_sentence: #Reached the end of a sentence\r\n yield current_sentence\r\n current_sentence = [] #Reset buffer\r\n else: # Got empty input stream\r\n sys.stderr.write(\"WARNING: Got empty input file/stream.\\n\")\r\n raise StopIteration\r\n else:\r\n current_sentence.append(l) #Add token to the buffer\r\n\r\n if current_sentence: # If the last line was blank, we're done\r\n yield current_sentence #Otherwise when there is no more token\r\n # in the stream return the last sentence.\r", "def readFileToCorpus(f):\n if os.path.isfile(f):\n file = open(f, \"r\") # open the input file in read-only mode\n i = 0 # this is just a counter to keep track of the sentence numbers\n corpus = [] # this will become a list of sentences\n print(\"Reading file \", f)\n for line in file:\n i += 1\n sentence = line.split() # split the line into a list of words\n #append this lis as an element to the list of sentences\n corpus.append(sentence)\n if i % 1000 == 0:\n #print a status message: str(i) turns int i into a string\n #so we can concatenate it\n sys.stderr.write(\"Reading sentence \" + str(i) + \"\\n\")\n #endif\n #endfor\n return corpus\n else:\n #ideally we would throw an exception here, but this will suffice\n print(\"Error: corpus file \", f, \" does not exist\")\n sys.exit() # exit the script\n #endif", "def get_corpus():\n corpus_raw = []\n files = os.listdir()\n\n for name in files:\n if \".txt\" in name:\n try:\n file = open(name, \"rt\", encoding='utf8')\n data_org = file.read()\n corpus_raw.append(data_org)\n except:\n print(\"ERROR: Couldn't open a .txt file. Please ensure that the text is UTF-8 encoded.\")\n elif \".docx\" in name:\n try:\n data_org = docx2txt.process(name)\n corpus_raw.append(data_org)\n except:\n print(\"ERROR: Couldn't open a .docx file. Please ensure that the text is UTF-8 encoded.\")\n else:\n print(\"ERROR: Cannot print non .txt or .docx files. 
Please verify the input folder's contents.\")\n\n return corpus_raw", "def sentencing(any_text, nlp):\n nlp.add_pipe(nlp.create_pipe('sentencizer'))\n doc = nlp(any_text)\n sentences = [sent.string.strip() for sent in doc.sents]\n return sentences", "def train_from_dir(self, path, cat):\n dirfiles = glob.glob(os.path.join(path, '*'))\n total = len(dirfiles)\n count = 0\n for infile in dirfiles:\n f = open(infile, \"r\")\n text = f.read()\n self.train(text, cat)", "def __generateSentences(self, ngrams, n, length, repetition, seed):\n randInt = random.randint(1, repetition)\n sent = ''\n for i in range(randInt):\n sent += self.__markovGen(self.ngrams, n, length, seed)\n sent += ' '\n return sent", "def tokenize_files(paths, source_root, destination_root):\n for p, path in enumerate(paths):\n # CHANGE AND CREATE PATHS\n if not os.path.exists(destination_root):\n os.makedirs(destination_root)\n new_path = path.replace(source_root, destination_root) \n repertoire = '/'.join(new_path.split('/')[:-1])\n title = new_path.split('/')[-1]\n # CREATE THE NEW ARBORESCENCE\n directory = repertoire.split('/')\n for d in range(1, len(directory)):\n directory[d] = '/'.join([directory[d-1], directory[d]])\n if not os.path.exists(directory[d]):\n os.makedirs(directory[d])\n # WRITE TOKENIZED FILES\n with open('{}/{}'.format(repertoire, title), 'w') as tokenized_txt:\n\n tokenized_txt.write(\"\\n\".join(txt_to_sentences(path)))\n tokenized_txt.close()\n print('{}/{}'.format(repertoire, title))" ]
[ "0.6356309", "0.6221751", "0.6179384", "0.595983", "0.5889285", "0.5882137", "0.5873492", "0.58633006", "0.5855387", "0.5792811", "0.578709", "0.5780858", "0.5774546", "0.5762869", "0.5740365", "0.5727936", "0.5726981", "0.5708242", "0.56983536", "0.56931585", "0.569029", "0.56899077", "0.5688693", "0.568819", "0.56764174", "0.56647307", "0.56582177", "0.5655392", "0.5640177", "0.56345856", "0.56144303", "0.5609844", "0.56089234", "0.5591854", "0.55913293", "0.5588278", "0.5578526", "0.5558728", "0.55544883", "0.55523086", "0.55438733", "0.5542963", "0.5501647", "0.55015576", "0.5496572", "0.54793113", "0.5467625", "0.54662275", "0.54535425", "0.54440975", "0.5444092", "0.5441186", "0.54322124", "0.543013", "0.5422799", "0.541556", "0.5410875", "0.53995353", "0.53994995", "0.5391707", "0.53903306", "0.5387879", "0.53778577", "0.5376433", "0.5374133", "0.5369625", "0.536572", "0.5361086", "0.5357224", "0.53550154", "0.53515625", "0.5351178", "0.53489727", "0.53435826", "0.5340804", "0.5324622", "0.5318531", "0.5317525", "0.5311299", "0.53072685", "0.5295819", "0.5292822", "0.5292693", "0.52909315", "0.5288249", "0.5286383", "0.5264846", "0.5264215", "0.52466553", "0.52254754", "0.5221771", "0.52208984", "0.52200806", "0.5214824", "0.5214145", "0.5214144", "0.52093273", "0.5204322", "0.5199766", "0.5197301" ]
0.71198934
0
This function returns a Pyramid WSGI application.
def main(global_config, **settings): # Read the settings for SQLAlchemy and # configure connection engine and session maker objects engine = engine_from_config(settings, 'sqlalchemy.') DBSession.configure(bind=engine) Base.metadata.bind = engine pic_dir = settings['picture_directory'] session_factory = UnencryptedCookieSessionFactoryConfig('itsaseekreet') config = Configurator(settings=settings, session_factory=session_factory) config.add_route('favicon.ico', '/favicon.ico') # Serves static directory (ie. css, js, bootstrap, etc) config.add_static_view('static', 'static', cache_max_age=3600) #config.add_static_view(pic_dir,pic_dir) # Serves up home page config.add_route('home', '/') # Product Routes config.add_route('product', '/product/{id:\d+}/{slug}') config.add_route('product_action', '/product/{action}') # ex. product/create # ex product/edit?id=number # Category Routes config.add_route('category', '/category/{id:\d+}/{slug}') # Cart Routes config.add_route('cart', '/cart') # Checkout Routes config.add_route('checkout', '/checkout') config.add_route('checkout_receipt', '/receipt') # Account Registration/Login/Logout config.add_route('login', '/login') #config.add_route('logout', '/logout') config.add_route('register', '/register') #config.add_route('add_card') # Sign authorization - added later config.add_route('auth', '/sign/{action}') config.scan() return config.make_wsgi_app()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wsgi_app():\n return bottle.default_app()", "def wsgi_app():\n return bottle.default_app()", "def bootstrap_wsgi():\n return get_wsgi_application()", "def app():\n return create_app()", "def app():\n app = create_app()\n return app", "def create_app():\n from server.web import create_app\n # If we do a static javascript app via flask, add it here\n # from server.web import create_app as create_web_app\n return create_app()", "def application(environ, start_response, app=[]):\n if not app:\n app.append(make_application())\n return app[0](environ, start_response)", "def get_app():\n return ApplicationContainer()", "def app():\n return aplicattion", "def app(request):\n app = flask.Flask(__name__)\n return app", "def get_app(self):\n return Application()", "def main(global_config, **settings):\n\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.include('.models')\n config.include('.routes')\n config.scan()\n return config.make_wsgi_app()", "def wsgi_app_factory(global_config, **local_config):\n dn = name = '${namespace}${ndot}${nested_namespace}${nsdot}${project_name}'\n wconf = global_config.copy()\n wconf.update(**local_config)\n debug = False\n if global_config.get('debug', 'False').lower() == 'true':\n debug = True\n wconf['pyramid.debug_authorization'] = 'true'\n wconf['pyramid.debug_notfound'] = 'true'\n wconf['pyramid.reload_templates'] = 'true'\n wconf['zcmls' ] = utils.splitstrip(wconf['zcmls'])\n if not wconf['zcmls']:\n wconf['zcmls'] = []\n wconf['zcmls'].insert(0, 'configure.zcml')\n for i, zcml in enumerate(wconf['zcmls']):\n if os.path.sep in zcml:\n zcml = os.path.abspath(zcml)\n else:\n zcml = pkg_resources.resource_filename(dn, zcml)\n wconf['zcmls'][i] = zcml\n\n config = Configurator(settings=wconf)\n \\# activate if you want to enable global components\n \\# globalreg = getGlobalSiteManager()\n \\# config = Configurator(registry=globalreg)\n \\# config.setup_registry(settings=wconf)\n \\# config.include('pyramid_zcml')\n\n config.hook_zca()\n for z in wconf['zcmls']:\n config.load_zcml(z)\n app = config.make_wsgi_app()\n def webbuilder_app(environ, start_response):\n req = Request(environ)\n try:\n resp = req.get_response(app)\n return resp(environ, start_response)\n except Exception, e:\n if not debug:\n return exc.HTTPServerError(str(e))(environ, start_response)\n else:\n raise\n return webbuilder_app", "def app_factory(global_conf, load_app_kwds={}, **kwargs):\n # Create the Galaxy application unless passed in\n kwargs = load_app_properties(\n kwds=kwargs,\n **load_app_kwds\n )\n if 'app' in kwargs:\n app = kwargs.pop('app')\n else:\n from galaxy.webapps.coralsnp_reports.app import UniverseApplication\n app = UniverseApplication(global_conf=global_conf, **kwargs)\n atexit.register(app.shutdown)\n # Create the universe WSGI application\n webapp = CoralSNPReportsWebApplication(app, session_cookie='galaxycoralsnpreportssession', name=\"coralsnp_reports\")\n add_ui_controllers(webapp, app)\n # These two routes handle our simple needs at the moment\n webapp.add_route('/{controller}/{action}', controller=\"root\", action='index')\n webapp.add_route('/{action}', controller='root', action='index')\n webapp.finalize_config()\n # Wrap the webapp in some useful middleware\n if kwargs.get('middleware', True):\n webapp = wrap_in_middleware(webapp, global_conf, app.application_stack, **kwargs)\n if asbool(kwargs.get('static_enabled', True)):\n webapp = wrap_if_allowed(webapp, app.application_stack, wrap_in_static,\n args=(global_conf,),\n 
kwargs=kwargs)\n # Close any pooled database connections before forking\n try:\n galaxy.model.corals.mapping.metadata.bind.dispose()\n except Exception:\n log.exception(\"Unable to dispose of pooled coralsnp_reports model database connections.\")\n # Return\n return webapp", "def get_app(self):\n return make_app(store=Store('http://localhost/'), no_auth=True)", "def make_app(global_conf, **app_conf):\n app = RestishApp(root.Root())\n app = repoze.who.config.make_middleware_with_config(app, global_conf, app_conf['repoze.who.ini'])\n app = setup_environ(app, global_conf, app_conf)\n # General \"middleware\".\n app = flash.flash_middleware_factory(app)\n app = cookies.cookies_middleware_factory(app)\n return app", "def main(_, **settings):\n config = Configurator(settings=settings)\n register_includes(config)\n register_json_renderer(config)\n register_routes(config)\n\n config.scan()\n return config.make_wsgi_app()", "def make_app():\n return tornado.web.Application([\n tornado.web.URLSpec(r\"/ws/\", WebSocket, name=\"websocket\"),\n tornado.web.URLSpec(r\"/\", StartPage, name='index'),\n (r\"/static/\", tornado.web.StaticFileHandler,\n dict(path=SETTINGS['static_path'])),\n ], **SETTINGS)", "def application():\n\n configure_app(app)\n yield app", "def app(self):\n return self.__app", "def main(global_config, **settings):\n # add settings in here?\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.include('.models')\n config.include('.routes')\n config.include('.security')\n config.scan()\n return config.make_wsgi_app()", "def app_factory():\n app = web.Application()\n app.add_routes([\n web.get('/ping', handle_ping),\n ])\n return app", "def _make_app():\n app = web.Application(middlewares=[middleware.error_middleware])\n admin_routes.setup(app)\n return app", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include(includeme)\n return config.make_wsgi_app()", "def make_app(*args, **kwargs):\n app = Flask(*args, **kwargs)\n Roots(app)\n return app", "def create_app():\n app = Flask(__name__)\n\n @app.route('/')\n def root():\n \"\"\"Base view.\"\"\"\n return 'TODO - part 2 and beyond!'\n\n return app", "def get_app(config_path):\n config = get_config(config_path)\n app = App(config['app'], get_session(config))\n app.users = get_users(config)\n app.anon_urls, app.auth_urls, app.user_urls = get_urls(config)\n\n return app", "def make_app():\n return tornado.web.Application([\n (r'/', MainHandler),\n (r'/async', AsyncHandler),\n (r'/gen', GenHandler),\n ])", "def create_app():\n app = web.Application(handlers=[\n (r'/', FilesetHandler),\n ])\n return app", "def get_application(config=None, debug=False, module=None):\n\n base_path = os.path.join(os.path.dirname(__file__), '..')\n if not base_path:\n base_path = os.getcwd()\n base_path = os.path.abspath(base_path)\n\n if config is None:\n for path in [os.path.join(base_path, 'config.yaml'),\n os.path.join(base_path, 'kohlrabi.yaml')]:\n if os.path.exists(path):\n with open(path) as cf:\n config = yaml.load(cf)\n break\n else:\n config = {'debug': debug}\n\n module = module or config.get('module', 'kohlrabi.modules.example')\n\n application = kohlrabi.handlers.application(\n static_path=os.path.join(base_path, 'static'),\n template_path=os.path.join(base_path, 'templates'),\n debug=debug,\n config=config\n )\n\n db_path = config.get('database', 'sqlite:///:memory:')\n kohlrabi.db.bind(db_path, module, create_tables=debug)\n return application", "def make_app(global_conf, 
full_stack=True, **app_conf):\r\n\r\n # Configure the Pylons environment\r\n load_environment(global_conf, app_conf)\r\n\r\n # The Pylons WSGI app\r\n app = PylonsApp(base_wsgi_app=RedditApp)\r\n\r\n # CUSTOM MIDDLEWARE HERE (filtered by the error handling middlewares)\r\n\r\n app = LimitUploadSize(app)\r\n app = ProfilingMiddleware(app)\r\n app = SourceViewMiddleware(app)\r\n\r\n app = DomainListingMiddleware(app)\r\n app = SubredditMiddleware(app)\r\n app = ExtensionMiddleware(app)\r\n app = DomainMiddleware(app)\r\n\r\n log_path = global_conf.get('log_path')\r\n if log_path:\r\n process_iden = global_conf.get('scgi_port', 'default')\r\n app = RequestLogMiddleware(log_path, process_iden, app)\r\n\r\n #TODO: breaks on 404\r\n #app = make_gzip_middleware(app, app_conf)\r\n\r\n if asbool(full_stack):\r\n # Handle Python exceptions\r\n app = ErrorHandler(app, global_conf, error_template=error_template,\r\n **config['pylons.errorware'])\r\n\r\n # Display error documents for 401, 403, 404 status codes (and 500 when\r\n # debug is disabled)\r\n app = ErrorDocuments(app, global_conf, mapper=error_mapper, **app_conf)\r\n\r\n # Establish the Registry for this application\r\n app = RegistryManager(app)\r\n\r\n # Static files\r\n javascripts_app = StaticJavascripts()\r\n # Set cache headers indicating the client should cache for 7 days\r\n static_app = StaticURLParser(config['pylons.paths']['static_files'], cache_max_age=604800)\r\n app = Cascade([static_app, javascripts_app, app])\r\n\r\n app = AbsoluteRedirectMiddleware(app)\r\n\r\n #add the rewrite rules\r\n app = RewriteMiddleware(app)\r\n\r\n app = CleanupMiddleware(app)\r\n\r\n return app", "def create_app():\n app = Flask(__name__)\n\n\n @app.route('/')\n def barebones():\n return 'the barebones'\n\n return app", "def main(global_config, **settings):\n config = Configurator(settings=settings, root_factory=root_factory)\n config.include('substanced')\n config.include('.resources')\n config.scan()\n return config.make_wsgi_app()", "def create_app():\n app = Flask(__name__)\n app.config.from_object('app.configs.config')\n app.config.from_object('app.configs.settings')\n return app", "def app(self):\r\n return self._app", "def main():\r\n run_wsgi_app(app)", "def main(global_config, **settings):\n engine = engine_from_config(settings, 'sqlalchemy.')\n DBSession.configure(bind=engine)\n Base.metadata.bind = engine\n config = Configurator(settings=settings)\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('home', '/')\n config.include(pyramid_beaker)\n config.scan()\n config['safe'] = loadSafe(config['safe_path'])\n return config.make_wsgi_app()", "def main():\n run_wsgi_app(APP)", "def get_asgi_application():\n django.setup(set_prefix=False)\n return ASGIHandler()", "def main(global_config, **settings):\n LOGGER.info('= main :: settings = %s', settings)\n\n config = Configurator(settings=settings)\n\n # Home\n config.add_route('home', '/')\n\n # Lastly, we scan the config and make the app\n # config.scan()\n\n return config.make_wsgi_app()", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n\n # Initialize Plugins\n bootstrap = Bootstrap(app) # noqa: F841\n\n with app.app_context():\n # Include our Routes\n from . 
import routes # noqa: F401\n\n # # Register Blueprints\n # app.register_blueprint(auth.auth_bp)\n # app.register_blueprint(admin.admin_bp)\n\n return app", "def make_app(conf=None):\n if not conf:\n conf = 'development'\n app = create_app(cm.get(conf))\n return app", "def generate(self) -> Flask:\n app = Flask(self.name, *self.args, **self.kwargs)\n app = self.setup_app_config(app)\n app = self.add_app_headers(app)\n app = self.add_xsrf_error_handler(app)\n\n return app", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n\n init_includes(config)\n init_routing(config)\n init_db(config)\n return config.make_wsgi_app()", "def app(self):\n\n ## set flask specific things that are non-optional\n error = lambda k: 'Fatal: You need to specify a \"flask\" section ' + \\\n 'with an entry like \"'+k+'=...\" in your .ini file'\n try: app_name = self['flask.app']\n except KeyError: raise SystemExit(error('app'))\n try: secret_key = self['flask.secret_key']\n except KeyError: raise SystemExit(error('secret_key'))\n app = Flask(app_name)\n app.secret_key = secret_key\n\n ## set flask specific things that are optional\n if 'flask.template_path' in self:\n app.jinja_loader = FileSystemLoader(self['template_path'])\n if 'flask.before_request' in self:\n before_request = self['flask.before_request']\n before_request = namedAny(before_request)\n app.before_request(before_request)\n if 'flask.after_request' in self:\n after_request = self['flask.after_request']\n after_request = namedAny(after_request)\n app.after_request(after_request)\n\n ## setup views\n try: view_holder = self['corkscrew.views']\n except KeyError:\n error = 'Fatal: could not \"view=<dotpath>\" entry in your .ini file'\n raise SystemExit(error)\n else:\n view_list = namedAny(view_holder)\n [ v(app=app, settings=self) for v in view_list]\n\n return app", "def app(request) -> Application:\n global fixture\n if fixture is None:\n fixture = WebApplication()\n return fixture", "def create_app(settings_override=None):\n app = factory.create_app(__name__, __path__, settings_override)\n\n Bootstrap(app)\n admin.init_app(app)\n filters.init_app(app)\n Sentry(app)\n\n if not app.debug:\n for e in (404, 500):\n app.errorhandler(e)(handle_error)\n\n return app", "def create_app():\n\n app = FastAPI()\n add_root_route(app)\n\n return app", "def make_velruse_app(global_conf, **settings):\n return make_app(**settings)", "def _engine_of(*, application: Application) -> Flask:\n return application()", "def create_app():\n app = Flask(__name__)\n\n app.config.from_pyfile('../settings.py')\n\n app.register_blueprint(layout_bp, url_prefix='/layouts')\n app.register_blueprint(sheet_bp, url_prefix='/sheets')\n app.register_blueprint(user_bp, url_prefix='/users')\n\n db.init_app(app)\n ma.init_app(app)\n migrate.init_app(app)\n login_manager.init_app(app)\n\n return app", "def make_app(global_conf, full_stack=True, static_files=True, **app_conf):\n # Configure the Pylons environment\n load_environment(global_conf, app_conf)\n\n # The Pylons WSGI app\n app = PylonsApp()\n\n # Routing/Session/Cache Middleware\n app = RoutesMiddleware(app, config['routes.map'])\n app = SessionMiddleware(app, config)\n app = CacheMiddleware(app, config)\n\n # CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)\n\n if asbool(full_stack):\n # Handle Python exceptions\n app = ErrorHandler(app, global_conf, **config['pylons.errorware'])\n\n # Display error documents for 401, 403, 404 status codes (and\n # 500 when debug is disabled)\n if 
asbool(config['debug']):\n app = StatusCodeRedirect(app)\n else:\n app = StatusCodeRedirect(app, [400, 401, 403, 404, 500])\n\n # Optionally suppress all Python warnings\n if not 'warnings' in config or not asbool(config['warnings']):\n warnings.simplefilter(\"ignore\")\n\n # Establish the Registry for this application\n app = RegistryManager(app)\n\n if asbool(static_files):\n # Serve static files\n static_app = StaticURLParser(\n config['pylons.paths']['static_files'],\n cache_max_age = int(config['app_conf']['expires.static']))\n app = Cascade([static_app, app])\n\n return app", "def create_app(env=\"production\"):\n app = Flask(__name__, static_url_path=\"/\")\n config_app(app, env=env)\n\n with app.app_context():\n Moment(app)\n init_db(app)\n enable_parser(app)\n register_route(app)\n register_blue(app)\n init_logger(app)\n init_scheduler(app)\n return app", "def get_app():\n\n global _app\n if _app is None:\n\n entrypoint = get_config().get('APP_ENTRYPOINT')\n if not entrypoint:\n raise RuntimeError('APP_ENTRYPOINT missing from config')\n module_name, attr_names = entrypoint.split(':')\n obj = __import__(module_name, fromlist=['.'])\n for attr_name in attr_names.split('.'):\n obj = getattr(obj, attr_name)\n _app = obj\n\n return _app", "def app(request) -> Flask:\n app = create_app()\n context = app.app_context()\n context.push()\n\n def teardown():\n context.pop()\n\n request.addfinalizer(teardown)\n return app", "def main(global_config, **settings):\n #import pdb; pdb.set_trace()\n config = Configurator(settings=settings)\n\n # logging config for pserve / wsgi\n if settings and 'logging_config_file' in settings:\n from pyramid.paster import setup_logging\n setup_logging(settings['logging_config_file'])\n\n from . import views\n config.include(views.do_view_config)\n config.scan('pelias.adapter.pyramid')\n\n # CORS -- might not make this call in production (eliminate a bit of overheads, as CORS is handled by Apache)\n if settings and settings.get('enable_cors_headers') == 'true':\n config.add_subscriber(app_utils.add_cors_headers_response_callback, NewRequest)\n\n return config.make_wsgi_app()", "def make_app(global_conf, full_stack=True, **app_conf):\n app = make_base_app(global_conf, full_stack=True, **app_conf)\n \n # Wrap your base TurboGears 2 application with custom middleware here\n from depot.manager import DepotManager\n app = DepotManager.make_middleware(app)\n\n return app", "def create_app(config='dev'):\n if config == 'dev':\n from .conf.config import DevelopmentConfig as dev_config\n app = configure_app(Flask(__name__), dev_config)\n else:\n from .conf.config import ProdConfig\n app = configure_app(Flask(__name__), ProdConfig)\n\n # setup flask blueprints\n configure_blueprints(app)\n\n return app", "def make_server() -> Flask:\n app: Flask = Flask(__name__)\n return app", "def create_app():\n logging.basicConfig(\n level=REANA_LOG_LEVEL,\n format=REANA_LOG_FORMAT\n )\n app = Flask(__name__)\n app.config.from_object('reana_server.config')\n app.secret_key = \"hyper secret key\"\n\n # Register API routes\n from .rest import ping, secrets, users, workflows # noqa\n app.register_blueprint(ping.blueprint, url_prefix='/api')\n app.register_blueprint(workflows.blueprint, url_prefix='/api')\n app.register_blueprint(users.blueprint, url_prefix='/api')\n app.register_blueprint(secrets.blueprint, url_prefix='/api')\n\n app.session = Session\n CORS(app)\n return app", "def main(global_config, **settings):\n SETTINGS = settings\n config = Configurator(settings=settings,)\n 
config.include('pyramid_jinja2')\n config.include('.models')\n config.include('.cors')\n config.add_cors_preflight_handler()\n config.include('.routes')\n config.include('.security')\n config.include('..greggo')\n config.add_static_view('static', path='repoll:static')\n config.scan()\n return config.make_wsgi_app()", "def _make_core_app():\n app = web.Application(middlewares=[middleware.error_middleware])\n management_routes.setup(app, is_core=True)\n return app", "def app(self):\n if self._application is None:\n self._application = Application(client=self)\n return self._application", "def create_app(settings_override=None):\n app = Flask(__name__, instance_relative_config=True)\n\n app.config.from_object('config.settings')\n app.config.from_pyfile('settings.py', silent=True)\n\n # set only during the testing\n if settings_override:\n app.config.update(settings_override)\n\n app.register_blueprint(page)\n extentions_init(app)\n\n return app", "def create_app(config_name):\n app = Flask(__name__)\n # create app instance\n app.config.from_object(config_by_name[config_name])\n flask_bcrypt.init_app(app)\n\n CORS(app)\n\n routes.init_routes(app)\n\n return app", "def create_app(settings_override: Optional[object] = None):\n cwd = os.path.dirname(os.path.abspath(__file__))\n package_path = [cwd]\n\n app = factory.create_app(\n __name__,\n package_path,\n settings_override,\n )\n setup_jinja_env(app)\n\n # Register custom error handlers\n if not app.debug:\n for e in [500, 404]:\n app.errorhandler(e)(handle_error)\n\n return app", "def create_app():\n\n # Create app\n app = Flask(__name__)\n app.config.from_object(\"nextbus.config.Config\")\n\n app.logger = logger.app_logger\n # Load logging configuration and log initial configuration\n logger.load_config(app)\n\n # Initialise SQLAlchemy and Migrate in app\n db.init_app(app)\n migrate.init_app(app, db)\n\n # Adding app, db and model objects to flask shell\n from nextbus import models\n app.shell_context_processor(\n lambda: {\"app\": app, \"db\": db, \"models\": models}\n )\n\n from nextbus.converters import add_converters\n add_converters(app)\n\n from nextbus.views import page\n from nextbus.resources import api\n app.register_blueprint(page)\n app.register_blueprint(api)\n\n return app", "def create_app():\n app = Flask(__name__)\n\n # Load application settings\n settings = os.environ.get(\"FLASK_SETTINGS\", SETTINGS)\n if settings is not None:\n c = Config(settings)\n print(c)\n app.config.update(c.get_map('flask'))\n\n from users.views import user\n # Register the blueprints to app\n app.register_blueprint(user)\n\n db.init_app(app)\n\n return app", "def create_app(self):\n app.config.from_object('config.TestingConfig')\n return app", "async def create_app(dev: bool = True) -> web.Application:\n app = web.Application()\n setup_jinja2(app)\n routes_setup(app, dev)\n if not dev:\n # not a development server - we need to run directly\n parser = argparse.ArgumentParser()\n parser.add_argument('--port', default=4000)\n parser.add_argument('--host', default='0.0.0.0')\n parser.add_argument('--path', default='/tmp/navigation_editor')\n server_cfg = vars(parser.parse_args())\n web.run_app(\n app,\n host=server_cfg['host'],\n port=int(server_cfg['port']),\n path=server_cfg['path'])\n return app", "def create_app():\n app = Flask(\n __name__,\n instance_relative_config=False,\n )\n app.config.from_object('config.Config')\n\n with app.app_context():\n # CORS\n CORS(app)\n\n # JWT & BCRYPT\n from .utils.auth import init_auth\n init_auth(app)\n\n # 
DB\n from .utils.db import db\n db.init_app(app)\n\n # Mail\n from .utils.mail.service import mail\n mail.init_app(app)\n app.extensions['mail'].debug = 0 # No logging\n\n # Jobs\n from .utils.scheduler import start_jobs\n start_jobs(app)\n\n # Import routes\n from .routes import (\n admin, users, files,\n suprema,\n b_locals, b_federals)\n\n app.register_blueprint(admin.bp)\n app.register_blueprint(users.bp)\n app.register_blueprint(files.bp)\n app.register_blueprint(suprema.bp)\n app.register_blueprint(b_locals.bp)\n app.register_blueprint(b_federals.bp)\n\n return app", "def _build_flask_app(self, name):\n app = Flask(name)\n app.add_url_rule('/ping', 'healthcheck', self._healthcheck)\n app.add_url_rule('/invocations', 'invoke', self._invoke, methods=[\"POST\"])\n app.register_error_handler(Exception, self._default_error_handler)\n return app", "def create_app(config: dict) -> Flask:\n for key, value in config.items():\n app.config[key] = value\n db.init_app(app)\n ma.init_app(app)\n app.app_context().push()\n return app", "def create_app(config='dev'):\n config_object = {'dev': DevConfig, 'test': TestConfig}[config]\n\n app = Flask(__name__)\n app.config.from_object(config_object)\n\n if app.config.get('PROFILE'):\n from werkzeug.contrib.profiler import ProfilerMiddleware\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])\n\n configure_log(app)\n configure_database(app)\n configure_json(app)\n configure_converters(app)\n\n register_extensions(app)\n register_blueprints(app)\n\n log.info(\"%s loaded with %s configuration\", bright(\"ups\"), bright(config))\n\n return app", "def get_app(self):\n app.init_options()\n return app.NBViewer().tornado_application", "def create_app(config_name):\n\n app = Flask(__name__)\n app.config.from_object(config_by_name[config_name])\n CORS(app)\n mongo.init_app(app)\n app.register_blueprint(check_bp)\n\n return app", "def main(gloabl_config, **settings):\n if os.environ.get('DATABASE_URL', ''):\n settings['sqlalchemy.url'] = os.environ['DATABASE_URL']\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.include('.models')\n config.include('.routes')\n config.include('.security')\n config.scan()\n return config.make_wsgi_app()", "def init_app():\r\n LOG.info('Initialising web server.')\r\n app = web.Application(middlewares=[api_key()])\r\n app.router.add_routes(routes)\r\n set_cors(app)\r\n app.on_startup.append(init_db)\r\n app.on_cleanup.append(close_db)\r\n return app", "def app():\n app = create_app(schema=create_schema())\n with app.test_request_context():\n yield app", "def create_app() -> Flask:\r\n app = Flask(__name__.split('.')[0])\r\n init_config(app)\r\n app.register_blueprint(observer)\r\n app.teardown_appcontext(close_db)\r\n app.cli.add_command(init_db)\r\n\r\n return app", "def app(self):\n return self._app", "def create_app():\n\n ###############################\n # Create a FLASK application\n app = Flask(__name__)\n # Note: since the app is defined inside this file,\n # the static dir will be searched inside this subdirectory\n\n ###############################\n # Apply configuration\n app.config.from_object(CONFIG_MODULE + '.MyConfig')\n logger = get_logger(__name__, False) # app.config['DEBUG'])\n\n ###############################\n # # Cache\n # # http://flask.pocoo.org/docs/0.10/patterns/caching/#setting-up-a-cache\n # from werkzeug.contrib.cache import SimpleCache\n # cache = SimpleCache()\n\n # ###############################\n # # Database\n # db.init_app(app)\n\n # 
###############################\n # # Application context\n # with app.app_context():\n # db.create_all()\n # logger.info(\"Initialized Database\")\n\n # ###############################\n # Add basic things to this app\n app.register_blueprint(cms)\n\n ###############################\n # Flask LOGIN\n lm.init_app(app)\n lm.login_view = '.login'\n\n # Logging\n @app.after_request\n def log_response(resp):\n\n log = logger.debug\n if resp.status_code == hcodes.HTTP_NOT_MODIFIED:\n log = logger.debug\n\n if 'static/' not in req.url and '/js/' not in req.url:\n log = logger.info\n\n from commons.logs import obscure_passwords\n log(\"{} {} {} {}\".format(\n req.method, req.url,\n obscure_passwords(req.data), resp))\n return resp\n\n return app", "def create_local_app():\n app = create_app()\n app.test_request_context().push()\n return app", "def make_app():\n app = flask.Flask('sahara.api')\n\n @app.route('/', methods=['GET'])\n def version_list():\n context.set_ctx(None)\n return api_utils.render({\n \"versions\": [\n {\"id\": \"v1.0\", \"status\": \"CURRENT\"}\n ]\n })\n\n @app.teardown_request\n def teardown_request(_ex=None):\n context.set_ctx(None)\n\n app.register_blueprint(api_v10.rest, url_prefix='/v1.0')\n app.register_blueprint(api_v10.rest, url_prefix='/v1.1')\n app.register_blueprint(api_v11.rest, url_prefix='/v1.1')\n\n def make_json_error(ex):\n status_code = (ex.code\n if isinstance(ex, werkzeug_exceptions.HTTPException)\n else 500)\n description = (ex.description\n if isinstance(ex, werkzeug_exceptions.HTTPException)\n else str(ex))\n return api_utils.render({'error': status_code,\n 'error_message': description},\n status=status_code)\n\n for code in six.iterkeys(werkzeug_exceptions.default_exceptions):\n app.error_handler_spec[None][code] = make_json_error\n\n if CONF.debug and not CONF.log_exchange:\n LOG.debug('Logging of request/response exchange could be enabled using'\n ' flag --log-exchange')\n\n if CONF.log_exchange:\n app.wsgi_app = log_exchange.LogExchange.factory(CONF)(app.wsgi_app)\n\n app.wsgi_app = auth_valid.wrap(app.wsgi_app)\n app.wsgi_app = acl.wrap(app.wsgi_app)\n\n return app", "def create_for_debug():\n\n app = App()\n return app.flask_app", "def testapp(request):\n from webtest import TestApp\n\n def main(global_config, **settings):\n \"\"\"The function returns a Pyramid WSGI application.\"\"\"\n # add settings in here?\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.include('.models')\n config.include('.routes')\n config.include('.security')\n config.scan()\n return config.make_wsgi_app()\n\n app = main({}, **{\n 'sqlalchemy.url': 'postgres://clairegatenby@localhost:5432/test_lj'\n })\n testapp = TestApp(app)\n\n session_factory = app.registry[\"dbsession_factory\"]\n engine = session_factory().bind\n # Base.metadata.drop_all(engine) # replace with teardown\n Base.metadata.create_all(bind=engine)\n\n def tear_down():\n Base.metadata.drop_all(bind=engine)\n\n request.addfinalizer(tear_down)\n\n return testapp", "def create_app(test_config=None):\n app = Flask(__name__)\n\n # apply the blueprints to the app\n from app import common\n\n app.register_blueprint(common.bp)\n\n # default url for site\n app.add_url_rule(\"/\", endpoint=\"index\")\n\n return app", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include('pyramid_chameleon')\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('home', '/')\n config.add_route('about', '/about')\n 
config.add_route('calendar', '/calendar')\n config.add_route('project', '/project')\n config.add_route('media', '/media')\n config.add_route('hackerspace', '/hackerspace')\n config.add_route('classes', '/classes')\n config.add_route('workshops', '/workshops')\n config.scan()\n\n db_url = urlparse(settings['mongo_uri'])\n config.registry.db = MongoClient(settings['mongo_uri'])\n\n def get_db(request):\n db = config.registry.db[db_url.path[1:]]\n return db\n config.add_request_method(get_db, 'db', reify=True)\n return config.make_wsgi_app()", "def app(environ, start_response):\n status = '200 OK'\n response_headers = [('Content-Type', 'text/plain')]\n start_response(status, response_headers)\n return ['Hello world from a simple WSGI application!\\n']", "def create_app(config=Config):\r\n # Initialise app and configuration\r\n app = Flask(__name__)\r\n app.config.from_object(config)\r\n\r\n\r\n # Initialise flask plugins\r\n db.init_app(app)\r\n api.init_app(app)\r\n ma.init_app(app)\r\n login.init_app(app)\r\n migrate.init_app(app, db)\r\n register_api(api)\r\n\r\n\r\n return app", "def create_app():\n app = FastAPI()\n configure_rest_server(app=app, router_configs=WEB_SERVICES_ROUTER_CONFIGS, db_configs=DB_CONFIGS)\n return app", "def buildSite():\n flaskResource = wsgi.WSGIResource(reactor, reactor.getThreadPool(), app)\n root = ChildrenFirstResource(flaskResource)\n root.putChild(\"sockjs\", factory.SockJSResource(chat.ChatFactory()))\n return server.Site(root)", "def application(env, start):\n\n global application\n config = read_config()\n script_dir = os.path.dirname(os.path.abspath(__file__))\n if config.get('pages_path') is None:\n config.set('pages_path', os.path.join(script_dir, 'docs'))\n wiki = Wiki(config)\n application = wiki.application\n return application(env, start)", "def app(self):\n return self.wrapped_app.app", "def get_app(tornado_port,\n redis, capacity, error_rate, filter_sync_secs,\n serve_index=False, template_path=None, index_template_kwargs=None,\n close_future_gen=None, **app_kwargs):\n logging.getLogger(\"%s.get_app\" % __name__).info(\"building app:\\n%s\", pprint.pformat(locals()))\n\n node = ChatNode(redis,\n capacity,\n error_rate)\n\n handlers = [\n (r'/favicon.ico', FaviconHandler),\n (r'/peerjs', ChatHandler, {\n 'close_future_gen': close_future_gen,\n 'node': node,\n }),\n (r'/peerjs/id', GetCidHandler, {'node': node}),\n ]\n\n if serve_index:\n index_handler_kwargs = {}\n index_handler_kwargs['template_path'] = template_path\n index_handler_kwargs.update(index_template_kwargs)\n handlers.append((r'/', TestIndexHandler, index_handler_kwargs))\n\n app = web.Application(handlers, **app_kwargs)\n\n sync_callback = ioloop.PeriodicCallback(\n node.sync_filter_from_redis, filter_sync_secs * 1000,\n io_loop=ioloop.IOLoop.instance()\n )\n\n for fieldname in ['_id_node', '_sync_periodic_calback', 'start']:\n # Make sure we're not accidentally overriding anything.\n assert not hasattr(app, fieldname), \"%r has field %s\" % (app, fieldname)\n\n app._id_node = node\n app._sync_periodic_callback = sync_callback\n\n def start(self):\n self._sync_periodic_callback.start()\n ioloop.IOLoop.instance().start()\n\n app.start = MethodType(start, app)\n\n return app", "def create_app(config: str) -> Flask:\n api = FlaskApp(__name__, specification_dir=Path() / \"swagger\")\n api.add_api(\"swagger.yml\")\n\n # Get `Flask` object\n app = api.app\n\n app.config.from_object(config)\n app.register_blueprint(site.mod)\n\n db.init_app(app)\n\n return app", "def app():\n _app = 
create_app(TestConfig)\n ctx = _app.test_request_context()\n ctx.push()\n\n yield _app\n\n ctx.pop()", "def app():\n _app = create_app(TestConfig)\n ctx = _app.test_request_context()\n ctx.push()\n\n yield _app\n\n ctx.pop()", "def app():\n _app = create_app(TestConfig)\n ctx = _app.test_request_context()\n ctx.push()\n\n yield _app\n\n ctx.pop()", "def create_app(app_name: str):\n\n app = Flask(app_name)\n app.json_encoder = CustomJSONEncoder\n\n app.config.update({\n 'SQLALCHEMY_DATABASE_URI': build_db_uri(),\n 'SQLALCHEMY_TRACK_MODIFICATIONS': os.environ.get('SQLALCHEMY_TRACK_MODIFICATIONS', False),\n 'APP_CONFIG': {\n 'HOSTNAME': os.environ.get('HOSTNAME', ''),\n 'GREETING': os.environ.get('GREETING', 'Hello'),\n }\n })\n\n db.init_app(app)\n api = Api(app)\n\n with app.app_context():\n api.add_resource(Index, '/')\n api.add_resource(Config, '/config')\n api.add_resource(StudentMany, '/student')\n api.add_resource(StudentOne, '/student/<int:student_id>')\n return app", "def app() -> Generator:\n app = create_app({\"TESTING\": True})\n\n yield app" ]
[ "0.78450423", "0.78450423", "0.77413297", "0.770514", "0.7661694", "0.75318485", "0.7400592", "0.72658414", "0.7241545", "0.72377515", "0.71705705", "0.71670246", "0.714194", "0.71155995", "0.7101749", "0.7096859", "0.70582855", "0.7028316", "0.7028151", "0.70058817", "0.7002921", "0.69999254", "0.6990775", "0.6967048", "0.69353527", "0.6883265", "0.686569", "0.68322194", "0.6816643", "0.6788993", "0.6782546", "0.675499", "0.67470896", "0.674006", "0.6738444", "0.67337865", "0.6712036", "0.66991425", "0.6697074", "0.6691394", "0.6682483", "0.6670322", "0.66685766", "0.6648291", "0.6647813", "0.66443276", "0.6617829", "0.6611569", "0.66093457", "0.66068953", "0.6593158", "0.6590156", "0.6583935", "0.65768504", "0.65730745", "0.65686554", "0.6568581", "0.65661293", "0.6563025", "0.6546479", "0.65462345", "0.6543831", "0.65423185", "0.65355325", "0.6532873", "0.65291315", "0.6518338", "0.6513816", "0.6498208", "0.64978564", "0.6493244", "0.6486645", "0.6480084", "0.64729214", "0.6470566", "0.64699656", "0.6467924", "0.6460404", "0.64583355", "0.64579594", "0.6457855", "0.64503753", "0.64365494", "0.6430562", "0.64267653", "0.6414446", "0.64001745", "0.63994", "0.6385555", "0.6384391", "0.638383", "0.63802326", "0.63783824", "0.63782597", "0.63731164", "0.6369701", "0.6369653", "0.6369653", "0.6369653", "0.6368059", "0.6366396" ]
0.0
-1
Create a BDT with some trees
def __init__(self, *items, **kw): self.dTrees = [] self.ntrees = 50 # TODO self.beta = 0.5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_tree():\n root = TreeNode(5)\n root.left = TreeNode(3)\n root.right = TreeNode(8)\n root.left.left = TreeNode(2)\n root.left.right = TreeNode(4)\n root.right.left = TreeNode(7)\n return root", "def _gen_test_tree_6():\n tree = BinaryNode(20)\n tree.left = BinaryNode(10)\n tree.right = BinaryNode(30)\n tree.left.right = BinaryNode(25)\n return tree", "def _gen_test_tree_1():\n tree = BinaryNode(5)\n tree.left = BinaryNode(5)\n return tree", "def _gen_test_tree_2():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.left.left = BinaryNode(1)\n tree.left.right = BinaryNode(4)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n tree.right.right.right = BinaryNode(9)\n return tree", "def create_tree():\n\tdtree = {}\n\n\tdtree['stats'] = None\n\tdtree['libs'] = {'fort':\n\t\t\t\t\t\t\t {'integers': None, 'floats': None, 'data': None}\n\t\t\t\t\t ,'cache blocking': None}\n\tdtree['grid'] = {'size' :\n\t\t\t\t\t\t\t {'nxgb': None, 'nygb': None, 'nzgb': None}\n\t\t\t\t\t ,'geom' :\n\t\t\t\t\t\t\t {'Lx' : None, 'Ly' : None, 'Lz' : None\n\t\t\t\t\t\t\t ,'dx' : None, 'dy' : None, 'dz' : None\n\t\t\t\t\t\t\t ,'x' : None, 'y' : None, 'z' : None\n\t\t\t\t\t\t\t ,'xloc': None, 'yloc': None, 'zloc': None}}\t\t\t\t\t\t\t \t\t\t\t\t \t\t \n\tdtree['eqns'] = {'qvec' : \n\t\t\t\t\t\t\t {'nvars': None, 'solved': None, 'stored': None, 'views': None}\n\t\t\t\t\t ,'coeff': None\n\t\t\t\t\t ,'time' : None\n\t\t\t\t\t ,'ndim' : None}\n\n\tdtree['misc'] = {'verbose': None, 'working precision': None}\n\tdtree['mpi'] = {'split': \n\t\t\t\t\t\t\t {'nxpr': None, 'nypr': None, 'nzpr': None}\n\t\t\t\t\t ,'dMpi' : None}\t\t \t\n\tdtree['num'] = {'hlo' : None\n\t\t\t\t\t ,'deriv': \n\t\t\t\t\t\t\t {'order': None, 'stencil': None, 'hlo': None} \n\t\t\t\t\t ,'filtr': \n\t\t\t\t \t {'order': None, 'stencil': None, 'hlo': None,'eps': None}\n\t\t\t\t\t ,'tint' : \n\t\t\t\t\t {'tstep': None, 'cfl': None, 'itn': None}}\n\n\tdtree['bc'] = {'wall': \n\t\t\t\t\t\t\t {'isoT': None, 'zeroQ': None, 'slip': None}}\t\n\n\tdtree['usr'] = None\n\tdtree['ios'] = None\n\t\n\tfrom rhsinfo import dim, stencil, order, coefficients, varname, varsolved, varstored, varbc, wp,hlo_rhs\n\n\tdtree['eqns']['qvec']['solved'] = []\n\tdtree['eqns']['qvec']['stored'] = []\n\tdtree['eqns']['qvec']['bcs'] = {'face':{'i' :[],'j' :[],'k' :[]},\n\t\t\t\t\t\t\t\t\t 'edge':{'ij':[],'jk':[],'ik':[]}}\n\n\tfor v in varsolved:\n\t\tdtree['eqns']['qvec']['solved'].append([v,varname[v]])\t\n\n\tfor v in varstored:\n\t\tdtree['eqns']['qvec']['stored'].append([v,varstored[v]['ind']])\n\n\tfor v in varbc:\n\t\tfor bcloc in ['face','edge']:\n\t\t\tif bcloc in varbc[v]:\n\t\t\t\tloctype = ''.join(sorted(varbc[v][bcloc].replace('1','').replace('max','')))\n\t\t\t\tdtree['eqns']['qvec']['bcs'][bcloc][loctype].append([v,varbc[v]['ind']])\n\n\n\tdtree['eqns']['coeff'] = []\n\tfor v in coefficients:\n\t\tdtree['eqns']['coeff'].append([v,coefficients[v]])\t\n\n\tdtree['eqns']['qvec']['nvars'] = len(varname)#+len(dtree['eqns']['qvec']['stored'])\t\n\tdtree['num']['deriv']['stencil'] = stencil\n\tdtree['num']['deriv']['hlo'] = hlo_rhs #int((stencil-1)/2)\n\tdtree['num']['deriv']['order'] = order\n\t\n\t# if dtree['num']['filtr']['hlo'] != None:\n\t# \tdtree['num']['hlo'] = max(dtree['num']['deriv']['hlo'],dtree['num']['filtr']['hlo'])\n\t# else:\n\t# \tdtree['num']['hlo'] = dtree['num']['deriv']['hlo']\t\n \n\tdtree['num']['hlo'] = hlo_rhs\t\t\t\n\t\n\tdtree['eqns']['ndim'] 
= dim\n\tdtree['misc']['working precision'] = wp\n\tdtree['misc']['verbose'] = True\n\n\t# dtree['libs']['cache blocking'] = [256,2,6] # good for 11 pts 3D, div. forme of N.S.\n\t\n\tdtree['libs']['cache blocking'] = [2560000,2,6]\n\n\t# recover BCs info:\n\t\n\ttry:\n\t from rhsinfo import bc_info\n\texcept: \n\t\tbc_info = [{},{}]\n\n\tdtree['bc']\t = {'allbc':bc_info[1],'mybc':[]} # OVERWRITE predefined 'bc' key.\n\n\treturn dtree", "def _gen_test_tree_3():\n tree = BinaryNode(5)\n tree.left = BinaryNode(1)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(3)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(8)\n tree.right.right = BinaryNode(9)\n return tree", "def _gen_test_tree_4():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(10)\n tree.right = BinaryNode(9)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n return tree", "def makeTTree():\n \n tree = TTree(\"tree\",\"tree\")\n px = array('d',[0])\n py = array('d',[0])\n pz = array('d',[0])\n pi = array('i',[0])\n tree.Branch(\"x\",px,\"x/D\")\n tree.Branch(\"y\",py,\"y/D\")\n tree.Branch(\"z\",pz,\"y/D\")\n tree.Branch(\"i\",pi,\"y/I\")\n for i in range(500):\n px[0] = gRandom.Gaus(0,3)\n py[0] = gRandom.Uniform()*30 - 15\n pz[0] = gRandom.Gaus(0,5)\n pi[0] = i%3\n tree.Fill()\n return tree", "def new_branch_tree(tree, ids):\n branch_tree = {}\n branch_tree[\"selftext\"] = tree[\"selftext\"]\n branch_tree[\"title\"] = tree[\"title\"]\n branch_tree[\"id\"] = tree[\"id\"]\n branch_tree[\"comments\"] = {}\n for id in ids[1:]:\n branch_tree[\"comments\"][id] = tree[\"comments\"][id]\n return branch_tree", "def _gen_test_tree_5():\n tree = BinaryNode(30)\n tree.right = BinaryNode(30)\n return tree", "def make_trees(self):\n self.trees = build_recursively_from_cells(self.cells, container=self)\n# self.trees = []\n# for cell in self.cells:\n# if cell.bpointer is None: # test whether cell is root\n# tree = Colony(container=self)\n# tree.add_cell_recursive(cell)\n# self.trees.append(tree)\n return", "def _initialize_trees(self):", "def make_tree(self, columns, nt):\n # print('Top tree item:', nt)\n tree, end = self.walk(columns, len(columns) - 1, nt)\n assert end == 0\n return tree", "def __make_tree(self, wd, root=\"d1\", create=True):\n d1 = \"%s/%s\" % (wd, root)\n t1 = FSTree(d1)\n d2 = \"%s/d2\" % d1\n t2 = t1.add(d2)\n if create:\n hdfs.mkdir(d2)\n for t, d, bn in ((t1, d1, \"f1\"), (t2, d2, \"f2\")):\n f = \"%s/%s\" % (d, bn)\n if create:\n hdfs.dump(self.data, f, mode=\"wb\")\n t.add(f, 0)\n return t1", "def binary_tree():\n\n class Node(object):\n def __init__(self, data):\n self.left = None\n self.right = None\n self.data = data\n\n # Create a root\n root = Node(data=1)\n root.left = Node(data=2)\n root.right = Node(data=3)\n root.left.left = Node(data=4)\n \"\"\" Structure\n 1 <-- root\n / \\\n 2 3 \n / \n 4\n \"\"\"", "def tree():\n return defaultdict(tree)", "def build_UNIST_tree():\n root = LinkedBinaryTree()", "def build(cls, m, data):\n # TODO\n data = sorted(data, key=lambda x: x[0]) # Sort pairs by key.\n nodes = {} # Holds nodes and they governing value as key.\n\n while True:\n # Split into chunks of size m\n chunks = [data[i:i+m] for i in range(0, len(data), m)]\n data = []\n for chunk in chunks:\n parent = chunk.pop()\n data.append(parent)\n node = BTreeNode(m)\n node.keys = map(lambda i: i[0], chunk)\n node.values = map(lambda i: i[0], chunk)\n nodes[parent[0]] = node", "def build():\n # root = 
TreeNode(5)\n # root.left = TreeNode(2)\n # root.right = TreeNode(7)\n # return root\n\n \"\"\"\n 5\n / \\\n 2 6\n / \\\n 1 3\n [5,2,1,3,6]\n \"\"\"\n _5 = TreeNode(5)\n _2 = TreeNode(2)\n _6 = TreeNode(6)\n _1 = TreeNode(1)\n _3 = TreeNode(3)\n _5.left = _2\n _5.right = _6\n _2.left = _1\n _2.right = _3\n return _5", "def _buildTree(data, labels, features, tree=None) -> Tree:\n if tree is None:\n tree = Tree()\n\n if len(data) == 0:\n tree.addNode(Leaf(None, None), None)\n return tree\n labelsCount: dict = {lbl: len(labels[labels == lbl]) for lbl in set(labels)}\n if len(labelsCount) == 1:\n tree.addNode(Leaf(None, None, labels[0]), None)\n return tree\n if len(features) == 0:\n return tree\n\n best = Node(DecisionTree._chooseBestFeatures(data, labels, features))\n tree.addNode(best, None)\n\n data = np.delete(data, features.index(best.data), axis=1)\n features.remove(best.data)\n\n subFeatIdx = best.data.getRowIdxSubFeatures()\n for idx, subFeat in enumerate(best.data.subFeatures):\n if subFeat.getEntropy() > 0 and len(features) > 0:\n subNode = SubNode(subFeat, best)\n tree.addNode(subNode, subNode.parent)\n\n features_i = deepcopy(features)\n for jdx, feat in enumerate(features_i):\n feat.setData(np.column_stack((data[subFeatIdx[idx], jdx], labels[subFeatIdx[idx]])))\n\n tree.attachTree(DecisionTree._buildTree(data[subFeatIdx[idx], :], labels[subFeatIdx[idx]],\n features_i, None), subNode)\n else:\n leaf = Leaf(subFeat, best, subFeat(), subFeat.getOutLabel())\n tree.addLeaf(leaf, best)\n return tree", "def build():\n root = TreeNode(3)\n root.left = TreeNode(2)\n root.right = TreeNode(4)\n root.right.right = TreeNode(7)\n root.right.left = TreeNode(5)\n return root", "def make(self, tree):\n if type(tree) != type(()):\n return tree\n return Tree(tree[0], [self.make(tree[1]), self.make(tree[2])])", "def add_trees(t1, t2):\n \"*** YOUR CODE HERE ***\"\n # offical solution:\n if not t1:\n return t2\n if not t2:\n return t1\n new_label = label(t1) + label(t2)\n t1_children, t2_children = branches(t1), branches(t2)\n length_t1, length_t2 = len(t1_children), len(t2_children)\n if length_t1 < length_t2:\n t1_children += [None for _ in range(length_t1, length_t2)]\n elif len(t1_children) > len(t2_children):\n t2_children += [None for _ in range(length_t2, length_t1)]\n return tree(new_label, [add_trees(child1, child2) for child1, child2 in zip(t1_children, t2_children)])", "def buildTree(self, tree=None):\n if tree is None:\n tree = phylogeny.Tree()\n tree.buildFromString(self.newick, False) # tree descriptions from NCL 2.1 are 1-based not 0-based\n return tree", "def __init__(self):\n Tree.__init__(self, \"\")", "def _build_trees(tree, forest, X, Y, sample_weight, tree_idx, n_trees,\n n_samples_bootstrap=None):\n # Initialize the number of samples input data\n n_samples = X.shape[0]\n\n # If the samples are drawn with replacement, then,\n # weight the sample weights by the number of times\n # that each sample appears on the indexes\n if forest.bootstrap:\n # Check the sample weights, initializing them to an\n # uniform distribution if they are not provided and,\n # if provided, copying them to properly weight the\n # samples according to the bootstrap indexes\n if sample_weight is None:\n curr_sample_weight = np.ones(n_samples, dtype=np.float64)\n else:\n curr_sample_weight = np.array(sample_weight, dtype=np.float64)\n # Obtain the sample weights\n # from to the bootstrap indexes\n indexes = _generate_sample_indexes(tree.random_state, n_samples,\n n_samples_bootstrap)\n sample_counts = 
np.bincount(indexes, minlength=n_samples)\n curr_sample_weight *= sample_counts\n # Fit the estimator using the sample weight\n # obtained from the bootstrap indexes\n tree.fit(X, Y, curr_sample_weight)\n # Otherwise, directly use the sample\n # weight provided in the fit method\n else:\n tree.fit(X, Y, sample_weight)\n\n # Return the built tree\n return tree", "def balanced_tree(ordered):\n bt = BinaryTree()\n\n add_range(bt, ordered, 0, len(ordered)-1)\n\n return bt", "def build_tree(self):\n stack = []\n self._handle_solo_node_case()\n while self.root_hash == None:\n if len(stack) >= 2 and stack[-1].height == stack[-2].height:\n mom = stack.pop()\n dad = stack.pop()\n child_hash = self.sha256Sum(mom.hash + dad.hash)\n child = self.Node(mom, dad, child_hash)\n self.node_table[child_hash] = child\n mom.child = child\n dad.child = child\n\n if child.height == self.max_height:\n self.root_hash = child.hash\n\n stack.append(child)\n elif len(self.leaves) > 0:\n leaf = self.leaves.pop()\n self.node_table[leaf.hash] = leaf\n stack.append(leaf)\n # Handle case where last 2 nodes do not match in height by \"graduating\"\n # last node\n else:\n stack[-1].height += 1\n self.is_built = True", "def __init__(self):\n self.tree = {}", "def make_drs_tree(self):\n pass", "def test_instantiate_six_nodes():\n input = [13, 42, 7, 3, 9, 99]\n six = BinaryTree(input)\n assert isinstance(six, BinaryTree)", "def test_binarytree_instantiate_list():\n input = [13, 42, 7]\n c = BinaryTree(input)\n assert isinstance(c, BinaryTree)", "def makeTree(node,baseName,baseAddress,nodes,parentNode,vars,isGenerated):\n \n if (isGenerated == None or isGenerated == False) and node.get('generate') is not None and node.get('generate') == 'true':\n generateSize = parseInt(node.get('generate_size'))\n generateAddressStep = parseInt(node.get('generate_address_step'))\n generateIdxVar = node.get('generate_idx_var')\n for i in range(0, generateSize):\n vars[generateIdxVar] = i\n makeTree(node, baseName, baseAddress + generateAddressStep * i, nodes, parentNode, vars, True)\n return\n newNode = Node()\n name = baseName\n if baseName != '': name += '.'\n if node.get('id') is not None:\n name += node.get('id')\n name = substituteVars(name, vars)\n newNode.name = name\n if node.get('description') is not None:\n newNode.description = node.get('description')\n address = baseAddress\n if node.get('address') is not None:\n address = baseAddress + parseInt(node.get('address'))\n newNode.address = address\n newNode.real_address = (address<<2)+0x64000000\n newNode.permission = node.get('permission')\n newNode.mask = parseInt(node.get('mask'))\n newNode.isModule = node.get('fw_is_module') is not None and node.get('fw_is_module') == 'true'\n if node.get('sw_monitor_warn_min_threshold') is not None:\n newNode.warn_min_value = node.get('sw_monitor_warn_min_threshold') \n if node.get('sw_monitor_error_min_threshold') is not None:\n newNode.error_min_value = node.get('sw_monitor_error_min_threshold') \n nodes[name] = newNode\n if parentNode is not None:\n parentNode.addChild(newNode)\n newNode.parent = parentNode\n newNode.level = parentNode.level+1\n for child in node:\n makeTree(child,name,address,nodes,newNode,vars,False)", "def copy_tree(t):\n return tree(label(t), [copy_tree(b) for b in branches(t)])", "def build():\n r = TreeNode(1)\n r.left = TreeNode(2)\n r.left.left = TreeNode(4)\n r.left.right = TreeNode(5)\n\n r.right = TreeNode(3)\n\n return r\n return TreeNode(3)", "def test_tree_binary_tree() -> None:\n t = 
generate_binary_tree_resources(4, 3)\n field(t, (\"root\", \"ds\", \"f1\")).identity = \"email\"\n field(t, (\"root.0.1.0\", \"ds.0.1.0\", \"f1\")).identity = \"ssn\"\n field(t, (\"root.1.1\", \"ds.1.1\", \"f1\")).identity = \"user_id\"\n assert generate_traversal({\"email\": \"X\"}, *t)\n assert generate_traversal({\"ssn\": \"X\"}, *t)\n assert generate_traversal({\"user_id\": \"X\"}, *t)", "def __init__(self, rootData=None, treetype=None):\n\t\tself._root = BTNode(rootData) if rootData else None\n\t\tself._size = 1 if rootData else 0\n\t\tself._treetype = treetype\n\n\t\t#Only for printing functionality\n\t\tself._depth = 1 if rootData else 0 #Root is at depth 1\n\t\tself._max_chars = len(str(rootData)) if rootData else 1 #max string length of data elements", "def build_tree(n, d, name=defaultname):\n return build_tree_helper(1, n, 1, d, name)", "def create_tree(self, tree):\n # print(self)\n if len(self.available_combinations()) > 1:\n comb1 = random.choice(self.available_combinations())\n comb2 = random.choice(self.available_combinations())\n\n if self.last_move == 5:\n next_move = 7\n else:\n next_move = 5\n\n # print(next_move)\n\n board1 = copy.deepcopy(self)\n board2 = copy.deepcopy(self)\n\n board1.board[comb1[0]][comb1[1]] = next_move\n board1.last_move = 7\n tree.insert_left(board1)\n board2.board[comb2[0]][comb2[1]] = next_move\n board2.last_move = 7\n tree.insert_right(board2)\n\n board1.create_tree(tree.get_left_child())\n board2.create_tree(tree.get_left_child())", "def __init__(self,tree):\n self._tree = tree", "def construct(lst):\n t = Tree()\n t.root = lst[0]\n for node in lst[1:]:\n if isinstance(node, list):\n t.nodes.append(construct(node))\n else:\n t.nodes.append(node)\n return t", "def build():\n\n root = Node(9)\n root.left = Node(6)\n root.left.left = Node(3)\n root.left.right = Node(8)\n root.left.right.left = Node(7)\n root.right = Node(14)\n root.right.left = Node(12)\n return root", "def construct_trees(self, nodes):\n trees = {}\n for root in tqdm.tqdm(nodes):\n # note that nodes is an uniquely ordered set\n # tree = {0: {0 : [nb_1, nb_2, ..., nb_k], nb_1: [0, ...]}, 1 : {1: [nb_1,...], nb_1 : [..]},...}\n trees[root] = {}\n trees[root][root] = [root]\n # print('test...', trees[root][root])\n used_nodes = set()\n # queue has the form as following queue([root] for root in tqdm.tqdm(nodes)\n # with each node, we construct the tree rooted at that node, denoted as queue(['root'])\n queue = collections.deque([root]) # deque([0]) -> deque([0,1])\n while len(queue) > 0:\n cur_node = queue.popleft()\n used_nodes.add(cur_node)\n for sub_node in self.graph[cur_node]:\n # sub_node is not ordered\n if sub_node not in used_nodes:\n trees[root][cur_node].append(sub_node)\n trees[root][sub_node] = [cur_node]\n queue.append(sub_node)\n used_nodes.add(sub_node)\n return trees", "def build_tree(rows: list) -> DecisionNode or Leaf:\n info_gain, question = get_best_split(rows)\n\n # If no info is gained just return a leaf node with remaining rows\n if info_gain == 0:\n return Leaf(rows)\n\n true_rows, false_rows = partition(rows, question)\n false_branch = build_tree(false_rows)\n true_branch = build_tree(true_rows)\n return DecisionNode(question, rows, true_branch, false_branch)", "def __init__(self, tree):\n self._tree = tree", "def __init__(self, tree):\n self._tree = tree", "def add_trees(t1, t2):\n \"*** YOUR CODE HERE ***\"\n if not t1:\n return t2 # Could replace with copy_tree(t2)\n if not t2:\n return t1 # Could replace with copy_tree(t1)\n new_entry = t1.root + 
t2.root\n t1_branches, t2_branches = list(t1.branches), list(t2.branches)\n length_t1, length_t2 = len(t1_branches), len(t2_branches)\n if length_t1 < length_t2:\n t1_branches += [None for _ in range(length_t1, length_t2)]\n elif length_t1 > length_t2:\n t2_branches += [None for _ in range(length_t2, length_t1)]\n return Tree(new_entry, [add_trees(branch1, branch2) for branch1, branch2 in zip(t1_branches, t2_branches)])", "def __build_test_model_children_tree_1(self) -> Model:\n self.model_builder.clear()\n\n r_a = SystemFile(\"a\", 1024, True)\n r_aa = SystemFile(\"aa\", 512, False)\n r_a.add_child(r_aa)\n r_ab = SystemFile(\"ab\", 512, False)\n r_a.add_child(r_ab)\n r_b = SystemFile(\"b\", 3090, True)\n r_ba = SystemFile(\"ba\", 2048, True)\n r_b.add_child(r_ba)\n r_baa = SystemFile(\"baa\", 2048, False)\n r_ba.add_child(r_baa)\n r_bb = SystemFile(\"bb\", 42, True) # only in remote\n r_b.add_child(r_bb)\n r_bba = SystemFile(\"bba\", 42, False) # only in remote\n r_bb.add_child(r_bba)\n r_bd = SystemFile(\"bd\", 1000, False)\n r_b.add_child(r_bd)\n r_c = SystemFile(\"c\", 1234, False) # only in remote\n r_d = SystemFile(\"d\", 5678, True) # only in remote\n r_da = SystemFile(\"da\", 5678, False) # only in remote\n r_d.add_child(r_da)\n\n l_a = SystemFile(\"a\", 1024, True)\n l_aa = SystemFile(\"aa\", 512, False)\n l_a.add_child(l_aa)\n l_ab = SystemFile(\"ab\", 512, False)\n l_a.add_child(l_ab)\n l_b = SystemFile(\"b\", 1611, True)\n l_ba = SystemFile(\"ba\", 512, True)\n l_b.add_child(l_ba)\n l_baa = SystemFile(\"baa\", 512, False)\n l_ba.add_child(l_baa)\n l_bc = SystemFile(\"bc\", 99, True) # only in local\n l_b.add_child(l_bc)\n l_bca = SystemFile(\"bca\", 99, False) # only in local\n l_bc.add_child(l_bca)\n l_bd = SystemFile(\"bd\", 1000, False)\n l_b.add_child(l_bd)\n\n s_b = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.RUNNING, \"b\", \"\")\n s_b.total_transfer_state = LftpJobStatus.TransferState(1611, 3090, 52, 10, 1000)\n s_b.add_active_file_transfer_state(\"ba/baa\", LftpJobStatus.TransferState(512, 2048, 25, 5, 500))\n s_c = LftpJobStatus(0, LftpJobStatus.Type.PGET, LftpJobStatus.State.QUEUED, \"c\", \"\")\n s_d = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.QUEUED, \"d\", \"\")\n\n self.model_builder.set_remote_files([r_a, r_b, r_c, r_d])\n self.model_builder.set_local_files([l_a, l_b])\n self.model_builder.set_lftp_statuses([s_b, s_c, s_d])\n return self.model_builder.build_model()", "def __build_binary_tree(self, root, node_id, json_data):\n new_node = BinaryTree(value=json_data[node_id][\"value\"], left=None, right=None)\n if json_data[node_id][\"left\"] != None:\n new_node.left = self.__build_binary_tree(new_node, json_data[node_id][\"left\"], json_data)\n if json_data[node_id][\"right\"] != None:\n new_node.right = self.__build_binary_tree(new_node, json_data[node_id][\"right\"], json_data)\n return new_node", "def create_tree(self):\n feature_indices = []\n for i in self.estimator.tree_.feature:\n n_features = self.n_features\n if self.n_features > 1 or (self.n_features == 1 and i >= 0):\n feature_indices.append([str(j) for j in range(n_features)][i])\n indentation = 1 if self.target_language in ['java', 'js',\n 'php', 'ruby'] else 0\n return self.create_branches(\n self.estimator.tree_.children_left,\n self.estimator.tree_.children_right,\n self.estimator.tree_.threshold,\n self.estimator.tree_.value,\n feature_indices, 0, indentation)", "def tree(n, names):\n \n t = Tree()\n t.populate(n, names_library=names.keys(), 
random_branches=True, support_range=(0.7, 1))\n t.write(format=2, outfile='fake.tree.newick', dist_formatter='%0.4g', support_formatter='%0.4g')\n with open('fake.tree.notation.tsv', 'w') as o:\n o.write('#TaxaID\\tName\\n')\n o.writelines('{}\\t{}\\n'.format(k, v) for k, v in names.items())", "def allocTree(tree_ptr):\n pybtlib.allocTree.argtypes = [ctypes.POINTER(ctypes.POINTER(Tree))]\n pybtlib.allocTree.restype = None\n return pybtlib.allocTree(ctypes.byref(tree_ptr))", "def _build_tree(self, X, y, label, feature_names, depth, sample_weights=None):\n mytree = dict()\n # YOUR CODE HERE\n # TODO: Use `_choose_best_feature` to find the best feature to split the X. Then use `_split_dataset` to\n # get subtrees.\n # Hint: You may find `np.unique` is useful.\n # begin answer\n #1. no feature 2. all lables are the same 3. depth exceed 4. X is too small\n if len(feature_names)==0 or len(np.unique(y))==1 or depth >= self.max_depth or len(X) <= self.min_samples_leaf: \n return self._leaf_calculation(y, label, sample_weights)\n best_feature_idx, best_feature_val=self._choose_best_feature(X, y, label, sample_weights)\n best_feature_name = feature_names[best_feature_idx]\n feature_names=feature_names[:]\n feature_names.remove(best_feature_name)\n mytree={best_feature_name:{}}\n sub1_X, sub1_y, label1, sub1_sample_weights, sub2_X, sub2_y, label2, sub2_sample_weights = self._split_dataset(X, y, label, best_feature_idx, best_feature_val, sample_weights)\n mytree[best_feature_name][(best_feature_val, True)]=self._build_tree(sub1_X, sub1_y, label1, feature_names, depth+1, sub1_sample_weights)\n mytree[best_feature_name][(best_feature_val, False)]=self._build_tree(sub2_X, sub2_y, label2, feature_names, depth+1, sub2_sample_weights)\n # end answer\n return mytree", "def test01(self):\n\n t = tree(\"a\", [tree(\"b\"), tree(\"c\")]);\n self.assertTrue(self.isTree(t))", "def test_binarytree_instantiate_given(given_list):\n assert isinstance(given_list, BinaryTree)", "def make_tree(fname: str):\n\n tree = PDDL_Tree.create(fname)\n tree.print_tree()", "def __init__(self): # 用dict模拟字典树即可\n self.root = {}", "def __init__(self, value: T):\n self.value = value\n self.children: List[Tree] = []", "def build_tree(self):\n active = self.get_active()\n family = self.dbstate.db.get_family_from_handle(active)\n self.goto_handle(handle=family)", "def retrieveTrees(c):\n\n all_nodes = dict()\n root_nodes = list()\n c.execute('''SELECT id, parent_id, title FROM node''')\n data_db = c.fetchall()\n \n # Initialize nodes list\n for data_line in data_db:\n db_child_id = data_line[0]\n db_parent_id = data_line[1]\n child_title = data_line[2]\n \n node = Node(db_child_id, child_title)\n all_nodes[db_child_id] = node\n if not db_parent_id:\n root_nodes.append(node)\n \n # Create relations\n for data_line in data_db:\n db_child_id = data_line[0]\n db_parent_id = data_line[1]\n if db_parent_id:\n all_nodes[db_parent_id].append(all_nodes[db_child_id])\n \n return (all_nodes, root_nodes,)", "def test_binarytree_instantiate_tuple():\n input = (13, 42, 7)\n d = BinaryTree(input)\n assert isinstance(d, BinaryTree)", "def make_tree(arr):\n\n for i in range(len(arr)):\n arr, val = mid(arr)\n\n if i == 0: \n binary = BinaryNode(val)\n\n else:\n binary.insert(val)\n\n return binary", "def __init__(self, *args):\n _snap.TFltTree_swiginit(self, _snap.new_TFltTree(*args))", "def create_tree():\n basey = 960/2\n basex = 600/2\n newstruct = defaultdict(dict)\n homenw = Utility.read_configuration(config=\"HOME_NETWORK\")\n alive, _ = 
srp(Ether(dst=\"ff:ff:ff:ff:ff:ff\")/ARP(pdst=homenw),\\\n timeout=2, verbose=0)\n\n for idx in range(0, len(alive)):\n try:\n hname, _, _ = socket.gethostbyaddr(alive[idx][1].psrc)\n hostname = hname.split(\".\")[0]\n except:\n hostname = alive[idx][1].psrc\n\n mac = alive[idx][1].hwsrc\n ipaddr = alive[idx][1].psrc\n xcoord = random.randint(0, basex)\n ycoord = random.randint(0, basey)\n\n newstruct[hostname]['ip'] = ipaddr\n newstruct[hostname]['mac'] = mac\n newstruct[hostname]['hostname'] = hostname\n newstruct[hostname]['x'] = xcoord\n newstruct[hostname]['y'] = ycoord\n\n if not ipaddr.endswith('.1'):\n newstruct[hostname]['gateway'] = \"N\"\n else:\n newstruct[hostname]['gateway'] = \"Y\"\n newstruct[hostname]['x'] = basex + 50\n newstruct[hostname]['y'] = basey + 50\n\n\n #---------------------------------#\n # New implementation with sqlite3 #\n #---------------------------------#\n HomeNetwork.add_update_rows(newstruct, init=True)", "def __init__(self, key, tree=None, parent=None, left=None, right=None):\n super().__init__(key)\n\n self.tree = tree if isinstance(tree, BinaryTree) else None\n self.parent = BinaryNode.or_none(parent)\n self.left = BinaryNode.or_none(left)\n self.right = BinaryNode.or_none(right)", "def _createTree(dataSet, impurity_crit, min_impurity_decrease, min_samples_split):\n\t\tif type(dataSet).__name__ != 'ndarray':\n\t\t\traise TypeError('input must be a numpy array.')\n\n\t\ttreenode = TreeNode()\n\t\tfeat_ind, val = DecisionTree._bestFeat2split(dataSet, impurity_crit, \n\t\t\t\t\t\tmin_impurity_decrease, min_samples_split)\n\t\tif feat_ind is None:\n\t\t\ttreenode.value = val\n\t\t\treturn treenode\n\t\ttreenode.cut_off = cut_off(feat_ind, val)\n\t\t\n\t\tD1, D2 = DecisionTree._binarySplit(dataSet, *treenode.cut_off)\n\n\t\ttreenode.left = DecisionTree._createTree(D1, impurity_crit, \n\t\t\t\t\t\tmin_impurity_decrease, min_samples_split)\n\t\ttreenode.right = DecisionTree._createTree(D2, impurity_crit, \n\t\t\t\t\t\tmin_impurity_decrease, min_samples_split)\n\t\treturn treenode", "def bulid_binary_tree(ast):\n def is_terminal(node):\n if 'children' in node.keys() and len(node['children']) > 0:\n return False\n return True\n\n brother_map = {0: -1}\n for index, node in enumerate(ast): # 顺序遍历每个AST中的node\n if not isinstance(node, dict) and node == 0: # 删除AST中最后的0标识\n del ast[index]\n break\n\n if is_terminal(node):\n # 表示该node为terminal\n node['isTerminal'] = True\n continue\n else:\n # 注: 存在四种token,有children list但是list的长度为0,暂时将其归为terminal\n node['left'] = -1\n node['right'] = brother_map.get(node['id'], -1) # 只为non-terminal node构建左右child\n node['isTerminal'] = False\n add_two_bits_info(ast, node, brother_map) # 向每个节点添加两bit的额外信息\n child_list = node['children']\n\n first_nt_i = None\n temp_nt_node_id = None\n for i, child_index in enumerate(child_list):\n # 找到该节点第一个non-terminal child node作为该node的left node\n if not is_terminal(ast[child_index]):\n node['left'] = child_index\n first_nt_i = i\n temp_nt_node_id = child_list[first_nt_i]\n break\n\n if first_nt_i != None:\n # 说明该node有non-terminal left child,所以为这个nt left child构建brother map\n assert isinstance(first_nt_i, int) \\\n and first_nt_i < len(child_list) \\\n and isinstance(temp_nt_node_id, int)\n\n for index in range(first_nt_i+1, len(child_list)):\n next_node_id = child_list[index]\n # 为该node的所有non-terminal children构建non-terminal right sibling\n if not is_terminal(ast[next_node_id]):\n #print('nt',next_node_id)\n brother_map[temp_nt_node_id] = next_node_id\n temp_nt_node_id = next_node_id\n 
#print(brother_map)\n\n # 将转化生成的binary tree添加节点,组成完全二叉树\n if (node['left'] == -1) and (node['right'] != -1):\n node['left'] = 'PAD_EMPTY'\n if (node['left'] != -1) and(node['right'] == -1):\n node['right'] = 'PAD_EMPTY'\n\n return ast", "def build_decision_tree():\n\n decision_tree_root = None\n decision_tree_root = DecisionNode(None,None,lambda feature:feature[0]==1)\n decision_tree_root.left = DecisionNode(None,None,None,1)\n decision_tree_root.right = DecisionNode(None,None,lambda feature:feature[3]==1)\n decision_tree_root.right.left = DecisionNode(None,None,lambda feature:feature[1]==0)\n decision_tree_root.right.right = DecisionNode(None,None,lambda feature:feature[2]==1)\n decision_tree_root.right.left.left = DecisionNode(None,None,None,1)\n decision_tree_root.right.left.right = DecisionNode(None,None,None,0)\n decision_tree_root.right.right.left = DecisionNode(None,None,None,0)\n decision_tree_root.right.right.right = DecisionNode(None,None,None,1)\n return decision_tree_root", "def build_tree(data, B, serializer):\n # root will have a block position of 0\n if serializer.read_mode:\n # if we're already in read mode, then the file's already been built, so\n # return the last node in file\n # TODO: allow negative indexing for root node in serializer\n return serializer.loads(-1)\n\n # sort the fieldnames in each data item before doing field checks\n # last element of each data item is now a unique id for the record\n [data_item.sort(key=lambda field: field[0]) for data_item in data]\n data.sort(key=lambda data_item: data_item[0][1])\n data = [data_item + [i] for i, data_item in enumerate(data)]\n\n # If the sequence of keys is not the same in every other data item\n # as well, you fucked up. We make it immutable just to be safe.\n seq = tuple(d[0] for d in data[0][:-1])\n for item in data:\n if tuple(d[0] for d in item[:-1]) != seq:\n raise Exception('You fucked up. 
The sequence of keys is \\\n not the same in every data item.')\n\n # Now that we've got leaves, let's build their parents, recursively.\n build_upwards(data, 0, B, RangeLeaf, serializer)\n\n # should be done serializing\n serializer.flush()\n # and output the root\n return serializer.loads(-1)", "def build_tree(keys, ROOT_TABLE, i, j):\n\n # index rootu aktualneho podstromu (so suradnicami [i, j] v ROOT_TABLE)\n root_id = ROOT_TABLE.at[i, j]\n # novy podstrom\n search_tree = Tree(keys.get(root_id))\n\n # rekurzivne do laveho podstromu\n if i < root_id:\n search_tree.set_left_subtree(\n build_tree(keys, ROOT_TABLE, i, root_id - 1)) # root podstromu v bunke vlavo\n\n # rekurzivne do praveho podstromu\n if j > root_id:\n search_tree.set_right_subtree(\n build_tree(keys, ROOT_TABLE, root_id + 1, j)) # root podstromu v bunke dole\n\n return search_tree", "def generate_tree_general(node_lst, root_index):\n\n def generate_huffman(note):\n \"\"\" Return a new tree based on the given ReadNode node.\n\n @param ReadNode note: a given ReadNode\n @rtype: HuffmanNode\n\n >>> t = generate_huffman(ReadNode(0, 5, 0, 7))\n >>> t\n HuffmanNode(None, HuffmanNode(5, None, None), HuffmanNode(7, None, None))\n >>> t = generate_huffman(ReadNode(1, 1, 1, 0))\n >>> t\n HuffmanNode(None, HuffmanNode(None, None, None), HuffmanNode(None, None, None))\n >>> t.left.number\n 1\n >>> t.right.number\n 0\n \"\"\"\n\n if note.l_type == 0 and note.r_type == 0:\n return HuffmanNode(None, HuffmanNode(note.l_data),\n HuffmanNode(note.r_data))\n elif note.l_type == 0 and note.r_type == 1:\n k = HuffmanNode(None, HuffmanNode(note.l_data), HuffmanNode())\n k.right.number = note.r_data\n return k\n elif note.l_type == 1 and note.r_type == 0:\n k = HuffmanNode(None, HuffmanNode(), HuffmanNode(note.r_data))\n k.left.number = note.l_data\n return k\n else:\n k = HuffmanNode(None, HuffmanNode(), HuffmanNode())\n k.left.number, k.right.number = note.l_data, note.r_data\n return k\n\n def combine_trees(trees_, index):\n \"\"\" Return a new tree based on the list of frame trees and take the\n HuffmanNode tree at int index as the root tree.\n\n @param list[HuffmanNode] trees_:\n @param int index:\n @rtype: HuffmanNode\n\n >>> t = [ReadNode(0, 5, 0, 7), ReadNode(0, 10, 0, 12), ReadNode(1, 1, 1, 0)]\n >>> huff_list = [generate_huffman(x) for x in t]\n >>> a = combine_trees(huff_list, 2)\n >>> a == HuffmanNode(None, HuffmanNode(None, HuffmanNode(10, None, None), HuffmanNode(12, None, None)), HuffmanNode(None, HuffmanNode(5, None, None), HuffmanNode(7, None, None)))\n True\n >>> a.left.number\n 1\n >>> a.right.number\n 0\n >>> combine_trees(huff_list, 0)\n HuffmanNode(None, HuffmanNode(5, None, None), HuffmanNode(7, None, None))\n \"\"\"\n\n root = trees_[index]\n if root.left.number is None and root.right.number is None:\n return root\n elif root.left.number is None and root.right.number is not None:\n right = combine_trees(trees_, root.right.number)\n number = root.right.number\n root.right = right\n root.right.number = number\n return root\n elif root.left.number is not None and root.right.number is None:\n left = combine_trees(trees_, root.left.number)\n number = root.left.number\n root.left = left\n root.left.number = number\n return root\n else:\n left = combine_trees(trees_, root.left.number)\n num_l = root.left.number\n right = combine_trees(trees_, root.right.number)\n num_r = root.right.number\n root.left = left\n root.right = right\n root.left.number = num_l\n root.right.number = num_r\n return root\n\n trees = []\n for node in 
node_lst:\n trees.append(generate_huffman(node))\n root_tree = combine_trees(trees, root_index)\n\n return root_tree", "def bst_wiki():\n from bbst import Bst\n tree = Bst([6, 7, 9, 8, 2, 1, 4, 3, 5])\n return tree", "def tree(branchLen):\n if branchLen > 5:\n t.backward(branchLen)\n t.right(20)\n tree(branchLen-16,t)\n t.left(40)\n tree(branchLen-16,t)\n t.right(20)\n t.forward(branchLen)", "def _tree():\n return collections.defaultdict(_tree)", "def binaryTree(r):\r\n return [r, [], []]", "def test_binarytree_instantiate_single_value():\n b = BinaryTree(42)\n assert isinstance(b, BinaryTree)", "def __init__(self, tree):\n connector = db\n if isinstance(tree, list):\n for collection in tree:\n connector = connector[collection]\n else:\n connector = connector[tree]\n self.table = connector\n self.attributs = {}", "def __init__(self, data: typing.Any = None, left: 'TreeNode' = None, right: 'TreeNode' = None) -> None:\n self.data = data\n self.left = left\n self.right = right", "def build_tree(elements):\n print(\"Building tree with these elements:\",elements)\n root = BinarySearchTreeNode(elements[0])\n\n for i in range(1, len(elements)):\n root.add_child(elements[i])\n\n return root", "def create_tree(data_set, labels):\n labels = copy.copy(labels)\n class_list = [ eg[-1] for eg in data_set]\n # if all classes are same\n if class_list.count(class_list[0]) == len(class_list):\n return class_list[0]\n # only have class feature\n if len(data_set[0]) == 1:\n return majority_cnt(class_list)\n best_feat = choose_best_feature(data_set)\n best_feat_cls = labels[best_feat]\n node = {best_feat_cls: {}}\n del(labels[best_feat])\n feat_values = [eg[best_feat] for eg in data_set]\n unique_values = set(feat_values)\n for value in unique_values:\n sub_cls = labels[:]\n sub_ds = splite_dataset(data_set, best_feat, value)\n node[best_feat_cls][value] = create_tree(sub_ds, sub_cls)\n\n return node", "def build_graph(tree):\n # boundary case: trees with no children\n graph = nx.DiGraph()\n if isinstance(tree, (float, int)):\n graph.add_node(str(np.round(tree, 2)))\n return graph\n\n parent_name = LABELS_MAPPING.get(tree.name, tree.name)\n graph.add_node(parent_name)\n if len(tree) == 0:\n return graph\n\n # process generic trees\n labels = {parent_name: parent_name}\n _build_graph(tree, graph, parent_name, labels)\n\n return graph, labels", "def as_binary_search_tree(self):\n root = self.get_root()\n bst = BinarySearchTree(root)\n\n for patient in self.dataset:\n bst.insert_node(patient)\n\n return bst", "def make_nltk_tree(derivation):\n\td = defaultdict(None, ((r.lhs, r.rhs) for r in derivation))\n\t\n\tdef make_tree(lhs):\n\t\treturn Tree(lhs[1:-1], (child if child not in d else make_tree(child) for child in d[lhs]))\n\t\n\treturn make_tree(derivation[0].lhs)", "def makeTree(self):\n return makeTree(self.events,self.outTree)", "def __init__(self, container=[]):\n # Initialize empty tree.\n self.root = None\n # Insert every item from container.\n for item in container:\n self.insert(item)", "def __init__(self, dim, points, parent, root, rec):\n import numpy as np\n \n ##Assumption: 0 - left and down, 1 - right and up, 0 - x, 1 - y \n self.node = root\n self.parent = parent\n self.points = points\n self.children = [None, None]\n self.childlist = [[], []]\n self.rec = rec\n \n \n if(parent == None):\n self.depth = 0\n else:\n self.depth = self.parent.depth + 1\n \n self.dim = self.depth%dim\n \n if(self.parent == None): #ROOT - 0, BRANCH - 1, LEAF - 2\n self.type = \"ROOT\"\n \n elif(len(self.points) == 
0):\n self.type = \"LEAF\"\n \n else:\n self.type = \"BRANCH\"\n \n self.update_rec()\n self.subdivide(dim)", "def test_minimal_tree_creation():\n t = Tree(None)\n\n assert t.data is None\n assert t.parent is None\n assert len(t) == 0", "def three():\n from bbst import Bst\n tree = Bst([2, 1, 3])\n return tree", "def __init__(self):\n self.root = TreeNode(\"\")", "def __init__(self):\n self.root = TreeNode(\"\")", "def make_tree(self, l):\n\t\tfor el in l:\n\t\t\tself.insert(el)", "def BalancedTree(r, h):\n import networkx\n return Graph(networkx.balanced_tree(r, h), name=\"Balanced tree\")", "def make_dtrees_table(tablename, input_basename, output_basename):\n data = read_dhalo_trees(input_basename)\n add_depth_first_index(data) \n write_sql_server_native_file(tablename, data, \n output_basename+\".dat\", \n output_basename+\".sql\")", "def BinaryTree(root):\n return [root, [], []]", "def __init__(self, start_tree=None) -> None:\n self.root = None\n\n # populate BST with initial values (if provided)\n # before using this feature, implement add() method\n if start_tree is not None:\n for value in start_tree:\n self.add(value)", "def __init__(self, start_tree=None) -> None:\n self.root = None\n\n # populate BST with initial values (if provided)\n # before using this feature, implement add() method\n if start_tree is not None:\n for value in start_tree:\n self.add(value)", "def __init__(self):\n self.left = None\n self.right = None\n self.depth = 0\n self.val = None\n self.id = None", "def build_taxdump_tree(taxdump):\n # create the tree from root\n tree = TreeNode('1')\n\n # iteratively attach child nodes to parent node\n def iter_node(node):\n for cid in taxdump[node.name]['children']:\n child = TreeNode(cid)\n node.extend([child])\n iter_node(child)\n\n iter_node(tree)\n return tree", "def __call__(self, params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray]) -> PyTreeDef:\n ...", "def tree(label, branches=[]):\n for branch in branches:\n assert is_tree(branch), 'branches must be trees'\n return [label] + list(branches)" ]
[ "0.68108326", "0.6772506", "0.67162484", "0.67155635", "0.67095953", "0.6641939", "0.66419315", "0.66286445", "0.658514", "0.6475259", "0.64652383", "0.6458261", "0.6430485", "0.6366659", "0.634264", "0.631714", "0.6273454", "0.62638414", "0.6260716", "0.62589467", "0.6194391", "0.61930954", "0.61803603", "0.6160031", "0.6121936", "0.61118144", "0.60784984", "0.6063105", "0.60586375", "0.6038033", "0.60177994", "0.599712", "0.5992746", "0.598232", "0.5945722", "0.5945295", "0.5941224", "0.59395504", "0.59332573", "0.5923436", "0.59191453", "0.5917215", "0.589674", "0.5889437", "0.5886443", "0.5886443", "0.5885216", "0.5865268", "0.58578837", "0.58516407", "0.5851052", "0.58318627", "0.58287436", "0.58179224", "0.58125323", "0.580698", "0.58055043", "0.58010375", "0.5798652", "0.57759875", "0.5771", "0.57527006", "0.5743479", "0.57405114", "0.5731035", "0.5722029", "0.57184553", "0.5718229", "0.5694438", "0.56811273", "0.5675255", "0.56730014", "0.56723344", "0.5671882", "0.567142", "0.566888", "0.5667347", "0.565869", "0.5651706", "0.5646282", "0.5646003", "0.5644754", "0.562891", "0.5627918", "0.561881", "0.5617977", "0.56134975", "0.56133586", "0.561209", "0.561209", "0.56053454", "0.5603103", "0.560082", "0.56005436", "0.55959964", "0.55959964", "0.5591873", "0.55801344", "0.55785793", "0.55774415" ]
0.5919498
40
load the data samples that are used
def load(self,sigData,bkgData):
    self.sigData = sigData
    self.bkgData = bkgData
    self.nVars = sigData.shape[1]
    self.nSig = sigData.shape[0]
    self.nBkg = bkgData.shape[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_sample(self):\n\n self.load_images(self.folder + \"/sampleSet.txt\")\n self.load_traces(self.folder + \"/sampleLabel.txt\")", "def _preload_all_samples(self):\n if self.mode in ['train_noval', 'train_with_val']:\n\n self._images_train, self._labels_train = [], []\n desc = \"Loading train image pairs & flows\"\n with tqdm(total=len(self._img_trn_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_trn_path):\n pbar.update(1)\n label_path = self._lbl_trn_path[n]\n image, label = self._load_sample(image_path, label_path)\n self._labels_train.append(label)\n self._images_train.append(image)\n\n if self.mode == 'train_with_val':\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n if self.opts['tb_test_imgs'] is True:\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))\n\n elif self.mode in ['val', 'val_notrain']:\n\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n elif self.mode == 'test':\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))", "def samples(self):\n pass", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def load_data(self, f): \n self.sampling = True\n self.reads = np.load(f)\n self.total = self.reads.shape[0]", "def _fetch_data(self, samples):\n pass", "def load_all(): \n training_data = dict() \n for i in range(7):\n training_data[i+1] = load_data(i+1) \n\n return training_data", "def _load_training_data(self):\n self._save_training_data()", "def load_data_pickle(self, load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')", "def load_data(self):", "def create_samples(self):\n self._samples = self.load_samples()\n self.modify_samples()", "def loadtrainData_oversampling():\n pre_x = []\n pre_y = []\n fileIn = open(PATH + 'traindata_Subtask4.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n pre_x.append([float(lineArr[i]) for i in range(len(lineArr) - 1)])\n pre_y.append(int(lineArr[-1]))\n ros = RandomOverSampler(random_state=0)\n sampl_x, sampl_y = ros.fit_sample(pre_x, pre_y)\n return np.mat(sampl_x), np.mat(sampl_y).transpose()", "def load_dataset(self):\n # Get all the files in the directory\n file_list = 
self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def load_data(self) -> None:", "def _init_al_dataset(self):\n\n self._init_dataset()\n\n train_dataset = self.datasets['train']\n\n dataset_size = len(train_dataset)\n self.budget = math.ceil(self.budget_frac*dataset_size)\n Sampler.__init__(self, config, self.budget) # TODO: Weird place to initialise this\n\n all_indices = set(np.arange(dataset_size))\n k_initial = math.ceil(len(all_indices)*self.initial_budget_frac)\n initial_indices = random.sample(list(all_indices), k=k_initial)\n\n sampler_init = data.sampler.SubsetRandomSampler(initial_indices) # need to sample from training dataset\n\n self.labelled_dataloader = data.DataLoader(train_dataset, sampler=sampler_init, batch_size=self.batch_size, drop_last=True)\n self.val_dataloader = data.DataLoader(self.datasets['valid'], batch_size=self.batch_size, drop_last=False)\n self.test_dataloader = data.DataLoader(self.datasets['test'], batch_size=self.batch_size, drop_last=False)\n\n return all_indices, initial_indices", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def load_data(batch_size=batch_size):\n trainset = LibriSpeechDataset(training_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds))\n testset = LibriSpeechDataset(validation_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds), stochastic=False)\n\n train_loader = DataLoader(trainset, batch_size=batch_size, num_workers=1, shuffle=True, drop_last=True)\n test_loader = DataLoader(testset, batch_size=1, num_workers=1, drop_last=True)\n\n return train_loader, test_loader", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def load_data():\n # Load in data\n sample_frame = energy_connection.sample_series('energy_readings')\n # TODO: Rooms/QL Extract\n sample_frame = energy_connection.sample_series('external_readings', append_frame=sample_frame)\n\n # To object\n sample = TimeSeriesSample(sample_frame, 'time')\n\n return sample", "def 
load_train_dataset(data_dir, word_list, silence_percentage, noise_percentage):\n validation_percentage, testing_percentage = 0.1, 0.1\n temp_list = []\n\n #wav_lists = os.path.join(data_dir, *, '*.wav')\n for word_l in word_list:\n #wav_word_list = os.path.join(data_dir, word_l)\n wav_list = os.path.join(data_dir, word_l, '*.wav')\n for file in gfile.Glob(wav_list):\n _, word = os.path.split(os.path.dirname(file))\n word = word.lower()\n\n if which_set(file, validation_percentage, testing_percentage) == 'training':\n rate, signal = load_wav(file);\n signal_and_noise = add_noise(signal, rate, 1, os.path.join(data_dir,'_background_noise_'), noise_percentage)\n \n feature = psf.mfcc(signal_and_noise, rate, nfilt = 40,numcep = 12, appendEnergy = False)\n #if feature.shape[0] != 99:\n # print(str(len(signal)) + \" \" + str(rate))\n temp_list.append({'feature': feature, 'label': word_l})\n\n # hotspot\n #silence = len(X_train) * silence_percentage\n silence = int(math.ceil(len(temp_list) * silence_percentage / 100))\n for _ in range(silence):\n temp_list.append({'feature': 0, 'label': \"_silence_\"})\n\n random.shuffle(temp_list)\n\n X_train = np.zeros((len(temp_list), 99, 12))\n Y_train = np.zeros( len(temp_list) )\n\n for i in range(len(X_train)):\n X_train[i] = temp_list[i]['feature']\n Y_train[i] = word2index(temp_list[i]['label'])\n\n return X_train, Y_train", "def load_data():\n directories=[\"./track1/\",\n \"./track1_recovery/\",\n \"./track2/\",\n \"./track1_reverse/\",\n \"./track2_reverse/\",#Additional data for model built on top of lenet.h5\n \"./track2_recovery/\",#Additions data for model built on top of lenet.h5\n ]\n lines=[]\n for directory in directories:\n with open(directory+\"driving_log.csv\") as csvfile:\n reader=csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n train_samples, validation_samples = train_test_split(lines, test_size=0.2)\n return train_samples, validation_samples", "def load_openml_data():\n datasets = dict()\n files = os.listdir(_DATA_DIRECTORY.value)\n for file_name in files:\n with open(_DATA_DIRECTORY.value + file_name, \"r\") as ff:\n task = np.loadtxt(ff, delimiter=\",\", skiprows=1)\n np.random.shuffle(task)\n datasets[file_name] = [task]\n return datasets, files", "def _load_data(self):\n if self._name in BALANCE_DATASET:\n _loader = dataset_loaders[self._name]\n xnp, y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.y_ts = y[test_idx]\n\n else:\n _loader = dataset_loaders[self._name]\n xnp, xp, y = _loader()\n # self.xnp, self.xp, self.y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.xp_tr = xp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.xp_ts = xp[test_idx]\n self.y_ts = y[test_idx]", "def dataload():\n\t\n\tglobal A, B, fnA, fnB, lPcnA, lPcnB\n\t\n\tdwd = os.getcwd() # Data WD\n\t\t\n\t# First sample A is loaded. This is the \"calibrating\" sample.\n\t# In this case it is the OGLE III LMC small amplitude RGs.\n\t\n\tfnA = '/LMC-CalSample-cleaned_2.fits'\n\tA = Table.read(dwd+fnA)\n\n\t# Then sample B is loaded. 
For comparison/testing purposes, this is\n\t# again the OGLE III LMC SARGs.\n\t\n\tfnB = '/LMC-CalSample-cleaned_2.fits'\n\tB = Table.read(dwd+fnB)\n\t\n\t\"\"\" Fix tables so only the stars with all three good periods are \n\tconsidered. \"\"\"\n\t\n\tlPcnA = get_logPcn(A)\n\tlPcnB = get_logPcn(B)\n\t\n\tfor cn in lPcnA:\n\t\tA = A[A[cn]>0]\n\tfor cn in lPcnB:\n\t\tB = B[B[cn]>0]", "def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! \n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)", "def get_data(self):\n if self.random_seeds: \n self._validate_random_seeds()\n seed_iter = list(map(iter,self.random_seeds))\n nsamples = len(self.random_seeds[0])\n else:\n seed_iter = None\n nsamples = self.numsamples\n self._set_meta_features()\n for _ in tqdm(range(nsamples)):\n self._update_meta_features(seed_iter)\n self._sample()\n yield self._extract_features()", "def load_data(self,split='train'):\n raise NotImplementedError", "def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_train = np.load(\"data/X_train.npy\")\n\t\t\t\tX_val = np.load(\"data/X_val.npy\")\n\t\t\t\tY_train = np.load(\"data/Y_train.npy\")\n\t\t\t\tY_val = np.load(\"data/Y_val.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tdata_temp = np.zeros((50000,64,64,3))\n\t\t\t\tlabel_temp = []\n\n\t\t\t\tfor i in range(5):\n\n\t\t\t\t\tfile = path + str(i+1)\n\t\t\t\t\twith open(file, 'rb') as fo:\n\t\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\t\tlabel_temp.extend(temp_element[b'labels'])\n\n\t\t\t\t\tfor j in range(10000):\n\t\t\t\t\t\tdata_temp[j+(i*10000)] = self._reshape(temp_data[j])\n\n\t\t\t\tlabel_temp = np.eye(10)[np.array(label_temp)]\n\n\t\t\t\tnp.random.seed(123)\n\t\t\t\tpermutations = list(np.random.permutation(50000))\n\t\t\t\tX = data_temp[permutations, :, : , :] \n\t\t\t\tY = label_temp[permutations, :]\n\t\t\t\tX_train = X[0:40000, :, :, :] \n\t\t\t\tY_train = Y[0:40000, :]\n\t\t\t\tX_val = X[40000:50000, :, :, :] \n\t\t\t\tY_val = Y[40000:50000, :]\n\n\t\t\t\tnp.save(\"./data/X_train\", X_train)\n\t\t\t\tnp.save(\"./data/X_val\", X_val)\n\t\t\t\tnp.save(\"./data/Y_train\", Y_train)\n\t\t\t\tnp.save(\"./data/Y_val\", Y_val)\n\t\t\t\tbreak\n\n\t\treturn X_train, X_val, Y_train, Y_val", "def load_train_data():\r\n X_train = np.load('data/train/X_train.npy')\r\n scaling_train = np.load('data/train/scaling_train.npy')\r\n ids_train = np.load('data/train/ids_train.npy')\r\n y_train = np.load('data/train/y_train.npy')\r\n\r\n seed = np.random.randint(1, 10e6)\r\n np.random.seed(seed)\r\n np.random.shuffle(X_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(scaling_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(ids_train)\r\n 
np.random.seed(seed)\r\n np.random.shuffle(y_train)\r\n\r\n return X_train, scaling_train, ids_train, y_train", "def load_all(self):\n if os.path.isfile(self.vocab_path):\n self.vocab_processor = self.load_vocab()\n else:\n self.vocab_processor = self.train_vocab()\n if self.data_path:\n self.x, self.y = self.load_data(self.need_shuffle)\n print(\"Max document length: {}\".format(self.max_doc))", "def loadtrainData_undersampling():\n train = []\n fileIn = open(PATH + 'traindata_Subtask4.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n train.append([float(lineArr[i]) for i in range(len(lineArr))])\n\n pos = []\n neg = []\n for i in train:\n if i[-1] == 1.0:\n pos.append(i)\n else:\n neg.append(i)\n slice1 = random.sample(neg, len(pos))\n data = pos + slice1\n train_x = []\n train_y = []\n y = []\n for line in data:\n train_x.append([float(line[i]) for i in range(len(line) - 1)])\n y.append([int(line[-1])])\n for i in range(len(y)):\n train_y.append(y[i][0])\n return np.mat(train_x), np.mat(train_y).transpose()", "def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)", "def load_cleaned_data(self):\n try:\n self.train = pd.read_pickle('../input/train_clean.pkl')\n self.test = pd.read_pickle('../input/test_clean.pkl')\n except FileNotFoundError:\n self.load_raw_data()", "def _load_processed_data(self):\n with open(os.path.join(self._data_root_path, self._processed_train_data_file_name),\n 'r') as f:\n train_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._processed_dev_data_file_name), 'r') as f:\n dev_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._word_vocab_file_name), 'r') as f:\n word_vocab = Vocab.from_json(json.load(f))\n\n with open(os.path.join(self._data_root_path, self._char_vocab_file_name), 'r') as f:\n char_vocab = Vocab.from_json(json.load(f))\n\n return train_examples, dev_examples, word_vocab, char_vocab", "def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_test = np.load(\"data/X_test.npy\")\n\t\t\t\tY_test = np.load(\"data/Y_test.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tX_test = np.zeros((10000,64,64,3))\n\t\t\t\tY_test = []\n\n\t\t\t\t\n\t\t\t\twith open(path, 'rb') as fo:\n\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\tY_test.extend(temp_element[b'labels'])\n\n\t\t\t\tfor j in range(10000):\n\t\t\t\t\tX_test[j] = self._reshape(temp_data[j])\n\n\t\t\t\tY_test = np.eye(10)[np.array(Y_test)]\n\t\t\t\t\n\t\t\t\tnp.save(\"./data/X_test\", X_test)\n\t\t\t\tnp.save(\"./data/Y_test\", Y_test)\n\n\t\t\t\tbreak\n\n\n\t\treturn X_test, Y_test", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def _load_train_data(self):\n\n self.train_loader = data.Train_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n self.train_loader.load_data()\n\n # load mean and std\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])", "def load_data():\n t = time()\n print 'loading tweets, please wait...'\n trained_tweets = load_tweets('training_dataset')\n eval_tweets = 
load_tweets('evaluation_dataset')\n print 'Time taken {}'.format(time() - t)\n t = time()\n print 'loading w2v model, please wait...'\n model = w2v_load_model('GoogleNews-vectors-negative300.bin')\n print 'Time taken {}'.format(time() - t)\n return trained_tweets, eval_tweets, model", "def loadRawData():\n samples = []\n with open('Sample_data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n # skip the first line, which is the title line\n if line[0] != 'center':\n samples.append(line)\n # for testing implementation only, commented for GPU training\n #if len(samples)>100:\n # break\n train_samples, validation_samples = train_test_split(samples, test_size=0.2)\n return train_samples, validation_samples", "def load_data(self):\n params = self.params\n catg = params.data_category\n langs = ['en', params.target_lang]\n data = {lang: {splt: {} for splt in (['train', 'valid'] if lang == 'en' else ['test'])} for lang in langs}\n clf_dataset_path = {\n lang: {\n splt: {\n 'x': os.path.join(params.data_path, '%s_%s_%s_x.bpe.pth' % (splt, lang, catg)),\n 'y': os.path.join(params.data_path, '%s_%s_%s_y.txt' % (splt, lang, catg)),\n } for splt in (['train', 'valid'] if lang == 'en' else ['test'])\n } for lang in langs\n }\n for splt in ['train', 'valid', 'test']:\n for lang in langs:\n if lang == 'en' and splt in ['train', 'valid'] or lang != 'en' and splt == 'test':\n # load data and dictionary\n data1 = load_binarized(clf_dataset_path[lang][splt]['x'], params)\n data['dico'] = data.get('dico', data1['dico'])\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n # create dataset\n data[lang][splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n # load labels\n with open(clf_dataset_path[lang][splt]['y'], 'r') as f:\n labels = [int(l) for l in f]\n data[lang][splt]['y'] = torch.LongTensor(labels)\n assert len(data[lang][splt]['x']) == len(data[lang][splt]['y'])\n\n return data", "def _load_test_data(self):\n\n self.test_loader = data.Test_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n\n self.test_loader.load_data()\n\n # load mean and std from train\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])", "def load_datasets(self):\n if self.processed_extension == '.npz':\n logger.info(f'Loading sets from npz:')\n \n logger.info(f'train: {self.train_path}')\n self.train_data = sparse.load_npz(self.train_path)\n\n logger.info(f'val: {self.val_path}')\n self.val_data = sparse.load_npz(self.val_path)\n\n logger.info(f'test: {self.test_path}')\n self.test_data = sparse.load_npz(self.test_path)\n \n # Split x and y\n self.train_data = [sparse.lil_matrix(sparse.csr_matrix(self.train_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.train_data)[:,-1])]\n \n self.val_data = [sparse.lil_matrix(sparse.csr_matrix(self.val_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.val_data)[:,-1])]\n \n self.test_data = [sparse.lil_matrix(sparse.csr_matrix(self.test_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.test_data)[:,-1])]\n \n elif self.processed_extension == '.csv':\n logger.info(f'Loading sets from csv:')\n \n logger.info(f'train: {self.train_path}')\n self.train_data = pd.read_csv(self.train_path)\n train_cols = self.train_data.columns\n self.train_data = [self.train_data[train_cols.difference(['TARGET'])],\n self.train_data['TARGET']]\n \n 
logger.info(f'val: {self.val_path}')\n self.val_data = pd.read_csv(self.val_path)\n self.val_data = [self.val_data[train_cols.difference(['TARGET'])],\n self.val_data['TARGET']]\n \n logger.info(f'test: {self.test_path}')\n self.test_data = pd.read_csv(self.test_path)\n self.test_data = [self.test_data[train_cols.difference(['TARGET'])],\n self.test_data['TARGET']]\n else:\n raise AttributeError(f'Wrong extension: {self.processed_extension}')\n self.n_train = self.train_data[0].shape[0]\n self.n_val = self.val_data[0].shape[0]\n self.n_test = self.test_data[0].shape[0]\n self.input_size = self.train_data[0].shape[1]\n self.n_examples = self.n_train + self.n_val + self.n_test\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def test_large_import(self):\n self.create_sample_data_set_dir(\"node59p1.dat\", TELEM_DIR)\n self.assert_initialize()\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED,1,60)\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE_TELEMETERED,750,400)", "def _compute_samples(self, samples):\n return samples", "def samples(self):\n return glob.glob(os.path.join(self.production.rundir, \"extrinsic_posterior_samples.dat\"))", "def load_dataset(data_dir, img_size):\n global input_set\n global test_set\n\n imgs = []\n img_files = os.listdir(data_dir)\n for img in img_files:\n # try:\n tmp = scipy.misc.imread(data_dir + \"/\" + img)\n x, y, z = tmp.shape # shape : width * length * chanel\n coords_x = int(x / img_size) # 坐标\n coords_y = int(y / img_size) #\n coords = [(q, r) for q in range(coords_x) for r in range(coords_y)] # 列表 x * y\n for coord in coords:\n imgs.append((data_dir + \"/\" + img, coord)) # 为列表添加文件目录\n # except BaseException:\n # print(\"oops\")\n test_size = min(10, int(len(imgs) * 0.2))\n random.shuffle(imgs)\n test_set = imgs[:test_size]\n train_set_X = imgs[test_size:][:200]\n train_set = imgs[test_size:][200:400]\n return", "def load_data():\n\n # Load data from categories\n comp = fetch_20newsgroups(subset='all', categories=['comp.graphics', 'comp.sys.mac.hardware', 'comp.windows.x'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n science = fetch_20newsgroups(subset='all', categories=['sci.crypt', 'sci.electronics', 'sci.space'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n politics = fetch_20newsgroups(subset='all', categories=['talk.politics.guns', 'talk.politics.mideast'], \\\n shuffle=True, random_state=1, 
remove=('headers', 'footers', 'quotes'))\n religion = fetch_20newsgroups(subset='all', categories=['alt.atheism', 'soc.religion.christian'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n recreation = fetch_20newsgroups(subset='all', categories=['rec.autos', 'rec.sport.baseball', 'rec.sport.hockey'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n\n # Print total number of documents\n data_len = [len(comp.data), len(science.data), len(politics.data), len(recreation.data), len(religion.data)]\n\n # Subsample classes to create a balanced dataset\n sub_k = min(data_len)\n comp.data, comp.target = [list(t) for t in zip(*random.sample(list(zip(comp.data, comp.target)), sub_k))]\n science.data, science.target = [list(t) for t in zip(*random.sample(list(zip(science.data, science.target)), sub_k))]\n politics.data, politics.target = [list(t) for t in zip(*random.sample(list(zip(politics.data, politics.target)), sub_k))]\n religion.data, religion.target = [list(t) for t in zip(*random.sample(list(zip(religion.data, religion.target)), sub_k))]\n recreation.data, recreation.target = [list(t) for t in zip(*random.sample(list(zip(recreation.data, recreation.target)), sub_k))]\n\n # Subcategories labels\n subcat_comp = np.array(comp.target)\n subcat_scien = np.array(science.target) + len(comp.target_names)\n subcat_polit = np.array(politics.target) + len(comp.target_names) + len(science.target_names)\n subcat_rel = np.array(religion.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names)\n subcat_rec = np.array(recreation.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names) + len(religion.target_names)\n\n # Assign labels to train data based on categories\n y_comp = np.ones(len(comp.data))\n y_scien = 2*np.ones(len(science.data))\n y_polit = 3*np.ones(len(politics.data))\n y_rel = 4*np.ones(len(religion.data))\n y_rec = 5*np.ones(len(recreation.data))\n labels = np.concatenate((y_comp,y_scien,y_polit,y_rel,y_rec), axis=None)\n\n # Computers\n train_comp, test_comp, y_train_comp, y_test_comp, subcat_comp_train, subcat_comp_test = train_test_split(comp.data, y_comp, subcat_comp, test_size=0.2, random_state=42)\n train_comp, val_comp, y_train_comp, y_val_comp, subcat_comp_train, subcat_comp_val = train_test_split(train_comp, y_train_comp, subcat_comp_train, test_size=0.25, random_state=42)\n\n # Sciences\n train_scien, test_scien, y_train_scien, y_test_scien, subcat_scien_train, subcat_scien_test = train_test_split(science.data, y_scien, subcat_scien, test_size=0.2, random_state=42)\n train_scien, val_scien, y_train_scien, y_val_scien, subcat_scien_train, subcat_scien_val = train_test_split(train_scien, y_train_scien, subcat_scien_train, test_size=0.25, random_state=42)\n\n # Politics\n train_polit, test_polit, y_train_polit, y_test_polit, subcat_polit_train, subcat_polit_test = train_test_split(politics.data, y_polit, subcat_polit, test_size=0.2, random_state=42)\n train_polit, val_polit, y_train_polit, y_val_polit, subcat_polit_train, subcat_polit_val = train_test_split(train_polit, y_train_polit, subcat_polit_train, test_size=0.25, random_state=42)\n\n # Religion\n train_rel, test_rel, y_train_rel, y_test_rel, subcat_rel_train, subcat_rel_test = train_test_split(religion.data, y_rel, subcat_rel, test_size=0.2, random_state=42)\n train_rel, val_rel, y_train_rel, y_val_rel, subcat_rel_train, subcat_rel_val = train_test_split(train_rel, y_train_rel, subcat_rel_train, 
test_size=0.25, random_state=42)\n\n # Recreation\n train_rec, test_rec, y_train_rec, y_test_rec, subcat_rec_train, subcat_rec_test = train_test_split(recreation.data, y_rec, subcat_rec, test_size=0.2, random_state=42)\n train_rec, val_rec, y_train_rec, y_val_rec, subcat_rec_train, subcat_rec_val = train_test_split(train_rec, y_train_rec, subcat_rec_train, test_size=0.25, random_state=42)\n\n # Corpus from all categories in train set\n newsgroups_train = train_comp + train_scien + train_polit + train_rel + train_rec\n #print(f\"Total number of documents in all categories in the train set is {len(newsgroups_train)}.\")\n train_labels = np.concatenate((y_train_comp,y_train_scien,y_train_polit,y_train_rel,y_train_rec), axis=None)\n #print(train_labels.shape)\n train_subcat = np.concatenate((subcat_comp_train,subcat_scien_train,subcat_polit_train,subcat_rel_train,subcat_rec_train), axis=None)\n #print(train_subcat.shape)\n\n # Corpus from all categories in test set\n newsgroups_test = test_comp + test_scien + test_polit + test_rel + test_rec\n test_labels = np.concatenate((y_test_comp,y_test_scien,y_test_polit,y_test_rel,y_test_rec), axis=None)\n test_subcat = np.concatenate((subcat_comp_test,subcat_scien_test,subcat_polit_test,subcat_rel_test,subcat_rec_test), axis=None)\n\n # Corpus from all categories in validation set\n newsgroups_val = val_comp + val_scien + val_polit + val_rel + val_rec\n val_labels = np.concatenate((y_val_comp,y_val_scien,y_val_polit,y_val_rel,y_val_rec), axis=None)\n val_subcat = np.concatenate((subcat_comp_val,subcat_scien_val,subcat_polit_val,subcat_rel_val,subcat_rec_val), axis=None)\n\n # Data Split\n total = len(test_labels) + len(val_labels) + len(train_labels)\n\n return newsgroups_train, train_labels, newsgroups_test, test_labels, newsgroups_val, val_labels, train_subcat, test_subcat, val_subcat", "def load(train_file, test_file):\n print('\\nLoad the raw training and test set data...')\n y_train, tx_train, ids_train = load_csv_data(train_file)\n y_test, tx_test, ids_test = load_csv_data(test_file)\n print('\\n... 
finished.')\n return y_train, tx_train, ids_train, y_test, tx_test, ids_test", "def load_data():\n # use the load_snippet_pths_test in data writer to get frames and labels\n print('Loading frames and labels...')\n dataset_writer = dataset_factory.get_writer(FLAGS.datasetname)\n writer = dataset_writer()\n\n # retrieve list of test videos\n vid_lst = writer.generate_data_lst_from_split(FLAGS.split_fn)\n if _DEBUG_:\n vid_lst = vid_lst[:3]\n\n # for each video, collect fnames and labels with downsampling\n frames, labels = [], []\n print('Found {:d} videos'.format(len(vid_lst)))\n for vid in vid_lst:\n print(' Loading {}...'.format(vid))\n fname_pths_per_vid, labels_per_vid = writer.load_snippet_pths_test(\n FLAGS.datadir, [vid], FLAGS.labels_fname, FLAGS.bg_lbl,\n FLAGS.ext, FLAGS.frameskip)\n fname_pths_per_vid = [x[0] for x in fname_pths_per_vid]\n\n if _DEBUG_:\n fname_pths_per_vid = fname_pths_per_vid[:200]\n labels_per_vid = labels_per_vid[:200]\n\n frames.append(_load_images(fname_pths_per_vid))\n labels.append(np.array(labels_per_vid))\n return frames, labels", "def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = DataLoader(test_dd_loader, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader", "def get_train_full_examples(self, data_dir):\n raise NotImplementedError()", "def _setup(self, used_sample_id_list):\n self.file_lengths = dict()\n self.len = 0\n\n files_to_remove = []\n for file_path in reversed(self.file_paths):\n data = np.load(file_path)\n\n index_list = self._get_index_list(data, used_sample_id_list)\n if not index_list:\n files_to_remove.append(file_path)\n continue\n\n self.data = data\n self.current_loaded_file = file_path\n self.index_list = index_list\n\n self.index_dict[file_path] = index_list\n\n self.file_lengths[file_path] = int(np.ceil(\n float(len(index_list))/float(self.batch_size)))\n\n self.len += self.file_lengths[file_path]\n\n for f_p in files_to_remove:\n self.file_paths.remove(f_p)\n\n self.has_labels = len(self.data[0]) >= 3", "def load_dataset_in_memory(self, demo_list, hdf5_file, obs_keys, dataset_keys, load_next_obs):\n all_data = dict()\n print(\"SequenceDataset: loading dataset into memory...\")\n for ep in LogUtils.custom_tqdm(demo_list):\n all_data[ep] = {}\n all_data[ep][\"attrs\"] = {}\n all_data[ep][\"attrs\"][\"num_samples\"] = hdf5_file[\"data/{}\".format(ep)].attrs[\"num_samples\"]\n # get obs\n all_data[ep][\"obs\"] = {k: hdf5_file[\"data/{}/obs/{}\".format(ep, k)][()].astype('float32') for k in obs_keys}\n if load_next_obs:\n all_data[ep][\"next_obs\"] = {k: hdf5_file[\"data/{}/next_obs/{}\".format(ep, k)][()].astype('float32') for k in obs_keys}\n # get other dataset keys\n for k in dataset_keys:\n if k in hdf5_file[\"data/{}\".format(ep)]:\n all_data[ep][k] = hdf5_file[\"data/{}/{}\".format(ep, k)][()].astype('float32')\n else:\n all_data[ep][k] = np.zeros((all_data[ep][\"attrs\"][\"num_samples\"], 1), dtype=np.float32)\n\n if \"model_file\" in hdf5_file[\"data/{}\".format(ep)].attrs:\n all_data[ep][\"attrs\"][\"model_file\"] = hdf5_file[\"data/{}\".format(ep)].attrs[\"model_file\"]\n\n return all_data", "def 
create_samples(self):\n for s_id in range(len(self.data[\"sample\"])):\n self.samples.add(Sample(s_id, [self.data[key][s_id] for key in self.data.keys() if key not in WRONG_KEYS],\n self.data[\"label\"][s_id]))", "def load_data(self):\n\n print(\"load chinese training data\")\n self.train_file = os.path.join(self.dataset_dir, \"train.tsv\")\n self.chin_train_df = self.get_df_from_file(self.train_file)\n self.eng_train_df = self.get_df_from_file(os.path.join(self.dataset_dir, \"en_train.tsv\"))\n self.train_num = len(self.chin_train_df)\n\n print(\"load dev data\")\n self.dev_file = os.path.join(self.dataset_dir, \"dev.tsv\")\n self.chin_dev_df = self.get_df_from_file(self.dev_file)\n self.eng_dev_df = self.get_df_from_file(os.path.join(self.dataset_dir, \"en_dev.tsv\"))\n self.dev_num = len(self.chin_dev_df)\n\n print(\"load test data\")\n self.test_file = os.path.join(self.dataset_dir, \"test.tsv\")\n self.chin_test_df = self.get_df_from_file(self.test_file)\n self.chin_test_df[\"labels\"] = self.chin_test_df[\"labels\"].apply(lambda x: x[0])\n self.eng_test_df = self.get_df_from_file(os.path.join(self.dataset_dir, \"en_test.tsv\"))\n self.eng_test_df[\"labels\"] = self.eng_test_df[\"labels\"].apply(lambda x: x[0])\n self.test_num = len(self.chin_dev_df)\n\n print(\"loading Chinese data done\")", "def samples(self, gp):\r\n raise NotImplementedError", "def load_data(self):\n if self.debug:\n print(\"Loading data\")", "def samples(self, samples):\n\n self._samples = samples", "def load_data(self, data_path, use_plus_minus_feats):\n loaded = np.load(data_path + '-targets.npz')\n self.max_num_ans = int(loaded['max_num_ans'])\n self.max_prob_set_id = int(loaded['max_prob_set_id'])\n targets = loaded['targets']\n if use_plus_minus_feats:\n print(\"using plus minus feats!!!\")\n inputs = sp.load_npz(data_path + '-inputs-plus-minus.npz')\n self.encoding_dim = self.max_prob_set_id + 1\n else:\n inputs = sp.load_npz(data_path + '-inputs.npz')\n self.encoding_dim = 2 * self.max_prob_set_id + 1\n self.target_ids = sp.load_npz(data_path + '-targetids.npz')\n\n return inputs, targets", "def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_categorical(y)\n test_indices = np.random.choice(len(X), int(len(X) * float(config['model']['test_size'])), replace=False)\n X_train = np.asarray([e for idx, e in enumerate(X) if idx not in test_indices])\n X_test = np.asarray([e for idx, e in enumerate(X) if idx in test_indices])\n y_train = np.asarray([e for idx, e in enumerate(y) if idx not in test_indices])\n y_test = np.asarray([e for idx, e in enumerate(y) if idx in test_indices])\n return X_train, y_train, X_test, y_test", "def load_data():\r\n train = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'train.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n val = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'valid.txt', ['words', 'pos', 'ignore', 'chunk'])) # testa will be our val set\r\n test = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'test.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n\r\n return train, val, test", "def load_data(self,split='train'):\n raise ValueError('Please implement me!')", "def load_or_generate_data(self) -> None:\n x = np.linspace(0, 10, self.n_samples).reshape(-1, 1)\n y_sin = np.sin(x * 1.5)\n noise = np.random.randn(*x.shape)\n y = (y_sin + noise).reshape(x.shape[0], 1)\n self.x, self.y = x, y", "def _read_samples(self):\n\n logging.debug(\"Start file parsing.\")\n data = 
pd.read_csv(self._source_file, header=None)\n \n data = pd.read_csv(self._source_file, header=None)\n header = pd.read_csv(self._header_file, delimiter=':', skiprows=1, header=None)\n header.columns = ['column', 'column_type']\n\n data.columns = header.column.tolist() + ['attack']\n data['attack'] = data['attack'].str.replace('.', '')\n data['label'] = 1\n data.loc[data['attack'] == 'normal', 'label'] = 0\n\n symbolic_columns = header.loc[header.column_type == ' symbolic.'].column.tolist()\n # print(symbolic_columns)\n\n for scol in symbolic_columns:\n data[scol] = pd.Categorical(data[scol])\n one_hot_cols = pd.get_dummies(data[scol], prefix=scol)\n data = pd.concat([data, one_hot_cols], axis=1)\n\n data = data.drop(columns=symbolic_columns)\n data = data.drop(columns=['attack'])\n\n # data.loc[data.attack != 'normal' , ['attack', 'label']].head(20)\n\n data_normal = data.loc[data['label'] == 0]\n data_abnormal = data.loc[data['label'] == 1]\n\n data_normal_train = data_normal.sample(frac=0.7)\n data_normal_test = data_normal.loc[~data_normal.index.isin(data_normal_train.index)]\n\n data_normal_train = data_normal_train.drop(columns=['label']).values\n data_normal_test = data_normal_test.drop(columns=['label']).values\n data_abnormal = data_abnormal.drop(columns=['label']).values\n \n scaler = MinMaxScaler()\n _ = scaler.fit(data_normal_train)\n data_normal_train = scaler.transform(data_normal_train)\n data_normal_test = scaler.transform(data_normal_test)\n data_abnormal = scaler.transform(data_abnormal)\n \n logging.debug('Normal {}; Train {}; Test{}'.format(data_normal.shape, data_normal_train.shape, data_normal_test.shape))\n logging.debug('Abnormal {}'.format(data_abnormal.shape))\n\n samples = {}\n samples['NORMAL'] = data_normal_train\n samples['NORMAL_TEST'] = data_normal_test\n samples['ABNORMAL_TEST'] = data_abnormal\n\n logging.debug(\"End file parsing.\")\n\n return samples", "def _load_data(self):\n\n from sklearn.datasets import fetch_openml\n mnist = fetch_openml('mnist_784', cache=True)\n # data_x = np.array(final_data_df)\n feat_data = np.array(mnist.data).astype('float32')\n target_data = mnist.target.astype('int64')\n shuffling_index = np.arange(feat_data.shape[0])\n np.random.shuffle(shuffling_index)\n feat_data = feat_data[shuffling_index]\n target_data = target_data[shuffling_index]\n\n cur_data_list = []\n cur_target_list = []\n for i in range(10):\n cur_mask = target_data == i\n cur_data_list.append(feat_data[cur_mask][:500])\n cur_target_list.append(target_data[cur_mask][:500])\n feat_data = np.concatenate(cur_data_list)\n target_data = np.concatenate(cur_target_list)\n\n self.data_x = feat_data\n self.data_y = self.to_one_hot_encoding(target_data)\n self.numerical_idx = np.arange(784)\n self.non_num_idx = None\n\n # Calculate adjacency matrix\n self.meta_inf = self.data_x.astype('float32')\n\n if self.args.graph_type:\n self.adj = self.get_adjacency()", "def load_data(self):\n for set_name in self.image_dir_path:\n if self.verbose:\n print('\\n> Loading data files for the set: ' + set_name)\n\n # image dir\n image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])\n\n # annotation file path\n annot_filepath = os.path.join(self.data_path, self.annotation_path[set_name])\n\n if 'test' in set_name:\n yield load_data_test(set_name, image_dir, annot_filepath, self.verbose)\n else:\n yield self.load_data_trainval(set_name, image_dir, annot_filepath)", "def _load_data(self):\n self.mapper = Mapper()\n 
self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()", "def test_cached_dataloader(self):\n\n v = [\"data\", \"target\", \"model_out_sqnet\"]\n\n for data, target in self.train_loader:\n b, c, h, w = data[v[0]].shape\n assert data[v[1]].shape == (b, )\n assert data[v[2]].shape == (b, 100)\n assert data[v[1]].shape == target.shape", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def load_training_data(self) -> Tuple[List[np.ndarray], np.ndarray]:\n return self._load_set(config.TRAIN_DIR, True)", "def load(self):\n\t\t# Initialize empty list\n\t\tdata_files = []\n\n\t\t# Append the Drusen files to the list\n\t\tfor single_file in os.listdir(self.data_dir):\n\t\t\tdata_files.append(single_file)\n\t\treturn data_files", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def prepare_data():\n #data, label = load_ta_data(), 
load_ta_target()\n data, label = load_own_data(), load_own_target()\n tra_x, tst_x = split_samples(data)\n tra_y, tst_y = split_samples(label)\n return (tra_x, tst_x, tra_y, tst_y)", "def _load_test_data(self):\n self._save_test_data()", "def load_training_set():\n global training_set\n f = gzip.open('mnist.pkl.gz', 'rb')\n train, valid, test = cPickle.load(f)\n [training_set, training_labels] = train\n [validation_set, validation_labels] = valid\n [testing_set, testing_labels] = test\n training_set = np.concatenate((training_set, validation_set))\n f.close()\n np.random.shuffle(training_set)", "def get_dldata(filepath, dlTrainCorpusPath, dlTestCorpusPath, seed=2018, batch_size=16):\r\n\tf = open(\"record/synthetic and academic datasets/testcases_train.pkl\",'rb') #get the testcase ids of train sets and test sets\r\n\ttestcases += pickle.load(f) \r\n\tf.close()\r\n\r\n\tf = open(\"record/synthetic and academic datasets/testcases_test.pkl\",'rb')\r\n\ttestcases += pickle.load(f)\r\n\tf.close()\r\n\t\r\n print(\"produce train dataset...\") \r\n N = 6\r\n num = list(range(N))\r\n for i in num:\r\n train_set = [[], [], [], [], [], []]\r\n for folder_train in folders_train[int(i*len(folders_train)/N) : int((i+1)*len(folders_train)/N)]:\r\n if not folder_train in os.listdir(filepath):\r\n continue\r\n print(\"\\r\"+str(folder_train), end='')\r\n for filename in os.listdir(os.path.join(filepath, folder_train)):\r\n f = open(filepath + folder_train + '/' + filename, 'rb')\r\n data = pickle.load(f)\r\n f.close()\r\n if len(data[0][0]) > MAXLEN:\r\n data[2] = [x for x in data[2] if x <= MAXLEN]\r\n data[0] = cutdata(data[0][0])\r\n if data[0] == None:\r\n continue \r\n for n in range(len(data)):\r\n train_set[n].append(data[n])\r\n train_set[-1].append(folder_train+\"/\"+filename)\r\n f_train = open(dlTrainCorpusPath + \"train_\" + str(i)+ \"_0818.pkl\", 'wb')\r\n pickle.dump(train_set, f_train)\r\n f_train.close()\r\n\r\n del train_set \r\n gc.collect() \r\n\r\n print(\"\\nproduce test dataset...\")\r\n N = 6\r\n num = list(range(N))\r\n for i in num:\r\n test_set = [[], [], [], [], [], []]\r\n for folder_test in folders_test[int(i*len(folders_test)/N) : int((i+1)*len(folders_test)/N)]:\r\n if not folder_test in os.listdir(filepath):\r\n continue\r\n print(\"\\r\"+str(folder_test), end='')\r\n for filename in os.listdir(os.path.join(filepath, folder_test)):\r\n f = open(filepath + folder_test + '/' + filename, 'rb')\r\n data = pickle.load(f)\r\n f.close()\r\n if len(data[0][0]) > MAXLEN:\r\n data[2] = [x for x in data[2] if x <= MAXLEN]\r\n data[0] = cutdata(data[0][0])\r\n if data[0] == None:\r\n continue \r\n for n in range(len(data)):\r\n test_set[n].append(data[n])\r\n test_set[-1].append(folder_test+\"/\"+filename)\r\n \r\n f_test = open(dlTestCorpusPath + \"test_\" + str(i)+ \"_0124.pkl\", 'wb')\r\n pickle.dump(test_set, f_test)\r\n f_test.close()\r\n\r\n del test_set\r\n gc.collect()\r\n return", "def load_crawl():\n\n\tmodule_path = dirname(__file__)\n\twith open(join(module_path, 'data', 'train2.csv')) as csv_file:\n\t\tdata_file = csv.reader(csv_file)\n\t\ttemp = next(data_file)\n\t\tglobal n_samples\n\t\tn_samples = int(temp[0])\n\t\tglobal n_features\n\t\tn_features = int(temp[1])\n\t\tprint \"n samples \" + str((n_samples))\n\t\tprint \"n_features\" + str((n_features))\n\t\ttarget_names = np.array(temp[2:4])\n\t\tdata = np.empty((n_samples, n_features))\n\t\ttarget = np.empty((n_samples,), dtype=np.int)\n\n\t\tfor count, value in enumerate(data_file):\n\t\t\tdata[count] = 
np.asarray(value[:-1], dtype=np.float)\n\t\t\ttarget[count] = np.asarray(value[-1], dtype=np.int)\n\t\t\t#print \"data is \" + str(data[count])\n\t\t\t#print \"target is \" + str(target[count])\n\t\tprint \"Number of target records is \" + str(len(target))\n\t#with open(join(module_path, 'descr', 'train.rst')) as rst_file:\n\t#\tfdescr = rst_file.read()\n\n\treturn Bunch(data=data, target=target,\n\t\t\t target_names=target_names,\n\t\t\t DESCR=None,\n\t\t\t feature_names = ['evalCount', 'setInterval', 'setTimeout', 'link', \n\t\t\t\t\t\t\t 'search', 'exec','escape', 'unescape', 'ratio', \n\t\t\t\t\t\t\t 'emtropyAvg', 'entropyScript', 'longStrings', \n\t\t\t\t\t\t\t 'maxEntropy', 'stringAvg', 'maxLength', 'longVarFunc', \n\t\t\t\t\t\t\t 'stringAssignments', 'stringModFuncsCount', 'eventFuncsCount', \n\t\t\t\t\t\t\t 'domModFuncsCounter', 'suspStrings', 'whiteSpaceRatio', \n\t\t\t\t\t\t\t 'hexaStrings', 'maxNonPrintableCharactersinString', 'lineAvg', \n\t\t\t\t\t\t\t 'iframeCount', 'malTagCount', 'jsLength'])", "def load_data(params):\n train_df = pd.read_csv(os.path.join(*[dataset_path, params['tokenizer'] + '_train_' + params['stemming'] + '.csv']))\n dev_df = pd.read_csv(os.path.join(*[dataset_path, params['tokenizer'] + '_dev_' + params['stemming'] + '.csv']))\n train_data, label_encode = data_prep(train_df, params, if_resample=True)\n dev_data, _ = data_prep(dev_df, params)\n return train_data, dev_data, label_encode", "def sample(self):\n timestamp = time.time()\n try:\n res = requests.get(self.url)\n except requests.exceptions.ConnectionError as error:\n LOG.warning(\"%s %s\", self, error)\n return\n if 199 < res.status_code < 300:\n self.data.append((timestamp, res.json()))\n LOG.debug(\"%s appended data sample\", self)\n else:\n LOG.warning(\"Error %s loading data from %s\", res.status_code, self)\n self.data = self.data[-self.max_samples:]", "def LoadTroikaDataset():\n data_dir = \"./datasets/troika/training_data\"\n data_fls = sorted(glob.glob(data_dir + \"/DATA_*.mat\"))\n ref_fls = sorted(glob.glob(data_dir + \"/REF_*.mat\"))\n return data_fls, ref_fls", "def loadSamples(filename):\n data = []\n with open(filename, encoding=\"utf-8\") as f_obj:\n reader = csv.DictReader(f_obj, delimiter=';')\n for line in reader:\n Tetta = float(line['dTetta'])\n Lambda = float(line['Lambda'])\n item = TSample(Tetta, Lambda)\n data.append(item)\n return data", "def datasets(self):\n pass", "def samples_set(self):\n self.get_samples_set(self.samples_db)\n self.choose_samples(self.chosen_samples_db, self.chosen_hashes)", "def _load_sample_table(self):\n self.sampleTable = pd.read_table(self.config['sampletable'], sep='\\t', dtype=str)\n self.sampleTable.set_index('sampleID', inplace=True)\n self.samples = self.sampleTable.reset_index().to_dict('records')", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def load_samplers(self):\n for sampler in self.gltf.samplers:\n # Use a sane default sampler if the sampler data is empty\n # Samplers can simply just be json data: \"{}\"\n if sampler.minFilter is sampler.magFilter is None:\n self.samplers.append(\n self.ctx.sampler(\n filter=(moderngl.LINEAR_MIPMAP_LINEAR, moderngl.LINEAR),\n repeat_x=False,\n repeat_y=False,\n anisotropy=16.0,\n )\n )\n else:\n self.samplers.append(\n self.ctx.sampler(\n filter=(sampler.minFilter, sampler.magFilter),\n repeat_x=sampler.wrapS in [REPEAT, MIRRORED_REPEAT],\n repeat_y=sampler.wrapT in [REPEAT, MIRRORED_REPEAT],\n anisotropy=16.0,\n )\n )", "def dataloader(self):\n\n # load / 
split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def load_data():\n data = gzip.open(\"mnist.pkl.gz\", \"rb\")\n train_set, valid_set, test_set = cPickle.load(data)\n data.close()\n\n # Combine validation and train folds to recreate the master 60k set.\n new_images = numpy.concatenate((train_set[0], valid_set[0]))\n new_labels = numpy.concatenate((train_set[1], valid_set[1]))\n\n train_set = (new_images, new_labels)\n \n return (train_set, test_set)", "def trainSet(self):\r\n self.currIdx = 0\r\n random.shuffle(self.trainSamples)\r\n self.samples = self.trainSamples[:self.numTrainSamplesPerEpoch]", "def loadData(self):\n # Load the raw CIFAR-10 data\n num_training = 49000\n num_validation = 1000\n num_test = 1000\n subtract_mean = True\n\n cifar10_dir = '/home/parallels/PycharmProjects/Courses/232A/project2/stats232a/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n # Subsample the data\n mask = list(range(num_training, num_training + num_validation))\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = list(range(num_training))\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = list(range(num_test))\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n if subtract_mean:\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n # Transpose so that channels come first\n X_train = X_train.transpose(0, 3, 1, 2)\n X_val = X_val.transpose(0, 3, 1, 2)\n X_test = X_test.transpose(0, 3, 1, 2)\n\n # Package data into a dictionary\n self.data = {\n 'X_train': X_train, 'y_train': y_train,\n 'X_val': X_val, 'y_val': y_val,\n 'X_test': X_test, 'y_test': y_test,\n }", "def load_for_sklearn(self):\n\n labels = [] # string labels\n examples = [] # examples as strings\n\n # document number -> label mapping\n doc2label = n2b2.map_patients_to_labels(\n self.xml_dir,\n self.category)\n\n for f in os.listdir(self.cui_dir):\n doc_id = f.split('.')[0]\n file_path = os.path.join(self.cui_dir, f)\n file_as_string = open(file_path).read()\n\n string_label = doc2label[doc_id]\n int_label = LABEL2INT[string_label]\n labels.append(int_label)\n examples.append(file_as_string)\n\n return examples, labels", "def trainData(self,):\n count = 0\n while count < len(self.RAD_sequences_train):\n RAD_filename = self.RAD_sequences_train[count] \n RAD_complex = loader.readRAD(RAD_filename)\n if RAD_complex is None:\n raise ValueError(\"RAD file not found, please double check the path\")\n ### NOTE: Gloabl Normalization ###\n RAD_data = helper.complexTo2Channels(RAD_complex)\n RAD_data = (RAD_data - self.config_data[\"global_mean_log\"]) / \\\n 
self.config_data[\"global_variance_log\"]\n ### load ground truth instances ###\n gt_filename = loader.gtfileFromRADfile(RAD_filename, \\\n self.config_data[\"train_set_dir\"])\n gt_instances = loader.readRadarInstances(gt_filename)\n if gt_instances is None:\n raise ValueError(\"gt file not found, please double check the path\")\n\n ### NOTE: decode ground truth boxes to YOLO format ###\n gt_labels, has_label, raw_boxes = self.encodeToLabels(gt_instances)\n\n if has_label:\n yield (RAD_data, gt_labels, raw_boxes)\n count += 1\n if count == len(self.RAD_sequences_train) - 1:\n # np.random.seed() # should I add seed here ?\n np.random.shuffle(self.RAD_sequences_train)", "def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData", "def load_data(self):\n raise NotImplementedError()", "def load_data():\n # Load and preprocess data\n sentences, labels = load_data_and_labels()\n sentences_padded = pad_sentences(sentences)\n vocabulary, vocabulary_inv = build_vocab(sentences_padded)\n x, y = build_input_data(sentences_padded, labels, vocabulary)\n return [x, y, vocabulary, vocabulary_inv]", "def load_data():\n\n print('Loading and Visualizing Data ...')\n\n file_name = path.join(getcwd(), 'ex3', 'src', 'data', 'ex3data1')\n data = scipy.io.loadmat(file_name)\n\n # training data stored in arrays X, y\n # y should be a row vector of labels\n return data['X'], data['y'].T[0]", "def get_data():\n samples = []\n for fn in files:\n samples.extend(_json.load(open(fn, \"r\")))\n for sample in samples:\n graph = _nx.readwrite.json_graph.node_link_graph(sample)\n _edges = graph.edges(data=True)\n _nodes = dict(graph.nodes(data=True)).values()\n sources, targets, edges = zip(*[(src, tgt, edge) for src, tgt, edge in _edges])\n edge_features = _tf.constant(_np.array([\n [edge[k] for k in edge_feature_names if k in edge] for edge in edges\n ]))\n edge_sources = _tf.squeeze(_tf.constant(_np.array(sources)))\n edge_targets = _tf.squeeze(_tf.constant(_np.array(targets)))\n node_features = _tf.constant(_np.array([\n [node[k] for k in node_feature_names if k in node]\n for node in _nodes\n ]))\n additional_inputs = (\n _tf.constant(_np.array([\n [node[k] for k in additional_inputs_names if k in node]\n for node in _nodes\n ]))\n if local else\n _tf.constant(_np.array([\n graph.graph[additional_input] for additional_input in additional_inputs_names\n if additional_input in graph.graph\n ]))\n )\n data = GNNInput(\n edge_features=edge_features,\n edge_sources=edge_sources,\n edge_targets=edge_targets,\n node_features=node_features,\n additional_inputs=additional_inputs,\n )\n if local:\n y = _tf.squeeze(_tf.constant(_np.array([\n [node[k] for k in target if k in node] for node in _nodes\n ])))\n else:\n y = _tf.constant(_np.array([\n graph.graph[_target] for _target in target if _target in graph.graph\n ]))\n yield data, y", "def load_data(self,split='train'):\n return load_arrow_data(self.config,split)", "def get_train_examples(self, data_path):\r\n return self.create_examples(self.read_data(data_path), 'train')" ]
[ "0.73953384", "0.713623", "0.71360576", "0.7046007", "0.7029139", "0.7012496", "0.691182", "0.6855628", "0.6850381", "0.68215597", "0.6801296", "0.67527807", "0.6750066", "0.67280877", "0.6723853", "0.66835517", "0.66824424", "0.6658515", "0.66440684", "0.66392", "0.66277313", "0.66109234", "0.6606205", "0.66013336", "0.6571333", "0.6561711", "0.6541203", "0.654101", "0.6540753", "0.6531322", "0.65239555", "0.6521584", "0.64907974", "0.6481237", "0.647172", "0.64470917", "0.64304465", "0.6425416", "0.64048225", "0.6396871", "0.6392381", "0.63876814", "0.63697493", "0.63633966", "0.6360947", "0.63524586", "0.6349445", "0.6341849", "0.63171864", "0.6314966", "0.63143426", "0.6305128", "0.6299672", "0.62929666", "0.62830716", "0.6279925", "0.6272959", "0.627107", "0.6266525", "0.62634623", "0.62556833", "0.6240804", "0.6239866", "0.62385434", "0.6225985", "0.62248987", "0.62229663", "0.62175775", "0.6216021", "0.6215589", "0.62056726", "0.62020195", "0.6201709", "0.6196186", "0.61956805", "0.6191094", "0.61904883", "0.6187196", "0.6185896", "0.6181906", "0.61781067", "0.61698365", "0.61607325", "0.6157221", "0.61475974", "0.61433744", "0.61433744", "0.614098", "0.613679", "0.61352795", "0.61307776", "0.612177", "0.6120281", "0.6119794", "0.6115102", "0.6114358", "0.6111362", "0.61079425", "0.61068517", "0.61057097", "0.6098779" ]
0.0
-1
Build each tree in the 'forest' of trees. After each iteration, evaluate the tree and reweight the input sample such that incorrect events are weighted up and correct events are weighted down
def build(self): # weights to apply to training samples, updated on each # iteration of the boosting algo, normalised to 1 sigWeights = np.ones(self.nSig, dtype=float) bkgWeights = np.ones(self.nBkg, dtype=float) reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights)) sigWeights *= reweight bkgWeights *= reweight # Weight of each tree, strong classifers have higher weight self.treeWeights = np.zeros(self.ntrees, dtype=float) for i in xrange(self.ntrees): # build new tree newTree = Tree() newTree.load(self.sigData,self.bkgData,weights=(sigWeights,bkgWeights)) newTree.build() self.dTrees.append(newTree) # evaluate trees # keep track of each event err = 0.0 sigWrong = np.zeros(self.nSig) bkgWrong = np.zeros(self.nBkg) for j in range(self.nSig): if newTree.classify(np.array((self.sigData[j,])))<0: sigWrong[i]=1 err+=sigWeights[j] for j in range(self.nBkg): if newTree.classify(np.array((self.bkgData[j,])))>0: bkgWrong[i]=1 err+=bkgWeights[j] alpha = self.beta*math.log((1.0-err)/err) print err,alpha corFactor = math.exp(-alpha) wrongFactor = math.exp(alpha) if (err<1e-20 or err >= 0.5): print "SOEMTHING WRONG!!" self.treeWeights[i] = alpha # reweight training samples for j in range(self.nSig): if sigWrong[j]: sigWeights[j]*=wrongFactor else : sigWeights[j]*=corFactor for j in range(self.nBkg): if bkgWrong[j]: bkgWeights[j]*=wrongFactor else : bkgWeights[j]*=corFactor # normalise weights reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights)) sigWeights *= reweight bkgWeights *= reweight
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_trees(tree, forest, X, Y, sample_weight, tree_idx, n_trees,\n n_samples_bootstrap=None):\n # Initialize the number of samples input data\n n_samples = X.shape[0]\n\n # If the samples are drawn with replacement, then,\n # weight the sample weights by the number of times\n # that each sample appears on the indexes\n if forest.bootstrap:\n # Check the sample weights, initializing them to an\n # uniform distribution if they are not provided and,\n # if provided, copying them to properly weight the\n # samples according to the bootstrap indexes\n if sample_weight is None:\n curr_sample_weight = np.ones(n_samples, dtype=np.float64)\n else:\n curr_sample_weight = np.array(sample_weight, dtype=np.float64)\n # Obtain the sample weights\n # from to the bootstrap indexes\n indexes = _generate_sample_indexes(tree.random_state, n_samples,\n n_samples_bootstrap)\n sample_counts = np.bincount(indexes, minlength=n_samples)\n curr_sample_weight *= sample_counts\n # Fit the estimator using the sample weight\n # obtained from the bootstrap indexes\n tree.fit(X, Y, curr_sample_weight)\n # Otherwise, directly use the sample\n # weight provided in the fit method\n else:\n tree.fit(X, Y, sample_weight)\n\n # Return the built tree\n return tree", "def __build_iteration(self) -> None:\n trees = [t for t in self.__trees.keys()]\n for tree in trees:\n heads = []\n branches = self.__trees[tree]\n for i in range(len(branches) - 1, -1, -1):\n if self.__trees.get(tree) and np.random.rand(1)[0] < self.__rate:\n heads += self.__branch_out(branches.pop(i), tree)\n self.__trees[self.__mappings[tree]] += heads\n\n # NB: this can cause errors when seeds spawn near the edge\n if len(self.__trees[self.__mappings[tree]]) == 0:\n logging.info(\"deleting tree with id {}\".format(tree))\n del self.__trees[self.__mappings[tree]]", "def _forest_nodes(self):\n\n self.arbor._grow_tree(self)\n root = self.root\n for link in root._links:\n yield self.arbor._generate_tree_node(self.root, link)", "def __build_tree__(self, features, classes, depth=0):\n\n # TODO: finish this.\n root = None\n if (len(set(classes)) <= 1) and (len(classes) != 0) :\n return DecisionNode(None,None,None,classes[0])\n elif (len(classes) == 0):\n return DecisionNode(None,None,None,2)\n elif depth == self.depth_limit:\n return DecisionNode(None,None,None,max(set(classes), key=list(classes).count))\n else:\n# if depth == 0:\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n feat_shape = features.shape\n sample_list = range(feat_shape[0])\n gains = np.zeros((feat_shape[1]))\n indices = np.zeros((feat_shape[1]))\n for i in range(feat_shape[1]):\n attribute = features[:,i]\n for j in range(20):\n split_indx = int(np.random.choice(sample_list, replace=False))\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n gain = gini_gain(list(classes.reshape(1,-1)[0]),[list(classes_below),list(classes_above)])\n if gain > gains[i]:\n gains[i] = gain\n indices[i] = split_indx\n indx = np.argmax(gains)\n split_indx = int(indices[indx])\n attribute = features[:,indx]\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0] \n features_below = features[idx_below,:]\n features_above = features[idx_above,:]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = 
classes[idx_above,:].reshape(1,-1)[0]\n if (len(classes_below) != 0) and (len(classes_above) != 0):\n root = DecisionNode(None,None,lambda feat:feat[indx] > features[split_indx,indx])\n root.left = self.__build_tree__(features_above, classes_above, depth+1)\n root.right = self.__build_tree__(features_below, classes_below, depth+1)\n return root\n elif (len(classes_below) == 0) and (len(classes_above) != 0):\n return DecisionNode(None,None,None,max(set(classes_above), key=list(classes_above).count))\n elif (len(classes_above) == 0) and (len(classes_below) !=0):\n return DecisionNode(None,None,None,max(set(classes_below), key=list(classes_below).count))\n else:\n return DecisionNode(None,None,None,2)", "def populate_synthetic_tree(self):\r\n logging.debug('populating synthetic tree...')\r\n a_data = self.realData\r\n ndata = a_data.shape[1]\r\n for i in range(ndata):\r\n ptx = a_data[0, i]\r\n pty = a_data[1, i]\r\n leaf = self.root.find_subnode(ptx, pty)\r\n leaf.n_count += 1\r\n\r\n # traverse the tree and update leaf counts\r\n stack = deque()\r\n stack.append(self.root)\r\n while len(stack) > 0:\r\n cur_node = stack.popleft()\r\n if cur_node.n_isLeaf is True: # leaf\r\n cur_node.n_count += self.differ.getNoise(1, 0.5 * self.param.Eps)\r\n else:\r\n stack.append(cur_node.nw)\r\n stack.append(cur_node.ne)\r\n stack.append(cur_node.sw)\r\n stack.append(cur_node.se)", "def build_random_forest(X_train, y_train):", "def build_tree(self, w):\n w_abs = np.abs(w)\n if sum(w_abs) != 1.:\n w_abs = w_abs / sum(w_abs)\n self.w = w_abs\n self.tree = np.zeros(w.shape)\n self._build_node(w_abs, 1)\n self.w_apx = extract_distribution(self.tree)\n\n n_levels = np.ceil(np.log2(len(w)))\n self.lfsr = []\n for n in range(int(n_levels)):\n seed = np.random.randint(1, int(2**(self.lfsr_nbits-n)-1))\n self.lfsr.append(LFSR(self.lfsr_nbits-n, seed))", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. 
Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))", "def __init__(self, dims, treeCount, incAdd = 1, testDims = 3, dimCount = 4, rotCount = 32):\n # Support structures...\n self.cats = dict() # Dictionary from cat to internal indexing number.\n self.treeCount = treeCount\n self.incAdd = incAdd\n \n # Setup the classification forest...\n self.classify = DF()\n self.classify.setInc(True)\n self.classify.setGoal(Classification(None, 1))\n self.classify.setGen(LinearClassifyGen(0, 1, testDims, dimCount, rotCount))\n \n self.classifyData = MatrixGrow()\n self.classifyTrain = self.treeCount\n \n # Setup the density estimation forest...\n self.density = DF()\n self.density.setInc(True)\n self.density.setGoal(DensityGaussian(dims))\n self.density.setGen(LinearMedianGen(0, testDims, dimCount, rotCount))\n self.density.getPruner().setMinTrain(48)\n \n self.densityData = MatrixGrow()\n self.densityTrain = self.treeCount", "def prep_tree_data(self, number: int):\n filename = \"data-before-normalization-{}-out-of-7.csv\".format(number)\n path = str(DATA_PATH.joinpath(\"data-splitted\", filename))\n df = pandas.read_csv(path)\n\n df.drop(df.columns[0], axis=1, inplace=True)\n assessments = [x for x in df.columns.values if x.split(\"_\")[0] == \"assessment\"]\n df['average_score'] = df[assessments].mean(skipna=True, axis=1)\n for assessment in assessments: # somehow he doesn't want to fillna in a batch?\n df[assessment].fillna(df['average_score'], inplace=True)\n clicks = [x for x in df.columns.values if 
x.split(\"_\")[0] == \"vle\"]\n df['vle_click_average'] = df[clicks].mean(skipna=True, axis=1)\n for click in clicks: # somehow he doesn't want to fillna in a batch?\n df[click].fillna(df['vle_click_average'], inplace=True)\n df.dropna()\n\n self.change_oh_cat(\"gender\", df)\n self.change_oh_cat(\"highest_education\", df)\n self.change_oh_cat(\"imd_band\", df)\n self.change_oh_cat(\"age_band\", df)\n self.change_oh_cat(\"disability\", df)\n result_order = {'final_result__Fail': 0, 'final_result__Withdrawn': 2,\n 'final_result__Pass': 1, 'final_result__Distinction': 3}\n self.change_oh_cat(\"final_result\", df, result_order)\n df[\"final_result\"].replace(2, 0, inplace=True)\n df[\"final_result\"].replace(3, 1, inplace=True)\n\n target = df[\"final_result\"]\n df.drop([\"final_result\"], axis=1, inplace=True)\n\n x_train, x_test, y_train, y_test = train_test_split(df, target, test_size=0.1,\n random_state=32, shuffle=True,\n stratify=target)\n\n return x_train, x_test, y_train, y_test", "def sample(tree, i, alpha=0.5, beta=0.5, only_tree=True):\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\" + str(list(n)[0]) + \")\"\n # tree.node[n] = {\"color\": \"black\", \"label\": lab}\n # print tree.nodes()\n\n if only_tree is True:\n tree_new = tree # Alter the input tree\n else:\n #tree_new = tree.subgraph(tree.nodes()) # nx < 2.0\n tree_new = tree.copy() # nx < 2.0\n\n #print(nocopy)\n #old_G = trilearn.graph.junction_tree.get_graph(tree)\n #(subtree, old_separators, probtree) = glib.random_subtree(tree, alpha, beta)\n\n # plotGraph(subtree, directory+\"subtree_\"+str(i)+\".eps\")\n # for n in subtree.nodes():\n # tree_old.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n # if n in tree.nodes():\n # tree.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n\n # plotGraph(tree_old.subgraph(tree_old.nodes()),\n # directory + \"tree(\" + str(i-1) + \")p.eps\")\n\n (_, subtree_nodes, subtree_edges, subtree_adjlist,\n old_separators, prob_subtree) = ss.random_subtree(tree, alpha, beta, i)\n\n (old_cliques,\n new_cliques,\n new_separators,\n P,\n neig) = sample_cond_on_subtree_nodes(i, tree_new, subtree_nodes, subtree_edges, subtree_adjlist)\n\n if only_tree is True:\n return tree_new\n #conn_nodes = set()\n #for clique in new_cliques:\n # conn_nodes |= clique\n\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\"+str(list(n)[0])+\")\"\n # if n in new_cliques:\n # tree.node[n] = {\"color\": \"red\", \"label\": lab}\n # plotGraph(tree.subgraph(tree.nodes()), directory+\"tree(\"+str(i)+\").eps\")\n\n #G = trilearn.graph.junction_tree.get_graph(tree)\n # G.node[i] = {\"color\": \"red\"}\n # for n in old_G:\n # if n in conn_nodes:\n # old_G.node[n] = {\"color\": \"blue\"}\n # G.node[n] = {\"color\": \"blue\"}\n\n # plotGraph(G, directory+\"G\"+str(i)+\".eps\")\n # plotGraph(old_G, directory+\"G\"+str(i-1)+\"p.eps\")\n\n # Proposal kernel\n K_st = None\n if len(subtree_nodes) == 1:\n # There might be two possible subtrees so\n # we calculate the probabilities for these explicitly\n K_st = pdf(tree, tree_new, alpha, beta, i)\n else:\n K_st = prob_subtree\n for c in P:\n K_st *= P[c] * neig[c]\n return tree_new, K_st, old_cliques, old_separators, new_cliques, new_separators", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision 
tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def get_featured_tree(self):\n\n for t in self.tree.get_terminals():\n t.sample_series = self.feature_table[t.name]\n self.feature_tree = self.recursion_tree(self.tree.root)\n for clade in self.feature_tree.find_clades(order='level'):\n clade.depth = 1+len(self.feature_tree.get_path(clade))\n \n #i = 0\n #for clade in self.feature_tree.find_clades(order='level'):\n # clade.ID_num = i \n #clade.abu = np.mean(clade.sample_series.values)\n #clade.domain_otu = clade.sample_series.idxmax()", "def fill(self):\n # Fail fast if num_classes or num_features isn't set.\n _ = getattr(self, 'num_classes')\n _ = getattr(self, 'num_features')\n\n self.training_library_base_dir = getattr(\n self, 'training_library_base_dir', '')\n self.inference_library_base_dir = getattr(\n self, 'inference_library_base_dir', '')\n\n self.bagged_num_features = int(self.feature_bagging_fraction *\n self.num_features)\n\n self.bagged_features 
= None\n if self.feature_bagging_fraction < 1.0:\n self.bagged_features = [random.sample(\n range(self.num_features),\n self.bagged_num_features) for _ in range(self.num_trees)]\n\n self.regression = getattr(self, 'regression', False)\n\n # Num_outputs is the actual number of outputs (a single prediction for\n # classification, a N-dimenensional point for regression).\n self.num_outputs = self.num_classes if self.regression else 1\n\n # Add an extra column to classes for storing counts, which is needed for\n # regression and avoids having to recompute sums for classification.\n self.num_output_columns = self.num_classes + 1\n\n # Allow each tree to be unbalanced by up to a factor of 2.\n self.max_depth = (self.max_depth or\n int(2 * math.ceil(math.log(self.max_nodes, 2))))\n\n # The Random Forest literature recommends sqrt(# features) for\n # classification problems, and p/3 for regression problems.\n # TODO(thomaswc): Consider capping this for large number of features.\n self.num_splits_to_consider = (\n self.num_splits_to_consider or\n max(10, int(math.ceil(math.sqrt(self.num_features)))))\n\n # max_fertile_nodes doesn't effect performance, only training speed.\n # We therefore set it primarily based upon space considerations.\n # Each fertile node takes up num_splits_to_consider times as much\n # as space as a non-fertile node. We want the fertile nodes to in\n # total only take up as much space as the non-fertile nodes, so\n num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))\n # But always use at least 1000 accumulate slots.\n num_fertile = max(num_fertile, 1000)\n self.max_fertile_nodes = self.max_fertile_nodes or num_fertile\n # But it also never needs to be larger than the number of leaves,\n # which is max_nodes / 2.\n self.max_fertile_nodes = min(self.max_fertile_nodes,\n int(math.ceil(self.max_nodes / 2.0)))\n\n # We have num_splits_to_consider slots to fill, and we want to spend\n # approximately split_after_samples samples initializing them.\n num_split_initializiations_per_input = max(1, int(math.floor(\n self.num_splits_to_consider / self.split_after_samples)))\n self.split_initializations_per_input = getattr(\n self, 'split_initializations_per_input',\n num_split_initializiations_per_input)\n\n # If base_random_seed is 0, the current time will be used to seed the\n # random number generators for each tree. 
If non-zero, the i-th tree\n # will be seeded with base_random_seed + i.\n self.base_random_seed = getattr(self, 'base_random_seed', 0)\n\n return self", "def prepare_data_for_g(self):\n\n paths = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n sample, paths_from_i = self.sample(i, self.trees[i], config.n_sample_gen, for_d=False)\n if paths_from_i is not None:\n paths.extend(paths_from_i)\n # for each root, we generate 20 samples, each sample is equal to one path from root to that sample\n # So, we will get maximum (num_root x 20) paths\n # path is a list with length = (N x num_sample), with num_sample = 20\n # paths =[[path_root1_to_sample1],[path_root1_to_sample2],....,[path_root1_to_sample20],\n # [path_root2_to_sample1],[path_root2_to_sample2],....,[path_root2_to sample20]\n # .\n # .\n # [path_rootN_to_sample1],[path_rootN_to_sample2],....,[path_rootN_to_sample20]]\n # get_node_pairs_from_path\n\n node_pairs = list(map(self.get_node_pairs_from_path, paths))\n # node_pairs = [[node pairs for path_root1_to_sample1],[node pairs for path_root1_to_sample2],....,[node pairs for path_root1_to_sample20],\n # [node_pairs for path_root2_to_sample1],[node pairs for path_root2_to_sample2],....,[node pairs for path_root2_to sample20],\n # .\n # .\n # [node pairs for path_rootN_to_sample1],[node pairs for path_rootN_to_sample2],....,[node pairs for path_rootN_to_sample20]]\n\n node_1 = []\n node_2 = []\n for i in range(len(node_pairs)):\n for pair in node_pairs[i]:\n node_1.append(pair[0])\n node_2.append(pair[1])\n # reward = self.sess.run(self.discriminator.reward,\n # feed_dict={self.discriminator.node_id: np.array(node_1),\n # self.discriminator.node_neighbor_id: np.array(node_2)})\n reward = self.discriminator.forward(node_1, node_2)\n return node_1, node_2, reward", "def grow_tree(self):\n\n decision_node = self.root\n internal_env = copy.copy(self.env)\n\n while (not decision_node.is_final) and decision_node.visits > 1:\n\n a = self.select(decision_node)\n\n new_random_node = decision_node.next_random_node(a, self._hash_action)\n\n (new_decision_node, r) = self.select_outcome(internal_env, new_random_node)\n\n new_decision_node = self.update_decision_node(new_decision_node, new_random_node, self._hash_space)\n\n new_decision_node.reward = r\n new_random_node.reward = r\n\n decision_node = new_decision_node\n\n decision_node.visits += 1\n cumulative_reward = self.evaluate(internal_env)\n\n while not decision_node.is_root:\n random_node = decision_node.father\n cumulative_reward += random_node.reward\n random_node.cumulative_reward += cumulative_reward\n random_node.visits += 1\n decision_node = random_node.father\n decision_node.visits += 1", "def create_trees(self, importance_values: List[int]) -> None:\n target_names = [\"Fail\", \"Pass\"]\n trees = defaultdict(list)\n for importance in importance_values:\n for i in range(7):\n print(f'making tree for week {i + 1} with importance {importance}')\n x_train, x_test, y_train, y_test = self.prep_tree_data(i + 1)\n tree = TreeClassifier(x_train, x_test, y_train, y_test, target_names, importance)\n tree.run_model()\n trees[importance].append(tree)\n\n self.trees = trees", "def grow_forest( n, records ):\n dataset = Dataset( records )\n record_number = dataset.size\n\n dts = []\n for i in xrange(n):\n print \"Training\", i\n # pick randomly as many records as the number in the dataset.\n picked_records = []\n for j in xrange( record_number ):\n ind_picked = randint(0, record_number-1)\n picked_records.append( dataset[ 
ind_picked ] )\n picked_records = Dataset( picked_records )\n # train a tree with these records and add it to the forest\n tree = train(picked_records)\n dts.append( tree )\n return dts", "def get_forest(self, verbose):\n _antecessors = []\n for key, cluster in self.clusters.items():\n if cluster.leaf_cluster is True:\n _antecessors.append(cluster.antecessor)\n _antecessors = remdup_preserve_order(_antecessors)\n _antecessors = sorted(_antecessors, key=get_cluster_idx, reverse=True)\n\n _tree_idx = 0\n\n print('Generating forest...')\n print('')\n count= 0.0\n if verbose:\n progress_bar = progress_bar = AnimatedProgressBar(end=len(_antecessors), width=50, \\\n fill='=', blank='.')\n for antecessor in _antecessors:\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n tree = Tree(antecessor, idx = _tree_idx, acorns=self)\n self.forest[_tree_idx] = tree\n _tree_idx += 1\n\n if verbose:\n progress_bar.progress = 100 # Done\n progress_bar.show_progress()\n print('')\n print('')\n\n return", "def train(self):\n max_tuple = self.max_gain()\n # If that gain is 0 then every node should be a pure leaf (hopefully) and you can stop\n while max_tuple.gain != 0:\n max_tuple.node.split(max_tuple.attribute)\n max_tuple = self.max_gain()", "def fit(self, features, classes):\n\n self.root = self.__build_tree__(features, classes)", "def train(self):\n logger.info('TreeTrainer.train')\n set_random_seed(self._config['seed'])\n init_epochs = self._config['learning_init_epochs']\n full_epochs = self._config['learning_full_epochs']\n sample_tree_rate = self._config['learning_sample_tree_rate']\n num_rows = self._num_rows\n\n # Initialize using subsample annealing.\n assert len(self._added_rows) == 0\n schedule = make_annealing_schedule(num_rows, init_epochs,\n sample_tree_rate)\n for action, row_id in schedule:\n if action == 'add_row':\n self.add_row(row_id)\n elif action == 'remove_row':\n self.remove_row(row_id)\n elif action == 'sample_tree':\n edges, edge_logits = self.sample_tree()\n self.set_edges(edges)\n else:\n raise ValueError(action)\n\n # Run full gibbs scans.\n assert len(self._added_rows) == num_rows\n for step in range(full_epochs):\n edges, edge_logits = self.sample_tree()\n self.set_edges(edges)\n for row_id in range(num_rows):\n self.remove_row(row_id)\n self.add_row(row_id)\n\n # Compute optimal tree.\n assert len(self._added_rows) == num_rows\n edges, edge_logits = self.estimate_tree()\n if self._config['learning_estimate_tree']:\n self.set_edges(edges)\n\n self._tree.gc()\n\n return {\n 'config': self._config,\n 'tree': self._tree,\n 'edge_logits': edge_logits,\n }", "def generate(self, approx_n: int) -> Tuple[np.ndarray, float]:\n # number of samples per tree\n n_per_tree = approx_n // self._rf.n_estimators\n n = self._rf.n_estimators * n_per_tree # actual number of samples\n\n # default values (some features won't be set by the below algorithm)\n stds = np.sqrt(self._scaler.var_)\n X = np.random.normal(size=(n, self._dim)) * stds + self._scaler.mean_\n\n # generate n_per_tree samples from each tree\n for i_tree, estimator in enumerate(self._rf.estimators_):\n tree = estimator.tree_\n for i in range(n_per_tree):\n row_index = i_tree * n_per_tree + i\n node_index = 0\n right_bound = np.ones(self._dim) * np.inf\n left_bound = -right_bound\n\n # randomly pick one path in the tree\n while node_index != TREE_LEAF and \\\n tree.children_left[node_index] != tree.children_right[node_index]:\n threshold = tree.threshold[node_index]\n feature_i = 
tree.feature[node_index]\n\n # probability of branching left or right\n left_prob = self._counting_trees[i_tree].left_probability(node_index)\n\n # we pick a value close to the threshold...\n shift = 0.05 * np.abs(np.random.normal()) * stds[feature_i]\n if random.random() <= left_prob:\n value = threshold - shift\n else:\n value = threshold + shift\n # ... but still within the known bounds\n value = min(right_bound[feature_i], max(left_bound[feature_i], value))\n # alternatively, we could keep the value already set, but I believe\n # the chosen method restricts the value to be even closer to the\n # decision boundary\n X[row_index, feature_i] = value\n\n # branching\n if value <= threshold:\n node_index = tree.children_left[node_index]\n right_bound[feature_i] = min(right_bound[feature_i], threshold)\n else:\n node_index = tree.children_right[node_index]\n left_bound[feature_i] = max(left_bound[feature_i], threshold)\n\n return X, self._total_samples / X.shape[0]", "def __init__(self, X_init: np.ndarray, Y_init: np.ndarray, num_trees: int = 30,\n do_bootstrapping: bool = True, n_points_per_tree: int = 0, seed: int = None) -> None:\n super().__init__()\n\n # Set random number generator for the random forest\n if seed is None:\n seed = np.random.randint(10000)\n self.reg_rng = reg.default_random_engine(seed)\n\n self.n_points_per_tree = n_points_per_tree\n\n self.rf = reg.binary_rss_forest()\n self.rf.options.num_trees = num_trees\n\n self.rf.options.do_bootstrapping = do_bootstrapping\n\n self.rf.options.num_data_points_per_tree = n_points_per_tree\n\n self._X = X_init\n self._Y = Y_init\n\n if self.n_points_per_tree == 0:\n self.rf.options.num_data_points_per_tree = X_init.shape[0]\n\n data = reg.default_data_container(self._X.shape[1])\n\n for row_X, row_y in zip(X_init, Y_init):\n data.add_data_point(row_X, row_y)\n\n self.rf.fit(data, self.reg_rng)", "def random_forest(path_m1a, path_non_m1a, repetitions, splits, trees, outfile):\n\n # Path to the output file comprised of a 1:1 ratio of m1A and non-m1A\n m1a_list = fill_list(path_m1a)\n non_m1a_list = fill_list(path_non_m1a)\n\n predictor_number = []\n for predic in predictors_in_use:\n predictor_number.append(predic)\n\n predictor_string = []\n for j in range(len(predictors_in_use)):\n if predictors_in_use[j] != 'pre_base':\n predictor_string.append(predictors_in_use[j])\n if pre_base:\n predictor_string.extend(['A', 'C', 'G', 'T'])\n predictor_number.extend(['A', 'C', 'G', 'T'])\n mean_feature_importance = [0] * (len(predictor_number) - 1)\n else:\n mean_feature_importance = [0] * len(predictor_number)\n\n # List for mean scores\n mean_sensitivity, mean_specificity, mean_ppv, mean_npv, mean_roc_auc, mean_mcc = [], [], [], [], [], []\n\n outfile.write('AUC' + '\\t' + 'Sensitivity' + '\\t' + 'Specificity' + '\\t' + 'PPV' + '\\t' + 'NPV' + '\\t' +\n 'MCC' + '\\t')\n\n predictors_in_use.append('mod_type')\n\n for j in range(repetitions):\n random.shuffle(m1a_list)\n random.shuffle(non_m1a_list)\n\n # Write equal numbers of m1As and non-m1As into a file\n temp_list = []\n for i in range(len(m1a_list)):\n temp_list.append(m1a_list[i].strip().split())\n temp_list.append(non_m1a_list[i].strip().split())\n\n # Build data pandas frame using all columns from the input file\n df = pd.DataFrame.from_records(temp_list, columns=predictor_features)\n # Remove columns that are not used\n for column in df.columns:\n if column not in predictors_in_use:\n df.drop(column, 1, inplace=True)\n\n # Change the modification type to numerical value\n 
df['mod_type'] = df['mod_type'].map({temp_list[0][-1]: 1, temp_list[1][-1]: 0})\n\n # Get categorical values (pre_base). This function creates 4 more columns in the pandas data frame (A, C, G, T).\n # Column 'pre_base' will be removed\n if pre_base:\n one_hot = pd.get_dummies(df['pre_base'])\n df.drop('pre_base', 1, inplace=True)\n df = df.join(one_hot)\n\n df_clean = df.dropna()\n df_clean.describe()\n\n # Use all values except for 'mod_type' as predictors\n predictors = df_clean[predictor_string]\n predictors = predictors.as_matrix()\n\n targets = df_clean.mod_type\n\n skf = StratifiedKFold(n_splits=splits, shuffle=True, random_state=None)\n forest = RandomForestClassifier(n_estimators=trees, criterion='gini', max_depth=None, max_features='sqrt',\n n_jobs=-1, warm_start=True, oob_score=True, random_state=None)\n\n splits_mean_roc, splits_sensitivity, splits_specificity, splits_ppv, splits_npv, splits_mcc = 0, 0, 0, 0, 0, 0\n\n\tif pre_base:\n temp_feature_importance = [0] * (len(predictor_number) - 1)\n else:\n temp_feature_importance = [0] * len(predictor_number)\n\t\n\t# Random forest training + testing\n for train, test in skf.split(predictors, targets):\n x_train, x_test = predictors[train], predictors[test]\n y_train, y_test = targets[train], targets[test]\n\n forest.fit(x_train, y_train)\n test_prediction = forest.predict(x_test)\n\n false_pos, true_pos, _ = roc_curve(y_test, test_prediction)\n roc_auc = auc(false_pos, true_pos)\n splits_mean_roc = splits_mean_roc + roc_auc * 100\n for k in range(len(forest.feature_importances_)):\n temp_feature_importance[k] = temp_feature_importance[k] + forest.feature_importances_[k]\n\n false_pos, true_pos, _ = roc_curve(y_test, test_prediction)\n\n # Build confusion matrix and calculate relevant values for statistical analysis\n cm = pd.crosstab(y_test, test_prediction, rownames=['Actual Class'], colnames=['Predicted Class'])\n TN = cm[0][0]\n FP = cm[0][1]\n FN = cm[1][0]\n TP = cm[1][1]\n sensitivity = (TP / (TP + FN)) * 100\n specificity = (TN / (FP + TN)) * 100\n ppv = (TP / (TP + FP)) * 100\n npv = (TN / (TN + FN)) * 100\n mcc = ((TP * TN - FP * FN) / (sqrt((TP + FP)*(TP + FN)*(TN + FP)*(TN + FN)))) * 100\n\n splits_sensitivity = splits_sensitivity + sensitivity\n splits_specificity = splits_specificity + specificity\n splits_ppv = splits_ppv + ppv\n splits_npv = splits_npv + npv\n splits_mcc = splits_mcc + mcc\n\n # Calculate the averages of n splits\n mean_sensitivity.append(splits_sensitivity / skf.n_splits)\n mean_specificity.append(splits_specificity / skf.n_splits)\n mean_ppv.append(splits_ppv / skf.n_splits)\n mean_npv.append(splits_npv / skf.n_splits)\n mean_mcc.append(splits_mcc / skf.n_splits)\n mean_roc_auc.append(splits_mean_roc / skf.n_splits)\n for l in range(len(temp_feature_importance)):\n mean_feature_importance[l] = mean_feature_importance[l] + temp_feature_importance[l] / skf.n_splits\n\n # Calculate the overall averages of x repetitions\n print('Sensitivity: ', sum(mean_sensitivity) / repetitions)\n print('specificity: ', sum(mean_specificity) / repetitions)\n print('Positive predicted value (PPV): ', sum(mean_ppv) / repetitions)\n print('Negative predicted value (NPV): ', sum(mean_npv) / repetitions)\n print('MCC: ', sum(mean_mcc) / repetitions)\n print('AUC: ', sum(mean_roc_auc) / repetitions)\n\n outfile.write(str((sum(mean_sensitivity) / repetitions)) + '\\t' + str((sum(mean_specificity) / repetitions)) +\n '\\t' + str((sum(mean_ppv) / repetitions)) + '\\t' + str((sum(mean_npv) / repetitions)) + '\\t' +\n 
str((sum(mean_mcc) / repetitions)) + '\\t' + str((sum(mean_roc_auc) / repetitions)) + '\\t')\n for j in range(len(mean_feature_importance)):\n outfile.write(str(mean_feature_importance[j] / repetitions) + '\\t')\n outfile.write('\\n')\n \n\n with open(sys.argv[4], 'wb') as f:\n\tpickle.dump(forest, f)", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. \"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def _iter_build_most_significant_tree(ktree, stree, node):\n sch = find_significant_children(ktree, node)\n if sch is not None:\n small, big = sch\n stree.parents[small] = node\n stree.parents[big] = node\n stree.children[node] = [small, big]\n stree.population[node] = ktree.population[node]\n stree.descriptor[node] = ktree.descriptor[node]\n stree.weights[node] = ktree.weights[node]\n stree.slides[node] = ktree.slides[node]\n _iter_build_most_significant_tree(ktree, stree, small)\n _iter_build_most_significant_tree(ktree, stree, big)", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. 
This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def _initialize_trees(self):", "def brute_tree(XTRAIN,istopTRAIN,XTEST,istopTEST):\n \n ntrain=XTRAIN.shape[0]\n ntest=XTEST.shape[0]\n \n if np.sum(istopTRAIN)==0:\n return 0,[]\n\n cost0=np.zeros(Ngammas*Nreps)\n cost1=np.zeros(Ngammas*Nreps)\n cost0test=np.zeros(Ngammas*Nreps)\n cost1test=np.zeros(Ngammas*Nreps)\n \n precisionTRAIN=np.zeros(Ngammas*Nreps)\n precisionTEST=np.zeros(Ngammas*Nreps)\n recallTEST=np.zeros(Ngammas*Nreps)\n rate=np.zeros(Ngammas*Nreps)\n \n for iii in range(Ngammas):\n \n gamma=GAMMA[iii]\n \n for jjj in range(Nreps):\n \n \"\"\" train a tree using training data with random splitting \"\"\"\n \n tree_hyperparameters['class_weight']={0:1,1:gamma}\n clf=tree.DecisionTreeClassifier(**tree_hyperparameters)\n clf.fit(XTRAIN,istopTRAIN)\n \n \"\"\"\" record costs and precision on validation data \"\"\"\n \n pTRAIN=clf.predict(XTRAIN)\n precisionTRAIN[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==1),sum(pTRAIN))\n cost0[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==0)\n cost1[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 0 and istopTRAIN[i]==1)\n \n \"\"\" record precision on test data \"\"\"\n \n pTEST=clf.predict(XTEST)\n precisionTEST[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1),sum(pTEST))\n recallTEST[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1)/sum(istopTEST)\n cost0test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==0)\n cost1test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 0 and istopTEST[i]==1)\n \n \"\"\" record positive rate on full data \"\"\"\n \n rate[iii*Nreps+jjj]=(sum(pTRAIN)+sum(pTEST))/(ntrain+ntest)\n \n \"\"\" Compute Pareto front for validation data \"\"\"\n \n Pareto = Lower_Convex_Hull(np.concatenate((cost0.reshape(-1,1),cost1.reshape(-1,1)),1))\n \n \"\"\" make some nice plots for whoever is watching \"\"\"\n \n plt.figure(figsize=(10,5))\n plt.subplot(121)\n plt.plot(cost0,cost1,'.')\n plt.plot(cost0[Pareto],cost1[Pareto],'d')\n plt.xlabel('errors on class zero training data')\n plt.ylabel('errors on class one training data')\n\n plt.subplot(122)\n plt.plot(cost0test,cost1test,'.')\n plt.plot(cost0test[Pareto],cost1test[Pareto],'d')\n plt.xlabel('errors on class zero test data')\n plt.ylabel('errors on class one test data')\n plt.show()\n \n plt.figure(figsize=(15,5))\n plt.subplot(131)\n plt.semilogy(precisionTRAIN,rate,'.')\n plt.semilogy(precisionTRAIN[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on training data')\n plt.ylabel('positive rate')\n\n plt.subplot(132) \n plt.semilogy(precisionTEST,rate,'.')\n plt.semilogy(precisionTEST[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('positive rate')\n\n plt.subplot(133) \n plt.plot(precisionTEST,recallTEST,'.')\n 
plt.plot(precisionTEST[Pareto],recallTEST[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('recall on test data')\n plt.show() \n \n return {'cost0':cost0,'cost1':cost1,'cost0test':cost0test,'cost1test':cost1test,'precisionTRAIN':precisionTRAIN,'precisionTEST':precisionTEST,'recallTEST':recallTEST,'rate':rate,'Pareto':Pareto}", "def train(eps, ntrees, min_size, max_splits, nfeats_test, resample=True):\n # TODO your code here\n trees = []\n for _ in range(ntrees):\n # repeatedly add values from the list of expression profiles without removal to a set\n # (so there could be duplicate expression profiles in the set we are creating) until the size of the set\n # is equal to the size of the original list of profiles\n if resample:\n resampled_eps = []\n for _ in range(len(eps)):\n idx = random.randint(0, len(eps) - 1)\n resampled_eps.append(eps[idx])\n trees.append(\n ExpressionDecisionTree.train(resampled_eps, len(resampled_eps), min_size, max_splits, nfeats_test))\n else:\n trees.append(\n ExpressionDecisionTree.train(eps, len(eps), min_size, max_splits, nfeats_test))\n return ExpressionRandomForest(trees)", "def fit(self, features, classes):\n\n # TODO: finish this.\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n #print classes.shape\n feat_shape = features.shape\n num_sample = int(self.example_subsample_rate*feat_shape[0])\n num_attr = int(self.attr_subsample_rate*feat_shape[1])\n #print num_attr, self.attr_subsample_rate\n for i in range(self.num_trees):\n idx = np.random.randint(feat_shape[0],size=num_sample)\n sampled_features = features[idx,:]\n sampled_classes = classes[idx,:].reshape(1,-1)[0]\n sampled_attr = np.random.choice(range(feat_shape[1]),num_attr,replace=False)\n #print sampled_attr, feat_shape[1], num_attr\n self.attr_track.append(sampled_attr)\n tree = DecisionTree(depth_limit=self.depth_limit)\n tree.fit(sampled_features[:,sampled_attr],sampled_classes)\n self.trees.append(tree)", "def tree_query(self, pta_root):\n self.sul.pre()\n curr_node = pta_root\n\n inputs = []\n outputs = []\n\n while True:\n\n if curr_node.children:\n frequency_sum = sum(curr_node.input_frequencies.values())\n if frequency_sum == 0:\n # uniform sampling in case we have no information\n inp = choice(list(curr_node.children.keys()))\n else:\n # use float random rather than integers to be able to work with non-integer frequency information\n selection_value = random() * frequency_sum\n inp = None\n for i in curr_node.input_frequencies.keys():\n inp = i\n selection_value -= curr_node.input_frequencies[i]\n if selection_value <= 0:\n break\n # curr_node.input_frequencies[inp] -= 1\n\n inputs.append(inp)\n out = self.sul.step(inp)\n new_node = curr_node.get_child(inp, out)\n\n if new_node:\n outputs.append(out)\n curr_node = new_node\n else:\n self.sul.post()\n return\n else:\n curr_node = pta_root\n for i, o in zip(inputs, outputs):\n self.curr_node.input_frequencies[i] -= 1\n curr_node = curr_node.get_child(i, o)\n self.sul.post()\n return", "def learn(self,k=10):\n \n gradients = {}\n for i in range(k):\n\n #create TILDE(R) tree object\n tree_i = TILDE(typ=\"regression\",score=\"WV\",max_depth=self.max_depth)\n\n #subsample negatives if too many for each tree\n sampled_neg = deepcopy(self.neg)\n if len(self.neg) > 2*len(self.pos):\n sampled_neg = sample(self.neg,2*len(self.pos))\n\n #compute gradients as I-P\n for ex in self.examples:\n p = sigmoid(self.examples[ex])\n if ex in self.pos:\n gradients[ex] = 1-p\n elif ex in sampled_neg:\n gradients[ex] = 
0-p\n\n #fit tree on gradients\n tree_i.learn(self.data,self.bk,self.target,examples=gradients)\n \n #recompute example values as previous example value + tree_i value\n for ex in self.examples:\n tree_i_value = tree_i.infer(self.data,ex)\n self.examples[ex] += 0.01*tree_i_value\n\n #add tree to boosted_trees\n self.boosted_trees.append(tree_i)", "def _internal_build(self):\n self.nodes = self.__tree.Nodes()\n self.edges = self.__tree.Edges()\n self.augmentedEdges = {}\n for key, val in self.__tree.AugmentedEdges().items():\n self.augmentedEdges[key] = list(val)\n self.root = self.__tree.Root()\n\n seen = set()\n self.branches = set()\n\n # Find all of the branching nodes in the tree, degree > 1\n # That is, they appear in more than one edge\n for e1, e2 in self.edges:\n if e1 not in seen:\n seen.add(e1)\n else:\n self.branches.add(e1)\n\n if e2 not in seen:\n seen.add(e2)\n else:\n self.branches.add(e2)\n\n # The nodes that are not branches are leaves\n self.leaves = set(self.nodes.keys()) - self.branches\n self.leaves.remove(self.root)", "def copyAndCleanTree (self):\n\t\t# TODO: Need to do several things here:\n\t\t# - NoNames\n\t\t# - copy support scores to internal branch names\n\n\t\t## Main:\n\t\t# Copy the tree so as not to damage original\n\t\tete_tree = deepcopy (self.data)\n\n\t\t# set root branch to zero, make change later\n\t\tete_tree.dist = 0.0\n\n\t\t# find max / min branchlength for diagnostic purposes\n\t\t# doesn't use negative or zero branch lengths\n\t\t# Also clean names\n\t\tmax_bl = None\n\t\tmin_bl = None\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (0.0 < n.dist):\n\t\t\t\tif (max_bl is None) or (max_bl < n.dist):\n\t\t\t\t\tmax_bl = n.dist\n\t\t\t\tif (min_bl is None) or (n.dist < min_bl):\n\t\t\t\t\tmin_bl = n.dist\n\t\t\tclean_name = n.name.strip()\n\t\t\tif (clean_name[0] == \"'\") and (clean_name[-1] == \"'\"):\n\t\t\t\tclean_name = clean_name[1:-1]\n\t\t\tn.name = clean_name\n\n\t\t# set all branches to be at least 1/100 of the largest or 1/10 the\n\t\t# smallest, whichever is larger\n\t\tdefault_bl = max (max_bl / 100, min_bl/10)\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (n.dist <= 0.0):\n\t\t\t\tn.dist = default_bl\n\n\t\t# get support values on tree by setting supprt as name\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\t# if an internal node\n\t\t\tif (not n.is_leaf()):\n\t\t\t\tn.name = config.SUPPORT_FMT % n.support\t\n\n\t\t# very hacky - calc appropriate scale bar size and stick on root\n\t\tmagn = int (floor (log10 (max_bl)))\n\t\tscale_size = 10**magn\n\t\tete_tree.scale_size = scale_size\n\n\t\t## Postcondtions & return:int ( floor ( log10 (x)))\n\t\treturn ete_tree", "def MakeAllScenarioTreeNodes(model, bf):\n TreeNodes = dict()\n TreeNodes[\"ROOT\"] = scenario_tree.ScenarioNode(\"ROOT\",\n 1.0,\n 1,\n model.StageCost[1],\n None,\n [model.Pgt[1],\n model.Pgh[1],\n model.PDns[1],\n model.Vol[1]],\n model)\n for b in range(bf):\n ndn = \"ROOT_\"+str(b)\n TreeNodes[ndn] = scenario_tree.ScenarioNode(ndn,\n 1.0/bf,\n 2,\n model.StageCost[2],\n None,\n [model.Pgt[2],\n model.Pgh[2],\n model.PDns[2],\n model.Vol[2]],\n model,\n parent_name=\"ROOT\")", "def tree_apply(tree_arrays, features, extra_features=None, reduce_axis=0) -> np.ndarray:\n qi = np.zeros(features.shape[reduce_axis], dtype=np.int32)\n for current_depth in range(tree_arrays[\"treedepth\"]):\n fi = tree_arrays[\"features\"][qi]\n f = np.choose(\n fi, assure_numpy(features).T if reduce_axis == 0 else assure_numpy(features)\n ) # TODO: try to do it in 
more effective tf-oriented way\n t = tree_arrays[\"thresholds\"][qi]\n # print(qi, fi, f, t)\n # if current_depth == 0:\n # print(fi, f.shape, features.shape, f)\n answer = (f < t) * 1\n new_qi = (\n answer * tree_arrays[\"yes_node\"][qi]\n + (1 - answer) * tree_arrays[\"no_node\"][qi]\n )\n qi = new_qi\n if extra_features is None:\n assert tree_arrays[\"leaf_data\"].shape[1] == 1, \"extra_features needed\"\n leaf_data = tree_arrays[\"leaf_data\"][qi, 0]\n else:\n leaf_data = assure_numpy(\n tree_arrays[\"leaf_data\"][qi, :]\n * (extra_features.T if reduce_axis == 1 else extra_features)\n ).sum(\n axis=1\n ) # TODO: try to do it in more effective tf-oriented way\n return leaf_data", "def mutate(self):\n num_leafs_before = self.num_leafs()\n non_leafs = [v for v, d in self.out_degree() if d > 0]\n box = non_leafs[np.random.choice(len(non_leafs))]\n children = list(self[box])\n for child in children:\n self.remove_subtree(child)\n num_leafs_after = self.num_leafs()\n num_removed = num_leafs_before - num_leafs_after\n self.generate(num_removed)", "def __init__(self,\n lower, upper ,\n fun ,\n max_std, min_std ,\n init_numb_trees = 10 ,\n max_numb_trees = 20 ,\n max_seeds = 10 ,\n min_seeds = 1 ,\n epsilon = 0.1 ,\n epsilon_decay = 0.0 ,\n max_iters = 100 ,\n mut_proba = 0.1 ,\n seed = None ,\n ):\n\n # generates a seed for the random number generator\n if (seed == None):\n self.seed = random.randint(0, 1000)\n else:\n self.seed = seed\n random.seed(self.seed)\n\n # assigns properties of FO algorithm\n self.max_number_trees = max_numb_trees\n self.max_seeds = max_seeds\n self.min_seeds = min_seeds\n self.epsilon = epsilon\n self.epsilon_decay = epsilon_decay\n self.max_iters = max_iters\n self.max_std = max_std\n self.min_std = min_std\n self.mut_proba = mut_proba\n\n # assigns fitness function\n self.evaluate = fun\n\n # stores lower and upper bounds\n self.lower = lower\n self.upper = upper\n\n # evaluates dimension of the optimal problem\n assert ( len(lower)==len(upper) ), \\\n \"'lower' and 'upper' must be of the same dimension.\"\n self.dim = len(lower)\n\n # initialises a forest of trees\n self.population = []\n for _ in range(init_numb_trees):\n tree = Tree(lower, upper)\n if (fun != None):\n self.population.append((fun(tree.vector), tree))\n else:\n self.population.append((sys.float_info.max, tree))\n\n # initialises iterations counter\n self.iteration = 1\n\n # creates a seedlings buffer\n self.seedlings = []", "def sample(self, root, tree, sample_num, for_d):\n\n # all_score = self.sess.run(self.generator.all_score)\n # all_score is a matrix with shape [n_node, n_node]\n all_score = self.generator.all_score\n samples = []\n paths = []\n n = 0\n\n while len(samples) < sample_num:\n current_node = root\n previous_node = -1\n paths.append([])\n is_root = True\n paths[n].append(current_node)\n while True:\n node_neighbor = tree[current_node][1:] if is_root else tree[current_node]\n # print(\"////\", tree[current_node])\n is_root = False\n if len(node_neighbor) == 0: # the tree only has a root\n return None, None\n if for_d: # skip 1-hop nodes (positive samples)\n if node_neighbor == [root]:\n # in current version, None is returned for simplicity\n return None, None\n if root in node_neighbor:\n node_neighbor.remove(root)\n\n # we retrieve embeddings corresponding to current node's neighbors\n # the multiply of g_v with shape (1, 50) and g_vi with shape(1, 50) is a scala\n # to calculate the multiply of g_v and g_vi: we calculate the \"multiplication\" (inner product) between 
embedding_matrix with shape(n_node, 50) and its transpose\n # then saved the result in self.score with shape (n_node, n_node) in dis_torch.py\n # all_score has the shape = (5254, 5254), each row is a list of scala, each scala is the \"multiplication\" (inner product) between a particular node to an other node in the graph\n # due to for each current_node, we have a list of its neighbors, saved in [node_neighbor]\n # we can retrieve a list of scalas that equal to the \"multiplications\" (inner product) between g_v(current node) to its neighbor g_vi\n # to do that, we have:\n relevance_probability = all_score[current_node][node_neighbor]\n\n # convert tensor to numpy array\n relevance_probability = relevance_probability.cpu().detach().numpy()\n\n # finally, applying softmax function, we get the relevance probability of current_node and its neighbors, as formed in the paper\n relevance_probability = utils.softmax(relevance_probability)\n \n # pick a random node from its neighbors based on relevance_probability\n next_node = np.random.choice(node_neighbor, size=1, p=relevance_probability)[0] # select next node\n # print(\"???\", next_node)\n paths[n].append(next_node)\n if next_node == previous_node: # terminating condition\n samples.append(current_node)\n break\n previous_node = current_node\n current_node = next_node\n n = n + 1 # n equal to sample_num\n return samples, paths # for each sample, we get one path from root to that sample", "def fit(self, X, y, sample_weight=None):\n self.forest.fit(X, y.ravel(), sample_weight)\n self.trainy = y.copy()\n ntrees = self.forest.n_estimators\n ntrain = y.shape[0]\n self.train_tree_node_ID = np.zeros([ntrain, ntrees])\n for i in range(ntrees):\n self.train_tree_node_ID[:, i] = self.forest.estimators_[i].apply(X)\n self.train_tree_node_ID = self.train_tree_node_ID.astype('h') # because it is only indexes, store as short int", "def fit_tree_stump_forest(X_train: np.ndarray, y_train: np.ndarray, n_estimators: int) -> RandomForestClassifier:\n clf = RandomForestClassifier(n_estimators=n_estimators)\n clf = clf.fit(X_train, y_train)\n return clf", "def select(self):\n\n def truncate(self):\n \"\"\" Truncates forest to maximum number of trees. \"\"\"\n\n self.population = self.population[:self.max_number_trees]\n\n def SortOnItem(list_, item_loc):\n \"\"\" Sorts based on a given item. 
\"\"\"\n\n templist = [elmt[item_loc] for elmt in list_]\n index = np.argsort(templist)\n return [list_[i] for i in index]\n\n # adds current seedlings to forest\n for tree in self.seedlings:\n\n # if tree does not competes with another existing one, adds it\n if tree not in self.population:\n self.population.append(tree)\n\n # sorts the trees of the forest in ascending values - minimization\n self.population = SortOnItem(self.population, item_loc=0)\n\n # removes unfit trees from forest\n truncate(self)", "def make_tree(self, X_subset, y_subset, depth):\n \n # YOUR CODE HERE\n #self.depth += 1\n if depth < self.max_depth and X_subset.shape[0] >= self.min_samples_split:\n \n best_feature, best_threshold = self.choose_best_split(X_subset, y_subset)\n print('depth = {}, size parent node = {}'.format(depth, len(X_subset)))\n print('best_feature = {}, best_threshold = {}'.format(best_feature, best_threshold))\n new_node = Node(best_feature, best_threshold)\n \n left_child, right_child = self.make_split(best_feature, best_threshold, X_subset, y_subset)\n new_node.left_child = self.make_tree(*left_child, depth+1)\n new_node.right_child = self.make_tree(*right_child, depth+1)\n \n else: # we have a leaf\n new_node = Node(-1, -1) # We flag leaf nodes by setting feature_index and threshold to -1\n new_node.value = self.predicted_values(y_subset)\n \n if self.classification:\n new_node.proba = np.mean(y_subset, axis=0)\n \n # We reduce the depth to compensate for the two calls to self.depth += 1 we make on\n # the same level for left_child and right_child.\n #self.depth -= 1\n \n return new_node", "def fit(self, X:np.ndarray, improved=False):\n if isinstance(X, pd.DataFrame):\n X = X.values\n for i in range(self.n_trees):\n X_sample = X[np.random.choice(X.shape[0], self.sample_size, replace=False), :]\n itree = IsolationTree(self.height_limit)\n itree.fit(X_sample, improved)\n self.trees.append(itree)\n return self", "def __init__(self, n_samples=1000, n_features=4):\n self.n_samples = 1000\n self.n_features = 4\n self.forest = []", "def build_random_trees(rows, n_features, max_depth, min_size, n_trees, random_dataset_size):\n trees = []\n for tree_number in range(n_trees):\n print(\"Building tree number:\", tree_number, \"of\", n_trees)\n # Select random dataset from original dataset\n random_dataset = select_random_rows(rows, random_dataset_size)\n\n # Select random features (columns)\n random_features = []\n for random_feature in range (n_features):\n # generate random index number to pick column\n random_column = randrange(len(rows))\n random_features.append(random_column)\n # generate the random tree with randomly picked features (columns) and a random dataset\n random_tree = build_single_random_tree(random_dataset, random_features, max_depth, min_size, 1)\n # add to list of trees\n trees.append(random_tree)\n return trees", "def weight(tree):\n return root(tree)", "def create_initial_roots(self):\n\n # Trace each tree, one at a time\n initial_roots = list()\n\n for seed in self.all_seed_pixels:\n\n initial_root = rt.Root([seed], len(self.root_dict))\n self.root_dict[len(self.root_dict)] = initial_root\n\n self.all_seed_roots.add(initial_root)\n initial_roots.append(initial_root)\n\n # Iteratively create all child roots from the initial point\n root_queue = initial_roots\n while root_queue:\n for output_root in self.trace_along_children(root_queue.pop(0)):\n root_queue.append(output_root)", "def evaluate_cuts(base_tree, node):\n config = Configuration.config # Collect configuration\n\n N = 
config.normals # Collect predefined set of normal vectors\n N = np.append(N, node.auxiliary_normals, axis=0) # Append partition's bounding-box-aligned vectors as normals\n N = np.unique(np.round(N, 3), axis=0) # Return sorted unique elements of input array_like\n\n trees = []\n for i in range(N.shape[0]):\n trees_of_this_normal = [] # start a list of trees for splits along this normal\n normal = N[i] # current normal\n for plane in bsp_tree.get_planes(node.part, normal): # iterate over all valid cutting planes for the node\n tree, result = bsp_tree.expand_node(base_tree, node.path, plane) # split the node using the plane\n if tree: # only keep the tree if the split is successful\n trees_of_this_normal.append(tree)\n logger.debug(f\"normal index: {i}, origin: {plane[0]}, normal: {plane[1]}, result: {result}\")\n if len(trees_of_this_normal) == 0: # avoid empty list errors during objective function evaluation\n logger.info(f\"normal index: {i}, trees for normal: {len(trees_of_this_normal)}, total trees: {len(trees)}\")\n continue\n # go through each objective function, evaluate the objective function for each tree in this normal's\n # list, fill in the data in each tree object in the list\n for evaluate_objective_func in objectives.values():\n evaluate_objective_func(trees_of_this_normal, node.path)\n trees += trees_of_this_normal\n logger.info(f\"normal index: {i}, trees for normal: {len(trees_of_this_normal)}, total trees: {len(trees)}\")\n\n # go through the list of trees, best ones first, and throw away any that are too similar to another tree already\n # in the result list\n result_set = []\n for tree in sorted(trees, key=lambda x: x.objective):\n if tree.sufficiently_different(node, result_set):\n result_set.append(tree)\n logger.info(f\"{len(result_set)} valid trees\")\n return result_set", "def sample_tree(self):\n logger.info('TreeCatTrainer.sample_tree given %d rows',\n len(self._added_rows))\n SERIES.sample_tree_num_rows.append(len(self._added_rows))\n complete_grid = self._tree.complete_grid\n edge_logits = self.compute_edge_logits()\n assert edge_logits.shape[0] == complete_grid.shape[1]\n assert edge_logits.dtype == np.float32\n edges = self.get_edges()\n edges = sample_tree(complete_grid, edge_logits, edges)\n return edges, edge_logits", "def improve_tree(tree, freq_dict):\n # todo", "def _build_tree(self, X, y, label, feature_names, depth, sample_weights=None):\n mytree = dict()\n # YOUR CODE HERE\n # TODO: Use `_choose_best_feature` to find the best feature to split the X. Then use `_split_dataset` to\n # get subtrees.\n # Hint: You may find `np.unique` is useful.\n # begin answer\n #1. no feature 2. all lables are the same 3. depth exceed 4. 
X is too small\n if len(feature_names)==0 or len(np.unique(y))==1 or depth >= self.max_depth or len(X) <= self.min_samples_leaf: \n return self._leaf_calculation(y, label, sample_weights)\n best_feature_idx, best_feature_val=self._choose_best_feature(X, y, label, sample_weights)\n best_feature_name = feature_names[best_feature_idx]\n feature_names=feature_names[:]\n feature_names.remove(best_feature_name)\n mytree={best_feature_name:{}}\n sub1_X, sub1_y, label1, sub1_sample_weights, sub2_X, sub2_y, label2, sub2_sample_weights = self._split_dataset(X, y, label, best_feature_idx, best_feature_val, sample_weights)\n mytree[best_feature_name][(best_feature_val, True)]=self._build_tree(sub1_X, sub1_y, label1, feature_names, depth+1, sub1_sample_weights)\n mytree[best_feature_name][(best_feature_val, False)]=self._build_tree(sub2_X, sub2_y, label2, feature_names, depth+1, sub2_sample_weights)\n # end answer\n return mytree", "def compute_tree(self,\n verbose=True):\n\n # Tree structure in format {leaf_id: node()}\n self.tree = {}\n # A path is list of integers in (-1, 0, 1) indicating the set of\n # decisions to take through the tree (lower, null, higher)\n # based on the specified labels and cutoff of the nodes.\n paths = [[]]\n path_idx = 0\n start_time = time()\n\n # Each path will point to a leaf that is not yet in the tree.\n while path_idx < len(paths):\n if verbose:\n string = f'{path_idx}/{len(paths)} ({time()-start_time:.0f} s)'\n sys.stdout.write('\\r'+string[:40]+' '*(40-len(string)))\n sys.stdout.flush()\n path = paths[path_idx]\n self.compute_path(path)\n leaf = node(path_idx)\n if self.sub_y_data.size == 0:\n raise NameError('No data on the leaf error')\n if len(path) < self.max_tree_depth or self.max_tree_depth <= 0:\n cutoffs = []\n for split_label in self.labels:\n cutoff, value = self.best_cutoff(split_label)\n cutoffs.append([split_label, cutoff, value])\n cutoffs = sorted(cutoffs, key=lambda x: -x[2])\n split_label, cutoff, value = cutoffs[0]\n leaf.value = value\n if value > self.value_threshold:\n leaf.label = split_label\n leaf.cutoff = cutoff\n leaf.id_lower = len(paths)\n paths.append(path+[-1])\n leaf.id_higher = len(paths)\n paths.append(path+[1])\n if np.isnan(self.sub_split_data[split_label]).any():\n leaf.id_null = len(paths)\n paths.append(path+[0])\n else:\n leaf.is_leaf = True\n ys_with = self.sub_y_data[self.sub_bin_data]\n ys_without = self.sub_y_data[self.sub_bin_data]\n leaf.n_data_with = len(ys_with)\n leaf.n_data_without = len(ys_without)\n if ys_with.size == 0 or ys_without.size == 0:\n leaf.effect = 0\n else:\n leaf.effect = ys_with.mean() - ys_without.mean()\n self.tree[leaf.id] = leaf\n path_idx += 1\n\n if verbose:\n string = f'{path_idx}/{len(paths)} ({time()-start_time:.0f} s)'\n sys.stdout.write('\\r'+string[:40]+' '*(40-len(string)))\n sys.stdout.flush()\n print()", "def expand_tree(self, N=1):\n # type: (int) -> None\n assert self._initialized, 'Search not initialized.'\n for _ in range(N): \n x_rand = self.sample_free()\n x_nearest = self.nearest(x_rand)\n x_new = self.steer(x_nearest, x_rand)\n if self.coll_free(x_nearest, x_new):\n self.index+=1\n X_near = [x for x in self.near(x_new) if self.coll_free(x, x_new)]\n cost_min = self.costs[self.research_index(self.nodes,x_nearest)][1] + self.dist(x_nearest, x_new)\n x_min = x_nearest\n for x in X_near:\n cost = self.costs[self.research_index(self.nodes,x)][1] + self.dist(x, x_new)\n if cost < cost_min:\n cost_min = cost\n x_min = x\n \n self.nodes.append(x_new)\n 
j=self.research_index(self.nodes,x_min)\n self.parents[self.index,j]=1\n self.costs[self.index] = (x_new,self.costs[j][1] + self.dist(x_min, x_new))\n for x in X_near:\n k=self.research_index(self.nodes,x)\n if self.costs[self.index][1] + self.dist(x_new, x) < self.costs[k][1]:\n self.parents[self.index]=np.zeros(self.N)\n self.parents[self.index,k] = 1\n self.costs[k] = (self.costs[k][0],self.costs[self.index][1] + self.dist(x_new, x))", "def boostedTrees(train, \n labels, \n test, \n column_names = None, \n target = 'target',\n max_iterations = 200, \n min_child_weight = 5, \n step_size = 0.2, \n max_depth = 10, \n class_weights = None, \n min_loss_reduction = 0.5,\n verbose = 0,\n outlier_frac=0.0,\n outlier_method='EE',\n rescale_pred=False):\n if outlier_frac > 0:\n train, labels = filter_data(train, labels, cut_outlier_frac = outlier_frac, method = outlier_method, use_caching=False) # remove ourliers\n if column_names is None:\n column_names = range(np.shape(train)[1])\n target = 'target'\n newTrain = np.vstack((train.T, labels)).T\n pdTrain = pd.DataFrame(newTrain, columns = np.append(column_names,target))\n trainFrame = gl.SFrame(pdTrain)\n del newTrain, pdTrain\n pdTest = pd.DataFrame(test, columns = column_names)\n testFrame = gl.SFrame(pdTest)\n del pdTest\n model = gl.boosted_trees_classifier.create(trainFrame, \n target=target, \n max_iterations=max_iterations, \n min_child_weight=min_child_weight,\n step_size = step_size,\n max_depth = max_depth,\n class_weights = class_weights,\n min_loss_reduction = min_loss_reduction,\n verbose = verbose)\n preds = model.predict_topk(testFrame, output_type='probability', k=9)\n preds['id'] = preds['id'].astype(int)\n #some hacky dataframe magic, creates Nx10 matrix (id in first column)\n preds = preds.unstack(['class', 'probability'], 'probs').unpack('probs', '').sort('id')\n\n newPreds = preds.to_dataframe().values\n newPreds = newPreds[:,1:] #remove the id column\n del preds, model\n \n assert np.shape(newPreds)[0] == np.shape(test)[0], \"conversion failed somewhere, size doesn't match\"\n \n if rescale_pred:\n newPreds = rescale_prior(newPreds, np.bincount(labels))\n return newPreds", "def batch_predict(tree_adj, training_signs, edge_weight):\n # since shazoo use the revealed signs as-is, it's ok to use the same name\n training_signs, l2_values, rta_signs = training_signs\n all_nodes_to_predict = set(tree_adj) - set(training_signs)\n logging.debug('batch_predict has %d nodes to predict', len(all_nodes_to_predict))\n methods = ['l2cost', 'rta', 'shazoo']\n # fields are current_closest_hinge, current_sign, current_dst_to_closest_hinge\n node_predictions = {m: defaultdict(lambda: (None, None, 2e9)) for m in methods}\n hinge_value = {m: {} for m in methods}\n total_iter = 0\n while all_nodes_to_predict:\n some_root_of_a_border_tree = next(iter(all_nodes_to_predict))\n hinge_nodes, border_tree_nodes = find_hinge_nodes(tree_adj, edge_weight, training_signs,\n some_root_of_a_border_tree,\n with_visited=True)\n unmarked = border_tree_nodes - hinge_nodes\n for u in hinge_nodes:\n if u in hinge_value['shazoo']:\n continue\n vals, _, status = flep(tree_adj, (training_signs, rta_signs), edge_weight, u)\n hinge_value['shazoo'][u] = sgn(vals[0])\n hinge_value['rta'][u] = sgn(vals[1])\n if not USE_SCIPY:\n continue\n border_tree = build_border_tree_from_mincut_run(status, edge_weight)\n _, E, El, leaves_sign, _, _ = border_tree\n L = {u: l2_values[u] for u in leaves_sign}\n mapped_E, mapped_El_L, mapping = preprocess_edge_and_leaves(E, El, L)\n val = 
solve_by_zeroing_derivative(mapped_E, mapped_El_L, mapping, L,\n reorder=False)[0][u]\n hinge_value['l2cost'][u] = sgn(val)\n predicted_in_that_border_tree = set()\n inner_iter = 0\n # to avoid the same fork being picked again and again\n unmarked.add(some_root_of_a_border_tree)\n while unmarked:\n one_to_predict = next(iter(unmarked))\n hinge_tree = get_hinge_tree(one_to_predict, tree_adj, hinge_nodes)\n other_predicted = set()\n for h, h_val in iteritems(hinge_value['shazoo']):\n if h not in hinge_tree:\n continue\n predicted = propagate_hinge(hinge_tree, h, h_val, node_predictions['shazoo'],\n edge_weight)\n for u in predicted:\n prediction_info = node_predictions['shazoo'][u]\n used_hinge = prediction_info[0]\n node_predictions['rta'][u] = (used_hinge, hinge_value['rta'][used_hinge],\n prediction_info[2])\n if not USE_SCIPY:\n continue\n node_predictions['l2cost'][u] = (used_hinge, hinge_value['l2cost'][used_hinge],\n prediction_info[2])\n other_predicted.update(predicted)\n predicted_in_that_border_tree.update(other_predicted)\n unmarked -= other_predicted\n inner_iter += 1\n if inner_iter > len(tree_adj):\n import time\n logging.critical('batch predict failed in the inner loop')\n persistent.save_var('__fail_{}.my'.format(int(time.time())), (tree_adj, (training_signs, l2_values, rta_signs), edge_weight))\n raise RuntimeError('batch predict failed in the inner loop')\n all_nodes_to_predict -= predicted_in_that_border_tree\n total_iter += 1\n if total_iter > len(tree_adj):\n import time\n logging.critical('batch predict failed in the outer loop')\n persistent.save_var('__fail_{}.my'.format(int(time.time())), (tree_adj, (training_signs, l2_values, rta_signs), edge_weight))\n raise RuntimeError('batch predict failed in the outer loop')\n logging.debug('batch_predict has actually predicted %d nodes', len(node_predictions) - len(training_signs))\n return {m: {u: v[1] for u, v in iteritems(node_predictions[m]) if u not in training_signs}\n for m in methods}", "def fit(self, X:np.ndarray, e=0, improved=False):\n if e>=self.height_limit or len(X)<=1:\n self.n_nodes = self.n_nodes + 1\n return Tree(X,None,None,None,None,'ex')\n else:\n Q = np.arange(X.shape[1], dtype='int')\n q = np.random.choice(Q)\n q_min = X[:,q].min()\n q_max = X[:,q].max()\n if improved:\n p_list = np.random.uniform(q_min,q_max,5)\n best_p = q_max\n x_len = len(X)\n for p in p_list:\n X_left = X[np.where(X[:,q] < p)]\n X_right = X[np.where(X[:,q] >= p)]\n if min(len(X_left), len(X_right))<=5:\n best_p = p\n break\n if min(len(X_left), len(X_right))<x_len:\n best_p = p\n else:\n best_p = np.random.uniform(q_min,q_max)\n X_left = X[np.where(X[:,q] < best_p)]\n X_right = X[np.where(X[:,q] >= best_p)]\n self.n_nodes = self.n_nodes + 1\n self.root = Tree(None,q, best_p, self.fit(X_left,e+1), self.fit(X_right,e+1), 'in')\n return self.root", "def fit(self, features, targets):\r\n \r\n \r\n # if no\r\n # run information gain on each row\r\n # see which attribute has the largest information gain\r\n # split on this attribute\r\n \r\n \r\n self._check_input(features)\r\n\r\n # Creating the root\r\n self.tree = Tree()\r\n \r\n all_features_original = np.asarray(self.attribute_names)\r\n \r\n self._extend_tree(features, targets, all_features_original, all_features_original, self.tree)", "def grow_trees(self, regrow=False):\n if self.forest == [] or regrow:\n mtry = int(math.floor(math.sqrt(len(self.variables))))\n data, trees, var, pred_index = self.data, self.trees, self.variables, self.prediction_index\n attr_fn, dist_classes, 
order, imp = self.attr_fn, self.dist_classes, len(self.data), self.importance_fn\n self.forest = random_forest.RandomForest(data, trees, mtry, var, pred_index, attr_fn, dist_classes, order, imp)\n print self.trees, ' have been grown using a set of ', len(self.variables), ' variables.'\n else:\n print \"Already a forest in place, add regrow=True to override.\"", "def learn(self,k=10):\n \n gradients = {}\n for i in range(k):\n\n #create TILDE(R) tree object\n tree_i = TILDE(typ=\"regression\",score=\"WV\",max_depth=self.max_depth)\n\n #subsample negatives if too many for each tree\n sampled_neg = deepcopy(self.neg)\n if len(self.neg) > 2*len(self.pos):\n sampled_neg = sample(self.neg,2*len(self.pos))\n\n #compute gradients using LMNN loss function\n for ex in self.examples:\n gradient = self.compute_gradient(ex,\n self.pos,\n self.neg,\n self.examples)\n gradients[ex] = gradient\n\n\n #fit tree on gradients\n tree_i.learn(self.data,self.bk,self.target,examples=gradients)\n \n #recompute example values as previous example value - gamma*tree_i value\n for ex in self.examples:\n tree_i_value = tree_i.infer(self.data,ex)\n self.examples[ex] -= 0.01*tree_i_value #learning rate\n \n #add tree to boosted_trees\n self.boosted_trees.append(tree_i)", "def test_forest_dml(self):\n\n Y, T, X, _ = ihdp_surface_B()\n est = AutomatedForestDML(model_y=automl_model_reg(),\n model_t=GradientBoostingClassifier(),\n discrete_treatment=True,\n n_estimators=1000,\n subsample_fr=.8,\n min_samples_leaf=10,\n min_impurity_decrease=0.001,\n verbose=0, min_weight_fraction_leaf=.01)\n est.fit(Y, T, X=X)\n _ = est.effect(X)", "def __init__(self,num_trees=100, depth_limit=5, example_subsample_rate=0.4,\n attr_subsample_rate=0.4):\n\n # TODO: finish this.\n self.num_trees = num_trees\n self.depth_limit = depth_limit\n self.example_subsample_rate = example_subsample_rate\n self.attr_subsample_rate = attr_subsample_rate\n self.classifier = RandomForest(self.num_trees, self.depth_limit, self.example_subsample_rate,\n self.attr_subsample_rate)", "def fit(self, dataset, verbose=False):\n self.inputs = dataset.shape[1]-1\n self.bits = np.ceil(\n np.log2(\n np.abs(\n np.amax(dataset, axis=0) -\n np.amin(dataset, axis=0)))).astype(np.int32)\n self.is_neg = (np.amin(dataset, axis=0) < 0).astype(np.int8)\n\n self.trees = []\n\n for i in range(self.n_trees):\n if verbose:\n print(\"... creating tree {}\".format(i))\n\n # as subsample is an expensive operation, we will only perform it if it\n # reduces the dataset substantially\n\n if self.sample_size and self.sample_size < 0.3 * dataset.shape[0]:\n if verbose:\n print(\"... 
generated subsample of size {}\".format(self.sample_size))\n sample = self.subsample(dataset)\n else:\n sample = dataset\n\n self.trees.append(fit_parallel(\n self.max_depth, self.min_size, sample, True))", "def build_mht(eddies_data,\n\tprune_depth = 2,\n\twithin_bounds = lambda x: True,\n\tdo_lookahead = True,\n\tgate_dist = 150,\n\tprev_data = None,\n\tprune_mode = 'parent'):\n\n\troots = []\n\tdepth = 0\n\n\tif prev_data is not None:\n\t\troots = prev_data['roots']\n\t\tdepth = prev_data['start_depth']\n\t\tprune_depth = prev_data['prune_depth']\n\t\tgate_dist = prev_data['gate_dist']\n\n\tif depth >= len(eddies_data):\n\t\treturn roots\n\n\tfor dataset in eddies_data[depth:]:\n\t\tstart_time = time.mktime(time.localtime())\n\t\tprint dataset[0]\n\t\tmat = scipy.io.loadmat(dataset[1], struct_as_record=False)\n\t\teddies = mat['eddies'][0]\n\t\tpnodes = get_nodes_at_depth(roots, depth - 1)\n\t\tfor i in range(len(eddies)):\n\t\t\teddy = Eddy(eddies[i].Stats[0,0],\n\t\t\t\teddies[i].Lat[0,0],\n\t\t\t\teddies[i].Lon[0,0],\n\t\t\t\teddies[i].Amplitude[0,0],\n\t\t\t\teddies[i].ThreshFound[0,0],\n\t\t\t\teddies[i].SurfaceArea[0,0],\n\t\t\t\teddies[i].Date[0,0],\n\t\t\t\teddies[i].Cyc[0,0],\n\t\t\t\teddies[i].MeanGeoSpeed[0,0],\n\t\t\t\teddies[i].DetectedBy[0])\n\t\t\tif not within_bounds(eddy):\n\t\t\t\tcontinue\n\t\t\tmk_node_and_add(eddy, depth, pnodes, roots, gate_dist)\n\n\t\tif do_lookahead:\n\t\t\tlookahead.add_lookahead_nodes(pnodes, gate_dist)\n\n\t\tif depth >= prune_depth:\n\t\t\tif prune_mode == 'parent':\n\t\t\t\tprune_parent(roots, depth-prune_depth, gate_dist)\n\t\t\telse:\n\t\t\t\tprune(roots, depth-prune_depth, gate_dist)\n\t\tdepth += 1\n\t\tprint 'time:', time.mktime(time.localtime())-start_time\n\n\treturn roots", "def _generate(self, input_row, output_row):\n self._fullInput = input_row\n self.power = self.settings.population_count\n self._fullOutput = output_row\n for one_forest in range(self.power):\n self._forests.append(OneForest(self.settings, input_row=self._fullInput, full_output=self._fullOutput))", "def create_tree(self):\n feature_indices = []\n for i in self.estimator.tree_.feature:\n n_features = self.n_features\n if self.n_features > 1 or (self.n_features == 1 and i >= 0):\n feature_indices.append([str(j) for j in range(n_features)][i])\n indentation = 1 if self.target_language in ['java', 'js',\n 'php', 'ruby'] else 0\n return self.create_branches(\n self.estimator.tree_.children_left,\n self.estimator.tree_.children_right,\n self.estimator.tree_.threshold,\n self.estimator.tree_.value,\n feature_indices, 0, indentation)", "def _apply_tree_policy(self, root, state):\n visit_path = [root]\n working_state = state.clone()\n current_node = root\n while not working_state.is_terminal() and current_node.explore_count > 0:\n if not current_node.children:\n # For a new node, initialize its state, then choose a child as normal.\n legal_actions = working_state.legal_actions()\n # Reduce bias from move generation order.\n self._random_state.shuffle(legal_actions)\n player_sign = -1 if working_state.current_player() != self.player else 1\n current_node.children = [SearchNode(action, player_sign)\n for action in legal_actions]\n\n if working_state.is_chance_node():\n # For chance nodes, rollout according to chance node's probability\n # distribution\n outcomes = working_state.chance_outcomes()\n action_list, prob_list = zip(*outcomes)\n action = self._random_state.choice(action_list, p=prob_list)\n chosen_child = next(c for c in current_node.children\n if c.action == 
action)\n else:\n # Otherwise choose node with largest UCT value\n chosen_child = max(\n current_node.children,\n key=lambda c: c.uct_value(current_node.explore_count, self.uct_c, # pylint: disable=g-long-lambda\n self.child_default_value))\n\n working_state.apply_action(chosen_child.action)\n current_node = chosen_child\n visit_path.append(current_node)\n\n return visit_path, working_state", "def mutate(self):\n for forest in self._forests:\n forest.mutate(self._fullInput)", "def generate_trees(self, dx, dy, width, height, freq):\n\n forest_colors = get_forest_colors()\n entities = []\n for y in range(dy, height):\n for x in range(dx, width):\n if not self.tiles[x][y].occupied:\n self.tiles[x][y].spawnable = False\n\n # Generate forest tiles\n if randint(1, 100) < freq:\n\n if abs(self.owner.world_tendency) * 33 > randint(1, 100):\n name = \"dead tree\"\n char = tilemap()[\"dead_tree\"][randint(0, (len(tilemap()[\"dead_tree\"]) - 1))]\n wall_component = Wall(name)\n wall = Entity(x, y, 2, char, forest_colors[randint(0, 4)], name, wall=wall_component)\n self.tiles[x][y].add_entity(wall)\n wall_component.set_attributes(self)\n entities.append(wall)\n else:\n name = \"tree\"\n char = tilemap()[\"tree\"][randint(0, (len(tilemap()[\"tree\"]) - 1))]\n wall_component = Wall(name)\n wall = Entity(x, y, 2, char, forest_colors[randint(0, 4)], name, wall=wall_component)\n self.tiles[x][y].add_entity(wall)\n wall_component.set_attributes(self)\n entities.append(wall)\n\n return entities", "def generate(self, num_leafs):\n leafs = self.get_leafs()\n for _ in range(num_leafs):\n box = leafs[np.random.choice(len(leafs))]\n leafs.remove(box)\n ch0, ch1 = box.split()\n self.add_edge(box, ch0)\n self.add_edge(box, ch1)\n leafs.append(ch0)\n leafs.append(ch1)", "def trees(\n self,\n tracked_samples=None,\n *,\n sample_lists=False,\n root_threshold=1,\n sample_counts=None,\n tracked_leaves=None,\n leaf_counts=None,\n leaf_lists=None,\n ):\n # tracked_leaves, leaf_counts and leaf_lists are deprecated aliases\n # for tracked_samples, sample_counts and sample_lists respectively.\n # These are left over from an older version of the API when leaves\n # and samples were synonymous.\n if tracked_leaves is not None:\n tracked_samples = tracked_leaves\n if leaf_counts is not None:\n sample_counts = leaf_counts\n if leaf_lists is not None:\n sample_lists = leaf_lists\n tree = Tree(\n self,\n tracked_samples=tracked_samples,\n sample_lists=sample_lists,\n root_threshold=root_threshold,\n sample_counts=sample_counts,\n )\n return TreeIterator(tree)", "def update_tree(root, executed_acts, total_rew):\n root.value = max(total_rew, root.value)\n root.visits += 1\n new_nodes = 0\n\n node = root\n for step, act in enumerate(executed_acts):\n if act not in node.children:\n node.children[act] = Node()\n new_nodes += 1\n node = node.children[act]\n node.value = max(total_rew, node.value)\n node.visits += 1\n\n return new_nodes", "def train_ml(self, samples):\n\t\t\n\t\tfor node in self.nodes:\n\t\t\tparents = node.parents\n\t\t\tfor pv in node.cpt.probTable:\n\t\t\t\tparentSamples = [s for s in samples if False not in \\\n\t\t\t\t\t\t\t\t[s[p] == pv[i] for i,p in enumerate(parents)]]\n\t\t\t\tfor i,val in enumerate(node.cpt.values()):\n\t\t\t\t\tif len(parentSamples) == 0:\n\t\t\t\t\t\tnode.cpt.probTable[pv][i] = 0\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tvalSamples = [s for s in parentSamples if s[node.name]==val]\n\t\t\t\t\tnode.cpt.probTable[pv][i] = float(len(valSamples)+1) / 
\\\n\t\t\t\t\t\t\t\t\t\t\t\t(len(parentSamples)+\\\n\t\t\t\t\t\t\t\t\t\t\t\t\tlen(node.cpt.values()))", "def evaluate_random_forest(y_test, y_pred):", "def makeWeights(_files,treeName,category,_outputFile, BINS, PT, ETA):\n\tROOT.gROOT.SetBatch(1)\n\n\t#treeName = 'histoMuFromTk/fitter_tree'\n\t_trees = dict( [ ( name, _file.Get(treeName) ) for name,_file in _files.iteritems()] )\n\t#Check if in both files are the tree\n\tfor _tree in _trees.itervalues():\n\t\tif not _tree:\n\t\t\treturn None\n\t\n\thistos = {}\n\tweights = {}\n\n\t#-- The ':' token in A:B read as 'B conditioned to A' (look this unregular order)\n\t#-- The categories are datamembers which can be 1 or 0, a condition;\n\t#-- if we want to weight the pt-distribution of all probes for the L1Mu3 trigger\n\t#-- category, we must decided with respect which muonID category (Glb, TMLSAT, ...), then\n\t#-- reduce to a subset which the muonID category == 1 and calculate the weight of the\n\t#-- pt-distribution\n\t#-- The category variable can be A:B:C:..., the last one is the only one which we don't \n\t#-- want to reduce (see find category)\n\tcondCategory = ''\n\tstoreCategory = 'weight'\n\tif category.find(':') != -1:\n\t\t_catList = category.split(':')\n\t\t#-- This for is to include the quality cuts and other possible categories\n\t\tfor i in xrange(len(_catList)-1):\n\t\t\tcondCategory += ' && '+_catList[i]+' == 1 '# BUG------> && '+triggerCat+' == 1' \n\t\t\tstoreCategory += '_'+_catList[i]\n\n\tinstName = lambda k,pt : PT+'>>h_'+category+name+str(k)+'(50,'+str(pt[0])+','+str(pt[1])+')'\n\tcuts = lambda pt,eta: PT+' >= '+str(pt[0])+' && '+PT+' <'+str(pt[1])+\\\n\t\t\t' && '+ETA+' >= '+str(eta[0])+' && '+ETA+' < '+str(eta[1])+condCategory\n\t#print cuts #--------------------------> PROVISONAL: PARECE QUE SE RECUPERAN LOS ESPECTROS DE LOS PASSING\n\t #--------------------------> NO DE LOS ALL\n\tk = 0\n\tfor i in xrange(len(BINS.__getattribute__(PT))-1):\n\t\tpt = (BINS.__getattribute__(PT)[i],BINS.__getattribute__(PT)[i+1])\n\t\tfor j in xrange(len(BINS.__getattribute__(ETA))-1):\n\t\t\teta = (BINS.__getattribute__(ETA)[j],BINS.__getattribute__(ETA)[j+1])\n\t\t\tfor name,_t in _trees.iteritems(): \n\t\t\t\tN = _t.Draw( instName(k,pt),cuts(pt,eta) )\n\t\t\t\thistos[name] = ROOT.gDirectory.Get('h_'+category+name+str(k))\n\t\t\tprint ' \\033[1;34mDoing bin'+str(k)+' '+PT+'=('+str(pt[0])+','+str(pt[1])+') '+ETA+'=('+str(eta[0])+','+str(eta[1])+')\\033[1;m'\n\t\t\tswap = histos['numerator'].Clone(category+'_bin'+str(k))\n\t\t\tdummy = swap.Divide(histos['denominator'])\n\t\t\tweights[category+'_bin'+str(k)] =( (eta[0],eta[1]), (pt[0],pt[1]), ROOT.gDirectory.Get(category+'_bin'+str(k)) )\n\t\t\t#Acura els limits\n\t\t\tweights[category+'_bin'+str(k)][2].GetXaxis().SetLimits( pt[0], pt[1] ) \n\t\t\t#weights[category+'_bin'+str(k)][2].SetNormFactor(1) \n\t\t\tk += 1\n\t_out = ROOT.TFile(_outputFile,'RECREATE')\n\tfor name,(etaBins,ptBins,histo) in weights.iteritems():\n\t\thisto.Write()\n\t_out.Close()\t\n\treturn weights", "def test_random_forest_max_depth_parameter(params, X_train, X_test, y_train, y_test):", "def build_tree(df) -> dict:\r\n # initialize empty tree as a dictionary\r\n tree = {}\r\n # find column associated with best information gain\r\n next_att = best_inf_gain_att(df)\r\n # next_att = find_winner(df)\r\n tree[next_att] = {}\r\n\r\n # for each value of the attribute at hand\r\n for val in np.unique(df[next_att]):\r\n # get new table\r\n subtable = get_subtable(df, next_att, val)\r\n # get information on new y 
characteristics\r\n sub_val, sub_val_counts = np.unique(subtable.iloc[:, -1], return_counts=True)\r\n\r\n # if there's only one label value left, assign it\r\n if 1 == sub_val.shape[0]:\r\n tree[next_att][val] = sub_val[0]\r\n # if there are no more columns except the label column, assign the most frequent label\r\n elif 1 == subtable.columns.shape[0]:\r\n tree[next_att][val] = sub_val[np.argmax(sub_val_counts)]\r\n # otherwise add node recursively\r\n else:\r\n tree[next_att][val] = build_tree(subtable)\r\n\r\n return tree", "def train(\n cls, params: Dict[str, Any], ematrix: EMatrix, num_boost_round: int = 10\n ) -> \"EBooster\":\n start_params = {\n \"max_depth\": 5,\n \"learning_rate\": 0.3,\n \"splitgax\": False,\n \"transposed_feature\": False,\n \"progress_callback\": None,\n }\n start_params.update(params)\n\n reduce_axis = 1 if start_params[\"transposed_feature\"] else 0\n use_extra = ematrix.extra_features is not None\n\n if start_params[\"splitgax\"] and ematrix.gax is None:\n ematrix.gax = make_gax(ematrix.features, axis=reduce_axis)\n\n forest = []\n bias = np.zeros(ematrix.label.shape)\n features = ematrix.features\n for r in range(num_boost_round):\n print(f\"\\n{r} round\", file=sys.stderr)\n tree = build_tree(\n start_params,\n EMatrix(\n features=ematrix.features,\n label=ematrix.label,\n bias=bias,\n extra_features=ematrix.extra_features,\n gax=ematrix.gax,\n splitgax=start_params[\"splitgax\"],\n ),\n # split_maker=split_maker,\n transposed_feature=start_params[\"transposed_feature\"],\n unbalanced_penalty=start_params[\"unbalanced_penalty\"],\n reduce_axis=reduce_axis,\n use_extra=use_extra,\n )\n # print(\"tree ok, bias shape = {}\".format(bias.shape), file=sys.stderr)\n tree_arrays = init_arrays(\n root=tree,\n n=init_id(tree),\n weights_num=ematrix.extra_features.shape[1 - reduce_axis]\n if ematrix.extra_features is not None\n else 1,\n )\n bias_delta = tree_apply(\n tree_arrays=tree_arrays,\n features=features,\n extra_features=ematrix.extra_features,\n reduce_axis=reduce_axis,\n )\n # print(\"apply ok, bias delta shape = {}\".format(bias_delta.shape), file=sys.stderr)\n bias = bias + np.reshape(bias_delta, newshape=bias.shape)\n forest.append((tree, tree_arrays))\n # print(\"forest appended\", file=sys.stderr)\n if start_params[\"progress_callback\"] is not None:\n start_params[\"progress_callback\"](r, num_boost_round)\n\n return cls(forest)", "def test_uniform_search_produces_forest(graph):\n g = graph()\n \n if hasattr(g, 'edge_weight'):\n edge_weight = g.edge_weight\n else:\n edge_weight = defaultdict(int)\n\n # Create a visitor that will produce a forest\n class ForestVisitor(TraversalVisitor):\n def __init__(self):\n TraversalVisitor.__init__(self)\n self.forest = yaupon.Forest()\n \n def tree_edge(self, e):\n # This will throw from inside \"traverse\" if a cycle is created\n self.forest.add_edge(e[0],e[1])\n\n forest_visitor = ForestVisitor()\n traverse(g.vertices(), forest_visitor, \n uniform_cost_generator(g, edge_weight))", "def fit_transform(self, X, y, sample_weight=None):\n # Instantiate rule ensemble generator and set parameters\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators, silent=(self.verbose>0),\n max_depth=self.max_depth, n_jobs=self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn('This base_estimator implementation has not been tested in a while!')\n self.base_estimator.set_params(n_estimators=self.n_estimators, verbose=self.verbose,\n 
max_depth=self.max_depth, n_jobs=self.n_jobs)\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn('This base_estimator implementation has not been tested in a while!')\n self.base_estimator.set_params(n_estimators=self.n_estimators, verbose=self.verbose,\n max_depth=self.max_depth, n_jobs=self.n_jobs)\n else:\n raise NotImplementedError\n \n # Name features\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = ['f'+str(i) for i in range(X.shape[1])]\n \n # Check input\n X = check_array(X)\n \n # Generate and extract rules\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError() # TODO: work out how to incrementally train XGB\n \n if self.verbose > 0:\n print('fitting trees')\n \n # For each tree: get leaf numbers and map them to [0, num leaves]\n # before one-hot encoding them\n n_values = \"auto\"\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall(r'([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n \n if self.verbose > 0:\n print('setup encoding')\n \n # Scale and centre linear features\n X = self.ext_scaler.fit_transform(X)\n \n if self.linear_features:\n # Linear features must be scaled to have same weighting as an average rule\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.fit_transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))\n \n if self.verbose > 0:\n print('encoded')\n \n # Fit sparse linear model to rules (and optionally linear features)\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty, class_weight=self.class_weight,\n warm_start=self.warm_start, solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n \n if self.verbose > 0:\n print('fitted')\n \n # Mask features with zero co-efficients\n # self.feature_mask_ = np.arange(self.LR.coef_.size)\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n \n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return X_transform", "def predict(self, X_new):\n trees = self.all_trees\n num_observations = X_new.shape[0]\n pred = np.zeros((len(trees), num_observations))\n np.random.randint(len(trees))\n for draw, trees_to_sum in enumerate(trees):\n new_Y = np.zeros(num_observations)\n for tree in trees_to_sum:\n new_Y += [tree.predict_out_of_sample(x) for x in X_new]\n pred[draw] = new_Y\n return pred", "def tree_gen(self, data, attri_set):\n # Create a new node.\n newNode = Node()\n\n # If data set is already classified, return a leaf node.\n if data.is_positive():\n newNode.set_leaf(True)\n return newNode\n elif data.is_negative():\n newNode.set_leaf(False)\n return newNode\n\n # If attribute set is empty, can't be classified.\n if not attri_set:\n type = data.mark_most()\n newNode.set_leaf(type)\n return newNode\n\n # Find a best decision attribute.\n # If it is a continuous attribute, it should have a best mid point.\n choice, midpoint = self.find_best(data, attri_set)\n if 
choice == -1:\n print \"error\"\n return None\n print \"best choice:\", Attribute(choice), midpoint\n newNode.attri = Attribute(choice)\n\n # Create a new attribute set,\n # which doesn't contain the best choice just find.\n new_attri_set = deepcopy(attri_set)\n new_attri_set.remove(choice)\n\n # Create branches.\n for val in self.attri_list[choice]:\n data_v = data.filter(choice, val, midpoint=midpoint)\n if data_v.empty():\n # If branch has empty data, create a leaf child.\n childNode = Node()\n childNode.set_leaf(data.mark_most()) # set parent's most\n newNode.children.append(childNode)\n else:\n # Recursively generate decision child tree.\n childNode = self.tree_gen(data_v, new_attri_set)\n newNode.children.append(childNode)\n\n return newNode", "def predict(tree, dataSet):\n\n\tcount = 0 #used for tracking how many times we've correctly classified our data\n\tfor index in range(len(dataSet)):\n\t\tdataPoint = dataSet[index]\n\t\tprint \"Current dataPoint: \", dataPoint.retrieve('id').getValue()\n\t\tnode = 0\n\t\tfor i in tree.fields[tree.nType].keys():\n\t\t\tif NodeType.ROOT == tree.getNodeType(i):\n\t\t\t\tnode = i #basically an index\n\t\t\t\tprint \"root node: \", node\n\t\t\t\tbreak\n\t\t\t#keep going down the tree until no children exist, then get output classification\n\n\t\tprint \"node type\", tree.getNodeType(node)\n\n\t\twhile tree.getNodeType(node) != NodeType.LEAF:\n\t\t\tsplitVal = tree.getSplitValue(node)\n\t\t\tprint \"tree split value: \", splitVal\n\t\t\tsplitAttribute = tree.getSplitAtribute(node)\n\t\t\tprint \"tree split attribute: \", splitAttribute\n\t\t\tval = dataPoint.retrieve(splitAttribute).getValue()\n\t\t\tif val == None:\t\t\n\t\t\t\tval = np.median(retrieveDataFromColumn(dataSet, splitAttribute))\n\n\t\t\tprint \"data point value for split attribute: \", val\n\t\t\tif FeatureType.CONTINUOUS == tree.getSplitType(node): \n\t\t\t\tif val >= splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\t\tprint \"greater than\", \"going to next node\", node\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"lesser than\", \"going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\telif FeatureType.DISCRETE == tree.getSplitType(node):\n\t\t\t\tif val != splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"not equal\", \" going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"equal\", \"goint to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\tleafClass = tree.getMajorityClassification(node)\n\t\tprint \"leaf classification: \", leafClass\n\t\tleafAttribute = tree.getSplitAtribute(node)\n\t\tprint \"leaf attribute: \", leafAttribute\n\t\t\n\t\t# Need to fill the last column (which is the same column as leafAttribute) with the \n\t\t# value of the leaf (i.e. 
classify as winner or not)\n\t\tdataPoint.retrieve(leafAttribute).addValue(leafClass)\n\t\tprint \"prediction is: \", dataPoint.retrieve(leafAttribute).getValue()\n\n\tcreateFileCSV(dataSet)\n\treturn dataSet", "def _generate_feature_tree(self, features):\n # build a set of all features, including top-level features and\n # dependencies.\n self.top_level_features = defaultdict(list)\n\n # find top-level features and index them by entity id.\n for f in self.all_features:\n _, num_forward = self.entityset.find_path(self.target_eid, f.entity.id,\n include_num_forward=True)\n if num_forward or f.entity.id == self.target_eid:\n self.top_level_features[f.entity.id].append(f)", "def DecisionTreeAlgorithm(df, mltask, counter = 0, min_samples = 2, max_depth = 5, random_subspace = None):\n\n if counter == 0:\n global COLUMN_HEADERS, FEATURE_TYPE\n COLUMN_HEADERS = df.columns\n FEATURE_TYPE = hf.determine_type_of_feature(df)\n data = df.values\n else:\n data = df\n \n if (check_purity(data)) or (len(data) < min_samples) or (counter == max_depth):\n leaf = create_leaf(data, mltask)\n return leaf\n \n else:\n counter += 1\n \n potential_splits = get_potential_split(data, random_subspace)\n split_column,split_value = determine_best_split(data, potential_splits, mltask)\n data_below,data_above = split_data(data,split_column,split_value)\n \n if (len(data_below) == 0) or (len(data_above) == 0):\n leaf = create_leaf(data, mltask)\n return leaf\n \n feature_name = COLUMN_HEADERS[split_column]\n type_of_feature = FEATURE_TYPE[split_column]\n if type_of_feature == 'continuous':\n question = '{} <= {}'.format(feature_name,split_value)\n else:\n question = '{} = {}'.format(feature_name,split_value)\n sub_tree = {question:[]}\n \n yes_answer = DecisionTreeAlgorithm(data_below, mltask, counter, min_samples, max_depth, random_subspace)\n no_answer = DecisionTreeAlgorithm(data_above, mltask, counter, min_samples, max_depth, random_subspace)\n \n if yes_answer == no_answer :\n sub_tree = yes_answer\n else :\n sub_tree[question].append(yes_answer)\n sub_tree[question].append(no_answer)\n \n return sub_tree", "def forestPandas(data, resCol, maxDepth=None, percentage=70, numfeats = 15, fsize=5, selected=None):\n indices = data.index.tolist()\n trainingSets = {}\n percent = float(percentage)/100\n split = int(percent * len(indices) + 0.5)\n cols = data.columns.tolist() \n for i in range(fsize + 1):\n if selected == None:\n np.random.shuffle(cols)\n selected = cols[:15]\n selected.append(\"spam\")\n np.random.shuffle(indices)\n trainingSets[i] = {}\n trainingSets[i][\"data\"]= data[selected].loc[indices[:split + 1]]\n trainingSets[i][\"tree\"]= buildTreePandas(trainingSets[i][\"data\"], resCol, maxDepth=maxDepth) \n return trainingSets", "def train_decision_tree():\n train_model(DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DECISION_TREE_DEFAULT_DATASET,\n model_file_name=DECISION_TREE_DEFAULT_MODEL)", "def rebalance(self):\r\n points = [p for p in self.tree]\r\n if points:\r\n self.tree = kd_factory.generate(points)\r\n self.paint()", "def decision_tree(df, dt_dict, curr_node,\r\n prev_attr = None, align_dir = None,\r\n depth = -1, no_data = False,\r\n ensemble = None):\r\n \r\n class_count = get_class_count(df)\r\n # get the class label counts for the given dataframe\r\n leaf_node_bool = check_leaf_node(df)\r\n # this function helps to check if we have a leaf node\r\n if leaf_node_bool:\r\n # if its leaf node\r\n curr_node[align_dir] = df['class'].values[0]\r\n # assign the leaf node 
value\r\n elif no_data:\r\n # if we are out of data points\r\n class_counts = df['class'].value_counts()\r\n # get the class counts\r\n curr_node[align_dir] = np.argmax(class_counts)\r\n # assign the majority class of prev node\r\n else:\r\n entropy_values_series = impurity.entropy_calc(df, ensemble = ensemble)\r\n # calculate the entropy values for each feature\r\n info_gain_dict = {}\r\n # empty dict for information gain\r\n for feature in entropy_values_series.index:\r\n # iterate over each features\r\n impurity.information_gain_calc(df, feature, info_gain_dict)\r\n # function call for information gain calculation\r\n for f in entropy_values_series.index:\r\n # iterate over each feature\r\n information_gain = entropy_values_series[f] - info_gain_dict[f][1]\r\n # calculation of information gain\r\n info_gain_dict[f] = (info_gain_dict[f][0], information_gain)\r\n # update the information gain dict\r\n best_feature = sorted(info_gain_dict, key = lambda x: info_gain_dict[x][1])[-1]\r\n # get the best feature on which to be splitted.\r\n #print(best_feature)\r\n node_value = (best_feature, info_gain_dict[best_feature], class_count[0],\r\n class_count[1])\r\n # get the node value\r\n \r\n if not leaf_node_bool and align_dir:\r\n # growing the tree\r\n if depth == 0:\r\n if node_value[2] > node_value[3]:\r\n node_value = 0\r\n else:\r\n node_value = 1\r\n curr_node[align_dir] = node_value\r\n return 0\r\n else:\r\n curr_node[align_dir] = {node_value:{}}\r\n curr_node = curr_node[align_dir][node_value]\r\n else:\r\n dt_dict[node_value] = {}\r\n curr_node = dt_dict[node_value]\r\n \r\n data_split(df, best_feature, info_gain_dict, \r\n dt_dict, curr_node, depth)\r\n # function call for data split\r", "def infer(self,data,examples,k=10):\n\n example_values = []\n for example in examples:\n example_value = 0\n for i in range(k):\n tree_i = self.boosted_trees[i]\n tree_i_value = tree_i.infer(data,example)\n example_value += tree_i_value\n example_values.append(sigmoid(example_value))\n\n return example_values", "def make_thenThan_classifier(window=2, n_estimators=20):\n taggedSentTotal = len(brown.tagged_sents())\n\n thenSentTags = []\n thanSentTags = []\n\n for sentIndex, tagged_sent in enumerate(brown.tagged_sents()):\n sent = [x[0] for x in tagged_sent]\n if ('then' in sent):\n thenInd = sent.index('then')\n tags = [x[1] for x in tagged_sent[max(0,thenInd-window):\n min(thenInd+window+1,len(tagged_sent))\n ]\n ]\n #tags.extend(0)\n thenSentTags.append(tags)\n if ('than' in sent):\n thanInd = sent.index('than')\n tags = [x[1] for x in tagged_sent[max(0,thanInd-window):\n min(thanInd+window+1,len(tagged_sent))\n ]\n ]\n #tags.extend(1)\n thanSentTags.append(tags)\n\n # Convert the lists of then and than tag contexts to pandas dataframes, which we'll\n # then feed to our classifier for training\n\n thenData = pd.DataFrame(thenSentTags)\n thenData.columns = [\"Slot{}\".format(x-window) for x in thenData.columns]\n thenData['th{e|a}n'] = 0\n\n thanData = pd.DataFrame(thanSentTags)\n thanData.columns = [\"Slot{}\".format(x-window) for x in thanData.columns]\n thanData['th{e|a}n'] = 1\n\n allData = thenData.append(thanData)\n allData.drop('Slot0', axis=1, inplace=True)\n \n \"\"\"\n # convert categorical labels to one-hot encoding/dummy variables and specify the input\n # and output of the model\n\n dummyData = pd.get_dummies(allData)\n\n X = dummyData.loc[:,\"Slot-{}_'\".format(window):]\n y = dummyData['th{e|a}n']\n \n # now select and fit a model\n clf = 
RandomForestClassifier(n_estimators=n_estimators)\n clf.fit(X,y)\n \"\"\"\n \n # The block below is an attempt, following this SO post, to circumvent the nasty\n # dummy encoding wrangling I had to do previously\n # http://stackoverflow.com/questions/38574222/onehotencoded-features-causing-error-when-input-to-classifier/38587625#38587625\n \n X = allData.loc[:,:'Slot2']\n y = allData['th{e|a}n']\n clf = Pipeline([\n ('transformer', DictVectorizer()),\n ('estimator', RandomForestClassifier()),\n ]\n )\n print(y.head())\n print(X.head())\n clf.set_params(estimator__n_estimators=n_estimators).fit(X,y)\n \n return (clf, dummyData, allData)", "def decision_tree_prediction(example, root, attributes):\n # If reached a leaf node, return the label\n if isinstance(root, str):\n return root\n\n # Attribute that was split on\n attribute = root.attribute\n # Column of the attribute that was split on\n i = get_index(attribute, attributes)\n testValue = example[i]\n # Check every child to see what path the example must take in the decision tree\n for child in root.children:\n if isinstance(child.branch, int):\n if int(testValue) <= child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n elif isinstance(child.branch, float):\n if int(testValue) > child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n# -----------------------------------------------Naive Bayes-------------------------------------------------\n # Naive bayes\n elif child.branch == \"Naive\":\n yes_probability = child.histogram[0]\n no_probability = child.histogram[2]\n i = 0\n for feature in example:\n if feature == \"yes\" or feature == \"no\":\n continue\n if i == 0 or i == 2 or i == 4 or i == 10 or i == 11 or i == 12:\n j = 0\n # Its a float so check\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= float(key) and j == 0:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= float(key) and j == 0:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n else:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][feature]\n no_probability = no_probability * child.histogram[3][attribute_index[i]][feature]\n i += 1\n if yes_probability > no_probability:\n return \"yes\"\n elif no_probability >= yes_probability:\n return \"no\"\n# -----------------------------------------------End Naive Bayes-------------------------------------------------\n else:\n if child.branch == testValue:\n return decision_tree_prediction(example, child.nextTree, attributes)", "def get_random_depth_sample(n=8, depths=list(range(2,26,2)), num_samples=100):\n\n def get_states(start):\n frontier = [start]\n frontier_set = {start}\n explored = set()\n\n states = [False for _ in range(len(depths))]\n while not all(states):\n node = frontier.pop(0)\n frontier_set.remove(node)\n explored.add(node)\n\n children = node.get_children()\n\n # It's necessary to shuffle children to get a truly random sample; otherwise, the first child (always\n # produced from the parent by the same action) produced at a certain depth will always be selected,\n # and children produced by other actions will never be selected\n shuffle(children)\n\n for 
child in children:\n if child not in frontier_set and child not in explored:\n frontier_set.add(child)\n frontier.append(child)\n child.path_cost = node.path_cost+1\n index = depths.index(child.path_cost) if child.path_cost in depths else None\n if index is not None and not states[index]:\n states[index] = {'start': start.sequence, 'end': child.sequence}\n\n return states\n\n depth_sample = [[] for depth in range(len(depths))]\n\n for _ in range(num_samples):\n start = list(range(1,n+2))\n shuffle(start)\n start = PuzzleState(start, path_cost=0)\n\n states = get_states(start)\n print('\\rSet ' + str(_+1) + ' of ' + str(num_samples) + ' complete', end='', flush=True)\n list(map(list.append, depth_sample, states))\n\n return depth_sample", "def _buildtree(self):\n self.pricetree = np.zeros((self.steps+1,self.steps+1))\n self.pricetree[0][0] = self.p\n for j in range(self.steps):\n for i in range(j+1):\n self.pricetree[j+1][i+1] = self.pricetree[j][i]*self.down\n self.pricetree[j+1][0] = self.pricetree[j][0]*self.up", "def classify(self, event):\n\n results = np.zeros(self.ntrees, dtype=float)\n\n for i,dt in enumerate(self.dTrees):\n results[i] = self.treeWeights[i]*dt.classify(event)\n\n return np.sum(results)*(1.0/np.sum(self.treeWeights))", "def test_leaf_node_kernel_unbalanced(unbalanced_data):\n X, y = unbalanced_data\n\n forest = RandomForestClassifierKernel(\n n_estimators=3,\n kernel_type='leaves',\n sampling_method='supervised',\n random_state=123)\n K = forest.fit_transform(X, y)\n\n K_expected = np.array([[1., 1., 1., 0.33333333, 0.33333333,\n 0.33333333, 0.33333333, 0.33333333, 0.33333333],\n [1, 1., 1., 0.33333333, 0.33333333,\n 0.33333333, 0.33333333, 0.33333333, 0.33333333],\n [1., 1., 1., 0.33333333, 0.33333333,\n 0.33333333, 0.33333333, 0.33333333, 0.33333333],\n [0.33333333, 0.33333333, 0.33333333, 1., 1.,\n 0.66666667, 0.33333333, 0.33333333, 0.33333333],\n [0.33333333, 0.33333333, 0.33333333, 1., 1. ,\n 0.66666667, 0.33333333, 0.33333333, 0.33333333],\n [0.33333333, 0.33333333, 0.33333333, 0.66666667,\n 0.66666667, 1., 0.33333333, 0.66666667,\n 0.33333333],\n [0.33333333, 0.33333333, 0.33333333, 0.33333333,\n 0.33333333, 0.33333333, 1., 0.66666667, 1.],\n [0.33333333, 0.33333333, 0.33333333, 0.33333333,\n 0.33333333, 0.66666667, 0.66666667, 1.,\n .66666667],\n [0.33333333, 0.33333333, 0.33333333, 0.33333333,\n 0.33333333, 0.33333333, 1, 0.66666667, 1]])\n np.testing.assert_allclose(K, K_expected)", "def apply(self, tree):\n raise NotImplementedError()", "def main():\n t = []\n for i in range(1, 19):\n t.append(i)\n config = Config()\n config.DEBUG = True\n config['time_list']=t\n config['load_graphs_from_xml']=True\n\n defaults = dict(num_samples=100, max_depth=5, run=0, num_runs=1,num_trees=100, stat='logrank', split_stat='logrank', num_folds=None,exp='flood',\n verbose=True, folds=None, load_graphs_from_xml=True, time_list=t)\n for key, value in defaults.items():\n cur_value = config.get(key, None)\n # print(\"key={0}:cur_value={1}\".format(key,cur_value))\n config[key] = value if cur_value is None else cur_value\n config.DEBUG = True\n #loadExperimentFile(config, filename=experiment_Path, experiment_name=\"flood\")\n #config.parseOpts()\n print('Start Grow Forest')\n growForest(config)" ]
[ "0.7096626", "0.6913265", "0.6533237", "0.6523216", "0.63476014", "0.6344354", "0.61861014", "0.61825013", "0.6182119", "0.61536306", "0.6152425", "0.61442596", "0.608408", "0.60710186", "0.60495037", "0.6023182", "0.60046387", "0.59995925", "0.5979982", "0.59393513", "0.58764184", "0.5813951", "0.58050126", "0.58028626", "0.57565653", "0.57516474", "0.5750774", "0.5747398", "0.574207", "0.5739161", "0.57245266", "0.57060385", "0.56989336", "0.56930065", "0.56912553", "0.5684589", "0.5673266", "0.56701976", "0.5647857", "0.56404054", "0.5633812", "0.56328773", "0.55875516", "0.5585107", "0.5578644", "0.55683225", "0.55631745", "0.5559836", "0.5544083", "0.5528315", "0.55194145", "0.5505783", "0.5501843", "0.54815066", "0.54741585", "0.5472268", "0.54708517", "0.5455015", "0.54541785", "0.5453153", "0.5451999", "0.54503185", "0.5446896", "0.5443783", "0.5427043", "0.54180187", "0.5415151", "0.5412209", "0.54121125", "0.54102343", "0.54063505", "0.5405268", "0.5402916", "0.53885806", "0.53855705", "0.53837705", "0.5375257", "0.53715795", "0.5353663", "0.5348773", "0.53416896", "0.5337811", "0.533703", "0.5329573", "0.5328586", "0.5327953", "0.531998", "0.53164065", "0.53158337", "0.53155357", "0.53090763", "0.5308876", "0.5308595", "0.5306276", "0.5301589", "0.52947456", "0.5286283", "0.5285774", "0.5280237", "0.5275712" ]
0.75269973
0
classify a given event. Iterates over each tree in the forest and then returns the weighted average of the results
def classify(self, event): results = np.zeros(self.ntrees, dtype=float) for i,dt in enumerate(self.dTrees): results[i] = self.treeWeights[i]*dt.classify(event) return np.sum(results)*(1.0/np.sum(self.treeWeights))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _classify(tree, x):\n # YOUR CODE HERE\n # begin answer\n feature_name=list(tree.keys())[0] #first element\n secondDict=tree[feature_name] \n key=x.loc[feature_name] #extract value from x\n for key_val in secondDict:\n feature_val=key_val[0]\n valueOfKey=secondDict[(feature_val, key>=feature_val)]\n if isinstance(valueOfKey,dict):\n label=_classify(valueOfKey,x)\n else:\n label=valueOfKey\n return label\n # end answer", "def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs", "def classify(observation,tree):\n if tree.results!=None:\n return tree.results\n else:\n v=observation[tree.col]\n branch=None\n if isinstance(v, int) or isinstance(v, float):\n if v>=tree.value:\n branch=tree.tb\n else: \n branch=tree.fb\n else:\n if v==tree.value: \n branch=tree.tb\n \n else: \n branch=tree.fb\n return classify(observation,branch)", "def predict(tree, dataSet):\n\n\tcount = 0 #used for tracking how many times we've correctly classified our data\n\tfor index in range(len(dataSet)):\n\t\tdataPoint = dataSet[index]\n\t\tprint \"Current dataPoint: \", dataPoint.retrieve('id').getValue()\n\t\tnode = 0\n\t\tfor i in tree.fields[tree.nType].keys():\n\t\t\tif NodeType.ROOT == tree.getNodeType(i):\n\t\t\t\tnode = i #basically an index\n\t\t\t\tprint \"root node: \", node\n\t\t\t\tbreak\n\t\t\t#keep going down the tree until no children exist, then get output classification\n\n\t\tprint \"node type\", tree.getNodeType(node)\n\n\t\twhile tree.getNodeType(node) != NodeType.LEAF:\n\t\t\tsplitVal = tree.getSplitValue(node)\n\t\t\tprint \"tree split value: \", splitVal\n\t\t\tsplitAttribute = tree.getSplitAtribute(node)\n\t\t\tprint \"tree split attribute: \", splitAttribute\n\t\t\tval = dataPoint.retrieve(splitAttribute).getValue()\n\t\t\tif val == None:\t\t\n\t\t\t\tval = np.median(retrieveDataFromColumn(dataSet, splitAttribute))\n\n\t\t\tprint \"data point value for split attribute: \", val\n\t\t\tif FeatureType.CONTINUOUS == tree.getSplitType(node): \n\t\t\t\tif val >= splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\t\tprint \"greater than\", \"going to next node\", node\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"lesser than\", \"going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\telif FeatureType.DISCRETE == tree.getSplitType(node):\n\t\t\t\tif val != splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"not equal\", \" going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"equal\", \"goint to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\tleafClass = tree.getMajorityClassification(node)\n\t\tprint \"leaf classification: \", leafClass\n\t\tleafAttribute = tree.getSplitAtribute(node)\n\t\tprint \"leaf attribute: \", leafAttribute\n\t\t\n\t\t# Need to fill the last column (which is the same column as leafAttribute) with the \n\t\t# value of the leaf (i.e. 
classify as winner or not)\n\t\tdataPoint.retrieve(leafAttribute).addValue(leafClass)\n\t\tprint \"prediction is: \", dataPoint.retrieve(leafAttribute).getValue()\n\n\tcreateFileCSV(dataSet)\n\treturn dataSet", "def predict(self,x):\n preds = [tree.predict(x) for tree in self.forest]\n if self.classify:\n cls_counts = [0] * self.param['numClasses']\n for p in preds:\n cls_counts[p] += 1\n return argmax(cls_counts)\n else:\n return sum(preds) / (len(preds)*1.0)", "def classify(series, tree):\n feature = tree[0]\n subtree = tree[1]\n\n answer = series[feature]\n response = subtree[answer]\n\n if type(response) != list: #base case\n return subtree[answer]\n else:\n return classify(series, response) #recursive case", "def classifyAll(self,tree,data):\n\n\t\tresults = []\n\t\tfor i in range(len(data)):\n\t\t\tresults.append(self.classify(tree,data[i]))\n\t\treturn results", "def classify(self, tree, datapoint):\n\n\t\tif type(tree) == type(\"string\"):\n\t\t\treturn tree\n\t\telse:\n\t\t\ta = list(tree.keys())[0]\n\t\t\tfor i in range(len(self.featureNames)):\n\t\t\t\tif self.featureNames[i]==a:\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\ttry:\n\t\t\t\tt = tree[a][datapoint[i]]\n\t\t\t\treturn self.classify(t,datapoint)\n\t\t\texcept:\n\t\t\t\treturn None", "def classify(tree, input):\n\n #if this is a leaf node, return its value\n if tree in [True, False]:\n return tree\n\n #otherwise this tree consists of an attribute to split on\n #and a dict whose keys are values of that attribute\n #and whose values are subtrees to consider next\n attribute, subtree_dict = tree\n\n subtree_key = input.get(attribute) #None if input is missing\n\n if subtree_key not in subtree_dict: #if no subtree for key, use None\n subtree_key = None\n\n subtree = subtree_dict[subtree_key] # choose the appropriate subtree\n return classify(subtree, input) # and use it to classify the input", "def classify(observations, tree, dataMissing=False):\n\n def classifyWithoutMissingData(observations, tree):\n if tree.results != None: # leaf\n return tree.results\n else:\n v = observations[tree.col]\n branch = None\n #if isinstance(v, int) or isinstance(v, float):\n #if v >= tree.value: branch = tree.trueBranch\n #else: branch = tree.falseBranch\n #else:\n if v == tree.value: branch = tree.trueBranch\n else: branch = tree.falseBranch\n return classifyWithoutMissingData(observations, branch)\n\n\n def classifyWithMissingData(observations, tree):\n if tree.results != None: # leaf\n return tree.results\n else:\n v = observations[tree.col]\n if v == None:\n tr = classifyWithMissingData(observations, tree.trueBranch)\n fr = classifyWithMissingData(observations, tree.falseBranch)\n tcount = sum(tr.values())\n fcount = sum(fr.values())\n tw = float(tcount)/(tcount + fcount)\n fw = float(fcount)/(tcount + fcount)\n result = collections.defaultdict(int) # Problem description: http://blog.ludovf.net/python-collections-defaultdict/\n for k, v in tr.items(): result[k] += v*tw\n for k, v in fr.items(): result[k] += v*fw\n return dict(result)\n else:\n branch = None\n #if isinstance(v, int) or isinstance(v, float):\n # if v >= tree.value: branch = tree.trueBranch\n # else: branch = tree.falseBranch\n #else:\n if v == tree.value: branch = tree.trueBranch\n else: branch = tree.falseBranch\n return classifyWithMissingData(observations, branch)\n\n # function body\n if dataMissing:\n return classifyWithMissingData(observations, tree)\n else:\n return classifyWithoutMissingData(observations, tree)", "def traverse_tree(self, example):\n current_node = self.root\n 
while not current_node.is_leaf:\n feature_value = example[self.get_feature_index(current_node.feature)]\n current_node = current_node.children[feature_value]\n\n return current_node.pred", "def decision_tree_prediction(example, root, attributes):\n # If reached a leaf node, return the label\n if isinstance(root, str):\n return root\n\n # Attribute that was split on\n attribute = root.attribute\n # Column of the attribute that was split on\n i = get_index(attribute, attributes)\n testValue = example[i]\n # Check every child to see what path the example must take in the decision tree\n for child in root.children:\n if isinstance(child.branch, int):\n if int(testValue) <= child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n elif isinstance(child.branch, float):\n if int(testValue) > child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n# -----------------------------------------------Naive Bayes-------------------------------------------------\n # Naive bayes\n elif child.branch == \"Naive\":\n yes_probability = child.histogram[0]\n no_probability = child.histogram[2]\n i = 0\n for feature in example:\n if feature == \"yes\" or feature == \"no\":\n continue\n if i == 0 or i == 2 or i == 4 or i == 10 or i == 11 or i == 12:\n j = 0\n # Its a float so check\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= float(key) and j == 0:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= float(key) and j == 0:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n else:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][feature]\n no_probability = no_probability * child.histogram[3][attribute_index[i]][feature]\n i += 1\n if yes_probability > no_probability:\n return \"yes\"\n elif no_probability >= yes_probability:\n return \"no\"\n# -----------------------------------------------End Naive Bayes-------------------------------------------------\n else:\n if child.branch == testValue:\n return decision_tree_prediction(example, child.nextTree, attributes)", "def classify(self, features):\n node = self.tree\n answer = node.right_label + node.left_label\n while len(answer)>1:\n if node.model.classify(features)==+1:\n answer=node.left_label\n node=node.left\n else:\n answer=node.right_label\n node=node.right \n return answer[0]", "def _predict(self, treenode, X):\n if treenode.is_leaf:\n return treenode.leaf_score\n elif pd.isnull(X[1][treenode.feature]):\n if treenode.nan_direction == 0:\n return self._predict(treenode.left_child, X)\n else:\n return self._predict(treenode.right_child, X)\n elif X[1][treenode.feature] < treenode.threshold:\n return self._predict(treenode.left_child, X)\n else:\n return self._predict(treenode.right_child, X)", "def classify(self, document, tree):\n if type(tree) is ClassTreeNode:\n return tree.c\n else:\n if tree.word in document.bag_of_words:\n return self.classify(document, tree.children[0])\n else:\n return self.classify(document, tree.children[1])", "def _classify(self, sample):\n # This function is used so that we can reduce each row with respect \n # to the sample.\n def calc_dist(vector):\n return 
distance_utils.euclidean(vector, sample)\n\n distances = self.training_set.reduce_rows(calc_dist)\n \n votes = self._tally_votes(self.training_set.get_labels(), distances)\n \n return collection_utils.get_key_with_highest_value(votes)", "def predStat(self,x,f):\n return f([tree.predict(x) for tree in self.forest])", "def classify(self, instance):\n numerator = 0\n denominator = 0\n for training_instance in self.training_data:\n h_value = self._h_function(instance, training_instance[0])\n numerator = numerator + h_value*training_instance[1]\n denominator = denominator + h_value\n return numerator/denominator", "def find_shrunken_averages(tuple_input):\n #The categorical level.\n level = tuple_input[0]\n # The labels list (y varaibale) from a map function.\n labels = tuple_input[1]\n # The total number of level occurances in the frame (ie count)\n level_n = len(labels)\n level_mean = sum(labels) / level_n\n\n # Determine if there enough occurances of a level. If NOT return overall_mean\n if level_n >= threshold:\n return(level,level_mean)\n else:\n return(level, ((1 - lambda_) * level_mean) +\\\n (lambda_ * overall_mean) )", "def classify(self, ep):\n # just here for defining the interface; work is done in subclasses\n pass", "def classify(cls, i):\r\n sums = [0,0]\r\n sums[int(WekaClassifier_0.classify(i))] += 1.2134644010075073\r\n sums[int(WekaClassifier_1.classify(i))] += 0.57177685574344\r\n sums[int(WekaClassifier_2.classify(i))] += 0.40154496884580815\r\n sums[int(WekaClassifier_3.classify(i))] += 0.35999934750119333\r\n sums[int(WekaClassifier_4.classify(i))] += 0.36937329276984643\r\n sums[int(WekaClassifier_5.classify(i))] += 0.16351990613377496\r\n sums[int(WekaClassifier_6.classify(i))] += 0.1396078832952814\r\n sums[int(WekaClassifier_7.classify(i))] += 0.15882943193304253\r\n sums[int(WekaClassifier_8.classify(i))] += 0.1284505298097081\r\n sums[int(WekaClassifier_9.classify(i))] += 0.09903161346969916\r\n sums[int(WekaClassifier_10.classify(i))] += 0.19672733155497407\r\n sums[int(WekaClassifier_11.classify(i))] += 0.17672847093616786\r\n sums[int(WekaClassifier_12.classify(i))] += 0.18729151620386228\r\n sums[int(WekaClassifier_13.classify(i))] += 0.24810462685136855\r\n sums[int(WekaClassifier_14.classify(i))] += 0.23706555932983922\r\n sums[int(WekaClassifier_15.classify(i))] += 0.14276017880034322\r\n sums[int(WekaClassifier_16.classify(i))] += 0.2655207144416779\r\n sums[int(WekaClassifier_17.classify(i))] += 0.24759035974335297\r\n sums[int(WekaClassifier_18.classify(i))] += 0.14255881855351965\r\n sums[int(WekaClassifier_19.classify(i))] += 0.1181101393342422 \r\n return float(sums[0] - sums[1])", "def predict_from_all_children ( self, node: TreeSplits ):\n # Collect the children\n children_values = BaseTree.collect_children ( node )\n # Aggregate the leaf values\n return self.agg_function ( children_values )\n # End predict_from_all_children", "def _predict(self, inputs):\n node = self.tree_\n while node.left:\n if inputs[node.feature_index] < node.split:\n node = node.left\n else:\n node = node.right\n return node.predicted_class", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in 
final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def predict_one(tree, sample):\n if tree['leaf']:\n return tree['class']\n\n else:\n if sample[tree['feature']] <= tree['split']:\n return predict_one(tree['left'], sample)\n else:\n return predict_one(tree['right'], sample)", "def build(self):\n # weights to apply to training samples, updated on each\n # iteration of the boosting algo, normalised to 1\n sigWeights = np.ones(self.nSig, dtype=float)\n bkgWeights = np.ones(self.nBkg, dtype=float)\n reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))\n sigWeights *= reweight\n bkgWeights *= reweight \n\n # Weight of each tree, strong classifers have higher weight\n self.treeWeights = np.zeros(self.ntrees, dtype=float)\n\n for i in xrange(self.ntrees):\n\n # build new tree\n newTree = Tree()\n newTree.load(self.sigData,self.bkgData,weights=(sigWeights,bkgWeights))\n newTree.build()\n self.dTrees.append(newTree) \n\n # evaluate trees\n # keep track of each event\n err = 0.0\n sigWrong = np.zeros(self.nSig)\n bkgWrong = np.zeros(self.nBkg)\n\n for j in range(self.nSig):\n if 
newTree.classify(np.array((self.sigData[j,])))<0:\n sigWrong[i]=1\n err+=sigWeights[j]\n\n for j in range(self.nBkg):\n if newTree.classify(np.array((self.bkgData[j,])))>0:\n bkgWrong[i]=1\n err+=bkgWeights[j]\n\n alpha = self.beta*math.log((1.0-err)/err)\n print err,alpha\n corFactor = math.exp(-alpha)\n wrongFactor = math.exp(alpha)\n\n if (err<1e-20 or err >= 0.5):\n print \"SOEMTHING WRONG!!\"\n\n self.treeWeights[i] = alpha\n\n # reweight training samples\n for j in range(self.nSig):\n if sigWrong[j]:\n sigWeights[j]*=wrongFactor\n else :\n sigWeights[j]*=corFactor\n\n for j in range(self.nBkg):\n if bkgWrong[j]:\n bkgWeights[j]*=wrongFactor\n else :\n bkgWeights[j]*=corFactor\n\n # normalise weights\n reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))\n sigWeights *= reweight\n bkgWeights *= reweight", "def classify(data_point, tree):\r\n current = tree\r\n while(current.is_leaf == False): #while we're not at a leaf\r\n q = tree.issue\r\n v = data_point.dat_votes[ord(q) - 97]\r\n if(current is None): pass\r\n current = current.get_classification(v)\r\n #we should now be at a Leaf\r\n if(current is None): print(\"FATAL\")\r\n c =current.get_classification(\"\")\r\n # print(\"classified: \" + str(data_point) + \" as \" + str(c))\r\n return c", "def classify(self, row, node):\n\n # Base case: we've reached a leaf\n if isinstance(node, Leaf):\n return node.predictions\n\n # Decide whether to follow the true-branch or the false-branch.\n # Compare the feature / value stored in the node,\n # to the example we're considering.\n if node.question.match(row):\n return self.classify(row, node.true_branch)\n else:\n return self.classify(row, node.false_branch)", "def average_impurity(self):\n children = tf.squeeze(tf.slice(self.variables.tree, [0, 0], [-1, 1]),\n squeeze_dims=[1])\n is_leaf = tf.equal(LEAF_NODE, children)\n leaves = tf.to_int32(tf.squeeze(tf.where(is_leaf), squeeze_dims=[1]))\n counts = tf.gather(self.variables.node_sums, leaves)\n impurity = self._weighted_gini(counts)\n return tf.reduce_sum(impurity) / tf.reduce_sum(counts + 1.0)", "def entropy_gain(node,attribute):\n data_subset1 = filter_data(node.data,node.ancestors)\n data_counts = list(Counter(data_subset1['Class']).values())\n base_entropy = entropy(data_counts,base=2)\n num_values = len(data_subset1)\n entropy_sum = 0\n \n for value in [0,1]:\n data_subset2 = filter_data(node.data, node.ancestors + [(attribute,value)])\n subset_counts = list(Counter(data_subset2['Class']).values())\n entropy_sum += (len(data_subset2)/num_values) * entropy(subset_counts,base=2)\n \n return base_entropy - entropy_sum", "def apply(self, image):\n if isinstance(image, hyperread):\n image = image.image()\n\n reshape = False\n if len(image.shape) > 2:\n (n,m,k) = np.shape(image)\n image = np.reshape(image, (n*m, k))\n reshape = True\n\n if self.apply_preprocessing:\n image = self.preprocess_data_matrix(image)\n\n classes = self.random_forest.predict(image)\n\n if reshape:\n classes = np.reshape(classes, (n,m))\n\n if self.apply_postprocessing:\n classes = post_processing(classes, self.region_selection)\n\n return classes", "def predict_all(self,entry,counts):\n if self.type == 'v':\n counts[self.value] += 1\n return\n v = entry[self.feature]\n if v is None:\n for val,c in self.children.iteritems():\n c.predict_all(entry,counts)\n return\n if self.type == 's':\n c = None\n try:\n c = self.children[v]\n except KeyError:\n #print \"Unseen value for feature\",self.feature,\": \",v\n best = None\n bestDist = float('inf')\n for (val,c) in 
self.children.iteritems():\n if abs(val - v) < bestDist:\n bestDist = abs(val - v)\n best = c\n c = best\n c.predict_all(entry,counts)\n elif self.type == 'i':\n if v <= self.value:\n self.children[0].predict_all(entry,counts)\n else:\n self.children[1].predict_all(entry,counts)\n return", "def predict(self, data):\t\t\n\t\tpredictions = {}\n\t\tfor tree in self.trees:\n\t\t\tprediction, prob = tree.predict(data)\n\t\t\tif prediction in predictions:\n\t\t\t\tpredictions[prediction] += prob\n\t\t\telse:\n\t\t\t\tpredictions[prediction] = prob\n\t\treturn max(predictions, key=predictions.get)", "def classify(self, data):\n abstract", "def analyze(self, event):\n ##### set variables ####\n self.nElectrons = 0\n self.nMuons = 0\n self.nTaus = 0\n self.nFatJets = 0\n self.EventWeight = 1.\n self.TopWeight = 1.\n self.BTagAK8Weight = 1.\n self.BTagAK4Weight = 1.\n self.BTagAK8Weight_deep = 1.\n self.BTagAK8Weight_deep_up = 1.\n self.BTagAK8Weight_deep_down = 1.\n self.BTagAK4Weight_deep = 1.\n self.BTagAK4Weight_deep_up = 1.\n self.BTagAK4Weight_deep_down = 1.\n self.BBTagWeight = 1.\n self.GenWeight = 1.\n self.PUWeight = 1.\n self.LeptonWeight = 1.\n self.LeptonWeightUp = 1.\n self.LeptonWeightDown = 1.\n self.TriggerWeight = 1.\n self.TriggerWeightUp = 1.\n self.TriggerWeightDown = 1.\n self.isZtoMM = False\n self.isZtoEE = False\n self.isZtoNN = False\n self.isTtoEM = False\n self.isBoosted4B = False\n self.isHtobb = False\n self.isHtobb_ml = False\n self.isMaxBTag_loose = False\n self.isMaxBTag_medium = False\n self.isMaxBTag_tight = False\n self.isVBF = False\n self.is2016 = False\n self.is2017 = False\n self.is2018 = False\n self.nTaus = 0\n self.nJetsNoFatJet = 0\n self.H_partonflavour = -1.\n self.H_hadronflavour = -1.\n self.DPhi = -1.\n self.VHDEta = -1.\n self.MinJetMetDPhi = 10.\n self.MaxJetNoFatJetBTag = -1.\n self.BtagDeepB = -1.\n self.DeepTagMD_H4qvsQCD = -1.\n self.DeepTagMD_HbbvsQCD = -1.\n self.DeepTagMD_ZHbbvsQCD = -1.\n self.DeepTagMD_ZbbvsQCD = -1.\n self.DeepTagMD_bbvsLight = -1.\n self.DeepTagMD_WvsQCD = -1.\n self.DeepTagMD_ZvsQCD = -1.\n self.Mu1_pt = -1.\n self.Mu1_eta = -1.\n self.Mu1_phi = -1.\n self.Mu1_mass = -1.\n self.Mu1_pfIsoId = -1.\n self.Mu1_relIso = -1.\n self.Mu1_highPtId = -1.\n self.Mu2_pt = -1.\n self.Mu2_eta = -1.\n self.Mu2_phi = -1.\n self.Mu2_mass = -1.\n self.Mu2_pfIsoId = -1.\n self.Mu2_relIso = -1.\n self.Mu2_highPtId = -1.\n self.Ele1_pt = -1.\n self.Ele1_eta = -1.\n self.Ele1_phi = -1.\n self.Ele1_mass = -1.\n self.Ele2_pt = -1.\n self.Ele2_eta = -1.\n self.Ele2_phi = -1.\n self.Ele2_mass = -1.\n self.Ele_HEM15_16 = -1.\n self.HT_HEM15_16 = -1.\n self.HT = 0.\n self.LHEScaleWeight = -1.\n self.LHEPdfWeight = -1.\n self.LHEWeight_originalXWGTUP = -1.\n self.PrefireWeight = 1.\n self.PrefireWeightUp = 1.\n self.PrefireWeightDown = 1.\n self.QCDNLO_Corr = 1.\n self.QCDNNLO_Corr = 1.\n self.EWKNLO_Corr = 1.\n self.Jet1_VBF_pt = -1.\n self.Jet1_VBF_eta = -1.\n self.Jet1_VBF_phi = -1.\n self.Jet1_VBF_mass = -1.\n self.Jet2_VBF_pt = -1.\n self.Jet2_VBF_eta = -1.\n self.Jet2_VBF_phi = -1.\n self.Jet2_VBF_mass = -1.\n self.dijet_VBF_mass = -1.\n self.deltaR_VBF = -1.\n self.deltaR_HVBFjet1 = -1.\n self.deltaR_HVBFjet2 = -1.\n self.H_pt = -1.\n self.H_eta = -1.\n self.H_phi = -1.\n self.H_mass = -1.\n self.H_M = -1.\n self.H_tau21 = -1.\n self.H_tau41 = -1.\n self.H_tau42 = -1.\n self.H_tau31 = -1.\n self.H_tau32 = -1.\n self.H_ddt = -1.\n self.H_csv1 = -1.\n self.H_csv2 = -1.\n self.H_deepcsv1 = -1.\n self.H_deepcsv2 = -1.\n self.H_dbt = -1.\n 
self.H_chf = -1.\n self.H_nhf = -1.\n self.V_pt = -1.\n self.V_eta = -1.\n self.V_phi = -1.\n self.V_mass = -1.\n self.VH_deltaR = -1.\n self.X_pt = -1.\n self.X_eta = -1.\n self.X_phi = -1.\n self.X_mass = -1.\n self.X_mass_chs = -1.\n self.X_mass_nom = -1.\n self.X_mass_jesUp = -1.\n self.X_mass_jesDown = -1.\n self.X_mass_jerUp = -1.\n self.X_mass_jerDown = -1.\n self.X_mass_MET_nom = -1.\n self.X_mass_MET_jesUp = -1.\n self.X_mass_MET_jesDown = -1.\n self.X_mass_MET_jerUp = -1.\n self.X_mass_MET_jerDown = -1.\n self.H_mass_nom = -1.\n self.H_mass_jmsUp = -1.\n self.H_mass_jmsDown = -1.\n self.H_mass_jmrUp = -1.\n self.H_mass_jmrDown = -1.\n\n \n \n eecutflow_list = []\n mmcutflow_list = []\n nncutflow_list = []\n\n idx_electrons = []\n idx_loose_electrons = []\n idx_muons = []\n idx_loose_muons = []\n idx_fatjet = []\n idx_jet = []\n idx_jet_vbf = []\n\n electrons_tlv_list = []\n loose_electrons_tlv_list = []\n muons_tlv_list = []\n loose_muons_tlv_list = []\n fatjet_tlv_list = []\n jet_tlv_list = []\n jet_tlv_list_vbf = []\n fatjet_tau21_list = []\n fatjet_tau41_list = []\n fatjet_tau42_list = []\n fatjet_tau31_list = []\n fatjet_tau32_list = []\n\n V = ROOT.TLorentzVector()\n H = ROOT.TLorentzVector()\n X = ROOT.TLorentzVector()\n\n V_chs = ROOT.TLorentzVector()\n ######### cuts #########\n elec1_pt_cut = 55.\n elec2_pt_cut = 20.\n elec_pt_cut = 10.\n elec_eta_cut = 2.5\n muon1_pt_cut = 55.\n muon2_pt_cut = 20. \n muon_pt_cut = 10.\n muon_eta_cut = 2.4\n tau_pt_cut = 18.\n tau_eta_cut = 2.3\n ak4_pt_cut = 30.\n ak4_eta_cut = 2.4\n fatjet_pt_cut = 200.\n fatjet_eta_cut = 2.4\n met_pt_cut = 250.\n v_pt_cut = 200.\n tau21_lowercut = 0.35\n tau21_uppercut = 0.75\n j_mass_lowercut = 30.\n j_mass_uppercut = 250.\n v_mass_lowercut = 65.\n v_mass_intercut = 85.\n v_mass_uppercut = 105.\n h_mass_lowercut = 105.\n h_mass_uppercut = 135.\n x_mass_lowercut = 750.\n xt_mass_lowercut = 650.\n xjj_mass_lowercut = 950.\n \n #### flag for year #######\n if self.year == 2016:\n self.is2016 = True\n elif self.year == 2017:\n self.is2017 = True\n elif self.year == 2018:\n self.is2018 = True\n \n \n ######### triggers #########\n if self.year == 2016:\n try:\n trigger_SingleMu = any([event.HLT_Mu50,\n event.HLT_TkMu50])\n except:\n trigger_SingleMu = event.HLT_Mu50\n trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT\n trigger_SingleIsoEle = event.HLT_Ele27_WPTight_Gsf\n trigger_SinglePhoton = event.HLT_Photon175\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight, \n event.HLT_PFMET120_PFMHT120_IDTight])\n trigger_MET = any([event.HLT_PFMET170_NotCleaned,\n event.HLT_PFMET170_HBHECleaned])\n elif self.year == 2017:\n try:\n trigger_SingleMu = any([event.HLT_Mu50,\n event.HLT_TkMu100,\n event.HLT_OldMu100])\n except:\n trigger_SingleMu = event.HLT_Mu50\n try:\n trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT\n except:\n trigger_SingleEle = None\n trigger_SingleIsoEle = event.HLT_Ele35_WPTight_Gsf\n trigger_SinglePhoton = event.HLT_Photon200\n try:\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_PFMETNoMu130_PFMHTNoMu130_IDTight,\n event.HLT_PFMETNoMu140_PFMHTNoMu140_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n except:\n trigger_METMHTNoMu = 
any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight, \n event.HLT_PFMET120_PFMHT120_IDTight,\n event.HLT_PFMET130_PFMHT130_IDTight, \n event.HLT_PFMET140_PFMHT140_IDTight,\n event.HLT_PFMETTypeOne110_PFMHT110_IDTight,\n event.HLT_PFMETTypeOne120_PFMHT120_IDTight,\n event.HLT_PFMETTypeOne130_PFMHT130_IDTight,\n event.HLT_PFMETTypeOne140_PFMHT140_IDTight])\n try:\n trigger_MET = any([event.HLT_PFMET200_NotCleaned,\n event.HLT_PFMET200_HBHECleaned,\n event.HLT_PFMET200_HBHE_BeamHaloCleaned,\n event.HLT_PFMET250_HBHECleaned])\n except:\n trigger_MET = None\n\n elif self.year == 2018:\n trigger_SingleMu = any([event.HLT_Mu50,\n event.HLT_TkMu100,\n event.HLT_OldMu100])\n trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT\n trigger_SingleIsoEle = event.HLT_Ele32_WPTight_Gsf\n trigger_SinglePhoton = event.HLT_Photon200\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_PFMETNoMu130_PFMHTNoMu130_IDTight,\n event.HLT_PFMETNoMu140_PFMHTNoMu140_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight, \n event.HLT_PFMET120_PFMHT120_IDTight,\n event.HLT_PFMET130_PFMHT130_IDTight, \n event.HLT_PFMET140_PFMHT140_IDTight,\n event.HLT_PFMETTypeOne110_PFMHT110_IDTight,\n event.HLT_PFMETTypeOne120_PFMHT120_IDTight,\n event.HLT_PFMETTypeOne130_PFMHT130_IDTight,\n event.HLT_PFMETTypeOne140_PFMHT140_IDTight])\n trigger_MET = any([event.HLT_PFMET200_NotCleaned,\n event.HLT_PFMET200_HBHECleaned,\n event.HLT_PFMET200_HBHE_BeamHaloCleaned,\n event.HLT_PFMET250_HBHECleaned])\n ########## Gen Weight #########\n if self.isMC:\n self.GenWeight = -1. 
if event.genWeight < 0 else 1.\n self.PUWeight = self.puTool.getWeight(event.Pileup_nTrueInt)\n self.EventWeight *= self.GenWeight\n self.EventWeight *= self.PUWeight\n for i,weight in enumerate(event.LHEScaleWeight):\n self.out.LHEScaleWeight_hist.Fill(i,weight)\n for j,weight in enumerate(event.LHEPdfWeight):\n self.out.LHEPdfWeight_hist.Fill(j,weight)\n self.LHEScaleWeight = event.LHEScaleWeight\n self.LHEPdfWeight = event.LHEPdfWeight\n self.LHEWeight_originalXWGTUP = event.LHEWeight_originalXWGTUP\n self.out.events.Fill(0.,self.GenWeight)\n self.out.original.Fill(0.,event.LHEWeight_originalXWGTUP)\n if self.year == 2016 or self.year == 2017:\n self.PrefireWeight = event.PrefireWeight\n self.PrefireWeightUp = event.PrefireWeight_Up\n self.PrefireWeightDown = event.PrefireWeight_Down\n \n if self.isData and event.PV_npvs == 0:\n return False\n if not self.isData:\n self.out.pileup.Fill(event.Pileup_nTrueInt)\n if event.Pileup_nTrueInt == 0:\n return False\n ########### FatJet #########\n for ifatjet in range(event.nFatJet):\n fatjet_pt = event.FatJet_pt[ifatjet]\n fatjet_eta = event.FatJet_eta[ifatjet]\n fatjet_phi = event.FatJet_phi[ifatjet]\n fatjet_mass = event.FatJet_mass[ifatjet]\n fatjet_jetid = event.FatJet_jetId[ifatjet]\n fatjet_tlv = ROOT.TLorentzVector()\n fatjet_tlv.SetPtEtaPhiM(fatjet_pt, fatjet_eta, fatjet_phi, fatjet_mass)\n if fatjet_pt > fatjet_pt_cut and abs(fatjet_eta) < fatjet_eta_cut:\n fatjet_tlv_list.append(fatjet_tlv)\n idx_fatjet.append(ifatjet)\n if event.FatJet_tau1[ifatjet]==0:\n fatjet_tau21_list.append(0)\n fatjet_tau41_list.append(0)\n fatjet_tau31_list.append(0)\n else:\n fatjet_tau21_list.append(event.FatJet_tau2[ifatjet]/event.FatJet_tau1[ifatjet])\n fatjet_tau41_list.append(event.FatJet_tau4[ifatjet]/event.FatJet_tau1[ifatjet])\n fatjet_tau31_list.append(event.FatJet_tau3[ifatjet]/event.FatJet_tau1[ifatjet])\n if event.FatJet_tau2[ifatjet]==0:\n fatjet_tau42_list.append(0)\n fatjet_tau32_list.append(0)\n else:\n fatjet_tau42_list.append(event.FatJet_tau4[ifatjet]/event.FatJet_tau2[ifatjet])\n fatjet_tau32_list.append(event.FatJet_tau3[ifatjet]/event.FatJet_tau2[ifatjet])\n self.nFatJets = len(fatjet_tlv_list)\n #stop if no suitable Fatjet\n if len(fatjet_tlv_list) == 0:\n return False \n ########### electrons ##########\n for ielectron in range(event.nElectron):\n electron_pt = event.Electron_pt[ielectron]\n electron_eta = event.Electron_eta[ielectron]\n electron_phi = event.Electron_phi[ielectron]\n electron_mass = event.Electron_mass[ielectron]\n electron_tlv = ROOT.TLorentzVector()\n electron_tlv.SetPtEtaPhiM(electron_pt,electron_eta,electron_phi,electron_mass)\n if electron_eta > -2.5 and electron_eta < -1.479 and electron_phi > -1.55 and electron_phi < -0.9:\n if self.Ele_HEM15_16 == -1.:\n self.Ele_HEM15_16 = 0.\n self.Ele_HEM15_16 += electron_pt\n if electron_pt > elec_pt_cut and abs(electron_eta) < elec_eta_cut:\n idx_electrons.append(ielectron)\n electrons_tlv_list.append(electron_tlv)\n if event.Electron_cutBased[ielectron] >= 2:\n idx_loose_electrons.append(ielectron)\n loose_electrons_tlv_list.append(electron_tlv)\n self.nElectrons = len(loose_electrons_tlv_list)\n \n ########### muons #########\n for imuon in range(event.nMuon):\n muon_pt = event.Muon_pt[imuon]\n muon_eta = event.Muon_eta[imuon]\n muon_phi = event.Muon_phi[imuon]\n muon_mass = event.Muon_mass[imuon]\n muon_tlv = ROOT.TLorentzVector()\n muon_tlv.SetPtEtaPhiM(muon_pt, muon_eta, muon_phi, muon_mass)\n if muon_pt > muon_pt_cut and abs(muon_eta) < muon_eta_cut:\n 
idx_muons.append(imuon)\n muons_tlv_list.append(muon_tlv)\n if event.Muon_isPFcand[imuon] and struct.unpack('B',event.Muon_pfIsoId[imuon])[0]>=2 and (event.Muon_isGlobal[imuon] or event.Muon_isTracker[imuon]):\n idx_loose_muons.append(imuon)\n loose_muons_tlv_list.append(muon_tlv)\n self.nMuons = len(loose_muons_tlv_list)\n\n\n ############ taus #########\n for itau in range(event.nTau):\n tau_pt = event.Tau_pt[itau]\n tau_eta = event.Tau_eta[itau]\n tau_phi = event.Tau_phi[itau]\n tau_mass = event.Tau_mass[itau]\n tau_tlv = ROOT.TLorentzVector()\n tau_tlv.SetPtEtaPhiM(tau_pt, tau_eta, tau_phi, tau_mass)\n if tau_pt > tau_pt_cut and abs(tau_eta) < tau_eta_cut:\n cleanTau = True\n for loose_electrons_tlv in loose_electrons_tlv_list:\n if loose_electrons_tlv.DeltaR(tau_tlv) < 0.4:\n cleanTau = False\n for loose_muons_tlv in loose_muons_tlv_list:\n if loose_muons_tlv.DeltaR(tau_tlv) < 0.4:\n cleanTau = False\n if cleanTau:\n self.nTaus += 1\n\n ############ MET ##########\n METx = 0.\n METy = 0.\n MET_tlv = ROOT.TLorentzVector()\n MET_tlv.SetPtEtaPhiE(event.PuppiMET_pt,0.,event.PuppiMET_phi, event.PuppiMET_pt)\n \n ############ TTbar pT reweighting ########\n if self.isMC and 'TT' in self.sample[0]:\n Top1_pt, Top2_pt = getTTPt(event)\n self.TopWeight = getTTptWeight(Top1_pt, Top2_pt)\n\n ############ ZtoEE ############\n self.out.eecutflow.Fill(0.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n maxZpt = -1.\n Z_pt = -1.\n Z_m = -1.\n goodelectronpair = False\n for i in idx_electrons:\n for j in idx_electrons:\n if i==j or event.Electron_charge[i] == event.Electron_charge[j]:\n continue\n eli_tlv = ROOT.TLorentzVector()\n eli_tlv.SetPtEtaPhiM(event.Electron_pt[i],event.Electron_eta[i],event.Electron_phi[i],event.Electron_mass[i])\n eli_v = ROOT.TVector3()\n eli_v.SetPtEtaPhi(event.Electron_pt[i],event.Electron_eta[i],event.Electron_phi[i])\n elj_tlv = ROOT.TLorentzVector()\n elj_tlv.SetPtEtaPhiM(event.Electron_pt[j],event.Electron_eta[j],event.Electron_phi[j],event.Electron_mass[j])\n elj_v = ROOT.TVector3()\n elj_v.SetPtEtaPhi(event.Electron_pt[j],event.Electron_eta[j],event.Electron_phi[j])\n diel = eli_tlv + elj_tlv\n Z_pt = diel.Pt()\n Z_m = diel.M()\n if Z_m > 70. and Z_m < 110. 
and Z_pt > maxZpt:\n maxZpt = Z_pt\n if eli_tlv.Pt() > elj_tlv.Pt():\n el1 = i\n el2 = j\n el1_tlv = eli_tlv\n el2_tlv = elj_tlv\n el1_v = eli_v\n el2_v = elj_v\n else:\n el1 = j\n el2 = i\n el1_tlv = elj_tlv\n el2_tlv = eli_tlv\n el1_v = elj_v\n el2_v = eli_v\n goodelectronpair = True\n \n \n if goodelectronpair:\n self.out.eecutflow.Fill(1.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if el1_tlv.Pt() > elec1_pt_cut and el2_tlv.Pt() > elec2_pt_cut:\n self.out.eecutflow.Fill(2.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if event.Electron_cutBased[el1] >= 2 and event.Electron_cutBased[el2] >= 2:\n self.out.eecutflow.Fill(3.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if maxZpt > v_pt_cut:\n self.out.eecutflow.Fill(4.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if trigger_SingleEle == None:\n if not trigger_SingleIsoEle and not trigger_SinglePhoton:\n print \"ZtoEE trigger inconsistency\"\n return False\n else:\n if not trigger_SingleEle and not trigger_SingleIsoEle and not trigger_SinglePhoton:\n print \"ZtoEE trigger inconsistency\"\n return False\n #if not self.isMC and (\"SinglePhoton\" in self.sample[0] and (trigger_SingleEle or trigger_SingleIsoEle)):\n # print \"ZtoEE double counting\"\n # return False\n self.out.eecutflow.Fill(5.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if self.isMC:\n eltrig_tlv = el1_tlv\n #for i in range(event.nTrigObj):\n # if event.TrigObj_id[i] ==11:\n # trigobj_v = ROOT.TVector3()\n # trigobj_v.SetPtEtaPhi(event.TrigObj_pt[i],event.TrigObj_eta[i],event.TrigObj_phi[i])\n # print \"electron TrigObj_filterBits:\",event.TrigObj_filterBits[i]\n # if event.TrigObj_filterBits[i]==14336:\n # #if event.TrigObj_filterBits[i]==1110000000000000:\n # print \"found matching electron\"\n # deltaR1 = trigobj_v.DeltaR(el1_v)\n # deltaR2 = trigobj_v.DeltaR(el2_v)\n # if deltaR2 < deltaR1 and deltaR2 < 0.2:\n # eltrig_tlv = el2_tlv\n # break\n self.TriggerWeight = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta())\n self.TriggerWeightUp = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta()) + self.elSFs.getTriggerSFerror(eltrig_tlv.Pt(),eltrig_tlv.Eta())\n self.TriggerWeightDown = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta()) - self.elSFs.getTriggerSFerror(eltrig_tlv.Pt(),eltrig_tlv.Eta())\n self.LeptonWeight = self.elSFs.getIdIsoSF(el1_tlv.Pt(), el1_tlv.Eta())*self.elSFs.getIdIsoSF(el2_tlv.Pt(),el2_tlv.Eta())\n IdIsoSF1 = self.elSFs.getIdIsoSF(el1_tlv.Pt(), el1_tlv.Eta())\n IdIsoSF2 = self.elSFs.getIdIsoSF(el2_tlv.Pt(),el2_tlv.Eta())\n IdIsoSF1error = self.elSFs.getIdIsoSFerror(el1_tlv.Pt(), el1_tlv.Eta())\n IdIsoSF2error = self.elSFs.getIdIsoSFerror(el2_tlv.Pt(),el2_tlv.Eta())\n \n self.LeptonWeight = IdIsoSF1*IdIsoSF2\n LeptonWeightsigma = np.sqrt((IdIsoSF1error*IdIsoSF2)**2+(IdIsoSF2error*IdIsoSF1)**2)\n self.LeptonWeightUp = self.LeptonWeight + LeptonWeightsigma\n self.LeptonWeightDown = self.LeptonWeight - LeptonWeightsigma\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.EventWeight *= self.TriggerWeight\n self.EventWeight *= self.LeptonWeight\n V = el1_tlv + el2_tlv\n self.Ele1_pt = el1_tlv.Pt()\n self.Ele1_eta = 
el1_tlv.Eta()\n self.Ele1_phi = el1_tlv.Phi()\n self.Ele1_mass = el1_tlv.M()\n self.Ele2_pt = el2_tlv.Pt()\n self.Ele2_eta = el2_tlv.Eta()\n self.Ele2_phi = el2_tlv.Phi()\n self.Ele2_mass = el2_tlv.M()\n self.isZtoEE = True\n\n ########## ZtoMM #############\n self.out.mmcutflow.Fill(0.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n maxZpt = -1.\n Z_pt = -1.\n Z_m = -1.\n goodmuonpair = False\n for i in idx_muons:\n for j in idx_muons:\n if i==j or event.Muon_charge[i] == event.Muon_charge[j]:\n continue\n mui_tlv = ROOT.TLorentzVector()\n mui_tlv.SetPtEtaPhiM(event.Muon_pt[i],event.Muon_eta[i],event.Muon_phi[i],event.Muon_mass[i])\n mui_v = ROOT.TVector3()\n mui_v.SetPtEtaPhi(event.Muon_pt[i],event.Muon_eta[i],event.Muon_phi[i])\n muj_tlv = ROOT.TLorentzVector()\n muj_tlv.SetPtEtaPhiM(event.Muon_pt[j],event.Muon_eta[j],event.Muon_phi[j],event.Muon_mass[j]) \n muj_v = ROOT.TVector3()\n muj_v.SetPtEtaPhi(event.Muon_pt[j],event.Muon_eta[j],event.Muon_phi[j])\n dimu = mui_tlv + muj_tlv\n Z_pt = dimu.Pt()\n Z_m = dimu.M()\n if Z_m > 70. and Z_m < 110. and Z_pt > maxZpt:\n maxZpt = Z_pt\n if mui_tlv.Pt() > muj_tlv.Pt():\n mu1 = i\n mu2 = j\n mu1_tlv = mui_tlv\n mu2_tlv = muj_tlv\n mu1_v = mui_v\n mu2_v = muj_v\n else:\n mu1 = j\n mu2 = i\n mu1_tlv = muj_tlv\n mu2_tlv = mui_tlv\n mu1_v = muj_v\n mu2_v = mui_v\n goodmuonpair = True\n \n\n if goodmuonpair:\n self.out.mmcutflow.Fill(1.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n mu1_highPtId = struct.unpack('B',event.Muon_highPtId[mu1])[0]\n mu2_highPtId = struct.unpack('B',event.Muon_highPtId[mu2])[0] \n if mu1_tlv.Pt() > muon1_pt_cut and mu2_tlv.Pt() > muon2_pt_cut:\n self.out.mmcutflow.Fill(2.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if (mu1_highPtId >= 2 and mu2_highPtId >= 1) or (mu1_highPtId >= 1 and mu2_highPtId >= 2):\n self.out.mmcutflow.Fill(3.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if maxZpt > v_pt_cut:\n self.out.mmcutflow.Fill(4.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if not trigger_SingleMu:\n print \"ZtoMM trigger inconsistency\"\n return False\n self.out.mmcutflow.Fill(5.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if self.isMC:\n if mu1_highPtId >=2:\n mutrig_tlv = mu1_tlv\n else:\n mutrig_tlv = mu2_tlv\n #for i in range(event.nTrigObj):\n # if event.TrigObj_id[i] ==13:\n # trigobj_v = ROOT.TVector3()\n # trigobj_v.SetPtEtaPhi(event.TrigObj_pt[i],event.TrigObj_eta[i],event.TrigObj_phi[i])\n # deltaR1 = trigobj_v.DeltaR(mu1_v)\n # deltaR2 = trigobj_v.DeltaR(mu2_v)\n # print \"muon TrigObj_filterBits:\",event.TrigObj_filterBits[i]\n # if event.TrigObj_filterBits[i]==2048:\n # #if event.TrigObj_filterBits[i]==10000000000:\n # print \"found matching muon\"\n # if deltaR2 < deltaR1 and deltaR2 < 0.2:\n # mutrig_tlv = mu2_tlv\n # break\n\n self.TriggerWeight = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta())\n self.TriggerWeightUp = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta()) + self.muSFs.getTriggerSFerror(mutrig_tlv.Pt(),mutrig_tlv.Eta())\n self.TriggerWeightDown = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta()) - self.muSFs.getTriggerSFerror(mutrig_tlv.Pt(),mutrig_tlv.Eta())\n IdSF1 = self.muSFs.getIdSF(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IdSF2 = self.muSFs.getIdSF(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n IsoSF1 = self.muSFs.getIsoSF(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IsoSF2 = self.muSFs.getIsoSF(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n IdSF1error = 
self.muSFs.getIdSFerror(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IdSF2error = self.muSFs.getIdSFerror(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n IsoSF1error = self.muSFs.getIsoSFerror(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IsoSF2error = self.muSFs.getIsoSFerror(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n self.LeptonWeight = IdSF1*IdSF2*IsoSF1*IsoSF2\n LeptonWeightsigma = np.sqrt((IdSF1error*IdSF2*IsoSF1*IsoSF2)**2+(IdSF2error*IdSF1*IsoSF1*IsoSF2)**2+(IsoSF1error*IdSF1*IdSF2*IsoSF2)**2+(IsoSF2error*IdSF1*IdSF2*IsoSF1)**2)\n self.LeptonWeightUp = self.LeptonWeight + LeptonWeightsigma\n self.LeptonWeightDown = self.LeptonWeight - LeptonWeightsigma\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.EventWeight *= self.TriggerWeight\n self.EventWeight *= self.LeptonWeight\n if mu1_tlv.DeltaR(mu2_tlv) < 0.3:\n try:\n self.Mu1_relIso = ((event.Muon_tkRelIso[mu1]*mu1_tlv.Pt()) - mu2_tlv.Pt())/mu1_tlv.Pt()\n self.Mu2_relIso = ((event.Muon_tkRelIso[mu2]*mu2_tlv.Pt()) - mu1_tlv.Pt())/mu2_tlv.Pt()\n except:\n self.Mu1_relIso = -1.\n self.Mu2_relIso = -1.\n else:\n try:\n self.Mu1_relIso = event.Muon_tkRelIso[mu1]\n self.Mu2_relIso = event.Muon_tkRelIso[mu2]\n except:\n self.Mu1_relIso = -1.\n self.Mu2_relIso = -1.\n V = mu1_tlv + mu2_tlv\n self.Mu1_pt = mu1_tlv.Pt()\n self.Mu1_eta = mu1_tlv.Eta()\n self.Mu1_phi = mu1_tlv.Phi()\n self.Mu1_mass = mu1_tlv.M()\n self.Mu1_pfIsoId = struct.unpack('B',event.Muon_pfIsoId[mu1])[0]\n self.Mu1_highPtId = struct.unpack('B',event.Muon_highPtId[mu1])[0]\n self.Mu2_pt = mu2_tlv.Pt()\n self.Mu2_eta = mu2_tlv.Eta()\n self.Mu2_phi = mu2_tlv.Phi()\n self.Mu2_mass = mu2_tlv.M()\n self.Mu2_pfIsoId = struct.unpack('B',event.Muon_pfIsoId[mu2])[0]\n self.Mu2_highPtId = struct.unpack('B',event.Muon_highPtId[mu2])[0]\n self.isZtoMM = True\n\n \n ########### TtoEM ######### \n if not self.isZtoMM and not self.isZtoEE and self.nElectrons == 1 and self.nMuons == 1:\n if event.Electron_charge[idx_loose_electrons[0]] != event.Muon_charge[idx_loose_muons[0]]:\n el_tlv = loose_electrons_tlv_list[0]\n mu_tlv = loose_muons_tlv_list[0]\n if mu_tlv.Pt() > 30. 
and el_tlv.Pt() > 30.: \n V = mu_tlv + el_tlv\n if V.Pt() > 50.:\n if trigger_SingleEle == None:\n if not trigger_SingleIsoEle:\n print \"TtoEM trigger inconsistency\"\n return False\n else:\n if not trigger_SingleEle and not trigger_SingleIsoEle:\n print \"TtoEM trigger inconsistency\"\n return False\n if self.isMC:\n self.TriggerWeight = self.elSFs.getTriggerSF(el_tlv.Pt(),el_tlv.Eta())\n self.LeptonWeight = self.elSFs.getIdIsoSF(el_tlv.Pt(), el_tlv.Eta())\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.EventWeight *= self.TriggerWeight\n self.EventWeight *= self.LeptonWeight\n self.Mu1_pt = mu_tlv.Pt()\n self.Mu1_eta = mu_tlv.Eta()\n self.Mu1_phi = mu_tlv.Phi()\n self.Mu1_mass = mu_tlv.M()\n self.Ele1_pt = el_tlv.Pt()\n self.Ele1_eta = el_tlv.Eta()\n self.Ele1_phi = el_tlv.Phi()\n self.Ele1_mass = el_tlv.M()\n self.isTtoEM = True\n\n ######### ZtoNN ##########\n self.out.nncutflow.Fill(0.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if not self.isZtoMM and not self.isZtoEE and not self.isTtoEM:\n if event.PuppiMET_pt > met_pt_cut :\n self.out.nncutflow.Fill(1.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if self.nElectrons == 0 and self.nMuons == 0 and self.nTaus == 0:\n self.out.nncutflow.Fill(2.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n V.SetPtEtaPhiE(event.PuppiMET_pt,0.,event.PuppiMET_phi,event.PuppiMET_pt)\n V_chs.SetPtEtaPhiE(event.MET_pt,0.,event.MET_phi,event.MET_pt)\n if trigger_MET == None:\n if not self.isMC and not trigger_METMHT and not trigger_METMHTNoMu:\n print \"ZtoNN Trigger inconsistency\"\n return False\n else:\n if not self.isMC and not trigger_MET and not trigger_METMHT and not trigger_METMHTNoMu:\n print \"ZtoNN Trigger inconsistency\"\n return False\n self.out.nncutflow.Fill(3.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if self.filter(event) == False:\n print \"Bad event\"\n return False\n self.out.nncutflow.Fill(4.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if self.isMC:\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.TriggerWeight = 1.\n self.isZtoNN = True\n #stop if no semileptonic decays\n if self.isZtoEE==False and self.isZtoMM==False and self.isZtoNN==False and self.isTtoEM==False:\n return False\n ########## setting the Higgs and V index #######\n fatjet_idx_H = 0\n valid_Higgs = False\n if self.isZtoMM:\n fatjet_maxpt = 0.\n for i,fatjet_tlv in enumerate(fatjet_tlv_list):\n if fatjet_tlv.DeltaR(mu1_tlv)>0.8 and fatjet_tlv.DeltaR(mu2_tlv)>0.8 and fatjet_tlv.Pt()>fatjet_maxpt:\n fatjet_maxpt=fatjet_tlv.Pt()\n fatjet_idx_H = i\n valid_Higgs = True\n if not valid_Higgs:\n return False\n\n elif self.isZtoEE:\n fatjet_maxpt = 0.\n for i,fatjet_tlv in enumerate(fatjet_tlv_list):\n if fatjet_tlv.DeltaR(el1_tlv)>0.8 and fatjet_tlv.DeltaR(el2_tlv)>0.8 and fatjet_tlv.Pt()>fatjet_maxpt:\n 
fatjet_maxpt=fatjet_tlv.Pt()\n fatjet_idx_H = i\n valid_Higgs = True\n if not valid_Higgs:\n return False\n \n elif self.isZtoNN:\n fatjet_maxpt = 0.\n for i,fatjet_tlv in enumerate(fatjet_tlv_list):\n if fatjet_tlv.Pt()>fatjet_maxpt:\n fatjet_maxpt=fatjet_tlv.Pt()\n fatjet_idx_H = i\n\n ############ AK4 Jet ###########\n for ijet in range(event.nJet):\n jet_pt = event.Jet_pt[ijet]\n jet_eta = event.Jet_eta[ijet]\n jet_phi = event.Jet_phi[ijet]\n jet_mass = event.Jet_mass[ijet]\n jet_tlv = ROOT.TLorentzVector()\n jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)\n self.HT += jet_pt\n if jet_eta > -2.5 and jet_eta < -1.479 and jet_phi > -1.55 and jet_phi < -0.9:\n if self.HT_HEM15_16 == -1.:\n self.HT_HEM15_16 = 0.\n self.HT_HEM15_16 += jet_pt\n if jet_pt > ak4_pt_cut and abs(jet_eta) < ak4_eta_cut:\n cleanJet = True\n for loose_electrons_tlv in loose_electrons_tlv_list:\n if loose_electrons_tlv.DeltaR(jet_tlv) < 0.4:\n cleanJet = False\n for loose_muons_tlv in loose_muons_tlv_list:\n if loose_muons_tlv.DeltaR(jet_tlv) < 0.4:\n cleanJet = False\n if cleanJet and getJetID(self.year,event,ijet):\n if len(fatjet_tlv_list) > 0 and fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv) > 1.2:\n jet_tlv_list.append(jet_tlv)\n idx_jet.append(ijet)\n\n ############ AK4 Jet check for VBF ###########\n if self.isZtoMM:\n lep1_tlv = mu1_tlv\n lep2_tlv = mu2_tlv\n if self.isZtoEE:\n lep1_tlv = el1_tlv\n lep2_tlv = el2_tlv\n \n for ijet in range(event.nJet):\n jet_pt = event.Jet_pt[ijet]\n jet_eta = event.Jet_eta[ijet]\n jet_phi = event.Jet_phi[ijet]\n jet_mass = event.Jet_mass[ijet]\n jet_tlv = ROOT.TLorentzVector()\n jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)\n if abs(jet_eta) < 5.0:\n if len(fatjet_tlv_list) > 0:\n if fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv) > 1.2:\n if getJetID(self.year,event,ijet) and event.Jet_puId[ijet]==7:\n if self.isZtoMM or self.isZtoEE:\n if jet_tlv.DeltaR(lep1_tlv)>0.4 and jet_tlv.DeltaR(lep2_tlv)>0.4:\n jet_tlv_list_vbf.append(jet_tlv)\n idx_jet_vbf.append(ijet) \n elif self.isZtoNN:\n jet_tlv_list_vbf.append(jet_tlv)\n idx_jet_vbf.append(ijet) \n\n idx1_vbf = -1\n idx2_vbf = -1\n maxVBFmass = -1.\n for ijet1, jet1_tlv in enumerate(jet_tlv_list_vbf):\n for ijet2, jet2_tlv in enumerate(jet_tlv_list_vbf):\n if ijet1 == ijet2: continue\n eta1 = jet_tlv_list_vbf[ijet1].Eta()\n eta2 = jet_tlv_list_vbf[ijet2].Eta()\n V_VBF = jet_tlv_list_vbf[ijet1]+jet_tlv_list_vbf[ijet2]\n VBFmass = V_VBF.M()\n if abs(eta1-eta2)>4.0 and eta1*eta2<0. 
and VBFmass>maxVBFmass:\n idx1_vbf = ijet1\n idx2_vbf = ijet2\n maxVBFmass = VBFmass\n \n\n self.dijet_VBF_mass = maxVBFmass\n if maxVBFmass > 500.: \n self.isVBF = True\n self.Jet1_VBF_pt = jet_tlv_list_vbf[idx1_vbf].Pt()\n self.Jet1_VBF_eta = jet_tlv_list_vbf[idx1_vbf].Eta()\n self.Jet1_VBF_phi = jet_tlv_list_vbf[idx1_vbf].Phi()\n self.Jet1_VBF_mass = jet_tlv_list_vbf[idx1_vbf].M()\n self.Jet2_VBF_pt = jet_tlv_list_vbf[idx2_vbf].Pt()\n self.Jet2_VBF_eta = jet_tlv_list_vbf[idx2_vbf].Eta()\n self.Jet2_VBF_phi = jet_tlv_list_vbf[idx2_vbf].Phi()\n self.Jet2_VBF_mass = jet_tlv_list_vbf[idx2_vbf].M()\n self.deltaR_VBF = jet_tlv_list_vbf[idx1_vbf].DeltaR(jet_tlv_list_vbf[idx2_vbf])\n self.deltaR_HVBFjet1 = (fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv_list_vbf[idx1_vbf]))\n self.deltaR_HVBFjet2 = (fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv_list_vbf[idx2_vbf]))\n\n ########## Higgs ######## \n H = fatjet_tlv_list[fatjet_idx_H]\n\n if self.runJEC:\n self.H_mass_nom = event.FatJet_msoftdrop_nom[fatjet_idx_H]\n self.H_mass_jmsUp = event.FatJet_msoftdrop_jmsUp[fatjet_idx_H]\n self.H_mass_jmsDown = event.FatJet_msoftdrop_jmsDown[fatjet_idx_H]\n self.H_mass_jmrUp = event.FatJet_msoftdrop_jmrUp[fatjet_idx_H]\n self.H_mass_jmrDown = event.FatJet_msoftdrop_jmrDown[fatjet_idx_H]\n self.H_pt_nom = event.FatJet_pt_nom[fatjet_idx_H]\n self.H_pt_jesUp = event.FatJet_pt_jesTotalUp[fatjet_idx_H]\n self.H_pt_jesDown = event.FatJet_pt_jesTotalDown[fatjet_idx_H]\n self.H_pt_jerUp = event.FatJet_pt_jerUp[fatjet_idx_H]\n self.H_pt_jerDown = event.FatJet_pt_jerDown[fatjet_idx_H]\n self.PuppiMET_pt_nom = event.PuppiMET_pt_nom\n self.PuppiMET_pt_jesUp = event.PuppiMET_pt_jesTotalUp\n self.PuppiMET_pt_jesDown = event.PuppiMET_pt_jesTotalDown\n self.PuppiMET_pt_jerUp = event.PuppiMET_pt_jerUp\n self.PuppiMET_pt_jerDown = event.PuppiMET_pt_jerDown\n \n H_Eta = H.Eta()\n H_Phi = H.Phi()\n H_M = H.M()\n H_nom = ROOT.TLorentzVector()\n H_jesUp = ROOT.TLorentzVector()\n H_jesDown = ROOT.TLorentzVector()\n H_jerUp = ROOT.TLorentzVector()\n H_jerDown = ROOT.TLorentzVector()\n H_nom.SetPtEtaPhiM(self.H_pt_nom,H_Eta,H_Phi,H_M)\n H_jesUp.SetPtEtaPhiM(self.H_pt_jesUp,H_Eta,H_Phi,H_M)\n H_jesDown.SetPtEtaPhiM(self.H_pt_jesDown,H_Eta,H_Phi,H_M)\n H_jerUp.SetPtEtaPhiM(self.H_pt_jerUp,H_Eta,H_Phi,H_M)\n H_jerDown.SetPtEtaPhiM(self.H_pt_jerDown,H_Eta,H_Phi,H_M)\n MET_nom = ROOT.TLorentzVector()\n MET_jesUp = ROOT.TLorentzVector()\n MET_jesDown = ROOT.TLorentzVector()\n MET_jerUp = ROOT.TLorentzVector()\n MET_jerDown = ROOT.TLorentzVector()\n MET_nom.SetPtEtaPhiM(self.PuppiMET_pt_nom,0.,event.PuppiMET_phi,self.PuppiMET_pt_nom)\n MET_jesUp.SetPtEtaPhiM(self.PuppiMET_pt_jesUp,0.,event.PuppiMET_phi,self.PuppiMET_pt_jesUp)\n MET_jesDown.SetPtEtaPhiM(self.PuppiMET_pt_jesDown,0.,event.PuppiMET_phi,self.PuppiMET_pt_jesDown)\n MET_jerUp.SetPtEtaPhiM(self.PuppiMET_pt_jerUp,0.,event.PuppiMET_phi,self.PuppiMET_pt_jerUp)\n MET_jerDown.SetPtEtaPhiM(self.PuppiMET_pt_jerDown,0.,event.PuppiMET_phi,self.PuppiMET_pt_jerDown)\n\n for ifatjet in idx_fatjet:\n if event.FatJet_btagHbb[ifatjet] > 0.3:\n self.isBoosted4B = True\n\n \n self.nJetsNoFatJet = len(jet_tlv_list)\n \n if self.isZtoNN:\n self.DPhi = abs(MET_tlv.DeltaPhi(H))\n else:\n self.DPhi = abs(V.DeltaPhi(H))\n \n self.VH_deltaR = H.DeltaR(V)\n \n jet_list_temp = []\n for ijet in range(event.nJet):\n jet_pt = event.Jet_pt[ijet]\n jet_eta = event.Jet_eta[ijet]\n jet_phi = event.Jet_phi[ijet]\n jet_mass = event.Jet_mass[ijet]\n jet_tlv = ROOT.TLorentzVector()\n 
jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)\n if jet_tlv.DeltaR(H) < 0.8:\n jet_list_temp.append(ijet)\n if len(jet_list_temp) == 1:\n idx = jet_list_temp[0]\n self.H_chf = event.Jet_chHEF[idx]\n self.H_nhf = event.Jet_neHEF[idx]\n elif len(jet_list_temp) == 2:\n idx1 = jet_list_temp[0]\n idx2 = jet_list_temp[1]\n pt1 = event.Jet_pt[idx1]\n pt2 = event.Jet_pt[idx2]\n chf1 = event.Jet_chHEF[idx1]\n chf2 = event.Jet_chHEF[idx2]\n nhf1 = event.Jet_neHEF[idx1]\n nhf2 = event.Jet_neHEF[idx2]\n self.H_chf = (chf1*pt1+chf2*pt2)/(pt1+pt2) \n self.H_nhf = (nhf1*pt1+nhf2*pt2)/(pt1+pt2)\n elif len(jet_list_temp) == 3:\n idx1 = jet_list_temp[0]\n idx2 = jet_list_temp[1]\n idx3 = jet_list_temp[2]\n pt1 = event.Jet_pt[idx1]\n pt2 = event.Jet_pt[idx2]\n pt3 = event.Jet_pt[idx3]\n chf1 = event.Jet_chHEF[idx1]\n chf2 = event.Jet_chHEF[idx2]\n chf3 = event.Jet_chHEF[idx3]\n nhf1 = event.Jet_neHEF[idx1]\n nhf2 = event.Jet_neHEF[idx2]\n nhf3 = event.Jet_neHEF[idx3]\n self.H_chf = (chf1*pt1+chf2*pt2+chf3*pt3)/(pt1+pt2+pt3) \n self.H_nhf = (nhf1*pt1+nhf2*pt2+nhf3*pt3)/(pt1+pt2+pt3)\n\n\n\n for jet_tlv in jet_tlv_list:\n if abs(MET_tlv.DeltaPhi(jet_tlv)) < self.MinJetMetDPhi:\n self.MinJetMetDPhi = abs(MET_tlv.DeltaPhi(jet_tlv))\n\n\n for ijet in idx_jet:\n if event.Jet_btagDeepB[ijet] > self.MaxJetNoFatJetBTag:\n self.MaxJetNoFatJetBTag = event.Jet_btagDeepB[ijet]\n\n if not self.isData:\n for igenjet in range(event.nGenJetAK8):\n genjetAK8_tlv = ROOT.TLorentzVector()\n genjetAK8_tlv.SetPtEtaPhiM(event.GenJetAK8_pt[igenjet], event.GenJetAK8_eta[igenjet], event.GenJetAK8_phi[igenjet], event.GenJetAK8_mass[igenjet])\n if H.DeltaR(genjetAK8_tlv) < 0.8:\n self.H_hadronflavour = struct.unpack('B',event.GenJetAK8_hadronFlavour[igenjet])[0]\n self.H_partonflavour = event.GenJetAK8_partonFlavour[igenjet]\n self.btagToolAK4_deep.fillEfficiencies(event,idx_jet,fatjet_idx_H)\n self.BTagAK4Weight_deep = self.btagToolAK4_deep.getWeight(event,idx_jet,fatjet_idx_H)\n self.BTagAK4Weight_deep_up = self.btagToolAK4_deep_up.getWeight(event,idx_jet,fatjet_idx_H)\n self.BTagAK4Weight_deep_down = self.btagToolAK4_deep_down.getWeight(event,idx_jet,fatjet_idx_H)\n #search for AK4 jets which match with the subjets from the H\n ak4_subjets = []\n subjet1 = TLorentzVector()\n subjet2 = TLorentzVector()\n subjet1_idx = event.FatJet_subJetIdx1[fatjet_idx_H]\n subjet2_idx = event.FatJet_subJetIdx2[fatjet_idx_H]\n if subjet1_idx>=0. 
and subjet2_idx>=0.:\n subjet1.SetPtEtaPhiM(event.SubJet_pt[subjet1_idx],event.SubJet_eta[subjet1_idx],event.SubJet_phi[subjet1_idx],event.SubJet_mass[subjet1_idx])\n subjet2.SetPtEtaPhiM(event.SubJet_pt[subjet2_idx],event.SubJet_eta[subjet2_idx],event.SubJet_phi[subjet2_idx],event.SubJet_mass[subjet2_idx])\n for jetid in range(event.nJet):\n ak4jet = TLorentzVector()\n ak4jet.SetPtEtaPhiM(event.Jet_pt[jetid],event.Jet_eta[jetid],event.Jet_phi[jetid],event.Jet_mass[jetid])\n if ak4jet.DeltaR(subjet1)<0.4:\n ak4_subjets.append(jetid)\n if ak4jet.DeltaR(subjet2)<0.4:\n ak4_subjets.append(jetid)\n self.btagToolAK8_deep.fillEfficiencies(event,ak4_subjets,fatjet_idx_H)\n self.BTagAK8Weight_deep = self.btagToolAK8_deep.getWeight(event,ak4_subjets,fatjet_idx_H)\n self.BTagAK8Weight_deep_up = self.btagToolAK8_deep_up.getWeight(event,ak4_subjets,fatjet_idx_H)\n self.BTagAK8Weight_deep_down = self.btagToolAK8_deep_down.getWeight(event,ak4_subjets,fatjet_idx_H)\n ########### X and variables ############\n X = V + H\n if self.isZtoNN:\n X_chs = V_chs + H\n self.X_mass_chs = X_chs.M()\n\n if self.runJEC:\n X_nom = V + H_nom\n X_jesUp = V + H_jesUp\n X_jesDown = V + H_jesDown\n X_jerUp = V + H_jerUp\n X_jerDown = V + H_jerDown\n X_MET_nom = MET_nom + H_nom\n X_MET_jesUp = MET_jesUp + H_jesUp\n X_MET_jesDown = MET_jesDown + H_jesDown\n X_MET_jerUp = MET_jerUp + H_jerUp\n X_MET_jerDown = MET_jerDown + H_jerDown\n self.X_mass_nom = X_nom.M()\n self.X_mass_jesUp = X_jesUp.M()\n self.X_mass_jesDown = X_jesDown.M()\n self.X_mass_jerUp = X_jerUp.M()\n self.X_mass_jerDown = X_jerDown.M()\n self.X_mass_MET_nom = X_MET_nom.M()\n self.X_mass_MET_jesUp = X_MET_jesUp.M()\n self.X_mass_MET_jesDown = X_MET_jesDown.M()\n self.X_mass_MET_jerUp = X_MET_jerUp.M()\n self.X_mass_MET_jerDown = X_MET_jerDown.M()\n\n self.V_pt = V.Pt()\n self.V_eta = V.Eta()\n self.V_phi = V.Phi()\n self.V_mass = V.M()\n \n if self.isZtoNN:\n self.V_mass = 0.\n\n self.H_pt = H.Pt()\n self.H_eta = H.Eta()\n self.H_phi = H.Phi()\n self.H_M = H.M()\n self.H_mass = event.FatJet_msoftdrop[fatjet_idx_H]\n self.X_pt = X.Pt()\n self.X_eta = X.Eta()\n self.X_phi = X.Phi()\n self.X_mass = X.M()\n\n\n self.H_dbt = event.FatJet_btagHbb[fatjet_idx_H]\n self.BtagDeepB = event.FatJet_btagDeepB[fatjet_idx_H]\n self.DeepTagMD_H4qvsQCD = event.FatJet_deepTagMD_H4qvsQCD[fatjet_idx_H]\n self.DeepTagMD_HbbvsQCD = event.FatJet_deepTagMD_HbbvsQCD[fatjet_idx_H]\n self.DeepTagMD_ZHbbvsQCD = event.FatJet_deepTagMD_ZHbbvsQCD[fatjet_idx_H]\n self.DeepTagMD_ZbbvsQCD = event.FatJet_deepTagMD_ZbbvsQCD[fatjet_idx_H]\n self.DeepTagMD_bbvsLight = event.FatJet_deepTagMD_bbvsLight[fatjet_idx_H]\n self.DeepTagMD_WvsQCD = event.FatJet_deepTagMD_WvsQCD[fatjet_idx_H]\n self.DeepTagMD_ZvsQCD = event.FatJet_deepTagMD_ZvsQCD[fatjet_idx_H]\n self.H_tau21 = fatjet_tau21_list[fatjet_idx_H]\n self.H_tau41 = fatjet_tau41_list[fatjet_idx_H]\n self.H_tau42 = fatjet_tau42_list[fatjet_idx_H]\n self.H_tau31 = fatjet_tau31_list[fatjet_idx_H]\n self.H_tau32 = fatjet_tau32_list[fatjet_idx_H]\n self.VHDEta = abs(V.Eta() - H.Eta())\n\n \n \n if event.FatJet_subJetIdx1[fatjet_idx_H] >= 0:\n Hcsv1 = event.SubJet_btagCSVV2[event.FatJet_subJetIdx1[fatjet_idx_H]]\n Hdeepcsv1 = event.SubJet_btagDeepB[event.FatJet_subJetIdx1[fatjet_idx_H]]\n else:\n Hcsv1 = -1.\n Hdeepcsv1 = -1.\n if event.FatJet_subJetIdx2[fatjet_idx_H] >= 0:\n Hcsv2 = event.SubJet_btagCSVV2[event.FatJet_subJetIdx2[fatjet_idx_H]]\n Hdeepcsv2 = event.SubJet_btagDeepB[event.FatJet_subJetIdx2[fatjet_idx_H]]\n else:\n Hcsv2 = -1.\n Hdeepcsv2 = 
-1.\n \n self.H_csv1 = max(Hcsv1,Hcsv2)\n self.H_csv2 = min(Hcsv1,Hcsv2)\n self.H_deepcsv1 = max(Hdeepcsv1,Hdeepcsv2)\n self.H_deepcsv2 = min(Hdeepcsv1,Hdeepcsv2)\n\n\n if self.year == 2016:\n wp_loose = 0.2217\n wp_medium = 0.6321\n wp_tight = 0.8953\n elif self.year == 2017:\n wp_loose = 0.1522\n wp_medium = 0.4941\n wp_tight = 0.8001\n elif self.year == 2018:\n wp_loose = 0.1241\n wp_medium = 0.4184\n wp_tight = 0.7527\n\n if self.H_deepcsv2 > wp_loose:\n self.isHtobb = True\n if self.H_deepcsv1 > wp_medium and self.H_deepcsv2 > wp_loose:\n self.isHtobb_ml = True\n\n if self.MaxJetNoFatJetBTag > wp_loose:\n self.isMaxBTag_loose = True\n if self.MaxJetNoFatJetBTag > wp_medium:\n self.isMaxBTag_medium = True\n if self.MaxJetNoFatJetBTag > wp_tight:\n self.isMaxBTag_tight = True\n\n \n if self.H_mass != 0.:\n self.H_ddt = self.H_tau21 + 0.082 *np.log(self.H_mass*self.H_mass/self.H_pt)\n else:\n self.H_ddt = -1.\n \n self.X_tmass = np.sqrt(2.*V.Pt()*fatjet_tlv_list[fatjet_idx_H].Pt()*(1.-np.cos(fatjet_tlv_list[fatjet_idx_H].DeltaPhi(V))))\n if self.isZtoNN:\n self.X_mass = self.X_tmass\n else:\n self.X_mass = X.M()\n if self.X_mass > 750 and self.VH_deltaR > 2:\n if self.MinJetMetDPhi>0.5 and self.DPhi>2:\n for i,weight in enumerate(nncutflow_list):\n self.out.nncutflow_inc.Fill(i,weight)\n if self.VHDEta<1.3:\n for i,weight in enumerate(eecutflow_list):\n self.out.eecutflow_inc.Fill(i,weight)\n for i,weight in enumerate(mmcutflow_list):\n self.out.mmcutflow_inc.Fill(i,weight)\n \n if self.isZtoEE or self.isZtoMM or self.isZtoNN or self.isTtoEM:\n self.fillBranches(event)\n return True", "def predict_from_all_children(self, node: TreeSplits):\n # Collect the children\n children_values = BaseTree.collect_children(node)\n # Aggregate the leaf values\n return self.agg_func(children_values)", "def classify_treeNN(self, query_name):\n # 1) Find set of closest neighbors & their class names\n # ie. 
leaves with at most neighborhood_max_edges edges between itself \n # and the query node\n neighborhood_classes = self.getNeighborhoodClasses(query_name)\n print \"neighborhood \" , neighborhood_classes\n\n # 2) Find aggregate similarity score for each class\n # Use minimum operator for distance measure & maximum for similarity measure\n # EQ 6.1 in Chapt 6, Busa-Fekete et al\n R = {}\n for c,ids in neighborhood_classes.iteritems():\n sim_score = min([nx.shortest_path_length(self.tree, source=query_name, \n target=i, weight='length') for i in ids])\n if DEBUG: print \"\\tCLASS / SIM_SCORE: \", c, sim_score\n R[sim_score] = c # distance measure\n\n min_score = min(R.keys())\n if DEBUG: print \"MIN_SCORE: \", min_score\n\n return R[min_score] #class of minimum distance score", "def analyze(self, event):\n jets = self.inputCollection(event)\n \n predictionsPerCtauAndClass = {ctau: {className: [] for className in self.predictionLabels} for ctau in self.logctauValues}\n for ijet,jet in enumerate(jets):\n if not hasattr(jet,self.taggerName):\n print \"WARNING - jet \",jet,\" has no \",self.taggerName,\" result stored for \",self.outputName,\" -> skip\"\n continue\n predictions = getattr(jet,self.taggerName)\n for ctau in self.logctauValues:\n for label in self.predictionLabels:\n predictionsPerCtauAndClass[ctau][label].append(predictions[ctau][label])\n \n for ctau in self.logctauValues:\n for label in self.predictionLabels:\n predictionsPerCtauAndClass[ctau][label] = sorted(predictionsPerCtauAndClass[ctau][label],reverse=True)\n\n for m in self.multiplicities:\n if m<len(predictionsPerCtauAndClass[ctau][label]):\n self.out.fillBranch(self.outputName+\"_\"+getCtauLabel(ctau)+\"_\"+label+\"_min\"+str(m),predictionsPerCtauAndClass[ctau][label][m])\n else:\n self.out.fillBranch(self.outputName+\"_\"+getCtauLabel(ctau)+\"_\"+label+\"_min\"+str(m),0)\n \n \n return True", "def compute_adjacency_confidence(self, full_attachedness, tree_adjacency, tree_based_confidence):\n if sp.sparse.issparse(tree_adjacency):\n tree_adjacency = [tree_adjacency[i].nonzero()[1] for i in range(tree_adjacency.shape[0])]\n segs_distances = 1/full_attachedness\n if not tree_based_confidence: # inter- and intra-cluster based confidence\n from scipy.stats import norm\n # intra-cluster connections\n total_n = self.k * np.array(self.segs_sizes) # total number of connections\n a = full_attachedness\n confidence = np.zeros_like(full_attachedness)\n for i in range(a.shape[0]):\n for j in range(i+1, a.shape[1]):\n expected = total_n[i] * total_n[j] / np.sum(total_n)**2\n actual = a[i, j] / np.sum(total_n)\n variance = expected * (1 - expected) / np.sum(total_n)\n if actual > expected:\n confidence[i, j] = 1\n elif actual < 1e-12:\n confidence[i, j] = 0\n else:\n confidence[i, j] = 2 * norm.cdf(actual, expected, np.sqrt(variance))\n # i_name = self.segs_names_original[i]\n # j_name = self.segs_names_original[j]\n # print(i_name, j_name, expected, actual, variance, confidence[i, j])\n full_confidence = confidence + confidence.T\n tree_confidence = self.compute_tree_confidence(full_confidence, tree_adjacency)\n else:\n # compute the average tree distances\n tree_distances = []\n for i, neighbors in enumerate(tree_adjacency):\n tree_distances += segs_distances[i][neighbors].tolist()\n median_tree_distances = np.median(tree_distances)\n full_confidence = np.zeros_like(segs_distances)\n full_confidence[segs_distances <= median_tree_distances] = 1\n full_confidence[segs_distances > median_tree_distances] = (\n 
np.exp(-(segs_distances-median_tree_distances)/median_tree_distances)\n [segs_distances > median_tree_distances])\n np.fill_diagonal(full_confidence, 0)\n tree_confidence = self.compute_tree_confidence(full_confidence, tree_adjacency, minimal_tree_attachedness=MINIMAL_TREE_ATTACHEDNESS)\n return full_confidence, tree_confidence", "def classify(self, features):\n\n # TODO: finish this.\n class_labels = []\n # TODO: finish this.\n features = np.array(features)\n feat_shape = features.shape\n for i in range(feat_shape[0]):\n vote = np.zeros((self.num_trees))\n for j in range(self.num_trees):\n #print self.trees[j].classify(feat)\n vote[j] = self.trees[j].classify(features[i,self.attr_track[j]].reshape(1,-1))[0]\n counts = np.bincount(vote.astype(int))\n class_labels.append(np.argmax(counts))\n return class_labels", "def predict(self,event_graph):\n if self._greedy_inference:\n return self.greedy_inference(event_graph)\n elif self._probs_insteadOf_weights:\n return find_min_hamiltonian_path(event_graph,self._weights,True)\n else:\n return find_min_hamiltonian_path(event_graph,self._weights)", "def average(data, event):\n if len(data) == 0:\n return 0\n\n score = 0\n # scores = []\n count = 0\n for i in data:\n count += 1\n if event == 'Swim' or event == 'Run':\n num = time_seconds(i[event])\n #print(\"first if\")\n #Sprint(num)\n else:\n num = int(i[event])\n #print(\"second if\")\n #print(num)\n #scores[count] =\n #print(\"end of loop count\" + str(count))\n score += num\n #print (\"score\" + str(score))\n\n # total = 0\n # for x in range(0,len(scores)):\n # total += scores[x]\n score = float(score)\n\n return score / count", "def predict(self,entry):\n if self.type == 'v':\n return self.value\n v = entry[self.feature]\n if v is None:\n #multiple childrens' predictions\n counts = defaultdict(int)\n labels = self.predict_all(entry,counts)\n if len(counts) == 1:\n return counts.keys()[0]\n #return a probability distribution\n return normalize(counts)\n #maximum likelihood\n #return argmax(counts)\n if self.type == 's':\n c = None\n try:\n c = self.children[v]\n except KeyError:\n #print \"Unseen value for feature\",self.feature,\": \",v\n best = None\n bestDist = float('inf')\n for (val,c) in self.children.iteritems():\n if abs(val - v) < bestDist:\n bestDist = abs(val - v)\n best = c\n c = best\n return c.predict(entry)\n elif self.type == 'i':\n if v <= self.value:\n return self.children[0].predict(entry)\n else:\n return self.children[1].predict(entry)\n raise RuntimeError(\"Invalid DecisionTreeNode type?\")", "def evaluate(self, tree):\n\t\tpass", "def predict(self, data):\n try:\n getattr(self, \"tree\")\n except AttributeError:\n raise RuntimeError(\"You must train classifer before predicting data!\")\n\n predicts_proba = self.predict_proba(data)\n predicts = _classify_from_probs(predicts_proba)\n return predicts", "def predict_avg(self, features, data_tier):\n prediction = self.clf_avg[data_tier].predict(features)\n return prediction[0]", "def ensembleVote(x, classes, ensemble):\n votes = np.array([0 for kk in range(len(classes))])\n for i in ensemble:\n votes = votes + classProbs(x, ensemble[i][\"tree\"], classes)\n maxVote = 0\n loc = None\n for ind, vote in enumerate(votes):\n if vote > maxVote:\n maxVote = vote\n loc = ind\n prediction = classes[loc]\n return prediction", "def apply(self, tree):\n raise NotImplementedError()", "def compute_evidence_weighted_aggregated_veracity_score(\n gold: Dict[Tuple[int, str], Dict],\n pred: Dict[Tuple[int, str], Dict],\n elementwise_evidence_f1: 
Dict[Tuple[int, str], float],\n elementwise_evidence_f1_corrected: Dict[Tuple[int, str], float],\n) -> Dict:\n\n accuracies_passages: List[float] = []\n f1_scores_evidence: List[float] = []\n f1_scores_corrected_evidence: List[float] = []\n\n keys: List[Any] = list(gold.keys())\n\n for key in keys:\n gold_sample: Dict = gold[key]\n pred_sample: Dict = pred[key]\n\n gold_passage_label: str = gold_sample['labels']['passage']\n predicted_passage_label: str = pred_sample['predicted']\n\n accuracies_passages.append(get_instance_accuracy(gold_passage_label, predicted_passage_label))\n f1_scores_evidence.append(elementwise_evidence_f1[key])\n f1_scores_corrected_evidence.append(elementwise_evidence_f1_corrected[key])\n\n return {\n 'ev_weighted_accuracy': np.mean(np.array(accuracies_passages) * np.array(f1_scores_evidence)),\n 'ev_weighted_accuracy_corrected': np.mean(\n np.array(accuracies_passages) * np.array(f1_scores_corrected_evidence)\n )\n }", "def calculate_weighted_results():\n pass", "def weight(tree):\n return root(tree)", "def predict(self, new_table):\r\n \r\n if self.is_leaf:\r\n if len(self.path) == 1:\r\n self.cur.execute(\"SELECT ROUND(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \" WHERE \" + self.path[0] + \";\")\r\n else:\r\n self.cur.execute(\"SELECT ROUND(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \" WHERE \" + \" AND \".join(self.path) + \";\")\r\n y = self.cur.fetchone()[0]\r\n self.cur.execute(\"UPDATE \" + new_table + \" SET preds = \" + str(y) + \" WHERE \" + \" AND \".join(self.path) + \";\")\r\n \r\n else:\r\n self.lhs.predict(new_table)\r\n self.rhs.predict(new_table)", "def h_t(self, x, t):\n ret = 0\n strong_classifier = self.classifiers[0:t+1]\n for wc in strong_classifier:\n ret += wc.classify(x)\n return ret", "def h_t(self, x, t):\n ret = 0\n strong_classifier = self.classifiers[0:t+1]\n for wc in strong_classifier:\n ret += wc.classify(x)\n return ret", "def importance_sample(self,e,sampling_weights = 'uniform'):\n\t\tresult = dict()\n\t\tlikelihood = 1.0\n\t\tfor n in self.topological_sort():\n\t\t\tpvals = tuple(result[p] for p in n.parents)\n\t\t\tif n.name in e:\n\t\t\t\t#evidence variable\n\t\t\t\tresult[n.name] = e[n.name]\n\t\t\t\tlikelihood *= n.cpt.prob_dist(pvals)[e[n.name]]\n\t\t\telse:\n\t\t\t\t#not an evidence variable\n\t\t\t\t#likelihood weight of the chosen value for n\n\t\t\t\tw = 1.0\n\t\t\t\t#chosen value\n\t\t\t\tnvalue = None\n\t\t\t\tif sampling_weights=='uniform':\n\t\t\t\t\tnvalue = random.choice(n.cpt.values())\n\t\t\t\t\tw = 1.0/len(n.cpt.values())\n elif sampling_weights=='proportional':\n\t\t\t\t\tnvalue = n.cpt.rand_result(pvals)\n\t\t\t\t\tw = n.cpt.prob_dist(pvals)[nvalue]\n\t\t\t\telse:\n\t\t\t\t\tnvalue = sampling_weights[n.name].rand_result(pvals)\n\t\t\t\t\tw = sampling_weights[n.name].prob_dist(pvals)[nvalue]\n\t\t\t\tresult[n.name] = nvalue\n\t\t\t\tlikelihood *= (n.cpt.prob_dist(pvals)[nvalue]/w)\n\t\treturn (likelihood,result)", "def predictWithTree(observation, tree, classes, d_boundary=0.5):\n \n try:\n assert len(classes) == 2\n except Exception('Currently, predict with tree only works with two classes') as inst:\n print inst\n raise Exception\n \n probs= classProbs(observation,tree, classes)\n if probs[1] >= d_boundary:\n return classes[1]\n else:\n return classes[0]\n print \"There is some unexpected error, none of the probabilities is greater than the boundary probability\"\n print \"Perhaps this is a multiclass problem and the boundary probability was misspecified?\"\n return", "def 
classify_test(classifier, test_data):\n for d in test_data:\n test(d[\"name\"], d[\"attribute\"], classifier)", "def impurity_gain(node, attribute):\n data_subset1 = filter_data(node.data,node.ancestors)\n data_counts = list(Counter(data_subset1['Class']).values())\n base_impurity = impurity(data_counts)\n num_values = len(data_subset1)\n impurity_sum = 0\n \n for value in [0,1]:\n data_subset2 = filter_data(node.data, node.ancestors + [(attribute,value)])\n subset_counts = list(Counter(data_subset2['Class']).values())\n impurity_sum += (len(data_subset2)/num_values) * impurity(subset_counts)\n \n return base_impurity - impurity_sum", "def decision_tree(df, dt_dict, curr_node,\r\n prev_attr = None, align_dir = None,\r\n depth = -1, no_data = False,\r\n ensemble = None):\r\n \r\n class_count = get_class_count(df)\r\n # get the class label counts for the given dataframe\r\n leaf_node_bool = check_leaf_node(df)\r\n # this function helps to check if we have a leaf node\r\n if leaf_node_bool:\r\n # if its leaf node\r\n curr_node[align_dir] = df['class'].values[0]\r\n # assign the leaf node value\r\n elif no_data:\r\n # if we are out of data points\r\n class_counts = df['class'].value_counts()\r\n # get the class counts\r\n curr_node[align_dir] = np.argmax(class_counts)\r\n # assign the majority class of prev node\r\n else:\r\n entropy_values_series = impurity.entropy_calc(df, ensemble = ensemble)\r\n # calculate the entropy values for each feature\r\n info_gain_dict = {}\r\n # empty dict for information gain\r\n for feature in entropy_values_series.index:\r\n # iterate over each features\r\n impurity.information_gain_calc(df, feature, info_gain_dict)\r\n # function call for information gain calculation\r\n for f in entropy_values_series.index:\r\n # iterate over each feature\r\n information_gain = entropy_values_series[f] - info_gain_dict[f][1]\r\n # calculation of information gain\r\n info_gain_dict[f] = (info_gain_dict[f][0], information_gain)\r\n # update the information gain dict\r\n best_feature = sorted(info_gain_dict, key = lambda x: info_gain_dict[x][1])[-1]\r\n # get the best feature on which to be splitted.\r\n #print(best_feature)\r\n node_value = (best_feature, info_gain_dict[best_feature], class_count[0],\r\n class_count[1])\r\n # get the node value\r\n \r\n if not leaf_node_bool and align_dir:\r\n # growing the tree\r\n if depth == 0:\r\n if node_value[2] > node_value[3]:\r\n node_value = 0\r\n else:\r\n node_value = 1\r\n curr_node[align_dir] = node_value\r\n return 0\r\n else:\r\n curr_node[align_dir] = {node_value:{}}\r\n curr_node = curr_node[align_dir][node_value]\r\n else:\r\n dt_dict[node_value] = {}\r\n curr_node = dt_dict[node_value]\r\n \r\n data_split(df, best_feature, info_gain_dict, \r\n dt_dict, curr_node, depth)\r\n # function call for data split\r", "def get_average_progress(self, event=None):\n cnt_items = len(self)\n sum_progress = None\n\n if event == self.EVENT_TOTAL_PROGRESS:\n return self.get_average_complete_progress()\n\n if event is None:\n sum_progress = sum(map(lambda node: node.get_average_progress(), self.values()))\n else:\n sum_progress = self.get_sum_progress_event(event)\n\n if cnt_items > 0:\n return sum_progress * 1.0 / cnt_items\n\n return 0.0", "def get_prediction(self, document):\n return self.classify(document, self.tree)", "def classify_data(self, test_set, include_features_in_result=False):\n if len(test_set) == 1:\n return self.__classify(test_set, self.__tree)\n else:\n\n indices = test_set.index.values.tolist()\n correct_classified_rows 
= 0\n\n classification_result = []\n\n for index in indices:\n\n training_row = pd.DataFrame(test_set.loc[index])\n training_row = training_row.T\n\n result_row = [list(x) for x in training_row.values][0]\n expected_value = str(training_row[self.__resulting_feature].iloc[0])\n classified_value = self.classify_data(training_row)\n result_row.append(classified_value)\n result_row = tuple(result_row)\n\n classification_result.append(result_row)\n\n if expected_value == classified_value:\n correct_classified_rows += 1\n\n self.accuracy_of_previous_test = (correct_classified_rows / len(test_set) * 100)\n\n column_names = list(test_set)\n column_names.append(\"classified\")\n classification_result = pd.DataFrame(classification_result, columns=column_names)\n\n if include_features_in_result:\n return classification_result\n else:\n return classification_result.iloc[:, -2:]", "def _leaf_calculation(y, label, sample_weights=None):\n if sample_weights is None:\n sample_weights = np.ones(y.shape[0]) / y.shape[0]\n # YOUR CODE HERE\n # begin answer\n numerator=np.sum(y)\n denominator=np.sum((label-y)*(1-label+y))\n if numerator == 0 or abs(denominator) < 1e-150:\n return 0.0\n else:\n return numerator/denominator", "def GetPhyloCoEventScore(tree, phenotree, phen_ind, skip=0, with_rand=False):\n\n population = (len(tree) * 2) - 1\n score = np.zeros(tree.genotype.shape[1] - skip)\n node_to_arr = lambda n: np.array(n.genotype.todense().astype(np.int))[0]\n contingency_sum = 100\n for i, (cur_node, phen_node) in tqdm.tqdm(enumerate(zip(tree.traverse(), phenotree.traverse())),\n total=population, desc='Iterating tree'):\n if not cur_node.is_root():\n if not cur_node.is_leaf() and with_rand and cur_node.random[phen_ind]: continue\n node = node_to_arr(cur_node)\n prev_node = node_to_arr(cur_node.up)\n\n gene_state = node[skip:]\n prev_gene_state = prev_node[skip:]\n\n phen_state = phen_node.genotype[0, phen_ind]\n prev_phen_state = phen_node.up.genotype[0, phen_ind]\n\n phen_event = np.abs((prev_phen_state - phen_state)) # all that differs from paralel is an abs\n gene_event = np.abs((prev_gene_state - gene_state))\n\n if with_rand: gene_event[cur_node.up.random[skip:]] = 0\n score += phen_event * gene_event\n\n return score.astype(np.int)", "def infer(self,data,examples,k=10):\n\n example_values = []\n for example in examples:\n example_value = 0\n for i in range(k):\n tree_i = self.boosted_trees[i]\n tree_i_value = tree_i.infer(data,example)\n example_value += tree_i_value\n example_values.append(sigmoid(example_value))\n\n return example_values", "def predict_tree(self, testing_data, average=False):\n predictions = []\n for point in testing_data:\n # Loop over each point and find it's k-nearest neighbors\n k_nearest = self.kd_tree.return_nearest_k(point, self.k)\n targets = [self.targets[n.node[1]] for n in k_nearest]\n if average:\n predictions.append(round(np.average(targets)))\n else:\n unique, counts = np.unique(targets, return_counts=True)\n max_index = np.argmax(counts)\n predictions.append(unique[max_index])\n return predictions", "def __classify(self, instance, tree, default=None):\n attribute = str(list(tree.keys())[0])\n keys_of_attribute = list(tree[attribute].keys())\n if instance[attribute].iloc[0] in keys_of_attribute:\n subtree = tree[attribute]\n result = subtree[instance[attribute].iloc[0]]\n if isinstance(result, dict):\n return self.__classify(instance, result)\n else:\n return result\n else:\n return default", "def 
class_average(images,ref=None,niter=1,normproc=(\"normalize.edgemean\",{}),prefilt=0,align=(\"rotate_translate_flip\",{}),\n\t\taligncmp=(\"ccc\",{}),ralign=None,raligncmp=None,averager=(\"mean\",{}),scmp=(\"ccc\",{}),keep=1.5,keepsig=1,automask=0,saveali=0,verbose=0,callback=None,center=\"xform.center\"):\n\n\tif verbose>2 : print \"class_average(\",images,ref,niter,normproc,prefilt,align,aligncmp,ralign,raligncmp,averager,scmp,keep,keepsig,automask,verbose,callback,\")\"\n\n\t# nimg is the number of particles we have to align/average\n\tif isinstance(images[0],EMData) : nimg=len(images)\n\telif isinstance(images[0],str) and isinstance(images[1],int) : nimg=len(images)-1\n\telse : raise Exception,\"Bad images list (%s)\"%str(images)\n\n\tif verbose>2 : print \"Average %d images\"%nimg\n\n\t# If one image and no reference, just return it\n\tif nimg==1 and ref==None : return (get_image(images,0,normproc),[(0,Transform(),1)])\n\n\t# If one particle and reference, align and return\n\tif nimg==1:\n\t\tif averager[0]!=\"mean\" : raise Exception,\"Cannot perform correct average of single particle\"\n\t\tali=align_one(get_image(images,0,normproc),ref,prefilt,align,aligncmp,ralign,raligncmp)\n\t\ttry: ali[\"model_id\"]=ref[\"model_id\"]\n\t\texcept: pass\n\t\tsim=ali.cmp(scmp[0],ref,scmp[1])\t\t\t# compare similarity to reference (may use a different cmp() than the aligner)\n\t\treturn (ali,[(sim,ali[\"xform.align2d\"],1)])\n\n\t# If we don't have a reference image, we need to make one\n\tif ref==None :\n\t\tif verbose : print \"Generating reference\"\n#\t\tsigs=[(get_image(i)[\"sigma\"],i) for i in range(nimg)]\t\t# sigma for each input image, inefficient\n#\t\tref=get_image(images,max(sigs)[1])\n\t\tref=get_image(images,0,normproc)\t\t\t\t\t\t\t\t\t\t# just start with the first, as EMAN1\n\n\t\t# now align and average the set to the gradually improving average\n\t\tfor i in range(1,nimg):\n\t\t\tif verbose>1 :\n\t\t\t\tprint \".\",\n\t\t\t\tsys.stdout.flush()\n\t\t\tali=align_one(get_image(images,i,normproc),ref,prefilt,align,aligncmp,ralign,raligncmp)\n\t\t\tref.add(ali)\n\n\t\t# A little masking and centering\n\t\ttry:\n\t\t\tgmw=max(5,ref[\"nx\"]/16)\t\t# gaussian mask width\n\t\t\t#ref.process_inplace(\"filter.highpass.gauss\",{\"cutoff_pixels\":min(ref[\"nx\"]/10,5)})\t# highpass to reduce gradient issues\n\t\t\t#ref.process_inplace(\"normalize.circlemean\")\n\t\t\t#ref2=ref.process(\"mask.gaussian\",{\"inner_radius\":ref[\"nx\"]/2-gmw,\"outer_radius\":gmw/1.3})\n\t\t\t#ref2.process_inplace(\"filter.lowpass.gauss\",{\"cutoff_abs\":0.07})\t# highpass to reduce gradient issues\n\t\t\t#ref2.process_inplace(\"normalize.circlemean\")\n\t\t\t#ref2.process_inplace(\"threshold.binary\",{\"value\":ref[\"mean\"]+ref[\"sigma\"]*1.5})\n\t\t\t#ref2.process_inplace(\"xform.centerofmass\",{\"threshold\":0.5})\t\t\t\t\t\t# TODO: should probably check how well this works\n\t\t\t#fxf=ref2[\"xform.align2d\"]\n\t\t\t#ref.translate(fxf.get_trans())\n\t\t\t\n\t\t\tif center:\t#jesus\n\t\t\t\tref.process_inplace(center)\n\t\t\tref.process_inplace(\"normalize.circlemean\",{\"radius\":ref[\"nx\"]/2-gmw})\n\t\t\tref.process_inplace(\"mask.gaussian\",{\"inner_radius\":ref[\"nx\"]/2-gmw,\"outer_radius\":gmw/1.3})\n\t\t\tref_orient=None\n\t\texcept:\n\t\t\ttraceback.print_exc()\n\telse:\n\t\ttry: ref_orient=ref[\"xform.projection\"]\n\t\texcept: ref_orient=None\n\n\t\ttry: ref_model=ref[\"model_id\"]\n\t\texcept: ref_model=0\n\n\tif verbose>1 : print \"\"\n\n\tinit_ref=ref.copy()\n\n\t# Iterative 
alignment\n\tptcl_info=[None]*nimg\t\t# empty list of particle info\n\n\t# This is really niter+1 1/2 iterations. It gets terminated 1/2 way through the final loop\n\tfor it in range(niter+2):\n\t\tif verbose : print \"Starting iteration %d\"%it\n\t\tif callback!=None : callback(int(it*100/(niter+2)))\n\n\t\tmean,sigma=0.0,1.0\t\t# defaults for when similarity isn't computed\n\n\t\t# Evaluate quality from last iteration, and set a threshold for keeping particles\n\t\tif it>0:\n\t\t\t# measure statistics of quality values\n\t\t\tmean,sigma=0,0\n\t\t\tfor sim,xf,use in ptcl_info:\n\t\t\t\tmean+=sim\n\t\t\t\tsigma+=sim**2\n\t\t\tmean/=len(ptcl_info)\n\t\t\tsigma=sqrt(sigma/len(ptcl_info)-mean**2)\n\n\t\t\t# set a threshold based on statistics and options\n\t\t\tif keepsig:\t\t\t\t\t# keep a relative fraction based on the standard deviation of the similarity values\n\t\t\t\tthresh=mean+sigma*keep\n\t\t\t\tif verbose>1 : print \"mean = %f\\tsigma = %f\\tthresh=%f\"%(mean,sigma,thresh)\n\t\t\telse:\t\t\t\t\t\t# keep an absolute fraction of the total\n\t\t\t\tl=[i[0] for i in ptcl_info]\n\t\t\t\tl.sort()\n\t\t\t\ttry: thresh=l[int(len(l)*keep)]\n\t\t\t\texcept:\n\t\t\t\t\tif verbose: print \"Keeping all particles\"\n\t\t\t\t\tthresh=l[-1]+1.0\n\n\t\t\tif verbose:\n\t\t\t\tprint \"Threshold = %1.4f Quality: min=%f max=%f mean=%f sigma=%f\"%(thresh,min(ptcl_info)[0],max(ptcl_info)[0],mean,sigma)\n\n\t\t\t# mark the particles to keep and exclude\n\t\t\tnex=0\n\t\t\tfor i,pi in enumerate(ptcl_info):\n\t\t\t\tif pi[0]>thresh :\n\t\t\t\t\tnex+=1\n\t\t\t\t\tptcl_info[i]=(pi[0],pi[1],0)\n\t\t\t\telif pi[2]==0:\n\t\t\t\t\tptcl_info[i]=(pi[0],pi[1],1)\n\n\t\t\tif verbose : print \"%d/%d particles excluded\"%(nex,len(ptcl_info))\n\n\t\t\t# if all of the particles were thrown out for some reason, we keep the best one\n\t\t\tif nex==len(ptcl_info) :\n\t\t\t\tbest=ptcl_info.index(min(ptcl_info))\n\t\t\t\tptcl_info[best]=(ptcl_info[best][0],ptcl_info[best][1],1)\n\t\t\t\tif verbose : print \"Best particle reinstated\"\n\n\t\tif it==niter+1 : break\t\t# This is where the loop actually terminates. 
This makes sure that inclusion/exclusion is updated at the end\n\n\t\t# Now align and average\n\t\tavgr=Averagers.get(averager[0], averager[1])\n\t\tfor i in range(nimg):\n\t\t\tif callback!=None and nimg%10==9 : callback(int((it+i/float(nimg))*100/(niter+2.0)))\n\t\t\tptcl=get_image(images,i,normproc)\t\t\t\t\t# get the particle to align\n\t\t\tali=align_one(ptcl,ref,prefilt,align,aligncmp,ralign,raligncmp) # align to reference\n\t\t\tsim=ali.cmp(scmp[0],ref,scmp[1])\t\t\t# compare similarity to reference (may use a different cmp() than the aligner)\n\t\t\tif saveali and it==niter : ali.write_image(\"aligned.hdf\",-1)\n\n\t\t\ttry: use=ptcl_info[i][2]\n\t\t\texcept: use=1\n\t\t\tif use :\n\t\t\t\tavgr.add_image(ali)\t\t\t\t# only include the particle if we've tagged it as good\n\t\t\t\tif verbose>1 :\n\t\t\t\t\tsys.stdout.write(\".\")\n\t\t\t\t\tsys.stdout.flush()\n\t\t\telif verbose>1:\n\t\t\t\tsys.stdout.write(\"X\")\n\t\t\t\tsys.stdout.flush()\n\t\t\tptcl_info[i]=(sim,ali[\"xform.align2d\"],use)\n\n\t\tif verbose>1 : print \"\"\n\n\t\tref=avgr.finish()\n\t\tref[\"class_ptcl_qual\"]=mean\n\t\tref[\"class_ptcl_qual_sigma\"]=sigma\n\n\t\t# A little masking before the next iteration\n\t\tgmw=max(5,ref[\"nx\"]/12)\t\t# gaussian mask width\n\t\tref.process_inplace(\"normalize.circlemean\",{\"radius\":ref[\"nx\"]/2-gmw})\n\t\tif automask :\n\t\t\tref.process_inplace(\"mask.auto2d\",{\"nmaxseed\":10,\"nshells\":gmw-2,\"nshellsgauss\":gmw,\"sigma\":0.2})\n\t\telse :\n\t\t\tref.process_inplace(\"mask.gaussian\",{\"inner_radius\":ref[\"nx\"]/2-gmw,\"outer_radius\":gmw/1.3})\n\n\tif ref_orient!=None :\n\t\tref[\"xform.projection\"]=ref_orient\n\t\tref[\"model_id\"]=ref_model\n\treturn [ref,ptcl_info]", "def test_avg_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(s.avg_grade(3)),\"B\")", "def classification_score(self, x, y):\t\n\t\tpass", "def analyze(self, event):\n electrons = Collection(event, \"Electron\")\n muons = Collection(event, \"Muon\")\n jets = Collection(event, \"Jet\")\n njets = len(jets)\n mht = ROOT.TLorentzVector()\n for lep in filter(self.muSel,muons):\n mht += lep.p4()\n for lep in filter(self.elSel,electrons):\n mht += lep.p4()\n goodjet = [ 0 for i in xrange(njets) ]\n for i,j in enumerate(jets):\n if not self.jetSel(j): continue\n if j.muonIdx1 != -1 and j.muonIdx1 < njets:\n if self.muSel(muons[j.muonIdx1]): continue # prefer the muon\n if j.muonIdx2 != -1 and j.muonIdx2 < njets:\n if self.muSel(muons[j.muonIdx2]): continue # prefer the muon\n if j.electronIdx1 != -1 and j.electronIdx1 < njets:\n if self.elSel(electrons[j.electronIdx1]): continue # prefer the electron\n if j.electronIdx2 != -1 and j.electronIdx2 < njets:\n if self.elSel(electrons[j.electronIdx2]): continue # prefer the electron\n goodjet[i] = 1\n mht += j.p4()\n self.out.fillBranch(\"MHT_pt\", mht.Pt())\n self.out.fillBranch(\"MHT_phi\", -mht.Phi()) # note the minus\n self.out.fillBranch(\"Jet_mhtCleaning\", goodjet)\n return True", "def backpropagate(self, search_path, value):\n\n for node in search_path:\n node.n_visits += 1\n node.n_a[node.action_taken] += 1 \n # Incremental mean calculation\n node.q_a[node.action_taken] = (node.q_a[node.action_taken] * \n (node.n_visits - 1) + value) / \\\n node.n_visits", "def error_analysis(predictions, gold, result_collector):\n # scores = defaultdict(list)\n for iteration_id, texts in predictions.items():\n # map iteration id to fold\n fold = str(int(iteration_id) / 5)\n for tid, pred_tree in texts.items():\n gold_tree = gold[tid]\n 
print(iteration_id, fold, tid)\n print(gold_tree.get_triples())\n print(pred_tree.get_triples())\n for level, scores in eval_prediction([gold_tree], [pred_tree]):\n result_collector.add_result(tid, fold, level, scores)\n print(\"Done.\")", "def predict(self,entry):\n assert self.root is not None,\"Decision tree is not initialized\"\n return self.root.predict(entry)", "def predict(self, example):\n return self.decisionTree.traverse_tree(example)", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_classifier.predict(data)", "def calculate_classifiers(self, segmented_image, grand_truth_image):\n grand_truth_image = np.array(grand_truth_image)\n\n rows = grand_truth_image.shape[0]\n cols = grand_truth_image.shape[1]\n\n global TP, FP, TN, FN, RN\n\n TP, FP, TN, FN, RN = 0, 0, 0, 0, 0\n\n # It calculates the classifers\n for x in range(0, rows):\n for y in range(0, cols):\n\n if grand_truth_image[x][y] == 255 and segmented_image[x][y] == 255:\n TP += 1\n elif grand_truth_image[x][y] == 0 and segmented_image[x][y] == 255:\n FP += 1\n elif grand_truth_image[x][y] == 0 and segmented_image[x][y] == 0:\n TN += 1\n elif grand_truth_image[x][y] == 255 and segmented_image[x][y] == 0:\n FN += 1\n else:\n RN += 1", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. 
This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def _tree_predict(self, estimator_id, X):\n return self.estimators_[estimator_id].predict(X) * self.learning_rate", "def classifier(decision_tree,data):\n dt = copy.deepcopy(decision_tree) # copy to maintain original decision tree\n cur_attr = list(dt)[0] # 'cur_attr' is first selected attribute\n \n while True:\n dt = dt[cur_attr] # 'dt' is sub decision tree \n value = data[cur_attr] # 'value' is data's attribute value\n\n # if there is no dictionary type instance, dt[value] is class label\n if not isinstance(dt[value],dict): \n return dt[value]\n\n dt = dt[value] # 'dt' is branches of value\n cur_attr = list(dt)[0] # update cur_attr", "def analyze(self, event):\n jets = Collection(event, \"Jet\")\n\n BTagWeightN = 1.0\n BTagWeightN_up = 1.0\n BTagWeightN_down = 1.0\n BTagWeightN_FS = 1.0\n BTagWeightN_up_FS = 1.0\n BTagWeightN_down_FS = 1.0\n BTagWeightD = 1.0\n BTagWeightNHeavy = 1.0\n BTagWeightNHeavy_up = 1.0\n BTagWeightNHeavy_down = 1.0\n BTagWeightNHeavy_FS = 1.0\n BTagWeightNHeavy_up_FS = 1.0\n BTagWeightNHeavy_down_FS = 1.0\n BTagWeightDHeavy = 1.0\n BTagWeightNLight = 1.0\n BTagWeightNLight_FS = 1.0\n BTagWeightNLight_up = 1.0\n BTagWeightNLight_up_FS= 1.0\n BTagWeightNLight_down = 1.0\n BTagWeightNLight_down_FS = 1.0\n BTagWeightDLight = 1.0\n\n for jet in jets:\n pt = jet.pt\n eta = abs(jet.eta)\n flavor = jet.hadronFlavour\n\n if not ( pt > self.jetPtMin and eta < self.jetEtaMax): continue\n\n if flavor == 5:\n pt_bin = self.h_eff_b.GetXaxis().FindBin(pt); \n if pt_bin > self.h_eff_b.GetXaxis().GetNbins():\n pt_bin = self.h_eff_b.GetXaxis().GetNbins(); \n eta_bin = self.h_eff_b.GetYaxis().FindBin(eta); \n if eta_bin > self.h_eff_b.GetYaxis().GetNbins():\n eta_bin = self.h_eff_b.GetYaxis().GetNbins();\n\n eff = self.h_eff_b.GetBinContent(pt_bin, eta_bin);\n\n elif flavor == 4:\n pt_bin = self.h_eff_c.GetXaxis().FindBin(pt); \n if pt_bin > self.h_eff_c.GetXaxis().GetNbins():\n pt_bin = self.h_eff_c.GetXaxis().GetNbins(); \n eta_bin = self.h_eff_c.GetYaxis().FindBin(eta); \n if eta_bin > self.h_eff_c.GetYaxis().GetNbins():\n eta_bin = self.h_eff_c.GetYaxis().GetNbins();\n\n eff = self.h_eff_c.GetBinContent(pt_bin, eta_bin);\n\n else:\n pt_bin = self.h_eff_udsg.GetXaxis().FindBin(pt); \n if pt_bin > self.h_eff_udsg.GetXaxis().GetNbins():\n pt_bin = self.h_eff_udsg.GetXaxis().GetNbins(); \n eta_bin = self.h_eff_udsg.GetYaxis().FindBin(eta); \n if eta_bin > self.h_eff_udsg.GetYaxis().GetNbins():\n eta_bin = self.h_eff_udsg.GetYaxis().GetNbins();\n\n eff = self.h_eff_udsg.GetBinContent(pt_bin, eta_bin);\n \n if self.FastSim:\n btagSF = jet.btagSF\n btagSF_FS=jet.btagSF_FS\n btagSF_up_FS = jet.btagSF_FS_up\n btagSF_down_FS = jet.btagSF_FS_down\n btagSF_down = jet.btagSF_down\n btagSF_up = jet.btagSF_up\n else:\n btagSF = jet.btagSF\n btagSF_FS= 1.0\n btagSF_up = jet.btagSF_up\n btagSF_down = jet.btagSF_down\n btagSF_up_FS = 1.0\n btagSF_down_FS = 1.0\n \n if 
jet.btagDeepB > self.bDiscCut:\n #check if eff is zero\n if eff < 0.001:\n eff = 0.001\n \n BTagWeightN *= btagSF * eff\n BTagWeightN_FS *= btagSF_FS * eff\n BTagWeightN_up *= btagSF_up * eff\n BTagWeightN_down *= btagSF_down * eff\n BTagWeightN_up_FS *= btagSF_up_FS * eff\n BTagWeightN_down_FS *= btagSF_down_FS * eff\n\n if abs(flavor) == 5:\n BTagWeightNHeavy *= btagSF * eff\n BTagWeightNHeavy_FS *= btagSF_FS * eff\n BTagWeightNHeavy_up *= btagSF_up * eff\n BTagWeightNHeavy_down *= btagSF_down * eff\n BTagWeightNHeavy_up_FS *= btagSF_up_FS * eff\n BTagWeightNHeavy_down_FS *= btagSF_down_FS * eff\n BTagWeightDHeavy *= eff\n else:\n BTagWeightNLight *= btagSF * eff\n BTagWeightNLight_FS *= btagSF_FS * eff\n BTagWeightNLight_up *= btagSF_up * eff\n BTagWeightNLight_down *= btagSF_down * eff\n BTagWeightNLight_up_FS *= btagSF_up_FS * eff\n BTagWeightNLight_down_FS *= btagSF_down_FS * eff\n BTagWeightDLight *= eff\n\n BTagWeightD *= eff\n else:\n #check if eff is 1.0\n if eff > 0.999:\n eff = 0.999\n\n BTagWeightN *= 1 - btagSF * eff\n BTagWeightN_FS *= 1 - btagSF_FS * eff\n BTagWeightN_up *= 1 - btagSF_up * eff\n BTagWeightN_down *= 1 - btagSF_down * eff\n BTagWeightN_up_FS *= 1 - btagSF_up_FS * eff\n BTagWeightN_down_FS *= 1 - btagSF_down_FS * eff\n\n if abs(flavor) == 5:\n BTagWeightNHeavy *= 1 - btagSF * eff\n BTagWeightNHeavy_FS *= 1 - btagSF_FS * eff\n BTagWeightNHeavy_up *= 1 - btagSF_up * eff\n BTagWeightNHeavy_down *= 1 - btagSF_down * eff\n BTagWeightNHeavy_up_FS *= 1 - btagSF_up_FS * eff\n BTagWeightNHeavy_down_FS *= 1 - btagSF_down_FS * eff\n BTagWeightDHeavy *= 1 - eff\n else:\n BTagWeightNLight *= 1 - btagSF * eff\n BTagWeightNLight_FS *= 1 - btagSF_FS * eff\n BTagWeightNLight_up *= 1 - btagSF_up * eff\n BTagWeightNLight_up_FS *= 1 - btagSF_up_FS * eff\n BTagWeightNLight_down *= 1 - btagSF_down * eff\n BTagWeightNLight_down_FS *= 1 - btagSF_down_FS * eff\n BTagWeightDLight *= 1 - eff\n\n BTagWeightD *= 1 - eff\n \n if self.FastSim:\n self.out.fillBranch(\"BTagWeight_FS\", BTagWeightN_FS / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Up_FS\", BTagWeightN_up_FS / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Down_FS\", BTagWeightN_down_FS / BTagWeightD)\n self.out.fillBranch(\"BTagWeightHeavy_FS\", BTagWeightNHeavy_FS / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Up_FS\", BTagWeightNHeavy_up_FS / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Down_FS\", BTagWeightNHeavy_down_FS / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightLight_FS\", BTagWeightNLight_FS / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Up_FS\", BTagWeightNLight_up_FS / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Down_FS\", BTagWeightNLight_down_FS / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeight\", BTagWeightN / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Up\", BTagWeightN_up / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Down\", BTagWeightN_down / BTagWeightD)\n self.out.fillBranch(\"BTagWeightHeavy\", BTagWeightNHeavy / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Up\", BTagWeightNHeavy_up / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Down\", BTagWeightNHeavy_down / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightLight\", BTagWeightNLight / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Up\", BTagWeightNLight_up / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Down\", BTagWeightNLight_down / BTagWeightDLight)\n return True", "def cstree_predict(value_dict, tree, order, sample, i, 
data):\n numerator = cstree_likelihood(sample, order, tree, data)\n order_of_var = order.index(i)\n samples = [np.array(list(sample[:order_of_var])+[val]+list(sample[order_of_var+1:]))\n for val in value_dict[i]]\n # sum_j P(X1=x1,...,Xi-1=xi-1,Xi+1=xi+1,...,Xn|Xi=j)P(Xi=j)\n denominator = sum([cstree_likelihood(s, order, tree, data) for s in samples])\n return numerator/denominator", "def importances(self):\n # TODO your code here\n # compute feature importances by asking each tree to\n # add_mdis to a “running total” dictionary, and then dividing by the number\n # of trees to get the overall mean\n mdis = {}\n for tree in self.trees:\n tree.add_mdis(mdis)\n for k in mdis:\n mdis[k] = mdis[k] / len(self.trees)\n return mdis", "def hotaverage( names):\n rs = radioastronomy.Spectrum() # create input and average structures\n nhot = 0\n\n avenames = names # create a list of files to average\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'HOT': # speed up by only looking at hot load files\n continue\n \n rs.read_spec_ast(filename)\n\n if rs.telel > 0: # only working with hot load, skip elevation > 0.\n continue\n\n avenames[nhot] = filename\n nhot = nhot + 1\n # end of for all files loop\n\n nhot, hot = average( avenames[0:nhot]) # now use generic program for averages\n if nhot < 1:\n print 'No hot load files; can not calibrate!'\n exit()\n\n return nhot, hot", "def get_average_mark(self, test):\n return", "def classify(sample,currentNode):\n \n while(currentNode.data == 0):\n splitAttribute,splitValue= currentNode.split\n if sample[int(splitAttribute)-1]>float(splitValue):\n currentNode = currentNode.greater\n else:\n currentNode = currentNode.lesser\n return currentNode.data", "def find_prediction_success_rate(decision_tree, test_examples, attributes):\n totalCorrect = 0\n for example in test_examples:\n actualResult = example[14]\n prediction = decision_tree_prediction(example, decision_tree, attributes)\n if prediction == actualResult:\n totalCorrect = totalCorrect + 1\n return totalCorrect / len(test_examples)", "def _classify(self, example):\n neighbors = self.find_neighbor(example)\n class_label = self.find_response(neighbors)\n return class_label", "def predict_example(x, tree):\r\n # generatng a list of keys of the tree\r\n keys = list(tree.keys())\r\n for key in keys:\r\n \t# seperating our attribute, value and prediction - true/false\r\n attribute, value, pred = key\r\n for i in range(0, len(x)):\r\n # checking if attribute matches\r\n if i == attribute:\r\n # checking if value matches\r\n if x[i] == value:\r\n # descend if subtree\r\n if isinstance(tree[key],dict):\r\n return predict_example(x, tree[key])\r\n else:\r\n return tree[key]\r\n else:\r\n # else classify false preds\r\n elsekey = (attribute, value, 'False')\r\n # descend if subtree\r\n if isinstance(tree[elsekey],dict):\r\n return predict_example(x, tree[elsekey])\r\n else:\r\n return tree[elsekey]", "def _eed_compute(sentence_level_scores: List[Tensor]) ->Tensor:\n if len(sentence_level_scores) == 0:\n return tensor(0.0)\n average = sum(sentence_level_scores) / tensor(len(sentence_level_scores))\n return average", "def evaluate(self, featureset):\r\n #sequence, tag = 
featureset\r\n gs, labels = [], []\r\n for s, t in featureset:\r\n gs.append(t)\r\n label = self.tagger.choose_tag(s)\r\n labels.append(label)\r\n print (t, label)\r\n\r\n assert(len(gs) == len(labels))\r\n self.write_to_file(labels)\r\n words = self.tagger.test(self.r.test_sents, word=True)\r\n print (accuracy_score(gs, labels))", "def test(self, test_sample_indices):\n\n if self._root_node is None:\n print('Decision tree must be trained before testing.')\n sys.exit(1)\n return self._classify_samples(self._curr_dataset.samples,\n self._curr_dataset.sample_class,\n self._curr_dataset.sample_costs,\n test_sample_indices,\n self._curr_dataset.sample_index_to_key)", "def gate_average(recurrent_net, sample_input):\n length = sample_input.size\n score, select = np.zeros(length), np.zeros(length)\n for i in range(length):\n select[i], score[i] = recurrent_net.activate([sample_input[i]])\n select = sigmoid(select)\n return np.sum(select * score) / np.sum(select)", "def classify_treeInsert(self, query_name, cluster_naming_function):\n classes = Set(self.class_map.values())\n\n full_dist_matrix = self.orig_dist_matrix.drop(query_name)\n full_dist_matrix = full_dist_matrix.drop(query_name, axis=1)\n\n if PROGRESS: print '\\nStarting treeInsert!'\n \n #1] Build a tree for each class\n class_trees = {}\n all_elements = full_dist_matrix.columns.values.tolist()\n classes_done = 0\n num_of_classes = len(classes)\n for c in classes:\n\n #1a. Construct a mini distance matrix for the current class\n nonclass_members = [i for i in all_elements if self.class_map[i] != c]\n class_dist_matrix = full_dist_matrix.drop(nonclass_members)\n class_dist_matrix = class_dist_matrix.drop(nonclass_members, axis=1)\n\n #1b. Build class tree\n if PROGRESS: print 'Building class tree for ' + c\n\n class_njt = NJTree()\n class_njt.build(class_dist_matrix, self.class_map, myClusterNaming)\n class_trees[c] = class_njt\n classes_done = classes_done + 1\n\n if PROGRESS:\n print str(classes_done) + \" classes down, \" + str(num_of_classes - classes_done) \n + \" to go...\"\n\n #2] Determine the insertion cost of each tree\n class_insert_costs = {}\n for c,class_tree in class_trees.iteritems():\n\n #2a. 
Find insertion cost of each leaf in the tree\n leaves = [i for i in class_tree.tree.nodes() if class_tree.isLeaf(i)] \n leaf_insert_costs = {}\n for leaf_i in leaves:\n\n parent_i = class_tree.tree.neighbors(leaf_i)[0] \n cons = ({'type': 'eq',\n 'fun': lambda x: x[0] + x[1] - nx.shortest_path_length(class_tree.tree, \n source=parent_i, target=leaf_i, weight='length')})\n optimum_leaf_insert_cost = optimize.minimize(_leaf_insertion_cost, [0,0,0], \n args=(class_tree.orig_dist_matrix, leaf_i, leaves, query_name, self), method='SLSQP', \n constraints=cons)\n\n if DEBUG:\n print \"Optimum cost for \", leaf_i, \" : \", optimum_leaf_insert_cost.x[0]\n\n leaf_insert_costs[leaf_i] = optimum_leaf_insert_cost.x[0]\n \n class_insert_costs[c] = min(list(leaf_insert_costs.values()))\n\n #3] Output the class name of tree with minimum insertion cost\n min_insert_cost = min(list(class_insert_costs.values()))\n for c,cost in class_insert_costs.iteritems():\n if cost==min_insert_cost:\n return c\n break", "def apply_classifier(self):\n for detected_object in self.detected_objects:\n detected_object.predict_class(self.original_image)", "def class_average(X):\n\t# compute average row vector\n\tmean_vector = np.mean(X, axis = 0)\n\treturn(mean_vector)", "def calculate_average_precision(class_name='', current_neuron_index=current_neuron_index, acts=acts,\n no_files_in_label=no_files_in_label, verbose=verbose, minx='',Q_stop=''):\n #\n current_neuron = acts.get_activations_for_neuron(current_neuron_index) # get the neuron's data\n x_data = current_neuron.vector # get the activations without classes\n if minx == '':\n minx = min(x_data) # this grabs all the points\n # grab your list of points\n local_list, selected_activations = grab_points_for_a_cluster(current_neuron_index,\n min_selected_x_data=minx,\n max_selected_x_data=max(x_data),\n acts=acts,\n x_data=x_data,\n verbose=verbose)\n if not Q_stop == '':\n Q = Q_stop\n else:\n Q = len(local_list) # total length of list\n # get the test class (this is the correct class or 'A')\n if class_name == '':\n test_class = local_list[-1][0]\n else:\n test_class = class_name\n N_test = no_files_in_label[test_class] # no of items in class A\n # set up counters\n AP = 0 # average precision\n count_of_test_class = 0\n # loop backwards through the list, abs j is the position in a 1-indexed list\n # values for i == -1\n# current_class = local_list[-1][0]\n# if (current_class == test_class):\n# count_of_test_class = count_of_test_class + 1 # we found A\n# precs_x = count_of_test_class /1\n recall_x = 0\n Ave_precs_x = 0\n for i in range(Q):\n j = -(i + 1) # 1 indexed\n recall_x_minus_1 = recall_x\n current_class = local_list[j][0] # current class\n if j == -Q:\n # if the whole of local_list is the same class (this accounts for zero indexing)\n if verbose:\n print(current_class)\n print('{}/{}'.format(count_of_test_class, abs(j)))\n j = j -1 # really this is here so we can check j\n #break\n if count_of_test_class == N_test:\n #we've found them all\n if verbose:\n print('found all {} of {}, stopping...'.format(N_test, current_class))\n print('{}/{}'.format(count_of_test_class, abs(j)))\n break\n if (current_class == test_class):\n count_of_test_class = count_of_test_class + 1 #n A\n precs_x = count_of_test_class /(abs(j)) # N.b. 
this is the sum, we divide by j on the output\n recall_x = count_of_test_class / N_test\n delta_recall_x = recall_x - recall_x_minus_1 # difference in recall between this point nd the next\n weight_precs_x = precs_x * delta_recall_x # weighted precsion at point x (we do average via weighted sum)\n Ave_precs_x = Ave_precs_x + weight_precs_x # average_precision evaluated at point x\n return Ave_precs_x, precs_x, recall_x", "def classify(self, testInstance):\n return self.fire(testInstance) > 0.5", "def leafScore(self) :\n return 0", "def calculate(self, treename=\"Events\"):\n import ROOT as r\n\n fin = r.TFile(self.name)\n if not fin: raise Exception(\"File {0} does not exist, so cannot calculate nevents!\".format(self.name))\n\n t = fin.Get(treename)\n if not t: raise Exception(\"Tree {0} in file {1} does not exist, so cannot calculate nevents!\".format(treename,self.name))\n d_nevts = {}\n for do_negative in [True,False]:\n key = \"nevts_neg\" if do_negative else \"nevts\"\n obj = t.GetUserInfo()\n if obj and obj.FindObject(key):\n d_nevts[key] = obj.FindObject(key)\n if d_nevts[key]:\n d_nevts[key] = int(d_nevts[key].GetVal())\n else:\n d_nevts[key] = t.GetEntries(\"genps_weight < 0\" if do_negative else \"\")\n return d_nevts[\"nevts\"], d_nevts[\"nevts_neg\"]" ]
[ "0.6027639", "0.5959959", "0.5897434", "0.5878511", "0.58665943", "0.58361167", "0.5765075", "0.5708641", "0.5605437", "0.55976415", "0.5585661", "0.55516666", "0.5534588", "0.55194116", "0.5513875", "0.5391394", "0.5380517", "0.5374991", "0.5372819", "0.5360519", "0.535744", "0.53571343", "0.5342949", "0.5313004", "0.5307396", "0.52854604", "0.52740604", "0.52416605", "0.5233034", "0.52207005", "0.5216737", "0.5202392", "0.51849854", "0.5172583", "0.5169279", "0.51668197", "0.512235", "0.51178765", "0.5105556", "0.50917804", "0.5091678", "0.50699717", "0.5069234", "0.50600904", "0.5044098", "0.50294673", "0.5028374", "0.5013036", "0.500483", "0.5004422", "0.4983993", "0.49788463", "0.49764407", "0.49764407", "0.49638826", "0.49633554", "0.4961959", "0.4958092", "0.49577865", "0.49575865", "0.49562982", "0.49561352", "0.49520153", "0.49494123", "0.49475384", "0.49407348", "0.49406147", "0.4938054", "0.49373198", "0.4937094", "0.49339134", "0.49301273", "0.492308", "0.49079055", "0.4895088", "0.4893471", "0.48909745", "0.48865187", "0.4884327", "0.48839903", "0.48817793", "0.4878752", "0.4861867", "0.4847104", "0.48420078", "0.48398408", "0.4837654", "0.48295376", "0.48278698", "0.48097554", "0.48034278", "0.47938028", "0.47915217", "0.4790252", "0.47900864", "0.47874215", "0.47858444", "0.47839123", "0.4778842", "0.47787338" ]
0.8286223
0
Node frontiers generator using breadth-first search.
def bfs_nodes_generator(graph, source, reverse=...): ...
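For illustration only, below is a minimal, self-contained sketch of what a node-frontiers generator driven by breadth-first search can look like. It assumes a plain dict-of-lists adjacency map rather than any particular graph library's graph object; the `reverse` handling and the example graph `g` are illustrative assumptions, not the implementation behind the stub above.

from collections import deque  # not strictly needed here; frontiers are built level by level

def bfs_nodes_generator_sketch(graph, source, reverse=False):
    """Yield BFS node frontiers (one list per depth level) starting from `source`.

    `graph` is assumed to be a dict mapping each node to an iterable of
    neighbors; `reverse=True` follows incoming edges instead of outgoing ones.
    """
    if reverse:
        # Build a reversed adjacency map so edges are walked backwards.
        inverted = {node: [] for node in graph}
        for node, neighbors in graph.items():
            for neighbor in neighbors:
                inverted.setdefault(neighbor, []).append(node)
        graph = inverted

    visited = {source}
    frontier = [source]
    while frontier:
        yield frontier                      # report the current depth level
        next_frontier = []
        for node in frontier:
            for neighbor in graph.get(node, ()):
                if neighbor not in visited:
                    visited.add(neighbor)
                    next_frontier.append(neighbor)
        frontier = next_frontier            # advance one BFS level

# Example (hypothetical graph): frontiers of a small directed graph rooted at 0.
g = {0: [1, 2], 1: [3], 2: [3], 3: []}
print(list(bfs_nodes_generator_sketch(g, 0)))   # [[0], [1, 2], [3]]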
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def breadthfirst(self):\n import os\n cwd = os.getcwd()\n os.chdir('/Users/raj/Documents/algorithms_in_python/linked_lists/')\n from linked_collections import LinkedQueue\n os.chdir(cwd) # change to cwd\n if not self.is_empty():\n lq = LinkedQueue()\n lq.enqueue(self.root())\n while not lq.is_empty():\n p = lq.dequeue()\n yield p\n for c in self.children(p):\n lq.enqueue(c)", "def _breadthfirst(self,root, action=lambda nodes: print(nodes)):\n nodes = []\n breadth = Queue()\n visited = []\n\n breadth.enqueue(root)\n visited.append(root)\n\n while breadth.front:\n front = breadth.dequeue()\n nodes.append(front.value)\n\n for child in self._adjacency_list.get(front.value):\n if not child.start_vertex in visited:\n visited.append(child.start_vertex)\n breadth.enqueue(child.start_vertex) \n\n return nodes", "def breadthfirst(self):\n if not self.is_empty():\n fringe = LinkedQueue() # known positions not yet yielded\n fringe.enqueue(self.root()) # starting with the root\n while not fringe.is_empty():\n p = fringe.dequeue() # remove from front of the queue\n yield p # report this position\n for c in self.children(p):\n fringe.enqueue(c) # add children to back of queue", "def breadth_first_search(self):\r\n queue = [self.root]\r\n while queue:\r\n node = queue.pop()\r\n yield node\r\n queue.extend(node.children)", "def breadth_first(self):\n nodes_to_vist = []\n curr = self._root\n nodes_to_vist.append(curr)\n while len(nodes_to_vist):\n curr = nodes_to_vist[0]\n if curr._lkid:\n nodes_to_vist.append(curr._lkid)\n if curr._rkid:\n nodes_to_vist.append(curr._rkid)\n yield curr._data\n nodes_to_vist.remove(curr)", "def breadth_first(self):\n q = Queue()\n q.enqueue(self)\n while q.size() > 0:\n node = q.dequeue()\n yield node.val\n if node.left:\n q.enqueue(node.left)\n if node.right:\n q.enqueue(node.right)", "def breadth_first_traverse(self) -> Generator:\n assist_queue = deque()\n assist_queue.append(self.root_node)\n while assist_queue:\n current_node = assist_queue.popleft()\n yield current_node\n if current_node.children:\n for child in current_node.children:\n assist_queue.append(child)", "def breadthFirstSearch(problem):\n #import pdb;pdb.set_trace()\n frontier = util.Queue()\n start_node = problem.getStartState()\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,()))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n\n explored.add(node[0])\n # exploratory code for SUPER-optimal solution:\n # by saving the path in explored, we assure that we explore the same cell even if\n # two different actions go through it:\n #explored.add(node)\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1]\n next_actions = actions + (action,)\n new_node = (nextState, next_actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def breadth_first(self):\n import queue\n keeper = queue.Queue()\n keeper.enqueue(self)\n while(keeper.size() != 0):\n temp = keeper.dequeue()\n if temp.val is not None:\n yield temp.val\n if temp.left is not None:\n keeper.enqueue(temp.left)\n if temp.right is not None:\n keeper.enqueue(temp.right)", "def breadth_first_search(root_node):\n if root_node.goal_test():\n return root_node\n\n frontier = [root_node]\n explored = []\n\n while frontier:\n node = frontier.pop(0)\n 
explored.append(node)\n\n for successor in node.generate_successors():\n if not successor:\n continue\n if not (successor.is_in(frontier) and successor.is_in(explored)):\n if successor.goal_test():\n return successor\n frontier.append(successor)\n return None # No Solution", "def breadth_first_traversal(self, start):\n visited = []\n visited.append(start)\n start_visited = visited\n while True:\n temp = []\n for node_ in start_visited:\n for i in self.neighbors(node_):\n if i not in visited:\n visited.append(i)\n temp.append(i)\n start_visited = temp\n if not temp:\n break\n return visited", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n if problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Queue() #bfs uses a queue\n frontier.push(initialNode)\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list:\n if problem.isGoalState(child.state):\n return child.solution()\n frontier.push(child)\n return []\n util.raiseNotDefined()", "def breadth_first(self, start_node):\n \n # try:\n if start_node not in self._adjacency_list:\n raise KeyError('Nodes are not in the graph')\n\n q = Queue()\n q.enqueue(start_node)\n visited_nodes = {}\n visited_nodes[start_node] = True\n output = []\n\n while len(q):\n cur = q.dequeue()\n output.append(cur)\n neighbors = self._adjacency_list[cur]\n for n in neighbors:\n if n[0] not in visited_nodes:\n q.enqueue(n[0]) \n visited_nodes[n[0]] = True\n return output\n # except Exception as error:\n # return(f'{error}')", "def bft(self, starting_vertex):\n # Create a q and enqueue starting vertex\n qq = Queue()\n qq.enqueue([starting_vertex])\n # Create a set of traversed vertices\n visited = set()\n # eldest = []\n depth_counter = {} \n starter = 0 \n # visited = []\n # While queue is not empty:\n while qq.size() > 0:\n # dequeue/pop the first vertex\n path = qq.dequeue()\n # if not visited\n # print(visited)\n starter += 1\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n depth_counter[starter] = path[-1]\n # mark as visited\n visited.add(path[-1])\n # visited.append(path[-1])\n # enqueue all neightbors\n \n if not self.get_neighbors(path[-1]):\n \n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n depth_counter[starter] = path[-1]\n # print(depth_counter)\n # eldest.append(path[-1])\n else:\n # starter += 1\n for next_vert in self.get_neighbors(path[-1]): \n new_path = list(path)\n new_path.append(next_vert)\n qq.enqueue(new_path)\n\n\n return depth_counter[starter]", "def breadthFirstSearchPaths(problem):\n #import pdb;pdb.set_trace()\n frontier = util.Queue()\n start_node = 
problem.getStartState()\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,()))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n\n #explored.add(node[0])\n # exploratory code for SUPER-optimal solution:\n # by saving the path in explored, we assure that we explore the same cell even if\n # two different actions go through it:\n explored.add(node)\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1]\n next_actions = actions + (action,)\n new_node = (nextState, next_actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def _bfs_nodes(cls, graph, source, size, **kwargs):\n if size < 1:\n return iter(())\n\n return itertools.chain(\n (source,),\n itertools.islice((v for u, v in nx.bfs_edges(graph, source)), size-1)\n )", "def bfs_edges_generator(graph, source, reverse=...):\n ...", "def breadthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Queue() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def breadth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n q = collections.deque(initial_nodes_iter)\n while q:\n node = q.popleft()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n q.extend(connected_to_functor(node))", "def breadth_first_list(graph, current=\"a\"):\n queue = []\n queue.append(current)\n while queue:\n current = queue.pop(0)\n print(current)\n for node in graph.get(current):\n queue.append(node)", "def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = 
set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)", "def bfs(self):\r\n Q = [self.head()]\r\n visited = []\r\n while Q != []:\r\n cur = Q[0]\r\n visited.append(cur)\r\n Q = Q[1:]\r\n Q.extend([ch for ch in self.get_deps(cur.get_field('index'))])\r\n for x in reversed(visited):\r\n yield x", "def breadthFirstSearch(problem):\n\n frontier = util.Queue()\n # print 'Create frontier'\n initial_node = node(problem.getStartState(), 0, [], 0)#(state,depth,path_actions,path_cost)\n frontier.push(initial_node)\n # print 'Push ',repr(initial_node.state)\n frontierSet = set([initial_node.state])\n explored = set() #initialize the explored set to be empty\n\n while True:\n if frontier.isEmpty() == True: raise Exception, \"The frontier was emptied\"#if the frontier is empty then return failure\n currNode = frontier.pop()#HERE1\n frontierSet.remove(currNode.state)\n # print 'Remove',repr(currNode.state)\n # print 'State: ' + repr(currNode.state) + '. Depth: ' + repr(currNode.depth) + '. Path Cost: ' + repr(currNode.path_cost) + '. Path Actions: ' + repr(currNode.path_actions) + '.\\n'\n if problem.isGoalState(currNode.state) == True:\n print 'Goal reached!'\n return currNode.path_actions\n explored.add(currNode.state)\n for succ in problem.getSuccessors(currNode.state):\n # print 'Succ: ',repr(succ[0])\n succNode = node(succ[0], currNode.depth + 1, currNode.path_actions + [succ[1],], currNode.path_cost + succ[2])\n if (succNode.state not in explored) and (succNode.state not in frontierSet):\n \"\"\"Aca si hay que verificar si es que ya esta en la frontera porque es formato FIFO. 
Entonces los nodos que estan en la lista\n necesariamente van a ser verificados antes de que se vuelva a insertar otro.\n \"\"\"\n frontier.push(succNode)\n # print 'Push ',repr(succNode.state)\n frontierSet.add(succNode.state)", "def breadth_first_search(initial_state):\n list_of_processed_nodes = []\n num_unprocessed_nodes = 0#\n num_unconsidered_children = 0#\n\n initial_node = Node(state=initial_state)\n node_deque = collections.deque()\n node_deque.append(initial_node)\n goal_state_found = False\n goal_node = None\n\n while len(node_deque) > 0 and not goal_state_found:\n e = node_deque.popleft()\n #pdb.set_trace()\n if e in list_of_processed_nodes:\n num_unprocessed_nodes += 1\n continue\n else:\n list_of_processed_nodes.append(e)\n\n list_of_children_nodes, num_unconsidered_children = generate_children_nodes(\n curr_node=e, list_of_processed_nodes=list_of_processed_nodes,\n running_count_of_children_dups=num_unconsidered_children#\n )\n \n for child_node in list_of_children_nodes:\n #print 'Node {0} with goal status {1}'.format(child_node.index, child_node.state.snake_ate_food)\n if child_node.state.goal_state_reached():\n #print \"Goal state reached with node index {0}\".format(child_node.index)\n goal_state_found = True\n goal_node = child_node\n break\n else:\n #print \"Adding to deque node index {0}\".format(child_node.index)\n node_deque.append(child_node)\n\n if len(node_deque) == 0 and not goal_state_found:\n print '*'*40\n print 'NO SOLUTION PATH FOUND'\n print '*'*40\n sys.exit(0)\n\n #pdb.set_trace()#\n # Summary & results\n #print '{0} nodes processed!'.format(len(list_of_processed_nodes))\n #print '{0} nodes already visited, skipped!'.format(num_unprocessed_nodes)\n #print '{0} node children skipped!'.format(num_unconsidered_children)\n #os.system('say -v \"Victoria\" \"done\"')\n\n return goal_node, list_of_processed_nodes", "def breadthFirstSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\tfrom game import Directions\r\n\t#i = 0\r\n\tfrontera=util.Queue()\r\n\testadoInicial= problem.getStartState()\r\n\tfrontera.push((estadoInicial, [],0))\r\n\tvisitados=[]\r\n\tvisitados.append(estadoInicial)\r\n\r\n\twhile not(frontera.isEmpty()):\r\n\t\t(estado, camino, costo) =frontera.pop()\r\n\t\tif(problem.isGoalState(estado)):\r\n\t\t\tbreak\r\n\r\n\t\tsucesores=problem.getSuccessors(estado)\r\n\t\tfor sucesor in sucesores:\r\n\t\t\t#i = i+1\r\n\t\t\t#print (i)\r\n\t\t\tif sucesor[0] not in visitados:\r\n\t\t\t\tfrontera.push((sucesor[0], camino + [sucesor[1]], costo + sucesor[2]))\r\n\t\t\t\tvisitados.append(sucesor[0])\r\n\tprint ('Cantidad de nodos en memoria: {}').format(len(frontera.list))\r\n\treturn camino", "def breadth_first(graph,start, end):\n queue = []\n queue.append([start])\n while queue:\n path = queue.pop(0)\n node = path[-1]\n if node == end:\n return path\n for adjacent in graph.get(node, []):\n new_path = list(path)\n new_path.append(adjacent)\n queue.append(new_path)", "def topological_nodes_generator(graph, reverse=...):\n ...", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n startState = problem.getStartState()\n explored = set()\n Frontier = util.Queue()\n Frontier.push([[startState,None,0]])\n while not Frontier.isEmpty():\n StateTriples = Frontier.pop()\n node = StateTriples[-1][0]\n if problem.isGoalState(node):\n solution = []\n for i in StateTriples[1:]:\n solution = solution + [i[1]]\n return solution\n if node not in explored:\n explored.add(node)\n for i in problem.getSuccessors(node):\n Frontier.push(StateTriples+[list(i)])\n 
print(Frontier.isEmpty())\n util.raiseNotDefined()", "def _create_node_iterator(self) -> Iterator[GraphNode]:\n return\n yield", "def bft(self, starting_vertex):\n \"\"\" FIFO is LILO\n Create a queue\n Enqueue starting Vertex\n Create a set top store visited\n \n While the queue is NOT empty: e.g. > 0\n Dequeue the first Vertex\n Check IF NOT visited:\n Mark as visited\n enqueue ALL neighbors found if not already in queue\n \"\"\"\n # FIFO \n q = Queue() # create a queue ( e.g. empty [] )\n q.enqueue(starting_vertex) # Enqeue starting at vertex\n visited = set() # Create a set to store visited\n\n while q.size() > 0: # While the queue is NOT empty:\n # while q: # ERROR: Will add None into v, breaks _get_neighbors\n v = q.dequeue() # dequeue the first vertex\n\n if v not in visited: # Check IF NOT visited: \n print(v)\n visited.add(v) # if NOT visited, add to visited set\n\n for n in self.get_neighbors(v): # loop through all neighbors of v \n # if n not in q.queue: # !!! OPTIMIZATION !!!\n # q.enqueue(n) # enqueue ALL neighbors found (ex. add to end of queue)\n\n q.enqueue(n) # enqueue ALL neighbors found (ex. add to end of queue)", "def breadth_first_traversal(self):\n breadth_first = []\n h = self.root.get_height() \n for i in range(h+2): \n self.level = []\n self.print_level(self.root, i + 1) \n breadth_first.append(self.level)\n return breadth_first", "def BreadthFirstSearch(graph, source):\r\n \r\n # Dictionary dataInfo will be used to store the information about each vertex. (Ancestors, descendants, distance from source, and color)\r\n dataInfo = {} \r\n \r\n # List queue will be used to store the vertices currently in the queue, these vertices will all be gray.\r\n queue = []\r\n \r\n # Loops through the vertices in the graph, creates a key in the dictionary for each vertice, with default values.\r\n for vertex in graph[\"V\"]:\r\n dataInfo[str(vertex)] = {\"ancestor\": \"\", \"descendants\": [], \"distance\": \"\", \"color\": \"white\"}\r\n \r\n # At key source (variable) in dataInfo dictionary, key ancestor is set to have no value other than \"NA\" (as it is the starting point), and distance to 0 (as it will always be zero as it is the source).\r\n dataInfo[str(source)][\"ancestor\"] = \"NA\"\r\n dataInfo[str(source)][\"distance\"] = 0\r\n\r\n def symmetricVertex(edge, otherVertex):\r\n \r\n \"\"\"\r\n Function symmetricVertex takes arguments edge, a list of an edge from the graph dictionary, and otherVertex, an integer that is the other vertex in the edge with the sourceVertex. The function will return the point other than the otherVertex, and will be used to find adjacent vertices relative to the current vertex in the queue. Example: edge ([1, 2]), otherVertex (1), the function will return 2.\r\n \"\"\"\r\n \r\n for num in edge:\r\n if num != otherVertex:\r\n return num\r\n \r\n\r\n def pathFinder(graph, sourceVertex):\r\n \r\n \"\"\"\r\n Function pathFinder takes arguments graph, a dictionary, with the same keys for the edges and the vertices and sourceVertex, an integer. The function will loop through all of the edges in the graph and find adjacent vertices relative to the current sourceVertex. sourceVertex values will be in the queue. The function will edit dictionaries and lists, not return any value.\r\n \"\"\"\r\n \r\n # List removeEdges will be used to store the edges that will be removed from the graph dictionary after the loop ends. 
Makes the code more efficient, as you don't want to loop through a million vertices every time, now do you?\r\n removeEdges = []\r\n \r\n # Loop through edges in the graph, will be used to find adjacent vertices.\r\n for edge in graph[\"E\"]:\r\n \r\n # If the sourceVertex is in the edge and the edge is not discovered yet, then edit and change values in the main dictionary, dataInfo.\r\n if (sourceVertex in edge) and (dataInfo[str(symmetricVertex(edge, sourceVertex))] != \"gray\"):\r\n otherVertex = symmetricVertex(edge, sourceVertex)\r\n \r\n # Adds variable otherVertex to the descendants of the sourceVertex.\r\n dataInfo[str(sourceVertex)][\"descendants\"].append(otherVertex)\r\n \r\n # Updates key(otherVertex) to correct values. Ancestor is always the sourceVertex, the distance is always the distance of sourceVertex incremented by one, and the color is updated to gray as it is added to the queue.\r\n dataInfo[str(otherVertex)] = {\"ancestor\": sourceVertex, \"descendants\": [], \"distance\": (dataInfo[str(sourceVertex)][\"distance\"] + 1), \"color\": \"gray\"}\r\n \r\n # Edge includes two discovered edges, so it will be removed to stop redundancy. It is added to the removeEdges list.\r\n removeEdges.append(edge)\r\n \r\n # Appends the discovered vertex to the queue.\r\n queue.append(otherVertex)\r\n \r\n # After the loop ends, the edges that contain the source vertex have been exhausted, so the color is updated to black.\r\n dataInfo[str(sourceVertex)][\"color\"] = \"black\" \r\n \r\n # If the sourceVertex is in the queue, it is removed, as all of the edges containing it have been exhausted.\r\n if sourceVertex in queue:\r\n queue.remove(sourceVertex)\r\n \r\n # Loop through the edges in the removeEdges list, each edge will be removed.\r\n for edge in removeEdges:\r\n graph[\"E\"].remove(edge)\r\n \r\n # The function pathFinder is called on the graph and the source vertex, which sets up the queue.\r\n pathFinder(graph, source)\r\n \r\n # While the list queue contains values, the pathFinder function is called on the graph, and the queue value at index 0.\r\n while len(queue) != 0:\r\n pathFinder(graph, queue[0])\r\n \r\n # Loop below is for formatting of the data, makes it easier to read.\r\n for key in dataInfo:\r\n print \"Vertex: \" + key + \", Distance: \" + str(dataInfo[key][\"distance\"]) + \", Ancestor: \" + str(dataInfo[key][\"ancestor\"]) + \", Descendants: \" + str(dataInfo[key][\"descendants\"]) + \", Color: \" + str(dataInfo[key][\"color\"]) + \".\" \r\n \r\n # Returns dictionary dataInfo.\r\n return dataInfo", "def bfs(maze, current_node):\n q = collections.deque()\n\n q.append(current_node)\n\n while len(q) > 0:\n current_node = q.popleft()\n maze[current_node.row][current_node.cell] = 1\n yield maze\n\n for neighbour in get_neighbours(maze, current_node):\n if maze[neighbour.row][neighbour.cell] == 2:\n backtrack(maze, neighbour)\n yield maze\n return\n else:\n q.append(neighbour)\n maze[neighbour.row][neighbour.cell] = -2\n\n yield maze\n maze[current_node.row][current_node.cell] = -3\n time.sleep(args.speed)", "def breadthFirstSearch(problem):\n start_node = (problem.getStartState(), [], 0)\n explored = []\n frontier = util.Queue()\n frontier.push(start_node)\n\n if problem.isGoalState(problem.getStartState()):\n return []\n\n while not frontier.isEmpty():\n (current_state, actions, costs) = frontier.pop()\n if current_state not in explored:\n explored.append(current_state)\n if problem.isGoalState(current_state):\n return actions\n for child_node in 
problem.getSuccessors(current_state):\n next_state = child_node[0]\n next_action = child_node[1]\n next_cost = child_node[2]\n\n next_node = (next_state, actions + [next_action], costs + next_cost)\n frontier.push(next_node)\n return []", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n \"\"\"\n ALGORITHM FOR bFS \n Create a queue Q\n enqueue root node to Q\n while Q is not empty:\n dequeu an item v from Q\n mark the item v as visited \n for each node w that is directed from v:\n enqueue w to Q\n \n \n \"\"\"\n\n fringes = util.Queue()\n explored =[]\n fringes.push((problem.getStartState(),[]))\n\n while(not fringes.isEmpty()):\n currentNode,currDir = fringes.pop()\n if problem.isGoalState(currentNode):\n goal = currentNode\n pathToGoal = currDir\n #print \"final path is : \", pathToGoal\n\n break\n # print \"HOraaay goal has been found === > \", currentNode\n\n if not (currentNode in explored):\n explored.append(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n fringes.push((childNode[0],currDir+[childNode[1]]))\n\n\n return pathToGoal", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from game import Directions\n waiting_list = util.Queue()\n # QUEUE\n # FIFO \n visited = set()\n parents = {}\n #collections.defaultdict(collections.UserDict)\n sequence = []\n start_state = problem.getStartState()\n for action in problem.getSuccessors(start_state):\n # in order to push full-state values\n waiting_list.push(action)\n \n while not waiting_list.isEmpty():\n node = waiting_list.pop()\n visited.add(node[0])\n for action in problem.getSuccessors(node[0]):\n \n #if child.STATE is not in explored or frontier then\n if action[0] not in visited:\n parents[action[0]] = {'parent':node} \n waiting_list.push(action)\n if problem.isGoalState(action[0]):\n target_state = action \n \n \n while target_state[0] in parents.keys():\n temp=parents[target_state[0]]['parent']\n sequence.append(target_state[1])\n target_state = temp\n sequence.append(target_state[1])\n return sequence[::-1]", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #Queue to hold the node along with the path taken from the start node to reach that node\n queue = Queue()\n #Set to hold the node explored.\n explorednode = set()\n # Get the start node.\n startnode = problem.getStartState()\n print startnode\n # Push the starting node on the Queue along with an empty set to know the direction in order to reach the node.\n queue.push((startnode,[]))\n\n # Loop till the queue is empty\n while queue.isEmpty() is not True:\n # Pop the currentnode and the direction from the queue\n currentnode, direction = queue.pop()\n # Check if the currentnode is not in explorednode.\n if currentnode not in explorednode:\n # We will now add the node to set of explored node.\n explorednode.add(currentnode)\n # If the node is the goal. 
We made it!!\n if problem.isGoalState(currentnode):\n # The direction holds the way to reach till the goal from the start node.\n print currentnode\n return direction\n # Loop for each successor(child) of the current node.\n for (successor, action, stepCost) in problem.getSuccessors(currentnode):\n # If the successor(child) is not explored\n if successor not in explorednode:\n # Add the successor to the queue along with the path to reach it.\n queue.push((successor, direction + [action]))\n util.raiseNotDefined()", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n startState=problem.getStartState()\n currentLocation = startState\n\n #for GSA implementation\n exploredStates = []\n exploredStates.append(startState)\n \n #To transform the graph to stack for better access in BFS\n frontierQueue = util.Queue()\n for frontier in problem.getSuccessors(startState):\n frontierRoute = frontier + (frontier[1],)\n frontierQueue.push(frontierRoute)\n\n currentRoute = []\n\n #start BFS\n while not(frontierQueue.isEmpty()):\n currentStage = frontierQueue.pop()\n currentState = currentStage[0]\n currentRoute = currentStage[3] \n \n if problem.isGoalState(currentState): \n break\n \n if currentState not in exploredStates:\n for frontier in problem.getSuccessors(currentState):\n if frontier[0] not in exploredStates:\n nextRoute = currentRoute + \",\" + frontier[1]\n frontierRoute = frontier + (nextRoute,)\n frontierQueue.push(frontierRoute)\n \n exploredStates.append(currentState)\n return currentRoute.split(\",\")\n\n util.raiseNotDefined()", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\" \n startState = problem.getStartState()\n visitedNodes = []\n fringe = util.Queue()\n cost = 0 \n if (problem.isGoalState(startState) == True ):\n return [] # No Specific Actions\n else :\n fringe.push((startState , [] , cost ))\n while ( fringe.isEmpty() == False ):\n currentState , actions , cost = fringe.pop()\n \"\"\" get the latest node in the Queue \"\"\"\n \n if ( problem.isGoalState(currentState) == True ): \n \"\"\" check if the node is our goal or not \"\"\"\n #print(\"Final Path : \" + str(actions))\n return actions\n else:\n if ( (currentState in visitedNodes) == False ): \n \"\"\" check if this node is alreay visited or needs to be extended ? 
\"\"\"\n visitedNodes.append(currentState)\n currentNodeSuccessors = problem.getSuccessors(currentState)\n for node in currentNodeSuccessors :\n if(not node in visitedNodes):\n state , action , cost = node \n if ( not state in visitedNodes):\n fringe.push((state , actions + [action] , cost ))\n \n util.raiseNotDefined()", "def breadth_first_graph_search(problem):\n node = Node(problem.initial)\n if problem.goal_test(node.state):\n return node\n frontier = collections.deque([node])\n explored = set()\n while frontier:\n node = frontier.popleft()\n explored.add(node.state)\n for child in node.expand(problem):\n if child.state not in explored and child not in frontier:\n if problem.goal_test(child.state):\n return child\n frontier.append(child)\n return None", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Queue()\n return GraphSearch(problem, 'bfs').search(fringe)", "def find_reachable_nodes_from(self, start_node, **kwargs):\r\n\t\treturn BreadthFirstTraverser(start_node, **kwargs)", "def test_breath_first_traversal(our_bsts):\n bft = []\n for i in our_bsts[0].breadth_first_traversal():\n bft.append(i)\n assert bft == our_bsts[3]", "def breadth_first_traversal(self, cur_node=None):\n if cur_node is None:\n cur_node = self.root\n if cur_node is None:\n return\n q = []\n q.append(cur_node)\n while len(q) > 0:\n cur_node = q.pop(0)\n yield cur_node.data\n if cur_node.left:\n q.append(cur_node.left)\n if cur_node.right:\n q.append(cur_node.right)", "def breadthFirstSearch(problem):\n explored = set()\n frontier = util.Queue()\n start_state = problem.getStartState()\n frontier.push(start_state)\n parent_hash = {}\n parent_hash[start_state] = (None, None)\n\n def get_path(state):\n path_stack = util.Stack()\n actions = []\n current = state\n while parent_hash[current][0] is not None:\n path_stack.push(parent_hash[current][0])\n current = parent_hash[current][1]\n while not path_stack.isEmpty():\n actions.append(path_stack.pop())\n\n return actions\n\n while not frontier.isEmpty():\n node = frontier.pop()\n if problem.isGoalState(node):\n return get_path(node)\n explored.add(node)\n for state, action, _ in problem.getSuccessors(node):\n if state not in explored and state not in frontier.list:\n parent_hash[state] = (action, node)\n frontier.push(state)", "def bft(self, starting_vertex):\n # create an empty queue and enqueue the starting vertex ID\n queue = Queue()\n queue.enqueue(starting_vertex)\n # create an emtpy Set to stoe the visited vertices\n visited = set()\n # while the queue is not empty ...\n while queue.size() > 0:\n # dequeue the first vertex\n vert = queue.dequeue()\n # if that vertex has not been visited..\n if vert not in visited:\n # mark it as visited\n visited.add(vert)\n print(vert)\n # then add all of its neighbors to the back of the queue\n for neighbor in self.vertices[vert]: # self.get_neighbors(vert)\n queue.enqueue(neighbor)", "def bft(self, starting_vertex):\n \n \"\"\"\n Plan:\n - Start at given index. Add that index to the Q.\n - While len(Q) is greater than 0:\n - Check if q[0] has children.\n - If so then make sure children have not been visited, then add those children to the Q.\n - If they have been visited, skip over the child and DO NOT add to Q # !! 
will result in infinite loop !!\n \"\"\"\n\n queue = Q()\n visited = []\n\n queue.add(starting_vertex)\n\n while len(queue):\n current = queue.first()\n children = self.vertices[current]\n \n if len(children) > 0:\n for child in children:\n if child not in visited:\n queue.add(child)\n else: continue\n\n print(current)\n visited.append(current)\n queue.pop()", "def breadth_first(self):\n self.breadth_first_list = []\n queque = Queue()\n queque.enqueue(self.root)\n while not queque.is_empty():\n ele = queque.dequeue()\n self.breadth_first_list.append(ele.value)\n if ele.left is not None:\n queque.enqueue(ele.left)\n if ele.right is not None:\n queque.enqueue(ele.right)\n return self.breadth_first_list", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n queue = util.Queue() # queue for searshing the graph\n visited = [] # keep track of visited nodes\n start =problem.getStartState() # The start node\n queue.push((start, [])) # the sart state and empty path list is pushed to the queue\n \n while queue:\n (vrtx, path) = queue.pop()\n if vrtx not in visited: \n if problem.isGoalState(vrtx):\n return [p[1] for p in path]\n visited.append(vrtx)\n for successor in problem.getSuccessors(vrtx) :\n queue.push((successor[0], path+[successor]))\n\n util.raiseNotDefined()", "def breadth_first_search(self, start_point=None):\n\n self.initialization_list_empty_nodes(self.labyrinth_statistics[\"number_of_nodes\"])\n \n #If start_point is None, we set it to the node where the agent is in the labyrinth\n if start_point is None:\n start_point = self.agent_node\n\n #Initial situation of the algorithm\n queue = [start_point]\n start_point.status = 1\n start_point.distance_from_start_point = 0\n \n #While the queue is not empty, we analyze the nodes in it to empty it step by step\n while(len(queue) > 0):\n node_to_analyze = queue[0]\n for node in node_to_analyze.connected_to:\n if node.status == 0:\n node.pere = node_to_analyze\n node.distance_from_start_point = queue[0].distance_from_start_point + 1\n node.status = 1\n queue.append(node)\n queue.pop(0)\n node_to_analyze.status = 2", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Queue\n q = Queue()\n mapper = {} #child_point : (parent_point, direction_to_child)\n q.push(problem.getStartState())\n mapper[problem.getStartState()] = None #root\n\n while (not q.isEmpty()):\n point = q.pop()\n\n if (problem.isGoalState(point)):\n c = point\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0]\n l.reverse()\n print l\n return l\n\n else:\n for child in problem.getSuccessors(point):\n if (child[0] not in mapper):\n q.push(child[0])\n mapper[child[0]] = (point, child[1])\n\n # util.raiseNotDefined()", "def bft(self, starting_vertex):\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue \n q.enqueue(starting_vertex) # set enqueue with the starting vertex\n\n while q.size() > 0: # loop if the size is greater than 0\n v = q.dequeue() # dequeue and store \n\n if v not in visited: # if v has not in the set \n visited.add(v) # add v to the set \n print(v) \n # Then add all of its neighbors to the back of the queue\n for neighbor in self.vertices[v]: # loop through neighbors \n q.enqueue(neighbor) # add each neighbor to the end of the que ", "def _breadth_first(self, queue, elements=True):\n while not queue.is_empty():\n node = queue.dequeue()\n if elements:\n yield node.element()\n else:\n yield node\n for child in 
self.children(node):\n queue.enqueue(child)", "def traverse_breadth_first(self, src: int = 0, graph: GraphInterface = None):\n if not isinstance(graph, DiGraph) or graph is None or self._graph.get_node(src) is None:\n return\n curr = graph.get_node(src)\n\n q = Queue()\n\n q.put(curr)\n curr.tag += 1\n\n while not q.empty():\n\n curr = q.get()\n out_edges = graph.all_out_edges_of_node(curr.key)\n\n for i in out_edges:\n out_edge = out_edges[i]\n neighbor = graph.get_node(out_edge.dest) # Get curr's neighbor\n if neighbor.tag == curr.tag - 1:\n neighbor.tag += 1 # If un-tagged -> tag it.\n q.put(neighbor) # and enqueue it", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # BFS is identical to DFS, save for the data structure used to store the frontier\n\n # Frontier stored in a Queue\n frontier = util.Queue()\n\n # Visited states stored in a list\n visitedStates = []\n\n # Format of each element: (current coordinates, [path taken to get there])\n frontier.push((problem.getStartState(), []))\n\n # while there are still states to explore\n while not frontier.isEmpty():\n\n # store the current state and path in separate variables\n currentState, pathTaken = frontier.pop()\n\n # for skipping states that have already been visited\n if currentState in visitedStates:\n continue\n\n # for returning the correct path to the goal state upon discovering it\n if problem.isGoalState(currentState):\n return pathTaken\n\n # count the current state as \"visited\"\n visitedStates.append(currentState)\n\n # for each successor state, check whether they have already been visited. if not, add their coordinates to the frontier, and append their respective direction to the path list\n for coordinates, direction, cost in problem.getSuccessors(currentState):\n\n if coordinates not in visitedStates:\n\n frontier.push((coordinates, pathTaken + [direction]))\n\n util.raiseNotDefined()", "def bfs(graph,start):\n #keeps track of nodes to be visited\n queue = []\n #keeps track of nodes already visited\n explored = []\n queue.append(start)\n while queue:\n #remove first node from queue\n curr_node = queue.pop(0)\n #check if node is visited\n if curr_node not in explored:\n explored.append(curr_node)\n adjacent_nodes = graph[curr_node]\n #add adjacent nodes to queue\n for i in adjacent_nodes:\n queue.append(i)\n return explored", "def bfs(array, neighbors, start, similar):\n match = get_item(array, start)\n block = {start}\n visit = deque(block)\n child = deque.popleft\n while visit:\n node = child(visit)\n for offset in neighbors:\n index = get_next(node, offset)\n if index not in block:\n block.add(index)\n if is_valid(array, index):\n value = get_item(array, index)\n if similar(value, match):\n visit.append(index)\n yield node", "def _forest_nodes(self):\n\n self.arbor._grow_tree(self)\n root = self.root\n for link in root._links:\n yield self.arbor._generate_tree_node(self.root, link)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # initialize frontier using initial state of problem\n current_state = problem.getStartState()\n frontier = util.Queue()\n frontier.push(current_state)\n\n # initialize explored set to be empty\n explored_set = []\n\n # a dictionary to save how to get to certain states from initial state\n actions_list = {current_state:[]}\n\n # loop while we still have unexplored nodes\n while not frontier.isEmpty():\n\n # choose a leaf node and remove it from frontier\n leaf_node = frontier.pop()\n\n # add the node to explored set\n explored_set.append(leaf_node)\n\n # 
expand the chosen node\n # and add to the frontier if not in frontier and explored set\n for successor in problem.getSuccessors(leaf_node):\n child, action, _ = successor\n if child not in explored_set and child not in frontier.list:\n # return the solution if it is the goal state\n if problem.isGoalState(child):\n return actions_list[leaf_node] + [action]\n frontier.push(child)\n actions_list[child] = actions_list[leaf_node] + [action]\n else:\n # search through all but still can't find a solution -> failed!\n return 'failure'", "def bfs(maze):\n # TODO: Write your code here\n frontier = Queue()\n visited = []\n path = []\n ret = []\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives.copy())\n frontier.put(start)\n explored = []\n \n\n while not frontier.empty(): # while frontier queue is not empty\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n objectivesLeft = currentState.objectives()\n\n if objectivesLeft.count(currentCell) != 0:\n\n objectivesLeft.remove(currentCell)\n \n # all objectives found, initialise backtrace and exit loop\n # if len(objectivesLeft) == 0:\n path.append(currentState)\n ret.append(currentCell)\n visited.append(currentState)\n break\n\n # current cell is not objective nor visited\n if visited.count(currentState) == 0:\n explored.append(currentCell)\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectivesLeft)\n\n # if neighbor is not visited, add it to the frontier\n if visited.count(neighbor) == 0:\n neighbor.setParent(currentState)\n frontier.put(neighbor)\n\n visited.append(currentState)\n\n #backtrace\n while path[0] != start:\n\n currentState = path[0]\n path.insert(0, currentState.parent())\n ret.insert(0, currentState.parent().cell())\n\n return ret", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #the logic is same as for DFS just that i made use of a Queue data structure\n #Here the queue acts as a FIFO queue\n neighbourNodes = util.Queue()\n moves = []\n neighbourNodes.push((problem.getStartState(),moves))\n seenNodes = set()\n\n while not neighbourNodes.isEmpty():\n poppedNodeState, poppedNodeAction= neighbourNodes.pop()\n if(poppedNodeState in seenNodes):\n continue\n if problem.isGoalState(poppedNodeState):\n return poppedNodeAction\n seenNodes.add(poppedNodeState)\n for state, action, cost in problem.getSuccessors(poppedNodeState):\n if(state in seenNodes):\n continue\n neighbourNodes.push((state, poppedNodeAction+[action]))\n return moves\n #util.raiseNotDefined()", "def BFS(self, start_vertex):\n yield from self._search(start_vertex, kind='BFS')", "def bfs(graph, start_vertex):\n\n queue = deque()\n queue.appendleft(start_vertex)\n explored_vertices = [start_vertex]\n\n while len(queue) != 0:\n vertex = queue.pop()\n neighbours = graph.neighbours(vertex)\n for neighbour in neighbours:\n if neighbour not in explored_vertices:\n explored_vertices.append(neighbour)\n queue.appendleft(neighbour)\n\n return explored_vertices", "def traverse_breadth_first(self, fn):\n queue = deque([self.root])\n while len(queue) > 0:\n node = queue.popleft()\n fn(node)\n queue.extend(node.children)", "def bfs(graph, start_node):\n start_node.distance = 0\n start.set_predecessor(None)\n queue = list()\n queue.append(start_node)\n while (len(queue) > 0):\n current_vertex = queue.pop()\n current_vertex.setState = \"visiting\"\n for vertex in current_vertex.links():\n if (vertex.getState == \"unvisited\"):\n 
vertex.setState == \"tobevisited\"\n vertex.set_predecessor(current_vertex)\n vertex.distance = current_vertex.distance + 1\n queue.append(vertex)\n current_vertex.setState = \"visited\"", "def customBreadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n i = 0\n dirList = []\n closed = util.Counter()\n fringe = util.Queue()\n state = problem.getStartState()\n followPac = []\n closed[hash(state)] = 1\n\n for triple in problem.getSuccessors(state):\n fringe.push((triple, dirList.copy()))\n while not fringe.isEmpty():\n i += 1\n state = fringe.pop()\n succ = state[0][0]\n act = state[0][1]\n cost = state[0][2]\n dirList = state[1]\n dirList.append(act)\n \n if problem.isGoalState(succ):\n return dirList\n if problem.isPacman(succ):\n followPac.append(dirList.copy())\n if closed[hash(succ)] == 0:\n closed[hash(succ)] = 1\n for triple in problem.getSuccessors(succ):\n fringe.push((triple, dirList.copy()))\n if not followPac:\n return\n followPac = max(followPac, key=lambda x: len(x))\n last = followPac.pop()\n followPac.append(last)\n followPac.append('place')\n followPac.append(reverse[last])\n return followPac.copy()", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def bft(self, starting_vertex):\n \"\"\"\n Loop over every vertex in the queue. Print each vertex\n as we come to it. Find all the edges of the current vertex\n and add them to the queue and the cache.\n \"\"\" \n queue = [starting_vertex]\n isQueued = {starting_vertex}\n for vertex in queue:\n print(vertex)\n for edge in self.get_neighbors(vertex):\n if edge not in queue:\n queue.append(edge)\n isQueued.add(edge)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # fringe priority queue\n fringe = util.PriorityQueue()\n fringe.push([problem.getStartState()],1) # fringe will have (priority, order, [s0,s1,..])\n\n # closed set\n closed = []\n\n i = 0\n while not fringe.isEmpty():\n\n # get highest priority path for expansion e.g. [s0,s2,s4]\n path_exp = fringe.pop()\n\n # take last node in path e.g. s4\n node_exp = path_exp[-1]\n\n # check goal state\n if problem.isGoalState(node_exp): # check if goal\n actions = actions_for_path(problem,path_exp)\n #import pdb; pdb.set_trace()\n return actions\n\n # add expanded node into closed set e.g. 
[s0,s1,s2]\n if node_exp not in closed:\n closed.append(node_exp)\n else:\n # if it's in the closed set, don't expand\n continue\n\n # get sucessors to expand fringe\n successors = problem.getSuccessors(node_exp)\n for successor in successors:\n # unpack states, actions\n ss,aa,_ = successor\n if ss not in closed:\n path = path_exp+[ss]\n # expand fringe by adding candidate paths, prioritize by len of path\n fringe.push(path,len(path))\n\n #i+=1\n if i==1000:\n import pdb; pdb.set_trace()\n\n util.raiseNotDefined()", "def breadth_first_search(problem):\n fringe = util.Queue()\n return general_search(problem, fringe)", "def breadth_first_search(self, vertex):\n\n visited = [False] * self.V\n queue = list()\n # Appending the vertex to an empty queue\n queue.append(vertex)\n\n # Marking the Visiting Vertex as True\n visited[vertex] = True\n print(\"\\n\\nBreadth First Search: \", end=\" \")\n while queue:\n # Popping the First Element in queue\n s = queue.pop(0)\n print(s, end=\" \")\n\n # Visiting the adjacent vertices of queue\n # And Validating if the vertex is visited\n for i in self.adj_list[s]:\n if not visited[i]:\n queue.append(i)\n visited[i] = True", "def breadth_traverse(self):\n\n output = []\n q = Queue()\n q.enqueue(self.root)\n while q.peek():\n curr = q.dequeue()\n output.append(curr.value)\n if curr.l_child:\n q.enqueue(curr.l_child)\n if curr.r_child:\n q.enqueue(curr.r_child)\n return output", "def breadth_fs(start: Vector2D, goal: Vector2D, grid: Scene, *args) -> (list, list):\n frontier = Queue() # nodes to be explored\n prev_node = dict() # maps n to node that precedes it in cheapest currently-known path from start to n\n explored = [] # keeps track of previously explored nodes, to be drawn later\n\n prev_node[start] = None\n frontier.put(start)\n\n while not frontier.empty():\n current = frontier.get()\n grid.set_cell(current, Cell(val = CellType.searched))\n \n for neighbor in grid.get_unexplored_neighbors(current):\n prev_node[neighbor] = current\n frontier.put(neighbor)\n explored.append(neighbor)\n if neighbor == goal:\n return (reconstruct_path(goal, prev_node), explored)\n\n grid.set_cell(neighbor, Cell(val = CellType.searched))\n\n # If frontier empty but goal was never reached, no solution was found\n return ([], explored)", "def __iter__(self):\n root = self.root()\n queue = Queue()\n queue.enqueue(root)\n return self._breadth_first(queue)", "def breadthFirstSearch(problem):\n visitedStates = set([])\n startState = problem.getStartState()\n expandedStates = set([])\n\n fringe = util.Queue()\n fringe.push((startState, []))\n visitedStates.add(startState)\n\n while not fringe.isEmpty():\n state, actions = fringe.pop()\n\n if(problem.isGoalState(state)):\n return actions\n\n expandedStates.add(state)\n for nextState, action, cost in problem.getSuccessors(state):\n if(nextState not in visitedStates):\n visitedStates.add(nextState)\n fringe.push((nextState, actions + [action]))\n\n return []", "def dfs_edges_generator(graph, source, reverse=...):\n ...", "def breadth_first_traversal(self, start_node, visitor_function=None, max_depth=None):\n self._reset_traversal_state()\n\n if isinstance(start_node, str):\n start_node = self.nodes[start_node]\n\n if not isinstance(start_node, ProcessNode):\n raise TypeError('Expect start_node to either be a string or a ProcessNode. 
Got [{}] instead'.format(\n str(type(start_node))))\n\n start_node.discovery_time = 1\n queue = collections.deque()\n queue.appendleft(start_node)\n\n while len(queue) > 0:\n node = queue.pop()\n assert NodeColor.WHITE == node.color\n\n if node.predecessor is not None:\n node.discovery_time = node.predecessor.discovery_time + 1\n\n self._visit_enter(node, visitor_function)\n\n node.color = NodeColor.GRAY\n\n if max_depth is None or node.discovery_time + 1 < max_depth:\n for descendant in self.edges[node]:\n if NodeColor.WHITE == descendant:\n descendant.predecessor = node\n queue.appendleft(descendant)\n\n node.finishing_time = self.time\n node.color = NodeColor.BLACK\n\n self._visit_exit(node, visitor_function)", "def breadth_first_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = []\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.append(state)\r\n\r\n while (True):\r\n state = fringe[0]\r\n del fringe[0]\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n for stat in states:\r\n if stat[0] not in path and stat[0] not in fringe:\r\n fringe.append(stat[0])\r\n\r\n while (True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def breadthFirstSearch(problem):\n from game import Directions\n North = Directions.NORTH\n South = Directions.SOUTH\n East = Directions.EAST\n West = Directions.WEST \n \n pathDict = {}\n visited = set()\n #visited start\n visited.add(problem.getStartState())\n #initial successors\n successor = problem.getSuccessors(problem.getStartState())\n for initSucc in successor:\n pathDict[initSucc[0]] = [initSucc[1]]\n #loop\n while (1):\n #if fringe = null, return failure\n if (len(successor) == 0):\n print \"Fringe is empty\"\n return util.raiseNotDefined()\n #(v, path) = fringe.pop\n succLocation = successor[0][0]\n succDirection = successor[0][1]\n del successor[0]\n #if isGoal = true, return path\n if problem.isGoalState(succLocation):\n return pathDict[succLocation]\n #if visited = false\n if succLocation not in visited:\n #visited = true\n visited.add(succLocation)\n #L = expand(v,path)\n tempSuccList = problem.getSuccessors(succLocation)\n #Fringe <- L\n for succ in tempSuccList:\n repeat = False\n for s in successor:\n if (s[0] == succ[0]):\n repeat = True\n if (repeat == False):\n successor.append(succ)\n pathDict[succ[0]] = []\n pathDict[succ[0]].extend(pathDict[succLocation])\n pathDict[succ[0]].append(succ[1])", "def breadth_first_traversal(self, start_val):\n traversed = []\n visited, queue = set(), []\n queue.append(start_val)\n while queue:\n current = queue.pop(0)\n if current in visited:\n continue\n visited.add(current)\n queue.extend(self._g[current])\n traversed.append(current)\n return traversed", "def bfs(graph, initial_node, dest_node):\n return queue_search(graph, initial_node, dest_node, queue.Queue())", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n visited=[]\n \n node=dict()\n start=problem.getStartState()\n node['parent']=None\n node['direction']=None\n node['state']=start\n \n \n qu.push(node)\n lis.append(node)\n \n print qu.list\n while qu.isEmpty()!=True:\n node=qu.pop()\n pos=node['state']\n visited.append(pos)\n print 
visited\n if problem.isGoalState(pos):\n print \"found\"\n return getPath(problem,node)\n #break\n suc=problem.getSuccessors(pos)\n if suc ==None:\n continue \n \n print suc\n for step in suc:\n #if step not in dic :\n if step[0] not in visited:\n childnode={}\n childnode['parent']=pos\n childnode['direction']=step[1]\n childnode['state']=step[0]\n qu.push(childnode)\n lis.append(childnode)\n \n\n #util.raiseNotDefined()", "def __iter__(self):\n # set current node to front node\n current = self.front\n # while current != None\n while current:\n # send out current node's data\n yield current.data\n # move to next node\n current = current.prior", "def breadthFirstSearch(initialState, finalState):\n\n def exploreNext(neighbor, move):\n \"\"\"Finds out if the neighbor is withinf the boundaries and explore it.\n `explored` is the set used in the BFS function.\n `stateQueue` is the queue inside the BFS function.\n `currentState` is each visited node inside the loop of the BFS function.\n\n \"\"\"\n if (neighbor != None and tuple(neighbor) not in explored):\n nextState = State(neighbor)\n nextState.path = currentState.path.copy()\n nextState.path.append(move)\n stateQueue.append(nextState)\n\n stateQueue = deque([]) # List of States\n explored = set() # Set of tuples of each visited state of the puzzle\n sizeBytesCounter = 0\n\n # Init queue\n stateQueue.append(State(initialState))\n\n # while queue is not empty\n while stateQueue:\n currentState = stateQueue.popleft()\n sizeBytesCounter += sys.getsizeof(currentState)\n\n # Add an unmodified list to the set, a tuple\n explored.add(tuple(currentState.puzzle))\n\n if finalState == currentState.puzzle:\n return currentState, explored, sizeBytesCounter\n \n # Create a node of the current state\n currentNode = Node(currentState.puzzle)\n\n # Iterate over posible paths\n exploreNext(*currentNode.up())\n exploreNext(*currentNode.down())\n exploreNext(*currentNode.left())\n exploreNext(*currentNode.right())\n \n return None", "def breadth_first_traversal(self, callback: Callable[[Tree], None]) -> None:\n nodes_to_visit = []\n nodes_to_visit.append(self)\n\n while nodes_to_visit:\n temp_node = nodes_to_visit.pop()\n callback(temp_node)\n\n # Enqueuing child nodes in order (left to right) in order to traverse\n # breadth first, from left to right. More specifically, the nodes will be visited \n # in ascending order based on The visual example provided below.\n # tree: \n # 1\n # / \\\n # 2 3\n # / \\ / \\\n # 4 5 6 7\n for node in temp_node.children:\n nodes_to_visit = [node] + nodes_to_visit", "def bfs(self, starting_vertex, destination_vertex):\n \"\"\" FIFO ir LILO\n Create a queue\n Enqueue PATH to starting Vertex\n Create a set top store visited vertices\n While the queue is NOT empty: e.g. 
> 0\n Dequeue the first PATH Vertex\n Get Vertex from END of PATH\n Check IF NOT visited:\n Mark as visited\n check if vertex is destination_vertex\n If TRUE, return path\n enqueue PATH to ALL of neighbors \n make COPY of current path\n add neighbor to path copy\n enqueue copy \n \"\"\"\n\n q = Queue() # Create a queue\n q.enqueue([starting_vertex]) # Enqueue starting at vertex into Queue (list)\n visited = set() # Create a set to store visited \n \n while q.size() > 0: # While the queue is NOT empty: \n path = q.dequeue() # Dequeue the first PATH Vertices\n v = path[-1] # Get Vertex from END of PATH\n\n if v not in visited: # Check IF NOT visited:\n visited.add(v) # Mark as visited\n\n if v == destination_vertex: # check if vertex is destination_vertex\n return path # If TRUE, return path, DONE\n\n for n in self.get_neighbors(v): # enqueue PATH to ALL of neighbors\n path_c = path [:] # make COPY of current path\n path_c.append(n) # add neighbor to path copy\n q.enqueue(path_c) # enqueue copy", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n st = util.Queue()\n strt = problem.getStartState()\n st.push(strt) \n visited = [strt]\n came_from ={}\n came_from [strt] =(None,None)\n\n while not st.isEmpty():\n state = st.pop()\n if problem.isGoalState(state) :\n break\n nodes = problem.getSuccessors(state)\n for (successor,action,cost) in nodes:\n if successor not in visited :\n st.push(successor)\n came_from[successor] = (state , action)\n visited.append(successor) \n \n # exit while\n actions = []\n while(state != strt) :\n (parent,action) =came_from[state]\n state = parent\n actions.append(action)\n actions.reverse()\n return actions", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # Get the start node\n start_state = problem.getStartState()\n print(start_state)\n\n # Define a stack\n plan_stack = util.Queue()\n start_plan = [start_state] # node, cost\n plan_stack.push(start_plan)\n\n # Visited nodes\n visited_nodes = set(start_state)\n\n goal_found = False\n\n while not goal_found:\n # Get the plan from the stack\n plan_to_expand = plan_stack.pop()\n node_to_exp = plan_to_expand[-1]\n all_nxt_nodes = problem.getSuccessors(node_to_exp)\n\n # Traverse through all the next nodes\n for nxt_node in all_nxt_nodes:\n nxt_pos = nxt_node[0]\n\n if nxt_pos in visited_nodes: # Check if node is already visited\n continue\n\n visited_nodes.add(nxt_pos) # Add the node to visited nodes\n nxt_plan = plan_to_expand + [nxt_pos] # add node to the plan\n plan_stack.push(nxt_plan) # push the plan into the stack\n goal_found = problem.isGoalState(nxt_pos) # Check if goal is achieved\n if goal_found:\n break\n \n \n print(goal_found)\n print(nxt_plan)\n\n moves = []\n # Convert plan to moves\n for i in range(len(nxt_plan) - 1):\n for nxt_node in problem.getSuccessors(nxt_plan[i]):\n nxt_pos = nxt_node[0]\n nxt_mv = nxt_node[1]\n if nxt_pos == nxt_plan[i+1]:\n moves.append(nxt_mv)\n break\n \n return moves\n\n \n\n # Calculate the minimum plan cost \n #min_val = float(\"inf\")\n #for one_plan in plan_stack:\n # plan_cost = one_plan[1]\n # if plan_cost < min_val:\n # min_val = plan_cost\n\n ## Expand the nodes with minimum plan cost\n #for one_plan in plan_stack:\n # plan_cost = one_plan[1]\n # if plan_cost == min_val:\n # plan_step = one_plan[0] \n # # Expand the last node of plan\n # last_node = plan_step[end]\n # for nxt_node in problem.getSuccessors(last_node):\n\n\n\n util.raiseNotDefined()", "def breadthFirstSearch(problem):\n stack = Queue()\n\n visited = []\n parent_dict = 
dict()\n start_state = problem.getStartState()\n stack.push(start_state)\n actions_dict = dict()\n final_actions = []\n discovered = [problem.getStartState]\n\n if problem.isGoalState(problem.getStartState()):\n return []\n\n while not stack.isEmpty():\n current_state = stack.pop()\n visited.append(current_state)\n\n if problem.isGoalState(current_state):\n break\n\n successors = problem.getSuccessors(current_state)\n for s in successors:\n if s[0] not in visited and s[0] not in discovered:\n stack.push(s[0])\n parent_dict[s[0]] = current_state\n actions_dict[(current_state, s[0])] = s[1]\n discovered.append(s[0])\n\n while current_state is not start_state:\n parent = parent_dict[current_state]\n final_actions.append(actions_dict[parent, current_state])\n current_state = parent\n\n final_actions.reverse()\n return final_actions", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n frontier = util.Queue()\n start = problem.getStartState()\n record = [] # gonna use dictionary to keep track of movements\n frontier.push(start)\n explored = [start]\n\n location = 0 # to remember which successor part im accessing\n action = 1\n\n while not frontier.isEmpty():\n current_location = frontier.pop()\n print(current_location)\n\n if problem.isGoalState(current_location):\n break\n\n\n for each in problem.getSuccessors(current_location):\n if each[location] not in explored:\n frontier.push(each[location])\n record.append({'From': current_location, 'To': each[location], 'By': each[action]})\n explored.append(each[location])\n\n while not problem.isGoalState(record[-1]['To']): # loop removes last couple of movements which don't lead to goal\n record.remove(record[-1])\n\n x = len(record)\n while x - 1 != 0: # loop clears out actions that dont come from previous position\n if record[x - 1]['From'] != record[x - 2]['To']: # starts from goal and works backwards\n record.remove(record[x - 2])\n x = len(record)\n else:\n x -= 1\n\n return [path['By'] for path in record]\n\n return []", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n result = []\n qu = util.Queue()\n visited = set([])\n current = [problem.getStartState()]\n\n qu.push(current)\n\n while not qu.isEmpty():\n current = qu.pop()\n visited.add(current[-1])\n\n if problem.isGoalState(current[-1]):\n result = current\n break\n\n for each in problem.getSuccessors(current[-1]):\n if each[0] not in visited:\n temp = list(current)\n temp.append(each[0])\n qu.push(temp)\n visited.add(each[0])\n\n path = get_path(result)\n return path\n util.raiseNotDefined()", "def bfs(self, startNode):\n queue = Queue()\n\n # Mark all the nodes as not visited\n visited = {}\n for node in self.getNodes():\n visited[node] = False\n\n queue.enqueue(startNode)\n\n while not queue.isEmpty():\n s = queue.dequeue()\n visited[s] = True\n print s,\n\n # enqueue all the adjacent vertices to s\n # if they've not already been visited\n\n for adjacentNode in self.getAdjacent(s):\n if visited[adjacentNode] is False:\n queue.enqueue(adjacentNode)\n visited[adjacentNode] = True", "def bft(self, starting_vertex):\n # create an empty queueueueueueueueueue class\n to_visit = Queue()\n # create an empty set\n visited = set()\n # populating the queueueueueueue with our starting vertex\n to_visit.enqueue(starting_vertex)\n\n # while loop to run while the queueueueueue is not empty\n while to_visit.size() > 0:\n v = to_visit.dequeue()\n # checking to see if the dequeueueued vertex is in our set or not\n if v not in visited:\n # if it is then it gets printed out\n 
print(v)\n # it then gets added to the visited set\n visited.add(v)\n # now we are checking the neighbors of the vertex and adding them\n # to the queueueueueueue\n for n in self.vertices[v]:\n to_visit.enqueue(n)", "def bfs(self, starting_vertex, destination_vertex): # great if you know to result is somewhere close to the root/start\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue\n q.enqueue([starting_vertex]) # set the starting_vertex with enqueue \n\n while q.size() > 0:\n path = q.dequeue() # dequeue and store first path\n v = path[-1] # store the vertex from the end of path \n\n if v == destination_vertex: # if v is equal to the destination_vertex\n return path # return the path \n\n if v not in visited: # if v has not been visited yet \n visited.add(v) # add v to the vistied set \n\n for neighbor in self.vertices[v]: # loop through the neighbors \n path_copy = list(path) # make a copy of the path \n path_copy.append(neighbor) # append each neighbor to the back of the path copy \n q.enqueue(path_copy) # enqueue the path copy to the queue ", "def nodes(self):\n root = self.root()\n queue = Queue()\n queue.enqueue(root)\n return self._breadth_first(queue, elements=False)", "def bfs(maze):\n # TODO: Write your code here.\n start = maze.getStart() \n frontier = [] \n path = [] \n dim = maze.getDimensions()\n objs = maze.getObjectives()\n rows = dim[0]\n cols = dim[1]\n visited = {} # visited as a dictionary\n for i in range (0, rows):\n for j in range (0, cols):\n visited[(i,j)] = (-1, -1)\n frontier.append(start)\n visited[(i,j)] = (-2, -2)\n while frontier:\n pt = frontier.pop(0)\n if maze.isObjective(pt[0], pt[1]) == True:\n break\n else:\n list_of_neighbors = maze.getNeighbors(pt[0], pt[1])\n for i in list_of_neighbors:\n if visited.get(i) == (-1, -1): \n frontier.append(i)\n visited[i] = pt \n while pt != start:\n path.append(pt)\n pt = visited.get(pt)\n path.append(start)\n path.reverse()\n return path", "def BFT(tree):\n queue = [tree]\n bft_nodelist = []\n while queue:\n node = queue.pop(0)\n bft_nodelist.append(node)\n queue += node.nodelist\n return bft_nodelist", "def bfs(initial_state, dimension=3):\n\t\n\treturn search(initial_state, Frontier(Queue), dimension)" ]
[ "0.71898806", "0.7085778", "0.70385784", "0.6970572", "0.6837096", "0.67863566", "0.6713473", "0.66816336", "0.6657518", "0.6647682", "0.66333216", "0.6629434", "0.6573334", "0.6550337", "0.6511887", "0.65066606", "0.64886534", "0.6471162", "0.64685374", "0.64553446", "0.64019185", "0.6400742", "0.6380698", "0.6369547", "0.6360887", "0.6345833", "0.629878", "0.62940055", "0.62487954", "0.62337357", "0.6223272", "0.62134916", "0.62132764", "0.62080026", "0.62054443", "0.6195681", "0.6181415", "0.61691856", "0.61566496", "0.6148432", "0.61405694", "0.6139164", "0.6135234", "0.6131309", "0.61180466", "0.61097455", "0.60986763", "0.60903895", "0.6086687", "0.6069292", "0.60677314", "0.60627526", "0.6048957", "0.60410434", "0.60402", "0.60331035", "0.60319376", "0.6018219", "0.601183", "0.6006802", "0.60006964", "0.5988543", "0.5985189", "0.59737533", "0.59717053", "0.5961526", "0.59584385", "0.59584385", "0.59584385", "0.595702", "0.59509", "0.59497684", "0.5932302", "0.593093", "0.5926221", "0.5921812", "0.59168535", "0.59068984", "0.5893128", "0.5887246", "0.588711", "0.5876872", "0.58761007", "0.58697265", "0.5864407", "0.5848589", "0.583714", "0.5831558", "0.58296657", "0.58254874", "0.58244026", "0.58115894", "0.5810047", "0.5807841", "0.5806248", "0.5797594", "0.5796356", "0.57778454", "0.575723", "0.575411" ]
0.74444324
0
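Note: the negative entries in the record above all implement the same queue-based breadth-first pattern in different styles (graph traversal, tree traversal, maze/puzzle search). The following is a minimal, self-contained sketch of that pattern for reference; the adjacency-dict graph representation and the function names (bfs_order, bfs_path) are illustrative assumptions, not code taken from any single entry.

# A minimal sketch of the queue-based BFS pattern that the snippets above
# implement in various forms. Graph representation and names are assumptions.
from collections import deque

def bfs_order(graph, start):
    """Return vertices in breadth-first order from `start`.

    `graph` maps each vertex to an iterable of its neighbors.
    """
    visited = {start}
    order = []
    queue = deque([start])
    while queue:
        vertex = queue.popleft()          # FIFO: take the oldest frontier vertex
        order.append(vertex)
        for neighbor in graph.get(vertex, ()):
            if neighbor not in visited:   # mark on enqueue so nothing is queued twice
                visited.add(neighbor)
                queue.append(neighbor)
    return order

def bfs_path(graph, start, goal):
    """Return a shortest (fewest-edges) path from `start` to `goal`, or None."""
    prev = {start: None}                  # predecessor map doubles as the visited set
    queue = deque([start])
    while queue:
        vertex = queue.popleft()
        if vertex == goal:
            path = []
            while vertex is not None:     # walk predecessor links back to start
                path.append(vertex)
                vertex = prev[vertex]
            return path[::-1]
        for neighbor in graph.get(vertex, ()):
            if neighbor not in prev:
                prev[neighbor] = vertex
                queue.append(neighbor)
    return None

if __name__ == "__main__":
    g = {"a": ["b", "c"], "b": ["d"], "c": ["d"], "d": []}
    print(bfs_order(g, "a"))          # ['a', 'b', 'c', 'd']
    print(bfs_path(g, "a", "d"))      # ['a', 'b', 'd']

Marking a vertex as visited at enqueue time, as above, is what keeps it from entering the queue more than once; several of the snippets in the record handle the same concern with an explicit membership check before pushing, others only when dequeuing.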
Edges frontiers generator using breadth-first search.
def bfs_edges_generator(graph, source, reverse=...): ...
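The positive document above is only a signature stub (the `reverse=...` default comes from a type stub), so the following is a hedged illustration of what an edge-frontiers BFS generator can look like over a plain adjacency dict. It is an assumption for illustration, not the library's actual implementation; in particular, treating `reverse` as a flag that flips the reported edge direction is a guess.

# Illustrative sketch only: one plausible reading of an "edge frontiers"
# generator. Not the real library code; `reverse` semantics are assumed.
from collections import deque

def bfs_edges_generator(graph, source, reverse=False):
    """Yield, per BFS level, the list of edges that discover new vertices."""
    visited = {source}
    frontier = deque([source])
    while frontier:
        edges = []
        next_frontier = deque()
        for u in frontier:
            for v in graph.get(u, ()):
                if v not in visited:
                    visited.add(v)
                    next_frontier.append(v)
                    edges.append((v, u) if reverse else (u, v))
        if edges:
            yield edges
        frontier = next_frontier

# Example: two levels of edge frontiers
g = {0: [1, 2], 1: [3], 2: [3], 3: []}
print(list(bfs_edges_generator(g, 0)))   # [[(0, 1), (0, 2)], [(1, 3)]]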
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfs_nodes_generator(graph, source, reverse=...):\n ...", "def breadthfirst(self):\n import os\n cwd = os.getcwd()\n os.chdir('/Users/raj/Documents/algorithms_in_python/linked_lists/')\n from linked_collections import LinkedQueue\n os.chdir(cwd) # change to cwd\n if not self.is_empty():\n lq = LinkedQueue()\n lq.enqueue(self.root())\n while not lq.is_empty():\n p = lq.dequeue()\n yield p\n for c in self.children(p):\n lq.enqueue(c)", "def breadthfirst(self):\n if not self.is_empty():\n fringe = LinkedQueue() # known positions not yet yielded\n fringe.enqueue(self.root()) # starting with the root\n while not fringe.is_empty():\n p = fringe.dequeue() # remove from front of the queue\n yield p # report this position\n for c in self.children(p):\n fringe.enqueue(c) # add children to back of queue", "def bft(self, starting_vertex):\n \"\"\"\n Loop over every vertex in the queue. Print each vertex\n as we come to it. Find all the edges of the current vertex\n and add them to the queue and the cache.\n \"\"\" \n queue = [starting_vertex]\n isQueued = {starting_vertex}\n for vertex in queue:\n print(vertex)\n for edge in self.get_neighbors(vertex):\n if edge not in queue:\n queue.append(edge)\n isQueued.add(edge)", "def bfs(graph, start_vertex):\n\n queue = deque()\n queue.appendleft(start_vertex)\n explored_vertices = [start_vertex]\n\n while len(queue) != 0:\n vertex = queue.pop()\n neighbours = graph.neighbours(vertex)\n for neighbour in neighbours:\n if neighbour not in explored_vertices:\n explored_vertices.append(neighbour)\n queue.appendleft(neighbour)\n\n return explored_vertices", "def dfs_edges_generator(graph, source, reverse=...):\n ...", "def bft(self, starting_vertex):\n # create an empty queue and enqueue the starting vertex ID\n queue = Queue()\n queue.enqueue(starting_vertex)\n # create an emtpy Set to stoe the visited vertices\n visited = set()\n # while the queue is not empty ...\n while queue.size() > 0:\n # dequeue the first vertex\n vert = queue.dequeue()\n # if that vertex has not been visited..\n if vert not in visited:\n # mark it as visited\n visited.add(vert)\n print(vert)\n # then add all of its neighbors to the back of the queue\n for neighbor in self.vertices[vert]: # self.get_neighbors(vert)\n queue.enqueue(neighbor)", "def bft(self, starting_vertex):\n \"\"\" FIFO is LILO\n Create a queue\n Enqueue starting Vertex\n Create a set top store visited\n \n While the queue is NOT empty: e.g. > 0\n Dequeue the first Vertex\n Check IF NOT visited:\n Mark as visited\n enqueue ALL neighbors found if not already in queue\n \"\"\"\n # FIFO \n q = Queue() # create a queue ( e.g. empty [] )\n q.enqueue(starting_vertex) # Enqeue starting at vertex\n visited = set() # Create a set to store visited\n\n while q.size() > 0: # While the queue is NOT empty:\n # while q: # ERROR: Will add None into v, breaks _get_neighbors\n v = q.dequeue() # dequeue the first vertex\n\n if v not in visited: # Check IF NOT visited: \n print(v)\n visited.add(v) # if NOT visited, add to visited set\n\n for n in self.get_neighbors(v): # loop through all neighbors of v \n # if n not in q.queue: # !!! OPTIMIZATION !!!\n # q.enqueue(n) # enqueue ALL neighbors found (ex. add to end of queue)\n\n q.enqueue(n) # enqueue ALL neighbors found (ex. 
add to end of queue)", "def breadth_first_traversal(self, start):\n visited = []\n visited.append(start)\n start_visited = visited\n while True:\n temp = []\n for node_ in start_visited:\n for i in self.neighbors(node_):\n if i not in visited:\n visited.append(i)\n temp.append(i)\n start_visited = temp\n if not temp:\n break\n return visited", "def bfs(self):\r\n Q = [self.head()]\r\n visited = []\r\n while Q != []:\r\n cur = Q[0]\r\n visited.append(cur)\r\n Q = Q[1:]\r\n Q.extend([ch for ch in self.get_deps(cur.get_field('index'))])\r\n for x in reversed(visited):\r\n yield x", "def bft(self, starting_vertex):\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue \n q.enqueue(starting_vertex) # set enqueue with the starting vertex\n\n while q.size() > 0: # loop if the size is greater than 0\n v = q.dequeue() # dequeue and store \n\n if v not in visited: # if v has not in the set \n visited.add(v) # add v to the set \n print(v) \n # Then add all of its neighbors to the back of the queue\n for neighbor in self.vertices[v]: # loop through neighbors \n q.enqueue(neighbor) # add each neighbor to the end of the que ", "def _bfs_nodes(cls, graph, source, size, **kwargs):\n if size < 1:\n return iter(())\n\n return itertools.chain(\n (source,),\n itertools.islice((v for u, v in nx.bfs_edges(graph, source)), size-1)\n )", "def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)", "def _breadthfirst(self,root, action=lambda nodes: print(nodes)):\n nodes = []\n breadth = Queue()\n visited = []\n\n breadth.enqueue(root)\n visited.append(root)\n\n while breadth.front:\n front = breadth.dequeue()\n nodes.append(front.value)\n\n for child in self._adjacency_list.get(front.value):\n if not child.start_vertex in visited:\n visited.append(child.start_vertex)\n breadth.enqueue(child.start_vertex) \n\n return nodes", "def bft(self, starting_vertex):\n # Create a q and enqueue starting vertex\n qq = Queue()\n qq.enqueue([starting_vertex])\n # Create a set of traversed vertices\n visited = set()\n # eldest = []\n depth_counter = {} \n starter = 0 \n # visited = []\n # While queue is not empty:\n while qq.size() > 0:\n # dequeue/pop the first vertex\n path = qq.dequeue()\n # if not visited\n # print(visited)\n starter += 1\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n depth_counter[starter] = path[-1]\n # mark as visited\n visited.add(path[-1])\n # visited.append(path[-1])\n # enqueue all neightbors\n \n if not self.get_neighbors(path[-1]):\n \n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n depth_counter[starter] = path[-1]\n # print(depth_counter)\n # eldest.append(path[-1])\n else:\n # starter += 1\n for next_vert in self.get_neighbors(path[-1]): \n new_path = list(path)\n new_path.append(next_vert)\n qq.enqueue(new_path)\n\n\n return depth_counter[starter]", "def edges(self):\n\t\tleftback = self.center + self.left*self.wr - self.forward*self.hr\n\t\tleftfront = self.center + self.left*self.wr + self.forward*self.hr\n\t\trightfront = 
self.center - self.left*self.wr + self.forward*self.hr\n\t\trightback = self.center - self.left*self.wr - self.forward*self.hr\n\t\tyield (leftback, leftfront)\n\t\tyield (leftfront, rightfront)\n\t\tyield (rightfront, rightback)\n\t\tyield (rightback, leftback)", "def breadth_first(self):\n nodes_to_vist = []\n curr = self._root\n nodes_to_vist.append(curr)\n while len(nodes_to_vist):\n curr = nodes_to_vist[0]\n if curr._lkid:\n nodes_to_vist.append(curr._lkid)\n if curr._rkid:\n nodes_to_vist.append(curr._rkid)\n yield curr._data\n nodes_to_vist.remove(curr)", "def breadth_first(graph,start, end):\n queue = []\n queue.append([start])\n while queue:\n path = queue.pop(0)\n node = path[-1]\n if node == end:\n return path\n for adjacent in graph.get(node, []):\n new_path = list(path)\n new_path.append(adjacent)\n queue.append(new_path)", "def bfs(maze):\n # TODO: Write your code here\n frontier = Queue()\n visited = []\n path = []\n ret = []\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives.copy())\n frontier.put(start)\n explored = []\n \n\n while not frontier.empty(): # while frontier queue is not empty\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n objectivesLeft = currentState.objectives()\n\n if objectivesLeft.count(currentCell) != 0:\n\n objectivesLeft.remove(currentCell)\n \n # all objectives found, initialise backtrace and exit loop\n # if len(objectivesLeft) == 0:\n path.append(currentState)\n ret.append(currentCell)\n visited.append(currentState)\n break\n\n # current cell is not objective nor visited\n if visited.count(currentState) == 0:\n explored.append(currentCell)\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectivesLeft)\n\n # if neighbor is not visited, add it to the frontier\n if visited.count(neighbor) == 0:\n neighbor.setParent(currentState)\n frontier.put(neighbor)\n\n visited.append(currentState)\n\n #backtrace\n while path[0] != start:\n\n currentState = path[0]\n path.insert(0, currentState.parent())\n ret.insert(0, currentState.parent().cell())\n\n return ret", "def BFS(self, start_vertex):\n yield from self._search(start_vertex, kind='BFS')", "def BreadthFirstSearch(graph, source):\r\n \r\n # Dictionary dataInfo will be used to store the information about each vertex. (Ancestors, descendants, distance from source, and color)\r\n dataInfo = {} \r\n \r\n # List queue will be used to store the vertices currently in the queue, these vertices will all be gray.\r\n queue = []\r\n \r\n # Loops through the vertices in the graph, creates a key in the dictionary for each vertice, with default values.\r\n for vertex in graph[\"V\"]:\r\n dataInfo[str(vertex)] = {\"ancestor\": \"\", \"descendants\": [], \"distance\": \"\", \"color\": \"white\"}\r\n \r\n # At key source (variable) in dataInfo dictionary, key ancestor is set to have no value other than \"NA\" (as it is the starting point), and distance to 0 (as it will always be zero as it is the source).\r\n dataInfo[str(source)][\"ancestor\"] = \"NA\"\r\n dataInfo[str(source)][\"distance\"] = 0\r\n\r\n def symmetricVertex(edge, otherVertex):\r\n \r\n \"\"\"\r\n Function symmetricVertex takes arguments edge, a list of an edge from the graph dictionary, and otherVertex, an integer that is the other vertex in the edge with the sourceVertex. 
The function will return the point other than the otherVertex, and will be used to find adjacent vertices relative to the current vertex in the queue. Example: edge ([1, 2]), otherVertex (1), the function will return 2.\r\n \"\"\"\r\n \r\n for num in edge:\r\n if num != otherVertex:\r\n return num\r\n \r\n\r\n def pathFinder(graph, sourceVertex):\r\n \r\n \"\"\"\r\n Function pathFinder takes arguments graph, a dictionary, with the same keys for the edges and the vertices and sourceVertex, an integer. The function will loop through all of the edges in the graph and find adjacent vertices relative to the current sourceVertex. sourceVertex values will be in the queue. The function will edit dictionaries and lists, not return any value.\r\n \"\"\"\r\n \r\n # List removeEdges will be used to store the edges that will be removed from the graph dictionary after the loop ends. Makes the code more efficient, as you don't want to loop through a million vertices every time, now do you?\r\n removeEdges = []\r\n \r\n # Loop through edges in the graph, will be used to find adjacent vertices.\r\n for edge in graph[\"E\"]:\r\n \r\n # If the sourceVertex is in the edge and the edge is not discovered yet, then edit and change values in the main dictionary, dataInfo.\r\n if (sourceVertex in edge) and (dataInfo[str(symmetricVertex(edge, sourceVertex))] != \"gray\"):\r\n otherVertex = symmetricVertex(edge, sourceVertex)\r\n \r\n # Adds variable otherVertex to the descendants of the sourceVertex.\r\n dataInfo[str(sourceVertex)][\"descendants\"].append(otherVertex)\r\n \r\n # Updates key(otherVertex) to correct values. Ancestor is always the sourceVertex, the distance is always the distance of sourceVertex incremented by one, and the color is updated to gray as it is added to the queue.\r\n dataInfo[str(otherVertex)] = {\"ancestor\": sourceVertex, \"descendants\": [], \"distance\": (dataInfo[str(sourceVertex)][\"distance\"] + 1), \"color\": \"gray\"}\r\n \r\n # Edge includes two discovered edges, so it will be removed to stop redundancy. 
It is added to the removeEdges list.\r\n removeEdges.append(edge)\r\n \r\n # Appends the discovered vertex to the queue.\r\n queue.append(otherVertex)\r\n \r\n # After the loop ends, the edges that contain the source vertex have been exhausted, so the color is updated to black.\r\n dataInfo[str(sourceVertex)][\"color\"] = \"black\" \r\n \r\n # If the sourceVertex is in the queue, it is removed, as all of the edges containing it have been exhausted.\r\n if sourceVertex in queue:\r\n queue.remove(sourceVertex)\r\n \r\n # Loop through the edges in the removeEdges list, each edge will be removed.\r\n for edge in removeEdges:\r\n graph[\"E\"].remove(edge)\r\n \r\n # The function pathFinder is called on the graph and the source vertex, which sets up the queue.\r\n pathFinder(graph, source)\r\n \r\n # While the list queue contains values, the pathFinder function is called on the graph, and the queue value at index 0.\r\n while len(queue) != 0:\r\n pathFinder(graph, queue[0])\r\n \r\n # Loop below is for formatting of the data, makes it easier to read.\r\n for key in dataInfo:\r\n print \"Vertex: \" + key + \", Distance: \" + str(dataInfo[key][\"distance\"]) + \", Ancestor: \" + str(dataInfo[key][\"ancestor\"]) + \", Descendants: \" + str(dataInfo[key][\"descendants\"]) + \", Color: \" + str(dataInfo[key][\"color\"]) + \".\" \r\n \r\n # Returns dictionary dataInfo.\r\n return dataInfo", "def breadth_first_search(self):\r\n queue = [self.root]\r\n while queue:\r\n node = queue.pop()\r\n yield node\r\n queue.extend(node.children)", "def breadth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n q = collections.deque(initial_nodes_iter)\n while q:\n node = q.popleft()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n q.extend(connected_to_functor(node))", "def breadth_first(self):\n q = Queue()\n q.enqueue(self)\n while q.size() > 0:\n node = q.dequeue()\n yield node.val\n if node.left:\n q.enqueue(node.left)\n if node.right:\n q.enqueue(node.right)", "def bft(self, starting_vertex):\n # create an empty queueueueueueueueueue class\n to_visit = Queue()\n # create an empty set\n visited = set()\n # populating the queueueueueueue with our starting vertex\n to_visit.enqueue(starting_vertex)\n\n # while loop to run while the queueueueueue is not empty\n while to_visit.size() > 0:\n v = to_visit.dequeue()\n # checking to see if the dequeueueued vertex is in our set or not\n if v not in visited:\n # if it is then it gets printed out\n print(v)\n # it then gets added to the visited set\n visited.add(v)\n # now we are checking the neighbors of the vertex and adding them\n # to the queueueueueueue\n for n in self.vertices[v]:\n to_visit.enqueue(n)", "def breadth_first(self):\n import queue\n keeper = queue.Queue()\n keeper.enqueue(self)\n while(keeper.size() != 0):\n temp = keeper.dequeue()\n if temp.val is not None:\n yield temp.val\n if temp.left is not None:\n keeper.enqueue(temp.left)\n if temp.right is not None:\n keeper.enqueue(temp.right)", "def bfs(maze):\n # TODO: Write your code here.\n start = maze.getStart() \n frontier = [] \n path = [] \n dim = 
maze.getDimensions()\n objs = maze.getObjectives()\n rows = dim[0]\n cols = dim[1]\n visited = {} # visited as a dictionary\n for i in range (0, rows):\n for j in range (0, cols):\n visited[(i,j)] = (-1, -1)\n frontier.append(start)\n visited[(i,j)] = (-2, -2)\n while frontier:\n pt = frontier.pop(0)\n if maze.isObjective(pt[0], pt[1]) == True:\n break\n else:\n list_of_neighbors = maze.getNeighbors(pt[0], pt[1])\n for i in list_of_neighbors:\n if visited.get(i) == (-1, -1): \n frontier.append(i)\n visited[i] = pt \n while pt != start:\n path.append(pt)\n pt = visited.get(pt)\n path.append(start)\n path.reverse()\n return path", "def bfs(graph,start):\n #keeps track of nodes to be visited\n queue = []\n #keeps track of nodes already visited\n explored = []\n queue.append(start)\n while queue:\n #remove first node from queue\n curr_node = queue.pop(0)\n #check if node is visited\n if curr_node not in explored:\n explored.append(curr_node)\n adjacent_nodes = graph[curr_node]\n #add adjacent nodes to queue\n for i in adjacent_nodes:\n queue.append(i)\n return explored", "def breadthFirstSearch(problem):\n #import pdb;pdb.set_trace()\n frontier = util.Queue()\n start_node = problem.getStartState()\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,()))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n\n explored.add(node[0])\n # exploratory code for SUPER-optimal solution:\n # by saving the path in explored, we assure that we explore the same cell even if\n # two different actions go through it:\n #explored.add(node)\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1]\n next_actions = actions + (action,)\n new_node = (nextState, next_actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def bfs(self, starting_vertex, destination_vertex):\n pass # TODO", "def bfs(array, neighbors, start, similar):\n match = get_item(array, start)\n block = {start}\n visit = deque(block)\n child = deque.popleft\n while visit:\n node = child(visit)\n for offset in neighbors:\n index = get_next(node, offset)\n if index not in block:\n block.add(index)\n if is_valid(array, index):\n value = get_item(array, index)\n if similar(value, match):\n visit.append(index)\n yield node", "def breadth_first_traverse(self) -> Generator:\n assist_queue = deque()\n assist_queue.append(self.root_node)\n while assist_queue:\n current_node = assist_queue.popleft()\n yield current_node\n if current_node.children:\n for child in current_node.children:\n assist_queue.append(child)", "def breadthFirstSearchPaths(problem):\n #import pdb;pdb.set_trace()\n frontier = util.Queue()\n start_node = problem.getStartState()\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,()))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n\n #explored.add(node[0])\n # exploratory code for SUPER-optimal solution:\n # by saving the path in explored, we assure that we explore the same cell even if\n # two different actions go through it:\n explored.add(node)\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1]\n next_actions = actions + (action,)\n 
new_node = (nextState, next_actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def breadth_first(self, start_node):\n \n # try:\n if start_node not in self._adjacency_list:\n raise KeyError('Nodes are not in the graph')\n\n q = Queue()\n q.enqueue(start_node)\n visited_nodes = {}\n visited_nodes[start_node] = True\n output = []\n\n while len(q):\n cur = q.dequeue()\n output.append(cur)\n neighbors = self._adjacency_list[cur]\n for n in neighbors:\n if n[0] not in visited_nodes:\n q.enqueue(n[0]) \n visited_nodes[n[0]] = True\n return output\n # except Exception as error:\n # return(f'{error}')", "def bfs(self, starting_vertex, destination_vertex):\n \"\"\" FIFO ir LILO\n Create a queue\n Enqueue PATH to starting Vertex\n Create a set top store visited vertices\n While the queue is NOT empty: e.g. > 0\n Dequeue the first PATH Vertex\n Get Vertex from END of PATH\n Check IF NOT visited:\n Mark as visited\n check if vertex is destination_vertex\n If TRUE, return path\n enqueue PATH to ALL of neighbors \n make COPY of current path\n add neighbor to path copy\n enqueue copy \n \"\"\"\n\n q = Queue() # Create a queue\n q.enqueue([starting_vertex]) # Enqueue starting at vertex into Queue (list)\n visited = set() # Create a set to store visited \n \n while q.size() > 0: # While the queue is NOT empty: \n path = q.dequeue() # Dequeue the first PATH Vertices\n v = path[-1] # Get Vertex from END of PATH\n\n if v not in visited: # Check IF NOT visited:\n visited.add(v) # Mark as visited\n\n if v == destination_vertex: # check if vertex is destination_vertex\n return path # If TRUE, return path, DONE\n\n for n in self.get_neighbors(v): # enqueue PATH to ALL of neighbors\n path_c = path [:] # make COPY of current path\n path_c.append(n) # add neighbor to path copy\n q.enqueue(path_c) # enqueue copy", "def bft(self, starting_vertex):\n # First we create an empty queue and enqueue the starting vertex\n qq = Queue()\n qq.enqueue(starting_vertex)\n\n # Then we create a set to store the vertices we visit\n visited = set()\n\n # Here we write a while loop that will run as long as the queue is not empty\n while qq.size() > 0:\n # Dequeue the first vertex\n # We dequeue the first vertex and set (v) to it\n v = qq.dequeue()\n\n # Next we check to see if that vertex has already been visited\n if v not in visited:\n # If if has not been visited, we print it and mark it as visited\n print(v)\n visited.add(v)\n\n # Then we add all of its neighbors to the back of the queue\n for next_vert in self.get_neighbors(v):\n qq.enqueue(next_vert)", "def bfs(gdict):\n q = deque()\n graph, vertices = create_graph(gdict, BFSVertex)\n\n v = input('Enter the start vertex or none for start with first vertex: ')\n print()\n if not v:\n v = vertices[0]\n\n try:\n v = graph[v]\n except KeyError:\n print('This vertex does not exist.')\n\n print(v)\n v.visit = 1\n q.append(v)\n while q:\n u = q.popleft()\n\n for a in u.adj:\n s = graph[a]\n if s.visit == 0:\n s.visit = 1\n s.distance = u.distance + 1\n s.father = u.name\n q.append(s)\n\n return graph", "def bft(self, starting_vertex):\n # TODO\n\n # mark all vertices as not visited \n visited = set()\n\n #create a queue\n queue = Queue()\n\n path = []\n\n #add starting vertex to the queueu\n queue.enqueue(starting_vertex)\n\n while len(queue.queue) > 0:\n # remove pop a vertex from the queue\n\n current_vertex = queue.dequeue()\n if current_vertex not in visited: \n path.append(current_vertex)\n 
visited.add(current_vertex)\n edges = self.get_neighbors(current_vertex)\n\n for edge in edges:\n queue.enqueue(edge)\n\n return path", "def bfs(maze, current_node):\n q = collections.deque()\n\n q.append(current_node)\n\n while len(q) > 0:\n current_node = q.popleft()\n maze[current_node.row][current_node.cell] = 1\n yield maze\n\n for neighbour in get_neighbours(maze, current_node):\n if maze[neighbour.row][neighbour.cell] == 2:\n backtrack(maze, neighbour)\n yield maze\n return\n else:\n q.append(neighbour)\n maze[neighbour.row][neighbour.cell] = -2\n\n yield maze\n maze[current_node.row][current_node.cell] = -3\n time.sleep(args.speed)", "def bft(self, starting_vertex):\n \n \"\"\"\n Plan:\n - Start at given index. Add that index to the Q.\n - While len(Q) is greater than 0:\n - Check if q[0] has children.\n - If so then make sure children have not been visited, then add those children to the Q.\n - If they have been visited, skip over the child and DO NOT add to Q # !! will result in infinite loop !!\n \"\"\"\n\n queue = Q()\n visited = []\n\n queue.add(starting_vertex)\n\n while len(queue):\n current = queue.first()\n children = self.vertices[current]\n \n if len(children) > 0:\n for child in children:\n if child not in visited:\n queue.add(child)\n else: continue\n\n print(current)\n visited.append(current)\n queue.pop()", "def bfs(self, starting_vertex, destination_vertex): # great if you know to result is somewhere close to the root/start\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue\n q.enqueue([starting_vertex]) # set the starting_vertex with enqueue \n\n while q.size() > 0:\n path = q.dequeue() # dequeue and store first path\n v = path[-1] # store the vertex from the end of path \n\n if v == destination_vertex: # if v is equal to the destination_vertex\n return path # return the path \n\n if v not in visited: # if v has not been visited yet \n visited.add(v) # add v to the vistied set \n\n for neighbor in self.vertices[v]: # loop through the neighbors \n path_copy = list(path) # make a copy of the path \n path_copy.append(neighbor) # append each neighbor to the back of the path copy \n q.enqueue(path_copy) # enqueue the path copy to the queue ", "def bfs(graph, start_node):\n start_node.distance = 0\n start.set_predecessor(None)\n queue = list()\n queue.append(start_node)\n while (len(queue) > 0):\n current_vertex = queue.pop()\n current_vertex.setState = \"visiting\"\n for vertex in current_vertex.links():\n if (vertex.getState == \"unvisited\"):\n vertex.setState == \"tobevisited\"\n vertex.set_predecessor(current_vertex)\n vertex.distance = current_vertex.distance + 1\n queue.append(vertex)\n current_vertex.setState = \"visited\"", "def greedy_bfs(start: Vector2D, goal: Vector2D, grid: Scene, heuristic: Callable[[Vector2D, Vector2D], float]) -> list:\n frontier = PriorityQueue() # nodes to be explored\n prev_node = dict() # maps n to node that precedes it in cheapest currently-known path from start to n\n explored = [] # keeps track of previously explored nodes, to be drawn later\n\n frontier.put(start, heuristic(start, goal))\n prev_node[start] = None\n\n while not frontier.empty():\n current = frontier.get()\n\n if current == goal: # solution found!\n return (reconstruct_path(goal, prev_node), explored[1:]) # [1: to remove 'start']\n\n grid.set_cell(current, Cell(val = CellType.searched))\n explored.append(current)\n \n for neighbor in grid.get_unexplored_neighbors(current):\n frontier.put(neighbor, 
heuristic(neighbor, goal))\n prev_node[neighbor] = current\n\n \n # If frontier empty but goal was never reached, no solution was found\n return ([], explored[1:]) # [1: to remove 'start']", "def bfs(self, starting_vertex, destination_vertex):\n # create an empty queue and enqueue A-PATH-TO the starting vertex ID\n # create a Set to store the visited vertices\n # while the queue is not empty ..\n # dequeue the first PATH\n # grab the last vertex from the PATH\n # if that vertex has not been visited ..\n # check if its the target\n #if yes, return path\n #mark it as visited\n # add A PATH TO its neighbots to the back of the queue\n # copt the path\n # append the neighbor to the back\n \n \n # create an empty Queue \n queue = Queue()\n #push the starting vertex ID as list\n queue.enqueue([starting_vertex])\n # create an empty Set to store the visited vertices\n visited = set()\n # while the queue is not empty ...\n while queue.size() > 0:\n # dequeue the first vertex\n path = queue.dequeue()\n vert = path[-1]\n # if that vertex has not been visited ..\n if vert not in visited:\n #check for target\n if vert == destination_vertex:\n return path\n # mark it is visited\n visited.add(vert)\n # then add all of its neighbors to the top of the stack\n for neighbor in self.vertices[vert]: #self.get_neighbors(vert)\n #copy path to avoid pass by reference\n new_path = list(path) # make a copy\n new_path.append(neighbor)\n queue.enqueue(new_path)", "def breadth_fs(start: Vector2D, goal: Vector2D, grid: Scene, *args) -> (list, list):\n frontier = Queue() # nodes to be explored\n prev_node = dict() # maps n to node that precedes it in cheapest currently-known path from start to n\n explored = [] # keeps track of previously explored nodes, to be drawn later\n\n prev_node[start] = None\n frontier.put(start)\n\n while not frontier.empty():\n current = frontier.get()\n grid.set_cell(current, Cell(val = CellType.searched))\n \n for neighbor in grid.get_unexplored_neighbors(current):\n prev_node[neighbor] = current\n frontier.put(neighbor)\n explored.append(neighbor)\n if neighbor == goal:\n return (reconstruct_path(goal, prev_node), explored)\n\n grid.set_cell(neighbor, Cell(val = CellType.searched))\n\n # If frontier empty but goal was never reached, no solution was found\n return ([], explored)", "def breadth_first_search(self, vertex):\n\n visited = [False] * self.V\n queue = list()\n # Appending the vertex to an empty queue\n queue.append(vertex)\n\n # Marking the Visiting Vertex as True\n visited[vertex] = True\n print(\"\\n\\nBreadth First Search: \", end=\" \")\n while queue:\n # Popping the First Element in queue\n s = queue.pop(0)\n print(s, end=\" \")\n\n # Visiting the adjacent vertices of queue\n # And Validating if the vertex is visited\n for i in self.adj_list[s]:\n if not visited[i]:\n queue.append(i)\n visited[i] = True", "def bft(self, starting_vertex):\n # create a plan to visit queue and add starting_vertex to it\n plan_to_visit = Queue()\n plan_to_visit.enqueue(starting_vertex)\n # create a set for visited_vertices\n visited_vertices = set()\n # while the plan_to visit queue is not Empty:\n while plan_to_visit.size() > 0:\n # dequeue the first vertex on the queue\n current_vertex = plan_to_visit.dequeue()\n # if its not been visited\n if current_vertex not in visited_vertices:\n # print the vertex\n print(current_vertex)\n # mark it as visited, (add it to visited_verticles)\n visited_vertices.add(current_vertex)\n # add all unvisited neighbors to the queue\n for neighbor in 
self.get_neighbors(current_vertex):\n if neighbor not in visited_vertices:\n plan_to_visit.enqueue(neighbor)", "def _BFS(self, start_vertex, visited, callback):\n queue = []\n queue.insert(0, start_vertex)\n visited.add(start_vertex)\n while queue:\n curr_vertex = queue.pop()\n callback(curr_vertex)\n for vertex in self.neighbors(curr_vertex):\n if vertex not in visited:\n queue.insert(0, vertex)\n visited.add(vertex)", "def breadthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Queue() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def bfs(self, startNode):\n queue = Queue()\n\n # Mark all the nodes as not visited\n visited = {}\n for node in self.getNodes():\n visited[node] = False\n\n queue.enqueue(startNode)\n\n while not queue.isEmpty():\n s = queue.dequeue()\n visited[s] = True\n print s,\n\n # enqueue all the adjacent vertices to s\n # if they've not already been visited\n\n for adjacentNode in self.getAdjacent(s):\n if visited[adjacentNode] is False:\n queue.enqueue(adjacentNode)\n visited[adjacentNode] = True", "def get_bfs(self, s):\n # create a queue for BFS\n queue = []\n # mark all the vertices as not visited\n visited = [False] * (len(self.graph))\n # mark the start node as visited and enqueue it\n visited[s] = True\n queue.append(s)\n results = []\n\n while queue:\n # dequeue a vertex from queue and append to results.\n p = queue.pop(0)\n results.append(p)\n # get all adjacent vertices of the dequeued vertex s,\n # and for any unvisited adjacent, mark it visited and enqueue it.\n for v in self.graph[p]:\n if visited[v] is False:\n visited[v] = True\n queue.append(v)\n\n return results", "def breadth_first_list(graph, current=\"a\"):\n queue = []\n queue.append(current)\n while queue:\n current = queue.pop(0)\n print(current)\n for node in graph.get(current):\n queue.append(node)", "def main():\n n = int(input(\"Enter the number of nodes: \"))\n m = int(input(\"Enter the number of edges: \"))\n \n adjList = [[] for i in range(n)]\n \n print(\"Enter the edges: \")\n for i in range(m):\n x, y = input().split(\" \")\n x = int(x)\n y = int(y)\n adjList[x].append(y)\n adjList[y].append(x)\n \n s = 
int(input(\"Enter the source: \"))\n \n BFS(adjList, s, n)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Queue()\n return GraphSearch(problem, 'bfs').search(fringe)", "def test_breath_first_traversal(our_bsts):\n bft = []\n for i in our_bsts[0].breadth_first_traversal():\n bft.append(i)\n assert bft == our_bsts[3]", "def breadthFirstSearch(problem):\n\n frontier = util.Queue()\n # print 'Create frontier'\n initial_node = node(problem.getStartState(), 0, [], 0)#(state,depth,path_actions,path_cost)\n frontier.push(initial_node)\n # print 'Push ',repr(initial_node.state)\n frontierSet = set([initial_node.state])\n explored = set() #initialize the explored set to be empty\n\n while True:\n if frontier.isEmpty() == True: raise Exception, \"The frontier was emptied\"#if the frontier is empty then return failure\n currNode = frontier.pop()#HERE1\n frontierSet.remove(currNode.state)\n # print 'Remove',repr(currNode.state)\n # print 'State: ' + repr(currNode.state) + '. Depth: ' + repr(currNode.depth) + '. Path Cost: ' + repr(currNode.path_cost) + '. Path Actions: ' + repr(currNode.path_actions) + '.\\n'\n if problem.isGoalState(currNode.state) == True:\n print 'Goal reached!'\n return currNode.path_actions\n explored.add(currNode.state)\n for succ in problem.getSuccessors(currNode.state):\n # print 'Succ: ',repr(succ[0])\n succNode = node(succ[0], currNode.depth + 1, currNode.path_actions + [succ[1],], currNode.path_cost + succ[2])\n if (succNode.state not in explored) and (succNode.state not in frontierSet):\n \"\"\"Aca si hay que verificar si es que ya esta en la frontera porque es formato FIFO. Entonces los nodos que estan en la lista\n necesariamente van a ser verificados antes de que se vuelva a insertar otro.\n \"\"\"\n frontier.push(succNode)\n # print 'Push ',repr(succNode.state)\n frontierSet.add(succNode.state)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n if problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Queue() #bfs uses a queue\n frontier.push(initialNode)\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list:\n if problem.isGoalState(child.state):\n return child.solution()\n frontier.push(child)\n return []\n util.raiseNotDefined()", "def dft(self, starting_vertex):\n \"\"\" LIFO\n Create a stack \n Push starting Vertex\n Create a set to store visited\n While the stack is NOT empty: e.g. 
> 0\n Pop the last added Vertex\n Check IF NOT visited:\n Mark as visited\n\n\n Push ALL of neighbors\n \"\"\"\n s = Stack() # Create a stack\n s.push(starting_vertex) # Push starting Vertex\n visited = set() # Create a set to store visited\n\n while s.size() > 0: # While the stack is NOT empty: e.g. > 0\n v = s.pop() # Pop the last added Vertex\n\n if v not in visited: # Check IF NOT visited: e.g. > 0\n print(v)\n visited.add(v) # Mark as visited\n\n for n in self.get_neighbors(v): # Check IF NOT visited:\n s.push(n) # Push ALL of neighbors ", "def bfs_paths(self, start: str, goal: str) -> List[Path]:\n queue = [(start, [start])]\n while queue:\n (node, path) = queue.pop(0)\n if node not in self.graph:\n yield []\n for _next in set(self.graph[node]) - set(path):\n if _next == goal:\n yield path + [_next]\n elif _next in self.graph:\n queue.append((_next, path + [_next]))", "def bfs(g: nx.Graph, start_node: Hashable) -> List[Hashable]:\n list_ = list(g.neighbors(start_node))\n len_graph = g.number_of_nodes()\n list2 = [start_node]\n while len(list2) < len_graph:\n for i in range(len(list_) - 1):\n if list_[0] not in list2:\n list2.append(list_[0])\n list_ += list(g.neighbors(list_[0]))\n list_.remove(list_[0])\n # nx.draw(g, with_labels=True)\n # plt.show()\n return list2", "def get_bipartition(g):\n # Write your code here.\n colorArr = [-1] * (len(g.nodes()) + 1)\n for node in g.nodes():\n start = g.neighbors(node)\n if len(start)>0:\n src = start.pop()\n break\n colorArr[src] = 1\n queue = []\n queue.append(src)\n while (queue):\n u = queue.pop()\n for v in g.nodes():\n if g.has_edge(u, v) and colorArr[v] == -1:\n colorArr[v] = 1 - colorArr[u]\n queue.append(v)\n elif g.has_edge(u, v) and colorArr[u] == colorArr[v]:\n return None\n\n red = set()\n for i in range(1, len(colorArr)):\n if colorArr[i] == 1:\n red.add(i)\n return list(red)\n\n\n\n # Hint! If you'd like to test out these commands without\n # writing a full-fledged program, you might want to familiarise\n # yourself with the Python interactive shell or IPython (available\n # on at least some Aalto IT computers)\n\n # Create a simple line graph g: \"(1)->(2)->(3)\"\n # (The creation parameter is a dict of {node: list_of_neighbors},\n # but this is not something you will be needing in your code.)\n # >>> from networkx import Graph \n # >>> g = Graph({1: [2], 2: [3]})\n # >>> g.number_of_nodes()\n # 3\n\n # Example. Iterate over the nodes and mark them as visited\n # >>> visited = set()\n # >>> for node in g.nodes_iter(): # There is also g.nodes(), which returns a list\n # ... # do some work here\n # ... visited.add(node)\n \n # Example. Given a Node v, get all nodes s.t. there is an edge between\n # v and that node\n # >>> g.neighbors(1)\n # [2]\n\n # Example. Get the edges of the graph:\n # >>> e.edges() # as with nodes, there is also g.edges_iter()\n # [(1, 2), (2, 3)]\n\n # For more information, consult the NetworkX documentation:\n # https://networkx.github.io/documentation/networkx-1.10/tutorial/tutorial.html", "def bfs(graph, initial_node, dest_node):\n return queue_search(graph, initial_node, dest_node, queue.Queue())", "def dft(self, starting_vertex):\n \"\"\"\n Loop until the stack is empty. Remove the last added\n vertex and store it's value as the current vertex. 
\n Print the current vertex then loop over all it's edges.\n Add them to the stack and the cache.\n \"\"\" \n stack = [starting_vertex]\n stacked = {starting_vertex}\n while len(stack) > 0:\n currentVertex = stack.pop(-1)\n print(currentVertex)\n for edge in self.get_neighbors(currentVertex):\n if edge not in stacked:\n stack.append(edge)\n stacked.add(edge)", "def _create_node_iterator(self) -> Iterator[GraphNode]:\n return\n yield", "def breadth_first_traversal(self):\n breadth_first = []\n h = self.root.get_height() \n for i in range(h+2): \n self.level = []\n self.print_level(self.root, i + 1) \n breadth_first.append(self.level)\n return breadth_first", "def bfs_path(G, source, destination):\n vertex_dict = dict(nx.bfs_predecessors(G, source))\n queue = deque()\n queue.append(destination)\n while queue[-1] != source:\n try:\n queue.append(vertex_dict[queue[-1]])\n except KeyError:\n print(f\"Source: {source}, Dest: {destination}\")\n print(f\"Key {queue[-1]} not found in\")\n print_dict(\"bfs\", vertex_dict)\n break\n queue.reverse()\n return queue", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n \"\"\"\n ALGORITHM FOR bFS \n Create a queue Q\n enqueue root node to Q\n while Q is not empty:\n dequeu an item v from Q\n mark the item v as visited \n for each node w that is directed from v:\n enqueue w to Q\n \n \n \"\"\"\n\n fringes = util.Queue()\n explored =[]\n fringes.push((problem.getStartState(),[]))\n\n while(not fringes.isEmpty()):\n currentNode,currDir = fringes.pop()\n if problem.isGoalState(currentNode):\n goal = currentNode\n pathToGoal = currDir\n #print \"final path is : \", pathToGoal\n\n break\n # print \"HOraaay goal has been found === > \", currentNode\n\n if not (currentNode in explored):\n explored.append(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n fringes.push((childNode[0],currDir+[childNode[1]]))\n\n\n return pathToGoal", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n startState=problem.getStartState()\n currentLocation = startState\n\n #for GSA implementation\n exploredStates = []\n exploredStates.append(startState)\n \n #To transform the graph to stack for better access in BFS\n frontierQueue = util.Queue()\n for frontier in problem.getSuccessors(startState):\n frontierRoute = frontier + (frontier[1],)\n frontierQueue.push(frontierRoute)\n\n currentRoute = []\n\n #start BFS\n while not(frontierQueue.isEmpty()):\n currentStage = frontierQueue.pop()\n currentState = currentStage[0]\n currentRoute = currentStage[3] \n \n if problem.isGoalState(currentState): \n break\n \n if currentState not in exploredStates:\n for frontier in problem.getSuccessors(currentState):\n if frontier[0] not in exploredStates:\n nextRoute = currentRoute + \",\" + frontier[1]\n frontierRoute = frontier + (nextRoute,)\n frontierQueue.push(frontierRoute)\n \n exploredStates.append(currentState)\n return currentRoute.split(\",\")\n\n util.raiseNotDefined()", "def bfs(g: nx.Graph, start_node: Any) -> list:\r\n\tx = [start_node]\r\n\tqueue = [start_node]\r\n\ttracks = {node: [] for node in g.nodes}\r\n\twhile queue:\r\n\t\telement = queue.pop(0)\r\n\t\ty = list(g.neighbors(element))\r\n\t\tfor node in y:\r\n\t\t\tif node not in x:\r\n\t\t\t\tx.append(node)\r\n\t\t\t\tqueue.append(node)\r\n\t\t\t\ttracks[node].extend((*tracks[element], element))\r\n\treturn x", "def bfs(self, start):\n output_list = []\n queue = Queue()\n queue.put(start)\n visited = set(start)\n while not queue.empty():\n current_node = queue.get()\n 
output_list.append(current_node)\n visited.add(current_node)\n for node in self.__graph[current_node].neighbours:\n if node.name not in visited:\n queue.put(node.name)\n return output_list", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n startState = problem.getStartState()\n explored = set()\n Frontier = util.Queue()\n Frontier.push([[startState,None,0]])\n while not Frontier.isEmpty():\n StateTriples = Frontier.pop()\n node = StateTriples[-1][0]\n if problem.isGoalState(node):\n solution = []\n for i in StateTriples[1:]:\n solution = solution + [i[1]]\n return solution\n if node not in explored:\n explored.add(node)\n for i in problem.getSuccessors(node):\n Frontier.push(StateTriples+[list(i)])\n print(Frontier.isEmpty())\n util.raiseNotDefined()", "def iteredges(self):\n for source, targets in self.successors.items():\n for target in targets:\n yield source, target", "def traverse_breadth_first(self, src: int = 0, graph: GraphInterface = None):\n if not isinstance(graph, DiGraph) or graph is None or self._graph.get_node(src) is None:\n return\n curr = graph.get_node(src)\n\n q = Queue()\n\n q.put(curr)\n curr.tag += 1\n\n while not q.empty():\n\n curr = q.get()\n out_edges = graph.all_out_edges_of_node(curr.key)\n\n for i in out_edges:\n out_edge = out_edges[i]\n neighbor = graph.get_node(out_edge.dest) # Get curr's neighbor\n if neighbor.tag == curr.tag - 1:\n neighbor.tag += 1 # If un-tagged -> tag it.\n q.put(neighbor) # and enqueue it", "def bfs(self, s, visit=None):\n visited = set()\n\n # initialize the queue with the start vertex\n queue = deque([s])\n \n # loop until the queue is empty\n while queue:\n\n # get the next vertex\n v = queue.popleft()\n\n # skip it if it's already visited\n if v in visited: continue\n\n # mark it visited, then invoke the visit function\n visited.add(v)\n if visit: visit(v)\n\n # add its out vertices to the queue\n queue.extend(self.out_vertices(v))\n\n # return the visited vertices\n return visited", "def bfs(self, start_node: int, flag: bool) :\n for n in self.dw_graph.get_all_v().values():\n n.visited = False\n queue = [self.dw_graph.nodes[start_node]]\n self.dw_graph.nodes[start_node].visited = True\n node_list = [start_node]\n while queue:\n current = queue.pop()\n if not flag:\n for e in self.dw_graph.all_out_edges_of_node(current.node_id).keys():\n if not self.dw_graph.nodes[e].visited:\n self.dw_graph.nodes[e].visited = True\n queue.append(self.dw_graph.nodes[e])\n node_list.append(e)\n else:\n for e in self.dw_graph.all_in_edges_of_node(current.node_id).keys():\n if not self.dw_graph.nodes[e].visited:\n self.dw_graph.nodes[e].visited = True\n queue.append(self.dw_graph.nodes[e])\n node_list.append(e)\n\n return node_list", "def get_forward_init(node, graph):\n\tedges = []\n\tfor e in node.edges:\n\t\tif node.label <= graph.nodes[e.to].label:\n\t\t\tedges.append(e)\n\treturn edges", "def BFSUtility(obj,visited,vertex):\n stack = []\n subGraph = []\n stack.insert(0,vertex)\n visited[vertex] = True\n while(stack):\n subGraph.append(stack.pop())\n for nbrVertex in obj.adjList[subGraph[-1]]:\n if visited[nbrVertex]:\n continue\n stack.insert(0,nbrVertex)\n visited[stack[0]] = True\n return subGraph", "def breadth_first_search(root_node):\n if root_node.goal_test():\n return root_node\n\n frontier = [root_node]\n explored = []\n\n while frontier:\n node = frontier.pop(0)\n explored.append(node)\n\n for successor in node.generate_successors():\n if not successor:\n continue\n if not (successor.is_in(frontier) and 
successor.is_in(explored)):\n if successor.goal_test():\n return successor\n frontier.append(successor)\n return None # No Solution", "def bfs(graph, i):\n visited = set()\n\n unexplored = deque()\n unexplored.append(i)\n\n while unexplored:\n curr = unexplored.popleft()\n visited.add(curr)\n edges = graph[curr]\n\n for edge in edges:\n if edge in visited:\n continue\n else:\n unexplored.appendleft(edge)\n\n return visited", "def bfs(self, starting_vertex, destination_vertex):\n # creating an empty list of visited vertices\n visited = []\n # creating a queue with the starting vertex in it\n queue = [[starting_vertex]]\n # while we have items in our queueueue\n while queue:\n # pop the first item in the queueueue\n path = queue.pop(0)\n # getting the last value in our path\n node = path[-1]\n # checking to see if it has been seen already or not\n if node not in visited:\n # checking the neighbors of our farthest node\n for n in self.vertices[node]:\n # creating a new path list and appending the nieghbors\n # to it and the queueueueue\n new_path = list(path)\n new_path.append(n)\n queue.append(new_path)\n # if the destination is in the new_path\n # we are done and return the new path\n if n == destination_vertex:\n return new_path\n # adding the node to the visited list\n visited.append(node)", "def topological_nodes_generator(graph, reverse=...):\n ...", "def bfs_visited(ugraph, start_node):\r\n queue = deque()\r\n visited = set() #Set is enough here.\r\n visited.add(start_node)\r\n queue.append(start_node)\r\n while len(queue) != 0:\r\n temp_node = queue.popleft()\r\n for neighbor in ugraph[temp_node]: #In graph theory, neighborhood is \r\n if neighbor not in visited: #well defined, so could be used directely.\r\n visited.add(neighbor)\r\n queue.append(neighbor)\r\n return visited", "def bfs_path(G, source, destination):\n vertex_dict = dict(nx.bfs_predecessors(G, source))\n queue = deque()\n queue.append(destination)\n while queue[-1] != source:\n queue.append(vertex_dict[queue[-1]])\n queue.reverse()\n return queue", "def bfs(self, starting_vertex, destination_vertex):\n visited = set()\n paths = [[starting_vertex]]\n \"\"\"\n For every list in paths. If the last item in the list is \n the destination return the list. If the last item is not \n in the visited cache add it and make a new path for all \n of it's edges. 
If the last item has been visited remove \n it from the paths list.\n \"\"\"\n for path in paths:\n vertex = path[-1]\n if vertex == destination_vertex:\n return path\n if vertex not in visited:\n visited.add(vertex)\n for key in self.get_neighbors(vertex):\n newPath = path + [key]\n paths.append(newPath)", "def bfs(get_neighbors, source, target):\n\n parents = {}\n visited = set()\n queue = collections.deque()\n queue.append(source)\n while queue:\n vertex = queue.popleft()\n if vertex == target:\n return _backtrack(target, lambda v: parents.get(v))\n if vertex not in visited:\n visited.add(vertex)\n for neighbor in filter(lambda n: n not in visited, get_neighbors(vertex)):\n queue.append(neighbor)\n parents[neighbor] = vertex\n return []", "def _forest_nodes(self):\n\n self.arbor._grow_tree(self)\n root = self.root\n for link in root._links:\n yield self.arbor._generate_tree_node(self.root, link)", "def edges(self):\n vertices = self.vertices(closed=True)\n\n for i in range(len(self)):\n yield(vertices[:, i], vertices[:, i+1])", "def breadth_first_traversal(self, start_val):\n traversed = []\n visited, queue = set(), []\n queue.append(start_val)\n while queue:\n current = queue.pop(0)\n if current in visited:\n continue\n visited.add(current)\n queue.extend(self._g[current])\n traversed.append(current)\n return traversed", "def one_to_all_bfs(start, num_vertexes, edges, INF=9223372036854775807):\n distances = [INF] * num_vertexes\n distances[start] = 0\n to_visit = [start]\n while to_visit:\n next_visit = []\n for frm in to_visit:\n for to in edges[frm]:\n new_cost = distances[frm] + 1\n if new_cost < distances[to]:\n distances[to] = new_cost\n next_visit.append(to)\n to_visit = next_visit\n return distances", "def bft(self, starting_vertex):\n # make a queue\n q = Queue()", "def test_bfs_visited(self):\n\n graph1 = {0: set()}\n self.assertEqual(p.bfs_visited(graph1, 0), set([0]))\n\n graph2 = {0: set([1]), 1: set([0])}\n self.assertEqual(p.bfs_visited(graph2,0 ), set([0,1]))\n self.assertEqual(p.bfs_visited(graph2,1 ), set([0,1]))\n\n graph3 = {0: set([]), 1: set([])}\n self.assertEqual(p.bfs_visited(graph3,0 ), set([0]))\n self.assertEqual(p.bfs_visited(graph3,1 ), set([1]))\n\n graph4 = {0: set([]), 1: set([])}\n self.assertEqual(p.bfs_visited(graph4,0 ), set([0]))\n self.assertEqual(p.bfs_visited(graph4,1 ), set([1]))\n\n graph5 = {0: set([]), 1: set([2]), 2: set([1])}\n self.assertEqual(p.bfs_visited(graph5,0 ), set([0]))\n self.assertEqual(p.bfs_visited(graph5,2 ), set([1,2]))\n\n graph6 = {\"a\": set([]), \"b\": set([\"c\"]), \"c\": set([\"d\",\"b\"]), \"d\":set([\"e\", \"c\"]), \"e\":set([\"d\"]) }\n self.assertEqual(p.bfs_visited(graph6,\"e\" ), set([\"b\",\"c\", \"d\", \"e\"]))\n self.assertEqual(p.bfs_visited(graph6,\"a\" ), set([\"a\"]))", "def dft(self, starting_vertex):\n # Create a s and push starting vertex\n ss = Stack()\n ss.push([starting_vertex])\n # Create a set of traversed vertices\n visited = []\n eldest = [] \n # While stack is not empty:\n while ss.size() > 0:\n # dequeue/pop the first vertex\n path = ss.pop()\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n # mark as visited\n visited.append(path[-1])\n print(visited)\n # enqueue all neightbors\n if not self.get_neighbors(path[-1]):\n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n eldest.append(path[-1])\n\n for next_vert in self.get_neighbors(path[-1]):\n new_path = list(path)\n # print(new_path)\n new_path.append(next_vert)\n 
ss.push(new_path)\n \n return min(eldest)", "def bfs(initial_state, dimension=3):\n\t\n\treturn search(initial_state, Frontier(Queue), dimension)", "def __generate_edges(self):\n\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append( {vertex,neighbour} )\n return edges", "def BFS(maze: list, start: tuple, goal: tuple):\n n = len(maze) # Get the dimension of the maze\n\n #========================================#\n # Some data checking statements\n\n if (not is_valid(start, n)):\n print(\"BFS: Start indices outside maze dimensions\")\n return False\n elif (not is_valid(goal, n)):\n print(\"BFS: Goal indices outside maze dimensions\")\n return False\n\n # End data checking statements\n #========================================#\n\n number_of_nodes_visited = 0\n visited = copy.deepcopy(maze) # We can use a copy of the maze to keep track of visited squares (Considered using a set here, thought that time efficiency was important)\n # visited = list(map(list, maze)) # Alternative to using copy.deepcopy\n\n # Initialize a matrix of the same size as maze where each value is None.\n previous = [[None for i in range(n)] for j in range(n)]\n\n queue = deque() # Define our queue of \"fringe\" squares\n queue.append(start) # Push the start square into our queue\n visited[start[0]][start[1]] = 1 # Set our start to visited\n\n while (len(queue)): # While there exists items in the queue\n current = queue.popleft() # Pop the square at index 0\n number_of_nodes_visited += 1 # Increase number of nodes visited\n\n if (current == goal): # If current is the goal, we found it!\n # We now want to traverse back to make a path using our 'previous' matrix\n path = []\n while (current != None):\n path.append(current)\n current = previous[current[0]][current[1]]\n path.reverse()\n return (True, path, number_of_nodes_visited)\n\n current_i, current_j = current # Unpack the current pair\n \n # Now we want to add all unvisited squares that are possible to get to from the current square\n for i in range(len(nearby_offsets)):\n offset_i, offset_j = nearby_offsets[i]\n possible = (current_i + offset_i, current_j + offset_j)\n # print(f\"Current possible: {possible_i} {possible_j}\") # DEBUG\n if (is_valid(possible, n)): # If the calculated square is within the maze matrix\n # If possible has not been visited yet\n if (not visited[possible[0]][possible[1]]):\n queue.append(possible) # Add possible to our queue\n # Set possible to visited\n visited[possible[0]][possible[1]] = 1\n # Set the previous square for possible to the current square\n previous[possible[0]][possible[1]] = current\n # If the while loop goes out, and the queue is empty, then there is no possible path\n return (False, [], number_of_nodes_visited)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # BFS is identical to DFS, save for the data structure used to store the frontier\n\n # Frontier stored in a Queue\n frontier = util.Queue()\n\n # Visited states stored in a list\n visitedStates = []\n\n # Format of each element: (current coordinates, [path taken to get there])\n frontier.push((problem.getStartState(), []))\n\n # while there are still states to explore\n while not frontier.isEmpty():\n\n # store the current state and path in separate variables\n currentState, pathTaken = frontier.pop()\n\n # for skipping states that have already been visited\n if currentState in visitedStates:\n continue\n\n # for returning the correct path to the goal 
state upon discovering it\n if problem.isGoalState(currentState):\n return pathTaken\n\n # count the current state as \"visited\"\n visitedStates.append(currentState)\n\n # for each successor state, check whether they have already been visited. if not, add their coordinates to the frontier, and append their respective direction to the path list\n for coordinates, direction, cost in problem.getSuccessors(currentState):\n\n if coordinates not in visitedStates:\n\n frontier.push((coordinates, pathTaken + [direction]))\n\n util.raiseNotDefined()", "def grbefgs(self):\n print('Performing GrBeFGS\\n')\n\n frontier = PriorityFrontier()\n\n initial_heuristic = self.get_heuristic(self.initial_state)\n initial_node = SearchNode(self.initial_state)\n frontier.insert(initial_node, initial_heuristic)\n\n visited_nodes = set()\n \n while True:\n if frontier.is_empty():\n # Search failure\n return GenericResult(failure=True)\n \n # Get the next leaf node from the frontier\n leaf_node = frontier.pop()\n \n # Add this node to the visited nodes set\n visited_nodes.add(leaf_node)\n \n # Check for the goal state\n if self.check_goal_state(leaf_node.state):\n # Search success\n # Return final state and list of actions along path to the goal\n # as part of the GenericResult class solution member\n return GenericResult(solution=Solution(final_state=leaf_node.state, actions=self.get_action_path(leaf_node)))\n \n # Generate all possible actions for the given state\n actions = self.get_actions(leaf_node.state)\n \n # Create search nodes from the generated actions\n for action in actions:\n # Generate a new state from the given action\n new_state = self.get_result(leaf_node.state, action)\n \n # Get the new state's heuristic\n new_heuristic = self.get_heuristic(new_state)\n\n # Create a new search node with the created state\n new_node = SearchNode(new_state, leaf_node, action)\n \n # If this node has already been visited, ignore it\n if new_node in visited_nodes:\n continue\n\n # Check for any nodes with the same state as new_state and with better h values that \n # have yet to be visited in the frontier before adding new_node\n if new_node in frontier:\n frontier_node = frontier.peek_node(new_node)\n frontier_heuristic = self.get_heuristic(frontier_node.state)\n\n if frontier_heuristic <= new_heuristic:\n # The original heuristic was less than or equal to the new node\n # Disregard the new node\n continue\n \n else:\n # The new node's heuristic is larger\n # Remove the original node from the frontier\n frontier.remove_node(frontier_node)\n \n # Add the new node to the frontier\n frontier.insert(new_node, new_heuristic)", "def bfs_visited(ugraph, start_node):\n \n visited = set([start_node])\n cola = deque([start_node])\n \n while len(cola)>0:\n node = cola.popleft() \n for neigh in ugraph[node]:\n if not neigh in visited:\n visited.add(neigh)\n cola.append(neigh)\n \n return visited", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n edges.append({vertex, neighbour})\n return edges", "def _breadth_first(self, queue, elements=True):\n while not queue.is_empty():\n node = queue.dequeue()\n if elements:\n yield node.element()\n else:\n yield node\n for child in self.children(node):\n queue.enqueue(child)" ]
[ "0.7032013", "0.65871656", "0.6558607", "0.6494155", "0.6486353", "0.64773285", "0.6464884", "0.6462215", "0.64562446", "0.64025325", "0.63937354", "0.6391087", "0.6381573", "0.6365035", "0.6322902", "0.6297517", "0.6248023", "0.623094", "0.6225877", "0.62127507", "0.6212067", "0.62103426", "0.6187688", "0.6183143", "0.61755496", "0.61587507", "0.6153769", "0.61276865", "0.6111982", "0.60608596", "0.6058173", "0.60504943", "0.60471153", "0.604269", "0.60388625", "0.6031192", "0.6025782", "0.6022614", "0.6002077", "0.59962654", "0.59850776", "0.5975387", "0.5974541", "0.5955416", "0.59531534", "0.5943535", "0.5939745", "0.592969", "0.5906053", "0.58850074", "0.58497053", "0.5843711", "0.5822667", "0.5810255", "0.5805056", "0.58001965", "0.57819635", "0.5774276", "0.57723117", "0.5752348", "0.574938", "0.5746584", "0.57428086", "0.5738127", "0.5734387", "0.57310915", "0.57204354", "0.5713314", "0.57079947", "0.57064575", "0.57002693", "0.5696476", "0.5692396", "0.5690728", "0.5681553", "0.5680233", "0.5678531", "0.5678207", "0.56744474", "0.5672082", "0.56657183", "0.5658699", "0.5642348", "0.5635605", "0.5619273", "0.56171834", "0.5615325", "0.56149846", "0.5605968", "0.5593899", "0.5590708", "0.5590336", "0.5576329", "0.5565533", "0.55640703", "0.5561652", "0.5560573", "0.55583006", "0.55554354", "0.5555245" ]
0.7154378
0
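The negatives in the record above are mostly breadth-first traversals that visit a graph level by level. For context only, a minimal frontier-by-frontier BFS over an adjacency dict could be sketched as follows; the function name, the adjacency-dict input format, and the example graph are illustrative assumptions and are not code taken from this dataset.

def bfs_frontiers(adj, source):
    # Yield one list of nodes per BFS level ("frontier"), starting from source.
    # adj is assumed to map each node to an iterable of its neighbours.
    visited = {source}
    frontier = [source]
    while frontier:
        yield frontier
        next_frontier = []
        for node in frontier:
            for neighbour in adj[node]:
                if neighbour not in visited:
                    visited.add(neighbour)
                    next_frontier.append(neighbour)
        frontier = next_frontier

# Example: a small undirected graph stored as an adjacency dict.
graph = {0: [1, 2], 1: [0, 3], 2: [0, 3], 3: [1, 2]}
print(list(bfs_frontiers(graph, 0)))  # [[0], [1, 2], [3]]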
Node frontiers generator using topological traversal.
def topological_nodes_generator(graph, reverse=...): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfs_nodes_generator(graph, source, reverse=...):\n ...", "def _create_node_iterator(self) -> Iterator[GraphNode]:\n return\n yield", "def pre_order(self):\n for node_data in self._pre_order_helper(self._root):\n yield node_data", "def breadth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n q = collections.deque(initial_nodes_iter)\n while q:\n node = q.popleft()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n q.extend(connected_to_functor(node))", "def depth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n stack = list(initial_nodes_iter)\n while stack:\n node = stack.pop()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n stack.extend(connected_to_functor(node))", "def topological_sort_generator(self):\n from sage.graphs.linearextensions import LinearExtensions\n try:\n return LinearExtensions(self).list()\n except TypeError:\n raise TypeError('Digraph is not acyclic; there is no topological sort (or there was an error in sage/graphs/linearextensions.py).')", "def dfs_edges_generator(graph, source, reverse=...):\n ...", "def nodes_iter(topology):\n return topology.nodes_iter()", "def nodes_iter(self) -> Generator:\n for n in self.graph.nodes(data=True):\n yield n", "def _depth_first_iterate(graph, connected_to_functors, initial_nodes_iter):\n stack = list(initial_nodes_iter)\n while stack:\n node = stack.pop()\n node_attrs = graph.node[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n stack.extend(connected_to_functor(node))", "def _forest_nodes(self):\n\n self.arbor._grow_tree(self)\n root = self.root\n for link in root._links:\n yield self.arbor._generate_tree_node(self.root, link)", "def __iter__(self):\n # set current node to front node\n current = self.front\n # while current != None\n while current:\n # send out current node's data\n yield current.data\n # move to next node\n current = current.prior", "def predecessors(self, node: Node):\n return iter(self.get_node(node_id) for node_id in node.in_nodes_ids)", "def build_node_chains(self):\n\n self.node_chain_lookup = -np.ones(self.tri.npoints, dtype=np.int)\n self.node_chain_list = []\n\n node_chain_idx = 1\n\n self.node_chain_list.append([]) # placeholder for any isolated base-level nodes\n\n for node1 in self.node_high_to_low: \n if (self.node_chain_lookup[node1] != -1): \n continue\n\n junction, this_chain = self._node_walk_downhill(node1)\n\n if len(this_chain) > 1:\n self.node_chain_list.append(this_chain)\n \n self.node_chain_lookup[this_chain[0:-1]] = node_chain_idx \n if 
self.node_chain_lookup[this_chain[-1]] == -1:\n self.node_chain_lookup[this_chain[-1]] = node_chain_idx\n\n node_chain_idx += 1\n\n else: \n self.node_chain_list[0].append(this_chain[0])\n self.node_chain_lookup[this_chain[0]] = 0\n\n return", "def preorder_iterator(node):\n yield node\n for child in node.children:\n yield from preorder_iterator(child)", "def gen_graph(self):", "def _prog_nodes(self):\n\n self.arbor._grow_tree(self)\n my_node = self\n while my_node is not None:\n yield my_node\n ancestors = list(my_node.ancestors)\n if ancestors:\n my_node = my_node.arbor.selector(ancestors)\n else:\n my_node = None", "def get_predecessors(self, node): \n preds = []\n child_state = self.node_to_state(node)\n for it in self.predecessors:\n parent_node = (node[0] + it[0], node[1] + it[1])\n parent_state = self.node_to_state(parent_node)\n edge = self.interpolate(child_state, parent_state, self.distance_bw_states(child_state, parent_state)/self.path_resolution)\n preds.append([parent_node, edge])\n return preds", "def node_gen(self):\n for n in self.child_list:\n yield from n.node_gen\n yield self", "def gen_nodes(self):\n self.nodes = []\n for i in range(self.num_nodes):\n self.nodes.append(Node(self.fk))", "def iteredges(self):\n for source, targets in self.successors.items():\n for target in targets:\n yield source, target", "def predecessors(self):\n predecessors = []\n for inst in self.inst.uses():\n if inst.op_name != 'OpPhi':\n predecessors.append(inst.basic_block)\n return predecessors", "def _create_rel_iterator(self) -> Iterator[GraphRelationship]:\n for downstream_key in self.downstream_deps:\n relationship = GraphRelationship(\n start_key=self.table_key,\n start_label=TableMetadata.TABLE_NODE_LABEL,\n end_label=TableMetadata.TABLE_NODE_LABEL,\n end_key=downstream_key,\n type=TableLineage.ORIGIN_DEPENDENCY_RELATION_TYPE,\n reverse_type=TableLineage.DEPENDENCY_ORIGIN_RELATION_TYPE,\n attributes={}\n )\n yield relationship", "def generate(self,state0):\n knodes = []\n state = state0.copy()\n for node in self.nodes:\n zrun = node.zrun\n ok,state,F,Q = self.model.propagate(state,zrun)\n if (not ok): \n warning(\"kfilter.generate end due to propagation at \",zrun)\n debug('kfilter.generate nodes ',len(knodes))\n return knodes\n knode = node.generate(state)\n knodes.append(knode)\n state = knode.getstate('true').copy()\n debug('kfilter.generate nodes ',len(knodes))\n return knodes", "def each_step(graph):\n\n steps = graph.topological_sort()\n steps.reverse()\n\n for step in steps:\n deps = graph.downstream(step.name)\n yield (step, deps)", "def __generator(self, inp):\n nodes_input = 1\n for i in range(len(self.arch_G)):\n nodes_output = self.arch_G[i]\n inp = fc_layer(inp, nodes_input, nodes_output, 'G_' + str(i + 1) + '_')\n nodes_input = self.arch_G[i]\n\n return fc_layer(inp, self.arch_G[-1], 1,\n 'G_end_',\n final_layer=True)", "def _anchored_predecessors(self, n):\n\n # loop on all incoming edges\n for t in self.predecessors(n):\n \n # if predecessor is anchored\n # stop looking for (necessarily earlier) predecessors\n if t.anchored:\n yield t\n continue\n \n # if neighbor is not anchored\n # look one level deeper\n for tt in self._anchored_predecessors(t):\n yield tt", "def forward_graph(self):\n raise NotImplementedError", "def bfs_edges_generator(graph, source, reverse=...):\n ...", "def pipeline_dependencies_tasks(g):\n deps = dict()\n for step_name in nx.topological_sort(g):\n deps[step_name] = list(g.predecessors(step_name)) # copy list\n return deps", "def _build_chain(G, 
u, v, visited):\n while v not in visited:\n yield u, v\n visited.add(v)\n u, v = v, G.nodes[v]['parent']\n yield u, v", "def __iter__(self):\n while (self.pointsleft > 0):\n current = min(self.pointsleft, self.settings.LOCALSKIPNUM)\n for i in range(current):\n self.add(self.fabric.getcoordinate())\n self.pointsleft -= self.settings.LOCALSKIPNUM\n self.pointscontroller.set(self.graph)\n yield self.graph", "def ascend(self):\n node = self.parent\n while node:\n yield node\n node = node.parent", "def preorder(self):\n if not self.is_empty():\n for p in self._subtree_preorder(self.root()): # start recursion\n yield p", "def preorder(self):\n if not self.is_empty():\n for p in self._subtree_preorder(self.root()): # start recursion\n yield p", "def get_node_predecessors(\n self, u: Hashable, include_metadata: bool = False\n ) -> Generator:\n if include_metadata:\n return {\n e[\"source\"]: e\n for e in (\n self._g.V()\n .has(ID, u)\n .inE()\n .project(\"target\", \"source\", \"properties\")\n .by(__.inV().values(ID))\n .by(__.outV().values(ID))\n .by(__.valueMap(True))\n .toList()\n )\n }\n return self._g.V().out().has(ID, u).values(ID).toList()", "def _create_rel_iterator(self) -> Iterator[GraphRelationship]:\n for downstream_key in self.downstream_deps:\n relationship = GraphRelationship(\n start_key=self.column_key,\n start_label=ColumnMetadata.COLUMN_NODE_LABEL,\n end_label=ColumnMetadata.COLUMN_NODE_LABEL,\n end_key=downstream_key,\n type=ColumnLineage.ORIGIN_DEPENDENCY_RELATION_TYPE,\n reverse_type=ColumnLineage.DEPENDENCY_ORIGIN_RELATION_TYPE,\n attributes={}\n )\n yield relationship", "def get_all_predecessor_pairs(G):\n\n all_predecessor_pairs = []\n\n for dest_id in G.nodes:\n predecessors = set()\n add_predecessors(dest_id, predecessors, G, 1, 12)\n\n # create all pairs from the predecessors to the dest node and add to the list\n for predecessor_id in predecessors:\n pair = (predecessor_id, dest_id)\n all_predecessor_pairs.append(pair)\n\n\n # shuffle all pairs in-place. 
It would probably be more efficient to put the higher degree pairs last,\n # because then many shorter subroutes are automatically computed when these long routes are computed first,\n # but the efficiency improvement is only minor so to keep code simple we just shuffle all pairs.\n random.shuffle(all_predecessor_pairs)\n\n return all_predecessor_pairs", "def preorder(self):\n if not self.is_empty():\n for p in self._subtree_preorder(self.root()):\n yield p", "def preorder(self):\n if not self.is_empty():\n for p in self._subtree_preorder(self.root()):\n yield p", "def preorder_print(self, start, traversal):\n return traversal", "def preorder(self):\n\n traversal = []\n self.preorder_helper(self.root, traversal)\n return traversal", "def get_predecessors(vertex, graph):\n predecessors = list()\n predecessors.extend(graph.predecessors(vertex))\n return predecessors", "def chain_graph(self) -> nx.DiGraph:\n edg_lst = [\n (f\"p{idx}\", f\"p{idx+1}\", self.a[f\"p{idx+1}\"]) for idx in range(self.n)\n ]\n chain_graph = nx.DiGraph()\n chain_graph.add_weighted_edges_from(edg_lst)\n return chain_graph", "def _topological_sort_dfs(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError", "def constructPaths(graph):\n\n paths = [ [] for x in xrange(len(graph)) ] # Initialise our list\n\n for i in xrange(len(graph)): # Iterate over all nodes\n\n index = i # Will be used to repeatedly get the predecessor\n\n # Setting up the initial values\n paths[i].append(i)\n\n while True:\n\n indexOfPred = graph[index].getPredecessor() # Getting the index of the predecessor of this node\n\n if indexOfPred == -1: # If it is the source vertex, break. (Will break if the current Node doesn't have a predecessor as well)\n\n break\n\n else:\n\n paths[i].append(indexOfPred) # Add the index of the predecessor to our path\n\n index = indexOfPred # Set index to be the index of the predecessor to repeatedly get predecessors\n\n return paths", "def pre_order_nodes(root):\n yield root\n\n if root.get_left():\n for node in pre_order_nodes(root.get_left()):\n yield node\n\n if root.get_right():\n for node in pre_order_nodes(root.get_right()):\n yield node", "def traverse_arcs_topo(self, start=None, end=None, reverse=False):\n for w in self.nodes:\n w.fan = 0\n if start is None:\n start = self.start\n if end is None:\n end = self.end\n if not reverse:\n # forward topological order\n for x in self.arcs:\n x.dest.fan += 1\n # Agenda of closed arcs\n Q = start.exits[:]\n while Q:\n e = Q[0]\n del Q[0]\n yield e\n e.dest.fan -= 1\n if e.dest.fan == 0:\n if e.dest == end:\n break\n Q.extend(e.dest.exits)\n else:\n # backward topological order\n for x in self.arcs:\n x.src.fan += 1\n Q = end.entries[:]\n while Q:\n e = Q[0]\n del Q[0]\n yield e\n e.src.fan -= 1\n if e.src.fan == 0:\n if e.src == start:\n break\n Q.extend(e.src.entries)", "def nodes_in_topological_order(self):\n if not self.sorted:\n self._topological_sort()\n return self._topological_order", "def iter_nodes(self):", "def breadth_first_traverse(self) -> Generator:\n assist_queue = deque()\n assist_queue.append(self.root_node)\n while assist_queue:\n current_node = assist_queue.popleft()\n yield current_node\n if current_node.children:\n for child in current_node.children:\n assist_queue.append(child)", "def _create_connections(self):\n self.predecessors = {}\n self.successors = {}\n for nd in self.nodes:\n self.predecessors[nd.name] = []\n self.successors[nd.name] = []\n\n for (nd_out, nd_in) in self.edges:\n self.predecessors[nd_in.name].append(nd_out)\n 
self.successors[nd_out.name].append(nd_in)", "def vertex_generator(self):\n for V in self.Vrepresentation():\n if V.is_vertex():\n yield V", "def get_node_predecessors(\n self, u: Hashable, include_metadata: bool = False\n ) -> Generator:\n my_id = self._names.get_id(u)\n if include_metadata:\n val = {}\n for vid in self._nk_graph.iterInNeighbors(my_id):\n v = self._names.get_name(vid)\n if self.is_directed():\n val[v] = self._meta.get_edge(v, u)\n else:\n try:\n val[v] = self._meta.get_edge(u, v)\n except KeyError:\n val[v] = self._meta.get_edge(v, u)\n return val\n\n return iter(\n [self._names.get_name(i) for i in self._nk_graph.iterInNeighbors(my_id)]\n )", "def _bfs_nodes(cls, graph, source, size, **kwargs):\n if size < 1:\n return iter(())\n\n return itertools.chain(\n (source,),\n itertools.islice((v for u, v in nx.bfs_edges(graph, source)), size-1)\n )", "def traverse(self, data=True):\n nodes = sorted(self.graph.nodes(), key=lambda x: key_to_numeric(x))\n for node in nodes:\n yield (node, self.graph.node[node]) if data else node", "def chain_graph(self) -> nx.DiGraph:\n edg_lst = [\n (f\"p{idx}\", f\"p{idx+1}\", self.d[f\"p{idx+1}\"]) for idx in range(self.n)\n ]\n chain_graph = nx.DiGraph()\n chain_graph.add_weighted_edges_from(edg_lst)\n return chain_graph", "def __iter__(self):\n graph = self._execution_graph\n if self._predecessors is None:\n predecessors = set(\n node for node in graph.bfs_predecessors_iter(self._atom)\n if graph.nodes[node]['kind'] in co.ATOMS)\n self._predecessors = predecessors.copy()\n else:\n predecessors = self._predecessors.copy()\n last = self._node\n for lvl, parent in enumerate(self._node.path_iter(include_self=False)):\n if not predecessors:\n break\n last_idx = parent.index(last.item)\n try:\n visible, removals = self._level_cache[lvl]\n predecessors = predecessors - removals\n except KeyError:\n visible = []\n removals = set()\n atom_it = tr.depth_first_reverse_iterate(\n parent, start_from_idx=last_idx)\n for atom in atom_it:\n if atom in predecessors:\n predecessors.remove(atom)\n removals.add(atom)\n visible.append(atom)\n if not predecessors:\n break\n self._level_cache[lvl] = (visible, removals)\n if LOG.isEnabledFor(logging.TRACE):\n visible_names = [a.name for a in visible]\n LOG.trace(\"Scope visible to '%s' (limited by parent '%s'\"\n \" index < %s) is: %s\", self._atom,\n parent.item.name, last_idx, visible_names)\n if self._names_only:\n yield [a.name for a in visible]\n else:\n yield visible\n last = parent", "def build_topological(node, parent, neighbors, visited, stack, parents):\r\n visited[node] = True\r\n\r\n for n in neighbors[node]:\r\n if not visited[n]:\r\n build_topological(n, node, neighbors, visited, stack, parents)\r\n\r\n parents[node] = parent\r\n stack.insert(0, node)", "def breadthfirst(self):\n import os\n cwd = os.getcwd()\n os.chdir('/Users/raj/Documents/algorithms_in_python/linked_lists/')\n from linked_collections import LinkedQueue\n os.chdir(cwd) # change to cwd\n if not self.is_empty():\n lq = LinkedQueue()\n lq.enqueue(self.root())\n while not lq.is_empty():\n p = lq.dequeue()\n yield p\n for c in self.children(p):\n lq.enqueue(c)", "def createTopologicalList(self):\n sortedList = list(self.node.items())\n sortedList.sort(key=lambda item : item[1].order)\n self.topologicalList = [i[0] for i in sortedList]\n \n # Add dummy element, since topological order starts at 1.\n self.topologicalList = [utils.NO_PATH_EXISTS] + self.topologicalList", "def find_reachable_nodes_from(self, start_node, 
**kwargs):\r\n\t\treturn BreadthFirstTraverser(start_node, **kwargs)", "def __iter__(self):\n for (_,_,path) in self.frontierpq:\n yield path", "def _pre_order_helper(self, node):\n curr = node\n yield curr._data\n if curr._lkid:\n for node_data in self._pre_order_helper(curr._lkid):\n yield node_data\n if curr._rkid:\n for node_data in self._pre_order_helper(curr._rkid):\n yield node_data", "def nodes(self):\n for node_set in self.itervalues():\n for node in node_set:\n yield node", "def _pfs_nodes(cls, graph, source, size, priority):\n if size < 1:\n return iter(())\n\n # use min-heap to implement (max) priority queue\n # use insertion order to break priority tie\n queue = []\n counter = itertools.count()\n push = lambda priority, node: heappush(queue, (-priority, next(counter), node))\n pop = partial(heappop, queue)\n\n visited = set()\n enqueued = set()\n push(priority(source), source)\n\n while queue and len(visited) < size:\n _, _, node = pop()\n\n if node in visited:\n continue\n\n visited.add(node)\n\n for neighbor in graph[node]:\n if neighbor not in enqueued:\n enqueued.add(neighbor)\n push(priority(neighbor), neighbor)\n\n return iter(visited)", "def _get_next_gate(cls, dag: DAGCircuit, node: DAGOpNode) -> Generator[DAGOpNode, None, None]:\n for next_node in dag.successors(node):\n if not isinstance(next_node, DAGOutNode):\n yield next_node", "def findTopologicalOrder(self):\n # This implementation temporarily messes with reverse stars, must fix at end\n numOrderedNodes = 0\n while numOrderedNodes < self.numNodes:\n nextNode = self.findLeastEnteringLinks()\n if len(self.node[nextNode].reverseStar) > 0:\n print(\"Error: Network given to findTopologicalOrder contains a cycle.\")\n raise BadNetworkOperationException\n numOrderedNodes += 1\n self.node[nextNode].order = numOrderedNodes\n self.node[nextNode].reverseStar = [0] * self.numLinks\n for ij in self.node[nextNode].forwardStar:\n self.node[self.link[ij].head].reverseStar.remove(ij)\n \n # Repopulate reverse star list\n for i in self.node:\n self.node[i].reverseStar = list()\n for ij in self.link:\n self.node[self.link[ij].head].reverseStar.append(ij)", "def pre_order_traversal(self, cur_node=None):\n if cur_node is None:\n cur_node = self.root\n if cur_node is None:\n return\n visited = []\n visited.append(cur_node)\n\n while len(visited) > 0:\n cur_node = visited.pop()\n yield cur_node.data\n if cur_node.right:\n visited.append(cur_node.right)\n if cur_node.left:\n visited.append(cur_node.left)", "def breadth_first_traversal(self, start):\n visited = []\n visited.append(start)\n start_visited = visited\n while True:\n temp = []\n for node_ in start_visited:\n for i in self.neighbors(node_):\n if i not in visited:\n visited.append(i)\n temp.append(i)\n start_visited = temp\n if not temp:\n break\n return visited", "def walk_forward_iter(self) -> Iterator[Tuple[str, bool, bool]]:\n return self._create_iter(\n self._sources,\n self._sinks,\n self._forward_mapping,\n self._backward_mapping,\n )", "def sort(self):\n while self.nodes != []:\n iterated = False\n for node in self.leaf_nodes():\n iterated = True\n self.prune_node(node)\n yield node\n if not iterated:\n raise CyclicGraphError(\"Sorting has found a cyclic graph.\")", "def pre_traversal(self):\n if self.root is None:\n return None\n else:\n node_stack = list()\n output_list = list()\n node = self.root\n while node is not None or len(node_stack):\n # if node is None which means it comes from a leaf-node' right,\n # pop the stack and get it's right node.\n # continue the 
circulating like this\n if node is None:\n node = node_stack.pop().right\n continue\n # save the front node and go next when left node exists\n while node.left is not None:\n node_stack.append(node)\n output_list.append(node.get_element())\n node = node.left\n output_list.append(node.get_element())\n node = node.right\n return output_list", "def starting_nodes(self):\n return self.starting_nodes_ #abstract requires this exists!", "def get_forward_init(node, graph):\n\tedges = []\n\tfor e in node.edges:\n\t\tif node.label <= graph.nodes[e.to].label:\n\t\t\tedges.append(e)\n\treturn edges", "def circulant_gen(min_order, max_order):\n\n for num_vertices in range(min_order, max_order + 1):\n all_j_values = [x for x in range(1, floor(num_vertices / 2.0))]\n j_values_iter = powerset(all_j_values)\n\n # for every possible offset combination\n for j_value_set in j_values_iter:\n # get the adjacency matrix of the circulant graph\n adj = circulant_adj(j_value_set, num_vertices)\n G = nx.from_numpy_matrix(adj)\n\n if G.size() > 0 and nx.is_connected(G):\n yield (G, name_circulant(num_vertices, j_value_set))", "def dependency_order(self):\n seen = set()\n\n def _prune_visited(node):\n if node in seen:\n return True\n seen.add(node)\n return False\n\n for target in self.targets:\n if target in seen:\n continue\n for node in target.postorder(prune_fn=_prune_visited):\n yield node.data", "def breadth_first(self):\n nodes_to_vist = []\n curr = self._root\n nodes_to_vist.append(curr)\n while len(nodes_to_vist):\n curr = nodes_to_vist[0]\n if curr._lkid:\n nodes_to_vist.append(curr._lkid)\n if curr._rkid:\n nodes_to_vist.append(curr._rkid)\n yield curr._data\n nodes_to_vist.remove(curr)", "def get_deterministic_topological_ordering(nodes, links, start_node):\n graph = DiGraph()\n graph.add_nodes_from(nodes)\n for link in links:\n graph.add_edge(*link)\n\n if not is_directed_acyclic_graph(graph):\n raise NetworkXUnfeasible\n\n task_names = sorted(graph.successors(start_node))\n task_set = set(task_names)\n graph.remove_node(start_node)\n\n result = [start_node]\n while task_names:\n for name in task_names:\n if graph.in_degree(name) == 0:\n result.append(name)\n\n # it is OK to modify task_names because we break out\n # of loop below\n task_names.remove(name)\n\n new_successors = [t for t in graph.successors(name)\n if t not in task_set]\n task_names.extend(new_successors)\n task_names.sort()\n task_set.update(set(new_successors))\n\n graph.remove_node(name)\n break\n\n return result", "def nodes(topology):\n return topology.nodes()", "def nodes(self): \n return [n for n in self.iternodes()]", "def preorder(self):\n return (node for node in self.get_preorder(self.root))", "def path(self):\r\n node, p = self, []\r\n while node:\r\n p.append(node)\r\n node = node.parent\r\n yield from reversed(p)", "def topologicalSort(self):\r\n visited = [False]*self.vertices \r\n stack =[]\r\n \"\"\"\r\n using stack, problems with using code given by\r\n professor (using queues) so I'm using a new approach\r\n \"\"\"\r\n for i in range(self.vertices):\r\n \"\"\"\r\n traversing thru number of vertices, checking\r\n if false, and if is, goes to helper method\r\n \"\"\"\r\n if visited[i] == False: \r\n self.topologicalSortUtil(i,visited,stack) \r\n \r\n\r\n print(stack)", "def _subtree_preorder(self, p):\n yield p # visit p before its subtrees\n for c in self.children(p): # for each child c\n for other in self._subtree_preorder(c): # do preorder of c's subtree\n yield other # yielding each to our caller", "def successors(self, 
node: Node):\n return iter(self.get_node(node_id) for node_id in node.out_nodes_ids)", "def update_predecessors(graph, path):\n\n for i in range(1,len(path)):\n graph.Dictionary[graph.Keys[graph.Vertices.index(path[i])]].predecessor = copy.deepcopy(path[i-1])", "def _breadthfirst(self,root, action=lambda nodes: print(nodes)):\n nodes = []\n breadth = Queue()\n visited = []\n\n breadth.enqueue(root)\n visited.append(root)\n\n while breadth.front:\n front = breadth.dequeue()\n nodes.append(front.value)\n\n for child in self._adjacency_list.get(front.value):\n if not child.start_vertex in visited:\n visited.append(child.start_vertex)\n breadth.enqueue(child.start_vertex) \n\n return nodes", "def forwarding(predecessor, source):\n # Defining the list of nodes that will be checked\n nodes = list(predecessor.keys())\n nodes.remove(source)\n\n # Getting minimum node (initial node) and removing it from the list\n T = dict.fromkeys(nodes, [])\n\n # Looping through notes and getting the next hop node\n for n in nodes:\n nextnode = n\n while nextnode != source:\n T[n] = (source, nextnode)\n # This is presented in the from that was presented to us in the lectures\n nextnode = predecessor[nextnode][0]\n return T", "def iterative_dfs(starting_vertex, graph):\n starting_vertex.discovered = True\n starting_vertex.discovery_edge = Graph.Edge(starting_vertex, None, None) # Dummy edge\n walk = starting_vertex\n\n while walk is not None:\n has_to_go_back = True\n for edge in graph.incident_edges(walk):\n opposite = edge.opposite(walk)\n if not opposite.discovered:\n opposite.discovered = True\n opposite.discovery_edge = edge\n walk = opposite\n has_to_go_back = False\n break\n\n if has_to_go_back:\n walk = walk.discovery_edge.opposite(walk)\n\n starting_vertex.discovery_edge = None # Remove dummy edge", "def pre_order_traversal(self):\n\n elements = []\n\n ##visit base node\n elements.append(self.data)\n\n ##visit left tree\n if self.left:\n elements += self.left.pre_order_traversal()\n\n #visit right tree\n if self.right:\n elements += self.right.pre_order_traversal()\n\n return elements", "def pre_order(self, node: \"BSTNode\") -> Iterable[\"BSTNode\"]:\n if not node:\n raise StopIteration\n yield node\n if node.left:\n for _node in self.pre_order(node.left):\n yield _node\n if node.right:\n for _node in self.pre_order(node.right):\n yield _node", "def _generate_vertexes(self):\n # generate list of sets for each vms\n self._graph = [set() for _ in range(self._vm_count)]", "def generate_dag_graph(self):\n # generate ranom graph\n G = nx.DiGraph()\n G.add_nodes_from(range(self.nodes))\n return self.fix_graph(G)", "def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)", "def createGraph(self):\n \n for episode in self.episodes:\n listeSuccessors = [episode[episode[:,1] > episode[i,1]][:,0] # List of list of successors for each user\n for i in range(len(episode))] \n for i, successeur in enumerate(listeSuccessors): # for the list of successors of each user\n for v in successeur: # for every successor of a user\n u, proba = episode[i,0], np.random.random() # Generate a probability so 
within (0,1)\n self.successors[u][v] = proba # u ---(proba)---> v \n self.predecessors[v][u] = proba # v ---(proba)---> u", "def _subtree_preorder(self, p):\n yield p # visit p before its subtrees\n for c in self.children(p): # for each child c\n for other in self._subtree_preorder(c): # do preorder of c's subtree\n yield other # yielding each to our caller", "def forward(self, adj, z, n_nodes):\n x = z.repeat(n_nodes, 1)\n sequence = self.gcn(x, adj)\n\n return sequence", "def generation_next(prev_gen):\n next_gen = []\n\n # Iter through list of graphs\n for original_graph in prev_gen:\n # Select edges to nodes which are at distance 2\n select_edges = dist2_nodepairs(original_graph)\n\n # Go through the list of possible selected edges and add one\n for test_edge in select_edges:\n test_graph = original_graph.copy()\n test_graph.add_edge(*test_edge)\n if (not graph_exists(test_graph, next_gen)) \\\n and check_test_graph(test_graph):\n next_gen.append(test_graph)\n\n return next_gen", "def starting_nodes(self):\n # Level 0 nodes in a directed graph will have 1 or more out_edges but no in_edges\n nodes_with_outs = set(e[0] for e in self.G2.out_edges())\n nodes_with_ins = set(e[1] for e in self.G2.in_edges())\n return nodes_with_outs - nodes_with_ins" ]
[ "0.65818083", "0.631488", "0.60523885", "0.60450137", "0.60366327", "0.5984799", "0.59832644", "0.5886222", "0.5862201", "0.5833745", "0.57818055", "0.5777673", "0.57532954", "0.5735733", "0.5725604", "0.5718208", "0.5704789", "0.5663648", "0.5657863", "0.56538856", "0.5643337", "0.56393594", "0.56135976", "0.5611871", "0.5610509", "0.5605352", "0.5598771", "0.55880463", "0.5564255", "0.556156", "0.5538313", "0.5525677", "0.55249155", "0.55105025", "0.5496213", "0.54929256", "0.548738", "0.5473872", "0.547341", "0.547341", "0.54716563", "0.5463743", "0.5424242", "0.5415368", "0.5412457", "0.5406592", "0.5405055", "0.53937703", "0.5392991", "0.5389863", "0.53836095", "0.5376263", "0.53601134", "0.53505194", "0.5342419", "0.53404087", "0.5340196", "0.53332055", "0.5332652", "0.53295267", "0.53259575", "0.5317844", "0.53090304", "0.53082806", "0.5304514", "0.529732", "0.52911067", "0.5290635", "0.5287788", "0.52732456", "0.52632535", "0.52582055", "0.5241616", "0.5230863", "0.52301747", "0.5224597", "0.5214784", "0.5213335", "0.5211323", "0.52052575", "0.52040726", "0.5198254", "0.5194458", "0.5189158", "0.5188035", "0.51751626", "0.5172313", "0.5164829", "0.5157733", "0.51464", "0.5145669", "0.51454514", "0.5133825", "0.5127166", "0.5119344", "0.51176345", "0.5115166", "0.51146346", "0.5108802", "0.5105951" ]
0.75308436
0
Edge frontiers generator using depth-first search (DFS). Multiple source nodes can be specified to start the DFS traversal. One needs to make sure that each source node belongs to a different connected component, so the frontiers can be easily merged. Otherwise, the behavior is undefined.
def dfs_edges_generator(graph, source, reverse=...): ...
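The document field above is only a stub (the "..." elisions are in the source and are left as-is). As a rough, hedged sketch of what an edge-frontier DFS generator could look like, the snippet below works on a plain adjacency-list dict rather than the library's own graph type; the name dfs_edges_sketch, the adj/sources parameters, and the merge-by-step rule for combining traversals from sources in different components are assumptions made for illustration, not the actual implementation behind dfs_edges_generator.

def dfs_edges_sketch(adj, sources):
    # adj: {node: [neighbors]}, sources: one start node per connected component.
    # Yields frontiers (lists of tree edges); frontier i merges the i-th DFS
    # edge discovered from every source, which is safe only because the
    # components are assumed disjoint.
    per_source_edges = []
    for src in sources:
        visited = set()
        stack = [(None, src)]            # (parent, node) pairs
        edges = []
        while stack:
            parent, node = stack.pop()
            if node in visited:
                continue
            visited.add(node)
            if parent is not None:
                edges.append((parent, node))          # tree edge, in DFS visit order
            for nbr in reversed(adj.get(node, [])):   # reversed() keeps left-to-right order
                if nbr not in visited:
                    stack.append((node, nbr))
        per_source_edges.append(edges)
    longest = max((len(e) for e in per_source_edges), default=0)
    for step in range(longest):
        yield [e[step] for e in per_source_edges if step < len(e)]

# Example on an assumed toy graph with two components:
# adj = {0: [1, 2], 1: [3], 2: [], 3: [], 4: [5], 5: []}
# list(dfs_edges_sketch(adj, [0, 4]))  ->  [[(0, 1), (4, 5)], [(1, 3)], [(0, 2)]]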
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfs_nodes_generator(graph, source, reverse=...):\n ...", "def main():\n n = int(input(\"Enter the number of nodes: \"))\n m = int(input(\"Enter the number of edges: \"))\n \n adjList = [[] for i in range(n)]\n \n print(\"Enter the edges: \")\n for i in range(m):\n x, y = input().split(\" \")\n x = int(x)\n y = int(y)\n adjList[x].append(y)\n adjList[y].append(x)\n \n s = int(input(\"Enter the source: \"))\n \n DFS(adjList, s, n)", "def iterative_dfs(starting_vertex, graph):\n starting_vertex.discovered = True\n starting_vertex.discovery_edge = Graph.Edge(starting_vertex, None, None) # Dummy edge\n walk = starting_vertex\n\n while walk is not None:\n has_to_go_back = True\n for edge in graph.incident_edges(walk):\n opposite = edge.opposite(walk)\n if not opposite.discovered:\n opposite.discovered = True\n opposite.discovery_edge = edge\n walk = opposite\n has_to_go_back = False\n break\n\n if has_to_go_back:\n walk = walk.discovery_edge.opposite(walk)\n\n starting_vertex.discovery_edge = None # Remove dummy edge", "def dft_recursive(self, starting_vertex, visited=None):\n if visited is None: # if visited is None\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n if starting_vertex not in visited: # if starting_vertex has not been visited yet\n print(starting_vertex)\n visited.add(starting_vertex) # add to the set \n\n for neighbor in self.vertices[starting_vertex]: # loop through each neighbor \n self.dft_recursive(neighbor, visited) # call the dft_recursive method on each neighbor ", "def DFS(self, start_vertex):\n yield from self._search(start_vertex, kind='DFS')", "def bfs_edges_generator(graph, source, reverse=...):\n ...", "def DFS(self, start_vertex, verbose=True):\n if start_vertex is None:\n return None\n traversal = []\n visited = set()\n for vertex in self.vertices():\n if vertex not in visited:\n self._DFS(vertex, visited, traversal.append)\n if verbose:\n print('DFS(Graph) =', traversal)\n return traversal", "def dfs_recursive(self, starting_vertex, destination_vertex):\n pass # TODO", "def dfs_recursive(self, starting_vertex, destination_vertex):\n pass # TODO", "def dfs(self, starting_vertex, destination_vertex):\n # create an empty stack \n stack = Stack()\n #push the starting vertex ID as list\n stack.push([starting_vertex])\n # create an empty Set to store the visited vertices\n visited = set()\n # while the stack is not empty ...\n while stack.size() > 0:\n # pop the first vertex\n path = stack.pop()\n vert = path[-1]\n # if that vertex has not been visited ..\n if vert not in visited:\n #check for target\n if vert == destination_vertex:\n return path\n # mark it is visited\n visited.add(vert)\n # then add all of its neighbors to the top of the stack\n for neighbor in self.vertices[vert]: #self.get_neighbors(vert)\n #copy path to avoid pass by reference\n new_path = list(path) # make a copy\n new_path.append(neighbor)\n stack.push(new_path)", "def dfs(self, starting_vertex, destination_vertex):\n pass # TODO", "def dfs(self, starting_vertex, destination_vertex):\n \"\"\" LIFO\n Create a stack\n Create a set to store visited\n PUSH starting vertex into an array (STACK)\n While the STACK is NOT empty \n get((pop) first PATH vertex\n get Vertex from END of PATH\n check if NOT visited\n mark as visited\n check if vertex is destination_vertex\n If TRUE, return path \n PUSH path to ALL of neighbors\n make copy of current path\n add neighbor to path copy\n PUSH path copy\n \"\"\" \n s = Stack() # Create a stack\n s.push([starting_vertex]) # 
PUSH starting vertex into an array (STACK)\n visited = set() # Create a set to store visited\n\n while s.size() > 0: # While the STACK is NOT empty\n path = s.pop() # get(pop) first PATH vertex)\n v = path[-1] # get Vertex from END of PATH \n\n while v not in visited: # check if NOT visited\n visited.add(v) # mark as visited\n\n if v == destination_vertex: # check if vertex is destination_vertex\n return path # If TRUE, return path \n\n for n in self.get_neighbors(v): # PUSH path to ALL of neighbors\n path_c = path[:] # make copy of current path\n # path_c.extend([n]) # add neighbor to path copy\n path_c.append(n) # add neighbor to path copy\n s.push(path_c) # PUSH path copy", "def _bfs_nodes(cls, graph, source, size, **kwargs):\n if size < 1:\n return iter(())\n\n return itertools.chain(\n (source,),\n itertools.islice((v for u, v in nx.bfs_edges(graph, source)), size-1)\n )", "def dfs(self, starting_vertex, destination_vertex):\n # TODO", "def dft(self, starting_vertex):\n \"\"\" LIFO\n Create a stack \n Push starting Vertex\n Create a set to store visited\n While the stack is NOT empty: e.g. > 0\n Pop the last added Vertex\n Check IF NOT visited:\n Mark as visited\n\n\n Push ALL of neighbors\n \"\"\"\n s = Stack() # Create a stack\n s.push(starting_vertex) # Push starting Vertex\n visited = set() # Create a set to store visited\n\n while s.size() > 0: # While the stack is NOT empty: e.g. > 0\n v = s.pop() # Pop the last added Vertex\n\n if v not in visited: # Check IF NOT visited: e.g. > 0\n print(v)\n visited.add(v) # Mark as visited\n\n for n in self.get_neighbors(v): # Check IF NOT visited:\n s.push(n) # Push ALL of neighbors ", "def dft_recursive(self, starting_vertex, visited = None):\n \"\"\"\n Check if Vertex is in visited\n if NOT visited, add to visited set\n Call dft_recursive on every neighbor \n \n\n \"\"\"\n # 1) base case >> where to stop recursion\n # 2) calls itself from within\n # 3) each iteration approaches base case\n\n # 1) base case >> where to stop recursion\n\n # init a set that persists after recursions loops to save visited\n if visited == None:\n visited = set()\n\n if starting_vertex not in visited: # 1) & 3) Check if vertex has NOT been visited\n visited.add(starting_vertex) # if True, add to visited set\n\n print(starting_vertex)\n\n # perform recursion on neighbor\n for n in self.get_neighbors(starting_vertex):\n self.dft_recursive(n, visited) # 2) ", "def dft_recursive(self, starting_vertex, visited=None):\n \n # for vertex in self.get_neighbors(starting_vertex):\n # if vertex not in visited:\n # visited.add(vertex)\n # self.dft_recursive(vertex, visited)\n # return visited\n if visited == None:\n visited = set()\n print(starting_vertex)\n visited.add(starting_vertex)\n for v in self.get_neighbors(starting_vertex):\n if v not in visited:\n self.dft_recursive(v, visited)", "def dft_recursive(self, starting_vertex, visited=None):\n if visited is None:\n visited = set()\n visited.add(starting_vertex)\n print(starting_vertex)\n for neighb_vert in self.vertices[starting_vertex]:\n if neighb_vert not in visited:\n self.dft_recursive(neighb_vert, visited)", "def BreadthFirstSearch(graph, source):\r\n \r\n # Dictionary dataInfo will be used to store the information about each vertex. 
(Ancestors, descendants, distance from source, and color)\r\n dataInfo = {} \r\n \r\n # List queue will be used to store the vertices currently in the queue, these vertices will all be gray.\r\n queue = []\r\n \r\n # Loops through the vertices in the graph, creates a key in the dictionary for each vertice, with default values.\r\n for vertex in graph[\"V\"]:\r\n dataInfo[str(vertex)] = {\"ancestor\": \"\", \"descendants\": [], \"distance\": \"\", \"color\": \"white\"}\r\n \r\n # At key source (variable) in dataInfo dictionary, key ancestor is set to have no value other than \"NA\" (as it is the starting point), and distance to 0 (as it will always be zero as it is the source).\r\n dataInfo[str(source)][\"ancestor\"] = \"NA\"\r\n dataInfo[str(source)][\"distance\"] = 0\r\n\r\n def symmetricVertex(edge, otherVertex):\r\n \r\n \"\"\"\r\n Function symmetricVertex takes arguments edge, a list of an edge from the graph dictionary, and otherVertex, an integer that is the other vertex in the edge with the sourceVertex. The function will return the point other than the otherVertex, and will be used to find adjacent vertices relative to the current vertex in the queue. Example: edge ([1, 2]), otherVertex (1), the function will return 2.\r\n \"\"\"\r\n \r\n for num in edge:\r\n if num != otherVertex:\r\n return num\r\n \r\n\r\n def pathFinder(graph, sourceVertex):\r\n \r\n \"\"\"\r\n Function pathFinder takes arguments graph, a dictionary, with the same keys for the edges and the vertices and sourceVertex, an integer. The function will loop through all of the edges in the graph and find adjacent vertices relative to the current sourceVertex. sourceVertex values will be in the queue. The function will edit dictionaries and lists, not return any value.\r\n \"\"\"\r\n \r\n # List removeEdges will be used to store the edges that will be removed from the graph dictionary after the loop ends. Makes the code more efficient, as you don't want to loop through a million vertices every time, now do you?\r\n removeEdges = []\r\n \r\n # Loop through edges in the graph, will be used to find adjacent vertices.\r\n for edge in graph[\"E\"]:\r\n \r\n # If the sourceVertex is in the edge and the edge is not discovered yet, then edit and change values in the main dictionary, dataInfo.\r\n if (sourceVertex in edge) and (dataInfo[str(symmetricVertex(edge, sourceVertex))] != \"gray\"):\r\n otherVertex = symmetricVertex(edge, sourceVertex)\r\n \r\n # Adds variable otherVertex to the descendants of the sourceVertex.\r\n dataInfo[str(sourceVertex)][\"descendants\"].append(otherVertex)\r\n \r\n # Updates key(otherVertex) to correct values. Ancestor is always the sourceVertex, the distance is always the distance of sourceVertex incremented by one, and the color is updated to gray as it is added to the queue.\r\n dataInfo[str(otherVertex)] = {\"ancestor\": sourceVertex, \"descendants\": [], \"distance\": (dataInfo[str(sourceVertex)][\"distance\"] + 1), \"color\": \"gray\"}\r\n \r\n # Edge includes two discovered edges, so it will be removed to stop redundancy. 
It is added to the removeEdges list.\r\n removeEdges.append(edge)\r\n \r\n # Appends the discovered vertex to the queue.\r\n queue.append(otherVertex)\r\n \r\n # After the loop ends, the edges that contain the source vertex have been exhausted, so the color is updated to black.\r\n dataInfo[str(sourceVertex)][\"color\"] = \"black\" \r\n \r\n # If the sourceVertex is in the queue, it is removed, as all of the edges containing it have been exhausted.\r\n if sourceVertex in queue:\r\n queue.remove(sourceVertex)\r\n \r\n # Loop through the edges in the removeEdges list, each edge will be removed.\r\n for edge in removeEdges:\r\n graph[\"E\"].remove(edge)\r\n \r\n # The function pathFinder is called on the graph and the source vertex, which sets up the queue.\r\n pathFinder(graph, source)\r\n \r\n # While the list queue contains values, the pathFinder function is called on the graph, and the queue value at index 0.\r\n while len(queue) != 0:\r\n pathFinder(graph, queue[0])\r\n \r\n # Loop below is for formatting of the data, makes it easier to read.\r\n for key in dataInfo:\r\n print \"Vertex: \" + key + \", Distance: \" + str(dataInfo[key][\"distance\"]) + \", Ancestor: \" + str(dataInfo[key][\"ancestor\"]) + \", Descendants: \" + str(dataInfo[key][\"descendants\"]) + \", Color: \" + str(dataInfo[key][\"color\"]) + \".\" \r\n \r\n # Returns dictionary dataInfo.\r\n return dataInfo", "def dft(self, starting_vertex):\n # Create a s and push starting vertex\n ss = Stack()\n ss.push([starting_vertex])\n # Create a set of traversed vertices\n visited = []\n eldest = [] \n # While stack is not empty:\n while ss.size() > 0:\n # dequeue/pop the first vertex\n path = ss.pop()\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n # mark as visited\n visited.append(path[-1])\n print(visited)\n # enqueue all neightbors\n if not self.get_neighbors(path[-1]):\n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n eldest.append(path[-1])\n\n for next_vert in self.get_neighbors(path[-1]):\n new_path = list(path)\n # print(new_path)\n new_path.append(next_vert)\n ss.push(new_path)\n \n return min(eldest)", "def dft_recursive(self, starting_vertex, visited=None):\n # First, we set our initial condition\n if visited is None:\n # If no nodes have been visited, we create a set to store the nodes we visit\n visited = set()\n\n # Then we add the starting vertex to the visited set\n visited.add(starting_vertex)\n print(starting_vertex)\n\n # Call the function recursively on neighbors not visited\n # Lastly we write a for loop that will recursively call dft_recursive()\n for neighbor in self.vertices[starting_vertex]:\n # For each vertex, we check to see if any of the neighbors have already been visited\n if neighbor not in visited:\n # And if we find a neighbor that has not been visited, we recursively call dft_recursive() and pass it the neighbor and updated visited set\n self.dft_recursive(neighbor, visited)", "def _dfs_cycle_forest(G, root=None):\n # Create a directed graph from the depth-first search tree with\n # root node `root` in which tree edges are directed toward the\n # root and nontree edges are directed away from the root. For\n # each node with an incident nontree edge, this creates a\n # directed cycle starting with the nontree edge and returning to\n # that node.\n #\n # The `parent` node attribute stores the parent of each node in\n # the DFS tree. 
The `nontree` edge attribute indicates whether\n # the edge is a tree edge or a nontree edge.\n #\n # We also store the order of the nodes found in the depth-first\n # search in the `nodes` list.\n H = nx.DiGraph()\n nodes = []\n for u, v, d in nx.dfs_labeled_edges(G, source=root):\n if d == 'forward':\n # `dfs_labeled_edges()` yields (root, root, 'forward')\n # if it is beginning the search on a new connected\n # component.\n if u == v:\n H.add_node(v, parent=None)\n nodes.append(v)\n else:\n H.add_node(v, parent=u)\n H.add_edge(v, u, nontree=False)\n nodes.append(v)\n # `dfs_labeled_edges` considers nontree edges in both\n # orientations, so we need to not add the edge if it its\n # other orientation has been added.\n elif d == 'nontree' and v not in H[u]:\n H.add_edge(v, u, nontree=True)\n else:\n # Do nothing on 'reverse' edges; we only care about\n # forward and nontree edges.\n pass\n return H, nodes", "def depth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n stack = list(initial_nodes_iter)\n while stack:\n node = stack.pop()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n stack.extend(connected_to_functor(node))", "def dfs_recursive(self, starting_vertex, destination_vertex, visited=None, path=None):\n if visited is None: # if visited is not empty\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n if path is None: # if the path is empty \n path = [] # create an empty list \n visited.add(starting_vertex) # add the starting_vertex to the set \n path = path + [starting_vertex] # set the path \n\n if starting_vertex == destination_vertex: # if the starting_vertex is equal to the destination_vertex\n return path # return the path \n\n for neighbor in self.vertices[starting_vertex]: # loop through neighbors \n if neighbor not in visited: # if the neighbor has not been visited \n new_path = self.dfs_recursive(neighbor, destination_vertex, visited, path) # create a new path using the dfs_recursive method\n\n if new_path: # if there is a new_path \n return new_path # return the new path \n\n return None # return None ", "def dfs(visited: list, graph: AdjList, node: int):\n if node not in visited:\n visited.append(node)\n for neighbour in graph[node]:\n dfs(visited, graph, neighbour)", "def dfs(starting_vertex):\n s = Stack()\n\n s.push([starting_vertex])\n\n while s.size() > 0:\n p = s.pop()\n l = p[-1]\n\n if l not in new_visited_rooms:\n return p\n neighbors = set(get_neighbors(l))\n \n for n in neighbors:\n new_path = p.copy()\n new_path.append(n)\n s.push(new_path)", "def dfs(self, starting_vertex, destination_vertex):\n # This solution takes a slightly different approach as to how we are storing the path\n # Now, we always queue up the next vertex we want to see, and a list of all the vertices we looked at to get here\n # so if we are queueing up vertex 3 from our example, the tuple we create will be (3, [1,2])\n # because we had to go through 1 and 2 to get here\n neighbors_to_visit = Stack()\n visited = set()\n # add the first vertex, and an empty list indicating that we have not been to any other 
vertices yet\n neighbors_to_visit.push([starting_vertex])\n # loop through the stack\n while neighbors_to_visit.size() > 0:\n path = neighbors_to_visit.pop()\n # pull out the current vertex so its easier to read\n vertex = path[-1] # last one in the path is our current vertex\n # if the vertex is the destination return it plus the path we took to get here\n if vertex == destination_vertex:\n return path\n # make sure the vertex isnt something we have seen already\n if vertex not in visited:\n # mark the vertex as visited\n visited.add(vertex)\n # add neighbors to the stack\n for neighbor in self.get_neighbors(vertex):\n new_path = path\n new_path.append(neighbor)\n neighbors_to_visit.push(new_path)", "def dfs(get_neighbors, source, target):\n\n parents = {}\n visited = set()\n stack = collections.deque()\n stack.append(source)\n while stack:\n vertex = stack.pop()\n if vertex == target:\n return _backtrack(target, lambda v: parents.get(v))\n visited.add(vertex)\n neighbors = [n for n in get_neighbors(vertex) if n not in visited]\n if neighbors:\n stack.append(vertex)\n stack.append(neighbors[0])\n parents[neighbors[0]] = vertex\n return []", "def dft(self, starting_vertex):\n \n visited = []\n stack = Stack()\n\n stack.add(starting_vertex)\n\n while len(stack):\n current = stack.pop()\n\n if current not in visited:\n print(current)\n visited.append(current)\n \n for child in self.vertices[current]:\n if child not in visited:\n stack.add(child)", "def directed_dfs(self,\n node_or_name: Union[str, Node],\n stop_at: Optional[Set[Node]] = None,\n go_up: bool = False,\n yield_start_node=False,\n visited=None):\n node = resolve_node_or_str(node_or_name, G=self)\n if visited is None:\n visited = {node}\n started = False\n if stop_at is None:\n stop_at = {}\n else:\n started = True\n if node in stop_at:\n return\n if started or yield_start_node:\n yield node\n if not go_up:\n for edge in self.out_edges(node.name):\n if edge.to_node in visited:\n continue\n visited.add(edge.to_node)\n yield from self.directed_dfs(edge.to_node, stop_at=stop_at, go_up=go_up, visited=visited)\n if go_up:\n for edge in self.in_edges(node.name):\n if edge.from_node in visited:\n continue\n visited.add(edge.from_node)\n yield from self.directed_dfs(edge.from_node, stop_at=stop_at, go_up=go_up, visited=visited)", "def dft(self, starting_vertex):\n # create an empty stack and push the starting vertex ID\n stack = Stack()\n stack.push(starting_vertex)\n # create an empty Set to store the visited vertices\n visited = set()\n # while the stack is not empty ...\n while stack.size() > 0:\n # pop the first vertex\n vert = stack.pop()\n # if that vertex has not been visited ..\n if vert not in visited:\n # mark it is visited\n visited.add(vert)\n print(vert)\n # then add all of its neighbors to the top of the stack\n for neighbor in self.vertices[vert]: #self.get_neighbors(vert)\n stack.push(neighbor)", "def dfs(self, starting_vertex, destination_vertex): # great for if you know the start and end, like a maze with 1 entry/1 exit\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n s = Stack() # create an empty Stack\n s.push([starting_vertex]) # push the starting vertex to the top of the stack \n\n while s.size() > 0: # loop if the size is greater than 0\n path = s.pop() # pop off the top element of the stack and store \n v = path[-1] # store the vertex from the end of path\n\n if v == destination_vertex: # if v is equal to the destination_vertex\n return path # return the path \n if v not in 
visited: # if v has not been visited yet \n visited.add(v) # add v to the vistied set \n\n for neighbor in self.vertices[v]: # loop through the neighbors\n path_copy = list(path) # make a copy of the path \n path_copy.append(neighbor) # append each neighbor to the back of the path copy \n s.push(path_copy) # push the path copy to the Stack", "def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)", "def _pfs_nodes(cls, graph, source, size, priority):\n if size < 1:\n return iter(())\n\n # use min-heap to implement (max) priority queue\n # use insertion order to break priority tie\n queue = []\n counter = itertools.count()\n push = lambda priority, node: heappush(queue, (-priority, next(counter), node))\n pop = partial(heappop, queue)\n\n visited = set()\n enqueued = set()\n push(priority(source), source)\n\n while queue and len(visited) < size:\n _, _, node = pop()\n\n if node in visited:\n continue\n\n visited.add(node)\n\n for neighbor in graph[node]:\n if neighbor not in enqueued:\n enqueued.add(neighbor)\n push(priority(neighbor), neighbor)\n\n return iter(visited)", "def dft_recursive(self, starting_vertex):\n \n visited = []\n\n def helper(vert, visited):\n visited.append(vert)\n print(vert)\n\n for child in self.vertices[vert]:\n if child not in visited:\n helper(child, visited)\n\n helper(starting_vertex, visited)", "def dfs_labeled_edges_generator(graph, source, reverse=..., has_reverse_edge=..., has_nontree_edge=..., return_labels=...): # -> tuple[Unknown, Unknown]:\n ...", "def dfs_iter(graph, start):\n # vkladam uzel a index potencialniho naslednika, kterym mam pokracovat\n stack = [(start, 0)]\n time = 1\n graph.discovery_time[start] = time\n graph.visited[start] = True\n\n while stack: # not empty\n u, v = stack.pop()\n\n while v < graph.size and not is_edge(graph, u, v):\n v += 1\n\n if v < graph.size:\n # found successor, u is not yet finished\n stack.append((u, v + 1))\n\n if not graph.visited[v]:\n # we have discovered v\n stack.append((v, 0))\n graph.parent[v] = u\n graph.visited[v] = True\n time += 1\n graph.discovery_time[v] = time\n else:\n # u has no more successors\n time += 1\n graph.finishing_time[u] = time", "def dfs_loop(graph_dict, nodes, track):\n\n for node in nodes:\n if node not in track.explored:\n track.current_source = node\n dfs(graph_dict, node, track)", "def depth_first_traversal(self, visitor_function=None):\n self._reset_traversal_state()\n self.time = 0\n\n result = False\n\n for n in self.nodes.values():\n if NodeColor.WHITE == n.color:\n stack = collections.deque()\n stack.append(n)\n\n while len(stack) > 0:\n node = stack.pop()\n\n if NodeColor.WHITE == node.color:\n # Need to stay on the stack until we're done exploring things connected to this node\n stack.append(node)\n\n self.time += 1\n node.discovery_time = self.time\n self._visit_enter(node, visitor_function)\n node.color = NodeColor.GRAY\n\n for descendant in self.edges[node]:\n self.logger.debug(\n 'Looking at [{}] -> [{} / {}]'.format(node.name, descendant.name, descendant.color))\n if NodeColor.WHITE == descendant.color:\n descendant.predecessor = node\n 
stack.append(descendant)\n elif NodeColor.GRAY == descendant.color:\n self.logger.debug(\n 'Found cycle involving edge [{}] -> [{}]'.format(node.name, descendant.name))\n result = True\n\n elif NodeColor.GRAY == node.color:\n self.time += 1\n node.color = NodeColor.BLACK\n node.finishing_time = self.time\n self._visit_exit(node, visitor_function)\n\n\n elif NodeColor.GRAY == n.color:\n self.logger.info('Found cycle involving node [{}]'.format(n.name))\n result = True\n\n return result", "def dfs_recursive(self, starting_vertex, target_vertex, visited=None, path=None):\n # if starting_vertex in visited:\n # return None\n # if starting_vertex == destination_vertex:\n # return [starting_vertex]\n # visited.add(starting_vertex)\n # for vertex in self.get_neighbors(starting_vertex):\n # path = self.dfs_recursive(vertex, destination_vertex, visited)\n # if path is not None: \n # return [starting_vertex] + path\n if visited == None:\n visited = set()\n if path == None:\n path = []\n visited.add(starting_vertex)\n path = path + [starting_vertex]\n if starting_vertex == target_vertex:\n return path\n for neighbor in self.get_neighbors(starting_vertex):\n if neighbor not in visited:\n new_path = self.dfs_recursive(neighbor, target_vertex, visited, path)\n if new_path is not None: \n return new_path\n return None", "def recursive_dfs(self, start, end, visited=None, path=None):\n # initialize path list with starting vertice\n if path is None:\n path = [start]\n\n # initialize empty set for visited vertices\n if visited is None:\n visited = set()\n \n # add starting vertice to visited vertices\n visited.add(start)\n\n # store all neighbors of start vertice in set\n neighbors = set([x for x in start.get_neighbors()])\n \n # initialize loop count as 0\n loop_count = 0\n\n # iterate through vertices\n for next in neighbors - visited:\n # remove visited path if it results in a dead end (incomplete)\n if loop_count > 0:\n path.pop()\n\n # add to list of visited vertices\n path.append(next)\n\n # full path completed\n if next == end:\n visited.add(next)\n ordered_vertice_path = ([(x.id) for x in path])\n print(\"There exists a path between vertex %s and %s: TRUE\" %(sys.argv[2], sys.argv[3]))\n print(\"Vertices in the path:\", ','.join(ordered_vertice_path))\n return True\n\n loop_count += 1\n\n self.recursive_dfs(next, end, visited, path)", "def dft(self, starting_vertex):\n \"\"\"\n Loop until the stack is empty. Remove the last added\n vertex and store it's value as the current vertex. 
\n Print the current vertex then loop over all it's edges.\n Add them to the stack and the cache.\n \"\"\" \n stack = [starting_vertex]\n stacked = {starting_vertex}\n while len(stack) > 0:\n currentVertex = stack.pop(-1)\n print(currentVertex)\n for edge in self.get_neighbors(currentVertex):\n if edge not in stacked:\n stack.append(edge)\n stacked.add(edge)", "def dfs_recursive(self, starting_vertex, destination_vertex, visited = None, path = None):\n # 1) base case >> where to stop recursion\n # 2) calls itself from within\n # 3) each iteration approaches base case\n\n # init a set that persists after recursions loops to save visited set & path list\n if visited == None:\n visited = set()\n\n if path == None:\n path = []\n\n if starting_vertex not in visited: # 1) & 3) Check if vertex has NOT been visited\n visited.add(starting_vertex) # if True, add to visited set\n path_c = path[:] # copy path\n path_c.append(starting_vertex)\n\n if starting_vertex == destination_vertex: # check if vertex is destination_vertex\n return path_c # If TRUE, return copied path \n\n # perform recursion on neighbor\n for n in self.get_neighbors(starting_vertex):\n new_path = self.dfs_recursive(n, destination_vertex, visited, path_c) # 2\n\n if new_path is not None:\n return new_path\n\n return None", "def DFSUtility(obj,vertex,visited,subGraph):\n visited[vertex] = True\n subGraph.append(vertex)\n for nxtVertex in obj.adjList[vertex]:\n if visited[nxtVertex]:\n continue\n DFSUtility(obj,nxtVertex,visited,subGraph)", "def bfs(self, starting_vertex, destination_vertex):\n # create an empty queue and enqueue A-PATH-TO the starting vertex ID\n # create a Set to store the visited vertices\n # while the queue is not empty ..\n # dequeue the first PATH\n # grab the last vertex from the PATH\n # if that vertex has not been visited ..\n # check if its the target\n #if yes, return path\n #mark it as visited\n # add A PATH TO its neighbots to the back of the queue\n # copt the path\n # append the neighbor to the back\n \n \n # create an empty Queue \n queue = Queue()\n #push the starting vertex ID as list\n queue.enqueue([starting_vertex])\n # create an empty Set to store the visited vertices\n visited = set()\n # while the queue is not empty ...\n while queue.size() > 0:\n # dequeue the first vertex\n path = queue.dequeue()\n vert = path[-1]\n # if that vertex has not been visited ..\n if vert not in visited:\n #check for target\n if vert == destination_vertex:\n return path\n # mark it is visited\n visited.add(vert)\n # then add all of its neighbors to the top of the stack\n for neighbor in self.vertices[vert]: #self.get_neighbors(vert)\n #copy path to avoid pass by reference\n new_path = list(path) # make a copy\n new_path.append(neighbor)\n queue.enqueue(new_path)", "def dfs_inorder_iter(graph, start_node):\n nonlocal t\n\n if visited[start_node]:\n return\n\n seen_once = {}\n nodes_seen = 0\n stack = [start_node]\n nodes_in_stack = set(stack)\n\n while stack:\n node = stack.pop()\n nodes_in_stack.remove(node)\n if not seen_once.get(node):\n # It's our first time visiting the node,\n # so put it back on the stack; we won't take\n # it off permanently until we're backtracking\n stack.append(node)\n nodes_in_stack.add(node)\n seen_once[node] = True\n for neighbor_node in graph[node]:\n if (not visited[neighbor_node]\n and not seen_once.get(neighbor_node)\n and neighbor_node not in nodes_in_stack):\n stack.append(neighbor_node)\n nodes_in_stack.add(neighbor_node)\n else:\n # We're backtracking\n visited[node] = True\n 
finishing_times[t] = node\n t += 1\n sccs[s] += 1", "def topological_nodes_generator(graph, reverse=...):\n ...", "def dft(self, starting_vertex):\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n s = Stack() # create an empty Stack \n s.push(starting_vertex) # push the starting_vertex to the top of the stack\n\n while s.size() > 0: # loop if the size is greater than 0\n v = s.pop() # pop off first element and store \n\n if v not in visited: # if v has not been visited yet\n visited.add(v) # add to the set \n print(v)\n for neighbor in self.vertices[v]: # loop through neighbors \n s.push(neighbor) # add each neighbor to the bottom of the stack", "def dfs( self ):\n\n #print self.state; \n #print self.visited;\n SearchProblem.stateVisited= SearchProblem.stateVisited+1 \n \n if self.stop: # check class variable and stop searching...\n return;\n\n for action in self.edges(): # consider each edge leading out of this node\n\n action.destination.path = self.path + str(action.label); \n # get the label associated with the\n # action and append it to the path\n # string\n\n action.destination.visited = self.visited.copy();\n # make copy of source node's visited set\n # and use it as destination node's\n # visited set\n\n action.destination.visited.add( repr(action.destination.state) );\n\n if action.destination.is_target(): \n # check if destination of edge is target node\n action.destination.target_found(); # perform target found action\n if not self.continue_search(): # stop searching if not required\n SearchProblem.stop = True; # set class variable to record that we\n break; # are done\n\n if repr(action.destination.state) in self.visited:\n continue; # skip if we've visited this one before\n\n action.destination.dfs(); # resume recursive search ", "def breadth_first_traversal(self, start_node, visitor_function=None, max_depth=None):\n self._reset_traversal_state()\n\n if isinstance(start_node, str):\n start_node = self.nodes[start_node]\n\n if not isinstance(start_node, ProcessNode):\n raise TypeError('Expect start_node to either be a string or a ProcessNode. 
Got [{}] instead'.format(\n str(type(start_node))))\n\n start_node.discovery_time = 1\n queue = collections.deque()\n queue.appendleft(start_node)\n\n while len(queue) > 0:\n node = queue.pop()\n assert NodeColor.WHITE == node.color\n\n if node.predecessor is not None:\n node.discovery_time = node.predecessor.discovery_time + 1\n\n self._visit_enter(node, visitor_function)\n\n node.color = NodeColor.GRAY\n\n if max_depth is None or node.discovery_time + 1 < max_depth:\n for descendant in self.edges[node]:\n if NodeColor.WHITE == descendant:\n descendant.predecessor = node\n queue.appendleft(descendant)\n\n node.finishing_time = self.time\n node.color = NodeColor.BLACK\n\n self._visit_exit(node, visitor_function)", "def dfs(self, starting_vertex, destination_vertex):\n # First, we create an empty stack and push the starting vertex onto the stack\n ss = Stack()\n ss.push([starting_vertex])\n\n # Then we create a set to store the vertices we visit\n visited = set()\n\n # We write a while loop that will run as long as the stack is not empty\n while ss.size() > 0:\n # We pop the node off the top of the stack and set (v) to it\n v = ss.pop()\n\n # Next we check to see if the last vertex has already been visited\n if v[-1] not in visited:\n # If it hasn't been visited, we check to see if it is the node we're looking\n if v[-1] == destination_vertex:\n # If it is, we return the list of nodes we followed to arrive at said destination\n return v\n\n # If it's not the node we're looking for, we mark it as visited\n visited.add(v[-1])\n\n # Lastly, we write a for loop to loop through the neighbors of the vertex we're looking at\n for next_vert in self.get_neighbors(v[-1]):\n # For each neighbor, we create a copy of the current path and append the neighbor, allowing us to create multiple paths forward depending on the number of neighbors a vertex has\n new_path = list(v)\n new_path.append(next_vert)\n\n # Then we push to the stack the path to the next neighbor\n ss.push(new_path)", "def breadth_first_traversal(self, start):\n visited = []\n visited.append(start)\n start_visited = visited\n while True:\n temp = []\n for node_ in start_visited:\n for i in self.neighbors(node_):\n if i not in visited:\n visited.append(i)\n temp.append(i)\n start_visited = temp\n if not temp:\n break\n return visited", "def dfs_recursive(self, starting_vertex, destination_vertex, cache = None):\n \"\"\"\n Make starting_vertex a list if it isn't one already. \n Create a cache set if one isn't passed in. Check if \n the last element in starting vertex is the destination. Check\n if the last element in the path is in the cache. 
If it's not \n add all of that vertex's edges to new lists to run new \n recursions.\n \"\"\"\n if not cache:\n cache = set()\n if not type(starting_vertex) is list:\n starting_vertex = [starting_vertex]\n\n currentVertex = starting_vertex[-1]\n\n if currentVertex == destination_vertex:\n return starting_vertex\n\n if currentVertex not in cache:\n cache.add(currentVertex)\n\n for edge in self.get_neighbors(currentVertex):\n newPath = starting_vertex.copy()\n newPath.append(edge)\n result = self.dfs_recursive(newPath, destination_vertex, cache)\n if result:\n return result\n\n else:\n return None", "def dfs_recursive(self, starting_vertex, destination_vertex, visited=None, path=None):\n\n if not visited: visited=[]\n if not path: path=[]\n\n visited.append(starting_vertex)\n path = path + [starting_vertex]\n\n if starting_vertex == destination_vertex: return path\n\n for neighbor in self.get_neighbors(starting_vertex):\n if neighbor not in visited:\n new_path = self.dfs_recursive(neighbor, destination_vertex, visited, path)\n if new_path is not None:\n return new_path\n\n return None", "def breadth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n q = collections.deque(initial_nodes_iter)\n while q:\n node = q.popleft()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n q.extend(connected_to_functor(node))", "def dfs(self, starting_vertex, destination_vertex):\n\n parents = {}\n\n for index, (p, c) in enumerate(self.vertices.items()):\n for child in c:\n if child not in parents:\n parents[child] = []\n parents[child].append(p)\n\n path = []\n current = destination_vertex\n path.append(destination_vertex)\n\n while len(parents[current]):\n parent = parents[current][0]\n path.append(parent)\n if parent == starting_vertex:\n break\n current = parent\n\n path.reverse()\n return path", "def DFS(obj,vertex,visited=dict()):\n validateVertex(vertex,obj.vertexList)\n #order = []\n #visited = dict()\n subGraph= []\n for ver in obj.vertexList:\n visited[ver] = False\n\n DFSUtility(obj,vertex,visited,subGraph)\n return subGraph", "def dfs(self, s):\n g = Graph(attr={DIRECTED: True})\n adjacent_type = '+' if DIRECTED in self.attr and self.attr[\n DIRECTED] else None\n # Insert s root node in stack \n stack = collections.deque()\n # Initial node does not have origin, it is represented by # \n stack.append(('#', s))\n\n while (len(stack) > 0):\n (source, target) = stack.pop()\n w = self.get_vertex(target)\n if DISCOVERED not in w.attributes or w.attributes[\n DISCOVERED] is False:\n w.attributes[DISCOVERED] = True\n g.add_vertex(w)\n if (source != '#'):\n g.add_edge(edge.Edge(source, w.id), True)\n for e in self.get_adjacent_vertices_by_vertex(w.id,\n adjacent_type):\n stack.append((w.id, e))\n return g", "def dfs(self, starting_vertex, destination_vertex):\n visited = set()\n paths = [[starting_vertex]]\n \"\"\"\n While the length of possible paths is not zero. \n Store the current path and remove it from possible \n paths. Return the last path if it's the destination. 
\n If the path hasn't been visited yet add it to the \n visited list and loop over it's edges creating paths \n to check later. \n \"\"\"\n while len(paths) > 0:\n path = paths.pop(-1)\n vertex = path[-1]\n if vertex == destination_vertex:\n return path\n if vertex not in visited:\n visited.add(vertex)\n for key in self.get_neighbors(vertex):\n newPath = path + [key]\n paths.append(newPath)", "def dft_recursive(self, starting_vertex, cache = None):\n \"\"\"\n If this is the first repetition create a cache set. If the \n current vertex is not in the cache add it and print the \n vertex. For every edge the vertex has run another repetition.\n \"\"\"\n if not cache:\n cache = set()\n if starting_vertex not in cache:\n cache.add(starting_vertex)\n print(starting_vertex)\n for edge in self.get_neighbors(starting_vertex):\n if edge not in cache:\n self.dft_recursive(edge, cache)", "def shortest_path(source, target):\n #although lecture checks for goal when a node is popped off the frontier, efficiency of search can be improved\n #by checking for a goal as nodes are ADDED. If goal detected, don't add it to frontier, just return the solution\n #immediately\n\n #create start point\n start = Node(state = source, parent = None, action = None)\n frontier = QueueFrontier()\n frontier.add(start)\n\n #create explored set\n explored = set()\n\n while True:\n #if nothing left in frontier, no path exists\n if frontier.empty():\n return None\n\n #choose a node from the frontier\n node = frontier.remove()\n #if node is goal, we have solution\n\n #add neighbors 2 frontier using function THATS ALR THERE DUMMY\n for (movie, star) in neighbors_for_person(node.state):\n newNode = Node(state = star, parent = node, action=movie)\n if not frontier.contains_state(newNode) and newNode.state not in explored:\n if newNode.state == target:\n #reverse the solution\n solution = []\n while newNode.parent is not None:\n actionTuple = (newNode.action, newNode.state)\n solution.append(actionTuple)\n newNode = newNode.parent\n solution.reverse()\n return solution\n else: frontier.add(newNode)\n\n #mark state as explored\n explored.add(node.state)", "def bft(self, starting_vertex):\n # Create a q and enqueue starting vertex\n qq = Queue()\n qq.enqueue([starting_vertex])\n # Create a set of traversed vertices\n visited = set()\n # eldest = []\n depth_counter = {} \n starter = 0 \n # visited = []\n # While queue is not empty:\n while qq.size() > 0:\n # dequeue/pop the first vertex\n path = qq.dequeue()\n # if not visited\n # print(visited)\n starter += 1\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n depth_counter[starter] = path[-1]\n # mark as visited\n visited.add(path[-1])\n # visited.append(path[-1])\n # enqueue all neightbors\n \n if not self.get_neighbors(path[-1]):\n \n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n depth_counter[starter] = path[-1]\n # print(depth_counter)\n # eldest.append(path[-1])\n else:\n # starter += 1\n for next_vert in self.get_neighbors(path[-1]): \n new_path = list(path)\n new_path.append(next_vert)\n qq.enqueue(new_path)\n\n\n return depth_counter[starter]", "def dfs(self, start_node, cbfunc):\n visited = set()\n stack = [start_node]\n\n while len(stack) != 0:\n node = stack.pop()\n if node in visited:\n continue\n cbfunc(node)\n visited.add(node)\n for neighbor_node in node.each_neighbor():\n stack.append(neighbor_node)", "def find_reachable_nodes_from(self, start_node, **kwargs):\r\n\t\treturn 
BreadthFirstTraverser(start_node, **kwargs)", "def bfs(graph, source):\n visited = [False] * len(graph.graph)\n print(visited)\n\n result = \"\"\n queue = []\n\n queue.append(source)\n visited[source] = True\n\n while queue:\n source = queue.pop(0)\n result += str(source)\n\n while graph.graph[source] is not None:\n data = graph.graph[source].vertex\n if not visited[data]:\n queue.append(data)\n visited[data] = True\n graph.graph[source] = graph.graph[source].next\n return result", "def DFS(adj): # adj is the list of vertices in graph G\n\n global cc\n global visited\n\n for v in range(len(adj)): # adjacency list has length == number of nodes\n visited[v] = False\n cc = 1\n\n for v in range(len(adj)):\n if not visited[v]:\n explore(v)\n # increment connected component count after each return from explore()\n cc = cc + 1 # only increment for each unvisited node explored here\n return cc", "def dfs(start_vertex):\n # initially, the stack contains only the start vertex and visited_vertices is empty\n stack = deque()\n stack.append(start_vertex)\n visited_vertices = set()\n\n result = []\n while len(stack) > 0:\n # 1. pop a vertex from the stack\n current_vertex = stack.pop()\n\n # 2. ignoring this vertex if it has been visited\n if current_vertex in visited_vertices:\n continue\n\n # 3. mark as visited, so we will not visit it anymore\n visited_vertices.add(current_vertex)\n result.append(current_vertex.get_label())\n\n # 4. get all adjacent vertices which HAVE NOT been visited\n adjacent_vertices = []\n for edge in current_vertex.get_outbound_edges():\n adjacent_vertex = edge.get_end_vertex()\n if adjacent_vertex not in visited_vertices:\n adjacent_vertices.append(adjacent_vertex)\n\n # if necessary we may do some manipulation with adjacent_vertices, e.g. sort them\n # 5. add all adjacent vertices to the stack(DFS)\n stack.extend(adjacent_vertices)\n\n return result", "def bfs(self, starting_vertex, destination_vertex):\n visited = set()\n paths = [[starting_vertex]]\n \"\"\"\n For every list in paths. If the last item in the list is \n the destination return the list. If the last item is not \n in the visited cache add it and make a new path for all \n of it's edges. 
If the last item has been visited remove \n it from the paths list.\n \"\"\"\n for path in paths:\n vertex = path[-1]\n if vertex == destination_vertex:\n return path\n if vertex not in visited:\n visited.add(vertex)\n for key in self.get_neighbors(vertex):\n newPath = path + [key]\n paths.append(newPath)", "def bfs(graph, start_vertex):\n\n queue = deque()\n queue.appendleft(start_vertex)\n explored_vertices = [start_vertex]\n\n while len(queue) != 0:\n vertex = queue.pop()\n neighbours = graph.neighbours(vertex)\n for neighbour in neighbours:\n if neighbour not in explored_vertices:\n explored_vertices.append(neighbour)\n queue.appendleft(neighbour)\n\n return explored_vertices", "def bfs(get_neighbors, source, target):\n\n parents = {}\n visited = set()\n queue = collections.deque()\n queue.append(source)\n while queue:\n vertex = queue.popleft()\n if vertex == target:\n return _backtrack(target, lambda v: parents.get(v))\n if vertex not in visited:\n visited.add(vertex)\n for neighbor in filter(lambda n: n not in visited, get_neighbors(vertex)):\n queue.append(neighbor)\n parents[neighbor] = vertex\n return []", "def bfs(graph, start_node):\n start_node.distance = 0\n start.set_predecessor(None)\n queue = list()\n queue.append(start_node)\n while (len(queue) > 0):\n current_vertex = queue.pop()\n current_vertex.setState = \"visiting\"\n for vertex in current_vertex.links():\n if (vertex.getState == \"unvisited\"):\n vertex.setState == \"tobevisited\"\n vertex.set_predecessor(current_vertex)\n vertex.distance = current_vertex.distance + 1\n queue.append(vertex)\n current_vertex.setState = \"visited\"", "def dft(self, starting_vertex):\n # create a plan to visit stack and add starting_vertex to it\n plan_to_visit = Stack()\n plan_to_visit.push(starting_vertex)\n # create a set for visited_vertices\n visited_vertices = set()\n # while the plan_to_visit stack is not Empty:\n while plan_to_visit.size() > 0:\n # pop the first vertex on the stack\n current_vertex = plan_to_visit.pop()\n # if its not been visited\n if current_vertex not in visited_vertices:\n # print the vertex\n print(current_vertex)\n # mark it as visited, (add it to visited_verticles)\n visited_vertices.add(current_vertex)\n # add all unvisited neighbors to the queue\n for neighbor in self.get_neighbors(current_vertex):\n if neighbor not in visited_vertices:\n plan_to_visit.push(neighbor)", "def bfs(self, starting_vertex, destination_vertex):\n \n def populate_parents():\n parents = {\n # '1': [],\n # '2': [],\n # '3': [],\n }\n\n for index, (k, v) in enumerate(self.vertices.items()):\n parents[k] = []\n\n queue = Q()\n visited = []\n\n queue.add(starting_vertex)\n visited.append(starting_vertex)\n \n while len(queue):\n node = queue.pop()\n\n for child in self.vertices[node]:\n if child not in visited:\n queue.add(child)\n visited.append(child)\n parents[child].append(node)\n\n return parents\n\n parents = populate_parents()\n path = []\n current = destination_vertex\n path.append(destination_vertex)\n\n while len(parents[current]):\n parent = parents[current][0]\n path.append(parent)\n current = parent\n\n path.reverse()\n\n return path", "def __canReachDFS(current, visited):\n for neighbor in current.getConnections():\n # this check prevents cycles from infinitely looping\n if neighbor not in visited:\n visited.add(neighbor)\n __canReachDFS(neighbor, visited)", "def dfs(self):\n # Run time => O(V)\n for aVertex in self:\n aVertex.setColor('white')\n aVertex.setPredecessor(-1)\n # Run time => O(V)\n for aVertex in self:\n 
if aVertex.getColor() == 'white':\n self.dfs_visit(aVertex)", "def bfs(graph,start):\n #keeps track of nodes to be visited\n queue = []\n #keeps track of nodes already visited\n explored = []\n queue.append(start)\n while queue:\n #remove first node from queue\n curr_node = queue.pop(0)\n #check if node is visited\n if curr_node not in explored:\n explored.append(curr_node)\n adjacent_nodes = graph[curr_node]\n #add adjacent nodes to queue\n for i in adjacent_nodes:\n queue.append(i)\n return explored", "def ss_breadth_first_search_tree(\n graph: ScipyGraph, source_node: NodeID, depth_limit: int\n ) -> Tuple[NumpyNodeMap, NumpyNodeMap]:\n\n is_directed = ScipyGraph.Type.compute_abstract_properties(\n graph, {\"is_directed\"}\n )[\"is_directed\"]\n node_list: np.ndarray = graph.node_list\n depth_limit = len(node_list) - 1 if depth_limit == -1 else depth_limit\n source_node_position = np.flatnonzero(node_list == source_node).item()\n bfs_tree_csr = ss.csgraph.breadth_first_tree( # depth_limit is not used here!\n graph.value, source_node_position, directed=is_directed\n ).astype(bool)\n\n # Calcuate Depths\n depths = np.full(len(node_list), depth_limit + 1, dtype=int)\n depths[source_node_position] = 0\n current_node_positions = np.array([source_node_position], dtype=int)\n for depth in range(1, depth_limit + 1):\n selector = np.zeros(len(node_list), dtype=bool)\n selector[current_node_positions] = True\n current_node_positions = selector @ bfs_tree_csr\n if not current_node_positions.any():\n break\n depths[current_node_positions] = depth\n\n # Calculate Parents\n parents = np.empty(len(node_list), dtype=int)\n bfs_tree_coo = bfs_tree_csr.tocoo()\n parents[source_node_position] = source_node\n parents[bfs_tree_coo.col] = bfs_tree_coo.row\n\n # Ensure depth_limit\n valid_nodes = graph.node_list\n valid_depths_selector = depths <= depth_limit\n depths = depths[valid_depths_selector]\n parents = parents[valid_depths_selector]\n valid_nodes = valid_nodes[valid_depths_selector]\n depths_nodes = valid_nodes.copy()\n parents_nodes = valid_nodes.copy()\n\n node2depth = NumpyNodeMap(depths, depths_nodes)\n node2parent = NumpyNodeMap(parents, parents_nodes)\n\n return node2depth, node2parent", "def dft(self, starting_vertex):\n # First, we create an empty stack and push the starting vertex\n ss = Stack()\n ss.push(starting_vertex)\n\n # Then we create a set to store the vertices we visit\n visited = set()\n\n # Here we write a while loop that will run as long as the stack is not empty\n while ss.size() > 0:\n # We pop the node off the top of the stack and set (v) to it\n v = ss.pop()\n\n # Next we check to see if that vertex has already been visited\n if v not in visited:\n # If it hasn't been visited, we print it out and mark it as visited\n print(v)\n visited.add(v)\n\n # Lastly, we push all its neighbors on the stack\n for next_vert in self.get_neighbors(v):\n ss.push(next_vert)", "def dfs_recursive(self, starting_vertex, destination_vertex, visited=None, path=None):\n # First we write a couple if statements for the initial case where there are no visited nodes and therefore no paths\n if visited is None:\n # If nothing has been visited yet, we create an empty set\n visited = set()\n\n if path is None:\n # If there are no stored paths, we create an empty array to initialize path\n path = []\n\n # Then we add the starting vertex to the list of visited notes and create a copy of the current path plus the starting vertex\n visited.add(starting_vertex)\n new_path = path + [starting_vertex]\n\n # Then 
we check to see if the starting vertex is the destination vertex and return the path to said vertex\n if starting_vertex == destination_vertex:\n return new_path\n\n # Lastly, we write a for loop that loops through the neighbors of the starting vertex\n for neighbor in self.vertices[starting_vertex]:\n # For each neighbor, we check to see if it has been visited already\n if neighbor not in visited:\n # If it has not been visited, we create a variable pointing to the path to said neighbor and then recursively call the function, passing it the neighbor/node we're visiting next, the destination we're looking for, an updated list of visited nodes, and the updated path\n path_to_neighbor = self.dfs_recursive(\n neighbor, destination_vertex, visited, new_path)\n\n # Finally, once we have a path to the neighbor, we return it\n if path_to_neighbor:\n return path_to_neighbor", "def traverse_breadth_first(self, src: int = 0, graph: GraphInterface = None):\n if not isinstance(graph, DiGraph) or graph is None or self._graph.get_node(src) is None:\n return\n curr = graph.get_node(src)\n\n q = Queue()\n\n q.put(curr)\n curr.tag += 1\n\n while not q.empty():\n\n curr = q.get()\n out_edges = graph.all_out_edges_of_node(curr.key)\n\n for i in out_edges:\n out_edge = out_edges[i]\n neighbor = graph.get_node(out_edge.dest) # Get curr's neighbor\n if neighbor.tag == curr.tag - 1:\n neighbor.tag += 1 # If un-tagged -> tag it.\n q.put(neighbor) # and enqueue it", "def pathFinder(graph, sourceVertex):\r\n \r\n # List removeEdges will be used to store the edges that will be removed from the graph dictionary after the loop ends. Makes the code more efficient, as you don't want to loop through a million vertices every time, now do you?\r\n removeEdges = []\r\n \r\n # Loop through edges in the graph, will be used to find adjacent vertices.\r\n for edge in graph[\"E\"]:\r\n \r\n # If the sourceVertex is in the edge and the edge is not discovered yet, then edit and change values in the main dictionary, dataInfo.\r\n if (sourceVertex in edge) and (dataInfo[str(symmetricVertex(edge, sourceVertex))] != \"gray\"):\r\n otherVertex = symmetricVertex(edge, sourceVertex)\r\n \r\n # Adds variable otherVertex to the descendants of the sourceVertex.\r\n dataInfo[str(sourceVertex)][\"descendants\"].append(otherVertex)\r\n \r\n # Updates key(otherVertex) to correct values. Ancestor is always the sourceVertex, the distance is always the distance of sourceVertex incremented by one, and the color is updated to gray as it is added to the queue.\r\n dataInfo[str(otherVertex)] = {\"ancestor\": sourceVertex, \"descendants\": [], \"distance\": (dataInfo[str(sourceVertex)][\"distance\"] + 1), \"color\": \"gray\"}\r\n \r\n # Edge includes two discovered edges, so it will be removed to stop redundancy. 
It is added to the removeEdges list.\r\n removeEdges.append(edge)\r\n \r\n # Appends the discovered vertex to the queue.\r\n queue.append(otherVertex)\r\n \r\n # After the loop ends, the edges that contain the source vertex have been exhausted, so the color is updated to black.\r\n dataInfo[str(sourceVertex)][\"color\"] = \"black\" \r\n \r\n # If the sourceVertex is in the queue, it is removed, as all of the edges containing it have been exhausted.\r\n if sourceVertex in queue:\r\n queue.remove(sourceVertex)\r\n \r\n # Loop through the edges in the removeEdges list, each edge will be removed.\r\n for edge in removeEdges:\r\n graph[\"E\"].remove(edge)", "def dfs(graph, start):\n dfs_rec(graph, start, 0)", "def dfs_recursive(self, starting_vertex, target, visited=None, path=None):\n if visited is None:\n visited = set()\n if path is None:\n path = []\n \n visited.add(starting_vertex)\n path = path + [starting_vertex]\n if starting_vertex == target:\n return path\n for neighb_vert in self.vertices[starting_vertex]:\n if neighb_vert not in visited:\n new_path = self.dfs_recursive(neighb_vert, target, visited, path)\n if new_path:\n return new_path\n return None", "def dfs_paths(graph, start, goal, method='dfs'):\n \n # Define the search method\n stack_pop = -1\n if method == 'bfs':\n stack_pop = 0\n \n stack = [(start, [start])]\n while stack:\n (vertex, path) = stack.pop(stack_pop)\n neighbors = node_neighbors(graph, vertex)\n for next_node in set(neighbors) - set(path):\n if next_node == goal:\n yield path + [next_node]\n else:\n stack.append((next_node, path + [next_node]))", "def main():\n n = int(input(\"Enter the number of nodes: \"))\n m = int(input(\"Enter the number of edges: \"))\n \n adjList = [[] for i in range(n)]\n \n print(\"Enter the edges: \")\n for i in range(m):\n x, y = input().split(\" \")\n x = int(x)\n y = int(y)\n adjList[x].append(y)\n adjList[y].append(x)\n \n s = int(input(\"Enter the source: \"))\n \n BFS(adjList, s, n)", "def dfs_helper(self, start_node):\n ret_list = [start_node.value]\n\n start_node.visited = True\n for edge in start_node.edges:\n if not edge.node_to.visited:\n edge.node_to.visited = True\n ret_list.extend(self.dfs_helper(edge.node_to))\n\n return ret_list", "def dft_recursive(self, starting_vertex):\n # TODO\n # creating a function inside that includes a list\n # of previously traversed vertices\n def recursive(graph, traversed, vertex):\n # if the vertex is in traversed already, return none\n if vertex in traversed:\n return \n # otherwise we print it out\n print(vertex)\n # append the vertex to our traversed list\n traversed.add(vertex)\n # running the function on the neighbors of the vertex\n for val in graph[vertex]:\n recursive(graph, traversed, val)\n\n recursive(self.vertices, set(), starting_vertex)", "def dft(self, starting_vertex):\n # TODO\n # create an empty stack class and set\n to_visit = Stack()\n visited = set()\n # add the starting vertex to the stack\n to_visit.push(starting_vertex)\n # while loop to run while we still have elements in our stack\n while to_visit.size() > 0:\n # pops the last value in our stack\n v = to_visit.pop()\n # checks to see if the value has already been seen\n if v not in visited:\n # if not then it gets printed out\n print(v)\n # then added to the visited set\n visited.add(v)\n # adding the neighbors to the stack class \n for n in self.vertices[v]:\n to_visit.push(n)", "def recursive_dft(self, start, visited=[]):\n if start not in visited:\n visited.append(start)\n for i in self.neighbors(start):\n 
self.recursive_dft(i, visited)\n return visited", "def dfs(self):\n def add_to_stack(stack, done, src, path):\n for dest in self.edges[src]:\n if dest not in done:\n for step_path in self.edges[src][dest]:\n stack.append((dest, step_path, path))\n done.add(src)\n stack = [] # Stack of steps to take\n done = set() # Nodes we've visited\n # Seed the stack with all edges from the start cell.\n add_to_stack(stack, done, self.start_cell, '')\n while stack:\n (src, step_path, path) = stack.pop()\n path = path + step_path\n if src == self.exit_cell:\n return path\n add_to_stack(stack, done, src, path)\n return '' # No path found.", "def depthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Stack() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def dfs(graph, start):\n\tstack,path = [start],[]\n\twhile stack:\n\t\tele = stack.pop()\n\t\tif ele in path:\n\t\t\tcontinue\n\t\telse:\n\t\t\tpath.append(ele)\n\t\t\tfor neighbours in graph[ele]:\n\t\t\t\tstack.append(neighbours)\n\n\treturn path", "def depth_fs(start: Vector2D, goal: Vector2D, grid: Scene, *args) -> (list, list):\n frontier = Stack()\n prev_node = dict()\n explored = []\n\n frontier.put(start)\n prev_node[start] = None\n\n while not frontier.empty():\n current = frontier.get()\n \n if current == goal:\n return (reconstruct_path(goal, prev_node), explored[1:]) # [1:] to remove start from list\n\n grid.set_cell(current, Cell(val = CellType.searched))\n explored.append(current)\n\n for neighbor in grid.get_unexplored_neighbors(current):\n prev_node[neighbor] = current\n frontier.put(neighbor)\n\n # grid.set_cell(neighbor, Cell(val = CellType.searched))\n \n # If frontier empty but goal was never reached, no solution was found\n return ([], explored[1:]) # [1:] to remove start from list", "def dijkstra(self, source=None, destination=None):\n for vertex in self.vertices():\n vertex.d = sys.maxint\n if not source:\n source = self.vertices()[0]\n q = simply_python.data_structures.FIFO_dict()\n source.d = 0\n q.append(source)\n while not q.isempty():\n source = q.pop()\n print source\n print source.d\n d = source.d\n for out_vertex in self.out_vertices(source):\n if 
out_vertex.d == sys.maxint:\n out_vertex.d = d + 1\n q.append(out_vertex)\n if out_vertex == destination:\n return out_vertex.d\n return d", "def bfs(self, starting_vertex, destination_vertex):\n # creating an empty list of visited vertices\n visited = []\n # creating a queue with the starting vertex in it\n queue = [[starting_vertex]]\n # while we have items in our queueueue\n while queue:\n # pop the first item in the queueueue\n path = queue.pop(0)\n # getting the last value in our path\n node = path[-1]\n # checking to see if it has been seen already or not\n if node not in visited:\n # checking the neighbors of our farthest node\n for n in self.vertices[node]:\n # creating a new path list and appending the nieghbors\n # to it and the queueueueue\n new_path = list(path)\n new_path.append(n)\n queue.append(new_path)\n # if the destination is in the new_path\n # we are done and return the new path\n if n == destination_vertex:\n return new_path\n # adding the node to the visited list\n visited.append(node)", "def bfs(self, starting_vertex, destination_vertex):\n \"\"\" FIFO ir LILO\n Create a queue\n Enqueue PATH to starting Vertex\n Create a set top store visited vertices\n While the queue is NOT empty: e.g. > 0\n Dequeue the first PATH Vertex\n Get Vertex from END of PATH\n Check IF NOT visited:\n Mark as visited\n check if vertex is destination_vertex\n If TRUE, return path\n enqueue PATH to ALL of neighbors \n make COPY of current path\n add neighbor to path copy\n enqueue copy \n \"\"\"\n\n q = Queue() # Create a queue\n q.enqueue([starting_vertex]) # Enqueue starting at vertex into Queue (list)\n visited = set() # Create a set to store visited \n \n while q.size() > 0: # While the queue is NOT empty: \n path = q.dequeue() # Dequeue the first PATH Vertices\n v = path[-1] # Get Vertex from END of PATH\n\n if v not in visited: # Check IF NOT visited:\n visited.add(v) # Mark as visited\n\n if v == destination_vertex: # check if vertex is destination_vertex\n return path # If TRUE, return path, DONE\n\n for n in self.get_neighbors(v): # enqueue PATH to ALL of neighbors\n path_c = path [:] # make COPY of current path\n path_c.append(n) # add neighbor to path copy\n q.enqueue(path_c) # enqueue copy", "def bfs(self, starting_vertex, destination_vertex): # great if you know to result is somewhere close to the root/start\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue\n q.enqueue([starting_vertex]) # set the starting_vertex with enqueue \n\n while q.size() > 0:\n path = q.dequeue() # dequeue and store first path\n v = path[-1] # store the vertex from the end of path \n\n if v == destination_vertex: # if v is equal to the destination_vertex\n return path # return the path \n\n if v not in visited: # if v has not been visited yet \n visited.add(v) # add v to the vistied set \n\n for neighbor in self.vertices[v]: # loop through the neighbors \n path_copy = list(path) # make a copy of the path \n path_copy.append(neighbor) # append each neighbor to the back of the path copy \n q.enqueue(path_copy) # enqueue the path copy to the queue ", "def DFS1(graph, start, end, path=[], shortest=None):\n path = path + [start]\n print 'Current DFS path:', printPath(path)\n if start == end:\n return path\n for node in graph.childrenOf(start):\n if node not in path: #avoid cycles\n if shortest == None or len(path) < len(shortest):\n newPath = DFS1(graph, node, end, path, shortest)\n if newPath != None:\n shortest = newPath\n return 
shortest", "def bfs(self, startNode):\n queue = Queue()\n\n # Mark all the nodes as not visited\n visited = {}\n for node in self.getNodes():\n visited[node] = False\n\n queue.enqueue(startNode)\n\n while not queue.isEmpty():\n s = queue.dequeue()\n visited[s] = True\n print s,\n\n # enqueue all the adjacent vertices to s\n # if they've not already been visited\n\n for adjacentNode in self.getAdjacent(s):\n if visited[adjacentNode] is False:\n queue.enqueue(adjacentNode)\n visited[adjacentNode] = True", "def dfs(graph, initial_node, dest_node):\n parents = {}\n dfs_rec(graph, initial_node, {}, parents)\n\n path = []\n current_node = dest_node\n while current_node != initial_node:\n next_node = parents[current_node]\n path = [g.Edge(next_node, current_node, graph.distance(next_node, current_node))] + path\n current_node = next_node\n\n return path" ]
[ "0.66623896", "0.65276676", "0.64679134", "0.64623046", "0.6430159", "0.63167006", "0.6277863", "0.625629", "0.625629", "0.6231696", "0.6226393", "0.6226135", "0.6202045", "0.6167126", "0.61345613", "0.61269605", "0.61263084", "0.6116274", "0.61129403", "0.60858756", "0.60563964", "0.6041509", "0.6039684", "0.5970186", "0.5969727", "0.5965366", "0.59308577", "0.592485", "0.5922951", "0.5893463", "0.58885103", "0.58480716", "0.58166087", "0.58001685", "0.579349", "0.5776946", "0.577246", "0.57682467", "0.57627755", "0.57528377", "0.57415044", "0.57346505", "0.5729867", "0.5728089", "0.571913", "0.5712128", "0.5708099", "0.5706021", "0.57054645", "0.57047546", "0.5704749", "0.57035667", "0.56942546", "0.5681054", "0.56796134", "0.5674302", "0.5667555", "0.5665029", "0.56473887", "0.56470835", "0.5641572", "0.56276613", "0.56224614", "0.5617321", "0.56076103", "0.55880284", "0.5585204", "0.5575912", "0.55685246", "0.5564954", "0.55640346", "0.55572027", "0.5550903", "0.55488974", "0.55330515", "0.55284905", "0.55206454", "0.5515337", "0.551289", "0.5509035", "0.5504594", "0.5499681", "0.5497857", "0.5497701", "0.5489898", "0.5487241", "0.54804075", "0.5464176", "0.5464048", "0.54554445", "0.5454474", "0.545328", "0.54313624", "0.5428901", "0.54140556", "0.5406508", "0.5400461", "0.5390145", "0.5386921", "0.5385221" ]
0.71058327
0
Produce edges in a depth-first search (DFS) labeled by type.
def dfs_labeled_edges_generator(graph, source, reverse=..., has_reverse_edge=..., has_nontree_edge=..., return_labels=...): # -> tuple[Unknown, Unknown]: ...
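As a rough illustration of what the stubbed generator above is meant to produce, here is a minimal sketch using networkx's dfs_labeled_edges, which yields each traversed edge together with a type label ('forward' for tree edges, 'nontree' for non-tree edges, 'reverse' when the search backtracks). The toy graph is invented for the example, and the dgl-specific parameters in the stub (reverse, has_reverse_edge, has_nontree_edge, return_labels) are not modeled here.

import networkx as nx

# Small directed graph made up for the example.
G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (1, 3)])

# dfs_labeled_edges yields (u, v, label) triples; the label marks the
# kind of edge encountered during the depth-first search.
for u, v, label in nx.dfs_labeled_edges(G, source=0):
    print(u, v, label)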
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_edges_by_vertex(self, id, type=0):\n edges = []\n for (source, target) in self.edges.keys():\n if type == 1:\n if source == id:\n edges.append((source, target))\n elif type == 2:\n if target == id:\n edges.append((source, target))\n else:\n if source == id or target == id:\n edges.append((source, target))\n return edges", "def test_multiple_edge_traversal_with_type_filtering(self):\r\n v = TestModel.create(count=1, text='Test1')\r\n\r\n v1 = TestModel.create()\r\n TestEdge.create(v, v1)\r\n\r\n v2 = TestModel.create()\r\n OtherTestEdge.create(v, v2)\r\n\r\n v3 = TestModel.create()\r\n YetAnotherTestEdge.create(v, v3)\r\n\r\n v4 = OtherTestModel.create()\r\n TestEdge.create(v, v4)\r\n\r\n v5 = OtherTestModel.create()\r\n OtherTestEdge.create(v, v5)\r\n\r\n v6 = OtherTestModel.create()\r\n YetAnotherTestEdge.create(v, v6)\r\n\r\n assert len(v.outV()) == 6\r\n\r\n assert len(v.outV(TestEdge, OtherTestEdge)) == 4\r\n assert len(v.outV(TestEdge, OtherTestEdge, types=[TestModel])) == 2", "def _dfs_cycle_forest(G, root=None):\n # Create a directed graph from the depth-first search tree with\n # root node `root` in which tree edges are directed toward the\n # root and nontree edges are directed away from the root. For\n # each node with an incident nontree edge, this creates a\n # directed cycle starting with the nontree edge and returning to\n # that node.\n #\n # The `parent` node attribute stores the parent of each node in\n # the DFS tree. The `nontree` edge attribute indicates whether\n # the edge is a tree edge or a nontree edge.\n #\n # We also store the order of the nodes found in the depth-first\n # search in the `nodes` list.\n H = nx.DiGraph()\n nodes = []\n for u, v, d in nx.dfs_labeled_edges(G, source=root):\n if d == 'forward':\n # `dfs_labeled_edges()` yields (root, root, 'forward')\n # if it is beginning the search on a new connected\n # component.\n if u == v:\n H.add_node(v, parent=None)\n nodes.append(v)\n else:\n H.add_node(v, parent=u)\n H.add_edge(v, u, nontree=False)\n nodes.append(v)\n # `dfs_labeled_edges` considers nontree edges in both\n # orientations, so we need to not add the edge if it its\n # other orientation has been added.\n elif d == 'nontree' and v not in H[u]:\n H.add_edge(v, u, nontree=True)\n else:\n # Do nothing on 'reverse' edges; we only care about\n # forward and nontree edges.\n pass\n return H, nodes", "def generate_edges(graph):\n edges = []\n\n # for each node in graph\n for node in graph:\n\n # for each neighbour node of a single node\n for neighbour in graph[node]:\n # if edge exists then append\n edges.append((node, neighbour))\n return edges", "def main():\n n = int(input(\"Enter the number of nodes: \"))\n m = int(input(\"Enter the number of edges: \"))\n \n adjList = [[] for i in range(n)]\n \n print(\"Enter the edges: \")\n for i in range(m):\n x, y = input().split(\" \")\n x = int(x)\n y = int(y)\n adjList[x].append(y)\n adjList[y].append(x)\n \n s = int(input(\"Enter the source: \"))\n \n DFS(adjList, s, n)", "def DFS(obj,vertex,visited=dict()):\n validateVertex(vertex,obj.vertexList)\n #order = []\n #visited = dict()\n subGraph= []\n for ver in obj.vertexList:\n visited[ver] = False\n\n DFSUtility(obj,vertex,visited,subGraph)\n return subGraph", "def get_type_dag(graph: Graph, entity_type_to_id: Dict[str, int]) -> Dict[int, DAGNode]:\n # dictionary pointing from entity type id to the corresponding node in the entity type DAG\n entity_type_dag = {}\n\n # extract equivalence class relation\n equivalent_classes = {}\n for subject, 
predicate, object in graph.triples((None, OWL.equivalentClass, None)):\n equivalent_classes[subject] = object\n equivalent_classes[object] = subject\n\n # iterate over class hierarchy\n for subject, predicate, object in graph.triples((None, RDFS.subClassOf, None)):\n\n # is the subject is an entity type or equivalent to an entity type\n subject_is_entity_type = (subject in entity_type_to_id or\n (subject in equivalent_classes and equivalent_classes[subject] in entity_type_to_id))\n # is the object is an entity type or equivalent to an entity type\n object_is_entity_type = (object in entity_type_to_id or\n (object in equivalent_classes and equivalent_classes[object] in entity_type_to_id))\n\n # if the subject is an entity type or equivalent to an entity type AND the object is an entity type or\n # equivalent to an entity type\n if subject_is_entity_type and object_is_entity_type:\n # replace subject and object with their equivalent entity type if thhey are not an entity type themselves\n if subject not in entity_type_to_id:\n subject = equivalent_classes[subject]\n if object not in entity_type_to_id:\n object = equivalent_classes[object]\n\n subject_id = entity_type_to_id[subject]\n object_id = entity_type_to_id[object]\n # add subject and object and their relation to the DAG\n if subject_id != object_id:\n if object_id not in entity_type_dag:\n entity_type_dag[object_id] = DAGNode(object_id, object)\n if subject_id not in entity_type_dag:\n entity_type_dag[subject_id] = DAGNode(subject_id, subject)\n\n # add DAG node of object as parent to the subject DAG node\n entity_type_dag[subject_id].parents.append(entity_type_dag[object_id])\n # add DAG node of the subject as child to the object DAG node\n entity_type_dag[object_id].children.append(entity_type_dag[subject_id])\n\n return entity_type_dag", "def directedDFS(digraph, start, end, maxTotalDist, maxDistOutdoors):\n return DFShort(digraph, start, end, [], None, maxTotalDist, maxDistOutdoors)", "def find_edges(\n self, eid, etype: Optional[Tuple[str, str, str]] = None, output_device=None\n ):\n\n if etype:\n src_type, connection_type, dst_type = etype\n eid = self.dgl_e_id_to_cugraph_id(eid, etype)\n # TODO: implement below\n src, dst = self.find_edges(eid, etype)\n src = torch.as_tensor(src, device=\"cuda\")\n dst = torch.as_tensor(dst, device=\"cuda\")\n src = self.cugraph_n_id_to_dgl_id(src, src_type)\n dst = self.cugraph_n_id_to_dgl_id(dst, dst_type)\n\n return src, dst", "def bfs_depth(g, s, discovered):\n\n level = [s]\n depth = 1\n\n while len(level) > 0:\n next_level = []\n\n for u in level:\n for e in g.incident_edges(u):\n v = e.opposite(u)\n if v not in discovered:\n discovered[v] = depth\n next_level.append(v)\n\n level = next_level\n depth += 1", "def dfs_edges_generator(graph, source, reverse=...):\n ...", "def DFS(self, start_vertex):\n yield from self._search(start_vertex, kind='DFS')", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({neighbour, vertex})\n return edges", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n edges.append({vertex, neighbour})\n return edges", "def go_recursive_on_type_level_dicothomic(node_type_level, n, search_for):\n\n if n['type'] in node_type_level:\n edge_type_level = node_type_level[n['type']]\n\n if search_for in edge_type_level:\n return edge_type_level[search_for]\n else:\n\n gen_function = 
n['graph'][father(n)['id']][n['id']]['generating_function']\n if gen_function in edge_type_level:\n node_type_level = edge_type_level[gen_function]\n\n if search_for in node_type_level:\n return node_type_level[search_for]\n else:\n try:\n return go_recursive_on_type_level_dicothomic(node_type_level, father(n), search_for)\n except IndexError:\n pass\n\n elif 'all' in edge_type_level:\n return edge_type_level['all'][search_for]\n\n elif 'all' in node_type_level:\n return node_type_level['all'][search_for]", "def dfs(visited: list, graph: AdjList, node: int):\n if node not in visited:\n visited.append(node)\n for neighbour in graph[node]:\n dfs(visited, graph, neighbour)", "def directed_dfs(digraph, start, end, max_total_dist, max_dist_outdoors):\n\n\n path = [[],0 , 0]\n best_path = get_best_path(digraph, start, end, path, max_dist_outdoors, max_total_dist, best_path = None)\n\n if best_path[0] is None:\n raise ValueError('No work')\n else :\n return best_path[0]", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({vertex, neighbour})\n return edges", "def __generate_edges(self):\n\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append( {vertex,neighbour} )\n return edges", "def __generate_edges(self):\r\n edges = []\r\n for vertex in self.__graph_dict:\r\n for neighbor in self.__graph_dict[vertex]:\r\n if {neighbor, vertex} not in edges:\r\n edges.append({vertex, neighbor})\r\n return edges", "def get_nodes_by_type(ntwrk, typ='switch'):\r\n return {k: v for el in ntwrk\r\n for k, v in el.items() if v['type'] == typ}", "def sample_etype_neighbors(\n g,\n nodes,\n etype_offset,\n fanout,\n edge_dir=\"in\",\n prob=None,\n replace=False,\n copy_ndata=True,\n copy_edata=True,\n etype_sorted=False,\n _dist_training=False,\n output_device=None,\n):\n if g.device != F.cpu():\n raise DGLError(\"The graph should be in cpu.\")\n # (BarclayII) because the homogenized graph no longer contains the *name* of edge\n # types, the fanout argument can no longer be a dict of etypes and ints, as opposed\n # to sample_neighbors.\n if not F.is_tensor(fanout):\n raise DGLError(\"The fanout should be a tensor\")\n if isinstance(nodes, dict):\n assert len(nodes) == 1, \"The input graph should not have node types\"\n nodes = list(nodes.values())[0]\n\n nodes = utils.prepare_tensor(g, nodes, \"nodes\")\n device = utils.context_of(nodes)\n nodes = F.to_dgl_nd(nodes)\n # treat etypes as int32, it is much cheaper than int64\n # TODO(xiangsx): int8 can be a better choice.\n fanout = F.to_dgl_nd(fanout)\n\n prob_array = _prepare_edge_arrays(g, prob)\n\n subgidx = _CAPI_DGLSampleNeighborsEType(\n g._graph,\n nodes,\n etype_offset,\n fanout,\n edge_dir,\n prob_array,\n replace,\n etype_sorted,\n )\n induced_edges = subgidx.induced_edges\n ret = DGLGraph(subgidx.graph, g.ntypes, g.etypes)\n\n # handle features\n # (TODO) (BarclayII) DGL distributed fails with bus error, freezes, or other\n # incomprehensible errors with lazy feature copy.\n # So in distributed training context, we fall back to old behavior where we\n # only set the edge IDs.\n if not _dist_training:\n if copy_ndata:\n node_frames = utils.extract_node_subframes(g, device)\n utils.set_new_frames(ret, node_frames=node_frames)\n\n if copy_edata:\n edge_frames = utils.extract_edge_subframes(g, induced_edges)\n utils.set_new_frames(ret, 
edge_frames=edge_frames)\n else:\n for i, etype in enumerate(ret.canonical_etypes):\n ret.edges[etype].data[EID] = induced_edges[i]\n\n return ret if output_device is None else ret.to(output_device)", "def dfs(g: nx.Graph, start_node: Any) -> str:\n\n way = []\n stack = [start_node]\n y = {node: [] for node in g.nodes}\n while stack:\n elem = stack.pop()\n way.append(elem)\n for node in list(g.neighbors(elem)):\n if node not in way:\n stack.append(node)\n y[node].extend((*y[elem], elem))\n print(y)\n return \"\".join(way)", "def get_dfs(self, s):\n results = []\n # mark all the vertices as not visited\n visited = [False] * (len(self.graph))\n self._dfs_recursive(s, visited, results)\n return results", "def dfs(g):\n global time\n time = 0\n\n for v in g:\n v.discovery = 0\n v.finish_time = 0\n v.color = 'white'\n\n for v in g:\n if v.color == 'white':\n dfs_visit(v)", "def dfs2(G):\r\n\r\n for v in V(G):\r\n v.visited = False\r\n\r\n result = []\r\n\r\n for v in V(G):\r\n if not v.visited:\r\n X = dfs2_visit(v)\r\n result.append(X)\r\n\r\n return result", "def dfs(graph, root, method='dfs', max_depth=10000):\n \n # Get node object from node ID\n root = graph.getnodes(root)\n \n # Define the search method\n stack_pop = -1\n if method == 'bfs':\n stack_pop = 0\n \n visited = []\n stack = [root.nid]\n depth = 0\n \n while stack or depth == max_depth:\n node = stack.pop(stack_pop)\n \n if node not in visited:\n visited.append(node)\n stack.extend(\n [x for x in node_neighbors(graph, node) if x not in visited])\n depth += 1\n \n return visited", "def dft_recursive(self, starting_vertex, visited=None):\n if visited is None: # if visited is None\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n if starting_vertex not in visited: # if starting_vertex has not been visited yet\n print(starting_vertex)\n visited.add(starting_vertex) # add to the set \n\n for neighbor in self.vertices[starting_vertex]: # loop through each neighbor \n self.dft_recursive(neighbor, visited) # call the dft_recursive method on each neighbor ", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Stack()\n return GraphSearch(problem, 'dfs').search(fringe)", "def depth_first_traversal(self, visitor_function=None):\n self._reset_traversal_state()\n self.time = 0\n\n result = False\n\n for n in self.nodes.values():\n if NodeColor.WHITE == n.color:\n stack = collections.deque()\n stack.append(n)\n\n while len(stack) > 0:\n node = stack.pop()\n\n if NodeColor.WHITE == node.color:\n # Need to stay on the stack until we're done exploring things connected to this node\n stack.append(node)\n\n self.time += 1\n node.discovery_time = self.time\n self._visit_enter(node, visitor_function)\n node.color = NodeColor.GRAY\n\n for descendant in self.edges[node]:\n self.logger.debug(\n 'Looking at [{}] -> [{} / {}]'.format(node.name, descendant.name, descendant.color))\n if NodeColor.WHITE == descendant.color:\n descendant.predecessor = node\n stack.append(descendant)\n elif NodeColor.GRAY == descendant.color:\n self.logger.debug(\n 'Found cycle involving edge [{}] -> [{}]'.format(node.name, descendant.name))\n result = True\n\n elif NodeColor.GRAY == node.color:\n self.time += 1\n node.color = NodeColor.BLACK\n node.finishing_time = self.time\n self._visit_exit(node, visitor_function)\n\n\n elif NodeColor.GRAY == n.color:\n self.logger.info('Found cycle involving node [{}]'.format(n.name))\n result = True\n\n return result", "def edges(self) -> Iterable[Tuple[Node]]:\n edges = []\n for 
node in self.__graph_dict.keys():\n for neighbour in self.__graph_dict[node]:\n # Since all edges go both ways, we need only return one of them.\n if {neighbour, node} not in edges:\n edges.append({node, neighbour})\n yield (node, neighbour)", "def dfs( self ):\n\n #print self.state; \n #print self.visited;\n SearchProblem.stateVisited= SearchProblem.stateVisited+1 \n \n if self.stop: # check class variable and stop searching...\n return;\n\n for action in self.edges(): # consider each edge leading out of this node\n\n action.destination.path = self.path + str(action.label); \n # get the label associated with the\n # action and append it to the path\n # string\n\n action.destination.visited = self.visited.copy();\n # make copy of source node's visited set\n # and use it as destination node's\n # visited set\n\n action.destination.visited.add( repr(action.destination.state) );\n\n if action.destination.is_target(): \n # check if destination of edge is target node\n action.destination.target_found(); # perform target found action\n if not self.continue_search(): # stop searching if not required\n SearchProblem.stop = True; # set class variable to record that we\n break; # are done\n\n if repr(action.destination.state) in self.visited:\n continue; # skip if we've visited this one before\n\n action.destination.dfs(); # resume recursive search ", "def DFS(self, start_vertex, verbose=True):\n if start_vertex is None:\n return None\n traversal = []\n visited = set()\n for vertex in self.vertices():\n if vertex not in visited:\n self._DFS(vertex, visited, traversal.append)\n if verbose:\n print('DFS(Graph) =', traversal)\n return traversal", "def dfs(node, all_nodes, depth):\r\n node.depth = depth\r\n to_return = [node,]\r\n for subnode in all_nodes:\r\n if subnode.parent and subnode.parent.id == node.id:\r\n to_return.extend(dfs(subnode, all_nodes, depth+1))\r\n return to_return", "def find_edges(self):\n self.edges = [deepcopy(self.grid[0]), [], deepcopy(self.grid[-1]), []]\n for g in self.grid:\n self.edges[3].append(g[0])\n self.edges[1].append(g[-1])\n self.edges[2]\n self.edges[3]", "def depth_first_traversal(self, start):\n return self.recursive_dft(start, [])", "def visitEdges(self) -> None:\n\n for node in self.nodesMap_.values():\n for nodeInput in node.get_inputs():\n i = nodeInput[0]\n if i.get_name() not in self.nodesMap_:\n print(i.get_kind_name(), i.get_name())\n edgeStr = self.get_unique_vertex_name(i) + \":Outputs -> \"\n edgeStr += self.get_unique_vertex_name(node) + \":Inputs\"\n self.edges_.append(edgeStr)", "def graph():\n\n graph = {'A': ['B', 'C'],\n 'B': ['C', 'D'],\n 'C': ['D'],\n 'D': ['C'],\n 'E': ['F'],\n 'F': ['C']}\n\n def generate_edges(graph):\n \"\"\" Convert the dict representation of a graph into a list one\n - https://www.geeksforgeeks.org/generate-graph-using-dictionary-python/\n \"\"\"\n edges = []\n\n # for each node in graph\n for node in graph:\n\n # for each neighbour node of a single node\n for neighbour in graph[node]:\n # if edge exists then append\n edges.append((node, neighbour))\n return edges\n\n a = generate_edges(graph=graph)\n print(a)", "def directed_dfs(self,\n node_or_name: Union[str, Node],\n stop_at: Optional[Set[Node]] = None,\n go_up: bool = False,\n yield_start_node=False,\n visited=None):\n node = resolve_node_or_str(node_or_name, G=self)\n if visited is None:\n visited = {node}\n started = False\n if stop_at is None:\n stop_at = {}\n else:\n started = True\n if node in stop_at:\n return\n if started or yield_start_node:\n yield node\n 
if not go_up:\n for edge in self.out_edges(node.name):\n if edge.to_node in visited:\n continue\n visited.add(edge.to_node)\n yield from self.directed_dfs(edge.to_node, stop_at=stop_at, go_up=go_up, visited=visited)\n if go_up:\n for edge in self.in_edges(node.name):\n if edge.from_node in visited:\n continue\n visited.add(edge.from_node)\n yield from self.directed_dfs(edge.from_node, stop_at=stop_at, go_up=go_up, visited=visited)", "def get_adjacent_vertices_by_vertex(self, id, type=None):\n vertex = []\n for (source, target) in self.edges.keys():\n if type is None:\n if source == id:\n vertex.append(target)\n elif target == id:\n vertex.append(source)\n elif type == '+':\n if source == id:\n vertex.append(target)\n elif type == '-':\n if target == id:\n vertex.append(source)\n\n return vertex", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if (neighbour, vertex) not in edges:\n edges.append((vertex, neighbour))\n \n for pair in edges:\n for otherpair in edges:\n if pair[1] == otherpair[0]:\n edges.append((pair[0],otherpair[1]))\n return edges", "def _dfs(op, visited=None):\n visited = visited or set()\n ret = []\n for child in op.inputs:\n if child.op in visited:\n return ret\n visited.add(child.op)\n if child.op.type not in op_regularizer_manager.NON_PASS_THROUGH_OPS:\n ret.extend(_dfs(child.op, visited))\n if child.op.type in ('Conv2D',): # TODO: support depthwise conv.\n ret.append(child.op)\n return ret", "def print_node_edge_sets(labels, aside, paths, mode, outf):\n\t#print_gams_set(\"hide(node)\", \"hidden nodes\", aside)\n\t#print \"\"\n\n\t# genes without labels\n\tnovel=set.union(labels[\"unknown\"], aside)\n\tprint_gams_set(\"novelGene(node)\", \"unlabeled or hidden genes\", novel, out=outf)\n\toutf.write(\"\\n\")\n\n\t# interface nodes and edges - assume we've taken care of hiding\n\t# them according to the mode by now\n\thits=set()\n\tintNodes=set()\n\tintEdges=set()\t\n\t\n\t# { pathfinder : { pid : { \"nodes\":[], \"edges\":[] } } }\n\tfor pf in paths:\n\t\tfor pid in paths[pf]:\n\t\t\thits.add(paths[pf][pid][\"nodes\"][0])\n\t\t\tintNodes.add(paths[pf][pid][\"nodes\"][-2])\n\t\t\tintEdges.add(paths[pf][pid][\"edges\"][-1])\n\n\tprint_gams_set(\"hit(node)\", \"hits\", hits, out=outf)\n\toutf.write(\"\\n\")\n\tprint_gams_set(\"intNode(node)\", \"interface nodes\", intNodes, out=outf)\n\toutf.write(\"\\n\")\n\tprint_gams_set(\"intEdge(edge)\", \"interface edges\", intEdges, out=outf)\n\toutf.write(\"\\n\")", "def make_graph(self):\n # update the neighbors in the graph\n self.update_neighbors()\n\n # Go through each node and get their neighbors\n self.edges = []\n for node_name in self.nodes:\n\n # get the neighbors\n node_neighbors = self.nodes[node_name].neighbors\n\n # go through neighbors\n for neighbor_name in node_neighbors:\n\n # Make the edge key\n edge_key = \"-\".join(sorted([node_name, neighbor_name]))\n\n # Add it to the edge list if it is not already present\n if edge_key not in self.edges:\n\n self.edges.append(edge_key)\n\n return self.edges", "def get_undirected_edges(mapping, G):\n edge_types = utils.rels_types\n edges = {}\n for et in edge_types:\n edges[et] = {}\n for g in G.nodes:\n edges[et][mapping[g]] = []\n for s, t, meta in G.edges(data=True):\n #print(s, t)\n edges[meta['type']][mapping[s]].append(mapping[t])\n edges[meta['type']][mapping[t]].append(mapping[s])\n return edges", "def traverse(uri, results, source='s', target='o', visited={}, depth=0, maxdepth=2):\n\n log.info(\"{} 
Traversing from {} to {}\".format(\" \"*depth*10, source, target))\n# log.debug(visited)\n if uri in visited.keys():\n log.debug(u\"{} Already visited {}\".format(\" \"*depth*10, uri))\n return visited[uri], visited\n elif depth >= maxdepth:\n log.debug(u\"Maximum depth exceeded\")\n return [], visited\n\n log.debug(u\"{} Visiting {}\".format(\" \"*depth*10, uri))\n\n\n edges = {}\n edge_array = []\n\n for r in results:\n if r[source]['value'] != uri:\n # Continue to next result if this result does not apply to the current node\n continue\n\n children = []\n if r[target]['type'] not in ['literal', 'typed-literal']:\n log.debug(u\"{} Found child {}\".format(\" \"*depth*10, r[target]['value']))\n\n children, visited = traverse(r[target]['value'], results, source=source, target=target, visited=visited, depth=depth+1)\n\n node = {\n \"name\": r[target]['value'],\n \"size\": 1000,\n }\n\n if len(children) > 0:\n node[\"children\"] = children\n\n edges.setdefault(r['p']['value'], {}).setdefault('children', {})[r[target]['value']] = node\n\n # Iterate over the edges, to rewrite to arrays of dictionaries\n log.debug(u\"{} Rewriting children dictionary to array for {}\".format(\" \"*depth*10, uri))\n for pk, pv in edges.items():\n child_array = []\n for sk, sv in pv['children'].items():\n child_array.append(sv)\n edge_array.append({\n 'name': pk,\n 'children': child_array\n })\n\n visited[uri] = edge_array\n return edge_array, visited", "def _depth_first_directed(self, graph):\n \n # Figure out which subgraph this is\n sub = next((i+1 for i, g in enumerate(self.get_subgraphs()) if g==graph), None)\n # Log the Subgraph progress\n logger.info('Directing SUBGRAPH {} / {}'.format(sub, len(list(self.get_subgraphs()))))\n\n old_edges = graph.edges()\n dfs_edges = list(nx.traversal.dfs_edges(graph,\n self._graph_priority(graph.nodes())))\n #This debug message could be cleaner\n logger.debug('mapping {} -> {}'.format(old_edges, dfs_edges))\n graph.remove_edges_from(old_edges)\n graph.add_edges_from(dfs_edges)\n \n logger.info('DONE!')\n return graph", "def dft(self, starting_vertex):\n \"\"\" LIFO\n Create a stack \n Push starting Vertex\n Create a set to store visited\n While the stack is NOT empty: e.g. > 0\n Pop the last added Vertex\n Check IF NOT visited:\n Mark as visited\n\n\n Push ALL of neighbors\n \"\"\"\n s = Stack() # Create a stack\n s.push(starting_vertex) # Push starting Vertex\n visited = set() # Create a set to store visited\n\n while s.size() > 0: # While the stack is NOT empty: e.g. > 0\n v = s.pop() # Pop the last added Vertex\n\n if v not in visited: # Check IF NOT visited: e.g. 
> 0\n print(v)\n visited.add(v) # Mark as visited\n\n for n in self.get_neighbors(v): # Check IF NOT visited:\n s.push(n) # Push ALL of neighbors ", "def extract_edges(graph):\n return graph.get_edges()", "def dfs(self, s):\n g = Graph(attr={DIRECTED: True})\n adjacent_type = '+' if DIRECTED in self.attr and self.attr[\n DIRECTED] else None\n # Insert s root node in stack \n stack = collections.deque()\n # Initial node does not have origin, it is represented by # \n stack.append(('#', s))\n\n while (len(stack) > 0):\n (source, target) = stack.pop()\n w = self.get_vertex(target)\n if DISCOVERED not in w.attributes or w.attributes[\n DISCOVERED] is False:\n w.attributes[DISCOVERED] = True\n g.add_vertex(w)\n if (source != '#'):\n g.add_edge(edge.Edge(source, w.id), True)\n for e in self.get_adjacent_vertices_by_vertex(w.id,\n adjacent_type):\n stack.append((w.id, e))\n return g", "def recursive_dft(self, start, visited=[]):\n if start not in visited:\n visited.append(start)\n for i in self.neighbors(start):\n self.recursive_dft(i, visited)\n return visited", "def dfs(starting_vertex):\n s = Stack()\n\n s.push([starting_vertex])\n\n while s.size() > 0:\n p = s.pop()\n l = p[-1]\n\n if l not in new_visited_rooms:\n return p\n neighbors = set(get_neighbors(l))\n \n for n in neighbors:\n new_path = p.copy()\n new_path.append(n)\n s.push(new_path)", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def depth_fs(start: Vector2D, goal: Vector2D, grid: Scene, *args) -> (list, list):\n frontier = Stack()\n prev_node = dict()\n explored = []\n\n frontier.put(start)\n prev_node[start] = None\n\n while not frontier.empty():\n current = frontier.get()\n \n if current == goal:\n return (reconstruct_path(goal, prev_node), explored[1:]) # [1:] to remove start from list\n\n grid.set_cell(current, Cell(val = CellType.searched))\n explored.append(current)\n\n for neighbor in grid.get_unexplored_neighbors(current):\n prev_node[neighbor] = current\n frontier.put(neighbor)\n\n # grid.set_cell(neighbor, Cell(val = CellType.searched))\n \n # If frontier empty but goal was never reached, no solution was found\n return ([], explored[1:]) # [1:] to remove start from list", "def build_graph(dfs_codes):\n\tg = graph.Graph()\n\tnumnodes = max([x[0] for x in dfs_codes] + [x[1] for x in dfs_codes])+1\n\tfor i in range(numnodes):\n\t\tn = graph.Node()\n\t\tg.nodes.append(n)\n\n\tfor idx,c in enumerate(dfs_codes):\n\t\tg.nodes[c.fromn].id = c.fromn\n\t\tg.nodes[c.fromn].label = c.from_label\n\t\tg.nodes[c.to].id = c.to\n\t\tg.nodes[c.to].label = c.to_label\n\n\t\te = graph.Edge()\n\t\te.id = g.nedges\n\t\te.fromn = c.fromn\n\t\te.to = c.to\n\t\te.label = c.edge_label\n\t\tg.nodes[c.fromn].edges.append(e)\n\t\t\"\"\"\n\t\tdouble edges ?\n\n\t\te2 = graph.Edge()\n\t\te2.id = e.id\n\t\te2.label = e.label\n\t\te2.fromn = c.to\n\t\te2.to = c.fromn\n\t\tg.nodes[c.to].edges.append(e2)\n\t\t\"\"\"\n\t\tg.nedges += 1\n\n\treturn g", "def dfs_iter(self, root):\n stack = self.dep_graph.out_edges(root)\n while stack:\n edge = stack.pop()\n stack += self.dep_graph.out_edges(edge[1])\n yield edge", "def getEdges(self):\n # for node in graph,\n # return node -> node for j in graph[node]\n\n return [\"->\".join([str(n1), str(n2)]) for n1 in self.graph.keys() for n2 in self.graph[n1]]", "def get_repeated_children_type(self):\n result_dict = dict()\n for v in self.graph.vertices():\n parent_type = self.graph.vp.type[v]\n if parent_type not in result_dict:\n result_dict[parent_type] = set()\n\n children_types = 
set()\n for u in v.out_neighbors():\n child_type = self.graph.vp.type[u]\n if child_type not in children_types:\n children_types.add(child_type)\n else:\n result_dict[parent_type].add(child_type)\n return result_dict", "def edges(self):\r\n return [\r\n (parent, child)\r\n for parent in self._children_of\r\n for child in self._children_of[parent]\r\n ]", "def _get_nodes_and_edges(dag: DAGNode):\n\n edges = []\n nodes = []\n\n def _dfs(node):\n nodes.append(node)\n for child_node in node._get_all_child_nodes():\n edges.append((child_node, node))\n return node\n\n dag.apply_recursive(_dfs)\n return nodes, edges", "def E(self, edge_type, feed=None, reverse=False):\n return super(Graph, self).E(edge_type, feed, reverse)", "def iterative_dfs(starting_vertex, graph):\n starting_vertex.discovered = True\n starting_vertex.discovery_edge = Graph.Edge(starting_vertex, None, None) # Dummy edge\n walk = starting_vertex\n\n while walk is not None:\n has_to_go_back = True\n for edge in graph.incident_edges(walk):\n opposite = edge.opposite(walk)\n if not opposite.discovered:\n opposite.discovered = True\n opposite.discovery_edge = edge\n walk = opposite\n has_to_go_back = False\n break\n\n if has_to_go_back:\n walk = walk.discovery_edge.opposite(walk)\n\n starting_vertex.discovery_edge = None # Remove dummy edge", "def create_edges_set(config, epoch_df, data_type, verbose=False):\n \n # Define type of data\n data = \"%s_samp_freq\" % data_type\n \n # Initialisation\n edges = []\n freq = config.getint(\"Sampling\", data)\n start = epoch_df.loc[(\"init\", \"t0\")]\n end = epoch_df.loc[(\"init\", \"t1\")]\n edges.append((start, end, freq))\n \n # Get epochs\n epoch_sections = [s for s in config.sections() if s.startswith(\"Epoch_\")]\n \n # Iterate over epochs\n for epoch in epoch_sections:\n # Parse\n epoch_name = epoch.split(\"_\")[1]\n start = epoch_df.loc[(epoch_name, \"t0\")]\n end = epoch_df.loc[(epoch_name, \"t1\")]\n\n # Adjust \n if config.has_option(epoch, data):\n efreq = config.getint(epoch, data)\n\n if config.has_option(epoch, \"%s_samp_t\" % data_type):\n tend = config.getfloat(epoch, \"%s_samp_t\" % data_type)\n edges.append((start, tend, efreq))\n edges.append((tend, end, freq))\n else:\n edges.append((start, end, efreq))\n freq = efreq\n else:\n edges.append((start, end, freq))\n \n return edges", "def _dfs(self, G, v):\n\n self._marked[v] = True\n for w in G.adj(v):\n if self._marked[w] is False:\n self._edge_to[w] = v\n self._dfs(G, w)", "def edges(self):\n edge_list = []\n for node1 in self.node_dict:\n for node2 in self.node_dict[node1]:\n edge_list.append((node1,\n node2,\n self.node_dict[node1][node2]))\n return edge_list", "def dft(self, starting_vertex):\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n s = Stack() # create an empty Stack \n s.push(starting_vertex) # push the starting_vertex to the top of the stack\n\n while s.size() > 0: # loop if the size is greater than 0\n v = s.pop() # pop off first element and store \n\n if v not in visited: # if v has not been visited yet\n visited.add(v) # add to the set \n print(v)\n for neighbor in self.vertices[v]: # loop through neighbors \n s.push(neighbor) # add each neighbor to the bottom of the stack", "def gen_graph(self):", "def dfs(graph, start):\n\tstack,path = [start],[]\n\twhile stack:\n\t\tele = stack.pop()\n\t\tif ele in path:\n\t\t\tcontinue\n\t\telse:\n\t\t\tpath.append(ele)\n\t\t\tfor neighbours in graph[ele]:\n\t\t\t\tstack.append(neighbours)\n\n\treturn path", "def 
generate_edgelist(ast_root):\n edges = []\n\n def walk_tree_and_add_edges(node):\n for child in node.children:\n edges.append([node.identifier, child.identifier])\n walk_tree_and_add_edges(child)\n\n walk_tree_and_add_edges(ast_root)\n\n return edges", "def dfs_from_vertex(graph, vertex):\n return dfs(graph, key=lambda x: x == vertex and 1 or 2)", "def dfs_paper(p, nodes, edges, retval, max_level=1):\n for cite in p.citations.all():\n add_node(cite, nodes, retval)\n if max_level > 0 :\n dfs_paper(cite, nodes, edges, retval, max_level-1)\n if (p.id,cite.id) not in edges:\n edges.append( (p.id,cite.id) )\n for rcite in Paper.objects.filter(citations__id__exact=p.id):\n add_node(rcite, nodes, retval)\n if max_level > 0 :\n dfs_paper(rcite, nodes, edges, retval, max_level-1)\n if (rcite.id, p.id) not in edges:\n edges.append( (rcite.id, p.id) )", "def edges(self):\r\n return self.__generate_edges()", "def edges(self):\n return self.generate_edges()", "def read_graph(args):\n dataset = pd.read_csv(args.features_path).values.tolist()\n edges = {}\n edges[\"positive_edges\"] = [edge[0:2] for edge in dataset if edge[2] == 1]\n edges[\"negative_edges\"] = [edge[0:2] for edge in dataset if edge[2] == -1]\n edges[\"ecount\"] = len(dataset)\n edges[\"ncount\"] = len(set([edge[0] for edge in dataset]+[edge[1] for edge in dataset]))\n return edges", "def dfs(get_neighbors, source, target):\n\n parents = {}\n visited = set()\n stack = collections.deque()\n stack.append(source)\n while stack:\n vertex = stack.pop()\n if vertex == target:\n return _backtrack(target, lambda v: parents.get(v))\n visited.add(vertex)\n neighbors = [n for n in get_neighbors(vertex) if n not in visited]\n if neighbors:\n stack.append(vertex)\n stack.append(neighbors[0])\n parents[neighbors[0]] = vertex\n return []", "def prot_df_to_graph(df, edge_dist_cutoff=4.5):\n\n node_pos = torch.FloatTensor(df[['x', 'y', 'z']].to_numpy())\n\n kd_tree = ss.KDTree(node_pos)\n edge_tuples = list(kd_tree.query_pairs(edge_dist_cutoff))\n edges = torch.LongTensor(edge_tuples).t().contiguous()\n\n node_feats = torch.FloatTensor([one_of_k_encoding_unk(e, prot_atoms) for e in df['element']])\n edge_feats = torch.FloatTensor(\n [1.0 / (np.linalg.norm(node_pos[i] - node_pos[j]) + 1e-5) for i, j in edge_tuples]).view(-1, 1)\n # feats = F.one_hot(elems, num_classes=len(atom_int_dict))\n\n return node_feats, edges, edge_feats, node_pos", "def dfs(start_vertex):\n # initially, the stack contains only the start vertex and visited_vertices is empty\n stack = deque()\n stack.append(start_vertex)\n visited_vertices = set()\n\n result = []\n while len(stack) > 0:\n # 1. pop a vertex from the stack\n current_vertex = stack.pop()\n\n # 2. ignoring this vertex if it has been visited\n if current_vertex in visited_vertices:\n continue\n\n # 3. mark as visited, so we will not visit it anymore\n visited_vertices.add(current_vertex)\n result.append(current_vertex.get_label())\n\n # 4. get all adjacent vertices which HAVE NOT been visited\n adjacent_vertices = []\n for edge in current_vertex.get_outbound_edges():\n adjacent_vertex = edge.get_end_vertex()\n if adjacent_vertex not in visited_vertices:\n adjacent_vertices.append(adjacent_vertex)\n\n # if necessary we may do some manipulation with adjacent_vertices, e.g. sort them\n # 5. 
add all adjacent vertices to the stack(DFS)\n stack.extend(adjacent_vertices)\n\n return result", "def create_graph_from_edges(edges):\n G = nx.Graph()\n for e in edges:\n p1 = e[0]\n p2 = e[1]\n dist = LA.norm(np.array(p2) - np.array(p1))\n G.add_edge(p1, p2, weight=dist)\n return G", "def multi_edge():\n from networkx.readwrite import json_graph\n import networkx as nx\n import autonetkit\n # returns a house graph\n data = {'directed': False,\n 'graph': [],\n 'links': [{'_ports': {'r4': 2, 'r5': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 1},\n {'_ports': {'r2': 3, 'r4': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r2': 4, 'r4': 3},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r3': 3, 'r5': 2},\n 'raw_interfaces': {},\n 'source': 1,\n 'target': 4},\n {'_ports': {'r1': 1, 'r2': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 3, 'r2': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 2, 'r3': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 4, 'r3': 4},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 5, 'r3': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r2': 2, 'r3': 2},\n 'raw_interfaces': {},\n 'source': 3,\n 'target': 4}],\n 'multigraph': True,\n 'nodes': [{'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r4 to r5', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth2'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r4',\n 'label': 'r4',\n 'x': 675,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r5 to r4', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r5 to r3', 'id': 'eth1'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r5',\n 'label': 'r5',\n 'x': 675,\n 'y': 500},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r1',\n 'label': 'r1',\n 'x': 350,\n 'y': 400},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r2 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r2',\n 'label': 'r2',\n 'x': 500,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r3 to r2', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r3 to r5', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 
'device_type': 'router',\n 'id': 'r3',\n 'label': 'r3',\n 'x': 500,\n 'y': 500}]}\n graph = json_graph.node_link_graph(data)\n anm = autonetkit.anm.NetworkModel()\n g_in = anm.add_overlay(\"input\")\n g_in._replace_graph(nx.MultiGraph(graph))\n # TODO: check if should build overlays here rather than clone in?\n g_phy = anm[\"phy\"]\n g_phy._replace_graph(graph)\n return anm", "def build_edges(self):\n print(\"Constructing Edges.\")\n # -----------------------------------------\n # TODO: You should write this method!\n\n # Note: this method may take some time to run - it is likely to be O(N^2), and some lists have N = 10,000 words or more.\n # (I've had students decide that their program was \"broken\" and quit it before this process finished... every time,\n # not realizing that the program was working hard behind the scenes.)\n # I recommend that you keep track of the number of edges you have added, and if it is a multiple of 1000, print\n # something so that you know your program is making progress.\n n = len(self.vertices)\n\n\n\n \n # -----------------------------------------\n print(\"Done Constructing Edges.\\n------------------------------------\")", "def dfs(self, starting_vertex, destination_vertex): # great for if you know the start and end, like a maze with 1 entry/1 exit\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n s = Stack() # create an empty Stack\n s.push([starting_vertex]) # push the starting vertex to the top of the stack \n\n while s.size() > 0: # loop if the size is greater than 0\n path = s.pop() # pop off the top element of the stack and store \n v = path[-1] # store the vertex from the end of path\n\n if v == destination_vertex: # if v is equal to the destination_vertex\n return path # return the path \n if v not in visited: # if v has not been visited yet \n visited.add(v) # add v to the vistied set \n\n for neighbor in self.vertices[v]: # loop through the neighbors\n path_copy = list(path) # make a copy of the path \n path_copy.append(neighbor) # append each neighbor to the back of the path copy \n s.push(path_copy) # push the path copy to the Stack", "def iter_dfs(self, depth=0):\n yield self, depth\n yield from self.left.iter_dfs(depth=depth + 1)\n yield from self.right.iter_dfs(depth=depth + 1)", "def dft(self, starting_vertex):\n # create an empty stack and push the starting vertex ID\n stack = Stack()\n stack.push(starting_vertex)\n # create an empty Set to store the visited vertices\n visited = set()\n # while the stack is not empty ...\n while stack.size() > 0:\n # pop the first vertex\n vert = stack.pop()\n # if that vertex has not been visited ..\n if vert not in visited:\n # mark it is visited\n visited.add(vert)\n print(vert)\n # then add all of its neighbors to the top of the stack\n for neighbor in self.vertices[vert]: #self.get_neighbors(vert)\n stack.push(neighbor)", "def _dfs(\n self, kg: KG, entity: Vertex, is_reverse: bool = False\n ) -> List[Walk]:\n self.sampler.visited = set()\n walks: List[Walk] = []\n assert self.max_walks is not None\n\n rng = np.random.RandomState(self.random_state)\n\n while len(walks) < self.max_walks:\n sub_walk: Walk = (entity,)\n d = 1\n while d // 2 < self.max_depth:\n pred_obj = self.sampler.sample_hop(\n kg, sub_walk, d // 2 == self.max_depth - 1, is_reverse\n )\n if pred_obj is None:\n break\n\n if is_reverse:\n if (\n pred_obj[0] in self.communities\n and rng.random() < self.hop_prob\n ):\n community_nodes = self.labels_per_community[\n self.communities[pred_obj[0]]\n ]\n 
sub_walk = (\n pred_obj[1],\n rng.choice(community_nodes),\n ) + sub_walk\n else:\n sub_walk = (pred_obj[1], pred_obj[0]) + sub_walk\n else:\n if (\n pred_obj[1] in self.communities\n and rng.random() < self.hop_prob\n ):\n community_nodes = self.labels_per_community[\n self.communities[pred_obj[1]]\n ]\n sub_walk += (\n pred_obj[0],\n rng.choice(community_nodes),\n )\n else:\n sub_walk += (pred_obj[0], pred_obj[1])\n d = len(sub_walk) - 1\n walks.append(sub_walk)\n return list(walks)", "def dfs_iter(graph, start):\n # vkladam uzel a index potencialniho naslednika, kterym mam pokracovat\n stack = [(start, 0)]\n time = 1\n graph.discovery_time[start] = time\n graph.visited[start] = True\n\n while stack: # not empty\n u, v = stack.pop()\n\n while v < graph.size and not is_edge(graph, u, v):\n v += 1\n\n if v < graph.size:\n # found successor, u is not yet finished\n stack.append((u, v + 1))\n\n if not graph.visited[v]:\n # we have discovered v\n stack.append((v, 0))\n graph.parent[v] = u\n graph.visited[v] = True\n time += 1\n graph.discovery_time[v] = time\n else:\n # u has no more successors\n time += 1\n graph.finishing_time[u] = time", "def edges(self):\n es = []\n for vertex1 in self.vertices():\n for vertex2 in self.out_vertices(vertex1):\n es.append(self[vertex1][vertex2])\n return es", "def test_edge_instance_traversal_types(self):\r\n te = TestEdge.create(self.v1, self.v2)\r\n ote = OtherTestEdge.create(self.v1, self.v3)\r\n yate = YetAnotherTestEdge.create(self.v1, self.v4)\r\n\r\n out = self.v1.outV(te, ote)\r\n assert len(out) == 2\r\n assert self.v2.vid in [v.vid for v in out]\r\n assert self.v3.vid in [v.vid for v in out]\r\n\r\n out = self.v1.outV(ote, yate)\r\n assert len(out) == 2\r\n assert self.v3.vid in [v.vid for v in out]\r\n assert self.v4.vid in [v.vid for v in out]", "def edges(self):\n return self.__generate_edges()", "def edges(self):\n return self.__generate_edges()", "def edges(self):\n return self.__generate_edges()", "def dft_recursive(self, starting_vertex, visited=None):\n \n # for vertex in self.get_neighbors(starting_vertex):\n # if vertex not in visited:\n # visited.add(vertex)\n # self.dft_recursive(vertex, visited)\n # return visited\n if visited == None:\n visited = set()\n print(starting_vertex)\n visited.add(starting_vertex)\n for v in self.get_neighbors(starting_vertex):\n if v not in visited:\n self.dft_recursive(v, visited)", "def _DFS_loop(nodes, edges, t_n=None):\n\n if t_n is not None:\n n_t = dict((b,a) for a,b in t_n.items()) # {time: node}\n get_node_by_time = lambda time: time if t_n is None else n_t[time]\n get_time_by_node = lambda node: node if t_n is None else t_n[node]\n gen_edges = lambda node: map(get_time_by_node,edges[get_node_by_time(node)])\n\n explored = set()\n leader = dict()\n _DFS_loop.t = 0 # finishing time\n times = dict() # {time: node}\n\n def DFS(i):\n explored.add(i)\n leader[i] = s\n for j in gen_edges(i):\n if j not in explored:\n DFS(j)\n _DFS_loop.t += 1\n times[i] = _DFS_loop.t\n\n for i in nodes:\n if i not in explored:\n s = i # leader node\n DFS(i)\n\n leaders = defaultdict(list)\n for n,l in leader.items():\n leaders[get_node_by_time(l)].append(get_node_by_time(n))\n\n return times, leaders", "def edge_list_build(input_path, output_path):\n\n start_time = time.time()\n\n df = pd.read_csv(input_path, sep='\\t', header=None)\n\n for col in range(1, len(df.columns)):\n df.iloc[:, col] = df.iloc[:, col-1] + '_' + df.iloc[:, col]\n\n n_divs = len(df.columns) - 1\n\n\n dict_node_names = {}\n\n for id, node_name in 
enumerate(np.unique(df.values.flatten())):\n dict_node_names[node_name] = id + 1\n\n tmp_df = pd.DataFrame.from_dict(dict_node_names, orient='index')\n tmp_df.reset_index(inplace=True)\n tmp_df.rename({'index': 'nodes', 0: 'hash'}, inplace=True, axis=1)\n\n hash_df = tmp_df['nodes'].str.split('_', n=n_divs, expand=True)\n hash_df = pd.concat([hash_df, tmp_df['hash']], axis=1)\n\n for col_name in df.columns:\n df[col_name] = df[col_name].map(dict_node_names)\n\n df['root'] = 0\n colnames = df.columns.values\n colnames = list(colnames[-1:]) + list(colnames[:-1])\n df = df[colnames]\n\n df_tuples = pd.DataFrame()\n\n for i in range(len(df.columns) - 1):\n df_tuples[i] = list(df[df.columns[i:i + 2]].itertuples(index=False, name=None))\n del df\n gc.collect()\n\n nodes_list = []\n\n for col_id in range(0, df_tuples.shape[1]):\n father_child = df_tuples.iloc[:, col_id].drop_duplicates().values\n nodes_list.extend(father_child)\n\n graph = nx.DiGraph(nodes_list)\n graph_bfs = nx.bfs_tree(graph, 0)\n \n path = output_path + '.hashmap'\n hash_df.to_csv(path, index=False, sep='\\t')\n end_time = time.time()\n print(\"Time spent creating tree from csv file:\", end_time - start_time)\n return graph_bfs", "def get_nodes_by_type(self, node_type=None):\n target_nodes = []\n if node_type is not None:\n for node in self.nodes:\n if str(node_type).lower() == str(node.get('infos').get('type')).lower():\n target_nodes.append(node)\n return target_nodes", "def dft(self, starting_vertex):\n \n visited = []\n stack = Stack()\n\n stack.add(starting_vertex)\n\n while len(stack):\n current = stack.pop()\n\n if current not in visited:\n print(current)\n visited.append(current)\n \n for child in self.vertices[current]:\n if child not in visited:\n stack.add(child)", "def _find_cycle(subtypes: Dict[str, List[str]]) -> None:\n\n found_cycles = []\n\n def iterate(current_id, find_id):\n for t_entry in subtypes.get(current_id, []):\n if t_entry == find_id:\n found_cycles.append((find_id, current_id))\n iterate(t_entry, find_id)\n\n for the_id in subtypes['']:\n iterate(the_id, the_id)\n if len(found_cycles) > 0:\n for entry in found_cycles:\n logger.error(\n 'Cycle found with ids {} and {}'.format(entry[0], entry[1]))\n raise ValueError('cycles found in graph information')", "def get_dfs(self)->list:\n\t\tstack=[]\n\t\tdfs=[]\n\t\tstack.append(self)\n\t\twhile(len(stack)>0):\n\t\t\tnode=stack.pop(len(stack)-1)\n\t\t\tdfs.append(node.data)\n\t\t\tif(node.right!=None):\n\t\t\t\tstack.append(node.right)\n\t\t\tif(node.left!=None):\n\t\t\t\tstack.append(node.left)\n\t\treturn dfs", "def DFS(G: List, i: int, U: List) -> List:\n node = G[i]\n node.visited = True\n U.remove(i)\n for adj_node in node.adjacent:\n if not adj_node.visited:\n DFS(G, adj_node.value, U)\n return [G, U]", "def _DirectedTree(root,skeleton): \n nodel = set([root])\n edges = set(skeleton.edges)\n newEdges = []\n while edges:\n seen = set([])\n ledges = list(edges)\n for a,b in ledges:\n A = a in nodel\n B = b in nodel\n if A and B:\n seen.add((a,b)) \n elif A:\n newEdges.append((a,b))\n nodel.add(b)\n seen.add((a,b))\n elif B:\n newEdges.append((b,a))\n nodel.add(a)\n seen.add((a,b))\n edges -= seen\n return nx.DiGraph(newEdges)", "def DFSUtility(obj,vertex,visited,subGraph):\n visited[vertex] = True\n subGraph.append(vertex)\n for nxtVertex in obj.adjList[vertex]:\n if visited[nxtVertex]:\n continue\n DFSUtility(obj,nxtVertex,visited,subGraph)" ]
[ "0.6322782", "0.5792171", "0.5745061", "0.5557453", "0.55496603", "0.54645675", "0.5460162", "0.5454831", "0.53846216", "0.5355307", "0.5339173", "0.5338952", "0.53288114", "0.53112", "0.5310598", "0.52910846", "0.52882874", "0.5266239", "0.5237493", "0.52122957", "0.5211946", "0.5196208", "0.5194627", "0.51891625", "0.5184664", "0.5180019", "0.517738", "0.51746345", "0.5144447", "0.51406425", "0.51401937", "0.5127189", "0.51154083", "0.5110479", "0.51012653", "0.50999093", "0.5094264", "0.50888264", "0.50864094", "0.50623065", "0.50444156", "0.5034515", "0.50094926", "0.5007455", "0.49890196", "0.49880955", "0.4985177", "0.49640444", "0.49611014", "0.49555954", "0.49466538", "0.49434897", "0.4932139", "0.49267933", "0.4926433", "0.49245918", "0.49240717", "0.49108607", "0.4906371", "0.49046674", "0.48991242", "0.48876646", "0.4876272", "0.48600072", "0.4854763", "0.4850961", "0.48497394", "0.48463947", "0.4839872", "0.4838512", "0.4834386", "0.48328164", "0.48277828", "0.48189", "0.48091137", "0.48017326", "0.47959596", "0.47942907", "0.4785708", "0.47855633", "0.47787806", "0.4773079", "0.4768626", "0.47648656", "0.47627804", "0.4761252", "0.4755852", "0.47489366", "0.47489366", "0.47489366", "0.47470507", "0.4746141", "0.47459087", "0.47355452", "0.47287747", "0.47269663", "0.47242638", "0.47213668", "0.47126237", "0.47094136" ]
0.53314835
12
Find the feature to use for the next node split and also find where the split should be within that feature. This loops through the split options within a feature to find the best Gini score, then loops through each feature to compare the optimal Gini scores.
def find_split(self, X, y):
    choices = y.size
    if choices <= 1:
        return None, None

    # find the number of each option in the current node.
    options_parent = [np.sum(y == c) for c in range(self.num_outcomes)]

    # find the gini of current node.
    best_gini = 1.0 - sum((n / choices) ** 2 for n in options_parent)
    best_idx, best_split = None, None

    # loop through the features to get splits and options.
    for idx in range(self.num_features):
        splits, options = zip(*sorted(zip(X[:, idx], y)))

        num_left = [0] * self.num_outcomes
        num_right = options_parent.copy()
        for i in range(1, choices):
            c = options[i - 1]
            num_left[c] += 1
            num_right[c] -= 1
            gini_left = 1.0 - sum(
                (num_left[x] / i) ** 2 for x in range(self.num_outcomes)
            )
            gini_right = 1.0 - sum(
                (num_right[x] / (choices - i)) ** 2 for x in range(self.num_outcomes)
            )

            # weighted average of the two child impurities.
            gini = (i * gini_left + (choices - i) * gini_right) / choices

            # skip candidate thresholds that fall between identical feature values.
            if splits[i] == splits[i - 1]:
                continue

            if gini < best_gini:
                best_gini = gini
                best_idx = idx
                best_split = (splits[i] + splits[i - 1]) / 2

    return best_idx, best_split
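As a worked illustration of the arithmetic above: the weighted Gini of a candidate split is just the size-weighted average of the two children's impurities. The sketch below is a minimal, standalone Python check of that formula; the helper name and the toy label lists are made up for illustration and are not taken from find_split itself.

import numpy as np

def weighted_gini(y_left, y_right):
    # Gini impurity of one side: 1 - sum(p_k^2) over class proportions p_k.
    def gini(y):
        _, counts = np.unique(y, return_counts=True)
        p = counts / counts.sum()
        return 1.0 - np.sum(p ** 2)
    n_left, n_right = len(y_left), len(y_right)
    # size-weighted average of the two child impurities, as in find_split.
    return (n_left * gini(y_left) + n_right * gini(y_right)) / (n_left + n_right)

# A split that isolates class 0 on the left scores lower (purer):
print(weighted_gini([0, 0, 0], [1, 1, 0]))  # ~0.222
print(weighted_gini([0, 1, 0], [1, 0, 1]))  # ~0.444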
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_split(data):\n \"\"\" gets the best feature, and best value \"\"\"\n\n best_feature = None\n best_value = 0.0\n columns = data.columns\n gini_base = gini_impurity(data)\n n_rows = len(data.index) # total number of rows of data before split\n\n # Fininding which split yields the best gini gain\n max_gain = 0\n\n for i in range(len(columns)-1): # -1 b.c. class is final column\n xs = data[columns[i]].unique() # get values to test\n for x in xs: # test values\n # split dataset\n df_left = data[data[columns[i]] < x]\n df_right = data[data[columns[i]] >= x]\n\n # get gini impurities\n gini_left = gini_impurity(df_left)\n gini_right = gini_impurity(df_right)\n \n\n # Calculated weighted gini impurity\n w_left = len(df_left.index) / n_rows\n w_right = len(df_right.index) / n_rows\n\n w_gini = gini_left * w_left + gini_right * w_right\n \n\n # Calculate gini gain (we want to minimize w_gini for the smallest impurity. Ideal split is perfect Left=c1, Right=c2)\n # why not just find min w_gin instead of uding gini_gain and gini_base vaiables?\n gini_gain = gini_base - w_gini\n\n # check if this is the best split so far, store values, update max_gini\n if gini_gain > max_gain:\n best_feature = columns[i]\n best_value = x\n max_gain = gini_gain\n\n df_left = data.loc[data[best_feature] < best_value]\n df_right = data.loc[data[best_feature] >= best_value]\n \n\n return best_feature, best_value, df_left, df_right", "def __get_split_feature(self, data_set, target_feature, tree_features):\n\n if self.__criterion == 'entropy':\n feature_gains = {feature: self.__gain(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = max(feature_gains, key=feature_gains.get)\n return split_feature\n elif self.__criterion == 'gini':\n feature_ginis = {feature: self.__gini(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = min(feature_ginis, key=feature_ginis.get)\n return split_feature\n # TODO: I should check this (gini index).", "def determine_best_split(data, potential_splits, mltask):\n\n first_iteration = True\n for column_index in potential_splits:\n for value in potential_splits[column_index]:\n data_below,data_above = split_data(data, column_index, value)\n \n if mltask == 'regression':\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_mse)\n \n # classification\n else:\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_entropy)\n \n \n if first_iteration or current_overall_metric <= best_overall_metric:\n first_iteration = False\n \n best_overall_metric = current_overall_metric\n best_split_column = column_index\n best_split_value = value\n \n \n return best_split_column,best_split_value", "def find_split(eps, nfeats_test):\n # TODO your code here\n # Consider a random subset of features of the specified size\n total_feature_num = len(eps[0].values)\n if nfeats_test <= total_feature_num:\n features_to_test = sorted(random.sample(range(total_feature_num), nfeats_test))\n else:\n features_to_test = range(total_feature_num)\n\n # For each feature under consideration, and each splitting value:\n # various values that show up in the profiles for the feature at hand, and take the midpoints between adjacent values.\n # calculate the impurity of each of the two subsets of profiles split accordingly.\n res = None\n for feature in features_to_test:\n values_for_split = [ep[feature] for ep in eps]\n splitting_points = 
sorted(list(set(values_for_split)))\n # take the midpoints between adjacent values\n for i in range(len(splitting_points) - 1):\n splitting_points[i] = (splitting_points[i] + splitting_points[i + 1]) / 2\n splitting_points.pop()\n\n # calculate the impurity of each of the two subsets of profiles split accordingly.\n for sp_value in splitting_points:\n subset1 = [ep for ep in eps if ep[feature] < sp_value]\n gini1 = cal_gini(subset1)\n subset2 = [ep for ep in eps if ep[feature] >= sp_value]\n gini2 = cal_gini(subset2)\n # Sum these, weighted by the fraction of profiles in each subset.\n gini = len(subset1) / len(eps) * gini1 + len(subset2) / len(eps) * gini2\n if res is None:\n res = (feature, sp_value, gini)\n elif gini < res[2]:\n res = (feature, sp_value, gini)\n\n return res", "def choose_best_split(self, X_subset, y_subset):\n # YOUR CODE HERE\n feature_index = None\n threshold = None\n best_G = np.inf\n N = len(X_subset)\n \n for current_feature in range(X_subset.shape[1]):\n thresholds = np.unique(X_subset[:, current_feature])\n \n for t in thresholds:\n y_left, y_right = self.make_split_only_y(current_feature, t, X_subset, y_subset)\n H_L = self.H(y_left)\n H_R = self.H(y_right)\n \n G = (len(y_left) / N) * H_L + (len(y_right) / N) * H_R\n \n if G < best_G:\n best_G = G\n feature_index = current_feature\n threshold = t\n \n return feature_index, threshold", "def __find_best_split(self, x, y):\n data = np.transpose(np.vstack((np.transpose(x), y)))\n num_features = data.shape[1] - 1\n\n # initialise splitting rule components\n integer_splitting_rule = None\n feature_index_to_split = None\n max_info_gain = 0\n\n # iterate over all the features and find best splits within these\n for feature in range(num_features):\n info_gain, split_int = self.__find_best_split_in_feature(\n data[:, [feature, -1]])\n if info_gain is None:\n continue\n # update max info gain so far as it iterates over features\n if info_gain > max_info_gain:\n max_info_gain = info_gain\n feature_index_to_split = feature\n integer_splitting_rule = int(split_int)\n\n return feature_index_to_split, integer_splitting_rule", "def __find_best_split_in_feature(self, feature_and_class):\n\n # sort the feature and class and use changes in the class to reduce\n # number of potential split info gain calculations\n sorted_data = feature_and_class[\n feature_and_class[:, 0].astype(np.int).argsort()]\n potential_splits = self.__find_integers_with_class_change(sorted_data)\n info_gains = self.__info_gain_from_splits(potential_splits,\n sorted_data)\n\n # returning nothing in no information gains are found\n if len(info_gains) == 0:\n return None, None\n\n index = info_gains.index(max(info_gains))\n return info_gains[index], potential_splits[index]", "def pick_best_split(self,db,labels,ids,features=None):\n idlabels = [labels[id] for id in ids]\n if misclassification_error(idlabels) == 0:\n #base case: no misclassifications\n self.type = 'v'\n self.value = idlabels[0]\n return 0\n best = None\n bestCost = 0\n splitval = None\n discrete = True\n if features == None:\n if len(ids) < db.numFeatures():\n #look at all present features in the training set\n features = db.getPresentFeatures(ids)\n #print len(features),\"of\",db.numFeatures(),\"features selected\"\n else:\n features = range(db.numFeatures())\n elif callable(features):\n features = features()\n for i in features:\n if len(db.entryLists[i]) == 0: continue\n idiscrete = db.discreteFeature[i]\n if idiscrete:\n #count number of labels of a certain value\n splitter = 
defaultdict(lambda:defaultdict(int))\n #count of labels for missing values\n nmissing = defaultdict(int)\n for id in ids:\n val = db[i,id]\n if val is None:\n #missing values go down to all splits\n nmissing[labels[id]] += 1\n continue\n splitter[val][labels[id]] += 1\n if len(splitter) > continuous_variable_threshold:\n #print \"Determined to be a continuous variable\"\n idiscrete = False\n break\n if idiscrete:\n if len(splitter) <= 1:\n #only a single value\n continue\n #count number of missing values in all splits\n cmax = 0\n for k in splitter:\n for l,v in nmissing.iteritems():\n splitter[k][l] += v\n cmax = max(cmax,sum(splitter[k].values()))\n #shrink by fraction of (# of ids - largest child)/(# of ids)\n scale = (1.0-float(cmax)/float(len(ids)))*len(splitter)\n #evaluate cost\n cost = split_cost(splitter.values())*scale\n #print \"Split on\",i,\"information gain\",-cost,splitter.values()\n else:\n #continuous, need to learn the best split\n vals = []\n presentlabels = []\n nonelabels = []\n for id in ids:\n val = db[i,id]\n if val is None:\n nonelabels.append(labels[id])\n continue\n vals.append(val)\n presentlabels.append(labels[id])\n if len(vals) <= 1:\n print \"No values for feature\",i,\"?\"\n print vals\n continue\n #print \"Considering continuous split on\",i\n s,cost = best_split(vals,presentlabels,nonelabels)\n scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2\n cost *= scale\n #print \"Result\",s,\"Information gain\",-cost\n \n if cost < bestCost:\n best = i\n bestCost = cost\n discrete = idiscrete\n if not idiscrete:\n splitval = s\n \n if best is None:\n self.type = 'v'\n if len(ids) > 0:\n self.value = vote(idlabels)\n return misclassification_error(idlabels)\n else:\n self.value = None\n return 0\n else:\n self.feature = best\n #discrete or inequality split\n if discrete:\n self.type = 's'\n else:\n self.type = 'i'\n self.value = splitval\n return bestCost", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += 
len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? \"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. 
for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def findBestValueSplitByGini(self, data, structure, colIndex):\n minGini, bestSplit = 1, 
[]\n for i in range(0, len(data)-1):\n split = (float(data[i][colIndex]) + float(data[i+1][colIndex])) / 2\n giniSplit = self.calcGiniSplitBySplitValue(data, structure, colIndex, split)\n if giniSplit <= minGini:\n minGini = giniSplit\n bestSplit = [split, giniSplit]\n return bestSplit", "def detect_splits(self):\n logg.info(' abstracted graph will have {} nodes'.format(self.n_splits+1))\n indices_all = np.arange(self.X.shape[0], dtype=int)\n segs = [indices_all]\n if False: # this is safe, but not compatible with on-the-fly computation\n tips_all = np.array(np.unravel_index(np.argmax(self.Dchosen), self.Dchosen.shape))\n else:\n if self.iroot is not None:\n tip_0 = np.argmax(self.Dchosen[self.iroot])\n else:\n tip_0 = np.argmax(self.Dchosen[0]) # just a random index, here fixed to \"0\"\n tips_all = np.array([tip_0, np.argmax(self.Dchosen[tip_0])])\n # we keep a list of the tips of each segment\n segs_tips = [tips_all]\n if self.clusters_precomputed_names:\n self.segs_names_original = [', '.join(self.clusters_precomputed_names)]\n segs_undecided = [True]\n segs_adjacency = [[]]\n segs_distances = np.zeros((1, 1))\n segs_adjacency_nodes = [{}]\n # logg.info(' do not consider groups with less than {} points for splitting'\n # .format(self.min_group_size))\n for ibranch in range(self.n_splits):\n if self.clusters == 'unconstrained_segments':\n iseg, new_tips = self.select_segment(segs, segs_tips, segs_undecided)\n if iseg == -1:\n logg.info('... partitioning converged')\n break\n logg.info('... branching {}:'.format(ibranch + 1),\n 'split group', iseg)\n segs_distances = self.do_split(segs, segs_tips,\n segs_undecided,\n segs_adjacency,\n segs_distances,\n iseg, new_tips)\n else:\n logg.msg(' split', ibranch + 1, v=4)\n stop, segs_distances = self.do_split_constrained(segs, segs_tips,\n segs_adjacency,\n segs_adjacency_nodes,\n segs_distances)\n if stop: break\n\n # segments\n self.segs = segs\n self.segs_tips = segs_tips\n self.segs_sizes = []\n for iseg, seg in enumerate(self.segs): self.segs_sizes.append(len(seg))\n\n # the full, unscaled adjacency matrix\n self.segs_adjacency_full_attachedness = 1/segs_distances\n # if self.attachedness_measure == 'connectedness':\n # norm = np.sqrt(np.multiply.outer(self.segs_sizes, self.segs_sizes))\n # self.segs_adjacency_full_attachedness /= norm\n self.segs_adjacency_full_confidence, self.segs_adjacency_tree_confidence \\\n = self.compute_adjacency_confidence(\n self.segs_adjacency_full_attachedness,\n segs_adjacency,\n self.tree_based_confidence)\n np.fill_diagonal(self.segs_adjacency_full_attachedness, 0)", "def find_split(x, y):\n\n # Need the starting entropy so we can measure improvement...\n start_entropy = calculate_entropy(y)\n\n # Best thus far, initialised to a dud that will be replaced immediately...\n best = {'infogain': -np.inf}\n\n # Randomly allocate the splits to be traversed (without replacement)\n feature_total = x.shape[1]\n feature_subset_count = int(np.sqrt(feature_total))\n feature_subset = np.random.permutation(feature_total)[:feature_subset_count]\n\n # Loop every possible split of every feature...\n for feature_index in feature_subset:\n for split in np.unique(x[:, feature_index]):\n\n left_indices = []\n right_indices = []\n\n # Get index of rows where x[row_index,feature_index] <= split\n for row_index,row in enumerate(x):\n left_indices.append(row_index) if x[row_index,feature_index] <= split else right_indices.append(row_index)\n\n left_ys = y[left_indices]\n right_ys = y[right_indices]\n\n nleft = len(left_ys)\n nright 
= len(right_ys)\n ntotal = nleft + nright\n infogain = start_entropy - (nleft / ntotal) * calculate_entropy(left_ys) - (\n nright / ntotal) * calculate_entropy(right_ys)\n\n if infogain > best['infogain']:\n best = {'feature': feature_index,\n 'split': split,\n 'infogain': infogain,\n 'left_indices': left_indices,\n 'right_indices': right_indices}\n return best", "def _choose_best_feature(self, X, y, label, sample_weights=None):\n best_feature_idx = 0\n # YOUR CODE HERE\n # Note that you need to implement the sampling feature part here for random forest!\n # Hint: You may find `np.random.choice` is useful for sampling.\n # begin answer\n n_features = X.shape[1]\n if self.sample_feature:\n max_features=max(1, min(n_features, int(np.round(np.sqrt(n_features)))))\n new_features=np.random.choice(n_features, max_features, replace=False)\n new_X=X[:, new_features]\n else:\n new_X=X\n n_new_features=new_X.shape[1]\n #new_features=np.random.choice(n_features, n_features, replace=False)\n #old_cost=self.entropy(y, sample_weights)\n #use C4.5 algorirhm\n best_impurity=None\n best_feature_idx=0\n best_feature_val=X[0, 0]\n for i in range(n_new_features):\n unique_vals=np.unique(X[:,i])\n for value in unique_vals:\n sub1_X, sub1_y, label1, sub1_sample_weights, sub2_X, sub2_y, label2, sub2_sample_weights=self._split_dataset(X, y, label, i, value, sample_weights)\n if len(sub1_y)>0 and len(sub2_y)>0:\n new_impurity=self._impurity(y, sub1_y, sub2_y)\n if best_impurity is None or new_impurity > best_impurity:\n best_impurity=new_impurity\n best_feature_idx=i\n best_feature_val=value \n # end answer\n return best_feature_idx, best_feature_val", "def best_split(self):\n sub_group = []\n\n current_entropy = self.entropy(self._Passengers)\n best_gain = 0 # holds the best entropy difference so far\n best_split = self._Attr[0].get_name()\n relative_entropy = 0 # entropy while taking account for the size of the population\n\n for Attribute in self._Attr:\n relative_entropy = 0\n print(\"Attr considered: \" + Attribute.get_name())\n for Attr_option in Attribute.get_options():\n sub_group = []\n for Passenger in self._Passengers:\n if self.passenger_attr_option_check(Passenger,\n Attribute.get_name(),\n Attr_option): # if P.A = V\n sub_group.append(Passenger)\n if len(sub_group) > 0 and len(self._Passengers) > 0:\n relative_entropy += self.entropy(sub_group) * (len(sub_group)/len(self._Passengers))\n\n if current_entropy - relative_entropy > best_gain:\n best_gain = current_entropy - relative_entropy\n best_split = Attribute.get_name()\n\n print(f\"best split:{best_split} \\n with entropy gain of:\\n {best_gain}\")\n\n return best_split", "def best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\titerative_based_score = 0\n\t# given that all pairs use the same amount of features, the position 0 was arbitrarily selected to compute the number of features being used\n\tmin_number_features = int(0.15*len(train_features[0]))\n\tmax_number_features = int(0.85*len(train_features[0]))\n\n\t# min_number_features = 19\n\t# max_number_features = 20\n\n\titerative_based_selector = None\n\titerative_based_train_features_selected = None\n\titerative_based_test_features_selected = None\n\n\tfor i in range(min_number_features, max_number_features):\n\t\tprint(i)\n\t\ttemp_iterative_based_selector = RFE(RandomForestRegressor(n_estimators=100), n_features_to_select=i)\n\t\ttemp_iterative_based_selector.fit(train_features, 
train_similarity_target)\n\t\ttemp_iterative_based_train_features_selected = temp_iterative_based_selector.transform(train_features)\n\t\ttemp_iterative_based_test_features_selected = temp_iterative_based_selector.transform(test_features)\n\n\t\tregressor.fit(temp_iterative_based_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_iterative_based_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Iterative Based Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > iterative_based_score:\n\t\t\titerative_based_score = temp_score\n\t\t\titerative_based_selector = temp_iterative_based_selector\n\t\t\titerative_based_train_features_selected = temp_iterative_based_train_features_selected\n\t\t\titerative_based_test_features_selected = temp_iterative_based_test_features_selected\n\n\titerative_based_mask = iterative_based_selector.get_support()\n\tprint(\"This is the iterative based mask: \")\n\tprint(iterative_based_mask)\n\n\treturn iterative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask", "def _best_split(cls, X, y):\n n = X.shape[0]\n num_feature = X.shape[1]\n y_types = np.unique(y)\n\n # initialize\n min_score = float(n)\n feature_idx = None\n best_theta = None\n best_idx = None\n\n for feature_idx in xrange(num_feature):\n # counter for y\n cumulate_y = Counter()\n rest_y = Counter()\n for y_type in y_types:\n cnt = np.where(y == y_type)[0].shape[0]\n rest_y[y_type] = cnt\n\n # sorted data\n sorted_idx = np.argsort(X[:, feature_idx])\n sorted_X = np.copy(X)\n sorted_y = np.copy(y)\n sorted_X = sorted_X[sorted_idx]\n sorted_y = sorted_y[sorted_idx]\n #print \"_best_split:\", sorted_X.shape, sorted_y.shape\n\n for idx in xrange(n-1):\n theta = (sorted_X[idx, feature_idx] + sorted_X[idx + 1, feature_idx]) / 2\n y_label = sorted_y[idx]\n cumulate_y[y_label] += 1\n rest_y[y_label] -= 1\n left_cnt = sum(cumulate_y.values())\n right_cnt = sum(rest_y.values())\n w_1 = left_cnt * cls._gini_index(cumulate_y.values())\n w_2 = right_cnt * cls._gini_index(rest_y.values())\n score = w_1 + w_2\n if score < min_score:\n min_score = score\n best_theta = theta\n best_idx = feature_idx\n #print('new min score: %.3f' % score)\n #print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n #print('left: %d, right: %d' % (left_cnt, right_cnt))\n print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n return (best_idx, best_theta)", "def _best_split(cls, X, y):\n n = X.shape[0]\n num_feature = X.shape[1]\n y_types = np.unique(y)\n\n # initialize\n min_score = float(n)\n feature_idx = None\n best_theta = None\n best_idx = None\n\n for feature_idx in xrange(num_feature):\n # counter for y\n cumulate_y = Counter()\n rest_y = Counter()\n for y_type in y_types:\n cnt = np.where(y == y_type)[0].shape[0]\n rest_y[y_type] = cnt\n\n # sorted data\n sorted_idx = np.argsort(X[:, feature_idx])\n sorted_X = np.copy(X)\n sorted_y = np.copy(y)\n sorted_X = sorted_X[sorted_idx]\n sorted_y = sorted_y[sorted_idx]\n #print \"_best_split:\", sorted_X.shape, sorted_y.shape\n\n for idx in xrange(n-1):\n theta = (sorted_X[idx, feature_idx] + sorted_X[idx + 1, feature_idx]) / 2\n y_label = sorted_y[idx]\n cumulate_y[y_label] += 1\n rest_y[y_label] -= 1\n left_cnt = sum(cumulate_y.values())\n right_cnt = sum(rest_y.values())\n w_1 = left_cnt * cls._gini_index(cumulate_y.values())\n w_2 = right_cnt * cls._gini_index(rest_y.values())\n score = w_1 + 
w_2\n if score < min_score:\n min_score = score\n best_theta = theta\n best_idx = feature_idx\n #print('new min score: %.3f' % score)\n #print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n #print('left: %d, right: %d' % (left_cnt, right_cnt))\n print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n return (best_idx, best_theta)", "def best_split(self, X, y, attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of information gain/gini gain seen so far\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = information_gain(y,attr_val,self.type)\n if (cur_if>global_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr\n else:\n global_if = float('inf')\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = gini_gain(y,attr_val)\n if (global_if>cur_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr", "def get_best_split(rows):\n best_gain = 0\n best_question = None\n current_impurity = get_gini(rows)\n n_features = len(rows[0])\n\n for col in range(n_features):\n\n for row in rows:\n question = Question(col, row[col])\n true_rows, false_rows = partition(rows, question)\n\n if len(true_rows) == 0 or len(false_rows) == 0:\n break\n\n question_gain = get_info_gain(true_rows, false_rows, current_impurity)\n\n if question_gain >= best_gain:\n best_gain = question_gain\n best_question = question\n\n print(best_gain)\n print(best_question)\n return best_gain, best_question", "def __gini(self, data_set, split_feature, target_feature):\n frequencies = self.__calculate_frequency(data_set, split_feature)\n gini_value = 1.0\n\n # Calculate the gini of the data.\n for value, frequency in frequencies.items():\n probability = frequency / sum(frequencies.values())\n gini_value -= math.pow(probability, 2)\n\n return gini_value", "def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.66)", "def find_best_split(rows):\n best_gain = 0 # keep track of the best information gain\n best_question = None # keep train of the feature / value that produced it\n current_uncertainty = gini(rows)\n n_features = len(rows[0]) - 1 # number of columns\n #print(\"n_features:\", n_features)\n\n for col in range(1,n_features): # for each feature\n # for each iteration this is the set of all values of a specific column, eg, All pixels number 0\n values = set([row[col] for row in rows]) # unique values in the column\n for val in values: # for each value\n\n # Create a question object for each val under a column, holding the val and the col number\n question = Question(col, val)\n\n # try splitting the dataset\n true_rows, false_rows = partition(rows, question)\n\n # Skip this split if it doesn't divide the\n # dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(true_rows, false_rows, 
current_uncertainty)\n\n # You actually can use '>' instead of '>=' here\n # but I wanted the tree to look a certain way for our\n # toy dataset.\n if gain >= best_gain:\n best_gain, best_question = gain, question\n\n return best_gain, best_question", "def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.5)", "def evaluate_split( df, attribute, split ):\n mask = df[attribute] <= split\n \n # split the dataset on the split attribute\n dfl = df[mask]\n dfr = df[~mask]\n \n \n # calculate weighting factors for child\n weighting_factor_left = float(dfl.shape[0])/df.shape[0]\n weighting_factor_right = float(dfr.shape[0])/df.shape[0]\n\n # calculate gini for left and right\n gini_parent = gini_impurity(df)\n gini_left = gini_impurity(dfl)\n gini_right = gini_impurity(dfr)\n \n # calculate weighted gini for this split \n weighted_gini = gini_parent - (weighting_factor_left*gini_left + weighting_factor_right*gini_right)\n return weighted_gini", "def aux_best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\tpercentile_mask = build_mask(percentile_mask, used_features)\n\tmask_save_path = os.path.join('feature_selection_masks', 'assin2_percentile_based_mask.txt')\n\tdebug_data(percentile_mask, mask_save_path)\n\n\treturn percentile_train_features_selected, percentile_test_features_selected, percentile_selector", "def _compute_best_split_and_push(self, node):\n\n node.split_info = self.splitter.find_node_split(\n node.sample_indices, node.histograms, node.sum_gradients,\n node.sum_hessians)\n\n if node.split_info.gain <= 0: # no valid split\n self._finalize_leaf(node)\n else:\n 
heappush(self.splittable_nodes, node)", "def get_split(self,X,y):\n \n BEST_COL = 0\n BEST_SPLIT =0\n BEST_IMPUR = 99\n for i,feature in enumerate(X.T):\n arg_sort=np.argsort(feature) #Sort the feature for optimizing the find of splitting points\n feature= feature[arg_sort]\n y_sort = y[arg_sort]\n splits = self.possible_splits(feature,y_sort) #Get \n\n impur,splits = self.test_split(feature,y_sort,splits) #Get impurity for splitting points\n best_idx = np.argmin(impur)\n best_impur = impur[best_idx]\n \n if best_impur==0.0: #Found perfect split, terminate\n return(i,splits[best_idx])\n elif best_impur<BEST_IMPUR:\n BEST_IMPUR=best_impur\n BEST_SPLIT=splits[best_idx]\n BEST_COL=i\n return (BEST_COL,BEST_SPLIT)", "def split_next(self):\n # Consider the node with the highest loss reduction (a.k.a. gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (sample_indices_left,\n sample_indices_right,\n right_child_pos) = self.splitter.split_indices(node.split_info,\n node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n parent=node)\n right_child_node = TreeNode(depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n parent=node)\n left_child_node.sibling = right_child_node\n right_child_node.sibling = left_child_node\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n self.n_nodes += 2\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if (self.max_leaf_nodes is not None\n and n_leaf_nodes == self.max_leaf_nodes):\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n # Compute histograms of childs, and compute their best possible split\n # (if needed)\n should_split_left = left_child_node.value is None # node isn't a leaf\n should_split_right = right_child_node.value is None\n if should_split_left or should_split_right:\n\n # We will compute the histograms of both nodes even if one of them\n # is a leaf, since computing the second histogram is very cheap\n # (using histogram subtraction).\n n_samples_left = left_child_node.sample_indices.shape[0]\n n_samples_right = right_child_node.sample_indices.shape[0]\n if n_samples_left < n_samples_right:\n smallest_child = left_child_node\n largest_child = right_child_node\n else:\n smallest_child = right_child_node\n largest_child = left_child_node\n\n # We use the brute O(n_samples) method on the child that has the\n # smallest number of samples, and the subtraction trick O(n_bins)\n # on the other one.\n tic = time()\n smallest_child.histograms = \\\n 
self.histogram_builder.compute_histograms_brute(\n smallest_child.sample_indices)\n largest_child.histograms = \\\n self.histogram_builder.compute_histograms_subtraction(\n node.histograms, smallest_child.histograms)\n self.total_compute_hist_time += time() - tic\n\n tic = time()\n if should_split_left:\n self._compute_best_split_and_push(left_child_node)\n if should_split_right:\n self._compute_best_split_and_push(right_child_node)\n self.total_find_split_time += time() - tic\n\n return left_child_node, right_child_node", "def param_selection(df):\n n = df.count()\n numTrees = np.round(np.log10(n) * 100)\n maxDepth = np.round(np.log(n))\n minInstancesPerNode = np.round(np.log10(n) * (np.ceil(n / 500000) + 1))\n #maxBins = np.minimum(80, np.round(500 / np.log(n)))\n subsamplingRate = float(np.where(n > 500000, 0.6, 0.8))\n maxIter = np.round(np.log10(n) * 50)\n\n # minInstancesPerNode\n\n minInstancesPerNode = 200 if minInstancesPerNode > 200 else maxDepth\n minInstancesPerNode = 25 if minInstancesPerNode < 25 else minInstancesPerNode\n\n # maxDepth\n\n maxDepth = 15 if maxDepth > 15 else maxDepth\n maxDepth = 3 if maxDepth < 3 else maxDepth\n\n # maxIter applies to GBT\n\n maxIter = 200 if maxIter > 100 else maxIter\n maxIter = 50 if maxIter < 50 else maxIter\n\n # maxBins set to 32\n\n maxBins = 32\n\n print \"[Info] numTrees: \" + str(numTrees)\n print \"[Info] maxDepth: \" + str(maxDepth)\n print \"[Info] minInstancesPerNode: \" + str(minInstancesPerNode)\n print \"[Info] maxBins: \" + str(maxBins)\n print \"[Info] subsamplingRate: \" + str(subsamplingRate)\n print \"[Info] maxIter: \" + str(maxIter)\n\n return numTrees, maxDepth, minInstancesPerNode, maxBins, subsamplingRate, maxIter", "def finish_sensitivity(self):\n # do at most 1000 features\n idx = torch.randperm(self._features.shape[1])[:100]\n self._features = self._features[:, idx]\n\n weight = self.module.weight.data\n num_features_in = weight.shape[1]\n selected_in = torch.zeros(num_features_in).bool()\n\n # greedy approach to rank in features\n for rank in reversed(range(num_features_in)):\n error_best = torch.Tensor([np.Inf])\n best = None\n\n # loop through remaining features to see which to add next\n for idx_in in range(num_features_in):\n # it's already in the set, no need trying to add it...\n if selected_in[idx_in]:\n continue\n\n # try adding in feature j and compute error\n selected_in[idx_in] = 1\n error_with_j = (\n self._features[selected_in].sum(dim=0) ** 2\n ).sum()\n\n # see if it's better than previous best\n if error_with_j < error_best:\n error_best = error_with_j\n best = idx_in\n\n # remove j from selectedIn for now\n selected_in[idx_in] = 0\n\n # add best one from this round to selectedIn\n selected_in[best] = 1\n\n # also note the rank of best in the sensitivities\n self.sensitivity_in[best] = rank", "def best_split(self):\r\n best_splits = [[0, None, None]]\r\n impurity, best_S, best_xj = 0, None, None\r\n \r\n for xj in self.x_names:\r\n for S in self.potential_splits(xj):\r\n ir = float(self.impurity_reduction(xj, S))\r\n if ir > impurity:\r\n impurity, best_S, best_xj = ir, S, xj\r\n best_splits.append([S, xj])\r\n else: \r\n pass\r\n \r\n return best_S, best_xj", "def best_split1(self,X,attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of varience seen so far\n attr , val = None, None\n for attribute in attributes[::-1]:\n attr_val = pd.Series(X[attribute].unique()).sort_values(ignore_index=True)\n last_val = attr_val[0]\n for i in 
range(1,attr_val.size):\n cur_val = attr_val[i]\n valc = round((last_val+cur_val)/2,4)\n last_val = cur_val\n cur_if = information_gain1(valc,X[attribute],X[\"Output\"],self.type)\n if (cur_if>global_if):\n global_if,attr,val = cur_if,attribute,valc\n return attr,val\n else:\n global_if = float('inf') # the lowest value of varience seen so far\n attr , val = None, None\n for attribute in attributes[::-1]:\n attr_val = pd.Series(X[attribute].unique()).sort_values(ignore_index=True)\n last_val = attr_val[0]\n for i in range(1,attr_val.size):\n cur_val = attr_val[i]\n valc = round((last_val+cur_val)/2,4)\n last_val = cur_val\n cur_if = gini_gain1(X[\"Output\"],X[attribute], valc)\n if (global_if>cur_if):\n global_if,attr,val = cur_if,attribute,valc\n return attr,val", "def train(self):\n max_tuple = self.max_gain()\n # If that gain is 0 then every node should be a pure leaf (hopefully) and you can stop\n while max_tuple.gain != 0:\n max_tuple.node.split(max_tuple.attribute)\n max_tuple = self.max_gain()", "def get_best_split_all(x, y) -> Tuple[int, float, float]:\n m = x.shape[1]\n col_best_gin = np.ones(shape=m)\n col_best_val = np.ones(shape=m)\n for c in range(m):\n best = 1\n best_x = 0\n for i in np.unique(x[:, c]):\n gini = Tree.split(x[:, c], y, i)\n if gini < best:\n best = gini\n best_x = i\n col_best_gin[c] = best\n col_best_val[c] = best_x\n\n # Select best feature to split on\n col_idx = np.argmin(col_best_gin)\n # Convert to bool index\n col_idx = np.array(range(x.shape[1])) == col_idx\n\n return col_idx, col_best_val[col_idx], col_best_gin[col_idx]", "def find_best_free_param_configuration_LOO_adj_sen(p):\n\n measures_res = linux_base_path+ \"/measures_res\"+setup+\"/\"\n# measures_res = base_path +\"\\\\measures_res\"+setup+\"\\\\\"\n# nDCG_MAP_res = base_path +\"\\\\nDCG_MAP_res\\\\\"\n claim_dict = read_pickle(\"claim_dict\")\n claim_num_list = [4,7,17,21,36,37,39,40,41,42,45,46,47,50,51,53,54,55,57,58,59,60,61,62,66,69,70,79,80]\n# claim_num_list = [4,47,53,58,7,54]\n best_configuration_for_nDCG_AP_prec_at_k_left_out_res = {} #key is left out claim and and value is the alpha,beta,lambda configuration that led to best measures - avg nDCG and AP across the train claims\n measures_res_of_left_out_in_its_best_conf = {} #key - left out claim num, and value is the measures of it, in the best configuration without it.\n \n k_val = 50\n prec_at_k_train = rcdtype.recordtype('prec_at_k_train', 'at_5 at_10')\n max_prec_at_k = rcdtype.recordtype('max_prec_at_k', 'max_val max_conf')\n try:\n for left_out_claim_indx in range(len(claim_num_list)):\n max_nDCG = 0\n max_MAP = 0\n max_nDCG_conf = []\n max_MAP_conf = []\n max_prec_at_5 = max_prec_at_k(0,\"\")\n max_prec_at_10 = max_prec_at_k(0,\"\")\n \n left_out_claim_num = claim_num_list[left_out_claim_indx]\n temp_claim_num_list = claim_num_list[:]\n temp_claim_num_list.remove(left_out_claim_num)\n for alpha in range(0,7,1): #change just for test!\n for beta in range(0,10,1):\n for lambda_int in range(0,11,1):\n for delta_1 in range(0,10,1):\n for delta_2 in range(0,10,1):\n if not delta_1+delta_2 >9: \n lambda_f = turn_to_float([lambda_int])\n (alpha_f,beta_f,delta_1_f,delta_2_f) = turn_to_float([alpha,beta,delta_1,delta_2])\n measures_all_claims = utils_linux.read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_delta1_\"+str(delta_1_f)+\"_delta2_\"+str(delta_2_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f))\n \n # AP_all_claims_curr_param_values = 
read_pickle(nDCG_MAP_res+\"AP_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f))\n # nDCG_all_claims_curr_param_values = read_pickle(nDCG_MAP_res+\"NDCG_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f)+\"_at_\"+str(p))\n # prec_at_k_all_claims_params_values = read_pickle(nDCG_MAP_res+\"prec_at_k_all_claims_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f))\n avg_nDCG_on_train = 0\n MAP_on_train = 0\n p_at_k_train_avg = prec_at_k_train(0,0)\n for clm_num_train in temp_claim_num_list:\n avg_nDCG_on_train += measures_all_claims[str(clm_num_train)][0]\n MAP_on_train += measures_all_claims[str(clm_num_train)][1] #in this config' -> get the measures\n p_at_k_train_avg.at_5 += measures_all_claims[str(clm_num_train)][2]\n p_at_k_train_avg.at_10 += measures_all_claims[str(clm_num_train)][3]\n avg_nDCG_on_train = float(float(avg_nDCG_on_train)/float(len(temp_claim_num_list)))\n MAP_on_train = float(float(MAP_on_train)/float(len(temp_claim_num_list)))\n p_at_k_train_avg.at_5 = float(float(p_at_k_train_avg.at_5)/float(len(temp_claim_num_list)))\n p_at_k_train_avg.at_10 = float(float(p_at_k_train_avg.at_10)/float(len(temp_claim_num_list)))\n \n if avg_nDCG_on_train > max_nDCG:\n max_nDCG = avg_nDCG_on_train\n max_nDCG_conf = (alpha_f,beta_f,lambda_f,delta_1_f,delta_2_f)\n if MAP_on_train > max_MAP:\n max_MAP = MAP_on_train\n max_MAP_conf = (alpha_f,beta_f,lambda_f,delta_1_f,delta_2_f)\n if p_at_k_train_avg.at_5 > max_prec_at_5.max_val:\n max_prec_at_5.max_val = p_at_k_train_avg.at_5\n max_prec_at_5.max_conf = (alpha_f,beta_f,lambda_f,delta_1_f,delta_2_f)\n if p_at_k_train_avg.at_10 > max_prec_at_10.max_val:\n max_prec_at_10.max_val = p_at_k_train_avg.at_10\n max_prec_at_10.max_conf = (alpha_f,beta_f,lambda_f,delta_1_f,delta_2_f)\n best_configuration_for_nDCG_AP_prec_at_k_left_out_res[left_out_claim_num] = [(max_nDCG,max_nDCG_conf),(max_MAP,max_MAP_conf),(max_prec_at_5.max_val,max_prec_at_5.max_conf),(max_prec_at_10.max_val,max_prec_at_10.max_conf)]\n #finished leaving out,\n #now calculate the nDCG and MAP of the left out claims with its best configuration results\n avg_nDCG_on_left_out = 0\n MAP_on_left_out = 0\n avg_prec_at_5_on_left_out = 0\n avg_prec_at_10_on_left_out = 0\n for clm_num in claim_num_list:\n (best_alpha_nDCG,best_beta_nDCG,best_lambda_nDCG,best_delta1_nDCG,best_delta2_nDCG) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1]\n (best_alpha_MAP,best_beta_MAP,best_lambda_MAP,best_delta1_MAP,best_delta2_MAP) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1]\n (best_alpha_prec_at_5,best_beta_prec_at_5,best_lambda_prec_at_5,best_delta1_prec_at_5,best_delta2_prec_at_5) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1]\n (best_alpha_prec_at_10,best_beta_prec_at_10,best_lambda_prec_at_10,best_delta1_prec_at_10,best_delta2_prec_at_10) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1]\n #read the best config' dict\n best_config_of_nDCG_dict = read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(best_alpha_nDCG)+\"_beta_\"+str(best_beta_nDCG)+\"_delta1_\"+str(best_delta1_nDCG)+\"_delta2_\"+str(best_delta2_nDCG)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_nDCG))\n best_config_of_AP_dict = 
read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(best_alpha_MAP)+\"_beta_\"+str(best_beta_MAP)+\"_delta1_\"+str(best_delta1_MAP)+\"_delta2_\"+str(best_delta2_MAP)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_MAP))\n best_config_of_prec_at_5_dict = read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(best_alpha_prec_at_5)+\"_beta_\"+str(best_beta_prec_at_5)+\"_delta1_\"+str(best_delta1_prec_at_5)+\"_delta2_\"+str(best_delta2_prec_at_5)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_prec_at_5)) #take only the first item in the tuple\n best_config_prec_of_at_10_dict = read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(best_alpha_prec_at_10)+\"_beta_\"+str(best_beta_prec_at_10)+\"_delta1_\"+str(best_delta1_prec_at_10)+\"_delta2_\"+str(best_delta2_prec_at_10)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_prec_at_10)) #take only the second item in the tuple\n measures_res_of_left_out_in_its_best_conf[clm_num] = (best_config_of_nDCG_dict[str(clm_num)][0],best_config_of_AP_dict[str(clm_num)][1],best_config_of_prec_at_5_dict[str(clm_num)][2],best_config_prec_of_at_10_dict[str(clm_num)][3])\n avg_nDCG_on_left_out += best_config_of_nDCG_dict[str(clm_num)][0]\n MAP_on_left_out += best_config_of_AP_dict[str(clm_num)][1]\n avg_prec_at_5_on_left_out += best_config_of_prec_at_5_dict[str(clm_num)][2]\n avg_prec_at_10_on_left_out += best_config_prec_of_at_10_dict[str(clm_num)][3]\n \n save_pickle(measures_res+\"measures_res_of_left_out_in_its_best_conf_k_top_docs_\"+str(k_val)+\"_at_\"+str(p), measures_res_of_left_out_in_its_best_conf)\n #report the avg\n avg_nDCG_on_left_out = float(float(avg_nDCG_on_left_out)/float(len(claim_num_list))) \n MAP_on_left_out = float(float(MAP_on_left_out)/float(len(claim_num_list))) \n avg_prec_at_5_on_left_out = float(float(avg_prec_at_5_on_left_out)/float(len(claim_num_list)))\n avg_prec_at_10_on_left_out = float(float(avg_prec_at_10_on_left_out)/float(len(claim_num_list)))\n #write res to file:\n # claim text, the best nDCG conf and result on train, the nDCG it really has, and the same for AP\n with open(measures_res+\"nDCG_AP_prec_at_k_res_of_left_out_in_its_best_conf_k_top_docs_\"+str(k_val)+\"_at_\"+str(p)+\".csv\", 'wb') as csvfile:\n w = csv.writer(csvfile)\n row = \"claim|best_nDCG|alpha,beta,lambda,delta_1,delta_2,delta_3|best_AP|alpha,beta,lambda,delta_1,delta_2,delta_3|best_prec_at_5|alpha,beta,lambda,delta_1,delta_2,delta_3|best_prec_at_10|alpha,beta,lambda,delta_1,delta_2,delta_3\"\n w.writerow([row])\n for (clm_num,(nDCG,AP,prec_at_5,prec_at_10)) in measures_res_of_left_out_in_its_best_conf.items():\n row = claim_dict[str(clm_num)]+\"&\"+'%.3f'%nDCG+\"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][2])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][3])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][4])+\",\"+str(0.9-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][4]-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][3])\n row += 
\"&\"+'%.3f'%AP+\"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][2])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][3])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][4])+\",\"+str(0.9-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][4]-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][3])\n row += \"&\"+'%.3f'%prec_at_5+ \"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][2])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][3])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][4])+\",\"+str(0.9-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][4]-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][3])\n row += \"&\"+'%.3f'%prec_at_10+ \"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][2])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][3])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][4])+\",\"+str(0.9-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][4]-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][3])\n w.writerow([row])\n w.writerow([\"avg_nDCG_on_left_out: \"+ '%.4f'%avg_nDCG_on_left_out ])\n w.writerow([\"MAP_on_left_out: \"+ '%.4f'%MAP_on_left_out])\n w.writerow([\"avg_prec_at_5_on_left_out: \"+ '%.4f'%avg_prec_at_5_on_left_out])\n w.writerow([\"avg_prec_at_10_on_left_out: \"+ '%.4f'%avg_prec_at_10_on_left_out])\n except Exception as err: \n sys.stderr.write('problem in LOO') \n print err", "def findBestModel(X_train, X_test, Y_test, model='iForest'):\n if model == 'iForest':\n total_score = 0;\n parameters = [0,0,0,0]\n for max_features in range(1,X_train.shape[1]+1):\n for contamination in range(1,101):\n iForest = IsolationForest(n_estimators = 100, max_features = max_features, contamination = contamination/1000, random_state = 0).fit(X_train)\n \n scores = []\n for x_test,y_test in zip(X_test,Y_test):\n y_hat = iForest.predict(x_test)\n score = evaluate(y_test,y_hat) # returns similarity percentage\n scores.append(score)\n \n if sum(scores) > total_score:\n total_score = sum(scores)\n parameters[0] = max_features\n parameters[1] = contamination/1000\n parameters[2] = total_score\n parameters[3] = scores\n print(parameters, contamination)\n \n return parameters", "def _feature_selection(self , x ,y):\n # initialize good features list\n # and best scores to keep track of both\n good_features = []\n best_scores = []\n\n # calculating the number of features\n num_features = x.shape[1]\n\n # infinite loop\n while True:\n # intialize best feature and score of this loop\n this_feature = None\n best_score = 0\n\n # loop over all features\n for feature in range(num_features):\n # if feature is already in good features,\n # skip this for loop\n if feature in good_features:\n\n continue\n # selected features are all 
good till now\n # and current feature\n selected_features = good_features + [feature]\n # remove all other feature from the data\n xtrain = x[: , selected_features]\n # calculate the score , in our case AUC\n score = self.evaluate_score(xtrain , y)\n # if score is greater then the best score\n # of this loop, change best score and best feature\n if score > best_score:\n this_feature = feature\n best_score = score\n\n # if we have selected a feature , add it to\n # the good feature list and update best score list\n if this_feature != None:\n good_features.append(this_feature)\n best_scores.append(best_score)\n\n # if we did not improve during the last two rounds,\n # exit the while loop\n if len(best_score) > 2:\n if best_scores[-1] < best_scores[-2]:\n break\n\n # return the best score and good features\n # why do we remove the last data point?\n return best_scores[:-1] , good_features[:-1]", "def choose_split(data,treshold):\n n_features = len(data[0]) - 1 # number of columns\n quest_gain = [] # keep track of the gains and questions\n\n for col in range(1,n_features): # for each feature\n values = set([row[col] for row in data]) # unique values in the column\n for val in values: # for each value\n question = Question(col, val)\n \n # try splitting the dataset\n true_rows, false_rows = partition(data, question)\n\n # Skip this split if it doesn't divide the dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(data, true_rows, false_rows)\n quest_gain.append(Question_gain(gain,question))\n\n possible_question = [] # possible questions to ask\n n_quest_gain = len(quest_gain)\n\n if n_quest_gain == 0:\n return float('Inf'), float('NaN') #\n\n for x in range(n_quest_gain):\n if (quest_gain[x].gain >= treshold):\n possible_question.append(Question_gain(quest_gain[x].gain,quest_gain[x].question))\n \n n_possible_question = len(possible_question)\n if n_possible_question == 0:\n return float('Inf'), float('NaN')\n\n if n_possible_question>=2:\n [i, j] = random.sample(range(0, n_possible_question), 2)\n else:\n i = j = random.randint(0,n_possible_question-1)\n\n if possible_question[i].gain>=possible_question[j].gain:\n return possible_question[i].gain, possible_question[i].question\n else:\n return possible_question[j].gain, possible_question[j].question", "def getNextNodeUsingCellDiff(kGoalState):\n \n global fringe\n global solutions\n\n \n\n\n\n minNode = None\n minCost = 99999999999\n minNodeIndex = -1\n\n \n pnode = None\n pcost = None\n\n if len(solutions)>0 and solutions[0] != None:\n pnode = solutions[0];\n pcost = getHValueForNode(pnode,kGoalState)\n #print pnode, pcost\n # raw_input()\n \n\n\n\n for idx,node in enumerate(fringe):\n #get the heu. 
function values\n g_value = getHValueForNode(node,kGoalState)\n \n\n if g_value < minCost:\n minNode = node\n minNodeIndex = idx\n minCost = g_value\n\n\n fringe.pop(minNodeIndex)\n c = getHValueForNode(minNode,kGoalState)\n if pnode != None:\n if c > pcost:\n minNode = None\n \n return minNode", "def fit(self, data, targets):\n # update these three\n self.idx = 0\n self.val = None\n self.left = None\n self.right = None\n ### YOUR CODE HERE\n # i have added a slow and a fast version\n \n num_points, num_features = data.shape\n # print('num points, num_features', num_points, num_features)\n \n def feat_score(feat_idx):\n feat = data[:, feat_idx].copy()\n perm = np.argsort(feat)\n s_feat = feat[perm]\n s_targets = targets[perm]\n target_var = ((s_targets - s_targets.mean())**2).sum()\n s_left, s_right = sum_squares(s_targets)\n def score(idx, _vals):\n ## slow version\n #left = _vals[0:idx]\n #right = _vals[idx:]\n #assert len(left) + len(right) == len(_vals), (len(left), len(right), len(_vals))\n #left_mean = np.mean(left)\n #right_mean = np.mean(right)\n #left_error = np.sum((left-left_mean)**2)\n #assert np.allclose(left_error, s_left[idx]) \n #right_error = np.sum((right-right_mean)**2)\n #assert np.allclose(right_error, s_right[idx])\n # return left_error+right_error\n # fast version\n return s_left[idx] + s_right[idx]\n # score for every split\n scores = np.array([score(x, s_targets) for x in range(0, num_points)])\n assert scores.min() <= target_var, target_var\n best_score_idx = np.argmin(scores)\n best_score = scores[best_score_idx]\n val = s_feat[best_score_idx]\n # print('best score', feat_idx, best_score, best_score_idx, val, s_feat[best_score_idx+1])\n \n return best_score, {'val': val, \n 'left': np.mean(s_targets[:best_score_idx]), \n 'right': np.mean(s_targets[best_score_idx:])\n } \n\n split_scores = []\n for f in range(0, num_features):\n total_score, _params = feat_score(f)\n split_scores.append(total_score)\n # print('score of {0} - {1}'.format(feat_names[f], total_score))\n # print('feature scores:', np.array(split_scores))\n best_feat = np.argmin(split_scores)\n best_score = split_scores[best_feat]\n # print('Best Feature idx: {0} - Best Cost: {1}'.format(best_feat, best_score))\n score_again, params = feat_score(best_feat)\n # print('double check score', score_again, best_score)\n self.idx = best_feat\n self.val = params['val']\n self.left = params['left']\n self.right = params['right']\n print(\"idx={}, val={}, left={}, right={}\".format(self.idx, self.val, self.left, self.right))\n assert not np.isnan(self.left)\n assert not np.isnan(self.right)\n ### END CODE", "def _bestFeat2split(dataSet, impurity_crit, min_impurity_decrease, min_samples_split):\n\t\tm, n = dataSet.shape\n\t\tbestFeatInd, bestVal = None, DecisionTree._make_leaf(dataSet, impurity_crit)\n\n\t\tif m < min_samples_split or len(set(dataSet[:,-1])) == 1:\n\t\t\treturn bestFeatInd, bestVal\n\n\t\timpurity = m * impurity_crit(dataSet)\n\t\tmin_impurity = np.inf\n\t\t\n\n\t\tfor feat_ind in range(n-1):\n\t\t\tif type(dataSet[:, feat_ind][0]) != str:\n\t\t\t\tuniqVal = set(dataSet[:, feat_ind])\n\t\t\telse:\n\t\t\t\tuniqVal = map(set, subsets(list(dataSet[:, feat_ind])))\n\t\t\tfor val in uniqVal:\n\t\t\t\tD1, D2 = DecisionTree._binarySplit(dataSet, feat_ind, val)\n\t\t\t\tif len(D1) < min_samples_split or len(D2) < min_samples_split:\n\t\t\t\t\tcontinue\n\t\t\t\tnew_impurity = len(D1)*impurity_crit(D1) + len(D2)*impurity_crit(D2)\n\t\t\t\tif impurity - new_impurity < 
min_impurity_decrease:\n\t\t\t\t\tcontinue\n\t\t\t\tif new_impurity < min_impurity:\n\t\t\t\t\tmin_impurity = new_impurity\n\t\t\t\t\tbestFeatInd = feat_ind; bestVal = val\n\t\treturn bestFeatInd, bestVal", "def compute_splits(self, G, nw_name='test', train_frac=0.51, split_alg='spanning_tree', owa=True, fe_ratio=1,\n split_id=0, verbose=False):\n # Compute train/test split\n if split_alg == 'random':\n tr_E, te_E = stt.rand_split_train_test(G, train_frac)\n train_E, test_E, G, mp = pp.relabel_nodes(tr_E, te_E, G.is_directed())\n elif split_alg == 'naive':\n train_E, test_E = stt.naive_split_train_test(G, train_frac)\n elif split_alg == 'spanning_tree':\n train_E, test_E = stt.split_train_test(G, train_frac)\n elif split_alg == 'fast':\n train_E, test_E = stt.quick_split(G, train_frac)\n train_E_false, test_E_false = stt.quick_nonedges(G, train_frac, fe_ratio)\n elif split_alg == 'timestamp':\n train_E, test_E, G = stt.timestamp_split(G, train_frac)\n train_E = set(zip(train_E[:, 0], train_E[:, 1]))\n test_E = set(zip(test_E[:, 0], test_E[:, 1]))\n else:\n raise ValueError('Split alg. {} unknown!'.format(split_alg))\n\n # Compute non-edges\n if split_alg != 'fast':\n num_fe_train = len(train_E) * fe_ratio\n num_fe_test = len(test_E) * fe_ratio\n if owa:\n train_E_false, test_E_false = stt.generate_false_edges_owa(G, train_E, test_E,\n num_fe_train, num_fe_test)\n else:\n train_E_false, test_E_false = stt.generate_false_edges_cwa(G, train_E, test_E,\n num_fe_train, num_fe_test)\n\n # Set class attributes to new values\n self.set_splits(train_E, train_E_false, test_E, test_E_false, directed=G.is_directed(), nw_name=nw_name,\n split_id=split_id, split_alg=split_alg, owa=owa, verbose=verbose)\n\n return train_E, train_E_false, test_E, test_E_false", "def get_next_split ( self, feature_matrix: np.ndarray, target_array: np.ndarray, tree_split: TreeSplits):\n # If only 1 y value, make a leaf node\n if len ( set ( target_array ) ) == 1:\n tree_split.updateTreeValues (\n feature_column = None,\n feature_value = None,\n node_type = None,\n nodes = {},\n children = target_array,\n )\n return tree_split\n\n # Get the presplit entropy\n presplit_entropy = self.evaluate_function ( target_array )\n\n column_values = {}\n for k, v in self.map_column_node_type.items():\n # If there's only one value in feature matrix \"X\", set the split value to infinity\n if len ( set ( feature_matrix [ :, k ] ) ) == 1:\n value = np.inf\n split = None\n class_ratios = 1\n elif v == \"continuous\":\n # Get the best possible continuous split for the column\n split, value, class_ratios = self.get_optimal_continuous_feature_split (\n feature_matrix = feature_matrix, target_array = target_array, feature_column = k\n )\n else:\n # Get the split value for the discrete column\n value, class_ratios = self.get_optimal_discrete_feature_split (\n feature_matrix = feature_matrix, target_array = target_array, feature_column = k\n )\n split = None\n\n column_values [ k ] = ( split, value, class_ratios )\n\n # Get the column with the largest gain ratio\n col_idx_with_min_value = max (\n column_values,\n key = lambda x: ( presplit_entropy - column_values.get ( x ) [ 1 ] )\n / column_values.get ( x ) [ 2 ],\n )\n\n # If stopping criteria are met or all splits are infinite, terminate the process\n if (\n self.early_stopping_comparison (\n column_values.get ( col_idx_with_min_value ) [ 1 ], self.early_stopping_value\n )\n ) or not np.isfinite ( column_values.get ( col_idx_with_min_value ) [ 1 ] ):\n self.get_terminal_node (\n feature_column 
= col_idx_with_min_value,\n feature_value = column_values [ col_idx_with_min_value ] [ 0 ],\n node = tree_split,\n feature_matrix = feature_matrix ,\n target_array = target_array,\n )\n return tree_split\n\n # If the best split is continuous, add a continuous node\n if self.map_column_node_type.get ( col_idx_with_min_value ) == \"continuous\":\n return self.get_continuous_node (\n feature_column = col_idx_with_min_value,\n feature_value = column_values [col_idx_with_min_value ] [ 0 ],\n feature_matrix = feature_matrix,\n target_array = target_array,\n node = tree_split,\n )\n\n # Otherwise, add a discrete node.\n else:\n return self.get_discrete_node (\n feature_matrix = feature_matrix,\n target_array = target_array,\n feature_value = column_values [ col_idx_with_min_value ] [ 0 ],\n feature_column = col_idx_with_min_value,\n node = tree_split,\n )\n # End get_next_split", "def compute_splits(feature_df, target_col, max_num_splits):\n tree_estimator = DecisionTreeClassifier(max_leaf_nodes=max_num_splits+1,\n class_weight='balanced',\n random_state=1407)\n\n tree_estimator.fit(feature_df, target_col)\n thresholds = tree_estimator.tree_.threshold[tree_estimator.tree_.children_left != _tree.TREE_LEAF]\n return sorted(thresholds)", "def _split_threshold(self, node):\n\n # define the score to improve upon\n if self.n_clusters >= self.min_leaves and node.size <= self.max_leaf_size:\n # split only if min(children scores) > node.score\n force_split = False\n best_score = node.score\n else:\n # force split: just take the best (even if children are worse)\n force_split = True\n best_score = None\n\n left, right = None, None\n\n # iterate over embedding dimensions (first ones are more reliable)\n # up to max_n_vec (included), until we found an improving split\n for _vec in range(self.n_vec):\n\n # get the candidate thresholds along this dimension\n threshs = self._get_candidate_thresholds(node, _vec)\n\n # look for an improving best split along this eigenvector\n for _t in threshs:\n # compute the split\n below_thresh = self.E[node.ids, _vec] < _t\n _lids = node.ids[below_thresh]\n _rids = node.ids[np.logical_not(below_thresh)]\n # check if the tubes are not too small\n _nl, _nr = len(_lids), len(_rids)\n is_valid = _nl >= self.min_leaf_size and _nr >= self.min_leaf_size\n if is_valid:\n # compute the score of the new tubes only\n _sl = self.get_tube_score(_lids)\n _sr = self.get_tube_score(_rids)\n # get the score of this split\n split_score = min(_sl, _sr)\n if best_score is None or split_score > best_score:\n # better split\n best_score = split_score\n node.has_children = True\n node.thresh = _t\n left = SpectralNode(\n _lids, _vec, score=_sl, name=node.name + \"0\")\n right = SpectralNode(\n _rids, _vec, score=_sr, name=node.name + \"1\")\n\n # check stopping criterion\n if node.has_children:\n # we found an improving split\n if _vec > 0 or not force_split:\n # found an improving non-forced split: stop here\n break\n\n return left, right", "def optimalize(): \n start = time()\n max = 0\n maxn=2\n maxm=3\n check = [(n,m) for n in range(24,30) for m in range(3,20)]\n dict = {}\n print \"start optimalization of: bigram-features,uniqueness\"\n for n,m in check:\n score=0\n print \">lem>>n(uniqueness):\"+str(n)\n print \">lem>>m(commonness):\"+str(m)\n wrds = common_but_unique(ngrams_dict(1,authors,compactcorpus,n,False),m)\n bigrams = common_but_unique(ngrams_dict(2,authors,compactcorpus,n,False),m)\n trigrams = common_but_unique(ngrams_dict(3,authors,compactcorpus,n,False),m)\n #pos_feat = 
[\"wrd:\"+wrd+\">\"+str(num) for wrd in wrds for num in range(0,1)]\n pos_feat = [\"bi:(\"+str(bi[0])+\",\"+str(bi[1])+\")>\"+str(num) for bi in bigrams for num in range(0,1)] + [\"wrd:\"+wrd+\">\"+str(num) for wrd in wrds for num in range(0,1)] + [\"tri:(\"+str(tri[0])+\",\"+str(tri[1])+\",\"+str(tri[2])+\")>\"+str(num) for tri in trigrams for num in range(0,1)]\n\n print \"number of features AFTER selection:\" + str(len(pos_feat))\n for x in range(0,4):\n data = split_train_test_data(authors, corp,45)\n train_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"train\"]]\n train_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"train\"]]\n test_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"test\"]]\n classifier1 = NaiveBayesClassifier.train(train_set)\n acc = nltk.classify.accuracy(classifier1,test_set)\n print \"accuracy:\"+str(acc)\n score +=acc\n print \"time elapsed: \"+str(time()-start)\n print \"score(\" + str(n) +\")=\"+str(score/4)\n classifier1.show_most_informative_features(8)\n dict[(n,m)]=(score/4)\n if(score/4)>max:\n max = (score/4)\n maxn =n\n maxm = m\n print \"max score=\"+str(max)\n print \"where n = \"+str(maxn)\n print \"where m = \"+str(maxm)\n print \"time:\"+str(time()-start)\n writetofile(dict,\"optimalizedict_commonwrdsandbigrams_latest_lem.pkl\")", "def findBestScore():\n resultList = []\n BestScore = 0\n # iterate through different max_depths from 1 to 19\n for max_depth in range(1,20):\n dtree = tree.DecisionTreeClassifier(max_depth=max_depth)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n dtree = dtree.fit(cv_data_train, cv_target_train)\n dtree.feature_importances_\n trainng_score += [dtree.score(cv_data_train,cv_target_train)]\n testing_score += [dtree.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_depth = max_depth\n resultList += [[best_depth, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding max_depth is: ')\n return BestScore, best_depth", "def fit_greedy(data, nnbr=10, threshold=0.05, refit=refit_pll):\n n,m = data.shape;\n L = np.zeros((n,n)) # initialize parameters\n scores = np.zeros(n) \n data = data.astype(int)\n for i in range(n):\n Ni = []\n while (len(Ni)<nnbr):\n Vi = (0*data[i,:] + sum(data[j,:]*(2**jj) for jj,j in enumerate(Ni))).astype(int)\n Vsz = int(Vi.max()+1)\n for j in range(n):\n if j==i or j in Ni: scores[j]=0.; continue\n pIJV = Factor( [Var(0,2),Var(1,2),Var(2,Vsz)] , 0.)\n # pIJV[data[i,:],data[j,:],Vi] += 1. 
# Test??\n for k in range(m): pIJV[data[i,k],data[j,k],Vi[k]] += 1.\n pV = pIJV.marginal([2]); pV /= (pV.sum()+1e-20);\n pIJV /= (pIJV.sum([0])+1e-20)\n scores[j] = ((pIJV.condition({0:1,1:1})-pIJV.condition({0:1,1:0})).abs()*pV).sum()\n jmax = int(np.argmax(scores))\n if scores[jmax] < threshold: break\n Ni.append(jmax)\n # TODO: prune back each list?\n #print(i,\" : \",Ni)\n L[i,Ni] = 1.\n L = L*L.T # \"and\" connectivity: keep only if edges (i,j) and (j,i) present?\n model = Ising(L);\n refit(model,data)\n return model", "def select_model():\r\n from sklearn import tree\r\n import graphviz\r\n\r\n ValidationSetAndLabels = AllSets[1]\r\n ValLabels = ValidationSetAndLabels[:, [-1]] # extract labels (last column)\r\n ValSet = np.delete(ValidationSetAndLabels, -1, axis=1) # delete labels\r\n\r\n TrainingSetAndLabels = AllSets[2]\r\n TrainLabels = TrainingSetAndLabels[:, [-1]] # extract labels (last column)\r\n TrainSet = np.delete(TrainingSetAndLabels, -1, axis=1) # delete labels\r\n\r\n \"\"\"\r\n This is the code to select the best hyperparameter (part b)\r\n\r\n for SplitCriterion in ['entropy', 'gini']:\r\n print \"Criterion: \" + SplitCriterion + '\\n'\r\n\r\n for MaxDepth in [int(depth) for depth in np.linspace(1, np.log2(TrainSet.shape[1]), 5)]:\r\n print \"max_depth: \" + str(MaxDepth) + '\\n'\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion=SplitCriterion, max_depth=MaxDepth)\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n print \"Accuracy for this test is: %f %%\" %Accuracy\r\n print '\\n'\r\n\r\n print '\\n'\r\n \"\"\"\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=12)\r\n\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n dot_data = tree.export_graphviz(MyTree, out_file=None, max_depth=2,\r\n feature_names=AllSets[3], filled=True, rounded=True, special_characters=True,\r\n class_names=TrainLabels.flatten().astype(str))\r\n graph = graphviz.Source(dot_data)\r\n graph.render(\"output\")", "def mts_ls1(current_x, current_fitness, best_x, best_fitness, improve, search_range, task, rng, bonus1=10, bonus2=1,\n sr_fix=0.4, **_kwargs):\n if not improve:\n search_range /= 2\n i_fix = np.argwhere(search_range < 1e-15)\n search_range[i_fix] = task.range[i_fix] * sr_fix\n improve = False\n grade = 0.0\n for i in range(len(current_x)):\n x_old = current_x[i]\n current_x[i] = x_old - search_range[i]\n current_x = task.repair(current_x, rng)\n new_fitness = task.eval(current_x)\n if new_fitness < best_fitness:\n grade = grade + bonus1\n best_x = current_x.copy()\n best_fitness = new_fitness\n if new_fitness == current_fitness:\n current_x[i] = x_old\n elif new_fitness > current_fitness:\n current_x[i] = x_old + 0.5 * search_range[i]\n current_x = task.repair(current_x, rng)\n new_fitness = task.eval(current_x)\n if new_fitness < best_fitness:\n grade = grade + bonus1\n best_x = current_x.copy()\n best_fitness = new_fitness\n if new_fitness >= current_fitness:\n current_x[i] = x_old\n else:\n grade = grade + bonus2\n improve = True\n current_fitness = new_fitness\n else:\n grade = grade + bonus2\n improve = True\n current_fitness = new_fitness\n return current_x, current_fitness, 
best_x, best_fitness, improve, grade, search_range", "def feature_selection(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\t# percentile selector\n\tpercentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask = best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# model based selector\n\tmodel_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask = best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# iterative based selector\n\titerative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask = best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\tall_scores = []\n\n\tregressor.fit(train_features, train_similarity_target)\n\tprint(\"The score on all features: %.3f\" % regressor.score(test_features, test_similarity_target))\n\tall_scores.append(regressor.score(test_features, test_similarity_target))\n\n\t# show results for the percentile selector\n\tall_scores.append(percentile_score)\n\n\t# show results for the model based selector\n\tall_scores.append(model_based_score)\n\n\t# show results for the iterative based selector\n\tall_scores.append(iterative_based_score)\n\n\tmax_value_position = all_scores.index(max(all_scores))\n\n\tif max_value_position == 0:\n\t\tprint(\"Returning all features!\\n\")\n\t\treturn train_features, test_features\n\telif max_value_position == 1:\n\t\tpercentile_mask = build_mask(percentile_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'percentile_mask.txt')\n\t\tdebug_data(percentile_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the percentile selector!\\n\")\n\t\treturn percentile_selector, percentile_train_features_selected, percentile_test_features_selected\n\telif max_value_position == 2:\n\t\tmodel_based_mask = build_mask(model_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'model_based_mask.txt')\n\t\tdebug_data(model_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the model based selector!\\n\")\n\t\treturn model_based_selector, model_based_train_features_selected, model_based_test_features_selected\n\telse:\n\t\titerative_based_mask = build_mask(iterative_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'iterative_based_mask.txt')\n\t\tdebug_data(iterative_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the iterative based selector!\\n\")\n\t\treturn iterative_based_selector, iterative_based_train_features_selected, iterative_based_test_features_selected", "def split(self, X, y, feature_array):\n n, p = X.shape\n\n best_gain = 0\n best_split_point = 0\n best_feature_id = -1\n for feature_id in feature_array:\n cur_gain, cur_split_point = self.find_best_split(\n X[:, feature_id], y)\n if cur_gain > best_gain - self.eps:\n best_gain = cur_gain\n best_split_point = cur_split_point\n best_feature_id = feature_id\n\n assert(best_feature_id != -1)\n\n x = X[:, best_feature_id]\n left_index = x < best_split_point\n right_index = x >= best_split_point\n\n self.split_id = 
best_feature_id\n self.split_val = best_split_point\n\n return (left_index, right_index)", "def findRFBestN():\n resultList = []\n BestScore = 0\n nList = [ n for n in range(1,200) if n%10 == 0]\n for n in nList:\n rforest = ensemble.RandomForestClassifier(max_depth=5, n_estimators=n)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n rforest = rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_n = n\n resultList += [[n, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding n_estimator is: ')\n return BestScore, best_n", "def choose_bestnext(self, round):\n board_percentage = []\n \n for i in self.possible_coords:\n iSq = round.getSq(i[0], i[1])\n \n if round.pr_hook(iSq) == ' X ':\n sq_percentage = []\n surroundings = iSq.point_neighbors()\n \n for j in surroundings:\n jSq = round.getSq(j[0], j[1])\n\n if round.as_int(jSq) != None:\n count_X = 0\n count_F = 0\n check = jSq.point_neighbors()\n\n for k in check:\n kSq = round.getSq(k[0], k[1])\n if round.pr_hook(kSq) == ' X ':\n count_X += 1\n elif round.pr_hook(kSq) == ' f ':\n count_F += 1 \n if count_X != 0:\n sq_percentage.append((jSq.mine_neighbors() - count_F)/ count_X)\n\n avg_percent = 0\n if len(sq_percentage) == 0:\n avg_percent = 0.8\n elif sq_percentage.count(1) != 0:\n avg_percent = 1\n round.flagSq(i[0], i[1])\n else:\n sum_so_far = 0\n for p in sq_percentage:\n sum_so_far += p\n avg_percent = sum_so_far / len(sq_percentage)\n \n board_percentage.append(avg_percent)\n\n else:\n board_percentage.append(100)\n\n sorted_percentages = board_percentage.copy()\n sorted_percentages.sort()\n\n best_choice = board_percentage.index(sorted_percentages[0])\n\n return self.possible_coords[best_choice]", "def extractBestAlgorithms(args = algs2009, f_factor=2,\n target_lb=1e-8, target_ub=1e22):\n\n # TODO: use pproc.TargetValues class as input target values\n # default target values:\n targets = pproc.TargetValues(\n 10**np.arange(np.log10(max((1e-8, target_lb))),\n np.log10(target_ub) + 1e-9, 0.2))\n # there should be a simpler way to express this to become the\n # interface of this function\n\n print 'Loading algorithm data from given algorithm list...\\n' \n\n verbose = True\n dsList, sortedAlgs, dictAlg = pproc.processInputArgs(args, verbose=verbose)\n\n print 'This may take a while (depending on the number of algorithms)'\n\n selectedAlgsPerProblem = {}\n for f, i in pproc.dictAlgByFun(dictAlg).iteritems():\n for d, j in pproc.dictAlgByDim(i).iteritems():\n selectedAlgsPerProblemDF = []\n best = BestAlgSet(j)\n \n for i in range(0, len(best.target)):\n t = best.target[i]\n # if ((t <= target_ub) and (t >= target_lb)):\n if toolsstats.in_approximately(t,\n targets((f, d), discretize=True)):\n # add best for this target:\n 
selectedAlgsPerProblemDF.append(best.algs[i])\n \n # add second best or all algorithms that have an ERT\n # within a factor of f_factor of the best:\n secondbest_ERT = np.infty\n secondbest_str = ''\n secondbest_included = False \n for astring in j:\n currdictalg = dictAlg[astring].dictByDim()\n if currdictalg.has_key(d):\n curralgdata = currdictalg[d][f-1] \n currERT = curralgdata.detERT([t])[0]\n if (astring != best.algs[i]):\n if (currERT < secondbest_ERT):\n secondbest_ERT = currERT\n secondbest_str = astring\n if (currERT <= best.detERT([t])[0] * f_factor):\n selectedAlgsPerProblemDF.append(astring)\n secondbest_included = True\n if not (secondbest_included) and (secondbest_str != ''):\n selectedAlgsPerProblemDF.append(secondbest_str)\n \n if len(selectedAlgsPerProblemDF) > 0:\n selectedAlgsPerProblem[(d, f)] = selectedAlgsPerProblemDF\n \n print 'pre-processing of function', f, 'done.' \n \n print 'loading of best algorithm(s) data done.'\n \n countsperalgorithm = {}\n for (d, f) in selectedAlgsPerProblem:\n print 'dimension:', d, ', function:', f\n setofalgs = set(selectedAlgsPerProblem[d,f])\n \n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += selectedAlgsPerProblem[d,f].count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"\n \n return selectedalgsperdimension", "def best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\treturn percentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask", "def main():\n housing = pd.read_csv(\"Data/train_original.csv\")\n housing[\"TotalSF\"] = (\n 
housing[\"TotalBsmtSF\"] + housing[\"1stFlrSF\"] + housing[\"2ndFlrSF\"]\n )\n training_features, testing_features, training_target, testing_target = impute_dummify_and_split(\n housing, drop_target=False\n )\n\n p_values = [\n (c, pearsonr(training_features[\"SalePrice\"], training_features[c])[1])\n for c in training_features.columns\n ]\n\n p_value_limits = [0.05]\n\n result = []\n ps_and_cols = {}\n\n for p_value_limit in p_value_limits:\n\n high_ps = list(\n map(lambda t: t[0], sorted(p_values, key=lambda t1: t1[1])[:15])\n )\n\n print(training_features[high_ps].corr())\n\n columns = [p[0] for p in p_values if p[1] < p_value_limit]\n\n training_features_restricted = training_features[columns].drop(\n \"SalePrice\", axis=\"columns\"\n )\n\n testing_features_restricted = testing_features[columns].drop(\n \"SalePrice\", axis=\"columns\"\n )\n\n for model in (\n linear_model.Lasso(alpha=2.1),\n linear_model.Ridge(alpha=2.1),\n ):\n\n model.fit(training_features_restricted, training_target)\n\n train_score = model.score(\n training_features_restricted, training_target\n )\n\n test_score = model.score(\n testing_features_restricted, testing_target\n )\n\n name = str(model).split(\"(\")[0]\n\n result = result + [\n (\n \"_2_restrict_features\",\n name,\n \"p value limit: {:.3f}, alpha: 2.1\".format(p_value_limit),\n train_score,\n test_score,\n )\n ]\n\n print(ps_and_cols)\n return training_features[high_ps].corr()", "def sub_select_features(features, strategy):\n\n def extract_one_index(y_val):\n index_ones = []\n y_prev = 0\n start_stop = []\n if y_val[-1] == 1:\n y_val = y_val.tolist() + [0]\n for i, y in enumerate(y_val):\n if y_prev == 0 and y == 1:\n start_stop = [i]\n if y_prev == 1 and y == 0:\n start_stop.append(i)\n index_ones.append(start_stop)\n y_prev = y\n return index_ones\n\n def wrapper(start_stop, maxi):\n size = start_stop[1] - start_stop[0]\n bound = (size+1)//2\n return [max(0, start_stop[0]-bound), min(maxi, start_stop[1]+bound)]\n\n def deduce_index_to_keep(one_index, maxi):\n wrapped = [wrapper(start_stop, maxi) for start_stop in one_index]\n to_keep = [idx for idx in range(wrapped[0][0], wrapped[0][1])]\n for start_stop in wrapped[1:]:\n to_keep += [idx for idx in range(start_stop[0], start_stop[1]) if idx > to_keep[-1]]\n return to_keep\n\n if strategy == 0:\n new_features = features # We do nothing\n\n else:\n new_features = dict()\n for which in ['train', 'test']:\n one_id = extract_one_index(features['y_'+which])\n true_idx = deduce_index_to_keep(one_id, len(features['y_'+which]))\n try:\n new_features['x_'+which] = features['x_'+which][true_idx]\n new_features['y_'+which] = features['y_'+which][true_idx]\n except IndexError as e:\n print(which)\n print(features['x_'+which].shape)\n print(features['y_'+which].shape)\n print(one_id)\n raise e\n\n return new_features", "def findBestColumnSplitByGini(self, data, structure):\n minGini, bestSplit = 1, None\n for colName in list(structure.keys())[:-1]:\n giniSplit = self.calcGiniSplitByColumn(data, structure, colName)\n if giniSplit <= minGini:\n minGini = giniSplit\n bestSplit = colName\n return bestSplit", "def optGM(objective_function: \"function\",\n set_of_mols_par: SetOfMolecules,\n subset_of_mols: SetOfMolecules,\n min_subset_of_mols: SetOfMolecules,\n chg_method: ChargeMethod,\n num_of_samples: int,\n num_of_candidates: int) -> namedtuple:\n\n print(\" Sampling...\")\n samples = lhs(num_of_samples, chg_method.params_bounds)\n\n print(\" Calculating of objective function for samples...\")\n samples_rmsd = 
[objective_function(sample, chg_method, min_subset_of_mols) for sample in samples]\n\n print(\"\\x1b[2K Selecting candidates...\")\n best_samples = samples[list(map(samples_rmsd.index, nsmallest(num_of_candidates * 100, samples_rmsd)))]\n best_samples_rmsd = [objective_function(sample, chg_method, set_of_mols_par) for sample in best_samples]\n candidates = best_samples[list(map(best_samples_rmsd.index, nsmallest(num_of_candidates, best_samples_rmsd)))]\n\n print(\"\\x1b[2K Local minimizating...\")\n all_loc_min_course = []\n opt_candidates = []\n for params in candidates:\n opt_params, _, loc_min_course = local_minimization(objective_function, subset_of_mols, chg_method, params)\n all_loc_min_course.append(loc_min_course[0])\n opt_candidates.append(opt_params)\n\n opt_candidates_rmsd = [objective_function(candidate, chg_method, set_of_mols_par) for candidate in opt_candidates]\n final_candidate_obj_val = nsmallest(1, opt_candidates_rmsd)\n final_candidate_index = opt_candidates_rmsd.index(final_candidate_obj_val)\n final_candidate = opt_candidates[final_candidate_index]\n\n print(\"\\x1b[2K Final local minimizating...\")\n final_params, final_obj_val, loc_min_course = local_minimization(objective_function, set_of_mols_par, chg_method, final_candidate)\n all_loc_min_course[final_candidate_index].extend(loc_min_course[0])\n\n return namedtuple(\"chgs\", [\"params\",\n \"obj_val\",\n \"loc_min_courses\"])(final_params,\n final_obj_val,\n all_loc_min_course)", "def heuristic_function(self, node_current: PriorityNode) -> float:\n ########################################################################\n # todo: Implement your own heuristic cost calculation here. #\n # Hint: #\n # Use the State of the current node and the information from the #\n # planning problem, as well as from the scenario. 
#\n # Some helper functions for your convenience can be found in #\n # ./search_algorithms/base_class.py #\n ########################################################################\n output_logs = False\n if output_logs:\n print(\"##################\")\n print(\"current time step: \", node_current.list_paths[-1][-1].time_step)\n print(\"current problem mode\", self.planningProblemType)\n print(\"depth tree: \", node_current.depth_tree)\n currentorient = node_current.list_paths[-1][-1].orientation\n currentpos = node_current.list_paths[-1][-1].position\n currenttimestep = node_current.list_paths[-1][-1].time_step\n currentVel = node_current.list_paths[-1][-1].velocity\n\n # Test if reached goal:\n if self.reached_goal(node_current.list_paths[-1]):\n return 0.0\n # Test if route planner failed to find a path: \n if self.routeplannerresult is None:\n return np.inf\n\n ############ Detect cars in front:\n # calc cost based on distance to gool following the refrence path:\n # loop through all obstacles at time step x and find if any is close of current pos:\n if not self.disableObstAvoidance:\n for obst in self.list_obstacles:\n obstPos = obst.state_at_time(currenttimestep)\n if currentorient is not None and obstPos is not None:\n disttoobst = self.euclidean_distance(currentpos, obstPos.position)\n lookaheadVar = 1.375 * currentVel + 2.5\n if disttoobst <= lookaheadVar:\n # calc orientation diff between car and obstacle:\n vectorToObst = np.array([currentpos, obstPos.position])\n vectorToObstOrient = self.calc_angle_of_position(vectorToObst, currentpos)\n orientdiff = self.calc_orientation_diff(currentorient, vectorToObstOrient)\n if abs(orientdiff) <= 0.261799:\n if not 'velocity' in obstPos.attributes:\n continue\n if node_current.list_paths[-1][-1].velocity > obstPos.velocity and obstPos.velocity != 0:\n return np.inf\n \n # get index of closest object to the ego vehicle:\n index_smallest_dist = self.get_index_nearest_obst_infront(node_current)\n \n # use the index to locate vehicle to calc cost: \n if index_smallest_dist != -1:\n # found the index of vehicle with smallest distance to ego car:\n obst = self.list_obstacles[index_smallest_dist]\n obstPos = obst.state_at_time(currenttimestep)\n if obstPos is not None and 'velocity' in obstPos.attributes:\n if obstPos.velocity == 0:\n cost = node_current.list_paths[-1][-1].velocity\n return cost\n if node_current.list_paths[-1][-1].velocity > obstPos.velocity:\n return np.inf\n cost = abs(node_current.list_paths[-1][-1].velocity - obstPos.velocity)\n return cost\n #########################################################\n\n # Decide based on planning problem type how to calculate cost\n if self.planningProblemType == 'ModeA':\n # Call function for planning problem with desired time, position, speed and orientation\n cost = self.cost_for_modeA_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeA cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'ModeB':\n # Call function for planning problem with desired time, position and velocity:\n cost = self.cost_for_modeB_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'ModeC':\n # Call function for planning problem with desired time, position and orientation:\n cost = self.cost_for_modeC_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 
0\n return cost\n elif self.planningProblemType == 'ModeD':\n # Call function for planning problem with desired time and position:\n cost = self.cost_for_modeD_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'Survival':\n # Call function for planning problem with desired time:\n cost = self.cost_for_Survival_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 0\n return cost", "def features_selection(x_train, y_train,x_val,x_test,model,feature_list):\n n_features = x_train.shape[1]\n print(\"n_features original: \",n_features)\n if model == 'LR':\n estimator = LogisticRegression(random_state = 442, penalty = 'elasticnet', solver= 'saga',l1_ratio=0.5)\n if model == 'SVM':\n estimator = svm.LinearSVC(class_weight = 'balanced', random_state = 442)\n if model == 'SGD':\n estimator = SGDClassifier(class_weight = 'balanced', random_state = 442)\n if model == 'ADA':\n estimator = AdaBoostClassifier(DecisionTreeClassifier(max_depth=5, class_weight = 'balanced'),random_state = 442)\n if model == 'RF':\n estimator = RandomForestClassifier(random_state=442, class_weight = 'balanced')\n if model == 'GBT':\n estimator = GradientBoostingClassifier(random_state = 442)\n if model == 'XGBT':\n ratio = float(np.sum(y_train == 0)) / np.sum(y_train==1)\n estimator = XGBClassifier(seed = 442,eval_metric = 'auc', scale_pos_weight = ratio)\n if model == 'LightGB':\n ratio = float(np.sum(y_train == 0)) / np.sum(y_train==1)\n estimator = lgb.LGBMClassifier(seed = 442, scale_pos_weight = ratio)\n\n print(\"Searching RFE\")\n classifier = RFE(estimator=estimator, step=1)\n model = Pipeline([('classifier', classifier)])\n parameters = {'classifier__n_features_to_select': [int(n_features*0.25),int(n_features*0.5),int(n_features*0.75),n_features]}\n grid = GridSearchCV(model, parameters, cv=3)\n grid.fit(x_train, y_train)\n num_features = grid.best_params_\n num_features = re.sub(r'[^\\d]','',str(num_features))\n print(\"Optimal number of features\",num_features)\n\n print(\"SelectKBest\")\n selector = SelectKBest(f_classif, k=int(num_features)) #we pass the \"optimal number of features\" discovered in the previous pass\n selector.fit(x_train, y_train)\n x_train = selector.transform(x_train).astype('float32')\n x_val = selector.transform(x_val).astype('float32')\n x_test = selector.transform(x_test).astype('float32')\n feature_list = [feature_list[i] for i in selector.get_support(indices=True)]\n return x_train, x_val, x_test,feature_list, num_features", "def data_split(df, best_feature, info_gain_dict, dt_dict,\r\n curr_node, depth, continous = False):\r\n \r\n depth -= 1\r\n # decrease the depth count\r\n no_data = False\r\n # default flag for data check\r\n match_threshold_df = df[df[best_feature] == info_gain_dict[best_feature][0]]\r\n # subset the data if threshold is matched\r\n if not len(match_threshold_df):\r\n # no more data points\r\n no_data = True\r\n match_threshold_df = df\r\n # go back to prev dataframe\r\n else:\r\n pass\r\n \r\n mismatch_threshold_df = df[df[best_feature] != info_gain_dict[best_feature][0]]\r\n # subset the data if there is a mismatch\r\n if not len(mismatch_threshold_df):\r\n # if no more data points\r\n no_data = True\r\n mismatch_threshold_df = df\r\n # go back to prev dataframe\r\n else:\r\n pass\r\n decision_tree(match_threshold_df, dt_dict, curr_node, best_feature,\r\n align_dir = 
\"equal\", depth=depth, no_data = no_data)\r\n # function call to grow tree on the left side\r\n decision_tree(mismatch_threshold_df, dt_dict, curr_node, best_feature,\r\n align_dir = \"not_equal\", depth=depth, no_data = no_data)\r\n # function call to grow the tree on the right side\r", "def findBestSplitInDataByInfoGain(self, data, structure, colName):\n colIndex, maxInfoGain, bestSplit = structure[colName]['index'], 0, []\n for i in range(0, len(data)-1):\n split = (float(data[i][colIndex]) + float(data[i+1][colIndex])) / 2\n infoGain = self.calcInfoGainBySplitValue(data, structure, colName, split)\n if infoGain >= maxInfoGain:\n bestSplit = [split, infoGain]\n maxInfoGain = infoGain\n return bestSplit", "def main():\n \n # The following 5 command lines can be outcommented if the features are already created.\n # There is no need to process the data every single time.\n # Fine tuning the learning algorythm is much faster without that extra step.\n \n # by reading the train dataset the feature index is created.\n # First calling of the processdata function\n # Data limited to 300000\n featureIndexes = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000)\n print \"featureIndex generated!\"\n print len(featureIndexes)\n\n # Trainfeature is created using the indexfeatures...\n # Second calling of the processdata function\n trainFeatures, trainTargets, trainItemIds, trainPrices, trainUrls, trainPhones, trainEmails, trainLength = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000) # Original itemsLimit=300000\n\n # Building the test dataset... just like the training...\n testFeatures, testItemIds, testPrices, testUrls, testPhones, testEmails, testLength = processData(os.path.join(dataFolder,\"avito_test.tsv\"), featureIndexes)\n\n # Dumping data into file...\n # joblib.dump((trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds), os.path.join(dataFolder,\"train_data.pkl\"))\n joblib.dump((trainFeatures,trainTargets,trainItemIds,trainPrices,trainUrls,trainPhones,trainEmails,trainLength,\n testFeatures, testItemIds,testPrices,testUrls,testPhones,testEmails,testLength), os.path.join(dataFolder,\"SeparatedByCategory.pkl\"))\n\n\n # loading data pack...\n # trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds = joblib.load(os.path.join(dataFolder,\"train_data.pkl\"))\n\n #logging.info(\"Feature preparation done, fitting model...\")\n\n # Stochastic gradient model", "def getAllContributingAlgorithmsToBest(algnamelist, target_lb=1e-8, \n target_ub=1e2):\n \n print \"Generating best algorithm data from given algorithm list...\\n\", \n customgenerate(algnamelist)\n \n bestalgfilepath = 'bestCustomAlg'\n picklefilename = os.path.join(bestalgfilepath, 'bestalg.pickle')\n fid = open(picklefilename, 'r')\n bestalgentries = pickle.load(fid)\n fid.close()\n print 'loading of best algorithm data done.'\n \n countsperalgorithm = {}\n for (d, f) in bestalgentries:\n print 'dimension:', d, ', function:', f\n print f\n setofalgs = set(bestalgentries[d,f].algs)\n # pre-processing data to only look at targets >= target_lb:\n correctedbestalgentries = []\n for i in range(0,len(bestalgentries[d,f].target)):\n if ((bestalgentries[d,f].target[i] >= target_lb) and\n (bestalgentries[d,f].target[i] <= target_ub)):\n \n correctedbestalgentries.append(bestalgentries[d,f].algs[i])\n print len(correctedbestalgentries)\n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with 
zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += correctedbestalgentries.count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"", "def most_discriminating( features_df, labels_df, top=5):\n \n columns = features_df.shape[1]\n labels_df = labels_df[['file', 'candy_id']].set_index('file')\n qualities = np.zeros(columns)\n \n _left = 0\n _right = 1\n\n _c = 0\n _h = 1\n\n # globals\n cases = float(labels_df['candy_id'].count()) # total cases\n\n p_c_A = (labels_df['candy_id'] == 0).sum() / cases\n p_h_A = 1.0 - p_c_A\n\n\n for feature in range(columns):\n\n branch_cases = np.zeros(2) # total on each branch\n pi = np.zeros(2) # proportion on each branch\n\n split = np.array([\n #c, h\n [0, 0], #left\n [0, 0] #right\n ])\n\n for index, value in features_df[feature].iteritems():\n split[value][labels_df.loc[index][0]] += 1\n\n branch_cases[_left] = split[_left].sum()\n branch_cases[_right] = split[_right].sum()\n \n if branch_cases[_left] == 0.0 or branch_cases[_right] == 0.0:\n qualities[feature] = 0\n continue\n \n pi[_left] = branch_cases[_left] / cases\n pi[_right] = branch_cases[_right] / cases\n\n p_c_B = split[_left][_c] / branch_cases[_left]\n p_h_B = split[_left][_h] / branch_cases[_left]\n\n p_c_C = split[_right][_c] / branch_cases[_right]\n p_h_C = split[_right][_h] / branch_cases[_right]\n\n gini_tree = 1.0 - (math.pow(p_c_A, 2) + math.pow(p_h_A, 2))\n\n gini_left = 1.0 - (math.pow(p_c_B, 2) + math.pow(p_h_B, 2))\n gini_right = 1.0 - (math.pow(p_c_C, 2) + math.pow(p_h_C, 2))\n\n quality = gini_tree - pi[_left] * gini_left - pi[_right] * gini_right\n\n qualities[feature] = quality\n return list(reversed(qualities.argsort()))[:top]", "def DecisionTreeAlgorithm(df, mltask, counter = 0, min_samples = 2, max_depth = 5, random_subspace = None):\n\n if counter == 0:\n global COLUMN_HEADERS, FEATURE_TYPE\n COLUMN_HEADERS = df.columns\n FEATURE_TYPE = hf.determine_type_of_feature(df)\n data = df.values\n else:\n data = df\n \n if (check_purity(data)) or (len(data) < min_samples) or (counter == max_depth):\n leaf = create_leaf(data, mltask)\n return leaf\n \n else:\n counter += 1\n \n potential_splits = get_potential_split(data, random_subspace)\n split_column,split_value = determine_best_split(data, potential_splits, mltask)\n data_below,data_above = split_data(data,split_column,split_value)\n \n if (len(data_below) == 0) or (len(data_above) == 0):\n leaf = create_leaf(data, mltask)\n return leaf\n \n feature_name = COLUMN_HEADERS[split_column]\n type_of_feature = FEATURE_TYPE[split_column]\n if type_of_feature == 'continuous':\n question = '{} <= {}'.format(feature_name,split_value)\n else:\n question = '{} = {}'.format(feature_name,split_value)\n sub_tree = {question:[]}\n \n yes_answer = DecisionTreeAlgorithm(data_below, mltask, counter, min_samples, max_depth, random_subspace)\n no_answer = DecisionTreeAlgorithm(data_above, mltask, counter, min_samples, max_depth, random_subspace)\n \n if yes_answer == no_answer :\n sub_tree = yes_answer\n else :\n sub_tree[question].append(yes_answer)\n sub_tree[question].append(no_answer)\n \n return sub_tree", "def 
evaluate_hmdb51_fusion():\n vlen = 0\n ob_suffix = '-max.feat.npy.gz'\n fv_suffix = '_fv.npy.gz'\n ob_root = '/home/syq/research_final/data/features/ob_hmdb51_pooled_python/'\n fv_root = '/home/syq/research_final/data/dense-traj/fv_hmdb51_python/'\n hmdb_splits = 'testTrainMulti_7030_splits/'\n categories = os.listdir(fv_root)\n weight = 1.0\n weights = [i / 20.0 for i in range(21)]\n acc_to_weights = {}\n\n for weight in weights:\n print \"Weight: %.2f\" % weight\n accs = np.zeros(3)\n for splitnum in range(1,4):\n ts = time.time()\n trainfiles, testfiles = hmdb51_splits.loadsplit(categories,\n hmdb_splits,\n splitnum)\n print 'Have %d train files' % len(trainfiles)\n print 'Have %d test files' % len(testfiles)\n\n if not vlen:\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(trainfiles[0][0][:-4],\n ob_suffix)),\"rb\")\n vlen_ob = len(np.load(fp))\n fp.close()\n print \"OB vector length is %d\" % vlen_ob\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(trainfiles[0][0][:-4],\n fv_suffix)),\"rb\")\n vlen_fv = len(np.load(fp))\n fp.close()\n print \"IDTFV vector length is %d\" % vlen_fv\n\n Dtrain_ob = np.zeros( (len(trainfiles),vlen_ob), np.float32 )\n Dtrain_fv = np.zeros( (len(trainfiles),vlen_fv), np.float32 )\n\n Ytrain = np.ones ( (len(trainfiles) )) * -1000\n\n for fi,f in enumerate(trainfiles):\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(f[0][:-4],\n ob_suffix)),\"rb\")\n Dtrain_ob[fi][:] = np.load(fp)\n fp.close()\n Ytrain[fi] = f[1]\n\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(f[0][:-4],\n fv_suffix)),\"rb\")\n Dtrain_fv[fi][:] = np.load(fp)\n fp.close()\n\n Dtest_ob = np.zeros( (len(testfiles),vlen_ob), np.float32 )\n Dtest_fv = np.zeros( (len(testfiles),vlen_fv), np.float32 )\n\n Ytest = np.ones ( (len(testfiles) )) * -1000\n\n for fi,f in enumerate(testfiles):\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(f[0][:-4],\n ob_suffix)),\"rb\")\n Dtest_ob[fi][:] = np.load(fp)\n fp.close()\n Ytest[fi] = f[1]\n\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(f[0][:-4],\n fv_suffix)),\"rb\")\n Dtest_fv[fi][:] = np.load(fp)\n fp.close()\n\n \"\"\"\n Early fusion\n Dtrain = np.hstack((Dtrain_ob, Dtrain_fv))\n Dtest = np.hstack((Dtest_ob, Dtest_fv))\n\n clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n acc = clf.fit(Dtrain, Ytrain).score(Dtest, Ytest)\n \"\"\"\n fv_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n\n ob_clf = OneVsRestClassifier(estimator=SVC(C=10,\n cache_size=1000,\n kernel='linear',\n probability=True),\n n_jobs=-1)\n\n # Get probabilities for late fusion\n Dtrain_fv = fv_clf.fit(Dtrain_fv, Ytrain).decision_function(Dtrain_fv)\n Dtrain_ob = ob_clf.fit(Dtrain_ob, Ytrain).decision_function(Dtrain_ob)\n Dtest_fv = fv_clf.decision_function(Dtest_fv)\n Dtest_ob = ob_clf.decision_function(Dtest_ob)\n\n # Scale decision values b/w 0 and 1\n Dtrain_fv = preprocessing.normalize(Dtrain_fv)\n Dtrain_ob = preprocessing.normalize(Dtrain_ob)\n Dtest_fv = preprocessing.normalize(Dtest_fv)\n Dtest_ob = preprocessing.normalize(Dtest_ob)\n\n # Late fusion\n scores_train = (Dtrain_fv * weight) + (Dtrain_ob * (1 - weight))\n latefusion_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=-1)\n latefusion_clf.fit(scores_train, Ytrain)\n\n scores_test = (Dtest_fv * weight) + (Dtest_ob * (1 - weight))\n acc = latefusion_clf.score(scores_test, Ytest)\n print 'Fold', splitnum, 'late fusion acc', acc\n print \"Train & testing time %.3f\" % (time.time() - ts)\n accs[splitnum-1] = acc\n acc_to_weights[weight] = accs\n\n print \"Mean accuracy: %.3f\" % 
accs.mean()\n with open(\"hmdb51_weight_gridsearch.txt\", \"w\") as f:\n for weight, accs in acc_to_weights.items():\n f.write(str(weight) + str(accs) + '\\n')\n return acc_to_weights\n\n \"\"\"\n with open('fv_hmdb51_accs.txt', 'w') as f:\n f.write(\"%s\\nMean:%.3f\" % (str(accs), np.mean(accs)))\n \"\"\"", "def collect_best_features(self):\n bincsp = self.binary_csp # just to make code shorter\n n_folds = len(self.binary_csp.folds)\n n_class_pairs = len(self.binary_csp.class_pairs)\n result_shape = (n_folds, n_class_pairs)\n self.train_feature = np.empty(result_shape, dtype=object)\n self.train_feature_full_fold = np.empty(result_shape, dtype=object)\n self.test_feature = np.empty(result_shape, dtype=object)\n self.test_feature_full_fold = np.empty(result_shape, dtype=object)\n self.selected_filters_per_filterband = np.empty(result_shape, dtype=object)\n for fold_i in range(n_folds):\n for class_pair_i in range(n_class_pairs):\n bin_csp_train_features = deepcopy(bincsp.train_feature[\n self.selected_filter_inds, fold_i, class_pair_i])\n bin_csp_train_features_full_fold = deepcopy(\n bincsp.train_feature_full_fold[\n self.selected_filter_inds,\n fold_i, class_pair_i])\n bin_csp_test_features = deepcopy(bincsp.test_feature[\n self.selected_filter_inds, fold_i, class_pair_i])\n bin_csp_test_features_full_fold = deepcopy(\n bincsp.test_feature_full_fold[\n self.selected_filter_inds,fold_i, class_pair_i])\n selected_filters_per_filt = self.select_best_filters_best_filterbands(\n bin_csp_train_features, max_features=self.n_features,\n forward_steps=self.forward_steps, \n backward_steps=self.backward_steps,\n stop_when_no_improvement=self.stop_when_no_improvement)\n self.train_feature[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_train_features, selected_filters_per_filt)\n self.train_feature_full_fold[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_train_features_full_fold, selected_filters_per_filt)\n \n self.test_feature[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_test_features, selected_filters_per_filt)\n self.test_feature_full_fold[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_test_features_full_fold, selected_filters_per_filt)\n \n self.selected_filters_per_filterband[fold_i, class_pair_i] = \\\n selected_filters_per_filt", "def main_predefined_split():\n\n average_performance = []\n fold_num = 'predefined'\n output_file_folder = \"output/{}\".format(args.experiment_name)\n output_file_name = \"{}/lnnel_{}.csv\".format(output_file_folder, fold_num)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = output_file_name\n\n if args.use_blink:\n df_train = pd.read_csv(\"./data/lcquad/blink/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/blink/lcquad_test_sorted.csv\")\n else:\n df_train = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_test_sorted.csv\")\n\n # filter out the questions with single positive or many negatives in trianing set\n filtered_question_mentions = []\n for qm in df_train.QuestionMention.unique():\n df_ = df_train[df_train.QuestionMention == qm]\n if df_.Label.sum() == 0:\n filtered_question_mentions.append(qm)\n if df_.Label.sum() == 1 and df_.shape[0] == 1:\n filtered_question_mentions.append(qm)\n # print(df_.Label.values)\n df_train_split_filtered = 
df_train[~df_train.QuestionMention.isin(filtered_question_mentions)]\n df_train_split_filtered = df_train_split_filtered.sort_values(by=['QuestionMention', 'Label'])\n df_train = df_train_split_filtered\n\n # train\n features_train = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_train.Features.values])\n x_train = torch.from_numpy(features_train).float()\n y_train = torch.from_numpy(df_train.Label.values).float().reshape(-1, 1)\n m_labels_train = df_train.Mention_label.values\n ques_train = df_train.Question.values\n\n # test\n features_test = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_test.Features.values])\n x_test = torch.from_numpy(features_test).float()\n y_test = torch.from_numpy(df_test.Label.values).float().reshape(-1, 1)\n m_labels_test = df_test.Mention_label.values\n ques_test = df_test.Question.values\n\n # train model and evaluate\n model = pick_model(args.model_name, args.alpha)\n model = model.to(device)\n\n # move to gpu\n x_train, y_train = x_train.to(device), y_train.to(device)\n x_test, y_test = x_test.to(device), y_test.to(device)\n\n print(model)\n\n print(\"model: \", args.model_name, args.alpha)\n print(model(x_train, m_labels_train))\n\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n # aggregate the data into train, val, and test\n train_data = (x_train, y_train, m_labels_train, ques_train)\n print(\"train:\", x_train.shape, y_train.shape, m_labels_train.shape, ques_train.shape)\n test_data = (x_test, y_test, m_labels_test, ques_test)\n print(\"test:\", x_test.shape, y_test.shape, m_labels_test.shape, ques_test.shape)\n\n # check class distribution\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n train(model, train_data, test_data, test_data, args.checkpoint_name, args.num_epoch, args.margin,\n args.learning_rate)\n test_pred, best_scores = test(x_test, m_labels_test, ques_test, args.alpha, args.checkpoint_name,\n args.model_name,\n args.output_file_name)\n with open(args.log_file_name, 'a') as f:\n f.write(\n \"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}; lr={}; margin={}\\n\".format(\n args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores[\n 'precision'],\n best_scores[\n 'recall'],\n best_scores['f1'],\n args.learning_rate,\n args.margin))\n print(\"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}\\n\".format(args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores['precision'],\n best_scores['recall'],\n best_scores['f1']))\n average_performance.append([best_scores['precision'], best_scores['recall'], best_scores['f1']])\n\n average_performance = np.array(average_performance)\n print(\"Avg performance is prec - rec - f1: \", average_performance.mean(0))", "def getSplit(self):\n b_index, b_value, b_score, b_groups = 999, 999, 999, None\n for j in range(len(self[0]) - 1):\n for i in range(len(self)):\n groups = self.splitAttribute(j, self[i][j]) # lit, big\n gini = self.giniIndex(groups)\n if gini < b_score and (j, \"%.1f\" % self[i][j]) not in self.atr:\n b_index, b_value, b_score, b_groups = j, self[i][\n j], gini, groups\n return b_index, b_value, b_groups, b_score", "def getFeaturesMinMax(self, gameState, a, oldGameState):\n features = util.Counter()\n if(a==None):\n newGameState = gameState\n else:\n newGameState = gameState.generateSuccessor(self.index, a)\n 
myOldState = oldGameState.getAgentState(self.index)\n myNewState = newGameState.getAgentState(self.index)\n friendState = gameState.getAgentState((self.index+2)%4) \n \n oldfood = self.getFood(gameState)\n oldpills = self.getCapsules(oldGameState)\n pills = self.getCapsules(newGameState)\n \n \n\n ghostScore = self.getGhostScore(gameState, newGameState, a)\n foodScore = self.getFoodScore(newGameState, oldfood)\n pacmanScore = self.getPacmanScore(gameState, newGameState, a)\n captureScore = self.getCaptureScore(newGameState, myOldState, myNewState)\n friendScore = self.getFriendScore(myNewState, friendState, a)\n pillScore = self.getPillScore(newGameState, oldpills)\n wallScore = self.getWallScore(newGameState)\n\n #print(str(a)+\":\"+str(foodScore)+\",\"+str(ghostScore)+\",\"+str(captureScore)+\",\"+str(myNewState))\n features['foodScore'] = foodScore\n features['pileScore'] = pillScore\n features['ghostScore'] = ghostScore\n features['captureScore'] = captureScore\n features['pacmanScore'] = pacmanScore\n features['friendScore'] = friendScore\n features['wallScore'] = wallScore\n\n\n return features", "def fit(self, X, y=None):\n super().fit(X, y)\n cur_state = self._last_state_\n\n cur_state[\"n_clusters\"] = int(self.n_clusters)\n if cur_state[\"n_clusters\"] < 0:\n raise ValueError(\"n_clusters must be >= 0\")\n\n cur_state[\"gini_threshold\"] = float(self.gini_threshold)\n if not (0.0 <= cur_state[\"gini_threshold\"] <= 1.0):\n raise ValueError(\"gini_threshold not in [0,1]\")\n\n _postprocess_options = (\"boundary\", \"none\", \"all\")\n cur_state[\"postprocess\"] = str(self.postprocess).lower()\n if cur_state[\"postprocess\"] not in _postprocess_options:\n raise ValueError(\"postprocess should be one of %r\"%_postprocess_options)\n\n cur_state[\"compute_full_tree\"] = bool(self.compute_full_tree)\n cur_state[\"compute_all_cuts\"] = bool(self.compute_all_cuts)\n\n\n # apply the Genie++ algorithm (the fast part):\n res = internal.genie_from_mst(self._mst_dist_, self._mst_ind_,\n n_clusters=cur_state[\"n_clusters\"],\n gini_threshold=cur_state[\"gini_threshold\"],\n noise_leaves=(cur_state[\"M\"]>1),\n compute_full_tree=cur_state[\"compute_full_tree\"],\n compute_all_cuts=cur_state[\"compute_all_cuts\"])\n\n self.n_clusters_ = res[\"n_clusters\"]\n self.labels_ = res[\"labels\"]\n self._links_ = res[\"links\"]\n self._iters_ = res[\"iters\"]\n\n if self.labels_ is not None:\n self._postprocess(cur_state[\"M\"], cur_state[\"postprocess\"])\n\n if cur_state[\"compute_full_tree\"]:\n Z = internal.get_linkage_matrix(self._links_,\n self._mst_dist_, self._mst_ind_)\n self.children_ = Z[\"children\"]\n self.distances_ = Z[\"distances\"]\n self.counts_ = Z[\"counts\"]\n\n return self", "def tuneRandomForest(train_set):\n\n auc_score = make_scorer(roc_auc_score)\n acc = make_scorer(accuracy_score)\n\n train_set = pd.read_csv(train_set, sep=\"\\t\", low_memory=False)\n\n train_output = train_set[\"output\"].values\n train_features = train_set[train_set.columns.drop([\"labels\", \"output\"])].values\n\n #X_train, X_test, y_train, y_test = train_test_split(train_features, train_output, test_size=0.20)\n\n # define parameters to be optimized\n parameters = {\n 'n_estimators': [int(x) for x in range(200, 3000, 300)],\n 'max_features': ['log2', 'sqrt', \"auto\"],\n 'criterion': [\"gini\", \"entropy\"],\n }\n #plotGrid(parameters, script_path + \"/results/GridSearchPlot.png\")\n\n scores = ['precision', 'recall', 'f1', auc_score, acc] # compute efficiency based on scores\n for score in scores:\n 
print(\"# Tuning hyper-parameters for %s\" % score)\n\n tune_search = GridSearchCV(\n RandomForestClassifier(n_jobs=-1),\n parameters,\n scoring=score\n )\n #tune_search.fit(X_train, y_train)\n tune_search.fit(train_features, train_output)\n print(tune_search.best_params_)\n\n means = tune_search.cv_results_['mean_test_score']\n stds = tune_search.cv_results_['std_test_score']\n for mean, std, params in zip(means, stds, tune_search.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\" % (mean, std * 2, params))\n\n #y_true, y_pred = y_test, tune_search.predict(X_test)\n # print(classification_report(y_true, y_pred))\n #print()", "def prune_path(clf, X, y, max_n_leaves=10, n_iter=10,\n test_size=0.1, random_state=None, n_jobs=1):\n \n\n from sklearn.base import clone\n from sklearn.cross_validation import StratifiedShuffleSplit,ShuffleSplit\n from sklearn.metrics import roc_auc_score,mean_squared_error\n from multiprocessing.dummy import Pool as ThreadPool\n from itertools import repeat\n import pandas as pd\n #import copy\n \n #classification score\n def my_auc(estimator, X, y):\n y_score = estimator.predict_proba(X)[:,1] # You could also use the binary predict, but probabilities should give you a more realistic score.\n return roc_auc_score(y, y_score)\n \n #regression score\n def my_nmse(estimator, X, y):\n y_pre = estimator.predict(X) # You could also use the binary predict, but probabilities should give you a more realistic score.\n return -mean_squared_error(y, y_pre)\n \n\n if len(np.unique(y)) == 2: \n scoring_fuc = my_auc\n \n else:\n scoring_fuc = my_nmse\n \n def multip_run(fuction,task_zip,n_jobs = 1):\n\n #Multi-process Run\n\n pool = ThreadPool(processes=n_jobs)\n results = pool.starmap(fuction, task_zip)\n pool.close()\n pool.join()\n return results \n\n def OneFoldCut(clf,X_train, y_train,X_test,y_test,max_n_leaves):\n estimator = clone(clf)\n \n fitted = estimator.fit(X_train, y_train)\n \n if max_n_leaves < get_n_leaves(fitted):\n n_leaves = max_n_leaves\n \n else:\n n_leaves = get_n_leaves(fitted)\n \n print('###### Iters true start leaves is %d #######' % n_leaves)\n \n #cut_num = list(range(2,n_leaves, 1))\n cut_num = list(range(n_leaves-1,1,-1))\n #n = len(cut_num)\n loc_indexs = []\n loc_scores = []\n for i in cut_num:\n #clf1 = copy.deepcopy(fitted)\n #clf1 = clone(fitted)\n #clf1.prune(i)\n fitted.prune(i)\n onescore = scoring_fuc(fitted,X_test,y_test)\n #onescore = scoring_fuc(clf1,X_test,y_test)\n loc_scores.append(onescore)\n loc_indexs.append(i)\n \n S = pd.DataFrame(loc_scores,index=loc_indexs)\n\n return S\n\n\n #scores = list()\n if len(np.unique(y)) == 2: \n kf = StratifiedShuffleSplit(y,\n n_iter = n_iter, \n test_size= test_size,\n random_state=random_state)\n else:\n kf = ShuffleSplit(len(y),\n n_iter = n_iter, \n test_size= test_size,\n random_state=random_state)\n \n X_trains = [X[tr] for tr,ts in kf]\n y_trains = [y[tr] for tr,ts in kf]\n \n X_tests = [X[ts] for tr,ts in kf]\n y_tests = [y[ts] for tr,ts in kf]\n \n task_zip = zip(repeat(clf),\n X_trains,\n y_trains,\n X_tests,\n y_tests,\n repeat(max_n_leaves))\n \n scores = multip_run(OneFoldCut,task_zip,n_jobs = n_jobs)\n \n df = pd.concat(scores,axis=1)\n df.columns = range(len(df.columns))\n\n return df #zip(*scores)", "def detect():\n\n _pass_done = 0\n _improve = True\n new_mod = modularity()\n cur_mod = -999999999.0\n rl = random.sample(range(0, node_count), node_count)\n while _improve & (_pass_done < max_pass) & (new_mod - cur_mod > min_mod):\n cur_mod = new_mod\n _improve = False\n 
_pass_done += 1\n for node_tmp in rl:\n n = node_tmp\n nc = bl[n]\n ncomm = neigh_comm(n)\n remove(n, nc, ncomm[nc])\n best_c = nc\n best_l = 0.0\n best_incre = 0.0\n for c in ncomm:\n incre = modularity_gain(n, c, ncomm[c])\n if incre > best_incre:\n best_incre = incre\n best_c = c\n best_l = ncomm[c]\n insert(n, best_c, best_l)\n if best_c != nc:\n _improve = True\n new_mod = modularity()\n print new_mod", "def train(self, x, y, feature_search=None,\n max_depth=8, min_samples_split=2, min_samples_leaf=1):\n if self._depth < max_depth and x.shape[0] > min_samples_split:\n\n # Retrieve best split coordinates based on gini impurity\n # and two groups\n self._feature_idx, self._split_value, group_1, group_2 = \\\n split_search(x, y, min_samples_leaf, feature_search)\n\n if self._feature_idx is not np.NaN:\n # Recursively split and train child nodes\n self._left_child = Node(self._depth + 1)\n self._right_child = Node(self._depth + 1)\n self._left_child.train(*group_1, feature_search, max_depth,\n min_samples_split,\n min_samples_leaf)\n self._right_child.train(*group_2, feature_search, max_depth,\n min_samples_split,\n min_samples_leaf)\n else:\n # Impossible to split. Convert to leaf node\n # This will occur when observations are\n # identical in a given node\n self._sprout(y)\n else:\n # End condition met. Convert to leaf node\n self._sprout(y)", "def compute_splits(self, G, nw_name='test', train_frac=0.51, split_alg='spanning_tree', split_id=0, verbose=False):\n # Compute train/test split\n if split_alg == 'random':\n tr_E, te_E = stt.rand_split_train_test(G, train_frac)\n train_E, test_E, G, mp = pp.relabel_nodes(tr_E, te_E, G.is_directed())\n elif split_alg == 'naive':\n train_E, test_E = stt.naive_split_train_test(G, train_frac)\n elif split_alg == 'spanning_tree':\n train_E, test_E = stt.split_train_test(G, train_frac)\n elif split_alg == 'fast':\n train_E, test_E = stt.quick_split(G, train_frac)\n elif split_alg == 'timestamp':\n train_E, test_E, _ = stt.timestamp_split(G, train_frac)\n else:\n raise ValueError('Split alg. 
{} unknown!'.format(split_alg))\n\n # Make sure the edges are numpy arrays\n train_E = np.array(list(train_E))\n test_E = np.array(list(test_E))\n\n # Get the labels of train and test\n a = nx.adjacency_matrix(G, nodelist=range(len(G.nodes)))\n tr_labels = np.ravel(a[train_E[:, 0], train_E[:, 1]])\n te_labels = np.ravel(a[test_E[:, 0], test_E[:, 1]])\n\n # Split train and test edges in those with positive and negative signs\n pos_tr_e = train_E[np.where(tr_labels == 1)[0], :]\n neg_tr_e = train_E[np.where(tr_labels == -1)[0], :]\n pos_te_e = test_E[np.where(te_labels == 1)[0], :]\n neg_te_e = test_E[np.where(te_labels == -1)[0], :]\n\n # Make a train graph with appropriate weights +1 / -1\n H = G.copy()\n H.remove_edges_from(test_E)\n\n # Set class attributes to new values\n self.set_splits(train_E=pos_tr_e, train_E_false=neg_tr_e, test_E=pos_te_e, test_E_false=neg_te_e,\n directed=G.is_directed(), nw_name=nw_name, TG=H, split_id=split_id,\n split_alg=split_alg, verbose=verbose)\n\n return pos_tr_e, neg_tr_e, pos_te_e, neg_te_e", "def mts_ls2(current_x, current_fitness, best_x, best_fitness, improve, search_range, task, rng, bonus1=10, bonus2=1,\n sr_fix=0.4, **_kwargs):\n if not improve:\n search_range /= 2\n i_fix = np.argwhere(search_range < 1e-15)\n search_range[i_fix] = task.range[i_fix] * sr_fix\n improve, grade = False, 0.0\n for _ in range(len(current_x)):\n d = -1 + rng.random(len(current_x)) * 2\n r = rng.choice([0, 1, 2, 3], len(current_x))\n new_x = task.repair(np.vectorize(move_x)(current_x, r, d, search_range, operator.sub), rng)\n new_fitness = task.eval(new_x)\n if new_fitness < best_fitness:\n grade, best_x, best_fitness = grade + bonus1, new_x.copy(), new_fitness\n elif new_fitness != current_fitness:\n if new_fitness > current_fitness:\n new_x = task.repair(np.vectorize(move_x)(current_x, r, d, search_range, operator.add), rng)\n new_fitness = task.eval(new_x)\n if new_fitness < best_fitness:\n grade, best_x, best_fitness = grade + bonus1, new_x.copy(), new_fitness\n elif new_fitness < current_fitness:\n grade, current_x, current_fitness, improve = grade + bonus2, new_x.copy(), new_fitness, True\n else:\n grade, current_x, current_fitness, improve = grade + bonus2, new_x.copy(), new_fitness, True\n return current_x, current_fitness, best_x, best_fitness, improve, grade, search_range", "def _find_split(self, X, y, n_features):\r\n splits_info = []\r\n\r\n # Select features to consider\r\n features = self._feature_selection.get_features(n_features, self._feature_prob)\r\n\r\n # Get candidate splits\r\n for feature_id in features:\r\n for split_value in compute_split_values(X[:, feature_id]):\r\n splits_info.append(\r\n compute_split_info(self._split_criterion, X, y, feature_id, split_value, self._min_samples_leaf))\r\n\r\n splits = []\r\n for split_info in splits_info:\r\n if split_info is not None:\r\n gain, feature_id, split_value = split_info\r\n split = Split(feature_id, value=split_value, gain=gain)\r\n splits.append(split)\r\n else:\r\n continue\r\n\r\n selected_split = self._split_chooser.get_split(splits)\r\n return selected_split", "def find_best_split(self, x, y):\n\n # check cornor case: all same x\n n = y.size\n\n if all(x == x[0]):\n return (0, amin(x) - self.eps)\n\n sort_index = argsort(x)\n x_sorted = x[sort_index]\n y_sorted = y[sort_index]\n\n # build potential split index array\n split_index_array = array([i for i in range(1, n)\n if x_sorted[i] != x_sorted[i - 1]\n and y_sorted[i] != y_sorted[i - 1]])\n\n # split_index_array = linspace(\n # 0, y.size, 
num=min(5, ceil(n / 5)), endpoint=False, dtype='int')\n # split_index_array = split_index_array[1:]\n\n best_split_index = 0\n best_gain = 0\n h_x = self.cur_entropy\n\n for split_index in split_index_array:\n left_entropy = self.entropy(y_sorted[:split_index])\n right_entropy = self.entropy(y_sorted[split_index:])\n h_xy = (split_index * left_entropy +\n (n - split_index) * right_entropy) / n\n cur_gain = h_x - h_xy\n\n if cur_gain > best_gain:\n best_gain = cur_gain\n best_split_index = split_index\n\n if best_split_index != 0:\n best_split_point = (x_sorted[best_split_index] +\n x_sorted[best_split_index - 1]) / 2\n else:\n best_split_point = x_sorted[best_split_index] - self.eps\n\n return (best_gain, best_split_point)", "def feature_static_heuristic(self):\n pass", "def getNextNodeUsingTotalStepsToTravel(kGoalState):\n \n global fringe\n global solutions\n\n\n\n minNode = None\n minCost = 99999999999\n minNodeIndex = -1\n\n \n pnode = None\n pcost = None\n\n if len(solutions)>0 and solutions[0] != None:\n pnode = solutions[0];\n pcost = getTotalStepsToReachGoalState(pnode,kGoalState)\n # print pnode, pcost\n # raw_input()\n \n\n\n\n for idx,node in enumerate(fringe):\n #get the heu. function values\n g_value =getTotalStepsToReachGoalState(node,kGoalState)\n \n\n if g_value < minCost:\n minNode = node\n minNodeIndex = idx\n minCost = g_value\n\n\n fringe.pop(minNodeIndex)\n c = getTotalStepsToReachGoalState(minNode,kGoalState)\n if pnode != None:\n if c > pcost:\n minNode = None\n \n return minNode", "def CrossGeneEpistasisLasso(str_inputFilePath_feature, str_inputFileName_phenotype, str_inputFileName_score = \"\", str_outputFilePath = \"\", int_kOfKFold = 2, int_nJobs = 1):\n \n ### set default output path\n if str_outputFilePath == \"\":\n str_outputFilePath = os.path.abspath(os.path.join(str_inputFilePath_feature, os.pardir)) + \"/crossGeneResult/\"\n ### if output folder doesn't exist then create it\n if not os.path.exists(str_outputFilePath):\n os.makedirs(str_outputFilePath)\n \n ### set default score file name\n if str_inputFileName_score == \"\":\n for str_fileName in os.listdir(str_inputFilePath_feature):\n if str_fileName.startswith(\"All_Lasso\"):\n str_inputFileName_score = os.path.join(str_inputFilePath_feature, str_fileName)\n \n #-------------------------\n # load data\n #-------------------------\n ### scan score file and exclude useless genes\n dict_score = {}\n with open(str_inputFileName_score, \"r\") as file_inputFile:\n file_inputFile.readline()\n for line in file_inputFile:\n list_thisScore = line.strip().split(\",\")\n if list_thisScore[1] == \"MemErr\" or float(list_thisScore[1]) == 0.0:\n pass\n else:\n dict_score[list_thisScore[0]] = float(list_thisScore[1])\n \n ### get all the file names of feature file\n list_featureFileName = []\n for str_fileName in os.listdir(str_inputFilePath_feature):\n if \"Feature.csv\" in str_fileName:\n list_featureFileName.append(str_fileName)\n \n ### get all selected snp ids\n list_genotype_rsid = []\n for item in list_featureFileName:\n with open(os.path.join(str_inputFilePath_feature, item), \"r\") as file_inputFile:\n ### grep the header\n list_rsids = file_inputFile.readline().strip().split(\",\")\n for rsid in list_rsids:\n list_genotype_rsid.append(rsid)\n np_genotype_rsid = np.array(list_genotype_rsid)\n \n ### count lines of input files\n int_num_genotype = len(np_genotype_rsid)\n int_num_phenotype = sum(1 for line in open(str_inputFileName_phenotype))\n \n ### get phenotype file\n list_phenotype = []\n with 
open(str_inputFileName_phenotype, 'r') as file_inputFile:\n for line in file_inputFile:\n list_phenotype.append(line.strip().split(\",\"))\n np_phenotype = np.array(list_phenotype, dtype=np.float)\n del list_phenotype\n \n ### get genotype file\n ### declare a dictionary for mapping snp and gene\n dict_geneMap ={}\n idx_genotype_rsid = 0\n np_genotype = np.empty([int_num_phenotype, int_num_genotype], dtype='int8')\n for item in list_featureFileName:\n with open(os.path.join(str_inputFilePath_feature, item), \"r\") as file_inputFile:\n ### grep feature from header of feature file\n list_rsids = file_inputFile.readline().strip().split(\",\")\n for rsid in list_rsids:\n ### key: rsIDs of a feature; value: gene symbol\n dict_geneMap[rsid] = item.split(\"_\")[0]\n idx_phenotype = 0\n ### read feaure and write into np_genotype\n for line in file_inputFile:\n np_genotype[idx_phenotype, idx_genotype_rsid:idx_genotype_rsid + len(list_rsids)] = np.array([float(x) for x in line.strip().split(\",\")], dtype='int')\n idx_phenotype = idx_phenotype + 1\n idx_genotype_rsid = idx_genotype_rsid + len(list_rsids)\n \n #-------------------------\n # preprocess data\n #-------------------------\n ### f regression feature selection\n np_fRegression = -np.log10(f_regression(np_genotype.astype(int), np_phenotype[:, -1].astype(float))[1])\n np_selectedIdx = np.array([x > 5 for x in np_fRegression])\n np_genotype = np_genotype[:, np_selectedIdx]\n np_genotype_rsid = np_genotype_rsid[np_selectedIdx]\n if np_genotype_rsid.shape[0] == 0:\n print(\"step5: There is no variant past the f regression feature selection.\")\n return 0.0, 0.0\n\n ### select degree 1 feature\n np_genotype_rsid_degree = np.array([str(x).count('*') + 1 for x in np_genotype_rsid])\n np_selectedIdx = np.array([x == 1 for x in np_genotype_rsid_degree])\n np_genotype_degree1 = np_genotype[:, np_selectedIdx]\n np_genotype_degree1_rsid = np_genotype_rsid[np_selectedIdx]\n \n ### remove redundant polynomial features\n if np_genotype_degree1.shape[1] > 0:\n np_genotype_degree1, np_selectedIdx = np.unique(np_genotype_degree1, axis=1, return_index=True)\n np_genotype_degree1_rsid = np_genotype_degree1_rsid[np_selectedIdx]\n \n ### generate cross gene interations\n if np_genotype_degree1.shape[1] > 0:\n np_genotype_crossGene_rsid, np_genotype_crossGene = FeatureEncoderLasso(np_genotype_degree1_rsid, np_genotype_degree1, np_phenotype, 1)\n \n ### remove degree 1 feature from dataset\n np_selectedIdx = np.array([x != 1 for x in np_genotype_rsid_degree])\n np_genotype = np_genotype[:, np_selectedIdx]\n np_genotype_rsid = np_genotype_rsid[np_selectedIdx]\n \n ### concatenate cross gene interations\n if np_genotype_degree1.shape[1] > 0:\n np_genotype = np.concatenate((np_genotype, np_genotype_crossGene), axis=1)\n np_genotype_rsid = np.concatenate((np_genotype_rsid, np_genotype_crossGene_rsid))\n \n #-------------------------\n # select feature\n #-------------------------\n ### random lasso feature selection\n np_randWeight = np.array(RandomizedLassoRegression(np_genotype, np_phenotype[:, -1].astype(float)))\n np_selectedIdx = np.array([x >= 0.1 for x in np_randWeight])\n np_randWeight = np_randWeight[np_selectedIdx]\n np_genotype = np_genotype[:, np_selectedIdx]\n np_genotype_rsid = np_genotype_rsid[np_selectedIdx]\n if np_genotype_rsid.shape[0] == 0:\n print(\"step5: There is no variant past the random lasso feature selection.\")\n return 0.0, 0.0\n \n #-------------------------\n # build model\n #-------------------------\n float_AVG_S_P_test, np_weight = 
LassoRegressionCV(np_genotype, np_phenotype[:, -1].astype(float), int_kOfKFold, int_nJobs)\n float_AVG_S_P_train = LassoRegression(np_genotype, np_phenotype[:, -1].astype(float), int_nJobs)\n \n ### filter out zero-weight features\n np_selectedIdx = np.array([x != 0.0 for x in np_weight])\n np_weight = np_weight[np_selectedIdx]\n np_genotype = np_genotype[:, np_selectedIdx]\n np_genotype_rsid = np_genotype_rsid[np_selectedIdx]\n if np_genotype_rsid.shape[0] == 0:\n print(\"step5: There is no variant past the f regression feature selection.\")\n return 0.0, 0.0\n \n #-------------------------\n # analyze result\n #-------------------------\n ### calculate student t-test p-value\n np_fRegression = -np.log10(f_regression(np_genotype.astype(int), np_phenotype[:, -1].astype(float))[1])\n \n ### calculate genotype frequency\n np_genotypeFreq = np.sum(np_genotype, axis=0).astype(float) / np_genotype.shape[0]\n \n #-------------------------\n # output results\n #-------------------------\n ### output statistics of features\n with open(os.path.join(str_outputFilePath, \"Result.csv\"), \"w\") as file_outputFile:\n file_outputFile.writelines(\"rsid,weight,student-t-test_log_p-value,genotype_frequency,geneSymbol,singleGeneScore\" + \"\\n\")\n for idx_feature in range(0, np_genotype_rsid.shape[0]):\n ### if this feature is single gene epistasis\n if np_genotype_rsid[idx_feature,] in dict_geneMap.keys():\n str_thisOutput = str(np_genotype_rsid[idx_feature,]) + \",\" + str(np_weight[idx_feature,]) + \",\" + str(np_fRegression[idx_feature,]) + \",\" + str(np_genotypeFreq[idx_feature]) + \",\" + str(dict_geneMap[np_genotype_rsid[idx_feature,]]).split(\"@\")[0] + \",\" + str(dict_score[dict_geneMap[np_genotype_rsid[idx_feature,]]]) + \"\\n\"\n file_outputFile.writelines(str_thisOutput)\n ### else this feature is cross gene epistasis\n else:\n str_thisOutput = str(np_genotype_rsid[idx_feature,]) + \",\" + str(np_weight[idx_feature,]) + \",\" + str(np_fRegression[idx_feature,]) + \",\" + str(np_genotypeFreq[idx_feature]) + \",\" + str(dict_geneMap[np_genotype_rsid[idx_feature,].split(\"*\")[0]]).split(\"@\")[0] + \"*\" + str(dict_geneMap[np_genotype_rsid[idx_feature,].split(\"*\")[1]]).split(\"@\")[0] + \", \" + \"\\n\"\n file_outputFile.writelines(str_thisOutput)\n\n ### output feature\n with open(os.path.join(str_outputFilePath, \"Feature.csv\"), \"w\") as file_outputFile:\n file_outputFile.writelines(\",\".join(np_genotype_rsid) + \"\\n\")\n for idx_subject in range(0, np_genotype.shape[0]):\n file_outputFile.writelines(\",\".join(np_genotype[idx_subject, :].astype(str)) + \"\\n\")\n\n #-------------------------\n # dump persistent model\n #-------------------------\n RegressorModelPersistence(np_genotype, np_phenotype[:, -1].astype(int), str_outputFilePath, int_nJobs)\n\n print(\"step5: Detect cross gene epistasis. DONE! 
(Training score:\" + \"{0:.2f}\".format(float_AVG_S_P_train) + \"; \" + str(int_kOfKFold) + \"-fold Test Score:\" + \"{0:.2f}\".format(float_AVG_S_P_test) + \")\")\n \n return float_AVG_S_P_train, float_AVG_S_P_test", "def split_at_nodes(shp):\n nodes = find_nodes(shp)\n nodeIds = list(nodes)\n nodeIds.sort()\n nodeIds = dict([(node,i) for i,node in enumerate(nodeIds)])\n \n for road in shp:\n vrts = road.vertices\n midVrts = set(road.vertices[1:-1]) #we know end points are nodes\n midNodes = midVrts.intersection(nodes) # find any nodes in the middle of the feature.\n midIdx = [vrts.index(node) for node in midNodes] # Get their indices\n midIdx.sort()\n if midIdx:\n #print vrts\n starts = [0]+midIdx\n stops = [x+1 for x in midIdx]+[None]\n for start,stop in zip(starts,stops):\n feat = pysal.cg.Chain(vrts[start:stop])\n rec = (nodeIds[feat.vertices[0]],nodeIds[feat.vertices[-1]],False)\n yield feat,rec\n else:\n rec = (nodeIds[road.vertices[0]],nodeIds[road.vertices[-1]],False)\n yield road,rec", "def split(features, groundtruths, n_split):\n\n if n_split == 1:\n return features, groundtruths\n\n tags = list(set(groundtruths))\n new_index = {}\n for tag in tags:\n new_index[tag] = []\n for index, gt in enumerate(groundtruths):\n new_index[gt].append(index)\n new_feats = []\n new_gts = []\n for i in range(0, n_split):\n indexes = []\n for tag in tags:\n ref = len(new_index[tag])/n_split\n indexes.append(new_index[tag][ref*i:ref*(i+1)])\n \"\"\"\n ..todo:: manage multiple tags!\n \"\"\"\n indexes = indexes[0] + indexes[1]\n # print(features[:5])\n # print(len(indexes))\n # print(len(indexes[0]))\n # print(len(indexes[1]))\n # sys.exit()\n indexes.sort()\n new_gts.append([groundtruths[j] for j in indexes])\n new_feats.append([features[j] for j in indexes])\n return new_feats, new_gts", "def optimization_parameters():\n param_distributions = {\n \"n_estimators\": list(range(50, 300, 50)),\n \"max_features\": [\"auto\", \"log2\"],\n \"max_depth\": list(range(1, 21, 2)),\n \"min_samples_leaf\": list(range(4, 22, 2)),\n \"min_samples_split\": list(range(5, 30, 5)),\n \"criterion\": [\"gini\", \"entropy\"],\n }\n param_grid = {\n \"n_estimators\": list(range(50, 300, 50)),\n \"max_depth\": list(range(1, 21, 2)),\n \"min_samples_leaf\": list(range(4, 22, 2)),\n \"min_samples_split\": list(range(5, 30, 5)),\n \"criterion\": [\"gini\", \"entropy\"],\n }\n\n rfc = RandomForestClassifier()\n\n # 5 * 10 * 9 * 5 * 2 = 4500 iterations\n # will take a lot of time\n model = GridSearchCV(\n estimator=rfc,\n param_grid=param_grid,\n scoring=\"accuracy\",\n verbose=10,\n n_jobs=1,\n cv=5,\n )\n # initiates Randomized Search \n model = RandomizedSearchCV(\n estimator=rfc,\n param_distributions=param_distributions,\n n_iter=20,\n scoring='accuracy',\n verbose=10,\n n_jobs=1,\n cv=5,\n )\n \n # fit and predict the model\n model.fit(x_train, y_train)\n pred = model.predict(x_test)\n \n # define evaluation metric as accuracy score\n acc = accuracy_score(y_test, pred) * 100\n print(f\"RandomForestClassifier with GridSearchCV: {acc:0.2f}%\")\n print(\"Best parameters set:\")\n\n # extract best parameters \n best_parameters = model.best_estimator_.get_params()\n for param_name in sorted(param_grid.keys()):\n print(f\"\\t{param_name}: {best_parameters[param_name]}\")", "def choose_best_feature(data_set):\n feature_size = len(data_set[0]) - 1\n base_entropy = calc_entropy(data_set)\n best_info_gain = 0.0; best_feature = -1\n for i in xrange(feature_size):\n feat_list = [eg[i] for eg in data_set]\n unique_values = 
set(feat_list)\n new_entropy = 0.0\n for value in unique_values:\n sub_ds = splite_dataset(data_set, i, value)\n prob = len(sub_ds) / float(len(data_set))\n new_entropy += prob * calc_entropy(sub_ds)\n info_gain = base_entropy - new_entropy\n if info_gain > best_info_gain:\n best_info_gain = info_gain\n best_feature = i\n\n return best_feature", "def findfeatures(self):\n self.set_wdiff()\n\n #xp, wp=st.findfeatures(self.xarr, self.farr, self.slines, self.sfluxes,\n # self.ws, mdiff=self.mdiff, wdiff=self.wdiff, sigma=self.sigma, niter=self.niter, sections=3)\n xp,wp=st.crosslinematch(self.xarr, self.farr, self.slines, self.sfluxes,\n self.ws, mdiff=self.mdiff, wdiff=20, sigma=self.sigma, niter=self.niter)\n for x, w in zip(xp, wp):\n if w not in self.wp and w>-1: \n self.xp.append(x)\n self.wp.append(w)\n self.plotFeatures()\n self.redraw_canvas()", "def optimize(self, maxiter):\n for iteration in range(maxiter):\n self.sortParticles()\n self.phi = int(phiMin + iteration *((phiMax - phiMin) / float(maxiter)))\n self.cluster()\n #self.ConnectClusters()\n for i in range(self.n_particles):\n x = self.particles_pos[i]\n v = self.velocities[i]\n p_best = self.p_best[i]\n self.velocities[i] = self.update_velocity(x, v, p_best , self.g_best , self.getLbestOfCluster(self.getClusterOfParticle(i)) , i)\n self.particles_pos[i] = self.update_position(x, v)\n # Update the best position for particle i\n if self.func(self.particles_pos[i]) < self.func(p_best):\n self.p_best[i] = self.particles_pos[i]\n # Update the best position overall\n if self.func(self.particles_pos[i]) < self.func(self.g_best):\n \n self.g_best = self.particles_pos[i]\n return self.g_best, self.func(self.g_best)", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n # Initialize data structures\n parent_node = {}\n path_to_node = {}\n priority_queue = util.PriorityQueue()\n\n p_c = 0.5\n h_c = 1 - p_c\n\n # Get the start node\n start_node = problem.getStartState()\n parent_node[start_node] = None\n path_to_node[start_node] = []\n priority_queue.update(start_node, 0)\n\n #goal_found = False\n\n while not priority_queue.isEmpty():\n # Get the next node\n node_to_expand = priority_queue.pop()\n # Check if goal state is reached\n if problem.isGoalState(node_to_expand):\n break\n next_nodes = problem.getSuccessors(node_to_expand)\n path_to_parent = path_to_node[node_to_expand]\n\n for one_node in next_nodes:\n point, move, cost = one_node\n curr_path = path_to_node[node_to_expand] + [move]\n curr_cost = problem.getCostOfActions(curr_path)\n heuristic_cost = heuristic(point, problem)\n # Check if current node already exists in the previously visited nodes\n if point in path_to_node:\n prev_cost = problem.getCostOfActions(path_to_node[point])\n if prev_cost > curr_cost:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n else:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n # current_cost = problem.getCostOfActions(point) * p_c + heuristic(point, problem) * h_c\n\n print(node_to_expand) \n return path_to_node[node_to_expand]\n \n# nodes_to_expand = set()\n# # get max value node in the fringe node\n# min_val = float(\"inf\")\n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost < min_val:\n# min_val = total_cost\n# \n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = 
cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost == min_val:\n# nodes_to_expand.add(one_node)\n# fringe_node.remove(one_node)\n#\n# # Expand the fringe node \n# for one_node in nodes_to_expand:\n# path_to_parent = path_to_point[one_node]\n# for nxt_node in problem.getSuccessors(one_node):\n# pos = nxt_node[0]\n# mv = nxt_node[1]\n# # check if point already present in path to point\n# prev_cost = float(\"inf\")\n# if pos in cost_to_point:\n# prev_cost = cost_to_point[pos]\n# new_path = path_to_parent + [mv]\n# if prev_cost > problem.getCostOfActions(new_path):\n# path_to_point[pos] = new_path\n# cost_to_point[pos] = problem.getCostOfActions(new_path)\n# fringe_node.append(pos)\n#\n# # Check if destination is reached in the fringe node\n# for one_node in fringe_node:\n# if problem.isGoalState(one_node):\n# final_node = one_node\n# goal_found = True\n# break\n# \n# #print(len(fringe_node))\n# print(final_node)\n# print(path_to_point[final_node])\n# return path_to_point[final_node] \n\n util.raiseNotDefined()", "def optimal_perturbed_splitting(self,acc=1.e-12,rmax=50.01,tol=1.e-13,algorithm='split'):\n from nodepy.utils import bisect\n try:\n import cvxpy as cvx\n except:\n algorithm = 'split'\n\n if algorithm == 'LP':\n r=bisect(0,rmax,acc,tol,self.lp_perturb)\n elif algorithm == 'split':\n r=bisect(0,rmax,acc,tol,self.is_splittable)\n\n d,alpha,alphatilde=self.resplit(r,tol=tol)\n return r,d,alpha,alphatilde", "def mts_ls1v1(current_x, current_fitness, best_x, best_fitness, improve, search_range, task, rng, bonus1=10, bonus2=1,\n sr_fix=0.4, **_kwargs):\n if not improve:\n search_range /= 2\n i_fix = np.argwhere(search_range < 1e-15)\n search_range[i_fix] = task.range[i_fix] * sr_fix\n improve, d, grade = False, rng.uniform(-1, 1, task.dimension), 0.0\n for i in range(len(current_x)):\n x_old = current_x[i]\n current_x[i] = x_old - search_range[i] * d[i]\n current_x = task.repair(current_x, rng)\n new_fitness = task.eval(current_x)\n if new_fitness < best_fitness:\n grade, best_x, best_fitness = grade + bonus1, current_x.copy(), new_fitness\n elif new_fitness == current_fitness:\n current_x[i] = x_old\n elif new_fitness > current_fitness:\n current_x[i] = x_old + 0.5 * search_range[i]\n current_x = task.repair(current_x, rng)\n new_fitness = task.eval(current_x)\n if new_fitness < best_fitness:\n grade, best_x, best_fitness = grade + bonus1, current_x.copy(), new_fitness\n elif new_fitness >= current_fitness:\n current_x[i] = x_old\n else:\n grade, improve, current_fitness = grade + bonus2, True, new_fitness\n else:\n grade, improve, current_fitness = grade + bonus2, True, new_fitness\n return current_x, current_fitness, best_x, best_fitness, improve, grade, search_range", "def test_max_features_gridsearch(self):\n X,Y,Z = self.create_bin_data()\n t = self.check_task('RFC nt=1;e=1;c=gini;mf=[0.0001, 0.1, 0.3, 0.8]', X, Y, Z)\n self.assertEquals(t.parameters['max_features'], [1, 0.1, 0.3, 0.8])", "def run(self):\n print(' strategies...')\n matrix_file = ''\n matrix_s, matrix_c = None, None\n # run for all but the optimal version\n item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')\n for rec_type in self.data_set.graphs:\n for graph in self.data_set.graphs[rec_type]:\n print(' ', graph)\n gt_graph = load_graph(graph)\n for strategy in Strategy.strategies:\n if strategy == 'optimal':\n continue\n print(' ', strategy)\n m_new = self.data_set.matrices[rec_type][graph][strategy][0]\n m_newc = 
self.data_set.matrices[rec_type][graph][strategy][1]\n debug(' ----', m_new)\n debug(' ----', m_newc)\n if not m_new:\n debug(' ---- not m_new')\n matrix_s, matrix_c, matrix_file = None, None, None\n elif matrix_file != m_new:\n matrix_s = SimilarityMatrix(item2matrix, m_new)\n matrix_c = SimilarityMatrix(item2matrix, m_newc)\n matrix_file = m_new\n debug(' ---- matrix_file != m_new')\n # for miss in self.data_set.missions[rec_type][graph][strategy]:\n for miss in Mission.missions:\n print(' ', miss)\n if 'Information Foraging' in miss or 'Berrypicking' in miss:\n matrix = matrix_c\n else:\n matrix = matrix_s\n for m in self.data_set.missions[rec_type][graph][strategy][miss]:\n for ti in xrange(len(m.targets_original)):\n start = m.path[-2] if m.path else m.start\n debug('++++' * 16, 'mission', ti, '/',\n len(m.targets_original))\n debug(m.targets_original[ti])\n self.navigate(gt_graph, strategy, m, start,\n None, matrix)\n if ti > 0 and len(m.targets_original[ti]) == len(m.targets[0]):\n # print('breaking...')\n m.reset()\n break\n if not (ti + 1) == len(m.targets_original):\n m.path.append(u'*')\n m.reset()\n\n # run the simulations for the optimal solution\n print(' optimal...')\n for rec_type in self.data_set.graphs:\n for graph in self.data_set.graphs[rec_type]:\n print(' ', graph)\n sp_file = graph.rsplit('.', 1)[0] + '.npy'\n with open(sp_file, 'rb') as infile:\n sp = pickle.load(infile)\n for miss in self.data_set.missions[rec_type][graph]['optimal']:\n for m in self.data_set.missions[rec_type][graph]['optimal'][miss]:\n for ti in xrange(len(m.targets_original)):\n start = m.path[-2] if m.path else m.start\n debug('++++' * 16, 'mission', ti, '/', len(m.targets_original))\n debug(m.targets_original[ti])\n self.optimal_path(m, start, sp)\n if not (ti + 1) == len(m.targets_original):\n m.path.append(u'*')\n m.reset()\n\n # # DEBUG\n # item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')\n # for rec_type in ['rbar']:\n # for graph in self.data_set.graphs[rec_type]:\n # print(' ', graph)\n # gt_graph = load_graph(graph)\n # sp_file = graph.rsplit('.', 1)[0] + '.npy'\n # with open(sp_file, 'rb') as infile:\n # sp = pickle.load(infile)\n # m_newc = self.data_set.matrices[rec_type][graph]['title'][1]\n # matrix = SimilarityMatrix(item2matrix, m_newc)\n # sc = 'Berrypicking'\n # mc1 = self.data_set.missions[rec_type][graph]['title'][sc]\n # mc2 = self.data_set.missions[rec_type][graph]['optimal'][sc]\n # mc3 = self.data_set.missions[rec_type][graph]['random'][sc]\n # for m1, m2, m3 in zip(\n # mc1,\n # mc2,\n # mc3\n # ):\n # # evalute with title strategy\n # for ti in xrange(len(m1.targets_original)):\n # start = m1.path[-2] if m1.path else m1.start\n # debug('++++' * 16, 'mission', ti, '/', len(m1.targets_original))\n # # debug(m1.targets_original[ti])\n # self.navigate(gt_graph, 'title', m1, start, None, matrix)\n # # print(m1.path, ti, len(m1.targets_original[ti]), len(m1.targets[0]))\n # if ti > 0 and len(m1.targets_original[ti]) == len(m1.targets[0]):\n # # print('breaking...')\n # m1.reset()\n # break\n # if not (ti + 1) == len(m1.targets_original):\n # m1.path.append(u'*')\n # m1.reset()\n #\n # # evaluate with optimal strategy\n # for ti in xrange(len(m2.targets_original)):\n # start = m2.path[-2] if m2.path else m2.start\n # # debug('++++' * 16, 'mission', ti, '/', len(m2.targets_original))\n # # debug(m2.targets_original[ti])\n # self.optimal_path(m2, start, sp)\n # if not (ti + 1) == len(m2.targets_original):\n # m2.path.append(u'*')\n # m2.reset()\n # # 
pdb.set_trace()\n #\n # # if len(m1.path) < len(m2.path):\n # # print(len(m1.path), len(m2.path))\n # # pdb.set_trace()\n # # m1.compute_stats()\n # # m2.compute_stats()\n # # if m1.stats[-1] > m2.stats[-1]:\n # # print(m1.stats)\n # # print(m2.stats)\n # # pdb.set_trace()\n #\n # print('MISSION COLLECTION DONE')\n # mc1.compute_stats()\n # mc2.compute_stats()\n # print(mc1.stats[-1], mc2.stats[-1])\n # pdb.set_trace()\n\n # fname_5 = u'../data/bookcrossing/graphs/rbar_5.gt'\n # fname_20 = u'../data/bookcrossing/graphs/rbar_20.gt'\n # sp_file_5 = fname_5.rsplit('.', 1)[0] + '.npy'\n # sp_file_20 = fname_20.rsplit('.', 1)[0] + '.npy'\n # with open(sp_file_5, 'rb') as infile:\n # sp_5 = pickle.load(infile)\n # with open(sp_file_20, 'rb') as infile:\n # sp_20 = pickle.load(infile)\n # sc = 'Berrypicking'\n # mc_5 = self.data_set.missions['rbar'][fname_5]['optimal'][sc]\n # mc_52 = self.data_set.missions['rbar'][fname_5]['title'][sc]\n # mc_20 = self.data_set.missions['rbar'][fname_20]['optimal'][sc]\n # mc_202 = self.data_set.missions['rbar'][fname_20]['title'][sc]\n # for m5, m20, m52, m202 in zip(\n # mc_5,\n # mc_20,\n # mc_52,\n # mc_202\n # ):\n # # evaluate 5 with optimal strategy\n # for ti in xrange(len(m5.targets_original)):\n # start = m5.path[-2] if m5.path else m5.start\n # self.optimal_path(m5, start, sp_5)\n # if not (ti + 1) == len(m5.targets_original):\n # m5.path.append(u'*')\n # m5.reset()\n #\n # # evaluate 20 with optimal strategy\n # for ti in xrange(len(m20.targets_original)):\n # start = m20.path[-2] if m20.path else m20.start\n # self.optimal_path(m20, start, sp_20)\n # if not (ti + 1) == len(m20.targets_original):\n # m20.path.append(u'*')\n # m20.reset()\n #\n # # if len(m5.path) < len(m20.path) or \\\n # if m5.path.count('*') > m20.path.count('*'):\n # print(len(m5.path))\n # for part in ' '.join(m5.path[2:]).split('*'):\n # print(' ', part)\n # print(len(m20.path))\n # for part in ' '.join(m20.path[2:]).split('*'):\n # print(' ', part)\n # pdb.set_trace()\n #\n # print('MISSION COLLECTION DONE')\n # mc_5.compute_stats()\n # mc_20.compute_stats()\n # print(mc_5.stats[-1], mc_20.stats[-1])\n #\n # for m5, m20 in zip(mc_5.missions, mc_20.missions):\n # if m5.stats[-1] > m20.stats[-1]:\n # print(m5.stats)\n # print(m20.stats)\n # pdb.set_trace()\n # pdb.set_trace()\n\n # write the results to a file\n # self.write_paths()\n self.save()", "def brute_tree(XTRAIN,istopTRAIN,XTEST,istopTEST):\n \n ntrain=XTRAIN.shape[0]\n ntest=XTEST.shape[0]\n \n if np.sum(istopTRAIN)==0:\n return 0,[]\n\n cost0=np.zeros(Ngammas*Nreps)\n cost1=np.zeros(Ngammas*Nreps)\n cost0test=np.zeros(Ngammas*Nreps)\n cost1test=np.zeros(Ngammas*Nreps)\n \n precisionTRAIN=np.zeros(Ngammas*Nreps)\n precisionTEST=np.zeros(Ngammas*Nreps)\n recallTEST=np.zeros(Ngammas*Nreps)\n rate=np.zeros(Ngammas*Nreps)\n \n for iii in range(Ngammas):\n \n gamma=GAMMA[iii]\n \n for jjj in range(Nreps):\n \n \"\"\" train a tree using training data with random splitting \"\"\"\n \n tree_hyperparameters['class_weight']={0:1,1:gamma}\n clf=tree.DecisionTreeClassifier(**tree_hyperparameters)\n clf.fit(XTRAIN,istopTRAIN)\n \n \"\"\"\" record costs and precision on validation data \"\"\"\n \n pTRAIN=clf.predict(XTRAIN)\n precisionTRAIN[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==1),sum(pTRAIN))\n cost0[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==0)\n cost1[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 0 and istopTRAIN[i]==1)\n \n 
\"\"\" record precision on test data \"\"\"\n \n pTEST=clf.predict(XTEST)\n precisionTEST[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1),sum(pTEST))\n recallTEST[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1)/sum(istopTEST)\n cost0test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==0)\n cost1test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 0 and istopTEST[i]==1)\n \n \"\"\" record positive rate on full data \"\"\"\n \n rate[iii*Nreps+jjj]=(sum(pTRAIN)+sum(pTEST))/(ntrain+ntest)\n \n \"\"\" Compute Pareto front for validation data \"\"\"\n \n Pareto = Lower_Convex_Hull(np.concatenate((cost0.reshape(-1,1),cost1.reshape(-1,1)),1))\n \n \"\"\" make some nice plots for whoever is watching \"\"\"\n \n plt.figure(figsize=(10,5))\n plt.subplot(121)\n plt.plot(cost0,cost1,'.')\n plt.plot(cost0[Pareto],cost1[Pareto],'d')\n plt.xlabel('errors on class zero training data')\n plt.ylabel('errors on class one training data')\n\n plt.subplot(122)\n plt.plot(cost0test,cost1test,'.')\n plt.plot(cost0test[Pareto],cost1test[Pareto],'d')\n plt.xlabel('errors on class zero test data')\n plt.ylabel('errors on class one test data')\n plt.show()\n \n plt.figure(figsize=(15,5))\n plt.subplot(131)\n plt.semilogy(precisionTRAIN,rate,'.')\n plt.semilogy(precisionTRAIN[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on training data')\n plt.ylabel('positive rate')\n\n plt.subplot(132) \n plt.semilogy(precisionTEST,rate,'.')\n plt.semilogy(precisionTEST[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('positive rate')\n\n plt.subplot(133) \n plt.plot(precisionTEST,recallTEST,'.')\n plt.plot(precisionTEST[Pareto],recallTEST[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('recall on test data')\n plt.show() \n \n return {'cost0':cost0,'cost1':cost1,'cost0test':cost0test,'cost1test':cost1test,'precisionTRAIN':precisionTRAIN,'precisionTEST':precisionTEST,'recallTEST':recallTEST,'rate':rate,'Pareto':Pareto}", "def data_mining_features(index,input_string_x1,input_string_x2,vocab_word2index,word_vec_fasttext_dict,word_vec_word2vec_dict,tfidf_dict,n_gram=8):\r\n input_string_x1=input_string_x1.decode(\"utf-8\")\r\n input_string_x2 = input_string_x2.decode(\"utf-8\")\r\n #1. get blue score vector\r\n feature_list=[]\r\n #get blue score with n-gram\r\n for i in range(n_gram):\r\n x1_list=split_string_as_list_by_ngram(input_string_x1,i+1)\r\n x2_list = split_string_as_list_by_ngram(input_string_x2, i + 1)\r\n blue_score_i_1 = compute_blue_ngram(x1_list,x2_list)\r\n blue_score_i_2 = compute_blue_ngram(x2_list,x1_list)\r\n feature_list.append(blue_score_i_1)\r\n feature_list.append(blue_score_i_2)\r\n\r\n #2. get length of questions, difference of length\r\n length1=float(len(input_string_x1))\r\n length2=float(len(input_string_x2))\r\n length_diff=(float(abs(length1-length2)))/((length1+length2)/2.0)\r\n feature_list.append(length_diff)\r\n\r\n #3. how many words are same, how many words are unique\r\n sentence_diff_overlap_features_list=get_sentence_diff_overlap_pert(index,input_string_x1,input_string_x2)\r\n feature_list.extend(sentence_diff_overlap_features_list)\r\n\r\n #4. 
question 1,2 start with how/why/when\r\n #how_why_feature_list=get_special_start_token(input_string_x1,input_string_x2,special_start_token)\r\n #print(\"how_why_feature_list:\",how_why_feature_list)\r\n #feature_list.extend(how_why_feature_list)\r\n\r\n #5.edit distance\r\n edit_distance=float(edit(input_string_x1, input_string_x2))/30.0\r\n feature_list.append(edit_distance)\r\n\r\n #6.cos distance from sentence embedding\r\n x1_list=token_string_as_list(input_string_x1, tokenize_style='word')\r\n x2_list = token_string_as_list(input_string_x2, tokenize_style='word')\r\n distance_list_fasttext = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict)\r\n distance_list_word2vec = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_word2vec_dict, tfidf_dict)\r\n #distance_list2 = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict,tfidf_flag=False)\r\n #sentence_diffence=np.abs(np.subtract(sentence_vec_1,sentence_vec_2))\r\n #sentence_multiply=np.multiply(sentence_vec_1,sentence_vec_2)\r\n feature_list.extend(distance_list_fasttext)\r\n feature_list.extend(distance_list_word2vec)\r\n #feature_list.extend(list(sentence_diffence))\r\n #feature_list.extend(list(sentence_multiply))\r\n return feature_list", "def search(self):\n self.iteration = 0\n while self.iteration < self.maxIterations:\n self.GAStep()\n self.iteration += 1\n\n print (\"Total iterations: \",self.iteration)\n print (\"Best Solution: \", self.best.getFitness())" ]
[ "0.7151503", "0.64216876", "0.6352301", "0.6302367", "0.62781364", "0.62125915", "0.6181519", "0.6135771", "0.60826075", "0.6048427", "0.5933168", "0.590119", "0.5873095", "0.58700764", "0.5845192", "0.583579", "0.5818668", "0.5818668", "0.58096075", "0.5801238", "0.5736901", "0.57084674", "0.5700281", "0.5676648", "0.56521", "0.56396794", "0.5639582", "0.5626537", "0.5621652", "0.56026095", "0.5540939", "0.55228674", "0.5519763", "0.551568", "0.55078506", "0.5497097", "0.5489115", "0.5488764", "0.54850537", "0.5474473", "0.5469085", "0.54606944", "0.54544157", "0.5450705", "0.5432448", "0.5415102", "0.54141027", "0.5401419", "0.5375595", "0.5364444", "0.5344203", "0.5336465", "0.5332241", "0.53286403", "0.5325271", "0.5324051", "0.53239816", "0.5310048", "0.5296425", "0.5286626", "0.52812815", "0.5280985", "0.52734077", "0.52500683", "0.5247478", "0.5241667", "0.5240806", "0.5239428", "0.52391136", "0.52362835", "0.52340996", "0.5232408", "0.52298987", "0.5224952", "0.5223049", "0.52214825", "0.5221186", "0.5215743", "0.5213064", "0.5203289", "0.5202083", "0.5198661", "0.51976544", "0.51971596", "0.51942897", "0.5186403", "0.51807153", "0.5175395", "0.51705986", "0.5163833", "0.5155486", "0.5150557", "0.51498526", "0.5147555", "0.5140864", "0.5137862", "0.513431", "0.51311266", "0.5131076", "0.51267403" ]
0.66302925
1
Predict class for a single sample.
def _predict(self, inputs):
    node = self.tree_
    while node.left:
        if inputs[node.feature_index] < node.split:
            node = node.left
        else:
            node = node.right
    return node.predicted_class
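A minimal, self-contained sketch of the traversal this snippet performs, assuming a hypothetical Node class whose left, right, feature_index, split, and predicted_class attributes mirror the names used above (none of this scaffolding is part of the source record):

# Hypothetical minimal node structure; attribute names follow _predict above.
class Node:
    def __init__(self, predicted_class, feature_index=0, split=0.0):
        self.predicted_class = predicted_class  # class returned if this is a leaf
        self.feature_index = feature_index      # feature tested at an internal node
        self.split = split                      # threshold for that feature
        self.left = None                        # branch taken when feature < split
        self.right = None                       # branch taken otherwise

def predict_one(root, inputs):
    # Same traversal as _predict: descend until a node with no left child (a leaf).
    node = root
    while node.left:
        if inputs[node.feature_index] < node.split:
            node = node.left
        else:
            node = node.right
    return node.predicted_class

# Tiny hand-built stump: class 0 if feature 1 < 2.5, else class 1.
root = Node(predicted_class=0, feature_index=1, split=2.5)
root.left = Node(predicted_class=0)
root.right = Node(predicted_class=1)

print(predict_one(root, [4.0, 1.0]))  # 0
print(predict_one(root, [4.0, 3.0]))  # 1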
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, sample, **kwargs):\r\n return self.model.predict(sample, **kwargs)", "def predict_class(self, feature):\n return self._clf.predict(feature)", "def predict_class(self, feature):\n return self._clf.predict(feature)", "def predict(self, sample, **kwargs):\n return self.model.predict(sample, **kwargs)", "def predict(self, x, **kwargs):\n kwargs = self.filter_sk_params(Sequential.predict_classes, kwargs)\n classes = self.model.predict_classes(x, **kwargs)\n return self.classes_[classes]", "def predict(self, samples):\n predictions = np.zeros(len(samples), int)\n class_predictions = np.zeros(self.class_count)\n \n for i in range(len(samples)): # Loop over each sample\n for j in range(self.class_count): # Loop over each class\n class_predictions[j] = self.p_ys[j] # Get p(y) for class j\n class_predictions[j] *= np.dot(samples[i,:-1], self.p_xi_given_ys[j]) \\\n + np.dot( np.ones((np.shape(samples[i,:-1]))) - samples[i,:-1], np.ones((np.shape(self.p_xi_given_ys[j]))) - self.p_xi_given_ys[j])\n \"\"\"\n np.dot(samples[i,:-1], self.p_xi_given_ys[j])\n for k in range(self.feature_count): # Loop over each feature \n # Multiply p(y) by p(xi|y) \n if(samples[i][k] == 1):\n class_predictions[j] *= self.p_xi_given_ys[j][k] \n else:\n class_predictions[j] *= 1 - self.p_xi_given_ys[j][k]\n \"\"\"\n \n predictions[i] = np.argmax(class_predictions) # Prediction is class with highest probability.\n \n return predictions", "def predict(self, samples):\n predictions = np.zeros(len(samples), int)\n class_predictions = np.zeros(self.class_count)\n \n for i in range(len(samples)): # Loop over each sample\n for j in range(self.class_count): # Loop over each class\n class_predictions[j] = self.p_ys[j] # Get p(y) for class j \n \n # Multiply p(y) by p(xi|y) \n class_predictions[j] += np.dot(samples[i], self.p_xi_given_ys[j])\n \n predictions[i] = np.argmax(class_predictions) # Prediction is class with highest probability.\n \n return predictions", "def predict(self, X):\n\n\t\tn_samples = X.shape[0]\n\t\tpredicted = np.zeros(n_samples)\n\n\t\tfor i in xrange(n_samples):\n\t\t\tpredicted[i] = self.classify_example(X[i])\n\n\t\treturn predicted", "def predict(self):\n raise NotImplementedError", "def predict_class(self, X_new):\n result = [self.predict_class_single(x) for x in X_new]\n return result", "def predict(self, X):\n\t\tR = self.predict_soft(X)\t\t\t\t\t\t\t\t\t\t\t# compute soft output values\n\t\tY = R.argmax(1)\t\t\t\t\t\t\t\t\t\t\t\t\t\t# get index of maximum response\n\t\treturn self.classes[Y]\t\t\t\t\t\t\t\t\t\t\t\t# convert to saved class values", "def predict(self, X: np.ndarray):\n return np.apply_along_axis(self.estimate_class, 1, X)", "def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)", "def predict(self, X):\n\n # Get a matrix with the probabilities of a sample belonging to each class.\n probs = self.predict_proba(X)\n\n # Get the predicted classes by choosing the class which has biggest probability.\n y_ = np.argmax(probs, axis=1)\n\n # Get the original class ints before one hot encoding\n y = self.oneHot_.retransform(y_)\n\n return y", "def predict(self, x):\n # *** START CODE HERE ***\n return self.clf.predict_classes(x.reshape(x.shape[0], 28, 28, 1))\n # *** END CODE HERE ***", "def predict(self, review):\n raise NotImplementedError", "def _predict(self, classify: np.array, n_preds=1):\r\n tmp = classify.argsort()[:, :n_preds] # Return the index of the best label 
classification\r\n preds = copy(tmp) # allow to copy tmp\r\n for index, target in enumerate(self.targets):\r\n preds = np.where(tmp == index, target, preds) # Return the target label corresponding to the index\r\n self.preds = preds", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(cls, input):\n clf = cls.get_model()\n return clf.predict(input)", "def predict(cls, input):\n clf = cls.get_model()\n print('input=')\n print(input)\n return clf.predict(input)", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_classifier.predict(data)", "def model_predict(img_path):\n img = open_image(img_path)\n pred_class, pred_idx, outputs = learn.predict(img)\n print(pred_class)\n return pred_class", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self, X):", "def predict(self, X):", "def predict_class(self, inputs):\n if not self.trained:\n if self.verbose:\n print(\"KMeans Model Class - Predict Class Function: No trained model\")\n return -1\n\n\n return self.cluster_classes[self.model.predict(inputs)]", "def predict(self, samples): \n return self.random_forest.predict(samples)", "def predict(self, X):\n return self.classifier.predict(X)", "def predict(self, X):\n raise NotImplementedError", "def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, {self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]", "def predict(self):\n probabilities = self.probability_array()\n # THIS ASSUMES the classifiers are in order: 0th column of the\n # probabilities corresponds to label = 0, ..., 9th col is for 9.\n classes = np.argmax(probabilities, axis=1)\n return classes", "def predict(self, sample):\r\n # sample dimension controlled in _feedwardSignal\r\n return self._feedward_signal(sample)", "def predict(self, X):\n prob = self.predict_proba(X)\n if self.rule == 'fda':\n prob_1 = prob[:, :self.n_class_]\n prob_2 = prob[:, self.n_class_:]\n return np.vstack((self.labels_[prob_1.argmax(1)], self.labels_[prob_2.argmax(1)]))\n else:\n return self.labels_[prob.argmax(1)]", "def clf1_predict(self):\n self._pred_clf_1 = self._clf1.predict(self._vectorized_input)[0]", "def _predict(self, X):\n raise NotImplementedError", "def _predict_one_class(self, X: np.ndarray, class_idx: int):\n return (\n np.array(\n [\n self.fitted_distributions[col_idx][class_idx].pdf(\n X[:, col_idx]\n ) # get PDF if Gaussian\n if self.column_distribution_map[col_idx] == \"gaussian\"\n else self.fitted_distributions[col_idx][class_idx].p[\n X[:, col_idx].astype(\"int\") # get p if multinomial\n ]\n for col_idx in range(X.shape[1]) # For each column in X\n ]\n ).prod(axis=0)\n * self.prior.p[class_idx]\n )", "def predict(data, samples, classifier='SVM',\r\n classification='combined', selectFeatures=('CUK', 10)):\r\n if (classification == \"trained\"):\r\n classifyTrained = True\r\n classifySurface = False\r\n elif (classification == 'surface'):\r\n classifyTrained = False\r\n classifySurface = True\r\n else:\r\n classifyTrained = True\r\n classifySurface = True\r\n if (classifier == \"SVM\"):\r\n clf = 
cl.classifyDataSVM(data, classifyTrained,\r\n classifySurface, selectFeatures,scaling=False)\r\n elif (classifier == \"DT\"):\r\n clf = cl.classifyDataDT(data, classifyTrained,\r\n classifySurface, selectFeatures,scaling=False)\r\n elif (classifier == \"KNN\"):\r\n clf = cl.classifyDataKNN(data, classifyTrained,\r\n classifySurface, selectFeatures,scaling=False)\r\n elif (classifier == \"LogReg\"):\r\n clf = cl.classifyDataLR(data, classifyTrained,\r\n classifySurface, selectFeatures,scaling=False)\r\n else:\r\n print (str(classifier) + \" is not a valid option\")\r\n \r\n [samples, _,_,_] = clf.extractData(samples,scaling=False)\r\n \r\n predictions = [clf.predict(s) for s in samples]\r\n return predictions", "def predict_only(self):", "def predict(self, data):\n\t\traise NotImplementedError", "def predict(self, datum):\r\n probs = {}\r\n for class_ in set(self.train_classes):\r\n probs[class_] = self.distribution.class_prob[class_] * reduce(lambda x,y:x*y, [self.distribution.prob(feat_ind_feat[0],feat_ind_feat[1],class_) for feat_ind_feat in enumerate(datum)])\r\n return max(probs, key=lambda x:probs[x])", "def predict_one(tree, sample):\n if tree['leaf']:\n return tree['class']\n\n else:\n if sample[tree['feature']] <= tree['split']:\n return predict_one(tree['left'], sample)\n else:\n return predict_one(tree['right'], sample)", "def class_predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 3:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 2:\n return class_predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return class_predict_3(trained_model, X_test, y_test, image_name)", "def predict(self, X):\n if isinstance(self.model, ClassifierMixin):\n scores = self._decision_function(X)\n if len(scores.shape) == 1:\n indices = (scores > 0).astype(np.int)\n else:\n indices = scores.argmax(axis=1)\n return self.classes_[indices]\n else:\n return self._decision_function(X)", "def predict(self, X_pred):\n \n with tf.Session() as sess:\n self.saver.restore(sess, self.log_dir + '/model')\n\n y_pred = sess.run(self.output_class, feed_dict={self.X_tf: X_pred, self.keep_prob: 1.0})\n return y_pred", "def predict(self, X):\n score = self.decision_function(X)\n decisions = self.loss_._score_to_decision(score)\n return self.classes_.take(decisions, axis=0)", "def predict(self, X):\n scores = self.decision_function(X)\n if self.classes.shape[0] == 2:\n indices = np.array(scores > 0, dtype=np.int)\n else:\n indices = scores.argmax(axis=1)\n return self.classes[np.ravel(indices)]", "def predict(self, X):\n res = self.predict_proba(X)\n positive_mask = res >= 0.5\n negative_mask = res < 0.5\n res[positive_mask] = self.POSITIVE_CLASS\n res[negative_mask] = self.NEGATIVE_CLASS\n return res", "def predict(self, X):\r\n \r\n # To speed up, we apply the scoring function to all the instances\r\n # at the same time.\r\n scores = X.dot(self.w)\r\n \r\n # Create the output array.\r\n # At the positions where the score is positive, this will contain\r\n # self.positive class, otherwise self.negative_class.\r\n out = numpy.select([scores>=0.0, scores<0.0], [self.positive_class, \r\n self.negative_class])\r\n return out", "def predict(self, predPoints=None):", "def predict(self, X):\n\t\tproba = numpy.array(self.predict_proba(X))\n\t\treturn self.classes_.take(numpy.argmax(proba, axis=0))", "def predict(self, **kwargs):\n raise NotImplementedError", "def 
predict_class(self, original_image_numpy: np.ndarray) -> None:\n from app.dl_model.image import ClassifierInput\n # scale up coordinates\n self.scale_up_coordinates()\n x1, y1, x2, y2 = [int(coord) for coord in self.scale_coordinates.round()]\n # crop original numpy image\n numpy_image = original_image_numpy[y1:y2, x1:x2, :].copy()\n # create classifier input object\n classifier_input = ClassifierInput(numpy_image, new_shape=(224, 224))\n # classify input\n prediction = classifier_input.predict_class()\n # set attributes\n self.class_name = prediction.class_name # update class_name\n self.conf = prediction.conf # update probability\n self.product_id = prediction.product_id # set product external id\n self.detection_index = prediction.detection_index # set detection index\n self.top_k_names = prediction.top_k_names # set top k names list\n self.top_k_indices = prediction.top_k_indices # set top k detection index\n self.top_k_confidences = prediction.top_k_confidences # set top k confidieces values\n self.top_k_product_ids = prediction.top_k_product_ids # set top k product external ids", "def _predict(self, testX):\n pass", "def predict_single(self,example):\n example = np.array(example)\n weights = np.square(self.X-example)\n weights = np.sum(weights,axis=1)\n weights = -0.5*weights/np.square(self.k)\n weights = np.exp(weights)\n model = LinearRegression()\n model.fit(X=self.X,y=self.y,sample_weight=weights)\n return model.predict(np.reshape(example,[1,-1]))[0]", "def predict(x):\n\n scores = np.zeros(shape=(len(classes_def), len(x)))\n\n for idx, c in enumerate(classes_def):\n\n model_name = model_name_pre + c + model_name_post\n print('Loading model', model_name, 'and making predictions..')\n model = load_model(model_name)\n\n scores[idx] = model.predict(x).reshape(len(x))\n\n out = []\n\n for predictions in scores.T:\n # Majority vote\n max_idx_for_sample = 0\n max_prob_for_sample = 0\n for i, prediction in enumerate(predictions):\n if prediction > max_prob_for_sample:\n max_idx_for_sample = i\n\n out.append(classes_def[max_idx_for_sample])\n\n return out", "def predict(self,X): \n return self._predict(X)", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_svm.predict(data)", "def predict(self, instances):\r\n raise NotImplementedError", "def predict(self, Xtt):\n # predict outputs for test dataset\n self.logger.info(\n self.__name__ + ' predicts on {:d} samples.'.format(Xtt.shape[0]))\n pass", "def predict(self, x):\n raise NotImplementedError(\"Please Implement this method\")", "def predict(self, seq):\n raise Exception(\"You cannot predict with a base predictor.\")", "def predict(self, features):\n scores = self.predict_proba(features)\n return self.classes[np.argmax(scores)]", "def predict(self): \n return self.model.predict(self.test_x)", "def predict(self, X):\n (t0, t1, t2) = self.theta\n g = lambda x: t0 + t1 * x[0] + t2 * x[1]\n return np.array([\n self.classes[1] if g(x) > 0 else self.classes[0]\n for x in X\n ])", "def predict(self, X):\r\n num_test = X.shape[0]\r\n # lets make sure that the output type matches the input type\r\n Ypred = np.zeros(num_test, dtype = self.ytr.dtype)\r\n\r\n # loop over all test rows\r\n for i in range(num_test):\r\n print (\"Testing example \" + str(i))\r\n distances = np.sum(np.abs(self.Xtr - X[i,:]), axis = 1)\r\n # distances = self.chi2_distance(self.Xtr, X[i,:])\r\n min_index = np.argmin(distances) # get the index with smallest distance\r\n Ypred[i] = self.ytr[min_index] # predict the label of the nearest 
example\r\n print (\"Class Label: \" + str(Yte[i]) + \" \" + \"Predicted label: \" + str(Ypred[i]))\r\n return Ypred", "def predict(self, X):\n raise NotImplementedError('Abstract method \"predict\" must be '\n 'specialised!')", "def predict_class(clf, X_test, Y_test, labels=None, stats_fname=None):\n expected = Y_test\n if isinstance(clf, KerasModel):\n char_probs = clf.predict(X_test)\n predicted = np.argmax(char_probs, axis=1)\n\n if len(Y_test.shape) > 1:\n expected = np.argmax(Y_test, axis=1)\n else:\n predicted = clf.predict(X_test)\n\n conf_mat = metrics.confusion_matrix(\n expected, predicted, labels=range(len(labels))\n )\n\n stats = {\n 'Accuracy': metrics.accuracy_score(expected, predicted),\n 'F1': metrics.f1_score(expected, predicted, average='weighted'),\n 'Precision': metrics.precision_score(expected, predicted,\n average='weighted'),\n 'Recall': metrics.recall_score(expected, predicted,\n average='weighted')\n }\n print('Accuracy: %f' % stats['Accuracy'])\n print('F1: %f' % stats['F1'])\n print('percision: %f' % stats['Precision'])\n print('recall: %f' % stats['Recall'])\n\n save_conf_mat(conf_mat, stats, labels, stats_fname)\n\n return predicted", "def predict(self, X):\n\n # this will be an np.array of integers representing classes\n lp_prediction = self.classifier.predict(self.ensure_input_format(X))\n\n return self.inverse_transform(lp_prediction)", "def predict(self, X):\n # Check the fit method has been called\n utils.validation.check_is_fitted(self, 'classes_')\n\n # Check the input\n X = utils.check_array(X, **SKLEARN_INPUT_X_PARAMS)\n\n # Make a prediction for each observation\n y_pred = np.empty(shape=len(X))\n for i, (x, _) in enumerate(stream.iter_numpy(X)):\n y_pred[i] = self.instance_.predict_one(x)\n\n return y_pred", "def _predict(self,\n X=None):\n\n if X is None:\n ypred = self.clf_pipeline.predict(self.Xtrain)\n else:\n ypred = self.clf_pipeline.predict(X)\n\n return ypred", "def _predict(self, x):\n pass", "def predict(self, X):\n\n stuff = self._vectorizer.transform(X)\n result = self._classifier.predict(stuff)\n return result\n pass", "def predict(self, X):\n check_is_fitted(self, [\"posterior_matrix_\"])\n X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)\n return self.classes_[self.predict_proba(X).argmax(axis=1)]", "def predict(self, X, pred_batch_size=None):", "def predict(model, X):\n\tmodel.eval()\n\t# make the predictions\n\tscores = model.forward(X)\n\n\t# scores contains, for each example, two scores that can be interpreted as the\n\t# probability of each example belonging to each of the classes. To select the\n\t# final predicted label, we will select the class with higher probability.\n\tpredicted_labels = scores.argmax(dim=-1) # predicted_labels shape: (n_examples)\n\n\treturn predicted_labels", "def target_predict(self, inp):\n return self.target_model.predict(inp)", "def predict(self, src): # real signature unknown; restored from __doc__\n pass", "def predict(self, X, dropout = False):\n a1, z2, a2, z3, a3 = self.forward(X, self.w1, self.w2, do_dropout = False)\n #z3 is of dimension output units x num_samples. each row is an array representing the likelihood that the sample belongs to the class label given by the index...\n #ex: first row of z3 = [0.98, 0.78, 0.36]. This means our network has 3 output units = 3 class labels. 
And this instance most likely belongs to the class given by the label 0.\n y_pred = np.argmax(a3, axis = 0)\n return y_pred", "def predict(self,X):\n y_pred = np.random.choice(self.labels, size=(X.shape[0],), p=self.thresholds)\n return y_pred", "def make_predictions(self):\n if is_classification(self.model):\n if self.ct == None:\n prediction = self.model.predict(self.input_data.to_numpy())\n probabilities = self.model.predict_proba(self.input_data.to_numpy())\n return prediction, probabilities\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n probabilities = self.model.predict_proba(self.data_into_model())\n return prediction, probabilities\n else:\n raise Exception((\"{} not supported. Please create an issue on Github\").format(self.model))\n \n else:\n if self.ct == None:\n prediction = self.model.predict(self.input_data)\n return prediction\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n return prediction\n else:\n raise Exception((\"{} not supported. Please create an issue on Github\").format(self.self.model))", "def predict_single(self, line):\n # print(line)\n prob_list = {}\n for claz in self.class_list:\n prob_list[claz] = 1\n\n # for each cat column\n for col in self.cat_cols:\n val = line[col]\n for claz in self.class_list:\n prob_list[claz] *= self.prob_hub[col][claz][val]\n\n # for each num column\n for col in self.num_cols:\n val = line[col]\n # for each class\n for claz in self.class_list:\n mean, std = self.prob_hub[col][claz]\n prob_list[claz] *= calculate_prob(val, mean, std)\n\n return max(prob_list.items(), key=operator.itemgetter(1))[0]", "def predict(self, obs):\n pass", "def predict(self, X, **kwargs):\n return Learner.predict(self, X, **kwargs)", "def bl_predict(self, n_samples, data=None):\n\n if data is None:\n data = self.datas[self.train_idx]\n\n y_train = data.gen_labels()\n bl = DummyClassifier()\n bl.fit(np.random.rand(len(y_train), 1), y_train)\n\n return self._predict_proba(bl, np.random.rand(n_samples, 1))", "def predict(self, example):\n return self.decisionTree.traverse_tree(example)", "def predict(self,Xpred, nsamples=2000, tune=100, progress=True, points2=[]):\n if self.type_y=='affine':\n return self.predict_affine(Xpred, nsamples, tune, progress, points2)\n elif self.type_y=='regression':\n return self.predict_regression(Xpred, nsamples, tune, progress, points2)\n elif self.type_y=='mixed':\n return self.predict_mixed(Xpred, nsamples, tune, progress, points2)", "def predict_category(self):\n pass", "def predict(self, x):\n features = self._get_features(x)\n\n y_pred = self.classifier.predict(features)\n\n return y_pred", "def predict(self, dataset, output_type='class', batch_size=64):\n if not isinstance(dataset, (_tc.SFrame, _tc.SArray, _tc.Image)):\n raise TypeError('dataset must be either an SFrame, SArray or turicreate.Image')\n if(batch_size < 1):\n raise ValueError(\"'batch_size' must be greater than or equal to 1\")\n\n dataset, unpack = self._canonize_input(dataset)\n\n extracted_features = self._extract_features(dataset, batch_size=batch_size)\n return unpack(self.classifier.predict(extracted_features, output_type=output_type))", "def predict(self, data_in):\n pass", "def _get_prediction(self):\n raise NotImplementedError", "def classify(self,X):\n return int(self.classifier.predict(self.scaler.transform(X)))", "def classify(self,X):\n return int(self.classifier.predict(self.scaler.transform(X)))" ]
[ "0.7528597", "0.7466587", "0.7466587", "0.7458586", "0.7371801", "0.73080134", "0.7273918", "0.7189897", "0.70958567", "0.7088658", "0.702793", "0.70140815", "0.6998257", "0.6997045", "0.6972628", "0.69581044", "0.6956856", "0.6954656", "0.6954656", "0.6954656", "0.6941434", "0.69336116", "0.69226104", "0.6901456", "0.6900637", "0.6900637", "0.6900637", "0.6900637", "0.68999016", "0.68999016", "0.68999016", "0.6882813", "0.6882813", "0.6846207", "0.68461293", "0.68438315", "0.68415403", "0.6826762", "0.68264437", "0.68224746", "0.6819159", "0.681018", "0.6791704", "0.67860967", "0.67840177", "0.6783452", "0.6776943", "0.676988", "0.6765701", "0.6763719", "0.6762694", "0.67617774", "0.6750563", "0.67489487", "0.6737409", "0.67359006", "0.6733832", "0.67289215", "0.6720848", "0.67141086", "0.6707472", "0.6705866", "0.6705379", "0.6702672", "0.66992235", "0.6698638", "0.669861", "0.66894", "0.6688194", "0.6686609", "0.66841644", "0.66575533", "0.66443336", "0.6644008", "0.6641511", "0.663872", "0.66362035", "0.66295046", "0.66207725", "0.66149014", "0.6611964", "0.65974176", "0.6597161", "0.65824944", "0.65688986", "0.65633893", "0.655343", "0.6538996", "0.65385425", "0.65303934", "0.65298504", "0.65287113", "0.65231115", "0.65153724", "0.650584", "0.64997226", "0.6497995", "0.6493037", "0.64851665", "0.64824337", "0.64824337" ]
0.0
-1
A class without the key_fields annotation should raise a RuntimeError
def testNoKeyFields(): with pytest.raises(RuntimeError): class AnnotatedNode(Node): x: str y: int def __init__(self, x: str, y: int): self.x = x self.y = y @property def _display(self) -> str: return self.x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_key_no_data(self):\n key = Key({})\n\n assert key.warning is None\n assert key.in_car is None", "def _validate(self):\n fields, schema = self.__dict__, self._def.default\n extra_fields = fields.viewkeys() - schema.viewkeys()\n if len(extra_fields) > 0:\n raise AttributeError('Fields found that are not in the schema: %r' % (list(extra_fields)))\n for key in fields.iterkeys():\n if type(fields[key]) is not type(schema[key]):\n raise AttributeError('Invalid %s for field \"%s\", should be %s' %\n (type(fields[key]), key, type(schema[key])))", "def test_set_non_dictionary_based_field(self):\n self.assertRaises(TypeError, self._p.set_fields, '')", "def test_entities__Entity__getRawField__1(entity):\n with pytest.raises(KeyError):\n entity.getRawField('asdf')", "def test_throws_base_price_missing_key(self):\n with pytest.raises(marshmallow.ValidationError):\n BasePrice.Schema().loads(json.dumps(base_price_missing_key))", "def test_unknown_fields_are_not_allowed() -> None:\n with pytest.raises(pydantic.ValidationError):\n r4.Meta(unknown_field=True)", "def testBadKeys(self):\n # Ignore access to protected members\n # pylint: disable=W0212\n self.assertRaises(DOLAPI._DOLAPIError,\n self.badauth.table,\n self.dataset,\n self.table)", "def test_defining_a_primary_key_counter_column_fails(self):\r\n with self.assertRaises(TypeError):\r\n class model(Model):\r\n partition = columns.UUID(primary_key=True, default=uuid4)\r\n cluster = columns.Counter(primary_ley=True)\r\n counter = columns.Counter()\r\n\r\n # force it\r\n with self.assertRaises(ModelDefinitionException):\r\n class model(Model):\r\n partition = columns.UUID(primary_key=True, default=uuid4)\r\n cluster = columns.Counter()\r\n cluster.primary_key = True\r\n counter = columns.Counter()", "def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)", "def test_defining_only_or_defer_on_nonexistant_fields_fails(self):", "def __missing__(self, key):\n return key", "def test_missing_required_field_raises_error():\n with pytest.raises(ValidationError):\n Entity()", "def test_missing_mandatory(self):\n try:\n CollectorUpdate()\n self.assertFalse(\"RuntimeError expected\")\n except RuntimeError as exception:\n assert_that(str(exception), equal_to(\"Missing keys: 'stage', 'status', 'timestamp'\"))", "def test_entities__Entity__getField__1(entity):\n with pytest.raises(KeyError):\n entity.getField('asdf')", "def _check_key(self, key):\n raise NotImplementedError", "def test_declare_error(self):\n\n with self.assertRaises(ModelDeclareError) as ctx:\n class Foo(Base):\n pass\n\n with self.assertRaises(ModelDeclareError) as ctx:\n class Foo(Base):\n _id = IDField()\n _id_2 = IDField()", "def test_raise_if_no_attr(self):\n self.assertRaises(AttributeError, self.Model.set_primary_key, 'asdf')", "def testMissingKeys(self):\n self.assertRaises(ValueError,\n self.unauth.table,\n self.dataset,\n self.table)", "def test_raise_on_missing_critical(self):\n name_for_field = 'absent_field'\n field_opts = {'names': (name_for_field, 'absent'), 'alt_field': '', 'computed': False}\n critical_fields = {'absent_field': field_opts}\n with self.assertRaises(ImproperlyConfigured):\n self.form.fields_for_critical(critical_fields)", "def check_keys(self):", "def test_keys_failure(self):\n storage = Storage()\n storage._keys_dict = {'1': 'one',\n 'abc': '1'}\n self.assertRaises(StoragePatternError, storage.keys, 'ab[cd')", "def test_fields(self):\n\n class Foo(Model):\n field1 = 
StringField()\n field2 = IntegralField()\n\n assert hasattr(Foo, \"_fields\")\n assert type(Foo._fields) is dict\n\n assert not hasattr(Foo, \"field1\")\n assert \"field1\" in Foo._fields\n assert type(Foo._fields[\"field1\"]) is StringField\n\n assert not hasattr(Foo, \"field2\")\n assert \"field2\" in Foo._fields\n assert type(Foo._fields[\"field2\"]) is IntegralField", "def test_normal_fields_can_be_defined_between_primary_keys(self):", "def test_attempting_to_save_abstract_model_fails(self):\r\n with self.assertRaises(CQLEngineException):\r\n AbstractModelWithFullCols.create(pkey=1, data=2)", "def __missing__(self, key):\n global MISSING\n MISSING = key # For debugging - save name of missing key\n return INVALID", "def test_primary_key(self):\r\n\r\n # This should just work.\r\n class AutoFieldKey(models.Model):\r\n key = models.AutoField(primary_key=True)\r\n AutoFieldKey.objects.create()\r\n\r\n # This one can be exactly represented.\r\n class CharKey(models.Model):\r\n id = models.CharField(primary_key=True, max_length=10)\r\n CharKey.objects.create(id='a')\r\n\r\n # Some rely on unstable assumptions or have other quirks and\r\n # should warn.\r\n\r\n# # TODO: Warning with a range limitation.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class IntegerKey(models.Model):\r\n# id = models.IntegerField(primary_key=True)\r\n# IntegerKey.objects.create(id=1)\r\n\r\n# # TODO: date/times could be resonably encoded / decoded as\r\n# # strings (in a reversible manner) for key usage, but\r\n# # would need special handling and continue to raise an\r\n# # exception for now\r\n# with self.assertRaises(Warning):\r\n#\r\n# class DateKey(models.Model):\r\n# id = models.DateField(primary_key=True, auto_now=True)\r\n# DateKey.objects.create()\r\n\r\n# # TODO: There is a db.Email field that would be better to\r\n# # store emails, but that may prevent them from being\r\n# # used as keys.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class EmailKey(models.Model):\r\n# id = models.EmailField(primary_key=True)\r\n# EmailKey.objects.create(id='[email protected]')\r\n\r\n# # TODO: Warn that changing field parameters breaks sorting.\r\n# # This applies to any DecimalField, so should belong to\r\n# # the docs.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class DecimalKey(models.Model):\r\n# id = models.DecimalField(primary_key=True, decimal_places=2,\r\n# max_digits=5)\r\n# DecimalKey.objects.create(id=1)\r\n\r\n # Some cannot be reasonably represented (e.g. 
binary or string\r\n # encoding would prevent comparisons to work as expected).\r\n with self.assertRaises(DatabaseError):\r\n\r\n class FloatKey(models.Model):\r\n id = models.FloatField(primary_key=True)\r\n FloatKey.objects.create(id=1.0)\r\n\r\n # TODO: Better fail during validation or creation than\r\n # sometimes when filtering (False = 0 is a wrong key value).\r\n with self.assertRaises(DatabaseError):\r\n\r\n class BooleanKey(models.Model):\r\n id = models.BooleanField(primary_key=True)\r\n BooleanKey.objects.create(id=True)\r\n len(BooleanKey.objects.filter(id=False))", "def __init__(self, key):\n self.key = key", "def test_no_extra_fields():\n t_task = Task()\n t_dict = t_task._asdict()\n assert len(t_dict) <= 4", "def __init__(self, key=None):\n self.key = key", "def __missing__(self, key):\n raise KeyNotInContextError(f\"{key} not found in the pypyr context.\")", "def __init__(self, key):\n Base.__init__(self, key)", "def __init__(self, key):\n Base.__init__(self, key)", "def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")", "def test_throws_item_missing_key(self):\n with pytest.raises(marshmallow.ValidationError):\n Item.Schema().loads(json.dumps(item_missing_key))", "def fail(self, key: str, **kwargs):\n warnings.warn(\n '`Field.fail` is deprecated. Use `raise self.make_error(\"{}\", ...)` instead.'.format(\n key\n ),\n RemovedInMarshmallow4Warning,\n stacklevel=2,\n )\n raise self.make_error(key=key, **kwargs)", "def test_validate_non_included_keys():\n field = PartialDictField(included_keys=['a'], value_field=CharField(max_length=5),\n required=False)\n data = {'b': '123456'}\n try:\n field.run_validators(data)\n except ValidationError:\n assert False, 'Got a ValidationError for a non-included key'", "def test_base_schema_ignores_unknown_fields():\n assert BaseSchema().load({\"unknown\": \"field\"}) == {}", "def validate_key(self, key: keyType) -> bool:\n if isinstance(key, (dict,bool)):\n raise Exception\n if key is None:\n raise Exception\n # Numerical key object has no len(),\n # so explicitly specify which types are not allowed to use empty value as keys\n if isinstance(key, (str, tuple, set, list)) and (len(key) == 0):\n raise Exception\n return True", "def __init__(self):\n raise", "def test_with_nonexisting_attr(create_file_with_text):\n test_class = KeyValueStorage(create_file_with_text)\n with pytest.raises(ValueError, match=\"No such key\"):\n test_class[\"wrong_attribute\"]", "def test_non_hashable1(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar\n d = {}\n self.assertRaises(TypeError, hash, xp)\n self.assertRaises(TypeError, d.setdefault, xp, 'key')", "def test_raise_error_unknown_field():\n\n options = {'fields': ['kHello']}\n\n with pytest.raises(KeyError) as excinfo:\n process.Packager(options)\n excinfo.match('Field ([a-zA-Z].*) not found in file list.')", "def test_item_class_relaxed_validation_01(non_conformant_data: NonConformantData):\n login_dict = non_conformant_data.data_for_name(\"login-field-missing-id\")\n with pytest.raises(OPInvalidItemException):\n OPLoginItem(login_dict)\n set_relaxed_validation_for_class(OPLoginItem)\n assert OPLoginItem(login_dict)", "def test_create_bad_pkey(self):\n class Testing(self.base):\n __tablename__ = \"testing_table\"\n bad_id = Column(Integer, primary_key=True)\n\n class UserAPI(API):\n model = Testing\n session = self.session\n\n t = UserAPI()\n self.assertRaises(AttributeError, lambda: t.pkey)", "def test_raises_on_missing_needed_fields(self):\n test_name = 
\"impossible_creature_not_present\"\n self.form.constructor_fields = [*self.form.constructor_fields, test_name]\n message = \"The fields for email, username, and constructor must be set in fields. \"\n self.assertNotIn(test_name, self.form.base_fields)\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()", "def test_empty_model(self):\n\n class Foo(Model):\n pass\n\n assert hasattr(Foo, \"_fields\")\n assert type(Foo._fields) is dict\n assert len(Foo._fields.items()) == 0", "def __init__(self, key: bytes):\n\n if len(key) != 32:\n raise ValueError('Key must be 32 bytes long')\n self.key = key", "def __init__(\n self,\n key, # type: Key\n exclude_from_indexes=() # type: Iterable[str]\n ):\n self.key = key\n self.exclude_from_indexes = set(exclude_from_indexes)\n self.properties = {}", "def test_raises_on_constructor_fields_error(self):\n self.form.constructor_fields = None\n message = \"Expected a list of field name strings for constructor_fields. \"\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()", "def test_wrong_key(self):\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro(\"\")\n assert FinderInsideProException.EXCEPTION_TEXT_KEY_NOT_SET in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG\n\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro('aaa')\n assert FinderInsideProException.EXCEPTION_TEXT_WRONG_KEY in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG", "def test_fieldname_exc(self):\n ds = self.f.create_dataset('foo', (100,), 'f')\n self.assertRaises(ValueError, ds.__getitem__, (0, 'a'))", "def __init__(self, **kwargs):\n for type_hint in self.__fields_types__.values():\n if type_hint is ForwardRef or ForwardRef in get_args(type_hint):\n raise Warning(\"Not all type hints were evaluated.\")\n errors = []\n for name in kwargs:\n if ((getattr(self, name, None) is not None\n and name not in self.__fields_types__)\n or name in self._forbidden_fields):\n errors.append(f\" This attribute name is reserved: '{name}'.\")\n if errors:\n raise ValueError(\"\\n\" + \"\\n\".join(errors))\n for k, v in kwargs.items():\n setattr(self, k, v)", "def test_toomanykeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", (\"key1\", \"key2\"))", "def test_defining_a_non_counter_column_fails(self):\r\n with self.assertRaises(ModelDefinitionException):\r\n class model(Model):\r\n partition = columns.UUID(primary_key=True, default=uuid4)\r\n counter = columns.Counter()\r\n text = columns.Text()", "def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)", "def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)", "def test_attribute_missing_validation():\n\n @attr.s\n class Foo(object):\n something = attr.ib()\n\n with pytest.raises(UnextractableSchema):\n extract_jsonschema(Foo)", "def testKeyInfoTooShort(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='x', keyInfo='xx')", "def test_register_error_empty_key(self):\n registry = ClassRegistry('element')\n\n with self.assertRaises(ValueError):\n # noinspection PyUnusedLocal\n @registry.register(None)\n class Ponyta(Pokemon):\n element = 'fire'\n\n with self.assertRaises(ValueError):\n # 
noinspection PyUnusedLocal\n @registry.register('')\n class Rapidash(Pokemon):\n element = 'fire'\n\n with self.assertRaises(ValueError):\n # noinspection PyUnusedLocal\n @registry.register\n class Mew(Pokemon):\n element = None\n\n with self.assertRaises(ValueError):\n # noinspection PyUnusedLocal\n @registry.register\n class Mewtwo(Pokemon):\n element = ''", "def _validateKey(self, key, cls = None):\n\n key_class_types = [self._BaseKey__class, self._LocalKey__class,\n self._MsgKey__class, ErrorMsgManager]\n\n if cls:\n if inspect.isclass(cls) and cls in key_class_types:\n classes = [cls]\n else:\n return None\n else:\n classes = key_class_types\n return any([isinstance(key, cls) for cls in classes])", "def error(self, key, **kwargs):\n try:\n msg = self.error_messages[key]\n except KeyError:\n class_name = self.__class__.__name__\n raise AssertionError('Error with key={} is not found for class={}'.format(key, class_name))\n message_string = msg.format(**kwargs)\n raise ValidationError(message_string, code=key)", "def test_prediction_key_required(self):\n self._config['Prediction key'] = ''\n with self.assertRaisesRegex(ValueError,\n 'Please provide the prediction key'):\n self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)", "def _yamlAttributeKeys(self):\n raise NotImplementedError", "def test_multiple_polymorphic_key_failure(self):\r\n with self.assertRaises(models.ModelDefinitionException):\r\n class M(models.Model):\r\n partition = columns.Integer(primary_key=True)\r\n type1 = columns.Integer(polymorphic_key=True)\r\n type2 = columns.Integer(polymorphic_key=True)", "def _KeyMissing(side):\n return 'Key missing from %s' % side", "def _check_key_type(cls, key: Any) -> K:\n if not isinstance(key, cls.keytype):\n raise KeyError(\n f\"{cls!r} accepts only keys of type {cls.keytype!r}, \"\n f\"got {type(key)!r}\"\n )\n return cast(K, key)", "def test_minimal_validation(self, asset_class: Type[_PandasDataAsset]):\n with pytest.raises(pydantic.ValidationError) as exc_info:\n asset_class( # type: ignore[call-arg]\n name=\"test\",\n invalid_keyword_arg=\"bad\",\n )\n\n errors_dict = exc_info.value.errors()\n assert {\n \"loc\": (\"invalid_keyword_arg\",),\n \"msg\": \"extra fields not permitted\",\n \"type\": \"value_error.extra\",\n } == errors_dict[ # the extra keyword error will always be the last error\n -1 # we don't care about any other errors for this test\n ]", "def _pre_put_hook(self): # pylint: disable=g-bad-name\n super(BaseModel, self)._pre_put_hook() # pylint: disable=protected-access\n self.GenerateKey()", "def test_error():\n with pytest.raises(AttributeError):\n doc = []\n model._extract_keywords_single_doc(doc)", "def test_raise_on_corrupt_computed_fields(self):\n initial = self.form.computed_fields\n self.form.computed_fields = 'This is a broken value'\n with self.assertRaises(ImproperlyConfigured):\n self.form.get_computed_field_names([], self.form.fields)\n self.form.computed_fields = None\n with self.assertRaises(ImproperlyConfigured):\n self.form.get_computed_field_names([], self.form.fields)\n self.form.computed_fields = initial", "def test_incorrect_prediction_key(self):\n self._config['Prediction key'] = 'wrong_key'\n with self.assertRaisesRegex(ValueError, 'Invalid prediction key'):\n self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)", "def test_toofewkeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, 
\"1234567890\", \"10s\", ())", "def test_getObjectByKey_raises_KeyError(self):\n try:\n self.tile_bucket.getObjectByKey('foo_key')\n except KeyError:\n return\n assert(False)", "def test_required():\n schema = Schema({Required('q'): 1})\n # Can't use nose's raises (because we need to access the raised\n # exception, nor assert_raises which fails with Python 2.6.9.\n try:\n schema({})\n except Invalid as e:\n assert_equal(str(e), \"required key not provided @ data['q']\")\n else:\n assert False, \"Did not raise Invalid\"", "def test_getter_key_error(self):\n root = netapp_api.NaElement('root')\n self.mock_object(root, 'get_child_by_name', return_value=None)\n self.mock_object(root, 'has_attr', return_value=None)\n\n self.assertRaises(KeyError,\n netapp_api.NaElement.__getitem__,\n root, '123')", "def test_config_key_error():\n c = core.Config()\n\n with pytest.raises(KeyError):\n c['doesNotExist']", "def __init__(self, obj, path, notes=()):\n format_dict = {'attribute': path[-1], 'object_name': obj._name}\n message = (\"'{attribute}' is not allowed in '{object_name}'\"\n .format(**format_dict))\n notes = [obj.help(return_help=True)] + list(notes)\n super(PlotlyDictKeyError, self).__init__(\n message=message, path=path, notes=notes\n )", "def test_del_attribute_is_assigned_properly(self):\r\n class DelModel(Model):\r\n id = columns.UUID(primary_key=True, default=lambda:uuid4())\r\n key = columns.Integer(primary_key=True)\r\n data = columns.Integer(required=False)\r\n\r\n model = DelModel(key=4, data=5)\r\n del model.data\r\n with self.assertRaises(AttributeError):\r\n del model.key", "def test_tag_keys_dynamic_field_validation_failure(self):\n tag_keys = [\"valid_tag\"]\n query_params = {\"bad_tag\": \"*\"}\n serializer = OCIGroupBySerializer(data=query_params, tag_keys=tag_keys)\n with self.assertRaises(serializers.ValidationError):\n serializer.is_valid(raise_exception=True)", "def __init__(self, key = None):\n self.key = key\n self.response_format = 'json'\n \n if self.key is None:\n raise NoAPIKeyException('Warning: Missing API Key. 
Please visit ' + API_SIGNUP_PAGE + ' to register for a key.')", "def __init__(self):\n self._key = ''", "def test_missingKey(self):\n self.assertIsNone(self.users.key(\"mystery domain\", \"mystery user\"))", "def test_allow_extra_keys(self):\n from natcap.invest import validation\n\n args = {'a': 'a', 'b': 'b'}\n spec = {\n 'a': {\n 'type': 'freestyle_string',\n 'name': 'a',\n 'about': 'a freestyle string',\n 'required': True\n }\n }\n message = 'DEBUG:natcap.invest.validation:Provided key b does not exist in MODEL_SPEC'\n\n with self.assertLogs('natcap.invest.validation', level='DEBUG') as cm:\n validation.validate(args, spec)\n self.assertTrue(message in cm.output)", "def test_raises_when_accessing_none_implementation(self):\n\n class APIObj(\n platform.APIObject,\n collections.namedtuple(\"APIObj\", \"implementation\"),\n ):\n def __new__(cls):\n return super().__new__(cls, implementation=None)\n\n obj = APIObj()\n\n with pytest.raises(AttributeError) as exc_info:\n obj.implementation # pylint: disable=pointless-statement\n\n assert \"invalid access to 'implementation': not initialized\" in str(\n exc_info.value\n )", "def test_no_help_key(self):\n test_obj = _MadCapFlareMixinTest()\n\n self.assertRaises(\n ImproperlyConfigured,\n test_obj.get_help_key)\n\n self.assertRaises(\n ImproperlyConfigured,\n test_obj.get_context_data)", "def __init__(self, **data):\n super().__init__(**data)\n unknowns = set(data.keys()) - set(self.__dict__.keys())\n for arg in unknowns:\n msg = (\n f\"Response contains unknown attribute: `{arg}`, which was discarded.\"\n \" This warning may be safely ignored. Please consider upgrading Tekore.\"\n )\n warn(msg, UnknownModelAttributeWarning, stacklevel=5)", "def test_from_dict_bad_event_key(self):\n from google.appengine.ext import ndb\n\n from sosbeacon.event.event import Event\n from sosbeacon.event.message import Message\n\n event_key = ndb.Key(Event, 1)\n\n self.assertRaisesRegexp(\n Exception, \"Event not found\",\n Message.from_dict, {'event': event_key})", "def __missing__(self, key):\n self[key] = self.factory(key)\n return self[key]", "def test_attempting_query_on_abstract_model_fails(self):\r\n with self.assertRaises(CQLEngineException):\r\n iter(AbstractModelWithFullCols.objects(pkey=5)).next()", "def key_type(self):\n raise exceptions.NotImplementedError()", "def test_dupe_keys():\n assert_raises(voluptuous.SchemaError, Schema,\n {Required(\"id\"): str, Required(\"id\"): int})\n assert_raises(voluptuous.SchemaError, Schema,\n {Required(\"id\"): str, \"id\": str})\n assert_raises(voluptuous.SchemaError, Schema,\n {voluptuous.Optional(\"id\"): str, Required(\"id\"): int})", "def test_no_api_key(self):\n\n self.assertRaises(Exception, kaput.init, None, '123')", "def testKeyInfoTooLong(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey',\n keyInfo='xxxxx')", "def test_model_formfield_doesnt_raise(self):\n try:\n fields_for_model(Color())\n except AttributeError:\n self.fail(\"Raised Attribute Error\")", "def test_init(self):\n person = Person('test_person_a')\n self.assertEqual(person.name, 'test_person_a')\n self.assertEqual(person.address, '123 Fake Street')\n self.assertEqual(person.email, '[email protected]')\n\n with self.assertRaises(KeyError):\n Person('fake_person')", "def _check_field_annotations(cls: Type):\n cls_annotations = cls.__dict__.get(\"__annotations__\", {})\n cls.__annotations__ = cls_annotations\n\n for field_name, field in cls.__dict__.items():\n if not isinstance(field, (StrawberryField, 
dataclasses.Field)):\n # Not a dataclasses.Field, nor a StrawberryField. Ignore\n continue\n\n # If the field is a StrawberryField we need to do a bit of extra work\n # to make sure dataclasses.dataclass is ready for it\n if isinstance(field, StrawberryField):\n\n # Make sure the cls has an annotation\n if field_name not in cls_annotations:\n # If the field uses the default resolver, the field _must_ be\n # annotated\n if not field.base_resolver:\n raise MissingFieldAnnotationError(field_name)\n\n # The resolver _must_ have a return type annotation\n # TODO: Maybe check this immediately when adding resolver to\n # field\n if field.base_resolver.type is None:\n raise MissingReturnAnnotationError(field_name)\n\n cls_annotations[field_name] = field.base_resolver.type\n\n # TODO: Make sure the cls annotation agrees with the field's type\n # >>> if cls_annotations[field_name] != field.base_resolver.type:\n # >>> # TODO: Proper error\n # >>> raise Exception\n\n # If somehow a non-StrawberryField field is added to the cls without annotations\n # it raises an exception. This would occur if someone manually uses\n # dataclasses.field\n if field_name not in cls_annotations:\n # Field object exists but did not get an annotation\n raise MissingFieldAnnotationError(field_name)", "def test_entities__Entity__getClass__2():\n e = Entity(None, IDummy, None)\n with pytest.raises(ValueError):\n e.getClass()", "def validate_instruction_keys(instruction: TransactionInstruction, expected: int) -> None:\n if len(instruction.keys) < expected:\n raise ValueError(f\"invalid instruction: found {len(instruction.keys)} keys, expected at least {expected}\")", "def test_class_errored(self, cls, exception):", "def test_id_field_is_not_created(self):\r\n assert not hasattr(AbstractModel, 'id')\r\n assert not hasattr(AbstractModelWithCol, 'id')" ]
[ "0.664005", "0.64458597", "0.63761204", "0.63392276", "0.620251", "0.61894745", "0.6169179", "0.611666", "0.60839826", "0.60760987", "0.6069411", "0.6068303", "0.60593605", "0.60562086", "0.6022622", "0.60070866", "0.5998208", "0.59926015", "0.59468085", "0.59364104", "0.5934645", "0.59262913", "0.58830374", "0.5865956", "0.5852171", "0.584993", "0.5848283", "0.5838684", "0.5835953", "0.5835846", "0.58323926", "0.58323926", "0.58107543", "0.58045536", "0.57720906", "0.576315", "0.57600385", "0.5749434", "0.5734964", "0.5727367", "0.5710829", "0.5709084", "0.5701598", "0.57013285", "0.5673675", "0.56697637", "0.56697553", "0.56555176", "0.56511736", "0.5649707", "0.5646471", "0.56387997", "0.5634837", "0.56343347", "0.56283295", "0.56283295", "0.5616781", "0.5607543", "0.5602598", "0.55961657", "0.5593219", "0.5590709", "0.5579064", "0.5573749", "0.555313", "0.55519193", "0.5534687", "0.55266637", "0.55258167", "0.55189246", "0.5506024", "0.5497593", "0.5488335", "0.5485679", "0.54755694", "0.54716504", "0.54655766", "0.54323786", "0.54296666", "0.54294264", "0.54263", "0.5420944", "0.54158926", "0.54059863", "0.5403466", "0.5390581", "0.5387498", "0.53801924", "0.53744346", "0.5363138", "0.5351949", "0.5343104", "0.53428984", "0.5342379", "0.5339955", "0.53319365", "0.5329238", "0.5326876", "0.5323006", "0.53206104" ]
0.7018351
0
First node's fields should be updated with the second nodes
def testMergeRejectsUnequalNodes(): n1 = DummyNode(x=1, y=2, z=4) n2 = DummyNode(x=1, y=3, z=3) with pytest.raises(TypeError): n1.merge_with(n2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mergeNodes(new, t1, t2):\n \n if t1 and t2:\n new.val = t1.val + t2.val\n elif not t1:\n new.val = t2.val\n elif not t2:\n new.val = t1.val", "def update(self, other):\n self._start = other._start\n self._end = other._end\n self._nodes = {k: v.copy() for k,v in other._nodes.iteritems()}\n self._edges = {k: set(v) for k,v in other._edges.iteritems()}\n self._names = set(other._names)\n self.current = other.current", "def update(self, other):\n b = self.hallucinate_merge(other)\n self.l_child = b.l_child\n self.r_child = b.r_child", "def merge_nodes(self, parent, child):\n parent.key += child.key\n parent.real = child.real\n parent.value = child.value\n parent.children = child.children", "def assertNodesEqual(self, first, second):\n def get_attrs(l):\n result = []\n for n in l:\n result.append((n.service, n.address, n.version, n.properties))\n return result\n self.assertEqual(get_attrs(first), get_attrs(second))", "def __swap_kv(self, node1, node2):\r\n node1.key, node2.key = node2.key, node1.key\r\n node1.value, node2.value = node2.value, node1.value", "def _redirect(self, node1, node2):\n if node1.parent.right is node1:\n node1.parent.right = node2\n else:\n node1.parent.left = node2", "def assertNodesEqual(self, a, b):\n self.assertEqual((a.version, a.address, a.service, a.properties),\n (b.version, b.address, b.service, b.properties))", "def test_update_node_second_level_component_with_first_level_parent(self):\n payload = {\n 'data': [{\n 'type': 'nodes',\n 'id': self.public_project._id\n }, {\n 'type': 'nodes',\n 'id': self.first_level_component._id\n }, {\n 'type': 'nodes',\n 'id': self.second_level_component._id\n }]\n }\n res = self.app.put_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)\n self.view_only_link.reload()\n assert_equal(res.status_code, 200)\n assert_equal(len(res.json['data']), 3)\n assert_in(self.public_project, self.view_only_link.nodes.all())\n assert_in(self.first_level_component, self.view_only_link.nodes.all())\n assert_in(self.second_level_component, self.view_only_link.nodes.all())", "def merge_sidewalks(sidewalk_network1, sidewalk_network2):\n\n for node in sidewalk_network1.nodes.get_list():\n node.confirmed = True\n\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n in_other = False\n same_node = None\n for other_sidewalk_node in sidewalk_network1.nodes.get_list():\n if sidewalk_node.location() == other_sidewalk_node.location():\n in_other = True\n same_node = other_sidewalk_node\n if not in_other: # If street network 2 contains the node but street network 1 does not\n sidewalk_network1.add_node(sidewalk_node) # Add node from street network 2 to street network 1\n else: # If both networks contain the node\n sidewalk_network2.nodes.update(sidewalk_node.id, same_node)\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n\n network1_dict = {}\n for sidewalk_node in sidewalk_network1.nodes.get_list():\n network1_dict[sidewalk_node.location] = sidewalk_node\n\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n if sidewalk_node.location not in network1_dict:\n sidewalk_network1.add_node(sidewalk_node)\n else:\n sidewalk_network2.nodes.update(sidewalk_node.id, network1_dict[sidewalk_node.location])\n\n # add new ways from sidewalk_network2 to sidewalk_network1\n for way in sidewalk_network2.ways.get_list():\n # ensure all ways have correct nids, if incorrect update to correct nid from network1\n for nid in way.get_node_ids():\n if 
sidewalk_network1.nodes.get(nid) is None:\n way.swap_nodes(nid, sidewalk_network2.nodes.get(nid).id)\n\n has_confirmed_parents = False\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid).confirmed:\n has_confirmed_parents = True\n if not has_confirmed_parents:\n sidewalk_network1.add_way(way)\n\n return sidewalk_network1", "def test_set_node_second_level_component_with_first_level_parent(self):\n payload = {\n 'data': [\n {\n 'type': 'nodes',\n 'id': self.first_level_component._id\n },\n {\n 'type': 'nodes',\n 'id': self.second_level_component._id\n }\n ]\n }\n res = self.app.post_json_api(self.url, payload, auth=self.user.auth)\n self.view_only_link.reload()\n assert_equal(res.status_code, 201)\n assert_in(self.first_level_component, self.view_only_link.nodes.all())\n assert_in(self.second_level_component, self.view_only_link.nodes.all())", "def new_data(first: dict, second: dict, changeables: tuple):\n for name, field in first.items():\n if name not in changeables:\n second[name] = field", "def merge(self):\n leftSon = self.nodes.getNode(0)\n rightSon = self.nodes.getNode(1)\n self.nodes.removeNode(0)\n self.nodes.removeNode(0)\n self.nodes.addNode(Node(leftSon, rightSon))", "def merge(self, a, b):\n old_id, target_id = sorted((self.node_id[a], self.node_id[b]), key = lambda id: len(self.groups[id]))\n for node in self.groups[old_id]:\n self.node_id[node] = target_id\n self.groups[target_id] |= self.groups[old_id]\n del self.groups[old_id]", "def test_update_node_second_level_component_without_first_level_parent(self):\n payload = {\n 'data': [{\n 'type': 'nodes',\n 'id': self.public_project._id\n }, {\n 'type': 'nodes',\n 'id': self.second_level_component._id\n }]\n }\n res = self.app.put_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)\n self.view_only_link.reload()\n assert_equal(res.status_code, 200)\n assert_equal(len(res.json['data']), 2)\n assert_in(self.public_project, self.view_only_link.nodes.all())\n assert_in(self.second_level_component, self.view_only_link.nodes.all())", "def merge_graph(self, other):\n self.add_nodes( (nLabel,nInfo) for nLabel,nInfo in other.nodes() )\n \n for nLabel,nInfo in other.nodes():\n for edgeLabel,edgeInfo in other.edgesFrom(nLabel):\n self.add_edge(edgeLabel,edgeInfo)", "def relate(self, other):\n ...", "def node_diff(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n if self.node_dict1 is None or self.node_dict2 is None:\n self.make_node_dict()\n # Initialize dictonaries to keep track of the nodes in respnse 1 and response 2\n g1={}\n g2={}\n # Set to keep track of the union of all curie ids\n curie_set = set()\n for curie in self.node_dict1.keys():\n g1[curie] = {}\n # intersection is only in the g1 dictionary\n g1[curie]['intersection'] = set()\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g1[curie]['node'] = set()\n curie_set.add(curie)\n for curie in self.node_dict2.keys():\n g2[curie] = {}\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g2[curie]['node'] = set()\n curie_set.add(curie)\n node_names1 = []\n node_names2 = []\n\n # extract all node ids (i.e. 
\"n0\",\"n1\",ect...)\n if len(self.input1['question_graph']['nodes'])>0:\n if 'id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['id'] for x in self.input1['question_graph']['nodes']]\n elif 'node_id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['node_id'] for x in self.input1['question_graph']['nodes']]\n if len(self.input2['question_graph']['nodes'])>0:\n if 'id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['id'] for x in self.input2['question_graph']['nodes']]\n elif 'node_id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['node_id'] for x in self.input2['question_graph']['nodes']]\n \n # initialize the result dictonary\n diff_dict = {}\n diff_dict[\"-1|-1\"] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # initialize node id tuple keys\n for id1 in node_names1:\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # iterate through answers\n for answer1 in self.input1['answers']:\n for answer2 in self.input2['answers']:\n for id1 in answer1['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer1['node_bindings'][id1], str):\n bindings1 = [answer1['node_bindings'][id1]]\n elif isinstance(answer1['node_bindings'][id1], list):\n bindings1 = answer1['node_bindings'][id1]\n for curie1 in bindings1:\n # store node id\n g1[curie1]['node'].add(id1)\n for id2 in answer2['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer2['node_bindings'][id2], str):\n bindings2 = [answer2['node_bindings'][id2]]\n elif isinstance(answer2['node_bindings'][id2], list):\n bindings2 = answer2['node_bindings'][id2]\n for curie2 in bindings2:\n # store node id\n g2[curie2]['node'].add(id2)\n if curie1 == curie2:\n # stor intersection tuple\n g1[curie1]['intersection'].add(id1+\"|\"+id2)\n # iterate through all curies\n for curie in curie_set:\n # check if curie is from answer 1\n if curie in g1.keys():\n # check if in intersection\n if len(g1[curie]['intersection'])>0:\n diff_dict[\"-1|-1\"]['intersection'] += [self.node_dict1[curie]]\n for id1 in node_names1:\n for id2 in node_names2:\n node_tuple = id1+\"|\"+id2\n if id1 in g1[curie]['node'] and id2 in g2[curie]['node']:\n diff_dict[node_tuple]['intersection'] += [self.node_dict1[curie]]\n elif id1 in g1[curie]['node']:\n diff_dict[node_tuple]['g1-g2'] += [self.node_dict1[curie]]\n elif id2 in g2[curie]['node']:\n diff_dict[node_tuple]['g2-g1'] += [self.node_dict1[curie]]\n # If not in intersection store in g1-g2\n else:\n diff_dict[\"-1|-1\"]['g1-g2'] += [self.node_dict1[curie]]\n for id1 in g1[curie]['node']:\n # iterate through all answer 2 ids\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2]['g1-g2'] += [self.node_dict1[curie]]\n # if not in g1 but in g2 then in g2-g1\n elif curie in g2.keys():\n diff_dict[\"-1|-1\"]['g2-g1'] += [self.node_dict2[curie]]\n for id2 in g2[curie]['node']:\n # iterate through all answer 1 ids\n for id1 in node_names1:\n diff_dict[id1+\"|\"+id2]['g2-g1'] += [self.node_dict2[curie]]\n return diff_dict", "def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n 
self.annotations.update(other.annotations)\n self.params.update(other.params)", "def test_create_get_delete_update_node(self):\n node_dict_1 = {\n 'host_name': 'abc',\n 'local_router_id': '1.1.1.1',\n 'as_num': 100,\n 'bgpls_id': '0.0.0.0',\n 'igp_id': '0.0.0.0'\n }\n node_dict_2 = {\n 'host_name': 'def',\n 'local_router_id': '2.2.2.2',\n 'as_num': 100,\n 'bgpls_id': '0.0.0.0',\n 'igp_id': '0.0.0.0'\n }\n\n # create two objects\n node1 = Node(**node_dict_1)\n node2 = Node(**node_dict_2)\n Node.create_object(self.database, node1.__dict__)\n Node.create_object(self.database, node2.__dict__)\n self.assertEqual(2, Node.count(self.database))\n\n # get one object\n node1 = Node.get_object(self.database, host_name='abc')\n self.assertEqual(node_dict_1['local_router_id'], node1.get('local_router_id'))\n\n # get objects\n nodes = Node.get_objects(self.database, as_num=100)\n self.assertEqual(2, len(nodes))\n\n # update one object\n self.assertEqual(0, Node.count(self.database, local_router_id='3.3.3.3'))\n node_db_obj = Node.update_object(\n self.database, {'local_router_id': '3.3.3.3'}, host_name='abc')\n self.assertEqual('3.3.3.3', node_db_obj.get('local_router_id'))\n self.assertEqual(1, Node.count(self.database, local_router_id='3.3.3.3'))\n\n # update more than objects\n self.assertEqual(2, Node.count(self.database, as_num=100))\n update_count = Node.update_objects(\n self.database, {'as_num': 200}, igp_id='0.0.0.0')\n self.assertEqual(2, update_count)\n self.assertEqual(2, Node.count(self.database, as_num=200))\n\n # delete objects\n Node.delete_object(self.database, host_name='abc')\n self.assertEqual(1, Node.count(self.database))", "def test_updated_nodes():\n assert_missing_node(10000)\n assert_cached_node(10001, (10.0, 40.0))\n assert_cached_node(10002, (10.1, 40.0))\n place_10001 = query_row(db_conf, 'osm_places', 10001)\n assert place_10001['name'] == 'Bar', place_10001\n place_10002 = query_row(db_conf, 'osm_places', 10002)\n assert place_10002['name'] == 'Baz', place_10002", "def make_link(Graph, node1, node2):\n if node1 not in Graph:\n Graph[node1] = {}\n (Graph[node1])[node2] = 1\n if node2 not in Graph:\n Graph[node2] = {}\n (Graph[node2])[node1] = 1\n return Graph", "def add_node_pairs(self, node_a,node_b):\r\n \r\n if node_b is not None : \r\n self.nodes[node_a].append(node_b)", "def copy(node1, node2):\n \n \n # ITERATE OVER ALL PARMS IN NODE1, AND CHECK IF THE PARM EXISTS IN NODE2\n for p in node1.parms():\n if node2.parm(p.name()):\n p2 = node2.parm(p.name())\n \n # TEMPORARILY CLEAR KEYFRAMES. WE WILL RESTORE THEM LATER IF THEY EXIST ON NODE1'S PARM\n p2.deleteAllKeyframes()\n \n # SEE IF WE CAN JUST SET THE EXPRESSION OF THE PARAMETER\n try:\n p2.setExpression(p.expression())\n except:\n # IF NOT, TRY SETTING THE UNEXPANDED STRING. 
IF THAT DOESN'T WORK, JUST SET THE EVAL VALUE OF THE PARM\n try:\n p2.set(p.unexpandedString())\n except:\n p2.set(p.eval())\n \n # SET KEYFRAMES IF THEY EXIST ON NODE1\n if p.keyframes():\n p2.setKeyframes(p.keyframes())", "def merge_nodes(self,n0,n1):\n # -- Sanity checks - does not yet allow for collapsing edges.\n\n # if they share any cells, would update the cells, but for now\n # just signal failure.\n n0_cells=list(self.node_to_cells(n0))\n n1_cells=list(self.node_to_cells(n1))\n cell_to_edge_cache={}\n\n for c in n1_cells:\n if c in n0_cells:\n print(\"cell %d common to both nodes\"%c)\n raise GridException(\"Not ready for merging nodes in the same cell\")\n # otherwise record and fix up below\n\n # while we're looping, cache the edges as they will\n # be mutated along the way.\n cell_to_edge_cache[c]=self.cell_to_edges(c).copy()\n\n # do they share an edge, but not already fixed in the above stanza?\n j=self.nodes_to_edge(n0,n1)\n if j is not None:\n raise GridException(\"Not ready for merging endpoints of an edge\")\n\n edge_map={} # index of superceded edge => superceding edge\n\n # Update edges of n1 to point to n0\n # if that would cause a duplicate edge, then the n1 version is deleted\n n1_edges=list(self.node_to_edges(n1)) # make copy since we'll mutate it\n for j in n1_edges:\n if self.edges['nodes'][j,0]==n1:\n nj=0\n elif self.edges['nodes'][j,1]==n1:\n nj=1\n else:\n assert False # sanity check\n newnodes=self.edges[j]['nodes'].copy()\n newnodes[nj]=n0\n # it's possible that this is an edge which already exists\n jother=self.nodes_to_edge(*newnodes)\n if jother is not None:\n # want to keep jother, delete j. but is there info on\n # cells which should be brought over?\n edge_map[j]=jother\n # wait to delete j until after cells have been moved to jother.\n else:\n self.log.debug(\"Modifying edge j=%d\"%j)\n self.modify_edge(j,nodes=newnodes)\n\n # -- Transition any cells. \n for c in n1_cells:\n # update the node list:\n cnodes=self.cell_to_nodes(c).copy()\n nc=list(cnodes).index(n1)\n cnodes[nc]=n0\n\n # Dangerous to use cell_to_edges, since it may\n # have to consult the edge topology, which is disrupted\n # in the above code. \n # cell_to_edges: first checks cells['edges'], may \n # go to cell_to_nodes(c): that's safe.\n # and nodes_to_edge\n # -> node_to_edges, which in turn may consult self.edges['nodes']\n\n #cedges=self.cell_to_edges(c).copy()\n cedges=cell_to_edge_cache[c]\n\n for ji,j in enumerate(cedges):\n if j in edge_map:\n # is there were edges['cells'] should be updated?\n\n # sever the edge=>cell pointer, to p\n # could just set to [-1,-1], but this keeps things very explicit\n # for debugging\n j_cells=list(self.edges['cells'][j])\n j_cells_side=j_cells.index(c)\n j_cells[ j_cells_side ] = -1\n self.modify_edge(j,cells=j_cells)\n\n # and modify the receiving edge, too\n jo=edge_map[j]\n jo_cells=list(self.edges['cells'][jo])\n # which side of jo? a bit tedious...\n if list(self.edges['nodes'][j]).index(n1) == list(self.edges['nodes'][jo]).index(n0):\n # same orientation\n jo_cells_side=j_cells_side\n elif list( self.edges['nodes'][j]).index(n1) == 1-list(self.edges['nodes'][jo]).index(n0):\n jo_cells_side=1-j_cells_side\n else:\n raise Exception(\"Failed in some tedium\")\n assert jo_cells[jo_cells_side]<0\n jo_cells[jo_cells_side]=c\n self.modify_edge(edge_map[j],cells=jo_cells)\n # yikes. 
any chance that worked?\n\n cedges[ji]=edge_map[j]\n\n # maybe this is where we'd update cells['edges'] too?\n self.modify_cell(c,nodes=cnodes,edges=cedges)\n\n for dead_edge in edge_map:\n self.delete_edge(dead_edge)\n\n self.delete_node(n1)", "def side_renaming(network1, network2):\n\n # There is probably faster way to perform this, optimize later if needed\n for i in range(len(network1.nodes)):\n \n if (network1.nodes[i][\"group\"] == \"#fcae91FF\"):\n network1.nodes[i][\"T1\"] = \"0\"\n\n elif (network1.nodes[i][\"group\"] == \"#7828a0FF\"):\n network1.nodes[i][\"T1\"] = \"1\"\n \n else:\n print(\"Error with group encoding!\")\n \n \n for i in range(len(network2.nodes)):\n \n if (network2.nodes[i][\"group\"] == \"#fcae91FF\"):\n network2.nodes[i][\"T2\"] = \"0\"\n \n elif (network2.nodes[i][\"group\"] == \"#7828a0FF\"):\n network2.nodes[i][\"T2\"] = \"1\"\n \n else:\n print(\"This should not be printed! Error with group encoding!\")\n\n return network1, network2", "def _update_with_node(self, node: Node) -> None:\n\t\t# Get and test name\n\t\tname = node.name\n\t\tif name not in self.node_names:\n\t\t\t# Add if not added\n\t\t\tself.node_names.append(name)\n\t\t\t# Modify attributes to say \"Attribute - \" in the front\n\t\t\tattrs: List[str] = []\n\t\t\tfor attr in node.attributes:\n\t\t\t\tattrs.append(\"Attribute - \" + attr.title())\n\t\t\t# Create set, use Node attributes as base\n\t\t\tself.subnode_names[name] = set(attrs)\n\n\t\t# Iterate over SubNodes\n\t\tfor subnode in node.subnodes:\n\t\t\t# Set and test name\n\t\t\ts_name = subnode.name\n\t\t\tself.subnode_names[name].add(s_name)\n\n\t\t# Iterate over nodes\n\t\tfor nested_node in node.nodes:\n\t\t\tself._update_with_node(nested_node)", "def update(self, initial, follows):", "def update(self):\n\n for node in self.nodes:\n for edge in node.edges:\n for i, edge_node in enumerate(edge.nodes):\n if edge_node.id != node.id:\n edge_node.add_edge(edge)\n\n return self", "def update_one_node_from_pbs_data(node, attr_dict):\n # put node under a subcluster if it does not have any yet\n if not node.subcluster:\n for id,node_regexp in SubCluster.objects.filter(server=node.server).values_list('id','node_regexp'):\n if re.match(node_regexp,node.name):\n node.subcluster_id = id\n node.save()\n break\n # fill node's np if it is not present\n if not node.np:\n node.np = attr_dict['np']\n node.save()\n\n new_states = []\n if attr_dict.has_key('state'):\n# node.state.clear()\n for statename in attr_dict['state'].split(','):\n #node.state.add(NodeState.objects.get(name=statename.strip()))\n new_states.append(NodeState.objects.get(name=statename.strip()))\n attr_dict['state'] = new_states\n\n\n new_properties = []\n if attr_dict.has_key('properties'):\n# node.properties.clear()\n for propertyname in attr_dict['properties'].split(','):\n np,created = NodeProperty.objects.get_or_create(name=propertyname.strip())\n if created:\n print(\"New property created: %s\" % propertyname)\n new_properties.append(np)\n# node.properties.add(np)\n attr_dict['properties'] = new_properties\n\n new_jobs = []\n if attr_dict.has_key('jobs'):\n slot_jobs = dict([tuple(j.strip().split('/')) for j in attr_dict['jobs'].split(',')])\n for slotstr, longjobid in slot_jobs.items():\n slot = int(slotstr)\n# js,created = getJobSlot(slot=slot,node=node)\n# if created:\n# logging.info(\"new jobslot will be created: slot: %d, node name: %s\" % (slot,name))\n jobid = int(longjobid.split('.')[0])\n new_jobs.append(jobid)\n \n# js.livejob,created = 
LiveJob.objects.get_or_create(jobid=jobid, server=node.server)\n# if created:\n# logging.info(\"new livejob created: %d\" % jobid)\n# js.save()\n attr_dict['jobs'] = new_jobs\n return attr_dict", "def update(self, other):\n fields = None\n if isinstance(other, dict):\n fields = other\n elif isinstance(other, Torrent):\n fields = other.fields\n else:\n raise ValueError('Cannot update with supplied data')\n for k, v in fields.iteritems():\n self.fields[k.replace('-', '_')] = v", "def test_allow_multiples(self):\r\n o1 = self.b1.get(self.key)\r\n o2 = self.b2.get(self.key)\r\n\r\n o1.set_data(\"object-1\")\r\n o1.store()\r\n o2.set_data(\"object-2\")\r\n o2.store()\r\n\r\n conflicted = self.b1.get(self.key)\r\n siblings = filter(bool, (s.get_data() for s in conflicted.get_siblings()))\r\n self.assertEqual(len(siblings), 2)", "def update_node2edge(self):\n self.node2edge = {e.child : e for e in self.edge}\n childrenset = set(self.node2edge.keys())\n rootset = set(e.parent for e in self.edge).difference(childrenset)\n if len(rootset) > 1:\n raise Warning(\"there should be a single root: \" + str(rootset))\n if len(rootset) == 0:\n raise Exception(\"there should be at least one root!\")\n self.root = rootset.pop()", "def update_nodes(nodes, bb):\n \n for node in nodes:\n node.set(\"label\", update_bb_string(node.get_attributes()[\"label\"], bb))\n node.set_name(update_node_name(node.get_name(), bb))", "def test_link_attribute_update(self):\n entries = {\n 'cn=huidige leden,ou=groups,dc=esmgquadrivium,dc=nl': {'cn': ['Huidige leden']},\n 'cn=agroup,ou=groups,dc=esmgquadrivium,dc=nl': {'cn': ['agroup']},\n 'uid=aperson,ou=people,dc=esmgquadrivium,dc=nl': {'uid': ['aperson']},\n }\n actual = clone(entries, link_attribute='linkID')\n person_id = Person.objects.first().id\n group1_id = QGroup.objects.get(name='Huidige leden').id\n group2_id = QGroup.objects.get(~Q(name='Huidige leden')).id\n\n expect = [ModifyOperation('uid=aperson,ou=people,dc=esmgquadrivium,dc=nl', 'linkID', [person_id]),\n ModifyOperation('cn=huidige leden,ou=groups,dc=esmgquadrivium,dc=nl', 'linkID', [group1_id]),\n ModifyOperation('cn=agroup,ou=groups,dc=esmgquadrivium,dc=nl', 'linkID', [group2_id])]\n self.assertCountEqual(expect, actual)", "def remap_nodes(self, new_node_mapping):\n # because all nodes are SchemaNodeIDs (i.e. 
objects), we only need to reassign nodes one way\n # changes propagate to chains, chain root_nodes, and parents automatically\n for chain in self.chains:\n for edge in chain:\n head, tail = edge\n if head in new_node_mapping.keys():\n head.value = new_node_mapping[head]\n if tail in new_node_mapping.keys():\n tail.value = new_node_mapping[tail]", "def update_rec(self):\n import copy\n \n self.leftrec, self.rightrec = copy.copy(self.rec), copy.copy(self.rec)\n self.leftrec[2*self.dim + 1], self.rightrec[2*self.dim] = self.node.dimension[self.dim], self.node.dimension[self.dim]", "def breadth_first_update(self, extra_roots=[], extra_updated=set()):\n queue = []\n updated = extra_updated\n for k in self.__node_dict.keys():\n if len(self.__node_dict[k].inputs) == 0:\n queue.append(self.__node_dict[k])\n queue.extend(extra_roots)\n while (len(queue) != 0):\n node_to_update = queue.pop(0)\n # print('update {}'.format(node_to_update.uid))\n if node_to_update not in updated:\n node_to_update.update()\n updated.add(node_to_update)\n for element in node_to_update.outputs:\n child = element['to_node']\n if all([i['from_node'] in updated for i in child.inputs]):\n queue.append(child)\n # print('----done----')", "def update(self) -> None:\n\t\t# Clear attributes that will be updates\n\t\tself.node_names: List[str] = []\n\t\tself.subnode_names: Dict[str, Set[str]] = {}\n\t\t# Iterate over RootNodes\n\t\tname: str\n\t\ts_name: str\n\t\tfor rootnode in self.root_nodes:\n\t\t\t# Iterate over Nodes\n\t\t\tfor node in rootnode.nodes:\n\t\t\t\tself._update_with_node(node)\n\t\t\tif len(rootnode.subnodes):\n\t\t\t\t# Create Set in subnode_names for the RootNode's SubNodes\n\t\t\t\tself.subnode_names[rootnode.name] = set()\n\t\t\t\t# Iterate over SubNodes\n\t\t\t\tfor subnode in rootnode.subnodes:\n\t\t\t\t\tself.subnode_names[rootnode.name].add(subnode.name)", "def join_nodes_in_both_trees(tree1, nodeAinT1, cladeA,\n tree2, nodeBinT2, cladeB, test=False):\n cladeA = set(cladeA)\n cladeB = set(cladeB)\n leaves1 = get_leaf_set(tree1)\n leaves2 = get_leaf_set(tree2)\n\n cladeAisT1 = leaves1 == cladeA\n cladeBisT2 = leaves2 == cladeB\n\n # Handle adding all of tree1 into tree 2 and vice versa!!\n if cladeAisT1 and cladeBisT2:\n # Done\n print(\"Nodes are tree1 and tree2...\")\n if test:\n return [None, None]\n root = dendropy.Node()\n root.add_child(nodeAinT1)\n root.add_child(nodeBinT2)\n tree1 = dendropy.Tree(seed_node=root)\n tree1.is_rooted = True\n tree2 = None\n elif cladeAisT1:\n # Add all of tree 1 into tree 2\n print(\"Add all of tree 1 into tree 2\")\n if test:\n return [None, None]\n [tree2, nodeBinT2] = extract_nodes_from_split(tree2, nodeBinT2,\n cladeB)\n root = dendropy.Node()\n root.add_child(nodeAinT1)\n root.add_child(tree2.seed_node)\n tree1 = dendropy.Tree(seed_node=root)\n tree1.is_rooted = True\n tree2 = None\n elif cladeBisT2:\n # Add all of tree 2 into tree 1\n print(\"Add all of tree 2 into tree 1\")\n if test:\n return [None, None]\n [tree1, nodeAinT1] = extract_nodes_from_split(tree1, nodeAinT1,\n cladeA)\n root = dendropy.Node()\n root.add_child(tree1.seed_node)\n root.add_child(nodeBinT2)\n tree1 = dendropy.Tree(seed_node=root)\n tree1.is_rooted = True\n tree2 = None\n else:\n # Make the join!\n print(\"Making join...\")\n [tree1, nodeAinT1] = extract_nodes_from_split(tree1, nodeAinT1,\n cladeA)\n [tree2, nodeBinT2] = extract_nodes_from_split(tree2, nodeBinT2,\n cladeB)\n\n root1 = dendropy.Node()\n root1.add_child(tree1.seed_node)\n root1.add_child(deepcopy(nodeBinT2)) # TODO: Remove 
deep copies!\n tree1 = dendropy.Tree(seed_node=root1)\n tree1.is_rooted = True\n\n root2 = dendropy.Node()\n root2.add_child(tree2.seed_node)\n root2.add_child(deepcopy(nodeAinT1)) # TODO: Remove deep copies!\n tree2 = dendropy.Tree(seed_node=root2)\n tree2.is_rooted = True\n\n return [tree1, tree2]", "def test_set_node_second_level_component_without_first_level_parent(self):\n payload = {\n 'data': [\n {\n 'type': 'nodes',\n 'id': self.second_level_component._id\n },\n ]\n }\n res = self.app.post_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)\n self.view_only_link.reload()\n assert_equal(res.status_code, 201)\n assert_equal(len(res.json['data']), 2)\n assert_in(self.public_project, self.view_only_link.nodes.all())\n assert_in(self.second_level_component, self.view_only_link.nodes.all())", "def _swap(self, node1, node2):\n arr = self._array\n arr[node1._index], arr[node2._index] = arr[node2._index], \\\n arr[node1._index]\n # Swap indices stored in nodes as well\n node1._index, node2._index = node2._index, node1._index", "def make_node_dict(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n self.node_dict1 = {}\n for node in self.input1['knowledge_graph']['nodes']:\n self.node_dict1[node['id']] = node\n self.node_dict2 = {}\n for node in self.input2['knowledge_graph']['nodes']:\n self.node_dict2[node['id']] = node", "def other(self, node):\n if node == self.__node_a:\n return self.__node_b\n elif node == self.__node_b:\n return self.__node_a", "def merge(self, other_btree):\n pass", "def union(self, node1, node2):\n\n root1 = self.root(node1)\n root2 = self.root(node2)\n\n if root1 == root2:\n return\n\n if node1 < node2:\n self.set[root2] = root1\n self.root(node2)\n else:\n self.set[root1] = root2\n self.root(node1)", "def update_nodes(nodes, sc, organization, org_id, site_names):\n for node in nodes:\n print(\"=\" * 75)\n print(\"Node:\", node[\"id\"], node[\"serial\"], node[\"model\"])\n print(\"org:\", node[\"org\"], organization)\n print(\"site:\", node[\"site\"])\n print(\"location:\", node[\"location\"])\n\n site_id = node[\"site\"]\n site_name = site_names[site_id]\n print(\"\\nSetting location to '{}'\".format(site_name))\n node[\"location\"] = site_name\n result = sc.put(\"node/\" + node[\"id\"], data=node)\n print(\"updated location:\", result[\"location\"])\n print(\"Response:\", sc.response.status_code, sc.response.reason, \"\\n\")\n print()", "def join_nodes_in_one_tree(tree1, nodeAinT1, cladeA, tree2, nodeBinT2,\n cladeB):\n [tree1, nodeAinT1] = extract_nodes_from_split(tree1, nodeAinT1, cladeA)\n [tree2, nodeBinT2] = extract_nodes_from_split(tree2, nodeBinT2, cladeB)\n\n root = dendropy.Node()\n root.add_child(deepcopy(nodeAinT1)) # TODO: Remove deep copies!\n root.add_child(tree2.seed_node)\n tree2 = dendropy.Tree(seed_node=root)\n tree2.is_rooted = True\n\n return [tree1, tree2]", "def update_one_node(node):\n conn = pbs.pbs_connect(node.server.name.encode('iso-8859-1', 'replace'))\n if conn==-1:\n logging.error(\"Cannot connect to %s - live data will be missing\" % server.name)\n return\n statnodes = pbs.pbs_statnode(conn, node.name.encode('iso-8859-1', 'replace') , [], \"\")\n pbs.pbs_disconnect(conn)\n\n if len(statnodes)==0:\n logging.error(\"pbs_statnode failed for node: %s\" % node.name)\n return\n if len(statnodes)>1:\n logging.warning(\"pbs_statnode returned more than one records for node: %s\" % node.name)\n\n attr_dict = dict([ (x.name,x.value) for x in 
statnodes[0].attribs])\n update_one_node_from_pbs_data(node, attr_dict)\n node.save()", "def union(node1, node2):\n node1_root = find(node1)\n node2_root = find(node2)\n if node1_root == node2_root:\n return\n if node1_root.rank < node2_root.rank:\n node1_root.parent = node2_root\n elif node2_root.rank > node2_root.rank:\n node2_root.parent = node1_root\n else:\n node2_root.parent = node1_root\n node1_root.rank = node1_root.rank + 1", "def update(self, edges) -> None:\n for v1, v2 in edges:\n self.add(v1, v2)", "def recoverTree(self, root: TreeNode) -> None:\n self.inorder(root)\n self.first.val, self.second.val = self.second.val, self.first.val", "def modify_d2(d1, d2):\n val_list = [i for i in d2.keys()]\n \n for key in val_list:\n for i in range(len(d2[key])):\n try:\n val = d1[d2[key][i][2]]\n d2[key][i][2] = val\n if None in d2[key][i]:\n d2[key][i].remove(None)\n except:\n pass\n return d2", "def set_both_connections(self, new_node):\n distance_to_new = self.current_node.distance_between(new_node.location)\n self.current_node.set_adjacent_from_direction(distance_to_new, new_node)\n reverse_distance = new_node.distance_between(self.current_node.location)\n new_node.set_adjacent_from_direction(reverse_distance, self.current_node)", "def recoverTree(self, root: Optional[TreeNode]) -> None:\n self.inorder(root)\n self.first.val,self.second.val=self.second.val,self.first.val", "def union(self, node1, node2):\n root1 = self.find(node1)\n root2 = self.find(node2)\n if root1 != root2: # only merge if the connected components differ\n if self.ranks[root1] > self.ranks[root2]:\n self.parents[root2] = root1\n else:\n self.parents[root1] = root2\n if self.ranks[root1] == self.ranks[root2]:\n self.ranks[root2] += 1", "def sub_graph_merging(self):", "def replace_values(dfg1, dfg2):\r\n for edge in dfg1:\r\n if edge in dfg2:\r\n dfg1[edge] = dfg2[edge]\r\n return dfg1", "def _merge_two(self, obj1, obj2):\r\n for uniq_ident in obj2.keys():\r\n if (uniq_ident not in obj1) \\\r\n or (obj1[uniq_ident]['modified'] \\\r\n < obj2[uniq_ident]['modified']):\r\n obj1[uniq_ident] = obj2[uniq_ident]\r\n\r\n return obj1 # self._dict_to_list(obj1)\r", "def _fields_sync(self, values):\n # 1. From UPSTREAM: sync from parent\n if values.get('parent_id') or values.get('type') == 'contact':\n # 1a. Commercial fields: sync if parent changed\n if values.get('parent_id'):\n self._commercial_sync_from_company()\n # 1b. Address fields: sync if parent or use_parent changed *and* both are now set\n if self.parent_id and self.type == 'contact' and self.is_company == False:\n onchange_vals = self.onchange_parent_id().get('value', {})\n self.update_address(onchange_vals)\n\n # 2. 
To DOWNSTREAM: sync children\n self._children_sync(values)", "def _add(self, node1, node2):\r\n\r\n self._graph[node1].add(node2)", "def _update_input_after_create_node(self):\n for node in self._normal_node_map.values():\n for src_node_id, input_attr in dict(node.inputs).items():\n node.delete_inputs(src_node_id)\n if not self._is_node_exist(node_id=src_node_id):\n message = f\"The input node could not be found by node id({src_node_id}) \" \\\n f\"while updating the input of the node({node})\"\n logger.warning(message)\n\n continue\n\n src_node = self._get_normal_node(node_id=src_node_id)\n input_attr['shape'] = src_node.output_shape\n input_attr['data_type'] = src_node.output_data_type\n node.add_inputs(src_name=src_node.name, input_attr=input_attr)", "def set_second_incident_node(self, second_incident_node):\n # overwrite the existing second incident node with the input second incident Node object\n self.second_incident_node = second_incident_node", "def update_to(self, new):\r\n if self.idhex != new.idhex:\r\n plog(\"ERROR\", \"Update of router \"+self.nickname+\"changes idhex!\")\r\n for i in new.__dict__.iterkeys():\r\n if i == \"refcount\" or i == \"_generated\": continue\r\n self.__dict__[i] = new.__dict__[i]", "def update(self, other):\n for (ngram, value) in other.items():\n self[ngram] = value", "def _mutate_node(self, node):\n self.idx += 1\n\n if self.idx != self.r:\n return\n\n # Exclude some things like signatures, etc.\n exclusions = ['signature', 'crc']\n for ex in exclusions:\n if ex in node._pfp__name.lower():\n return\n\n if type(node) == pfp.fields.Dom:\n return\n elif self._base_name(node) == 'Struct':\n # This is a container, interested in\n # its children nodes\n return\n elif self._base_name(node) == 'Array':\n print(\"%s is an Array of %s (%s)\" % (node._pfp__name,\n node.field_cls, node.width))\n # I can change the data at once:\n node.raw_data = \"cacaca\"\n\n # Or iterate through its elements:\n # for e in node:\n # e._pfp__set_value(e._pfp__value + 1)\n else:\n # CORE TYPE\n # This is supposed to cast\n print('CORE TYPE?')\n node._pfp__set_value(1337)", "def _traverse_1_0_1(item, nodes):\n if 'content' in item.keys():\n ids = []\n for node in item['content']:\n nodes[node['id']] = node\n ids.append(node['id'])\n _traverse_1_0_1(node, nodes)\n item['content'] = ids", "def update(self):\n diff = self._diff()\n if not diff:\n # Nothing to do!\n return\n self.parent.update_node(self, diff)", "def merge(self, other):\n\n for child in other.children:\n self.add_deep_copy_of(child, merged=True)", "def swap(self, subtree_a, subtree_b):\n\n temp1 = subtree_a.parent\n temp2 = subtree_b.parent\n\n temp1.children[temp1.children.index(subtree_a)] = subtree_b\n temp2.children[temp2.children.index(subtree_b)] = subtree_a\n \n subtree_a.parent = temp2\n subtree_b.parent = temp1\n\n self.propogate_subtree(subtree_a)\n self.propogate_subtree(subtree_b)", "def connect_one_way(node1, node2, weight):\n node1.add_or_update_neighbour(node2, weight)", "def connect_both(node1, node2, weight):\n connect_one_way(node1, node2, weight)\n connect_one_way(node2, node1, weight)", "def test_updated_nodes1():\n road = query_row(db_conf, 'osm_roads', 60000)\n assert_almost_equal(road['geometry'].length, 14035.61150207768)", "def relations_from(self, start_node):", "def prepare_node_attrs(self):", "def describe_update(node_before: Node, node_after: Node) -> UpdateDescription:\n\n before_des = node_before.to_text()\n after_des = node_after.to_text()\n\n if node_before.algorithm != 
node_after.algorithm:\n if node_before.operation != node_after.operation:\n diff = \"This step in the first query performs \" + node_before.algorithm.lower() + \\\n \", but the second one performs \" + node_after.algorithm.lower() + \".\"\n else:\n diff = \"This step in both queries perform \" + node_before.operation.lower() + \\\n \". However, in the first query \"+ node_before.algorithm.lower() + \" is performed\" + \\\n ', and in the second query ' + node_after.algorithm.lower() + \" is performed\" + \".\"\n else:\n diff = \"This step in both queries perform \" + node_before.algorithm.lower() + \", but \"\n differences = [] # differences between node1 and node2\n node1_special = [] # something node1 has but node2 doesn't have\n node2_special = [] # something node2 has but node1 doesn't have\n if not _is_output_name_same(node_before, node_after):\n differences.append(\"output name\")\n for attr in interested_attrs:\n if _exists_attr(attr, node_before, node_after) == 1:\n differences.append(attr)\n node1_special.append(attr)\n elif _exists_attr(attr, node_before, node_after) == 2:\n differences.append(attr)\n node2_special.append(attr)\n elif _exists_attr(attr, node_before, node_after) == 12 and \\\n node_before.attributes[attr] != node_after.attributes[attr]:\n differences.append(attr)\n differences = [d.lower() for d in differences]\n if len(differences) == 1:\n diff += differences[0] + \" is different. \"\n else:\n diff += \", \".join(differences[:-1])\n diff += \" and \" + differences[-1] + \" are different. \"\n\n node1_special_key = [s1.lower() for s1 in node1_special]\n node1_special_value = [\n node_before.attributes[s1]\n for s1 in node1_special\n ]\n node2_special_key = [s2.lower() for s2 in node2_special]\n node2_special_value = [\n node_after.attributes[s1]\n for s1 in node2_special\n ]\n node1_display = [\n \" \".join([k,v])\n for k, v in zip(node1_special_key, node1_special_value)\n ]\n node2_display = [\n \" \".join([k, v])\n for k, v in zip(node2_special_key, node2_special_value)\n ]\n if len(node1_display) == 1:\n diff += \"Query 1 has \" + node1_display[0] + \\\n \", but query 2 doesn't have \" + node1_special_key[0] + \". \"\n if len(node1_display) > 1:\n diff += \"Query 1 has \" + \", \".join(node1_display[:-1]) + \\\n \" and \" + node1_display[-1] +\", but query 2 doesn't have them. \"\n\n if len(node2_display) == 1:\n diff += \"Query 2 has \" + node2_display[0] + \\\n \", but query 1 doesn't have \" + node2_special_key[0] + \". \"\n if len(node2_display) > 1:\n diff += \"Query 2 has \" + \", \".join(node2_display[:-1]) + \\\n \" and \" + node2_display[-1] + \", but query 1 doesn't have them. 
\"\n\n return UpdateDescription(before_des, after_des, diff)", "def test_changes_being_propagated(self):\n self.create_master_and_slaves()\n self.node.title=\"Changed Title\"\n self.node.save(propagate=True)", "def test9_test_from_list_with_update():\n with open(\"./Output/should_be_yed.test9_test_from_list_with_update.graphml\", \"r\") as f:\n expected_output = f.read() \n yed_diagram = create_yed_diagram(node_duplicates=\"update\")\n data1 = [\n {\n \"source\": \"switch-1\",\n \"src_label\": \"GigabitEthernet4/6\",\n \"target\": {\n \"bottom_label\": \"\",\n \"id\": \"switch-2\",\n \"top_label\": \"10.13.1.7\"\n },\n \"trgt_label\": \"GigabitEthernet1/5\"\n },\n {\n \"source\": \"switch-1\",\n \"src_label\": \"GigabitEthernet1/1\",\n \"target\": {\n \"bottom_label\": \"\",\n \"id\": \"switch-3\",\n \"top_label\": \"10.17.14.1\"\n },\n \"trgt_label\": \"GigabitEthernet0/1\"\n },\n {\n \"source\": \"switch-1\",\n \"src_label\": \"GigabitEthernet1/2\",\n \"target\": {\n \"bottom_label\": \"\",\n \"id\": \"switch-4\",\n \"top_label\": \"10.17.14.2\"\n },\n \"trgt_label\": \"GigabitEthernet0/10\"\n }\n ]\n data2 = [\n {\n \"source\": \"switch-2\",\n \"src_label\": \"GigabitEthernet1/5\",\n \"target\": {\n \"bottom_label\": \"\",\n \"id\": \"switch-1\",\n \"top_label\": \"10.13.1.17\"\n },\n \"trgt_label\": \"GigabitEthernet4/6\"\n }\n ]\n yed_diagram.from_list(data1)\n yed_diagram.from_list(data2)\n ret = yed_diagram.dump_xml()\n assert normalize_xml(ret) == normalize_xml(expected_output)", "def _match_identical_nodes(self):\n\n for job_name_b in self._topo_b_nodes:\n for job_name_a in self._unresolved_a_nodes:\n if self._is_node_identical(job_name_a, job_name_b):\n self._identical_nodes[job_name_b] = job_name_a\n self._unresolved_a_nodes.remove(job_name_a)\n self._unresolved_b_nodes.remove(job_name_b)\n break", "def merge_two_personroot_nodes(left_personroot_node: Node, right_personroot_node: Node) -> None:\n global _graph\n\n if left_personroot_node is None or right_personroot_node is None:\n print('merge_two_personroot_nodes(): Error: (one of the) nodes is None.')\n return\n\n if left_personroot_node['name'] != 'person-root' \\\n or right_personroot_node['name'] != 'person-root':\n print('merge_two_personroot_nodes(): not anticipated: (one of the) nodes '\n + 'are not \"person-root\".')\n return\n\n if left_personroot_node == right_personroot_node:\n # They are already connected, we are done.\n return\n\n # There are two possible reasons why it can happen that two person-root nodes\n # of two nodes to insert are different:\n # (1) It can happen e.g. in case a personal ID (ISNI, ORCID, etc.) is assigned\n # to two or more different persons.\n # Of course, that should not happen. Most probably this in a typo in a source system.\n # (2) The two nodes refer to the same person, but originate from different source\n # systems.\n # E.g. harvest of system 1 results in ORCID and ISNI of the same person, which have a\n # common person-root. Harvest of system 2 results in EMAIL with another person-root.\n # Now a subsequent harvest results in ORCID and EMAIL of the same person. 
Then there\n # are two different person-roots which need to be merged.\n # Both can happen, but we cannot know if it is either (1) or (2).\n\n now = datetime.now()\n timestamp = now.strftime('%Y%m%d-%H%M%S')\n count = 0\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += 'Merged person-root node \"'\n what_happened += right_personroot_node['_key'] + '\" to this person-root node '\n what_happened += 'and then deleted it. This was the history of the deleted node:'\n left_personroot_node['_history'].append(what_happened)\n for history in right_personroot_node['_history']:\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += history\n left_personroot_node['_history'].append(what_happened)\n\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += 'End of history of the deleted node.'\n left_personroot_node['_history'].append(what_happened)\n\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += 'These were the neighbors of the deleted node, '\n what_happened += 'now merged with the neighbors of this node:'\n left_personroot_node['_history'].append(what_happened)\n\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n for edge_from_right_node in get_edges(right_personroot_node):\n right_node = edge_from_right_node.end_node\n if right_node is None:\n continue\n if right_node == right_personroot_node:\n continue\n\n what_happened += '\"' + str(right_node['_key']) + '\" '\n edge_delete1 = LINKS_TO(right_personroot_node, right_node)\n edge_delete2 = LINKS_TO(right_node, right_personroot_node)\n edge_create1 = LINKS_TO(left_personroot_node, right_node)\n edge_create2 = LINKS_TO(right_node, left_personroot_node)\n # _graph.delete() also deletes 'right_personroot_node'.\n # TODO: There seems to be a bug here. It does not only delete 'right_personroot_node', but sometimes it also\n # deletes other nodes which have more than one edge, such as an 'organization' node connected to multiple\n # person-root nodes (including right_personroot_node).\n # The problem is that _graph.separate() does not seem to work, which seems to be the 'best' function\n # since it only deletes edges. 
Use with caution (or don't use).\n _graph.delete(edge_delete1)\n _graph.delete(edge_delete2)\n _graph.merge(edge_create1 | edge_create2, 'RCGNode', '_key')\n\n what_happened += '.'\n left_personroot_node['_history'].append(what_happened)\n\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += 'End of list of neighbors of the deleted node.'\n left_personroot_node['_history'].append(what_happened)\n _graph.push(left_personroot_node)\n return", "def update_nodes(self):\n raise NotImplementedError('ERROR: sweeper has to implement update_nodes(self)')", "def merge(self, g1, g2):\n logger = logging.getLogger(__name__)\n \n \n g = BaseGraph()\n g.copy_graph_from(g1)\n\n plwn2sumo_dict = defaultdict(set)\n plwn2sumo_dict = self.get_plwn2sumo_dict()\n\n synset_on_vertex_dict = {}\n for node in g.all_nodes():\n synset_id = node.synset.synset_id\n if synset_id in synset_on_vertex_dict:\n logger.warning(\"ID of some synset is not unique.\")\n continue\n synset_on_vertex_dict[synset_id] = node\n\n num_of_edge = 0\n for edge in g2.all_edges():\n num_of_edge += 1\n logger.info(\"%d/%d\", num_of_edge, g2.num_edges())\n\n parent_sumo_concept = edge.source().sumo\n child_sumo_concept = edge.target().sumo\n\n if parent_sumo_concept not in plwn2sumo_dict:\n logger.warning(\"The mapping file doesn't contain sumo concept '%s'.\", parent_sumo_concept)\n continue\n if child_sumo_concept not in plwn2sumo_dict:\n logger.warning(\"The mapping file doesn't contain sumo concept '%s'.\", child_sumo_concept)\n continue\n\n for parent_syn_id in plwn2sumo_dict[parent_sumo_concept]:\n if parent_syn_id not in synset_on_vertex_dict:\n logger.warning(\"The mapping file contains synset '%d' that is not in the graph.\", parent_syn_id)\n continue\n p_node = synset_on_vertex_dict[parent_syn_id]\n for child_syn_id in plwn2sumo_dict[child_sumo_concept]:\n if child_syn_id not in synset_on_vertex_dict:\n logger.warning(\"The mapping file contains synset '%d' that is not in the graph.\", child_syn_id)\n continue\n ch_node = synset_on_vertex_dict[child_syn_id]\n \n g.add_edge(p_node,\n ch_node,\n [(\"rel\", edge.rel)],\n simply=True)\n \n\n return g", "def test_grouping(self):\n n1 = self.create(NodeItem, UML.Node)\n n2 = self.create(NodeItem, UML.Node)\n\n self.group(n1, n2)\n\n assert n2.subject in n1.subject.nestedNode\n assert n1.subject not in n2.subject.nestedNode", "def update_node(self, node):\n return node.update()", "def relations_to(self, end_node):", "def merge_node(**kwargs):\n\n if kwargs['TypeName'] is None:\n kwargs['TypeName'] = 'None'\n\n kwargs['Hash'] = get_node_hash(kwargs)\n\n if '(*)' in kwargs['TypeDefinition']:\n # The type contains a pointer to a function prototype\n # args['TypeDefinition'] = HRESULT (*)(IRpcChannelBuffer *, RPCOLEMESSAGE *, ULONG *)\n # args['TypeName'] = SendReceive\n index = kwargs['TypeDefinition'].find('(*)')\n TypeDefinition = kwargs['TypeDefinition'][:index + 2] + kwargs['TypeName'] \\\n + ')' + kwargs['TypeDefinition'][index + 3:]\n kwargs['TypeName'] = ';'\n kwargs['TypeDefinition'] = TypeDefinition\n\n if not nodes_cache.get(kwargs['Hash']):\n nodes_cache.update({kwargs['Hash']: (kwargs['TypeDefinition'], kwargs['TypeName'], kwargs['NodeLabel'])})\n if kwargs['StartNodeHash'] != kwargs['Hash']:\n relationships_cache.add(kwargs['StartNodeHash'] + \" \" + kwargs['Hash'] + \" \" + kwargs['RelationshipType'])\n\n return kwargs['Hash']", "def test_node_pointing_properties():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl 
right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n assert a.left.val == 7\n assert a.right.val == 42", "def testMergeNoEdges():\n\n n1 = DummyNode(x=1, y=2, z=4)\n n2 = DummyNode(x=1, y=2, z=3)\n\n assert n1.z == 4\n\n n1.merge_with(n2)\n\n assert n1.z == 3", "def merge_personroots_of_two_nodes(name1: str, value1: str,\n name2: str, value2: str) -> None:\n left_node = read_node(name=name1, value=value1)\n right_node = read_node(name=name2, value=value2)\n if left_node is None or right_node is None:\n return\n\n left_personroot_node = get_or_create_personroot_node(person_node=left_node)\n right_personroot_node = get_or_create_personroot_node(person_node=right_node)\n if left_personroot_node is None or right_personroot_node is None:\n return\n\n if left_personroot_node == right_personroot_node:\n # They are already connected, we are done.\n return\n\n merge_two_personroot_nodes(left_personroot_node=left_personroot_node,\n right_personroot_node=right_personroot_node)\n return", "def add(element1, element2):\n \n newtag = Tag.addTags(element1.tag, element2.tag)\n \n #have to wrap the attributes in dict() to avoid a bus error\n newattribs = Attrib.addAttribs(dict(element1.attrib), dict(element2.attrib))\n \n element1.tag = newtag\n element1.text = Text.addText(element1.text, element2.text)\n element1.tail = Text.addText(element1.tail, element2.tail)\n \n for i in element1.attrib:\n del element1.attrib[i]\n for key in newattribs.keys():\n try:\n element1.set(key, newattribs[key])\n except TypeError:\n log = logging.getLogger()\n log.error('TypeError: %s' % str(sys.exc_info()[1]))\n log.error('key = %s\\tnewattribs[key] = %s' % (str(key), str(newattribs[key])))\n raise\n \n return element1", "def swap_nodes(self, a, b):\n if a == b:\n return\n if len(self) < 2:\n return\n\n nodeA = nodeB = None\n curr_node = self._header\n\n while curr_node is not None and not (nodeA and nodeB):\n if curr_node._element == a and not nodeA:\n nodeA = curr_node\n elif curr_node._element == b and not nodeB:\n nodeB = curr_node\n curr_node = curr_node._next\n\n if curr_node is None:\n raise Empty(\"Not in list\")\n\n precessorA = nodeA._prev\n successorA = nodeA._next\n precessorB = nodeB._prev\n successorB = nodeB._next\n\n precessorA._next = successorA._prev = nodeB\n precessorB._next = successorB._prev = nodeA\n\n nodeA._prev, nodeB._prev = nodeB._prev, nodeA._prev\n nodeA._next, nodeB._next = nodeB._next, nodeA._next", "def add_node(old_node_dict, old_to_new_node_ids_dict, new_accession, new_db_api, aliases):\n\n # getting the old node id, and the old node's properties\n old_node_id = old_node_dict[\"id\"]\n old_node_alt_accession = old_node_dict[\"alt_accession\"]\n old_node_name = old_node_dict[\"name\"]\n tax_id = old_node_dict[\"tax_id\"]\n pathways = old_node_dict[\"pathways\"]\n\n if aliases:\n aliases += \"|\" + old_node_dict[\"name\"]\n else:\n aliases = old_node_dict[\"name\"]\n\n if old_node_dict[\"aliases\"]:\n aliases += \"|\" + old_node_dict[\"aliases\"]\n\n new_node_dict = {\n \"name\" : new_accession,\n \"alt_accession\" : old_node_alt_accession,\n \"tax_id\" : tax_id,\n \"pathways\" : pathways,\n \"aliases\" : aliases,\n \"topology\": \"\"\n }\n\n # inserting the node to the PSI-MI SQLite\n new_db_api.insert_unique_node(new_node_dict)\n new_node_dict['id'] = new_db_api.last_row_id\n # getting the new last row id of the inserted node\n new_node_id = new_node_dict['id']\n\n # if the node maps to more than one swissprot uniprot id it will be inserted for every swissprot id and\n # this 
function will be called for every insertion\n if not old_to_new_node_ids_dict.has_key(old_node_id):\n old_to_new_node_ids_dict[old_node_id] = [new_node_id]\n else:\n old_to_new_node_ids_dict[old_node_id].append(new_node_id)", "def addGeounitNodes(node1, node2):\n \n from operator import add\n \n argsDict = {} \n argsDict[\"raw\"] = node1.raw + node2.raw\n argsDict[\"raw_housing\"] = node1.raw_housing + node2.raw_housing\n if node1.syn and node2.syn:\n argsDict[\"syn\"] = node1.syn + node2.syn\n if node1.cons and node2.cons:\n argsDict[\"cons\"] = addConstraints(node1.cons,node2.cons)\n else:\n argsDict[\"cons\"] = {}\n if node1.invar and node2.invar:\n argsDict[\"invar\"] = addInvariants(node1.invar,node2.invar)\n else:\n argsDict[\"invar\"] = {}\n argsDict[\"geocodeDict\"] = node1.geocodeDict\n \n aggregatedNode = nodes.geounitNode(node1.geocode, **argsDict)\n \n return aggregatedNode", "def headsofunion(h1, h2):\n res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)\n return {ctx.node() for ctx in res}", "def _replace(self, x, y):\n y.parent = x.parent\n if x is self.root:\n self.root = y\n return\n elif x is x.parent.left:\n x.parent.left = y\n else:\n x.parent.right = y\n\n self.update(y, -1)", "def connect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n app.edges[src_id].add(trg_id)\n self.mark_as_unsaved()\n self.update()", "def test_check_duplication_entry_at_restoring_two_chain(self):\n ref_entity = Entity.objects.create(name=\"ReferredEntity\", created_user=self._user)\n ref_entries = [\n Entry.objects.create(name=\"ref-%d\" % i, created_user=self._user, schema=ref_entity)\n for i in range(3)\n ]\n ref_entity_2 = Entity.objects.create(name=\"ReferredEntity2\", created_user=self._user)\n ref_entries_2 = [\n Entry.objects.create(name=\"ref2-%d\" % i, created_user=self._user, schema=ref_entity_2)\n for i in range(3)\n ]\n\n # initialize EntityAttrs\n attr_info = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], \"value\": ref_entries[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries[1:],\n },\n }\n attr_info_2 = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], \"value\": ref_entries_2[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries_2[1:],\n },\n }\n for attr_name, info in attr_info.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=self._entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity)\n\n self._entity.attrs.add(attr)\n\n for attr_name, info in attr_info_2.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=ref_entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity_2)\n\n ref_entity.attrs.add(attr)\n\n # initialize target entry\n entry = Entry.objects.create(name=\"entry\", schema=self._entity, created_user=self._user)\n entry.complement_attrs(self._user)\n\n for attr_name, info in attr_info.items():\n attr = entry.attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n ref_entries[0].complement_attrs(self._user)\n for attr_name, info in attr_info_2.items():\n attr = 
ref_entries[0].attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n # delete target entry at first\n entry.delete()\n # sync referral entries from database\n [x.refresh_from_db() for x in ref_entries]\n [x.refresh_from_db() for x in ref_entries_2]\n\n self.assertFalse(ref_entries_2[1].is_active)\n\n # create same name entry\n Entry.objects.create(name=\"ref2-1\", created_user=self._user, schema=ref_entity_2)\n\n # check duplicate entry\n ret = entry.check_duplication_entry_at_restoring(entry_chain=[])\n self.assertTrue(ret)", "def union(self, a, b):\n if (a in self.node_id) and (b in self.node_id) and (self.node_id[a] != self.node_id[b]):\n self.merge(a, b)\n elif (a in self.node_id) or (b in self.node_id):\n self.add(a,b)\n else:\n self.create_new_group(a,b)", "def overwrite_field(self,cells=None,edges=None,source='depth_max',target='depth_mean'):\n if cells is not None:\n self.cells[target][cells]=self.cells[source][cells]\n if edges is not None:\n self.edges[target][edges]=self.edges[source][edges]", "def _attach(self, p, t1, t2):\n node = self._validate(p)\n if not self.is_leaf(p):\n raise ValueError('position must be leaf')\n if not type(self) is type(t1) is type(t2): # all 3 trees must be same type\n raise TypeError('Tree types must match')\n self._size += len(t1) + len(t2)\n if not t1.is_empty(): # attched t1 as left subtree of node\n t1._root._parent = node\n node._left = t1._root\n t1._root = None # set t1 instance to empty\n t1._size = 0\n if not t2.is_empty(): # attched t2 as right subtree of node\n t2._root._parent = node\n node._right = t2._root\n t2._root = None # set t2 instance to empty\n t2._size = 0", "def updateNode(self,updates=None):\n\n logging.info(\"editing this node\")\n if updates is not None:\n for key in updates:\n setattr(self,key,updates[key])\n memcache.set(self.cid,self)" ]
[ "0.65650755", "0.63940614", "0.6173685", "0.61118114", "0.6035203", "0.6019497", "0.59971595", "0.5948517", "0.5918453", "0.5903322", "0.5868278", "0.57634944", "0.57632744", "0.57355", "0.57273066", "0.57142293", "0.56911355", "0.56886685", "0.56635535", "0.5628927", "0.56055725", "0.55853903", "0.55739737", "0.55636734", "0.5556606", "0.5548197", "0.55423623", "0.5519405", "0.55168647", "0.55155087", "0.5482634", "0.54791814", "0.547195", "0.5443041", "0.5425605", "0.54251593", "0.5418061", "0.540684", "0.5403774", "0.53644174", "0.53557324", "0.53509086", "0.53505987", "0.53446114", "0.5339875", "0.5331421", "0.5322575", "0.53085", "0.5306427", "0.53024936", "0.5295214", "0.52914953", "0.52869385", "0.5281978", "0.52814096", "0.52759814", "0.5272431", "0.52703404", "0.52684414", "0.52519566", "0.52458656", "0.5240623", "0.522696", "0.52195746", "0.5213547", "0.5209253", "0.5207037", "0.51966023", "0.51923114", "0.51915646", "0.5186293", "0.517495", "0.5174144", "0.5171674", "0.5164192", "0.51600206", "0.51542455", "0.5153265", "0.5152972", "0.5150228", "0.514518", "0.5136156", "0.51355374", "0.51317513", "0.5131202", "0.5126398", "0.51234215", "0.5121291", "0.51211286", "0.51169485", "0.5116199", "0.5110361", "0.5108347", "0.51020736", "0.50958616", "0.50944823", "0.5091344", "0.5088935", "0.50861156", "0.5081634", "0.5076421" ]
0.0
-1
First node's fields should be updated with the second node's
def testMergeNoEdges():

    n1 = DummyNode(x=1, y=2, z=4)
    n2 = DummyNode(x=1, y=2, z=3)

    assert n1.z == 4

    n1.merge_with(n2)

    assert n1.z == 3
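The test above assumes a DummyNode type whose merge_with overwrites the calling node's fields with the other node's values. A minimal sketch of such a class follows; DummyNode and its keyword-argument constructor are assumptions for illustration only, not part of the dataset's source.

# Hypothetical helper, assumed only so the test above is self-contained.
class DummyNode:
    def __init__(self, **fields):
        # Store arbitrary keyword arguments as attributes (x, y, z, ...).
        self.__dict__.update(fields)

    def merge_with(self, other):
        # Overwrite this node's fields with the second node's values.
        self.__dict__.update(other.__dict__)

With this sketch, n1.z starts at 4 and becomes 3 after n1.merge_with(n2), which is exactly what the assertions check.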
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mergeNodes(new, t1, t2):\n \n if t1 and t2:\n new.val = t1.val + t2.val\n elif not t1:\n new.val = t2.val\n elif not t2:\n new.val = t1.val", "def update(self, other):\n self._start = other._start\n self._end = other._end\n self._nodes = {k: v.copy() for k,v in other._nodes.iteritems()}\n self._edges = {k: set(v) for k,v in other._edges.iteritems()}\n self._names = set(other._names)\n self.current = other.current", "def update(self, other):\n b = self.hallucinate_merge(other)\n self.l_child = b.l_child\n self.r_child = b.r_child", "def merge_nodes(self, parent, child):\n parent.key += child.key\n parent.real = child.real\n parent.value = child.value\n parent.children = child.children", "def assertNodesEqual(self, first, second):\n def get_attrs(l):\n result = []\n for n in l:\n result.append((n.service, n.address, n.version, n.properties))\n return result\n self.assertEqual(get_attrs(first), get_attrs(second))", "def __swap_kv(self, node1, node2):\r\n node1.key, node2.key = node2.key, node1.key\r\n node1.value, node2.value = node2.value, node1.value", "def _redirect(self, node1, node2):\n if node1.parent.right is node1:\n node1.parent.right = node2\n else:\n node1.parent.left = node2", "def assertNodesEqual(self, a, b):\n self.assertEqual((a.version, a.address, a.service, a.properties),\n (b.version, b.address, b.service, b.properties))", "def test_update_node_second_level_component_with_first_level_parent(self):\n payload = {\n 'data': [{\n 'type': 'nodes',\n 'id': self.public_project._id\n }, {\n 'type': 'nodes',\n 'id': self.first_level_component._id\n }, {\n 'type': 'nodes',\n 'id': self.second_level_component._id\n }]\n }\n res = self.app.put_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)\n self.view_only_link.reload()\n assert_equal(res.status_code, 200)\n assert_equal(len(res.json['data']), 3)\n assert_in(self.public_project, self.view_only_link.nodes.all())\n assert_in(self.first_level_component, self.view_only_link.nodes.all())\n assert_in(self.second_level_component, self.view_only_link.nodes.all())", "def merge_sidewalks(sidewalk_network1, sidewalk_network2):\n\n for node in sidewalk_network1.nodes.get_list():\n node.confirmed = True\n\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n in_other = False\n same_node = None\n for other_sidewalk_node in sidewalk_network1.nodes.get_list():\n if sidewalk_node.location() == other_sidewalk_node.location():\n in_other = True\n same_node = other_sidewalk_node\n if not in_other: # If street network 2 contains the node but street network 1 does not\n sidewalk_network1.add_node(sidewalk_node) # Add node from street network 2 to street network 1\n else: # If both networks contain the node\n sidewalk_network2.nodes.update(sidewalk_node.id, same_node)\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n\n network1_dict = {}\n for sidewalk_node in sidewalk_network1.nodes.get_list():\n network1_dict[sidewalk_node.location] = sidewalk_node\n\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n if sidewalk_node.location not in network1_dict:\n sidewalk_network1.add_node(sidewalk_node)\n else:\n sidewalk_network2.nodes.update(sidewalk_node.id, network1_dict[sidewalk_node.location])\n\n # add new ways from sidewalk_network2 to sidewalk_network1\n for way in sidewalk_network2.ways.get_list():\n # ensure all ways have correct nids, if incorrect update to correct nid from network1\n for nid in way.get_node_ids():\n if 
sidewalk_network1.nodes.get(nid) is None:\n way.swap_nodes(nid, sidewalk_network2.nodes.get(nid).id)\n\n has_confirmed_parents = False\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid).confirmed:\n has_confirmed_parents = True\n if not has_confirmed_parents:\n sidewalk_network1.add_way(way)\n\n return sidewalk_network1", "def test_set_node_second_level_component_with_first_level_parent(self):\n payload = {\n 'data': [\n {\n 'type': 'nodes',\n 'id': self.first_level_component._id\n },\n {\n 'type': 'nodes',\n 'id': self.second_level_component._id\n }\n ]\n }\n res = self.app.post_json_api(self.url, payload, auth=self.user.auth)\n self.view_only_link.reload()\n assert_equal(res.status_code, 201)\n assert_in(self.first_level_component, self.view_only_link.nodes.all())\n assert_in(self.second_level_component, self.view_only_link.nodes.all())", "def new_data(first: dict, second: dict, changeables: tuple):\n for name, field in first.items():\n if name not in changeables:\n second[name] = field", "def merge(self):\n leftSon = self.nodes.getNode(0)\n rightSon = self.nodes.getNode(1)\n self.nodes.removeNode(0)\n self.nodes.removeNode(0)\n self.nodes.addNode(Node(leftSon, rightSon))", "def merge(self, a, b):\n old_id, target_id = sorted((self.node_id[a], self.node_id[b]), key = lambda id: len(self.groups[id]))\n for node in self.groups[old_id]:\n self.node_id[node] = target_id\n self.groups[target_id] |= self.groups[old_id]\n del self.groups[old_id]", "def test_update_node_second_level_component_without_first_level_parent(self):\n payload = {\n 'data': [{\n 'type': 'nodes',\n 'id': self.public_project._id\n }, {\n 'type': 'nodes',\n 'id': self.second_level_component._id\n }]\n }\n res = self.app.put_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)\n self.view_only_link.reload()\n assert_equal(res.status_code, 200)\n assert_equal(len(res.json['data']), 2)\n assert_in(self.public_project, self.view_only_link.nodes.all())\n assert_in(self.second_level_component, self.view_only_link.nodes.all())", "def merge_graph(self, other):\n self.add_nodes( (nLabel,nInfo) for nLabel,nInfo in other.nodes() )\n \n for nLabel,nInfo in other.nodes():\n for edgeLabel,edgeInfo in other.edgesFrom(nLabel):\n self.add_edge(edgeLabel,edgeInfo)", "def relate(self, other):\n ...", "def node_diff(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n if self.node_dict1 is None or self.node_dict2 is None:\n self.make_node_dict()\n # Initialize dictonaries to keep track of the nodes in respnse 1 and response 2\n g1={}\n g2={}\n # Set to keep track of the union of all curie ids\n curie_set = set()\n for curie in self.node_dict1.keys():\n g1[curie] = {}\n # intersection is only in the g1 dictionary\n g1[curie]['intersection'] = set()\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g1[curie]['node'] = set()\n curie_set.add(curie)\n for curie in self.node_dict2.keys():\n g2[curie] = {}\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g2[curie]['node'] = set()\n curie_set.add(curie)\n node_names1 = []\n node_names2 = []\n\n # extract all node ids (i.e. 
\"n0\",\"n1\",ect...)\n if len(self.input1['question_graph']['nodes'])>0:\n if 'id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['id'] for x in self.input1['question_graph']['nodes']]\n elif 'node_id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['node_id'] for x in self.input1['question_graph']['nodes']]\n if len(self.input2['question_graph']['nodes'])>0:\n if 'id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['id'] for x in self.input2['question_graph']['nodes']]\n elif 'node_id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['node_id'] for x in self.input2['question_graph']['nodes']]\n \n # initialize the result dictonary\n diff_dict = {}\n diff_dict[\"-1|-1\"] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # initialize node id tuple keys\n for id1 in node_names1:\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # iterate through answers\n for answer1 in self.input1['answers']:\n for answer2 in self.input2['answers']:\n for id1 in answer1['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer1['node_bindings'][id1], str):\n bindings1 = [answer1['node_bindings'][id1]]\n elif isinstance(answer1['node_bindings'][id1], list):\n bindings1 = answer1['node_bindings'][id1]\n for curie1 in bindings1:\n # store node id\n g1[curie1]['node'].add(id1)\n for id2 in answer2['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer2['node_bindings'][id2], str):\n bindings2 = [answer2['node_bindings'][id2]]\n elif isinstance(answer2['node_bindings'][id2], list):\n bindings2 = answer2['node_bindings'][id2]\n for curie2 in bindings2:\n # store node id\n g2[curie2]['node'].add(id2)\n if curie1 == curie2:\n # stor intersection tuple\n g1[curie1]['intersection'].add(id1+\"|\"+id2)\n # iterate through all curies\n for curie in curie_set:\n # check if curie is from answer 1\n if curie in g1.keys():\n # check if in intersection\n if len(g1[curie]['intersection'])>0:\n diff_dict[\"-1|-1\"]['intersection'] += [self.node_dict1[curie]]\n for id1 in node_names1:\n for id2 in node_names2:\n node_tuple = id1+\"|\"+id2\n if id1 in g1[curie]['node'] and id2 in g2[curie]['node']:\n diff_dict[node_tuple]['intersection'] += [self.node_dict1[curie]]\n elif id1 in g1[curie]['node']:\n diff_dict[node_tuple]['g1-g2'] += [self.node_dict1[curie]]\n elif id2 in g2[curie]['node']:\n diff_dict[node_tuple]['g2-g1'] += [self.node_dict1[curie]]\n # If not in intersection store in g1-g2\n else:\n diff_dict[\"-1|-1\"]['g1-g2'] += [self.node_dict1[curie]]\n for id1 in g1[curie]['node']:\n # iterate through all answer 2 ids\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2]['g1-g2'] += [self.node_dict1[curie]]\n # if not in g1 but in g2 then in g2-g1\n elif curie in g2.keys():\n diff_dict[\"-1|-1\"]['g2-g1'] += [self.node_dict2[curie]]\n for id2 in g2[curie]['node']:\n # iterate through all answer 1 ids\n for id1 in node_names1:\n diff_dict[id1+\"|\"+id2]['g2-g1'] += [self.node_dict2[curie]]\n return diff_dict", "def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n 
self.annotations.update(other.annotations)\n self.params.update(other.params)", "def test_create_get_delete_update_node(self):\n node_dict_1 = {\n 'host_name': 'abc',\n 'local_router_id': '1.1.1.1',\n 'as_num': 100,\n 'bgpls_id': '0.0.0.0',\n 'igp_id': '0.0.0.0'\n }\n node_dict_2 = {\n 'host_name': 'def',\n 'local_router_id': '2.2.2.2',\n 'as_num': 100,\n 'bgpls_id': '0.0.0.0',\n 'igp_id': '0.0.0.0'\n }\n\n # create two objects\n node1 = Node(**node_dict_1)\n node2 = Node(**node_dict_2)\n Node.create_object(self.database, node1.__dict__)\n Node.create_object(self.database, node2.__dict__)\n self.assertEqual(2, Node.count(self.database))\n\n # get one object\n node1 = Node.get_object(self.database, host_name='abc')\n self.assertEqual(node_dict_1['local_router_id'], node1.get('local_router_id'))\n\n # get objects\n nodes = Node.get_objects(self.database, as_num=100)\n self.assertEqual(2, len(nodes))\n\n # update one object\n self.assertEqual(0, Node.count(self.database, local_router_id='3.3.3.3'))\n node_db_obj = Node.update_object(\n self.database, {'local_router_id': '3.3.3.3'}, host_name='abc')\n self.assertEqual('3.3.3.3', node_db_obj.get('local_router_id'))\n self.assertEqual(1, Node.count(self.database, local_router_id='3.3.3.3'))\n\n # update more than objects\n self.assertEqual(2, Node.count(self.database, as_num=100))\n update_count = Node.update_objects(\n self.database, {'as_num': 200}, igp_id='0.0.0.0')\n self.assertEqual(2, update_count)\n self.assertEqual(2, Node.count(self.database, as_num=200))\n\n # delete objects\n Node.delete_object(self.database, host_name='abc')\n self.assertEqual(1, Node.count(self.database))", "def test_updated_nodes():\n assert_missing_node(10000)\n assert_cached_node(10001, (10.0, 40.0))\n assert_cached_node(10002, (10.1, 40.0))\n place_10001 = query_row(db_conf, 'osm_places', 10001)\n assert place_10001['name'] == 'Bar', place_10001\n place_10002 = query_row(db_conf, 'osm_places', 10002)\n assert place_10002['name'] == 'Baz', place_10002", "def make_link(Graph, node1, node2):\n if node1 not in Graph:\n Graph[node1] = {}\n (Graph[node1])[node2] = 1\n if node2 not in Graph:\n Graph[node2] = {}\n (Graph[node2])[node1] = 1\n return Graph", "def add_node_pairs(self, node_a,node_b):\r\n \r\n if node_b is not None : \r\n self.nodes[node_a].append(node_b)", "def copy(node1, node2):\n \n \n # ITERATE OVER ALL PARMS IN NODE1, AND CHECK IF THE PARM EXISTS IN NODE2\n for p in node1.parms():\n if node2.parm(p.name()):\n p2 = node2.parm(p.name())\n \n # TEMPORARILY CLEAR KEYFRAMES. WE WILL RESTORE THEM LATER IF THEY EXIST ON NODE1'S PARM\n p2.deleteAllKeyframes()\n \n # SEE IF WE CAN JUST SET THE EXPRESSION OF THE PARAMETER\n try:\n p2.setExpression(p.expression())\n except:\n # IF NOT, TRY SETTING THE UNEXPANDED STRING. 
IF THAT DOESN'T WORK, JUST SET THE EVAL VALUE OF THE PARM\n try:\n p2.set(p.unexpandedString())\n except:\n p2.set(p.eval())\n \n # SET KEYFRAMES IF THEY EXIST ON NODE1\n if p.keyframes():\n p2.setKeyframes(p.keyframes())", "def merge_nodes(self,n0,n1):\n # -- Sanity checks - does not yet allow for collapsing edges.\n\n # if they share any cells, would update the cells, but for now\n # just signal failure.\n n0_cells=list(self.node_to_cells(n0))\n n1_cells=list(self.node_to_cells(n1))\n cell_to_edge_cache={}\n\n for c in n1_cells:\n if c in n0_cells:\n print(\"cell %d common to both nodes\"%c)\n raise GridException(\"Not ready for merging nodes in the same cell\")\n # otherwise record and fix up below\n\n # while we're looping, cache the edges as they will\n # be mutated along the way.\n cell_to_edge_cache[c]=self.cell_to_edges(c).copy()\n\n # do they share an edge, but not already fixed in the above stanza?\n j=self.nodes_to_edge(n0,n1)\n if j is not None:\n raise GridException(\"Not ready for merging endpoints of an edge\")\n\n edge_map={} # index of superceded edge => superceding edge\n\n # Update edges of n1 to point to n0\n # if that would cause a duplicate edge, then the n1 version is deleted\n n1_edges=list(self.node_to_edges(n1)) # make copy since we'll mutate it\n for j in n1_edges:\n if self.edges['nodes'][j,0]==n1:\n nj=0\n elif self.edges['nodes'][j,1]==n1:\n nj=1\n else:\n assert False # sanity check\n newnodes=self.edges[j]['nodes'].copy()\n newnodes[nj]=n0\n # it's possible that this is an edge which already exists\n jother=self.nodes_to_edge(*newnodes)\n if jother is not None:\n # want to keep jother, delete j. but is there info on\n # cells which should be brought over?\n edge_map[j]=jother\n # wait to delete j until after cells have been moved to jother.\n else:\n self.log.debug(\"Modifying edge j=%d\"%j)\n self.modify_edge(j,nodes=newnodes)\n\n # -- Transition any cells. \n for c in n1_cells:\n # update the node list:\n cnodes=self.cell_to_nodes(c).copy()\n nc=list(cnodes).index(n1)\n cnodes[nc]=n0\n\n # Dangerous to use cell_to_edges, since it may\n # have to consult the edge topology, which is disrupted\n # in the above code. \n # cell_to_edges: first checks cells['edges'], may \n # go to cell_to_nodes(c): that's safe.\n # and nodes_to_edge\n # -> node_to_edges, which in turn may consult self.edges['nodes']\n\n #cedges=self.cell_to_edges(c).copy()\n cedges=cell_to_edge_cache[c]\n\n for ji,j in enumerate(cedges):\n if j in edge_map:\n # is there were edges['cells'] should be updated?\n\n # sever the edge=>cell pointer, to p\n # could just set to [-1,-1], but this keeps things very explicit\n # for debugging\n j_cells=list(self.edges['cells'][j])\n j_cells_side=j_cells.index(c)\n j_cells[ j_cells_side ] = -1\n self.modify_edge(j,cells=j_cells)\n\n # and modify the receiving edge, too\n jo=edge_map[j]\n jo_cells=list(self.edges['cells'][jo])\n # which side of jo? a bit tedious...\n if list(self.edges['nodes'][j]).index(n1) == list(self.edges['nodes'][jo]).index(n0):\n # same orientation\n jo_cells_side=j_cells_side\n elif list( self.edges['nodes'][j]).index(n1) == 1-list(self.edges['nodes'][jo]).index(n0):\n jo_cells_side=1-j_cells_side\n else:\n raise Exception(\"Failed in some tedium\")\n assert jo_cells[jo_cells_side]<0\n jo_cells[jo_cells_side]=c\n self.modify_edge(edge_map[j],cells=jo_cells)\n # yikes. 
any chance that worked?\n\n cedges[ji]=edge_map[j]\n\n # maybe this is where we'd update cells['edges'] too?\n self.modify_cell(c,nodes=cnodes,edges=cedges)\n\n for dead_edge in edge_map:\n self.delete_edge(dead_edge)\n\n self.delete_node(n1)", "def side_renaming(network1, network2):\n\n # There is probably faster way to perform this, optimize later if needed\n for i in range(len(network1.nodes)):\n \n if (network1.nodes[i][\"group\"] == \"#fcae91FF\"):\n network1.nodes[i][\"T1\"] = \"0\"\n\n elif (network1.nodes[i][\"group\"] == \"#7828a0FF\"):\n network1.nodes[i][\"T1\"] = \"1\"\n \n else:\n print(\"Error with group encoding!\")\n \n \n for i in range(len(network2.nodes)):\n \n if (network2.nodes[i][\"group\"] == \"#fcae91FF\"):\n network2.nodes[i][\"T2\"] = \"0\"\n \n elif (network2.nodes[i][\"group\"] == \"#7828a0FF\"):\n network2.nodes[i][\"T2\"] = \"1\"\n \n else:\n print(\"This should not be printed! Error with group encoding!\")\n\n return network1, network2", "def _update_with_node(self, node: Node) -> None:\n\t\t# Get and test name\n\t\tname = node.name\n\t\tif name not in self.node_names:\n\t\t\t# Add if not added\n\t\t\tself.node_names.append(name)\n\t\t\t# Modify attributes to say \"Attribute - \" in the front\n\t\t\tattrs: List[str] = []\n\t\t\tfor attr in node.attributes:\n\t\t\t\tattrs.append(\"Attribute - \" + attr.title())\n\t\t\t# Create set, use Node attributes as base\n\t\t\tself.subnode_names[name] = set(attrs)\n\n\t\t# Iterate over SubNodes\n\t\tfor subnode in node.subnodes:\n\t\t\t# Set and test name\n\t\t\ts_name = subnode.name\n\t\t\tself.subnode_names[name].add(s_name)\n\n\t\t# Iterate over nodes\n\t\tfor nested_node in node.nodes:\n\t\t\tself._update_with_node(nested_node)", "def update(self, initial, follows):", "def update(self):\n\n for node in self.nodes:\n for edge in node.edges:\n for i, edge_node in enumerate(edge.nodes):\n if edge_node.id != node.id:\n edge_node.add_edge(edge)\n\n return self", "def update_one_node_from_pbs_data(node, attr_dict):\n # put node under a subcluster if it does not have any yet\n if not node.subcluster:\n for id,node_regexp in SubCluster.objects.filter(server=node.server).values_list('id','node_regexp'):\n if re.match(node_regexp,node.name):\n node.subcluster_id = id\n node.save()\n break\n # fill node's np if it is not present\n if not node.np:\n node.np = attr_dict['np']\n node.save()\n\n new_states = []\n if attr_dict.has_key('state'):\n# node.state.clear()\n for statename in attr_dict['state'].split(','):\n #node.state.add(NodeState.objects.get(name=statename.strip()))\n new_states.append(NodeState.objects.get(name=statename.strip()))\n attr_dict['state'] = new_states\n\n\n new_properties = []\n if attr_dict.has_key('properties'):\n# node.properties.clear()\n for propertyname in attr_dict['properties'].split(','):\n np,created = NodeProperty.objects.get_or_create(name=propertyname.strip())\n if created:\n print(\"New property created: %s\" % propertyname)\n new_properties.append(np)\n# node.properties.add(np)\n attr_dict['properties'] = new_properties\n\n new_jobs = []\n if attr_dict.has_key('jobs'):\n slot_jobs = dict([tuple(j.strip().split('/')) for j in attr_dict['jobs'].split(',')])\n for slotstr, longjobid in slot_jobs.items():\n slot = int(slotstr)\n# js,created = getJobSlot(slot=slot,node=node)\n# if created:\n# logging.info(\"new jobslot will be created: slot: %d, node name: %s\" % (slot,name))\n jobid = int(longjobid.split('.')[0])\n new_jobs.append(jobid)\n \n# js.livejob,created = 
LiveJob.objects.get_or_create(jobid=jobid, server=node.server)\n# if created:\n# logging.info(\"new livejob created: %d\" % jobid)\n# js.save()\n attr_dict['jobs'] = new_jobs\n return attr_dict", "def update(self, other):\n fields = None\n if isinstance(other, dict):\n fields = other\n elif isinstance(other, Torrent):\n fields = other.fields\n else:\n raise ValueError('Cannot update with supplied data')\n for k, v in fields.iteritems():\n self.fields[k.replace('-', '_')] = v", "def test_allow_multiples(self):\r\n o1 = self.b1.get(self.key)\r\n o2 = self.b2.get(self.key)\r\n\r\n o1.set_data(\"object-1\")\r\n o1.store()\r\n o2.set_data(\"object-2\")\r\n o2.store()\r\n\r\n conflicted = self.b1.get(self.key)\r\n siblings = filter(bool, (s.get_data() for s in conflicted.get_siblings()))\r\n self.assertEqual(len(siblings), 2)", "def update_node2edge(self):\n self.node2edge = {e.child : e for e in self.edge}\n childrenset = set(self.node2edge.keys())\n rootset = set(e.parent for e in self.edge).difference(childrenset)\n if len(rootset) > 1:\n raise Warning(\"there should be a single root: \" + str(rootset))\n if len(rootset) == 0:\n raise Exception(\"there should be at least one root!\")\n self.root = rootset.pop()", "def update_nodes(nodes, bb):\n \n for node in nodes:\n node.set(\"label\", update_bb_string(node.get_attributes()[\"label\"], bb))\n node.set_name(update_node_name(node.get_name(), bb))", "def test_link_attribute_update(self):\n entries = {\n 'cn=huidige leden,ou=groups,dc=esmgquadrivium,dc=nl': {'cn': ['Huidige leden']},\n 'cn=agroup,ou=groups,dc=esmgquadrivium,dc=nl': {'cn': ['agroup']},\n 'uid=aperson,ou=people,dc=esmgquadrivium,dc=nl': {'uid': ['aperson']},\n }\n actual = clone(entries, link_attribute='linkID')\n person_id = Person.objects.first().id\n group1_id = QGroup.objects.get(name='Huidige leden').id\n group2_id = QGroup.objects.get(~Q(name='Huidige leden')).id\n\n expect = [ModifyOperation('uid=aperson,ou=people,dc=esmgquadrivium,dc=nl', 'linkID', [person_id]),\n ModifyOperation('cn=huidige leden,ou=groups,dc=esmgquadrivium,dc=nl', 'linkID', [group1_id]),\n ModifyOperation('cn=agroup,ou=groups,dc=esmgquadrivium,dc=nl', 'linkID', [group2_id])]\n self.assertCountEqual(expect, actual)", "def remap_nodes(self, new_node_mapping):\n # because all nodes are SchemaNodeIDs (i.e. 
objects), we only need to reassign nodes one way\n # changes propagate to chains, chain root_nodes, and parents automatically\n for chain in self.chains:\n for edge in chain:\n head, tail = edge\n if head in new_node_mapping.keys():\n head.value = new_node_mapping[head]\n if tail in new_node_mapping.keys():\n tail.value = new_node_mapping[tail]", "def update_rec(self):\n import copy\n \n self.leftrec, self.rightrec = copy.copy(self.rec), copy.copy(self.rec)\n self.leftrec[2*self.dim + 1], self.rightrec[2*self.dim] = self.node.dimension[self.dim], self.node.dimension[self.dim]", "def breadth_first_update(self, extra_roots=[], extra_updated=set()):\n queue = []\n updated = extra_updated\n for k in self.__node_dict.keys():\n if len(self.__node_dict[k].inputs) == 0:\n queue.append(self.__node_dict[k])\n queue.extend(extra_roots)\n while (len(queue) != 0):\n node_to_update = queue.pop(0)\n # print('update {}'.format(node_to_update.uid))\n if node_to_update not in updated:\n node_to_update.update()\n updated.add(node_to_update)\n for element in node_to_update.outputs:\n child = element['to_node']\n if all([i['from_node'] in updated for i in child.inputs]):\n queue.append(child)\n # print('----done----')", "def update(self) -> None:\n\t\t# Clear attributes that will be updates\n\t\tself.node_names: List[str] = []\n\t\tself.subnode_names: Dict[str, Set[str]] = {}\n\t\t# Iterate over RootNodes\n\t\tname: str\n\t\ts_name: str\n\t\tfor rootnode in self.root_nodes:\n\t\t\t# Iterate over Nodes\n\t\t\tfor node in rootnode.nodes:\n\t\t\t\tself._update_with_node(node)\n\t\t\tif len(rootnode.subnodes):\n\t\t\t\t# Create Set in subnode_names for the RootNode's SubNodes\n\t\t\t\tself.subnode_names[rootnode.name] = set()\n\t\t\t\t# Iterate over SubNodes\n\t\t\t\tfor subnode in rootnode.subnodes:\n\t\t\t\t\tself.subnode_names[rootnode.name].add(subnode.name)", "def join_nodes_in_both_trees(tree1, nodeAinT1, cladeA,\n tree2, nodeBinT2, cladeB, test=False):\n cladeA = set(cladeA)\n cladeB = set(cladeB)\n leaves1 = get_leaf_set(tree1)\n leaves2 = get_leaf_set(tree2)\n\n cladeAisT1 = leaves1 == cladeA\n cladeBisT2 = leaves2 == cladeB\n\n # Handle adding all of tree1 into tree 2 and vice versa!!\n if cladeAisT1 and cladeBisT2:\n # Done\n print(\"Nodes are tree1 and tree2...\")\n if test:\n return [None, None]\n root = dendropy.Node()\n root.add_child(nodeAinT1)\n root.add_child(nodeBinT2)\n tree1 = dendropy.Tree(seed_node=root)\n tree1.is_rooted = True\n tree2 = None\n elif cladeAisT1:\n # Add all of tree 1 into tree 2\n print(\"Add all of tree 1 into tree 2\")\n if test:\n return [None, None]\n [tree2, nodeBinT2] = extract_nodes_from_split(tree2, nodeBinT2,\n cladeB)\n root = dendropy.Node()\n root.add_child(nodeAinT1)\n root.add_child(tree2.seed_node)\n tree1 = dendropy.Tree(seed_node=root)\n tree1.is_rooted = True\n tree2 = None\n elif cladeBisT2:\n # Add all of tree 2 into tree 1\n print(\"Add all of tree 2 into tree 1\")\n if test:\n return [None, None]\n [tree1, nodeAinT1] = extract_nodes_from_split(tree1, nodeAinT1,\n cladeA)\n root = dendropy.Node()\n root.add_child(tree1.seed_node)\n root.add_child(nodeBinT2)\n tree1 = dendropy.Tree(seed_node=root)\n tree1.is_rooted = True\n tree2 = None\n else:\n # Make the join!\n print(\"Making join...\")\n [tree1, nodeAinT1] = extract_nodes_from_split(tree1, nodeAinT1,\n cladeA)\n [tree2, nodeBinT2] = extract_nodes_from_split(tree2, nodeBinT2,\n cladeB)\n\n root1 = dendropy.Node()\n root1.add_child(tree1.seed_node)\n root1.add_child(deepcopy(nodeBinT2)) # TODO: Remove 
deep copies!\n tree1 = dendropy.Tree(seed_node=root1)\n tree1.is_rooted = True\n\n root2 = dendropy.Node()\n root2.add_child(tree2.seed_node)\n root2.add_child(deepcopy(nodeAinT1)) # TODO: Remove deep copies!\n tree2 = dendropy.Tree(seed_node=root2)\n tree2.is_rooted = True\n\n return [tree1, tree2]", "def test_set_node_second_level_component_without_first_level_parent(self):\n payload = {\n 'data': [\n {\n 'type': 'nodes',\n 'id': self.second_level_component._id\n },\n ]\n }\n res = self.app.post_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)\n self.view_only_link.reload()\n assert_equal(res.status_code, 201)\n assert_equal(len(res.json['data']), 2)\n assert_in(self.public_project, self.view_only_link.nodes.all())\n assert_in(self.second_level_component, self.view_only_link.nodes.all())", "def _swap(self, node1, node2):\n arr = self._array\n arr[node1._index], arr[node2._index] = arr[node2._index], \\\n arr[node1._index]\n # Swap indices stored in nodes as well\n node1._index, node2._index = node2._index, node1._index", "def make_node_dict(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n self.node_dict1 = {}\n for node in self.input1['knowledge_graph']['nodes']:\n self.node_dict1[node['id']] = node\n self.node_dict2 = {}\n for node in self.input2['knowledge_graph']['nodes']:\n self.node_dict2[node['id']] = node", "def other(self, node):\n if node == self.__node_a:\n return self.__node_b\n elif node == self.__node_b:\n return self.__node_a", "def merge(self, other_btree):\n pass", "def union(self, node1, node2):\n\n root1 = self.root(node1)\n root2 = self.root(node2)\n\n if root1 == root2:\n return\n\n if node1 < node2:\n self.set[root2] = root1\n self.root(node2)\n else:\n self.set[root1] = root2\n self.root(node1)", "def update_nodes(nodes, sc, organization, org_id, site_names):\n for node in nodes:\n print(\"=\" * 75)\n print(\"Node:\", node[\"id\"], node[\"serial\"], node[\"model\"])\n print(\"org:\", node[\"org\"], organization)\n print(\"site:\", node[\"site\"])\n print(\"location:\", node[\"location\"])\n\n site_id = node[\"site\"]\n site_name = site_names[site_id]\n print(\"\\nSetting location to '{}'\".format(site_name))\n node[\"location\"] = site_name\n result = sc.put(\"node/\" + node[\"id\"], data=node)\n print(\"updated location:\", result[\"location\"])\n print(\"Response:\", sc.response.status_code, sc.response.reason, \"\\n\")\n print()", "def join_nodes_in_one_tree(tree1, nodeAinT1, cladeA, tree2, nodeBinT2,\n cladeB):\n [tree1, nodeAinT1] = extract_nodes_from_split(tree1, nodeAinT1, cladeA)\n [tree2, nodeBinT2] = extract_nodes_from_split(tree2, nodeBinT2, cladeB)\n\n root = dendropy.Node()\n root.add_child(deepcopy(nodeAinT1)) # TODO: Remove deep copies!\n root.add_child(tree2.seed_node)\n tree2 = dendropy.Tree(seed_node=root)\n tree2.is_rooted = True\n\n return [tree1, tree2]", "def update_one_node(node):\n conn = pbs.pbs_connect(node.server.name.encode('iso-8859-1', 'replace'))\n if conn==-1:\n logging.error(\"Cannot connect to %s - live data will be missing\" % server.name)\n return\n statnodes = pbs.pbs_statnode(conn, node.name.encode('iso-8859-1', 'replace') , [], \"\")\n pbs.pbs_disconnect(conn)\n\n if len(statnodes)==0:\n logging.error(\"pbs_statnode failed for node: %s\" % node.name)\n return\n if len(statnodes)>1:\n logging.warning(\"pbs_statnode returned more than one records for node: %s\" % node.name)\n\n attr_dict = dict([ (x.name,x.value) for x in 
statnodes[0].attribs])\n update_one_node_from_pbs_data(node, attr_dict)\n node.save()", "def union(node1, node2):\n node1_root = find(node1)\n node2_root = find(node2)\n if node1_root == node2_root:\n return\n if node1_root.rank < node2_root.rank:\n node1_root.parent = node2_root\n elif node2_root.rank > node2_root.rank:\n node2_root.parent = node1_root\n else:\n node2_root.parent = node1_root\n node1_root.rank = node1_root.rank + 1", "def update(self, edges) -> None:\n for v1, v2 in edges:\n self.add(v1, v2)", "def recoverTree(self, root: TreeNode) -> None:\n self.inorder(root)\n self.first.val, self.second.val = self.second.val, self.first.val", "def modify_d2(d1, d2):\n val_list = [i for i in d2.keys()]\n \n for key in val_list:\n for i in range(len(d2[key])):\n try:\n val = d1[d2[key][i][2]]\n d2[key][i][2] = val\n if None in d2[key][i]:\n d2[key][i].remove(None)\n except:\n pass\n return d2", "def set_both_connections(self, new_node):\n distance_to_new = self.current_node.distance_between(new_node.location)\n self.current_node.set_adjacent_from_direction(distance_to_new, new_node)\n reverse_distance = new_node.distance_between(self.current_node.location)\n new_node.set_adjacent_from_direction(reverse_distance, self.current_node)", "def recoverTree(self, root: Optional[TreeNode]) -> None:\n self.inorder(root)\n self.first.val,self.second.val=self.second.val,self.first.val", "def union(self, node1, node2):\n root1 = self.find(node1)\n root2 = self.find(node2)\n if root1 != root2: # only merge if the connected components differ\n if self.ranks[root1] > self.ranks[root2]:\n self.parents[root2] = root1\n else:\n self.parents[root1] = root2\n if self.ranks[root1] == self.ranks[root2]:\n self.ranks[root2] += 1", "def sub_graph_merging(self):", "def replace_values(dfg1, dfg2):\r\n for edge in dfg1:\r\n if edge in dfg2:\r\n dfg1[edge] = dfg2[edge]\r\n return dfg1", "def _merge_two(self, obj1, obj2):\r\n for uniq_ident in obj2.keys():\r\n if (uniq_ident not in obj1) \\\r\n or (obj1[uniq_ident]['modified'] \\\r\n < obj2[uniq_ident]['modified']):\r\n obj1[uniq_ident] = obj2[uniq_ident]\r\n\r\n return obj1 # self._dict_to_list(obj1)\r", "def _fields_sync(self, values):\n # 1. From UPSTREAM: sync from parent\n if values.get('parent_id') or values.get('type') == 'contact':\n # 1a. Commercial fields: sync if parent changed\n if values.get('parent_id'):\n self._commercial_sync_from_company()\n # 1b. Address fields: sync if parent or use_parent changed *and* both are now set\n if self.parent_id and self.type == 'contact' and self.is_company == False:\n onchange_vals = self.onchange_parent_id().get('value', {})\n self.update_address(onchange_vals)\n\n # 2. 
To DOWNSTREAM: sync children\n self._children_sync(values)", "def _add(self, node1, node2):\r\n\r\n self._graph[node1].add(node2)", "def _update_input_after_create_node(self):\n for node in self._normal_node_map.values():\n for src_node_id, input_attr in dict(node.inputs).items():\n node.delete_inputs(src_node_id)\n if not self._is_node_exist(node_id=src_node_id):\n message = f\"The input node could not be found by node id({src_node_id}) \" \\\n f\"while updating the input of the node({node})\"\n logger.warning(message)\n\n continue\n\n src_node = self._get_normal_node(node_id=src_node_id)\n input_attr['shape'] = src_node.output_shape\n input_attr['data_type'] = src_node.output_data_type\n node.add_inputs(src_name=src_node.name, input_attr=input_attr)", "def set_second_incident_node(self, second_incident_node):\n # overwrite the existing second incident node with the input second incident Node object\n self.second_incident_node = second_incident_node", "def update_to(self, new):\r\n if self.idhex != new.idhex:\r\n plog(\"ERROR\", \"Update of router \"+self.nickname+\"changes idhex!\")\r\n for i in new.__dict__.iterkeys():\r\n if i == \"refcount\" or i == \"_generated\": continue\r\n self.__dict__[i] = new.__dict__[i]", "def update(self, other):\n for (ngram, value) in other.items():\n self[ngram] = value", "def _mutate_node(self, node):\n self.idx += 1\n\n if self.idx != self.r:\n return\n\n # Exclude some things like signatures, etc.\n exclusions = ['signature', 'crc']\n for ex in exclusions:\n if ex in node._pfp__name.lower():\n return\n\n if type(node) == pfp.fields.Dom:\n return\n elif self._base_name(node) == 'Struct':\n # This is a container, interested in\n # its children nodes\n return\n elif self._base_name(node) == 'Array':\n print(\"%s is an Array of %s (%s)\" % (node._pfp__name,\n node.field_cls, node.width))\n # I can change the data at once:\n node.raw_data = \"cacaca\"\n\n # Or iterate through its elements:\n # for e in node:\n # e._pfp__set_value(e._pfp__value + 1)\n else:\n # CORE TYPE\n # This is supposed to cast\n print('CORE TYPE?')\n node._pfp__set_value(1337)", "def _traverse_1_0_1(item, nodes):\n if 'content' in item.keys():\n ids = []\n for node in item['content']:\n nodes[node['id']] = node\n ids.append(node['id'])\n _traverse_1_0_1(node, nodes)\n item['content'] = ids", "def update(self):\n diff = self._diff()\n if not diff:\n # Nothing to do!\n return\n self.parent.update_node(self, diff)", "def merge(self, other):\n\n for child in other.children:\n self.add_deep_copy_of(child, merged=True)", "def swap(self, subtree_a, subtree_b):\n\n temp1 = subtree_a.parent\n temp2 = subtree_b.parent\n\n temp1.children[temp1.children.index(subtree_a)] = subtree_b\n temp2.children[temp2.children.index(subtree_b)] = subtree_a\n \n subtree_a.parent = temp2\n subtree_b.parent = temp1\n\n self.propogate_subtree(subtree_a)\n self.propogate_subtree(subtree_b)", "def connect_one_way(node1, node2, weight):\n node1.add_or_update_neighbour(node2, weight)", "def connect_both(node1, node2, weight):\n connect_one_way(node1, node2, weight)\n connect_one_way(node2, node1, weight)", "def test_updated_nodes1():\n road = query_row(db_conf, 'osm_roads', 60000)\n assert_almost_equal(road['geometry'].length, 14035.61150207768)", "def relations_from(self, start_node):", "def prepare_node_attrs(self):", "def describe_update(node_before: Node, node_after: Node) -> UpdateDescription:\n\n before_des = node_before.to_text()\n after_des = node_after.to_text()\n\n if node_before.algorithm != 
node_after.algorithm:\n if node_before.operation != node_after.operation:\n diff = \"This step in the first query performs \" + node_before.algorithm.lower() + \\\n \", but the second one performs \" + node_after.algorithm.lower() + \".\"\n else:\n diff = \"This step in both queries perform \" + node_before.operation.lower() + \\\n \". However, in the first query \"+ node_before.algorithm.lower() + \" is performed\" + \\\n ', and in the second query ' + node_after.algorithm.lower() + \" is performed\" + \".\"\n else:\n diff = \"This step in both queries perform \" + node_before.algorithm.lower() + \", but \"\n differences = [] # differences between node1 and node2\n node1_special = [] # something node1 has but node2 doesn't have\n node2_special = [] # something node2 has but node1 doesn't have\n if not _is_output_name_same(node_before, node_after):\n differences.append(\"output name\")\n for attr in interested_attrs:\n if _exists_attr(attr, node_before, node_after) == 1:\n differences.append(attr)\n node1_special.append(attr)\n elif _exists_attr(attr, node_before, node_after) == 2:\n differences.append(attr)\n node2_special.append(attr)\n elif _exists_attr(attr, node_before, node_after) == 12 and \\\n node_before.attributes[attr] != node_after.attributes[attr]:\n differences.append(attr)\n differences = [d.lower() for d in differences]\n if len(differences) == 1:\n diff += differences[0] + \" is different. \"\n else:\n diff += \", \".join(differences[:-1])\n diff += \" and \" + differences[-1] + \" are different. \"\n\n node1_special_key = [s1.lower() for s1 in node1_special]\n node1_special_value = [\n node_before.attributes[s1]\n for s1 in node1_special\n ]\n node2_special_key = [s2.lower() for s2 in node2_special]\n node2_special_value = [\n node_after.attributes[s1]\n for s1 in node2_special\n ]\n node1_display = [\n \" \".join([k,v])\n for k, v in zip(node1_special_key, node1_special_value)\n ]\n node2_display = [\n \" \".join([k, v])\n for k, v in zip(node2_special_key, node2_special_value)\n ]\n if len(node1_display) == 1:\n diff += \"Query 1 has \" + node1_display[0] + \\\n \", but query 2 doesn't have \" + node1_special_key[0] + \". \"\n if len(node1_display) > 1:\n diff += \"Query 1 has \" + \", \".join(node1_display[:-1]) + \\\n \" and \" + node1_display[-1] +\", but query 2 doesn't have them. \"\n\n if len(node2_display) == 1:\n diff += \"Query 2 has \" + node2_display[0] + \\\n \", but query 1 doesn't have \" + node2_special_key[0] + \". \"\n if len(node2_display) > 1:\n diff += \"Query 2 has \" + \", \".join(node2_display[:-1]) + \\\n \" and \" + node2_display[-1] + \", but query 1 doesn't have them. 
\"\n\n return UpdateDescription(before_des, after_des, diff)", "def test_changes_being_propagated(self):\n self.create_master_and_slaves()\n self.node.title=\"Changed Title\"\n self.node.save(propagate=True)", "def test9_test_from_list_with_update():\n with open(\"./Output/should_be_yed.test9_test_from_list_with_update.graphml\", \"r\") as f:\n expected_output = f.read() \n yed_diagram = create_yed_diagram(node_duplicates=\"update\")\n data1 = [\n {\n \"source\": \"switch-1\",\n \"src_label\": \"GigabitEthernet4/6\",\n \"target\": {\n \"bottom_label\": \"\",\n \"id\": \"switch-2\",\n \"top_label\": \"10.13.1.7\"\n },\n \"trgt_label\": \"GigabitEthernet1/5\"\n },\n {\n \"source\": \"switch-1\",\n \"src_label\": \"GigabitEthernet1/1\",\n \"target\": {\n \"bottom_label\": \"\",\n \"id\": \"switch-3\",\n \"top_label\": \"10.17.14.1\"\n },\n \"trgt_label\": \"GigabitEthernet0/1\"\n },\n {\n \"source\": \"switch-1\",\n \"src_label\": \"GigabitEthernet1/2\",\n \"target\": {\n \"bottom_label\": \"\",\n \"id\": \"switch-4\",\n \"top_label\": \"10.17.14.2\"\n },\n \"trgt_label\": \"GigabitEthernet0/10\"\n }\n ]\n data2 = [\n {\n \"source\": \"switch-2\",\n \"src_label\": \"GigabitEthernet1/5\",\n \"target\": {\n \"bottom_label\": \"\",\n \"id\": \"switch-1\",\n \"top_label\": \"10.13.1.17\"\n },\n \"trgt_label\": \"GigabitEthernet4/6\"\n }\n ]\n yed_diagram.from_list(data1)\n yed_diagram.from_list(data2)\n ret = yed_diagram.dump_xml()\n assert normalize_xml(ret) == normalize_xml(expected_output)", "def _match_identical_nodes(self):\n\n for job_name_b in self._topo_b_nodes:\n for job_name_a in self._unresolved_a_nodes:\n if self._is_node_identical(job_name_a, job_name_b):\n self._identical_nodes[job_name_b] = job_name_a\n self._unresolved_a_nodes.remove(job_name_a)\n self._unresolved_b_nodes.remove(job_name_b)\n break", "def merge_two_personroot_nodes(left_personroot_node: Node, right_personroot_node: Node) -> None:\n global _graph\n\n if left_personroot_node is None or right_personroot_node is None:\n print('merge_two_personroot_nodes(): Error: (one of the) nodes is None.')\n return\n\n if left_personroot_node['name'] != 'person-root' \\\n or right_personroot_node['name'] != 'person-root':\n print('merge_two_personroot_nodes(): not anticipated: (one of the) nodes '\n + 'are not \"person-root\".')\n return\n\n if left_personroot_node == right_personroot_node:\n # They are already connected, we are done.\n return\n\n # There are two possible reasons why it can happen that two person-root nodes\n # of two nodes to insert are different:\n # (1) It can happen e.g. in case a personal ID (ISNI, ORCID, etc.) is assigned\n # to two or more different persons.\n # Of course, that should not happen. Most probably this in a typo in a source system.\n # (2) The two nodes refer to the same person, but originate from different source\n # systems.\n # E.g. harvest of system 1 results in ORCID and ISNI of the same person, which have a\n # common person-root. Harvest of system 2 results in EMAIL with another person-root.\n # Now a subsequent harvest results in ORCID and EMAIL of the same person. 
Then there\n # are two different person-roots which need to be merged.\n # Both can happen, but we cannot know if it is either (1) or (2).\n\n now = datetime.now()\n timestamp = now.strftime('%Y%m%d-%H%M%S')\n count = 0\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += 'Merged person-root node \"'\n what_happened += right_personroot_node['_key'] + '\" to this person-root node '\n what_happened += 'and then deleted it. This was the history of the deleted node:'\n left_personroot_node['_history'].append(what_happened)\n for history in right_personroot_node['_history']:\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += history\n left_personroot_node['_history'].append(what_happened)\n\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += 'End of history of the deleted node.'\n left_personroot_node['_history'].append(what_happened)\n\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += 'These were the neighbors of the deleted node, '\n what_happened += 'now merged with the neighbors of this node:'\n left_personroot_node['_history'].append(what_happened)\n\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n for edge_from_right_node in get_edges(right_personroot_node):\n right_node = edge_from_right_node.end_node\n if right_node is None:\n continue\n if right_node == right_personroot_node:\n continue\n\n what_happened += '\"' + str(right_node['_key']) + '\" '\n edge_delete1 = LINKS_TO(right_personroot_node, right_node)\n edge_delete2 = LINKS_TO(right_node, right_personroot_node)\n edge_create1 = LINKS_TO(left_personroot_node, right_node)\n edge_create2 = LINKS_TO(right_node, left_personroot_node)\n # _graph.delete() also deletes 'right_personroot_node'.\n # TODO: There seems to be a bug here. It does not only delete 'right_personroot_node', but sometimes it also\n # deletes other nodes which have more than one edge, such as an 'organization' node connected to multiple\n # person-root nodes (including right_personroot_node).\n # The problem is that _graph.separate() does not seem to work, which seems to be the 'best' function\n # since it only deletes edges. 
Use with caution (or don't use).\n _graph.delete(edge_delete1)\n _graph.delete(edge_delete2)\n _graph.merge(edge_create1 | edge_create2, 'RCGNode', '_key')\n\n what_happened += '.'\n left_personroot_node['_history'].append(what_happened)\n\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += 'End of list of neighbors of the deleted node.'\n left_personroot_node['_history'].append(what_happened)\n _graph.push(left_personroot_node)\n return", "def update_nodes(self):\n raise NotImplementedError('ERROR: sweeper has to implement update_nodes(self)')", "def merge(self, g1, g2):\n logger = logging.getLogger(__name__)\n \n \n g = BaseGraph()\n g.copy_graph_from(g1)\n\n plwn2sumo_dict = defaultdict(set)\n plwn2sumo_dict = self.get_plwn2sumo_dict()\n\n synset_on_vertex_dict = {}\n for node in g.all_nodes():\n synset_id = node.synset.synset_id\n if synset_id in synset_on_vertex_dict:\n logger.warning(\"ID of some synset is not unique.\")\n continue\n synset_on_vertex_dict[synset_id] = node\n\n num_of_edge = 0\n for edge in g2.all_edges():\n num_of_edge += 1\n logger.info(\"%d/%d\", num_of_edge, g2.num_edges())\n\n parent_sumo_concept = edge.source().sumo\n child_sumo_concept = edge.target().sumo\n\n if parent_sumo_concept not in plwn2sumo_dict:\n logger.warning(\"The mapping file doesn't contain sumo concept '%s'.\", parent_sumo_concept)\n continue\n if child_sumo_concept not in plwn2sumo_dict:\n logger.warning(\"The mapping file doesn't contain sumo concept '%s'.\", child_sumo_concept)\n continue\n\n for parent_syn_id in plwn2sumo_dict[parent_sumo_concept]:\n if parent_syn_id not in synset_on_vertex_dict:\n logger.warning(\"The mapping file contains synset '%d' that is not in the graph.\", parent_syn_id)\n continue\n p_node = synset_on_vertex_dict[parent_syn_id]\n for child_syn_id in plwn2sumo_dict[child_sumo_concept]:\n if child_syn_id not in synset_on_vertex_dict:\n logger.warning(\"The mapping file contains synset '%d' that is not in the graph.\", child_syn_id)\n continue\n ch_node = synset_on_vertex_dict[child_syn_id]\n \n g.add_edge(p_node,\n ch_node,\n [(\"rel\", edge.rel)],\n simply=True)\n \n\n return g", "def test_grouping(self):\n n1 = self.create(NodeItem, UML.Node)\n n2 = self.create(NodeItem, UML.Node)\n\n self.group(n1, n2)\n\n assert n2.subject in n1.subject.nestedNode\n assert n1.subject not in n2.subject.nestedNode", "def update_node(self, node):\n return node.update()", "def relations_to(self, end_node):", "def merge_node(**kwargs):\n\n if kwargs['TypeName'] is None:\n kwargs['TypeName'] = 'None'\n\n kwargs['Hash'] = get_node_hash(kwargs)\n\n if '(*)' in kwargs['TypeDefinition']:\n # The type contains a pointer to a function prototype\n # args['TypeDefinition'] = HRESULT (*)(IRpcChannelBuffer *, RPCOLEMESSAGE *, ULONG *)\n # args['TypeName'] = SendReceive\n index = kwargs['TypeDefinition'].find('(*)')\n TypeDefinition = kwargs['TypeDefinition'][:index + 2] + kwargs['TypeName'] \\\n + ')' + kwargs['TypeDefinition'][index + 3:]\n kwargs['TypeName'] = ';'\n kwargs['TypeDefinition'] = TypeDefinition\n\n if not nodes_cache.get(kwargs['Hash']):\n nodes_cache.update({kwargs['Hash']: (kwargs['TypeDefinition'], kwargs['TypeName'], kwargs['NodeLabel'])})\n if kwargs['StartNodeHash'] != kwargs['Hash']:\n relationships_cache.add(kwargs['StartNodeHash'] + \" \" + kwargs['Hash'] + \" \" + kwargs['RelationshipType'])\n\n return kwargs['Hash']", "def test_node_pointing_properties():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl 
right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n assert a.left.val == 7\n assert a.right.val == 42", "def merge_personroots_of_two_nodes(name1: str, value1: str,\n name2: str, value2: str) -> None:\n left_node = read_node(name=name1, value=value1)\n right_node = read_node(name=name2, value=value2)\n if left_node is None or right_node is None:\n return\n\n left_personroot_node = get_or_create_personroot_node(person_node=left_node)\n right_personroot_node = get_or_create_personroot_node(person_node=right_node)\n if left_personroot_node is None or right_personroot_node is None:\n return\n\n if left_personroot_node == right_personroot_node:\n # They are already connected, we are done.\n return\n\n merge_two_personroot_nodes(left_personroot_node=left_personroot_node,\n right_personroot_node=right_personroot_node)\n return", "def add(element1, element2):\n \n newtag = Tag.addTags(element1.tag, element2.tag)\n \n #have to wrap the attributes in dict() to avoid a bus error\n newattribs = Attrib.addAttribs(dict(element1.attrib), dict(element2.attrib))\n \n element1.tag = newtag\n element1.text = Text.addText(element1.text, element2.text)\n element1.tail = Text.addText(element1.tail, element2.tail)\n \n for i in element1.attrib:\n del element1.attrib[i]\n for key in newattribs.keys():\n try:\n element1.set(key, newattribs[key])\n except TypeError:\n log = logging.getLogger()\n log.error('TypeError: %s' % str(sys.exc_info()[1]))\n log.error('key = %s\\tnewattribs[key] = %s' % (str(key), str(newattribs[key])))\n raise\n \n return element1", "def swap_nodes(self, a, b):\n if a == b:\n return\n if len(self) < 2:\n return\n\n nodeA = nodeB = None\n curr_node = self._header\n\n while curr_node is not None and not (nodeA and nodeB):\n if curr_node._element == a and not nodeA:\n nodeA = curr_node\n elif curr_node._element == b and not nodeB:\n nodeB = curr_node\n curr_node = curr_node._next\n\n if curr_node is None:\n raise Empty(\"Not in list\")\n\n precessorA = nodeA._prev\n successorA = nodeA._next\n precessorB = nodeB._prev\n successorB = nodeB._next\n\n precessorA._next = successorA._prev = nodeB\n precessorB._next = successorB._prev = nodeA\n\n nodeA._prev, nodeB._prev = nodeB._prev, nodeA._prev\n nodeA._next, nodeB._next = nodeB._next, nodeA._next", "def add_node(old_node_dict, old_to_new_node_ids_dict, new_accession, new_db_api, aliases):\n\n # getting the old node id, and the old node's properties\n old_node_id = old_node_dict[\"id\"]\n old_node_alt_accession = old_node_dict[\"alt_accession\"]\n old_node_name = old_node_dict[\"name\"]\n tax_id = old_node_dict[\"tax_id\"]\n pathways = old_node_dict[\"pathways\"]\n\n if aliases:\n aliases += \"|\" + old_node_dict[\"name\"]\n else:\n aliases = old_node_dict[\"name\"]\n\n if old_node_dict[\"aliases\"]:\n aliases += \"|\" + old_node_dict[\"aliases\"]\n\n new_node_dict = {\n \"name\" : new_accession,\n \"alt_accession\" : old_node_alt_accession,\n \"tax_id\" : tax_id,\n \"pathways\" : pathways,\n \"aliases\" : aliases,\n \"topology\": \"\"\n }\n\n # inserting the node to the PSI-MI SQLite\n new_db_api.insert_unique_node(new_node_dict)\n new_node_dict['id'] = new_db_api.last_row_id\n # getting the new last row id of the inserted node\n new_node_id = new_node_dict['id']\n\n # if the node maps to more than one swissprot uniprot id it will be inserted for every swissprot id and\n # this function will be called for every insertion\n if not old_to_new_node_ids_dict.has_key(old_node_id):\n old_to_new_node_ids_dict[old_node_id] = [new_node_id]\n 
else:\n old_to_new_node_ids_dict[old_node_id].append(new_node_id)", "def addGeounitNodes(node1, node2):\n \n from operator import add\n \n argsDict = {} \n argsDict[\"raw\"] = node1.raw + node2.raw\n argsDict[\"raw_housing\"] = node1.raw_housing + node2.raw_housing\n if node1.syn and node2.syn:\n argsDict[\"syn\"] = node1.syn + node2.syn\n if node1.cons and node2.cons:\n argsDict[\"cons\"] = addConstraints(node1.cons,node2.cons)\n else:\n argsDict[\"cons\"] = {}\n if node1.invar and node2.invar:\n argsDict[\"invar\"] = addInvariants(node1.invar,node2.invar)\n else:\n argsDict[\"invar\"] = {}\n argsDict[\"geocodeDict\"] = node1.geocodeDict\n \n aggregatedNode = nodes.geounitNode(node1.geocode, **argsDict)\n \n return aggregatedNode", "def headsofunion(h1, h2):\n res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)\n return {ctx.node() for ctx in res}", "def _replace(self, x, y):\n y.parent = x.parent\n if x is self.root:\n self.root = y\n return\n elif x is x.parent.left:\n x.parent.left = y\n else:\n x.parent.right = y\n\n self.update(y, -1)", "def connect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n app.edges[src_id].add(trg_id)\n self.mark_as_unsaved()\n self.update()", "def test_check_duplication_entry_at_restoring_two_chain(self):\n ref_entity = Entity.objects.create(name=\"ReferredEntity\", created_user=self._user)\n ref_entries = [\n Entry.objects.create(name=\"ref-%d\" % i, created_user=self._user, schema=ref_entity)\n for i in range(3)\n ]\n ref_entity_2 = Entity.objects.create(name=\"ReferredEntity2\", created_user=self._user)\n ref_entries_2 = [\n Entry.objects.create(name=\"ref2-%d\" % i, created_user=self._user, schema=ref_entity_2)\n for i in range(3)\n ]\n\n # initialize EntityAttrs\n attr_info = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], \"value\": ref_entries[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries[1:],\n },\n }\n attr_info_2 = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], \"value\": ref_entries_2[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries_2[1:],\n },\n }\n for attr_name, info in attr_info.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=self._entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity)\n\n self._entity.attrs.add(attr)\n\n for attr_name, info in attr_info_2.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=ref_entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity_2)\n\n ref_entity.attrs.add(attr)\n\n # initialize target entry\n entry = Entry.objects.create(name=\"entry\", schema=self._entity, created_user=self._user)\n entry.complement_attrs(self._user)\n\n for attr_name, info in attr_info.items():\n attr = entry.attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n ref_entries[0].complement_attrs(self._user)\n for attr_name, info in attr_info_2.items():\n attr = ref_entries[0].attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n # delete target entry at first\n entry.delete()\n # sync referral entries from database\n 
[x.refresh_from_db() for x in ref_entries]\n [x.refresh_from_db() for x in ref_entries_2]\n\n self.assertFalse(ref_entries_2[1].is_active)\n\n # create same name entry\n Entry.objects.create(name=\"ref2-1\", created_user=self._user, schema=ref_entity_2)\n\n # check duplicate entry\n ret = entry.check_duplication_entry_at_restoring(entry_chain=[])\n self.assertTrue(ret)", "def union(self, a, b):\n if (a in self.node_id) and (b in self.node_id) and (self.node_id[a] != self.node_id[b]):\n self.merge(a, b)\n elif (a in self.node_id) or (b in self.node_id):\n self.add(a,b)\n else:\n self.create_new_group(a,b)", "def overwrite_field(self,cells=None,edges=None,source='depth_max',target='depth_mean'):\n if cells is not None:\n self.cells[target][cells]=self.cells[source][cells]\n if edges is not None:\n self.edges[target][edges]=self.edges[source][edges]", "def _attach(self, p, t1, t2):\n node = self._validate(p)\n if not self.is_leaf(p):\n raise ValueError('position must be leaf')\n if not type(self) is type(t1) is type(t2): # all 3 trees must be same type\n raise TypeError('Tree types must match')\n self._size += len(t1) + len(t2)\n if not t1.is_empty(): # attched t1 as left subtree of node\n t1._root._parent = node\n node._left = t1._root\n t1._root = None # set t1 instance to empty\n t1._size = 0\n if not t2.is_empty(): # attched t2 as right subtree of node\n t2._root._parent = node\n node._right = t2._root\n t2._root = None # set t2 instance to empty\n t2._size = 0", "def updateNode(self,updates=None):\n\n logging.info(\"editing this node\")\n if updates is not None:\n for key in updates:\n setattr(self,key,updates[key])\n memcache.set(self.cid,self)" ]
[ "0.65650755", "0.63940614", "0.6173685", "0.61118114", "0.6035203", "0.6019497", "0.59971595", "0.5948517", "0.5918453", "0.5903322", "0.5868278", "0.57634944", "0.57632744", "0.57355", "0.57273066", "0.57142293", "0.56911355", "0.56886685", "0.56635535", "0.5628927", "0.56055725", "0.55853903", "0.55739737", "0.55636734", "0.5556606", "0.5548197", "0.55423623", "0.5519405", "0.55168647", "0.55155087", "0.5482634", "0.54791814", "0.547195", "0.5443041", "0.5425605", "0.54251593", "0.5418061", "0.540684", "0.5403774", "0.53644174", "0.53557324", "0.53509086", "0.53505987", "0.53446114", "0.5339875", "0.5331421", "0.5322575", "0.53085", "0.5306427", "0.53024936", "0.5295214", "0.52914953", "0.52869385", "0.5281978", "0.52814096", "0.52759814", "0.5272431", "0.52703404", "0.52684414", "0.52519566", "0.52458656", "0.5240623", "0.522696", "0.52195746", "0.5213547", "0.5209253", "0.5207037", "0.51966023", "0.51923114", "0.51915646", "0.5186293", "0.517495", "0.5174144", "0.5171674", "0.5164192", "0.51600206", "0.51542455", "0.5153265", "0.5152972", "0.5150228", "0.514518", "0.5136156", "0.51355374", "0.51317513", "0.5131202", "0.5126398", "0.51234215", "0.51211286", "0.51169485", "0.5116199", "0.5110361", "0.5108347", "0.51020736", "0.50958616", "0.50944823", "0.5091344", "0.5088935", "0.50861156", "0.5081634", "0.5076421" ]
0.5121291
87
creates randomized colors of shape size_x by size_y
def create_world(size_x = 100, size_y=100): colors = np.random.randint(0,2,(size_x,size_y)).tolist() for row in range(len(colors)): for col in range(len(colors[row])): if (colors[row][col]== 1): colors[row][col] = 'R' else: colors[row][col] = 'G' r = [[10.0 for i in range(size_y)] for i in range(size_x)] g = [[10.0 for i in range(size_y)] for i in range(size_x)] b = [[10.0 for i in range(size_y)] for i in range(size_x)] RGB = [] for i in range(size_x): for j in range(size_y): if colors[i][j] == 'R': r[i][j] = 255.0 else: b[i][j] = 255.0 RGB.append(b[i][j]) RGB.append(r[i][j]) RGB.append(g[i][j]) RGB = np.array(RGB).reshape(size_x,size_y,3) return RGB, colors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randColor():\r\n return np.array([random.random(), random.random(), random.random()]).reshape((1, 1, 3))", "def random_color_gen():\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n return [r, g, b]", "def random_color(num):\n # 为每个类别的边界框随机匹配相应颜色\n np.random.seed(80)\n COLORS = np.random.randint(0, 256, size=(num, 3), dtype='uint8') #\n return COLORS", "def random_color():\n colormode(255)\n return randint(0, 255), randint(0, 255), randint(0, 255)", "def mutate(self, size):\n rand = random.random()\n if rand <= 0.5:\n print u\"changing colour\"\n idx = random.randrange(0, 4)\n value = random.randrange(0, 256)\n colour = list(self.colour)\n colour[idx] = value\n self.colour = tuple(colour)\n else:\n print u\"changing point\"\n idx = random.randrange(0, len(self.points))\n point = generate_point(size[0], size[1])\n self.points[idx] = point", "def create_random_color(self):\n # Create a list of n colors.\n n = 4\n dc = 1.0 / (n-1)\n color_list = [i*dc for i in range(n)]\n\n if self.is_scaffold:\n rgb = [1.0, 1.0, 1.0]\n else:\n rgb = [random.choice(color_list) for i in range(3)]\n # Don't generate blue (that's for a scaffold in cadnano) or black.\n if (rgb[0] == 0.0) and (rgb[1] == 0.0):\n rgb[0] = random.choice(color_list[1:])\n if rgb[2] == 0.0: \n rgb[2] = random.choice(color_list[1:]) \n #__if (rgb[0] == 0) and (rgb[1] == 0)\n #__if self.is_scaffold\n return rgb", "def generate_random_colours_list(rng: random.Random, size: int) -> List[TupleInt3]:\n return [random_colour(rng) for _ in range(size)]", "def implement_random(self):\n shape = set()\n for coord in INDICES:\n if randint(0, 1):\n shape.add(coord)\n self.implement_shape(shape)", "def randcolor():\n return (randint(0,255), randint(0,255), randint(0,255))", "def generate_random_data(size, x_min=X_MIN, x_max=X_MAX, y_min=Y_MIN, y_max=Y_MAX):\n result = []\n for _i in range(size):\n result.append((randint(x_min, x_max), randint(y_min, y_max)))\n\n return result", "def random_color():\n\n rgbl=[255,0,0]\n random.shuffle(rgbl)\n return tuple(rgbl)", "def color_from_ind(i: int) -> np.ndarray:\n np.random.seed(i)\n return np.random.random(3)", "def random_shape(height, width):\n # Shape\n shape = random.choice([\"square\", \"circle\", \"triangle\"])\n # Color\n color = tuple([random.randint(0, 255) for _ in range(3)])\n # Center x, y\n buffer = 20\n y = random.randint(buffer, height - buffer - 1)\n x = random.randint(buffer, width - buffer - 1)\n # Size\n s = random.randint(buffer, height // 4)\n return shape, color, (x, y, s)", "def random_color() -> Tuple[int, int, int]:\n return randrange(0, 255), randrange(0, 255), randrange(0, 255)", "def createPickColor():\n color_list = []\n\n for i in range(50, 450, 100): #Create the 4 shapes to show colors\n point1 = g.Point(50, i)\n point2 = g.Point(100, i+50)\n shape = g.Rectangle(point1, point2)\n color_list.append(shape)\n\n #Set the right colors\n color_list[0].setFill(\"Blue\")\n color_list[1].setFill(\"Green\")\n color_list[2].setFill(\"Yellow\")\n color_list[3].setFill(\"Red\")\n\n return color_list", "def randomcolour(self):\n r = random.randrange(1, 255)\n g = random.randrange(1, 255)\n b = random.randrange(1, 255)\n self.colour((r,g,b))", "def _random_color() -> List[float]:\n return [np.random.uniform(), np.random.uniform(), np.random.uniform()]", "def randcolor():\r\n r = random(0.0, 1.0)\r\n g = random(0.0, 1.0)\r\n b = random(0.0, 1.0)\r\n return vec(r, g, b) # A color is a three-element vec\r", "def _genRandomColor():\n b = random.randint(0, 255)\n g = 
random.randint(0, 255)\n r = random.randint(0, 255)\n return (b, g, r)", "def generate_colour():\n red = random.randrange(0, 256)\n green = random.randrange(0, 256)\n blue = random.randrange(0, 256)\n alpha = random.randrange(0, 256)\n return (red, green, blue, alpha)", "def colors(k): \n ret = []\n for i in range(k):\n ret.append((random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)))\n return ret", "def test_shaped_instance(self, seed):\n dim = Dimension(\"yolo\", \"norm\", 0.9, shape=(3, 2))\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert_eq(dists.norm.rvs(0.9, size=(3, 2)), samples[0])\n\n assert dim.shape == (3, 2)\n\n dim = Dimension(\"yolo\", \"norm\", 0.9, shape=4)\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert_eq(dists.norm.rvs(0.9, size=4), samples[0])\n\n assert dim.shape == (4,)", "def create_color():\n r = random.randint(0,255)\n g = random.randint(0,255)\n b = random.randint(0,255)\n a = random.randint(0,255)\n return introcs.RGB(r,g,b,a)", "def createColors():\n\n colors = \"Blue\", \"Green\", \"Yellow\", \"Red\"\n color_list = []\n color_colum = []\n\n for i in range(15): #Create 2D list of 15*25 with colors\n color_colum = []\n for k in range(25):\n color_colum.append(random.choice(colors))\n color_list.append(color_colum)\n \n return color_list", "def uniqueish_color():\n return plt.cm.gist_ncar(np.random.random())", "def create_rand_grid(grid_size):\n\n return [[randint(0, 1) for x in range(0, grid_size)] for y in range(0, grid_size)]", "def rand_branch_color():\n red = random.randint(0, 100)\n green = random.randint(175, 255)\n blue = random.randint(0, 100)\n return (red, green, blue)", "def genrandimg(args) -> None:\n\n size = (int(args.x), int(args.y))\n fp = Image.new(\"RGB\", size)\n data = []\n\n if not args.c: # If color\n for i in range(size[0]*size[1]):\n r = random.choice([0x00, 0xff])\n data.append((r, r, r)) # Each RGB value is the same random value\n else: # Else black-and-white\n for i in range(size[0]*size[1]):\n r = [random.choice(range(0, 256)) for _ in range(0, 3)]\n r = (r[0], r[1], r[2]) # Choose 3 random numbers for different RGB values\n data.append(r)\n\n fp.putdata(data)\n print(\"Saving to %s...\" % args.o)\n fp.save(args.o)\n fp.close()", "def make_random_nodes(size=100, ndim=2, expand=True):\n \n coords = np.random.random(size=size*ndim).reshape((-1,ndim))\n if expand:\n coords = coords * size**(1/ndim)\n return coords", "def _rand_color(self):\n\n return self._rand_elem(COLOR_NAMES)", "def get_random_color():\n\n def get_random_value():\n \"\"\" Return a random value between 0.0 and 1.0 \"\"\"\n return randint(0, 255) / 256.0\n\n return Vector(get_random_value(), get_random_value(), get_random_value())", "def gen_ball():\n ball_radius = randint(30, 80)\n ball_x = randint(ball_radius, screen_width - ball_radius)\n ball_y = randint(ball_radius, screen_height - ball_radius)\n ball_color = COLORS[randint(0, len(COLORS) - 1)]\n return [ball_color, ball_x, ball_y, ball_radius]", "def random_grid_generator(self, *input_shape):\n rnd = np.random.RandomState(1)\n rnd.rand(input_shape)", "def shuffle_colors(mutated_genome):\n mutated_genome", "def rand_bottom_color():\n red = random.randint(120, 160)\n green = random.randint(0, 90)\n blue = random.randint(0, 40)\n return (red, green, blue)", "def random_image(self, height, width):\n # Pick random background color (blueish)\n bg_color = np.array([random.randint(200, 255) for _ in range(3)])\n bg_color[0] = bg_color[0] / 4\n bg_color[1] = 
bg_color[1] / 4\n # Generate a few random ships and record their\n # bounding boxes\n ships = []\n boxes = []\n N = random.randint(1, 4)\n for _ in range(N):\n shape, dims = self.random_ship(height, width)\n ships.append((shape, dims))\n x, y, s, _, _ = dims\n boxes.append([y - s, x - s, y + s, x + s])\n # Apply non-max suppression wit 0.3 threshold to avoid\n # ships covering each other\n keep_ixs = utils.non_max_suppression(\n np.array(boxes), np.arange(N), 0.3)\n ships = [s for i, s in enumerate(ships) if i in keep_ixs]\n return bg_color, ships", "def solid(t, coord, ii, n_pixels, random_values):\n\n\n return (100,100,100)", "def __init__(self, x, y):\n self.x_cord = x\n self.y_cord = y\n self.color_list = ['#FE1B04', '#FE9C04', '#F6FE04', '#01FD03', '#01FDB4', '#0174FD', '#C801FD']\n self.color = random.choice(self.color_list)\n self.occupied = None", "def __init__(self):\n # start x position\n self.x = random.randrange(size_x)\n # start y position\n self.y = - random.randrange(100)\n # drift x (amount of change each loop along the x axis)\n self.dx = random.randrange(3) - random.randrange(6)\n # drift y (amount of change each loop along the y axis)\n self.dy = random.randrange(1, 20) + random.randrange(4)\n # the size of the circular snowflake\n self.size = random.randrange(1, 4)\n # the colour of the snowflake (from sludgy grey to snowy white)\n c = random.randrange(200, 256)\n self.color = [c, c, c]", "def random_colour(rng: random.Random) -> TupleInt3:\n r = rng.randint(0, 255)\n g = rng.randint(0, 255)\n b = rng.randint(0, 255)\n return r, g, b", "def get_random_color():\n r=random.randint(0,255)\n g=random.randint(0,255)\n b=random.randint(0,255)\n return(r,g,b)", "def randcolour():\n colour = [0,0,0]\n while sum(colour)<450:\n for i in range(3):\n colour[i] = int(random.random()*255)\n return(tuple(colour))", "def random_crop_params(self, img, output_size):\n w, h = img.size\n th, tw = output_size\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = random.randint(0, h - th)\n j = random.randint(0, w - tw) \n return i, j, th, tw", "def random_color():\n return random.choice(colors)", "def generate_world(x_size, y_size):\n\n\tdef make_blank_world():\n\t\t\"\"\"\n\t\tCreates an x-by-y list of lists of zeroes.\n\t\t\"\"\"\n\t\tblank_array = [[Blank() for j in range(y_size + 1)] for i in range(x_size + 1)]\n\t\treturn blank_array\n\n\n\tdef check_surroundings(x_coord, y_coord, value):\n\t\t\"\"\"\n\t\tIf the variable world has already been defined, it checks all x and y coords within one square (aka, checks the 8 surrounding squares) for a given value. 
If that value is present in 1 or more squares, returns True; else, False.\n\t\t\"\"\"\n\t\tfor i in range(3):\n\t\t\tfor j in range(3):\n\t\t\t\texamining = world[x_coord - 1 + i][y_coord - 1 + j]\n\t\t\t\tif examining.name == value:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\tpass\n\t\treturn False\n\n\n\tworld = make_blank_world()\n\n\tworld[random.randint(2, x_size-2)][random.randint(2, y_size-2)] = Water()\n\n\tfor i in range(x_size):\n\t\tfor j in range(y_size):\n\t\t\tseed = random.random()\n\t\t\tif check_surroundings(i, j, 'water'):\n\t\t\t\tif seed >= 0.5:\n\t\t\t\t\tworld[i][j] = Water()\n\t\t\t\telif seed >= 0.4:\n\t\t\t\t\tworld[i][j] = Tree()\n\t\t\t\telse:\n\t\t\t\t\tworld[i][j] = Grass()\n\t\t\telif not check_surroundings(i, j, 'tree'):\n\t\t\t\tif seed >= 0.5:\n\t\t\t\t\tworld[i][j] = Tree()\n\t\t\t\telse:\n\t\t\t\t\tworld[i][j] = Grass()\n\t\t\telse:\n\t\t\t\tworld[i][j] = Grass()\n\treturn [row[:y_size+1] for row in world[:x_size+1]]", "def _random_color():\n return random.randint(0, 255)", "def get_random_color():\n def get_random_value():\n \"\"\" Return a random value between 0.0 and 1.0 \"\"\"\n return randint(0, 255) / 256.0\n return Vector(get_random_value(), get_random_value(), get_random_value())", "def _random_color(self):\n levels = range(0, 256)\n return tuple(random.choice(levels) for _ in range(3))", "def square_parameters_from_seed(seed):\n # canvas\n canvas_mode = \"RGB\"\n canvas_size_in_pixels = (200, 200)\n canvas_background_color_rgb = (255, 255, 255) # white\n \n # rectangle\n rectangle_position = position_from_seed(seed) # ((0, 0), (10, 10))\n rectangle_fill = color_from_seed(seed)\n rectangle_outline = color_from_seed(seed + str(random.randint(1, 100))) # offset\n \n return canvas_mode, canvas_size_in_pixels, canvas_background_color_rgb, rectangle_position, rectangle_fill, rectangle_outline", "def makeRGB(ncol = 16, minc = 32, maxc = 216):\n subd = int((maxc - minc)/ncol)\n numpy.random.seed(1)\n RGB = [[]]\n for r in range(minc, maxc, subd):\n for g in range(minc, maxc, subd):\n for b in range(minc, maxc, subd):\n RGB.append(numpy.array([r,g,b]))\n #print \"# of colors: \", len(self.RGB)\n rgb_order = numpy.random.permutation(len(RGB)) # randomize the order\n RGB = [RGB[x] for x in rgb_order]\n return RGB", "def rand_color(x):\n random.seed(x)\n color = random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)\n color = '#%02x%02x%02x' % color\n return color", "def randcol():\n col = [randint(0,255) for _ in range(3)]\n return gfx.Color(tuple(col))", "def random_sample(grid_size):\r\n g = grid_size\r\n x_range = g[1] - g[0]\r\n\r\n y_range = g[3] - g[2]\r\n\r\n x_off = g[0]\r\n y_off = g[2]\r\n (x,y) = (x_range*np.random.ranf()+x_off,y_range*np.random.ranf()+y_off) \r\n return (x,y)", "def get_color_list(cluster_count):\n color_list = []\n for i in xrange(cluster_count):\n color_list.append(random_color_gen())\n return color_list", "def randomBitmap(size):\n\n b = bitmap(size)\n xmax, ymax = size\n for x in xrange(xmax):\n for y in xrange(ymax):\n b.set(x, y, random.randint(0,1))\n return b", "def new_color(mutated_genome):\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if color_mode == 'RGB':\n color_red = random.randint(0,255)\n color_green = random.randint(0,255)\n color_blue = random.randint(0,255)\n color = (color_red, color_blue, color_green)\n else: #color_mode == 'L':\n color = random.randint(0,255)\n mutated_genome[index][0] = color", "def random_color() -> Tuple[int, ...]:\n red = random.randrange(0, 255)\n 
blue = random.randrange(0, 255)\n green = random.randrange(0, 255)\n return (red, blue, green)", "def random_colors(N,bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i/N,1,brightness)for i in range(N)]\n colors = list(map(lambda c: clolorsys.hsv_to_rgb(*c),hsv))\n random.shuffle(colors)\n return colors", "def gen_shape(size):\n\n def _factors(n):\n \"\"\" Returns the divisors of n\n\n >>> _factors(4)\n {1, 2, 4}\"\"\"\n gen = ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)\n return set(sum(gen, []))\n\n assert size > 0\n if size == 1:\n return (1,)\n\n shape = []\n rem = int(size / np.prod(shape))\n while rem > 1:\n if len(shape) > 6:\n shape.append(rem)\n break\n\n shape.append(np.random.choice(list(_factors(rem))))\n rem = int(size / np.prod(shape))\n\n return tuple(int(i) for i in shape)", "def random_shapes(image_shape, max_shapes, *, min_shapes=1, min_size=2,\n max_size=None, multichannel=True, num_channels=3,\n shape_names=None, intensity_range=None, allow_overlap=False,\n num_trials=30, random_state=None, class_is=None,\n fill_is=None, colors=None, textures=None,\n background_texture=None):\n\n assert class_is in ['shape', 'fill']\n assert fill_is in ['shape', 'random']\n\n if min_size > image_shape[0] or min_size > image_shape[1]:\n raise ValueError(\n 'Minimum dimension must be less than ncols and nrows')\n max_size = max_size or max(image_shape[0], image_shape[1])\n\n if not multichannel:\n num_channels = 1\n\n if intensity_range is None:\n intensity_range = (0, 254) if num_channels == 1 else ((0, 254), )\n else:\n tmp = (intensity_range, ) if num_channels == 1 else intensity_range\n for intensity_pair in tmp:\n for intensity in intensity_pair:\n if not (0 <= intensity <= 255):\n msg = 'Intensity range must lie within (0, 255) interval'\n raise ValueError(msg)\n\n image_shape = (image_shape[0], image_shape[1], num_channels)\n if background_texture is None:\n image = np.full(image_shape, 255, dtype=np.uint8)\n else:\n image = background_texture.copy()\n target = np.full(image_shape, 0, dtype=np.uint8)\n filled = np.zeros(image_shape, dtype=bool)\n labels = []\n masks = []\n\n num_shapes = random_state.randint(min_shapes, max_shapes + 1)\n\n # One random color per shape, unconnected to the shape itself. 
This\n # allows one to test a model's ability to segment the shapes with\n # respect to shape only, irrespective of color.\n if colors is None and textures is None:\n colors = _generate_random_colors(\n num_shapes, num_channels, intensity_range, random_state)\n\n # Create a list of (SHAPE, COLOR, CLASS) tuples.\n\n samples = []\n\n shape_choices = []\n if shape_names is None:\n shape_choices = SHAPE_CHOICES\n else:\n generator_map = {get_shape_name(sc): sc for sc in SHAPE_CHOICES}\n for shape_name in shape_names:\n shape_choices.append(generator_map[shape_name])\n\n for shape_num in range(num_shapes):\n object_spec = generate_object_spec(\n shape_choices,\n colors=colors, textures=textures, fill_is=fill_is,\n class_is=class_is, random_state=random_state)\n\n shape_size = (min_size, max_size)\n\n for trial_num in range(num_trials):\n # Pick start coordinates.\n column = random_state.randint(image_shape[1])\n row = random_state.randint(image_shape[0])\n point = (row, column)\n try:\n mask_idx, label = object_spec.generator(\n point, image_shape, shape_size, random_state)\n except ArithmeticError:\n # Couldn't fit the shape, skip it.\n continue\n\n print('Fit a shape')\n\n mask = np.zeros(image.shape[:2]).astype(np.uint8)\n cmask = mask.copy()\n mask[mask_idx] = 1\n\n # Check if there is an overlap where the mask is nonzero.\n if allow_overlap or not filled[mask].any():\n print('Going to overlay a shape')\n mask[mask_idx] = 255\n print('findContours')\n cv2.imwrite('mask.png', mask)\n print('mask', mask.min(), mask.max(), file=sys.stderr)\n _, contours, _ = cv2.findContours(\n mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n print('drawContours', file=sys.stderr)\n cmask = cv2.drawContours(cmask, contours, -1, (255,255,255), 1)\n cmask = mask.astype(bool)\n # Calling `overlay_object` has side effects, such\n # as setting the pixels of `filled` to `True` where\n # the object exists.\n image = overlay_object(\n image, target, filled, cmask, object_spec)\n labels.append(label)\n break\n else:\n warn('Could not fit any shapes to image, '\n 'consider reducing the minimum dimension')\n\n if not multichannel:\n image = np.squeeze(image, axis=2)\n\n return image, labels, target", "def gen_data(min_coord, max_coord, size):\r\n data = np.random.randint(min_coord, max_coord, size)\r\n return data", "def getRandColor():\n\treturn (randrange(0,256), randrange(0,256), randrange(0,256))", "def uniform_random(self) -> None:\n\n size = self.circ_size\n random.seed(self.seed)\n\n gates = [self.h, self.x, self.y, self.z, self.s, self.t, self.cx]\n candidates = set(range(size))\n\n for i in range(size):\n for j in range(size):\n to_apply = random.choice(gates)\n\n num_qubits = 2 if to_apply == self.cx else 1\n targets = random.sample(candidates, num_qubits)\n to_apply(*targets)\n\n if self.meas: self.measure(self.qr, self.cr)", "def change_color():\n return random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)", "def __call__(self, shape):\n return np.random.uniform(low=self.minval, high=self.maxval, size=shape)", "def generate_image(filename, x_size=350, y_size=350):\n global timeflag\n timeflag = 0\n\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(13, 15)\n green_function = build_random_function(13, 15)\n blue_function = build_random_function(13,15)\n print \"red_function:\\t\" + str(red_function)+\"\\n\"\n print \"green_function:\\t\" + str(green_function)+\"\\n\"\n print \"blue_function:\\t\" + str(blue_function)+\"\\n\"\n\n # 
Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y)),\n color_map(evaluate_random_function(green_function, x, y)),\n color_map(evaluate_random_function(blue_function, x, y))\n )\n\n im.save(filename)", "def create_random_shapes(shapesList):\n for _ in range(5):\n shapesList.append( Circle(randint(1,5)) )\n\n for _ in range(5):\n shapesList.append( Rectangle(randint(1,5), randint(1,5)) )", "def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)", "def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)", "def __init__(self, shape_num):\n self.shape_num = shape_num\n if shape_num == 1:\n self.width = 4\n self.height = 4\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[0][2] = 1\n self.grid[1][2] = 1\n self.grid[2][2] = 1\n self.grid[3][2] = 1\n self.color = Color.SilverPink\n elif shape_num == 2:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[0][1] = 1\n self.grid[0][2] = 1\n self.grid[1][2] = 1\n self.grid[2][2] = 1\n self.color = Color.TuftsBlue\n elif shape_num == 3:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[2][1] = 1\n self.grid[0][2] = 1\n self.grid[1][2] = 1\n self.grid[2][2] = 1\n self.color = Color.ChromeYellow\n elif shape_num == 4:\n self.width = 2\n self.height = 2\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[0][0] = 1\n self.grid[0][1] = 1\n self.grid[1][0] = 1\n self.grid[1][1] = 1\n self.color = Color.Independence\n elif shape_num == 5:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[1][0] = 1\n self.grid[2][0] = 1\n self.grid[0][1] = 1\n self.grid[1][1] = 1\n self.color = Color.ForestGreen\n elif shape_num == 6:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[1][1] = 1\n self.grid[0][2] = 1\n self.grid[1][2] = 1\n self.grid[2][2] = 1\n self.color = Color.Byzantine\n elif shape_num == 7:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[0][0] = 1\n self.grid[1][0] = 1\n self.grid[1][1] = 1\n self.grid[2][1] = 1\n self.color = Color.Coquelicot\n self.top_space = self.get_top_space()\n self.bottom_space = self.get_bottom_space()\n self.x = int((12 - self.width) / 2)\n self.y = 1 - self.top_space\n self.last_drop_time = perf_counter()", "def paint_a_picture():\n # Make a training set (many random i,j coord and an x by y box around that coord to start with)\n # Throw it into the net\n # Test how it does for some random coordinate inputs\n pass", "def randomcorners():\n r = lambda x: random.randint(int(x*0.4), int(x*0.6))\n cx = r(gs.DEFAULTS['width'])\n cy = r(gs.DEFAULTS['height'])\n\n w = int(gs.DEFAULTS['width'] * random.random() * 0.2)\n h = int(gs.DEFAULTS['height'] * random.random() * 0.2)\n\n rcrns = [(cx-w, cy-h), (cx+w, cy-h), (cx+w, cy+h), (cx-w, cy+h)]\n random.shuffle(rcrns)\n\n return rcrns", "def randcolor(self, left_edge, right_edge):\n color_pixel = (\n random.randint(\n left_edge, right_edge), 
random.randint(\n left_edge, right_edge), random.randint(\n left_edge, right_edge))\n return color_pixel", "def randomize(x, y):\r\n permutation = np.random.permutation(y.shape[0])\r\n shuffled_x = x[permutation, :, :, :]\r\n shuffled_y = y[permutation]\r\n return shuffled_x, shuffled_y", "def get_random_coords(width, height):\n return randrange(1, width-2), randrange(1, height-2)", "def random(self, width, height, seed = None):\n self.grid = [ [''] * width for i in range(height) ]\n random.seed(seed)\n start = ( random.randint(0, len(self.grid) - 1),\n random.randint(0, len(self.grid[0]) - 1)\n )\n visited = set([start])\n self._createPath(start, visited)\n start = ( random.randint(0, len(self.grid) - 1), 0 )\n finish = ( random.randint(0, len(self.grid) - 1),\n len(self.grid[0]) - 1 )\n self.grid[start[0]][start[1]] += '^'\n self.grid[finish[0]][finish[1]] += '$'\n return self.grid, start, finish", "def random_color_func(word=None, font_size=None, position=None,\n orientation=None, font_path=None, random_state=None):\n if random_state is None:\n random_state = Random()\n return \"hsl(%d, 80%%, 50%%)\" % random_state.randint(0, 255)", "def draw_multicolor_square(t,sz):\r\n for i in [\"red\", \"purple\", \"hotpink\", \"blue\"]:\r\n t.color(i)\r\n t.forward(sz)\r\n t.left(90)", "def form(x, y, s):\n rnd = int(random(3))\n shuffle(colors) # this is my own implementation of shuffle (rn_utils)\n noStroke()\n fill(colors[0])\n pushMatrix()\n translate(x, y)\n rotate(int(random(4)) * PI * 0.5)\n if random(1) < 0.5:\n rect(0, 0, s + 0.9, s, s, 0, s, s)\n # myShape(s * 0.75, -s * 0.25, s * 0.5, 0);\n else:\n rect(0, 0, s + 0.9, s, s, s, 0, s)\n # myShape(s * 0.75, s * 0.25, s * 0.5, TAU * 0.75);\n\n fill(colors[3])\n ellipse(0, 0, s * 0.8, s * 0.8)\n\n fill(colors[1])\n ellipse(0, 0, s * 0.5, s * 0.5)\n\n # if (rnd == 0) drawVortex(0, 0, s * 0.5);\n # if (rnd == 1) ellipse(0, 0, s * 0.5, s * 0.5);\n # if (rnd == 2) {\n # \tfill(colors[1]);\n # \tellipse(0, 0, s * 0.5, s * 0.5);\n # \tdrawHeart(0, s * 0.05, s * 0.35);\n # }\n\n if random(1) < 0.1:\n fill(colors[0])\n arc(0, 0, s, s, PI, TAU)\n\n popMatrix()", "def random_block(data, shape, rng=None):\n if rng is None:\n rng = np.random\n d_shape = data.shape\n assert len(d_shape) == len(shape)\n corner = [rng.randint(diff + 1)\n for diff in (np.array(d_shape) - np.array(shape))]\n slices = [slice(c, c + size) for c, size in zip(corner, shape)]\n return data[slices]", "def gen_super_ball():\n super_ball_radius = 80\n super_ball_x = randint(super_ball_radius, screen_width - super_ball_radius)\n super_ball_y = randint(super_ball_radius, screen_height - super_ball_radius)\n super_ball_color = (RED, YELLOW, GREEN)\n return [super_ball_color, super_ball_x, super_ball_y, super_ball_radius]", "def fillColors(self, color, size):\n lst = []\n for row in range(size):\n s = []\n for column in range(size):\n s.append(color)\n lst.append(s)\n return lst", "def clusterGen(k, colors):\n clusters = []\n # generate the clusters randomly\n # for i in range(k):\n # r = random.randint(0, 255)\n # g = random.randint(0, 255)\n # b = random.randint(0, 255)\n # clusters.append((r, g, b))\n\n # generate the clusters that exist in colors\n l = len(colors)\n for i in range(k):\n r = random.randint(0, l)\n cluster = colors[r][1]\n clusters.append(cluster)\n\n return clusters", "def generate_image(self):\n\t\tcenters = self.generate_centers()\n\t\timg = Image.new('RGB', (self.config.image_size, self.config.image_size), color=(0,0,0))\n\t\tshapes = np.random.randint(2, 
size=len(centers))\n\t\tdrawer = ImageDraw.Draw(img)\n\t\tr = int(0.05 * self.config.image_size)\n\t\tR = []\n\t\tfor i in range(len(centers)):\n\t\t\tcoor = (centers[i][0] - r , centers[i][1] - r, centers[i][0] + r, centers[i][1] + r)\n\t\t\tif shapes[i] < 0.5:\n\t\t\t\tdrawer.rectangle(coor, fill=COLOR[i])\n\t\t\telse:\n\t\t\t\tdrawer.ellipse(coor, fill=COLOR[i])\n\t\t\tR.append([centers[i], i, shapes[i]])\n\t\treturn np.array(img), R", "def _get_rand_array(self):\n return np.random.random((self.w + 1, self.h + 1, 2))", "def randomize(x, y):\n permutation = np.random.permutation(y.shape[0])\n shuffled_x = x[permutation, :, :, :]\n shuffled_y = y[permutation]\n return shuffled_x, shuffled_y", "def randomize(x, y, x_len):\r\n permutation = np.random.permutation(y.shape[0])\r\n shuffled_x = x[permutation, :]\r\n shuffled_y = y[permutation]\r\n shuffled_x_len = x_len[permutation]\r\n return shuffled_x, shuffled_y, shuffled_x_len", "def mondrian(shape=(256, 256), nx=5, ny=8, seed=4):\n rstate = np.random.RandomState(seed)\n min_dx = 0\n while min_dx < 3:\n xp = np.sort(np.round(rstate.rand(nx - 1) * shape[0]).astype(np.int))\n xp = np.concatenate(((0,), xp, (shape[0],)))\n min_dx = np.min(np.diff(xp))\n min_dy = 0\n while min_dy < 3:\n yp = np.sort(np.round(rstate.rand(ny - 1) * shape[1]).astype(np.int))\n yp = np.concatenate(((0,), yp, (shape[1],)))\n min_dy = np.min(np.diff(yp))\n img = np.zeros(shape)\n for ix, x in enumerate(xp[:-1]):\n for iy, y in enumerate(yp[:-1]):\n slices = [slice(x, xp[ix + 1]), slice(y, yp[iy + 1])]\n val = rstate.rand(1)[0]\n img[slices] = val\n return img", "def in_square():\n return np.random.random_sample(size=2)", "def gencastshapes():\n for n in range(32):\n yield [n]\n ndim = randrange(4, 6)\n minshape = 1 if randrange(100) > 80 else 2\n yield [randrange(minshape, 5) for _ in range(ndim)]\n ndim = randrange(2, 4)\n minshape = 1 if randrange(100) > 80 else 2\n yield [randrange(minshape, 5) for _ in range(ndim)]", "def gencastshapes():\n for n in range(32):\n yield [n]\n ndim = randrange(4, 6)\n minshape = 1 if randrange(100) > 80 else 2\n yield [randrange(minshape, 5) for _ in range(ndim)]\n ndim = randrange(2, 4)\n minshape = 1 if randrange(100) > 80 else 2\n yield [randrange(minshape, 5) for _ in range(ndim)]", "def color(step: int=10) -> Tuple[int, int, int]:\n # Randomly seed the r g b values\n r, g, b = (random_uniform(0, 255), random_uniform(0, 255),\n random_uniform(0, 255))\n\n # Randomly determine if each r g and b value is increasing or not\n r_inc = True\n g_inc = True\n b_inc = True\n r_step = random_uniform(step)\n g_step = random_uniform(step)\n b_step = random_uniform(step)\n\n # Yield the initial r, g, b values\n yield r, g, b\n\n # Loop and yeild forever\n while True:\n # If r is increasing\n if r_inc:\n # Increment r by the step\n r += r_step\n # Ensure that the next step will be within the limits\n # if not then set the flag to decreasing\n r_inc = r < 255 - r_step\n # If r is decreasing\n else:\n # Decrement r by the step\n r -= r_step\n # Ensure that the next step will be within the limits\n # if not then set the flag to increasing\n r_inc = r < r_step\n\n # See above\n if g_inc:\n g += g_step\n g_inc = g < 255 - g_step\n else:\n g -= g_step\n g_inc = g < g_step\n\n # See above\n if b_inc:\n b += b_step\n b_inc = b < 255 - b_step\n else:\n b -= b_step\n b_inc = b < b_step\n\n # Yield the red, green, and blue values\n yield r, g, b", "def getRandomColor():\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n return \"rgb(\" 
+ str(r) + \", \" + str(g) + \", \" + str(b) +\")\"", "def randomize(self, seed_density):\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (rand.random() <= seed_density):\r\n self.cells[x][y] = 1", "def random_colors(N, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors", "def random_colors(N, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors", "def random_colors(N, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors", "def generate_rgb(exist_colors: List[List[int, int, int]]) -> List[int, int, int]:\n largest_min_distance = 0\n best_color = random_rgb()\n if len(exist_colors) > 0:\n for _ in range(100):\n color = random_rgb()\n current_min_distance = min(_color_distance(color, c) for c in exist_colors)\n if current_min_distance > largest_min_distance:\n largest_min_distance = current_min_distance\n best_color = color\n _validate_color(best_color)\n return best_color", "def setRandomColor():\n setColor(getRandomColor())", "def generateData(numPoints,x,y):\n\tfor i in range(0,numPoints):\n\t\tif (i % 2 == 0):\n\t\t\tx.append(random.normalvariate(25, 15))\n\t\t\ty.append(random.normalvariate(25, 15))\n\t\t\t \n\t\t\t\n\t\telse:\n\t\t\tx.append(random.normalvariate(75, 15))\n\t\t\ty.append(random.normalvariate(75, 15))" ]
[ "0.72354615", "0.681743", "0.67759746", "0.65923506", "0.647772", "0.6391293", "0.6389239", "0.6381156", "0.63713896", "0.6345931", "0.6305542", "0.62956303", "0.62650055", "0.62281466", "0.6208814", "0.6204611", "0.61832035", "0.617489", "0.6139714", "0.6136026", "0.6124625", "0.60273397", "0.5979785", "0.5954721", "0.5953351", "0.59367096", "0.59269786", "0.5872487", "0.5867898", "0.58649373", "0.5863814", "0.5857725", "0.5849454", "0.58488363", "0.5840831", "0.5819631", "0.58142674", "0.5808373", "0.5803865", "0.580289", "0.57961285", "0.5793648", "0.57897997", "0.5780756", "0.577911", "0.57772267", "0.57746154", "0.5768984", "0.5761045", "0.575557", "0.57533544", "0.5751436", "0.5747045", "0.5746949", "0.5743405", "0.57424295", "0.5739999", "0.5727315", "0.5725776", "0.5714488", "0.5712618", "0.5671577", "0.56647426", "0.56626636", "0.5660762", "0.56543434", "0.5651858", "0.5646305", "0.5646305", "0.56461775", "0.56359875", "0.56201756", "0.56035423", "0.5598364", "0.55977815", "0.55950975", "0.5589667", "0.55865", "0.55818075", "0.55785084", "0.55709827", "0.55676335", "0.55430406", "0.5537412", "0.55317825", "0.5529022", "0.5527047", "0.55225253", "0.5510649", "0.5506659", "0.5506659", "0.5502224", "0.5496834", "0.5496452", "0.5491433", "0.5491433", "0.5491433", "0.5485239", "0.5464394", "0.5462579" ]
0.7120646
1
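Many of the negative snippets in the record above generate random RGB colours or shapes. As a point of reference only, here is a minimal, self-contained sketch of that colour pattern; the helper name is an illustrative assumption, not part of the dataset, and it uses only the standard library.

import random

def random_rgb(rng=random):
    # Return an (r, g, b) tuple with each channel in 0..255.
    return tuple(rng.randint(0, 255) for _ in range(3))

# Example: a reproducible colour from a seeded generator.
print(random_rgb(random.Random(42)))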
Return the current running average.
def get_current(self): return self.x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg", "def average(self):\n return (self.current + self.last) / 2.0", "def current_mean(self):\r\n values = self._timings\r\n return np.mean(values)", "def average(self):\n return self.summation() / self.count()", "def get_avg(self) -> float:\n if self._cur_elem_count < 1:\n return 0\n self._mtx.acquire()\n avg = self._sum / float(self._cur_elem_count)\n self._mtx.release()\n return avg", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def average_waiting(self):\n return self._average_waiting", "def getAverage(self):\n return sum(self.scores) / len(self.scores)", "def averageTime(self):\n \n pass", "def get_runs_to_average(self):\n\n if Test.performance_params: return int(Test.performance_params[1])\n elif self._check_performance: return self._runs_to_average\n else: return None", "def get_value(\n self\n ) -> float:\n\n return self.average", "def average_pending(self):\n return self._average_pending", "def average(self):\n if self._average is None: # only first time\n self._average = self._obj.mean(dim='t')\n self._average.attrs = self._obj.attrs # we need units in quiver\n\n return self._average", "def get_average(self) -> float:\n return sum(self._scores) / len(self._scores)", "def average(self, start, end):\n return self.integrate(start, end) / (end - start)", "def running_avg (mylist, N):\n import numpy as np\n \n cumsum = np.cumsum(np.insert(mylist, 0, 0))\n return (cumsum[N:] - cumsum[:-N]) / float(N)", "def avg(self):\n if not self.committed_together:\n return 0\n\n return round(statistics.mean(self.committed_together))", "def load_average(self):\n return _favg(self.load_samples)", "def average(self):\n return self.properties.get('average')", "def getAvg(self):\r\n\t\treturn self.data['avg']", "def average(self):\n return np.mean(self.buf[:self._size], axis=0)", "def mean(self) -> typing.Tuple[float, float]:\r\n self.clean_window()\r\n return (\r\n (self.sum_frames_rec / self.window_size),\r\n (self.sum_frames_proc / self.window_size)\r\n )", "def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average", "def avgcpu(self):\n return (self._total_cpu['value'] / self._total_cpu['count']) if self._total_cpu['count'] else 0", "def average(self):\n total = 0\n for t in self.memory:\n total += t.reward\n return total/self.__len__()", "def getAvg(self):\r\n\t\tdata = self.pair.data\r\n\t\tif data['avg'] == None:\r\n\t\t\treturn None\r\n\t\treturn 1. 
/ self.pair.data['avg']", "def ram_average(self):\n return _favg(self.ram_samples)", "def avgtr(self):\n return np.diff(self.trtimes).mean()", "def running_mean(x, N):\n cumsum = np.cumsum(np.insert(x, 0, 0))\n return (cumsum[N:] - cumsum[:-N]) / float(N)", "def get_mean(self):\n return self.serie.mean()", "def sm_measure_current(self,num_readings=1):\n self.sm.set_measurement_function(\"CURRENT\")\n self.sm.format_readings(\"CURRENT\")\n ret = average(self.sm.take_measurement(num_readings))\n self.sm_restore_display\n return ret", "def average(self):\n if self._average is None:\n self._average = sum([df.df for df in self])/len(self)\n return self._average", "def _get_mean(self, sums, step):\n\n return sums/step", "def average_performance(self):\n\n print(f\"Average performance: {self.performance / 10}\")", "def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm", "def get_average_complete_progress(self):\n cnt_events = len(self.get_events())\n if cnt_events > 0:\n return sum([self.get_average_progress(event) for event in self.get_events()]) * 1.0 / cnt_events\n return 0.0", "def update_average(self,result):\n a = 1/self.iters\n b = 1 - a\n self.average = a * result + b * self.average\n self.iters += 1", "def mean(self):\n return self._mean", "def mean(self):\n return self._mean", "def print_avg():", "def avgtime(self):\n return (self._total_time['value'] / 1000) / self._total_time['count'] if self._total_time['count'] else 0", "def em_mean(self) -> float:\n if self.__total_pulls == 0:\n raise Exception('Number of pulls is 0. No empirical mean.')\n return self.__total_rewards / self.__total_pulls", "def avg_training_time(self):\n return self._avg_training_time", "def averaging(self, value: int):\n self._averaging = value\n\n self.events.averaging()\n self._update_avg()\n\n self.refresh()", "def calculateAverage(self): \n if not self.lastTransferAverage: \n size=[0,0,0,0]\n for i in range(0,4):\n if self.lastTransferredChannel & (1 << i):\n size[i]=self.lastNbrSamplesPerSeg\n self.lastAverageArray = [zeros(size[0]),zeros(size[1]),zeros(size[2]),zeros(size[3])]\n nbrSamp=self.lastNbrSamplesPerSeg\n for i in range(0,4):\n if self.lastTransferredChannel & (1 << i):\n nbrSeg=self.lastNbrSegmentsArray[i]\n for j in range (0,nbrSamp):\n for k in range(0,nbrSeg): \n self.lastAverageArray[i][j]+=self.lastWaveformArray[i][k*nbrSamp+j]\n self.lastAverageArray[i][j]/=nbrSeg\n self.lastAverageCalculated=True\n else: print \"NOn averaged data are not available\"", "def get_average(data):\n average = sum(data) / len(data)\n\n return average", "def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)", "def mean(self):\n return self.vmean", "def value(self):\n if len(self.fscore_history) == 0:\n return 0\n else:\n return np.mean(self.fscore_history)", "def average(self, returns):\r\n return returns.mean() * self.day", "def running_mean(x, N):\n cumsum = np.cumsum(np.insert(x, 0, 0)) \n return (cumsum[N:] - cumsum[:-N]) / float(N)", "def get_average_progress(self, event=None):\n cnt_items = len(self)\n sum_progress = None\n\n if event == self.EVENT_TOTAL_PROGRESS:\n return self.get_average_complete_progress()\n\n if event is None:\n sum_progress = sum(map(lambda node: node.get_average_progress(), self.values()))\n else:\n sum_progress = self.get_sum_progress_event(event)\n\n if cnt_items > 0:\n return sum_progress * 1.0 / cnt_items\n\n return 0.0", "def getavgvel(self):\n if self.total_time:\n return 
(6.28)/(self.total_time)", "def mean_run_time(self) -> float:\n return float(self.result_array.sum(axis=0).mean())", "def mean(self) -> float:\n return self._interval_sum / len(self.intervals)", "def get_avg_loss(self):\n if self.n_batches > 0:\n avg_loss = self.loss / self.n_batches\n self.loss = 0\n self.n_batches = 0\n return avg_loss\n else:\n return 0", "def global_mean(self):\n if self._global_mean is None:\n self._global_mean = np.mean([r for (_, _, r) in\n self.all_ratings()])\n\n return self._global_mean", "def average(self):\n s = self.sum()\n flat_shape = self.flatten_shape(self.shape)\n num_of_elements = fct.reduce(opr.mul, flat_shape, 1)\n average = s / num_of_elements\n return average", "def avg_inference_time(self):\n return self._avg_inference_time", "def getMean(self):\n return self.mean", "def average_speed(self):\n return self._average_speed", "def running_mean(sequence: list):\n if not sequence:\n return []\n\n mean = []\n \"\"\"\n [1] = 1 / 1\n [1,2] = 3 / 2 \n [1,2,3] = 6 / 3\n \"\"\"\n for idx, num in enumerate(sequence):\n\n sum_total = sum(sequence[:(idx + 1)])\n result = sum_total / (idx + 1)\n\n mean.append(round(result, 2))\n\n return mean", "def average(self,start_window, end_window):\n query = f\"select avg(age) from `{self.table_id}` where timestamp between {start_window} and {end_window}\"\n query_job = self.client.query(query)\n return query_job.result", "def calcAverage(dat):\n return sum(dat)/len(dat)", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def main():\n print(average([2, 4, 6, 8, 20, 50, 70]))", "def mean(self):\n\n return time_stat(self, stat=\"mean\")", "def readAvg(self):\n self.flushInput()\n\n if (self.model == 'TDS'):\n self.write('ACQuire:NUMAVg?\\n')\n return int(self.readline())\n #elif (self.model == 'GDS'):\n # FIXME: I'll implement this later. 
I need to do some\n # testing, re: whether GDS returns the actual average\n # number, or log-base-2 of the average number.", "def average(data, event):\n if len(data) == 0:\n return 0\n\n score = 0\n # scores = []\n count = 0\n for i in data:\n count += 1\n if event == 'Swim' or event == 'Run':\n num = time_seconds(i[event])\n #print(\"first if\")\n #Sprint(num)\n else:\n num = int(i[event])\n #print(\"second if\")\n #print(num)\n #scores[count] =\n #print(\"end of loop count\" + str(count))\n score += num\n #print (\"score\" + str(score))\n\n # total = 0\n # for x in range(0,len(scores)):\n # total += scores[x]\n score = float(score)\n\n return score / count", "def calculateAverage(self, data):\n\n nValidTrials = data['nValid'][-1]\n nRewardTrials = data['nRewarded'][-1]\n return float(nRewardTrials)/nValidTrials", "def get_avg_points(self):\n pass", "def get_mean(self):\n try:\n return sum(self.speakers.values()) / len(self.speakers)\n except (ZeroDivisionError):\n return 0.0", "def averaged_risk(self):\n return self._averaged_risk", "def averaged_risk(self):\n return self._averaged_risk", "def mean(self):\n return self._mean_func", "def avg_hops(self):\n return self._avg_hops", "def pc_work_time_avg(self) -> \"float\":\n return _beamforming_swig.randomsampler_sptr_pc_work_time_avg(self)", "def get_metric(self, reset: bool = False):\n average_value = self._total_value / self._count if self._count > 0 else 0\n if reset:\n self.reset()\n return average_value", "def pc_work_time_avg(self) -> \"float\":\n return _beamforming_swig.doaesprit_sptr_pc_work_time_avg(self)", "def incavg(val = None):\n\n cnt = 0\n avg = 0\n \n if not val is None:\n cnt = 1\n avg = val\n\n while True:\n val = (yield avg)\n\n if val is None:\n pass # next was called\n elif cnt == 0: # first value\n cnt = 1\n avg = val\n else:\n cnt += 1\n avg = avg + (val - avg) / float(cnt)", "def mean(self) -> float:\n return self._data.mean()", "def pc_work_time_avg(self) -> \"float\":\n return _beamforming_swig.beamformer_sptr_pc_work_time_avg(self)", "def getJudgeAverage(self):\n\n try:\n judgeNotesLogger.info(\"getJudgeAverage: Retrieving Judge Average from '%s'\", self.notesFile)\n ratingSum = self.getRatingSum()\n self.average = ratingSum / self.numJudgedFiles\n judgeNotesLogger.debug(\"getJudgeAverage: '%s' / '%s' = '%s'\", str(ratingSum),\n str(self.numJudgedFiles), str(self.average))\n except:\n judgeNotesLogger.warning(\"getJudgeAverage: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))", "def mean_value(self):\n\n return self._system.mean()", "def get_current_loss(self):\n return sum(self.recent_loss_array)/sum(self.recent_loss_bs_array)", "def calc_running_avg_loss(loss, running_avg_loss, summary_writer, step, decay=0.99):\r\n if running_avg_loss == 0:\t# on the first iteration just take the loss\r\n running_avg_loss = loss\r\n else:\r\n running_avg_loss = running_avg_loss * decay + (1 - decay) * loss\r\n running_avg_loss = min(running_avg_loss, 12)\t# clip\r\n loss_sum = tf.Summary()\r\n tag_name = 'running_avg_loss/decay=%f' % (decay)\r\n loss_sum.value.add(tag=tag_name, simple_value=running_avg_loss)\r\n summary_writer.add_summary(loss_sum, step)\r\n logging.info('running_avg_loss: %f', running_avg_loss)\r\n return running_avg_loss", "def get_mean(self):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")", "def cum_avg(mylist):\n cumsum, cum_aves = [0], []\n \n for i, x in enumerate(mylist, 1):\n cumsum.append(cumsum[i-1] + x)\n cum_ave = 
(cumsum[i])/(i)\n cum_aves.append(cum_ave)\n \n return cum_aves", "def average_rating(self):\n ratings = Rating.objects.filter(game=self)\n\n if len(ratings):\n # Sum all of the ratings for the game\n total_rating = 0\n for rating in ratings:\n total_rating += rating.value\n\n # Calculate the averge and return it.\n average = total_rating / len(ratings)\n return average\n\n # else: \n return 0", "def mean_average_position():\n pass", "def avgX(self):\n return np.mean(self.getx())", "def avg():\n\n # call sum method to add up the values in the collection & div by the num of items\n # call len method to compute the # of vals in collection which is divided by sum total \n mean = sum(inlist) / len(inlist)\n return mean \n\n # alternate method would be calling the reduce method with lamda \n # return reduce(lambda a, b: a + b, inlist) / len(inlist)", "def _baseline_value(self):\n t = self['primary']\n return np.median(t.data[:int(10e-3/t.dt)])", "def mean_STD(self,counter):\n \n \n pass", "def _avg_sample(self):\n samples = [0] * self.num_samples\n for i in range(self.num_samples):\n samples[i] = self.sensor.measure_distance()\n time.sleep(self.sample_delay)\n if self.drop_extremes:\n samples.sort()\n samples = samples[1:-1]\n return sum(samples) / len(samples)", "def PM_averages(self):\n return int(self.ask(self.headStr('PM')+'AVG?'))", "def get_running_mean(data,time_window):\n \n print('--> Starting to calculate running mean') \n timer_start = dt.now()\n filt = [1./float(time_window)]*int(time_window)\n running_mean = np.apply_along_axis(lambda m: np.convolve(m, filt, mode='valid'), axis=0, arr=data)\n running_mean = np.append(np.ones([len(data)-len(running_mean),*data.shape[1:]])*np.nan,running_mean,axis=0)\n print('--> Completed calculating running mean (%.1f seconds)' \\\n % (dt.now()-timer_start).total_seconds())\n return running_mean", "def mean(self):\n return self.sum / self.sum_weights", "def get_session_mean():\n try:\n float_times, len_times = convert_to_float(times, 'average')\n return add_zero(round(sum(float_times) / len_times, 2))\n except ZeroDivisionError:\n return \"\"" ]
[ "0.8117838", "0.7966467", "0.7702329", "0.73908114", "0.7304135", "0.72477657", "0.72477657", "0.72477657", "0.7218233", "0.7120395", "0.7092222", "0.7014438", "0.6919139", "0.6896268", "0.6838284", "0.6832612", "0.6796031", "0.67581385", "0.6699277", "0.6677944", "0.6667284", "0.6653532", "0.6649288", "0.66455144", "0.66367584", "0.6627842", "0.66154367", "0.66113824", "0.66105634", "0.6596803", "0.6587595", "0.6578735", "0.65706044", "0.6568766", "0.6557095", "0.65292424", "0.65184945", "0.6504683", "0.65038425", "0.6493262", "0.6493262", "0.64553285", "0.64548147", "0.644089", "0.64341843", "0.6432483", "0.64089113", "0.6404245", "0.6404085", "0.64022315", "0.6399192", "0.6376075", "0.6373743", "0.6369944", "0.63630295", "0.6353521", "0.63377416", "0.6334266", "0.6327003", "0.6315976", "0.63147175", "0.6311026", "0.63102454", "0.63078207", "0.6302265", "0.6300757", "0.6299686", "0.62940985", "0.62731814", "0.62715805", "0.62676126", "0.6266507", "0.62663114", "0.62653595", "0.62503356", "0.62503356", "0.62501657", "0.62434536", "0.6234348", "0.6218618", "0.6205228", "0.6187658", "0.61858964", "0.6181874", "0.6181727", "0.61666626", "0.61555153", "0.6152591", "0.61367", "0.6125378", "0.6124288", "0.61237997", "0.611139", "0.61068976", "0.61001045", "0.6097284", "0.6096141", "0.6095701", "0.60866445", "0.60854787", "0.60772645" ]
0.0
-1
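The record above pairs a running-average query with a one-line getter, and several of its negatives compute the mean as an accumulated sum divided by a count. A minimal sketch of that accumulator pattern follows; the class and method names are illustrative assumptions, not taken from the dataset.

class RunningAverage:
    """Keeps a running mean of the values seen so far."""

    def __init__(self):
        self.x = 0.0   # current running average
        self.n = 0     # number of values accumulated

    def update(self, value):
        # Incremental mean update: avg += (value - avg) / count
        self.n += 1
        self.x += (value - self.x) / self.n

    def get_current(self):
        # Return the current running average (0.0 before any update).
        return self.x

For example, feeding 1, 2 and 3 through update() leaves get_current() at 2.0.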
Finds a dihedral angle adjacent to the selected atoms that includes a new atom
def _find_dihedral(selected): atom_name = lambda atom: atom.fullName() atom_mass = lambda atom: atom.mass() # Loop over possible nearest neighbors for a2 in selected: # Find the new atom attached_to_a2 = sorted([a for a in a2.bondedTo() \ if a not in selected], key=atom_name) for a1 in sorted(attached_to_a2, key=atom_mass, reverse=True): # Find the third atom attached_to_a3 = sorted([a for a in a2.bondedTo() \ if (a in selected) and (a!=a1)], key=atom_name) for a3 in sorted(attached_to_a3, key=atom_mass, reverse=True): # Find the last atom attached_to_a4 = sorted([a for a in a3.bondedTo() \ if (a in selected) and (a!=a2)], key=atom_name) for a4 in sorted(attached_to_a4, key=atom_mass, reverse=True): return (a1, a2, a3, a4) print 'Selected atoms:', selected raise Exception('No new dihedral angle found!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dihedral_calculator():\n\n\t# Prime with first 3 points\n\tp1 = Vector3((yield None))\n\tp2 = Vector3((yield None))\n\tp3 = Vector3((yield None))\n\n\t# Set up for first angle\n\tlastpoint = p3\n\tlastdisp = p3 - p2\n\tlastnormal = ((p2 - p1) @ lastdisp).normalize()\n\n\tangle = None\n\n\t# For each point starting with the 4th, we can compute a new angle\n\twhile True:\n\n\t\t# Yield the last angle (None the first time), get the next point\n\t\tnextpoint = Vector3((yield angle))\n\n\t\t# Displacement from previous point to current\n\t\tnextdisp = nextpoint - lastpoint\n\n\t\t# Normal vector to plane containing last 3 points\n\t\tnextnormal = (lastdisp @ nextdisp).normalize()\n\n\t\t# This one's complicated... see step 3 in source.\n\t\tx = lastnormal * nextnormal\n\t\ty = (lastnormal @ lastdisp.normalize()) * nextnormal\n\t\tangle = -math.atan2(y, x)\n\n\t\t# Current values used as previous in next loop\n\t\tlastpoint = nextpoint\n\t\tlastdisp = nextdisp\n\t\tlastnormal = nextnormal", "def get_dihedral_angles(self):\n mol = self.m\n c1 = mol.GetConformer(-1)\n torsma = '[!$(*#*)&!D1]~[!$(*#*)&!D1]'\n q = Chem.MolFromSmarts(torsma)\n matches = mol.GetSubstructMatches(q)\n nmat = len(matches)\n dic = {}\n for match in matches:\n j = match[0]\n k = match[1]\n bond = mol.GetBondBetweenAtoms(j, k)\n aj = mol.GetAtomWithIdx(j)\n ak = mol.GetAtomWithIdx(k)\n hj, hk = [ _hyb[_a.GetHybridization()] for _a in [aj,ak] ]\n iok1 = ( hj not in [2,3] )\n iok2 = ( hk not in [2,3] )\n if iok1 or iok2: continue\n for b1 in aj.GetBonds():\n if (b1.GetIdx() == bond.GetIdx()):\n continue\n i = b1.GetOtherAtomIdx(j)\n for b2 in ak.GetBonds():\n if (b2.GetIdx() == bond.GetIdx()) or (b2.GetIdx() == b1.GetIdx()):\n continue\n l = b2.GetOtherAtomIdx(k)\n # skip 3-membered rings\n if (l == i):\n continue\n _dang = rdMolTransforms.GetDihedralDeg(c1, i,j,k,l)\n dang = abs(_dang)\n assert dang <= 180.0\n ias4 = (i,j,k,l)\n if not self.wH:\n if np.any([ self.zs[iaa]==1 for iaa in ias4 ]):\n continue\n if self.key in ['z']:\n #print('atsi=',ias4, 'zsi=', [_zs[iaa] for iaa in ias4])\n zi,zj,zk,zl = [ self.zs[iaa] for iaa in ias4 ]\n if (zj==zk and zi>zl) or (zj>zk):\n ias4 = (l,k,j,i)\n #torsions.append(ias4)\n #_zi,_zj,_zk,_zl = [ zs[_] for _ in ias4 ]\n #typez = '%d-%d-%d-%d'%(_zi,_zj,_zk,_zl)\n type4 = tuple([self.zs[iaa] for iaa in ias4])\n if type4 in list(dic.keys()):\n dic[type4] += [dang]\n else:\n dic[type4] = [dang]\n elif self.key in ['ia','i']:\n type4 = ias4\n dic[type4] = dang\n else:\n raise Exception('#unknown key')\n return dic", "def sp2_dihedrals(atoms):\n\n #problems with atoms inbuilt dihedral method (doesn't match gaussview/jmol at all)\n #so we'll use one taken from http://stackoverflow.com/questions/20305272/dihedral-torsion-angle-from-four-points-in-cartesian-coordinates-in-python\n def get_dihedral(p):\n b = p[:-1] - p[1:]\n b[0] *= -1\n v = np.array([v - (v.dot(b[1])/b[1].dot(b[1])) * b[1] for v in [b[0], b[2]]])\n # Normalize vectors\n v /= np.sqrt(np.einsum('...i,...i', v, v)).reshape(-1,1)\n b1 = b[1] / np.linalg.norm(b[1])\n x = np.dot(v[0], v[1])\n m = np.cross(v[0], b1)\n y = np.dot(m, v[1])\n return np.degrees(np.arctan2(y, x))\n\n mol = to_molmod(atoms)\n data = []\n\n for i in range(len(atoms)):\n if len(mol.graph.neighbors[i]) == 3:\n atom_indices = [i] + list(mol.graph.neighbors[i])\n atom_positions = np.array([atoms[temp_index].position for temp_index in atom_indices])\n #dihedral = atoms.get_dihedral(atom_indices)\n dihedral = get_dihedral(atom_positions)\n result = (i, 
dihedral)\n data.append(result)\n\n return data", "def calculate_dihedral_angles(mol, dihedral_atom_sets):\n\n # Create list for the dihedrals (to be ordered in the same order as the input dihedral sets)\n dihedral_angles = []\n # Now calculate the dihedral angles between the sets identified previously\n conf = mol.GetConformer()\n # Loop through the angles => 2-3 is the rotatable bonds, 1,4 are the neighbours of 2,3 respectively\n for at1, at2, at3, at4 in dihedral_atom_sets:\n # Get the coordinates of the positions\n pos1 = conf.GetAtomPosition(at1)\n pos2 = conf.GetAtomPosition(at2)\n pos3 = conf.GetAtomPosition(at3)\n pos4 = conf.GetAtomPosition(at4)\n # Need to calculate three vectors 1->2, 2->3, 3->4\n vec1 = pos2 - pos1\n vec2 = pos3 - pos2\n vec3 = pos4 - pos3\n # Get the normals to the two planes (vec1-vec2 plane and vec2-vec3 plane))\n cross12 = vec1.CrossProduct(vec2)\n cross23 = vec2.CrossProduct(vec3)\n # Normalise the normals\n cross12.Normalize()\n cross23.Normalize()\n # Calculate dot-product and then inverse cosine to get the angle\n dot_prod = cross12.DotProduct(cross23)\n dihedral_rad = math.acos(dot_prod)\n dihedral_deg = 180*dihedral_rad/math.pi\n dihedral_angles.append(dihedral_deg)\n return dihedral_angles", "def calc_dihedral(v1, v2, v3, v4):\n ab = v1 - v2\n cb = v3 - v2\n db = v4 - v3\n u = ab ** cb\n v = db ** cb\n w = u ** v\n angle = u.angle(v)\n # Determine sign of angle\n try:\n if cb.angle(w) > 0.001:\n angle = -angle\n except ZeroDivisionError:\n # dihedral=pi\n pass\n return angle", "def addDihedralBond(a1, a2, length, angleInfo, dihedInfo):\n\n\tif a1.molecule == a2.molecule:\n\t\traise ValueError(\"Atoms to be bonded must be in different models\")\n\n\t# first, get the distance correct\n\tfrom chimera import Xform, cross, angle, Point\n\tdvector = a1.xformCoord() - a2.xformCoord()\n\tdvector.length = dvector.length + length\n\topenState = a2.molecule.openState\n\topenState.globalXform(Xform.translation(dvector))\n\n\t# then angle\n\tif angleInfo:\n\t\tatoms, angleVal = angleInfo\n\t\tp1, p2, p3 = [a.xformCoord() for a in atoms]\n\t\taxis = cross(p1-p2, p2-p3)\n\t\tcurAngle = angle(p1, p2, p3)\n\t\tdelta = angleVal - curAngle\n\t\tv2 = p2 - Point(0.0, 0.0, 0.0)\n\t\ttrans1 = Xform.translation(v2)\n\t\tv2.negate()\n\t\ttrans2 = Xform.translation(v2)\n\t\ttrans1.multiply(Xform.rotation(axis, delta))\n\t\ttrans1.multiply(trans2)\n\t\topenState.globalXform(trans1)", "def getDihedrals(self):\n uniqKpList = self.getFlagData('DIHEDRAL_FORCE_CONSTANT')\n uniqPeriodList = self.getFlagData('DIHEDRAL_PERIODICITY')\n uniqPhaseList = self.getFlagData('DIHEDRAL_PHASE')\n # for list below, true atom number = abs(index)/3 + 1\n dihCodeHList = self.getFlagData('DIHEDRALS_INC_HYDROGEN')\n dihCodeNonHList = self.getFlagData('DIHEDRALS_WITHOUT_HYDROGEN')\n dihCodeList = dihCodeHList + dihCodeNonHList\n properDih = []\n improperDih = []\n condProperDih = [] # list of dihedrals condensed by the same quartet\n #atomPairs = []\n atomPairs = set()\n for i in xrange(0, len(dihCodeList), 5):\n idAtom1 = dihCodeList[i] / 3 # remember python starts with id 0\n idAtom2 = dihCodeList[i+1] / 3\n # 3 and 4 indexes can be negative: if id3 < 0, end group interations\n # in amber are to be ignored; if id4 < 0, dihedral is improper\n idAtom3raw = dihCodeList[i+2] / 3 # can be negative -> exclude from 1-4vdw\n idAtom4raw = dihCodeList[i+3] / 3 # can be negative -> Improper\n idAtom3 = abs(idAtom3raw)\n idAtom4 = abs(idAtom4raw)\n dihTypeId = dihCodeList[i+4] - 1\n atom1 = 
self.atoms[idAtom1]\n atom2 = self.atoms[idAtom2]\n atom3 = self.atoms[idAtom3]\n atom4 = self.atoms[idAtom4]\n kPhi = uniqKpList[dihTypeId] # already divided by IDIVF\n period = int(uniqPeriodList[dihTypeId]) # integer\n phase = uniqPhaseList[dihTypeId]# angle given in rad in prmtop\n atoms = [atom1, atom2, atom3, atom4]\n dihedral = Dihedral(atoms, kPhi, period, phase)\n if idAtom4raw > 0:\n try: atomsPrev = properDih[-1].atoms\n except: atomsPrev = []\n properDih.append(dihedral)\n if idAtom3raw < 0 and atomsPrev == atoms:\n condProperDih[-1].append(dihedral)\n else:\n condProperDih.append([dihedral])\n pair = (atom1, atom4)\n #if atomPairs.count(pair) == 0 and idAtom3raw > 0:\n if idAtom3raw > 0:\n atomPairs.add(pair)\n else:\n improperDih.append(dihedral)\n try: atomPairs = sorted(atomPairs)\n except: pass\n self.properDihedrals = properDih\n self.improperDihedrals = improperDih\n self.condensedProperDihedrals = condProperDih # [[],[],...]\n self.atomPairs = atomPairs # set((atom1, atom2), ...)\n self.printDebug(\"getDihedrals done\")", "def bond_angles_wrt_bond(current, next, xy, NL, KL):\n n_tmp = NL[next, np.argwhere(KL[next].ravel())]\n if len(n_tmp) == 1:\n print 'le: The bond is a lone bond, not part of a triangle, so returning neighbor as next particle'\n neighbors = n_tmp\n else:\n neighbors = np.delete(n_tmp, np.where(n_tmp == current)[0])\n # print 'n_tmp = ', n_tmp\n # print 'neighbors = ', neighbors\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[next, 1],\n xy[neighbors, 0] - xy[next, 0]).ravel() -\n np.arctan2(xy[current, 1] - xy[next, 1],\n xy[current, 0] - xy[next, 0]).ravel(),\n 2 * np.pi)\n return angles, neighbors", "def generate_dihedral_matrices(protein):\n\n #double check maths for this to be safe (particularly signs)\n\n natoms = len(protein.atoms)\n ndihedrals = len(protein.dihedrals)\n\n A = np.zeros([ndihedrals, 3*natoms])\n force_constants = np.zeros(ndihedrals)\n for dihedral in protein.dihedrals:\n \n atom1_id = dihedral.atom1.id\n atom2_id = dihedral.atom2.id\n atom3_id = dihedral.atom3.id\n atom4_id = dihedral.atom4.id\n\n atom1_xyz = dihedral.atom1.xyz\n atom2_xyz = dihedral.atom2.xyz\n atom3_xyz = dihedral.atom3.xyz\n atom4_xyz = dihedral.atom4.xyz\n\n four_centre_length = np.linalg.norm(atom1_xyz - atom4_xyz)\n\n row = A[dihedral.id]\n row[[3*atom1_id, (3*atom1_id)+1, (3*atom1_id)+2]] = -((atom1_xyz - atom3_xyz) + (atom4_xyz - atom2_xyz))/four_centre_length \n row[[3*atom2_id, (3*atom2_id)+1, (3*atom2_id)+2]] = -((atom2_xyz - atom1_xyz) + (atom2_xyz - atom3_xyz) + (atom2_xyz - atom4_xyz))/four_centre_length\n row[[3*atom3_id, (3*atom3_id)+1, (3*atom3_id)+2]] = -((atom3_xyz - atom4_xyz) + (atom3_xyz - atom1_xyz) + (atom3_xyz - atom2_xyz))/four_centre_length\n row[[3*atom4_id, (3*atom4_id)+1, (3*atom4_id)+2]] = -((atom4_xyz - atom2_xyz) + (atom1_xyz - atom3_xyz))/four_centre_length\n\n force_constant = dihedral.force_constant\n force_constants[dihedral.id] = force_constant\n\n A = scipy.sparse.csr_matrix(A)\n G = scipy.sparse.diags(force_constants)\n\n return (A, G)", "def calculate_dihedral_atom_equivalences(mol1, mol2):\n\n # Check that the mols are identical-ish\n if mol1.GetNumHeavyAtoms() != mol2.GetNumHeavyAtoms():\n raise EqualityError('Molecules are not identical (Num Atoms) {!s} != {!s}.\\n{!s}\\n{!s}'.format(mol1.GetNumHeavyAtoms(),mol2.GetNumHeavyAtoms(),Chem.MolToSmiles(mol1),Chem.MolToSmiles(mol2)))\n if mol1.GetNumBonds() != mol2.GetNumBonds():\n raise EqualityError('Molecules are not identical (Num Bonds) {!s} != 
{!s}:\\n{!s}\\n{!s}'.format(mol1.GetNumBonds(),mol2.GetNumBonds(),Chem.MolToSmiles(mol1), Chem.MolToSmiles(mol2)))\n\n # Gets a list of lists of atoms in mol1 (12,16,3, ...) that match the atoms in mol2 (1,2,3, ...)\n match_patterns = mol1.GetSubstructMatches(mol2, uniquify=False)\n # Get the quadruplets to calculate the dihedrals from for mol1\n mol1_atom_sets = identify_rotatable_bond_atom_pairs(mol1)\n num_atms = mol1.GetNumHeavyAtoms()\n # List for returning\n paired_atom_sets = []\n # Iterate through the different ways of overlaying the molecule (ensures we get the minimum rmsd)\n for match_pattern in match_patterns:\n # Translate from the atoms in mol1 to the atoms in mol2 (for this match_pattern)\n trans_dict = dict(zip(match_pattern, range(0,num_atms)))\n # Translate the atoms in mol1 to the atoms in mol2\n mol2_atom_sets = [ tuple([trans_dict[atm] for atm in bond_set]) for bond_set in mol1_atom_sets]\n # Add to list\n paired_atom_sets.append((mol1_atom_sets, mol2_atom_sets))\n # Check that the atom types are identical (test)\n mol1_atom_types = [ tuple([mol1.GetAtomWithIdx(atm).GetAtomicNum() for atm in bond_set]) for bond_set in mol1_atom_sets]\n mol2_atom_types = [ tuple([mol2.GetAtomWithIdx(atm).GetAtomicNum() for atm in bond_set]) for bond_set in mol2_atom_sets]\n assert mol1_atom_types == mol2_atom_types, \"ATOM TYPES ARE NOT THE SAME ON THE DIHEDRAL ANGLE TO BE CALCULATED - THERE'S BEEN A MATCHING ERROR\"\n # Return the list of lists of paired atoms between the structures\n return paired_atom_sets", "def calculate_dihedral_angle_differences(mol1, mol2):\n\n # Get the dihedrals to calculate for both of the molecules (possibly multiple ways of overlaying the mols if symmetry exists)\n atom_sets = calculate_dihedral_atom_equivalences(mol1, mol2)\n # list of possible rmsds for the molecule\n differences = []\n # Iterate through and calculate the rmsd for each set of atom equivalences\n for mol1_atom_set, mol2_atom_set in atom_sets:\n # Calculate the dihedrals of both\n mol1_dihedrals = calculate_dihedral_angles(mol1, mol1_atom_set)\n mol2_dihedrals = calculate_dihedral_angles(mol2, mol2_atom_set)\n # Calculate the differences squared for each angle difference\n diffs = [an1-an2 for an1, an2 in zip(mol1_dihedrals,mol2_dihedrals)]\n # Append list of angle differences\n differences.append(diffs)\n\n return atom_sets, differences", "def selectLinkedElement():\n\n collector = FilteredElementCollector(doc).ToElementIds()\n wrongAngle = []\n for id in collector:\n \n element= doc.GetElement(id)\n\n if element.get_Parameter(BuiltInParameter.FABRICATION_PART_ANGLE) is not None:\n try:\n chord = element.CenterlineLength\n angle = element.get_Parameter(BuiltInParameter.FABRICATION_PART_ANGLE).AsDouble()\n angle = degrees(angle)\n diameter = element.get_Parameter(BuiltInParameter.FABRICATION_PART_DIAMETER_IN).AsDouble()\n radius = ((360/angle)*chord )/(pi*2)\n \n if round(radius,4) == round(diameter,4):\n wrongAngle.append(id)\n\n except Exception as ex:\n print(ex, str(id))\n pass\n\n wrongAngle = List[ElementId](wrongAngle)\n uidoc.Selection.SetElementIds(wrongAngle)", "def planInternal(r):\n\t# First find the atoms that are connected to preceding\n\t# or succeeding residues. 
If none, pick an arbitrary atom.\n\t# These atoms are always interpolated in Cartesian space.\n\tplan = []\n\tdone = set([])\n\ttodo = []\n\tm = r.molecule\n\tneighbors = set([m.residueBefore(r), m.residueAfter(r)])\n\tfixed = set([])\n\tfor a0 in r.atoms:\n\t\tfor na in a0.primaryNeighbors():\n\t\t\tif na.residue in neighbors:\n\t\t\t\tfixed.add(a0)\n\t\t\t\tbreak\n\tif not fixed:\n\t\tfixed.add(r.atoms[0])\n\tfor a0 in fixed:\n\t\tplan.append((interpCartesian, (a0,)))\n\t\t_finished(a0, done, todo)\n\n\t# Now we look for atoms that are connected to those in\n\t# \"fixed\". If we can find three atoms that define a\n\t# dihedral, we use dihedral interpolation; otherwise\n\t# we use Cartesian interpolation.\n\twhile todo:\n\t\tna, a = todo.pop(0)\n\t\tif na in done:\n\t\t\t# May be part of a loop and have been\n\t\t\t# visited via another path\n\t\t\tcontinue\n\t\tanchors = _findAnchor(a, done)\n\t\tif len(anchors) >= 2:\n\t\t\t# Found two anchor atoms connected to the\n\t\t\t# fixed atom, we can use them for defining\n\t\t\t# the dihedral\n\t\t\tplan.append((interpInternal,\n\t\t\t\t\t(na, a, anchors[0], anchors[1])))\n\t\t\t_finished(na, done, todo)\n\t\t\tcontinue\n\t\tif len(anchors) == 1:\n\t\t\t# Found one anchor atom connected to the\n\t\t\t# fixed atom, so we need to get another\n\t\t\t# anchor atom connected to the one we found\n\t\t\t# (but is not our original fixed atom)\n\t\t\tanchors2 = _findAnchor(anchors[0], done, a)\n\t\t\tif len(anchors2) >= 1:\n\t\t\t\tplan.append((interpInternal,\n\t\t\t\t\t(na, a, anchors[0], anchors2[0])))\n\t\t\t\t_finished(na, done, todo)\n\t\t\t\tcontinue\n\t\t# Cannot find three fixed atoms to define dihedral.\n\t\t# Use Cartesian interpolation for this atom.\n\t\tplan.append((interpCartesian, (na,)))\n\t\t_finished(na, done, todo)\n\treturn plan", "def get_dihedral(p0,p1,p2,p3,unit):\n if unit == 'Ang':\n p0 = p0*0.529177249\n p1 = p1*0.529177249\n p2 = p2*0.529177249\n p3 = p3*0.529177249\n\n b0 = -1.0*(p1 - p0)\n b1 = p2 - p1\n b2 = p3 - p2\n\n # normalize b1 so that it does not influence magnitude of vector\n # rejections that come next\n b1 /= linalg.norm(b1)\n\n # vector rejections\n # v = projection of b0 onto plane perpendicular to b1\n # = b0 minus component that aligns with b1\n # w = projection of b2 onto plane perpendicular to b1\n # = b2 minus component that aligns with b1\n v = b0 - dot(b0, b1)*b1\n w = b2 - dot(b2, b1)*b1\n\n # angle between v and w in a plane is the torsion angle\n # v and w may not be normalized but that's fine since tan is y/x\n x = dot(v, w)\n y = dot(cross(b1, v), w)\n return degrees(arctan2(y, x))\n\n #q1 = subtract(p1,p0) # b - a \n #q2 = subtract(p2,p1) # c - b \n #q3 = subtract(p3,p2) # d - c\n #print(q1,q2)\n\n #q1_x_q2 = cross(q1,q2) \n #q2_x_q3 = cross(q2,q3)\n\n #n1 = q1_x_q2/sqrt(dot(q1_x_q2,q1_x_q2)) \n #n2 = q2_x_q3/sqrt(dot(q2_x_q3,q2_x_q3))\n\n #u1 = n2\n #u3 = q2/(sqrt(dot(q2,q2))) \n #u2 = cross(u3,u1)\n\n #cos_theta = dot(n1,u1)\n #sin_theta = dot(n1,u2)\n ## Calculate theta\n #theta = -atan2(sin_theta,cos_theta)\n ## it is different from atan2 from fortran math.atan2(y,x)\n #theta_deg = degrees(theta)\n #return(theta_deg)", "def identify_bonds(chosen_atom, atom_list):\n list_of_hydrogens = ['H15', 'H14', 'H13', 'H12', 'H11', 'H10', 'H9', 'H8', 'H7', 'H6', 'H5', 'H4', 'H3', 'H2', 'H1'] \n if ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name != \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - 
atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 2)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n elif ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name == \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.8)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n else:\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 1.6) and (abs(chosen_atom.y - atom.y) <= 1.6) and (abs(chosen_atom.z - atom.z) <= 1.6))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.6)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n for elements in nearby_atoms:\n if (check_if_no_bond(chosen_atom, elements, bond_list, bond_list_3) == True):\n nearby_atoms.remove(elements)\n if (len(nearby_atoms) == len(identified_bonds)):\n return identified_bonds\n else:\n return []", "def getDihedrals(self):\n try:\n return self._dihedralList\n except AttributeError:\n pass\n forceConstant=self._raw_data[\"DIHEDRAL_FORCE_CONSTANT\"]\n phase=self._raw_data[\"DIHEDRAL_PHASE\"]\n periodicity=self._raw_data[\"DIHEDRAL_PERIODICITY\"]\n dihedralPointers = self._raw_data[\"DIHEDRALS_INC_HYDROGEN\"] \\\n +self._raw_data[\"DIHEDRALS_WITHOUT_HYDROGEN\"]\n self._dihedralList=[]\n forceConstConversionFactor = (units.kilocalorie_per_mole).conversion_factor_to(units.kilojoule_per_mole)\n for ii in range(0,len(dihedralPointers),5):\n if int(dihedralPointers[ii])<0 or int(dihedralPointers[ii+1])<0:\n raise Exception(\"Found negative dihedral atom pointers %s\"\n % ((dihedralPointers[ii],\n dihedralPointers[ii+1],\n dihedralPointers[ii+2],\n dihedralPointers[ii+3]),))\n iType=int(dihedralPointers[ii+4])-1\n self._dihedralList.append((int(dihedralPointers[ii])//3,\n int(dihedralPointers[ii+1])//3,\n abs(int(dihedralPointers[ii+2]))//3,\n abs(int(dihedralPointers[ii+3]))//3,\n float(forceConstant[iType])*forceConstConversionFactor,\n float(phase[iType]),\n int(0.5+float(periodicity[iType]))))\n return self._dihedralList", "def set_potential_aperture_angle_to(self, atom_hash, new_distance):\n #TODO: Finish this.\n pseudopotentials = self.identify_pseudocarbon_potentials(atom_hash)\n potential_coords_list = []\n deletion_list = []\n\n for pseudopotential in pseudopotentials:\n # get rotation axis via cross-products\n # if 3 atoms within pseudo-distance this is an sp3 pseudo-carbon\n if len(pseudopotentials) == 3:\n pass\n\n # if 4 atoms within pseudo-distance this is an sp2 2e pseudo-carbon\n elif len(pseudopotentials) == 4:\n pass\n\n\n # if 6 atoms within pseudo-distance this is an sp2 pseudo-carbon\n elif len(pseudopotentials) == 6:\n pass\n\n # apply euler-rodriguez\n\n vector_from_pseudo_carbon = self.vectorise_atom(pseudopotential['#']) - self.vectorise_atom(atom_hash)\n new_vector_from_pseudocarbon = self.lengtherise_vector(vector_from_pseudo_carbon, 
new_distance)\n new_potential_coordinates = self.vectorise_atom(atom_hash) + new_vector_from_pseudocarbon\n\n potential_coords_list.append(new_potential_coordinates)\n deletion_list.append(pseudopotential['#'])\n\n self.delete_specified_atoms(deletion_list)\n for potential_coord in potential_coords_list:\n self.write_coord(potential_coord, overwrite=False)", "def dilation(hexs, diameter):\n new_hex_set = set(hexs)\n for a_hex in hexs:\n adjacent_hexs = h3.k_ring(a_hex, diameter)\n new_hex_set = new_hex_set.union(adjacent_hexs)\n new_hexs = list(new_hex_set)\n return new_hexs", "def _get_dihedral_types(\n structure, use_rb_torsions, use_dihedrals, epsilon_conversion_factor\n):\n if use_rb_torsions:\n unique_dihedral_types = _get_unique_rb_torsion_types(\n structure, epsilon_conversion_factor\n )\n\n dihedral_types = [\n unique_dihedral_types[\n _get_dihedral_rb_torsion_key(\n dihedral, epsilon_conversion_factor\n )\n ]\n for dihedral in structure.rb_torsions\n ]\n\n elif use_dihedrals:\n print_warn_text = (\n \"WARNING: Using the charmm style and impropers is not \"\n \"available in the current version of this psf, pdb, and GOMC writer.\"\n )\n warn(print_warn_text)\n return None, None\n\n unique_dihedral_check_dict = OrderedDict()\n for i_value_dihed, i_key_dihed in unique_dihedral_types.items():\n i_value_duplicated = False\n for j_value_dihed, j_key_dihed in unique_dihedral_types.items():\n j_value_dihed_reorder = (\n j_value_dihed[0],\n j_value_dihed[1],\n j_value_dihed[2],\n j_value_dihed[3],\n j_value_dihed[4],\n j_value_dihed[5],\n j_value_dihed[6],\n j_value_dihed[7],\n j_value_dihed[11],\n j_value_dihed[10],\n j_value_dihed[9],\n j_value_dihed[8],\n j_value_dihed[15],\n j_value_dihed[14],\n j_value_dihed[13],\n j_value_dihed[12],\n )\n\n if i_value_dihed == j_value_dihed_reorder:\n i_value_duplicated = True\n if i_value_dihed[8] > j_value_dihed[8]:\n unique_dihedral_check_dict.update(\n {j_value_dihed: len(unique_dihedral_check_dict) + 1}\n )\n else:\n unique_dihedral_check_dict.update(\n {i_value_dihed: len(unique_dihedral_check_dict) + 1}\n )\n if i_value_duplicated is False:\n unique_dihedral_check_dict.update(\n {i_value_dihed: len(unique_dihedral_check_dict) + 1}\n )\n\n unique_dihedral_types = OrderedDict(\n [(y, x) for y, x in unique_dihedral_check_dict.items()]\n )\n\n return dihedral_types, unique_dihedral_types", "def _get_dihedral_rb_torsion_key(dihedral, epsilon_conversion_factor):\n\n lj_unit = 1 / epsilon_conversion_factor\n\n dihed_type_RB_c0 = round(dihedral.type.c0 * lj_unit, 8)\n dihed_type_RB_c1 = round(dihedral.type.c1 * lj_unit, 8)\n dihed_type_RB_c2 = round(dihedral.type.c2 * lj_unit, 8)\n dihed_type_RB_c3 = round(dihedral.type.c3 * lj_unit, 8)\n dihed_type_RB_c4 = round(dihedral.type.c4 * lj_unit, 8)\n dihed_type_RB_c5 = round(dihedral.type.c5 * lj_unit, 8)\n\n dihed_type_scee = round(dihedral.type.scee, 4)\n dihed_type_scnb = round(dihedral.type.scnb, 4)\n\n dihed_atom_1_type = dihedral.atom1.type\n dihed_atom_2_type = dihedral.atom2.type\n dihed_atom_3_type = dihedral.atom3.type\n dihed_atom_4_type = dihedral.atom4.type\n\n dihed_atom_1_res_type = dihedral.atom1.residue.name\n dihed_atom_2_res_type = dihedral.atom2.residue.name\n dihed_atom_3_res_type = dihedral.atom3.residue.name\n dihed_atom_4_res_type = dihedral.atom4.residue.name\n\n return (\n dihed_type_RB_c0,\n dihed_type_RB_c1,\n dihed_type_RB_c2,\n dihed_type_RB_c3,\n dihed_type_RB_c4,\n dihed_type_RB_c5,\n dihed_type_scee,\n dihed_type_scnb,\n dihed_atom_1_type,\n dihed_atom_2_type,\n 
dihed_atom_3_type,\n dihed_atom_4_type,\n dihed_atom_1_res_type,\n dihed_atom_2_res_type,\n dihed_atom_3_res_type,\n dihed_atom_4_res_type,\n )", "def find_girth(self):\r\n girth = sys.maxint\r\n face = []\r\n vertices = list(self.graph.vertices)\r\n shift = random.randint(0,len(vertices)-1)\r\n vertices = vertices[shift:] + vertices[:shift]\r\n random.shuffle(vertices)\r\n \r\n for vertex in vertices:\r\n s = set() # set of explored edge id\r\n distance = {}\r\n distance[vertex.id] = 0\r\n father = {}\r\n father[vertex.id] = (None, None) # (a,b) a is v_id, b is edge id\r\n nodes = [vertex.id] # stack for the vertices to start with\r\n while len(nodes) > 0:\r\n node = nodes.pop(0)\r\n v_a = self.graph.get_vertex(node)\r\n nbrs = list(v_a.neighbors)\r\n random.shuffle(nbrs)\r\n for edge in nbrs:\r\n if not edge.id in s:\r\n another = edge.get_another_vertex(node)\r\n if not distance.has_key(another):\r\n nodes.append(another)\r\n s.add(edge.id)\r\n father[another] = (node, edge.id)\r\n distance[another] = distance[node] + 1\r\n elif distance[another] + distance[node] + 1 < girth:\r\n girth = distance[another] + distance[node] + 1\r\n\r\n face = list()\r\n face.append(edge.id)\r\n start = father[another]\r\n while start[0] is not None:\r\n face.append(start[1])\r\n start = father[start[0]]\r\n face.reverse()\r\n start = father[node]\r\n while start[0] is not None:\r\n face.append(start[1])\r\n start = father[start[0]]\r\n\r\n cycle = []\r\n edge0 = self.graph.get_edge(face[0])\r\n edge1 = self.graph.get_edge(face[1])\r\n (a, b) = edge0.get_endpoints()\r\n if a in edge1.get_endpoints():\r\n a, b = b, a\r\n for e in face:\r\n cycle.append(a)\r\n a = self.graph.get_edge(e).get_another_vertex(a)\r\n # logger.info(\"girth: %s\",cycle)\r\n return (face, cycle)", "def test_dihedrals(pose):\n for i in range(1, pose.total_residue()+1):\n\n print \"\\n\"+str(pose.pdb_info.pose2pdb(i))\n try:\n print \"Phi: \"+repr(math.degrees(pose.phi(i)))\n print \"Psi: \"+repr(math.degrees(pose.psi(i)))\n print \"Omega:\"+repr(math.degrees(pose.omega(i)))\n except Exception:\n \"Print could not get dihedral for resnum \"+repr(i)\n\n return True", "def modify_cand():\n if col_i + 1 < len(lastrow):\n return (lastrow[col_i + 1] +\n diff(left_elem, right_elem, key=key + [left_i],\n minimal=minimal, verbose=False))", "def _determine_extra_angles(self, angle_force, reference_topology, growth_indices):\n from simtk import openmm\n import itertools\n from openeye import oechem, oeomega\n\n if len(growth_indices)==0:\n return\n angle_force_constant = 400.0*unit.kilojoules_per_mole/unit.radians**2\n atoms = list(reference_topology.atoms())\n growth_indices = list(growth_indices)\n #get residue from first atom\n residue = atoms[growth_indices[0].idx].residue\n try:\n oemol = FFAllAngleGeometryEngine._oemol_from_residue(residue)\n except Exception as e:\n print(\"Could not generate an oemol from the residue.\")\n print(e)\n\n #get the omega geometry of the molecule:\n\n omega = oeomega.OEOmega()\n omega.SetMaxConfs(1)\n omega.SetStrictStereo(False) #TODO: fix stereochem\n omega(oemol)\n\n #we now have the residue as an oemol. 
Time to find the relevant angles.\n #There's no equivalent to OEGetTorsions, so first find atoms that are relevant\n #TODO: find out if that's really true\n aromatic_pred = oechem.OEIsAromaticAtom()\n heavy_pred = oechem.OEIsHeavy()\n angle_criteria = oechem.OEAndAtom(aromatic_pred, heavy_pred)\n\n #get all heavy aromatic atoms:\n #TODO: do this more efficiently\n heavy_aromatics = list(oemol.GetAtoms(angle_criteria))\n for atom in heavy_aromatics:\n #bonded_atoms = [bonded_atom for bonded_atom in list(atom.GetAtoms()) if bonded_atom in heavy_aromatics]\n bonded_atoms = list(atom.GetAtoms())\n for angle_atoms in itertools.combinations(bonded_atoms, 2):\n angle = oechem.OEGetAngle(oemol, angle_atoms[0], atom, angle_atoms[1])\n atom_indices = [angle_atoms[0].GetData(\"topology_index\"), atom.GetData(\"topology_index\"), angle_atoms[1].GetData(\"topology_index\")]\n angle_radians = angle*unit.radian\n growth_idx = self._calculate_growth_idx(atom_indices, growth_indices)\n #If this is a CustomAngleForce, we need to pass the parameters as a list, and it will have the growth_idx parameter.\n #If it's a regular HarmonicAngleForce, there is no growth_index and the parameters are passed separately.\n if isinstance(angle_force, openmm.CustomAngleForce):\n angle_force.addAngle(atom_indices[0], atom_indices[1], atom_indices[2], [angle_radians, angle_force_constant, growth_idx])\n elif isinstance(angle_force, openmm.HarmonicAngleForce):\n angle_force.addAngle(atom_indices[0], atom_indices[1], atom_indices[2], angle_radians, angle_force_constant)\n else:\n raise ValueError(\"Angle force must be either CustomAngleForce or HarmonicAngleForce\")\n return angle_force", "def get_lig_dihedrals(np_xyz, lig_ndx, close_ndxs, inp):\n n_at1, n_at2 = np.sum(inp.lig1_n_per_bead), np.sum(inp.lig2_n_per_bead)\n n_core = int(len(np_xyz) - inp.lig1_num*n_at1 - inp.lig2_num*n_at2)\n core_xyz = np_xyz[:n_core]\n\n lig1_dihedrals, lig2_dihedrals = [], []\n\n if n_at1 >= 3:\n for i in range(inp.lig1_num):\n ndx0 = n_core + i*n_at1\n ndx1 = ndx0*1\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = close_ndxs[lig_ndx[0][i]]#np.argsort(cdist([np_xyz[ndx1]], core_xyz))[0,0]\n dihedral = [ndx4, ndx1, ndx2, ndx3]\n lig1_dihedrals.append(dihedral)\n for j in range(n_at1-4):\n ndx1 = ndx0 + j\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = ndx1 + 3\n dihedral = [ndx1, ndx2, ndx3, ndx4]\n lig1_dihedrals.append(dihedral)\n\n if n_at2 >= 3:\n for i in range(inp.lig2_num):\n ndx0 = n_core + n_at1*inp.lig1_num + i*n_at2\n ndx1 = ndx0*1\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = close_ndxs[lig_ndx[1][i]]#np.argsort(cdist([np_xyz[ndx1]], core_xyz))[0,0]\n dihedral = [ndx4, ndx1, ndx2, ndx3]\n lig2_dihedrals.append(dihedral)\n for j in range(n_at2-4):\n ndx1 = ndx0 + j\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = ndx1 + 3\n dihedral = [ndx1, ndx2, ndx3, ndx4]\n lig2_dihedrals.append(dihedral)\n\n return (lig1_dihedrals, lig2_dihedrals)", "def closer_ang(x,a,dir=0):\r\n if dir == 0:\r\n return a + smaller_ang(x-a)\r\n elif dir == 1:\r\n return a + (x-a)%(2*pi)\r\n elif dir == -1:\r\n return a + (x-a)%(2*pi) - 2*pi", "def find_contour(hole_atoms, atom_list):\n contour_atoms = []\n extra_atoms = []\n global bond_list\n bond_list = bond_list_1\n for atom in hole_atoms:\n c = [bond[0] for bond in identify_bonds(atom, atom_list) if ((bond[0] not in hole_atoms) and (bond[0] not in contour_atoms))]\n for element in c:\n contour_atoms.append(element)\n for atom in atom_list:\n c = [bond[0] for bond in identify_bonds(atom, atom_list)]\n count = 0\n for 
element in c:\n if element in contour_atoms:\n count += 1\n if (count >= 2):\n extra_atoms.append(atom)\n for atom in atom_list:\n c = [bond[0] for bond in identify_bonds(atom, atom_list)]\n for element in c:\n if ((element in contour_atoms) or (element in extra_atoms)):\n for i in [bond[0] for bond in identify_bonds(element, atom_list)]:\n if ((i in hole_atoms) and (atom not in hole_atoms) and (atom not in contour_atoms) and (atom not in extra_atoms)):\n extra_atoms.append(atom) \n \n contour_atoms = contour_atoms + extra_atoms\n \n extra_atoms2 = []\n for atom in contour_atoms:\n for atom2 in contour_atoms:\n if (atom != atom2):\n c = [bond[0] for bond in identify_bonds(atom, atom_list) if ((bond in identify_bonds(atom2, atom_list)) and (bond[0] not in (contour_atoms)))]\n if (len(c) != 0):\n extra_atoms2.append(c[0]) \n for element in extra_atoms2:\n contour_atoms.append(element)\n return contour_atoms", "def set_dihedral(self, pivots, scan, deg_increment):\n if deg_increment == 0:\n logger.warning('set_dihedral was called with zero increment for {label} with pivots {pivots}'.format(\n label=self.label, pivots=pivots))\n for rotor in self.rotors_dict.values(): # penalize this rotor to avoid inf. looping\n if rotor['pivots'] == pivots:\n rotor['times_dihedral_set'] += 1\n break\n else:\n for rotor in self.rotors_dict.values():\n if rotor['pivots'] == pivots and rotor['times_dihedral_set'] <= 10:\n rotor['times_dihedral_set'] += 1\n break\n else:\n logger.info('\\n\\n')\n for i, rotor in self.rotors_dict.items():\n logger.error('Rotor {i} with pivots {pivots} was set {times} times'.format(\n i=i, pivots=rotor['pivots'], times=rotor['times_dihedral_set']))\n raise RotorError('Rotors were set beyond the maximal number of times without converging')\n coordinates, atoms, _, _, _ = get_xyz_matrix(self.final_xyz)\n mol = molecules_from_xyz(self.final_xyz, multiplicity=self.multiplicity, charge=self.charge)[1]\n conf, rd_mol, indx_map = rdkit_conf_from_mol(mol, coordinates)\n rd_scan = [indx_map[i - 1] for i in scan] # convert the atom indices in `scan` to RDKit indices\n new_xyz = set_rdkit_dihedrals(conf, rd_mol, indx_map, rd_scan, deg_increment=deg_increment)\n self.initial_xyz = get_xyz_string(coords=new_xyz, symbols=atoms)", "def closer_angle(x, a, dir=0):\n if dir == 0:\n return a + smaller_angle(x-a)\n elif dir == 1:\n return a + (x-a)%(2*np.pi)\n elif dir == -1:\n return a + (x-a)%(2*np.pi) - 2*np.pi", "def calc_torsion(residues, include_residue=False, include_omega=False):\n\n\tlast_residue = None\n\tlast_contiguous = True\n\tlast_valid = False\n\n\tlast_omega = None\n\tlast_phi = None\n\n\tdef yield_vals(residue, omega, phi, psi):\n\t\tangles = (omega, phi, psi) if include_omega else (phi, psi)\n\t\treturn (residue, *angles) if include_residue else angles\n\n\tfor residue in residues:\n\n\t\t# Whether this residue is contiguous with the last and angles calculated\n\t\t# from that residue's atoms are valid\n\t\tis_contiguous = last_valid and residue.seq == last_residue.seq + 1\n\n\t\t# Reset the generator if not using atoms from last residue\n\t\tif not is_contiguous:\n\t\t\tangle_calculator = dihedral_calculator()\n\t\t\tangle_calculator.send(None) # Prime it\n\n\t\t# Get N, CA, and C atoms from residue\n\t\tbackbone_atoms = get_backbone_atoms(residue)\n\n\t\tif None in backbone_atoms:\n\t\t\t# Didn't get all backbone atoms - residue is invalid\n\t\t\tis_valid = False\n\t\t\tpsi = omega = phi = None\n\n\t\telse:\n\t\t\t# Residue good\n\t\t\tis_valid = True\n\n\t\t\t# Get backbone atom 
coords and calculate angles for residue\n\t\t\tbackbone_coords = [a.coord for a in backbone_atoms]\n\n\t\t\tpsi = angle_calculator.send(backbone_coords[0])\n\t\t\tomega = angle_calculator.send(backbone_coords[1])\n\t\t\tphi = angle_calculator.send(backbone_coords[2])\n\n\t\t# Yield angles for the previous residue (because calculating psi\n\t\t# required an atom from this residue)\n\t\tif last_residue is not None:\n\t\t\tyield yield_vals(\n\t\t\t\tlast_residue,\n\t\t\t\tlast_omega if last_contiguous else None,\n\t\t\t\tlast_phi if last_contiguous else None,\n\t\t\t\tpsi if is_contiguous else None,\n\t\t\t)\n\n\t\t# Keep track of state for previous residue\n\t\tlast_residue = residue\n\t\tlast_contiguous = is_contiguous\n\t\tlast_valid = is_valid\n\t\tlast_omega = omega\n\t\tlast_phi = phi\n\n\t# Last one is only partial - no value for psi\n\tyield yield_vals(\n\t\tlast_residue,\n\t\tlast_omega if last_contiguous else None,\n\t\tlast_phi if last_contiguous else None,\n\t\tNone\n\t)", "def get_inplane_angle(ima,ref, iring=1, fring=-1, ringstep=1, xtransSearch=0, ytransSearch=0, stp=1, center=1):\n\n\tfrom alignment import Numrinit, ringwe, Applyws, ormq\n\tfrom filter import fshift\n\n\tfirst_ring=int(iring); last_ring=int(fring); rstep=int(ringstep); xrng=int(xtransSearch); yrng=int(ytransSearch); step=int(stp)\t\n\tnx=ima.get_xsize()\n\tif(last_ring == -1): last_ring=int(nx/2)-2\n\tcnx = int(nx/2)+1\n \tcny = cnx\n \tmode = \"F\"\n \t#precalculate rings\n\tnumr = Numrinit(first_ring, last_ring, rstep, mode)\n \twr = ringwe(numr, mode)\n\tif(center==1):\n\t\tcs = [0.0]*2 # additio\n\t\tcs = ref.phase_cog()\n\t\tref1 = fshift(ref, -cs[0], -cs[1])\n\t\tcimage=Util.Polar2Dm(ref1, cnx, cny, numr, mode)\n\t\tcs = ima.phase_cog()\n\t\tima1 = fshift(ima, -cs[0], -cs[1])\n\telse:\n\t\tima1=ima.copy()\n\t\tcimage=Util.Polar2Dm(ref, cnx, cny, numr, mode)\n\tUtil.Frngs(cimage, numr)\n\tApplyws(cimage, numr, wr)\n\t[angt, sxst, syst, mirrort, peakt]=ormq(ima1, cimage, xrng, yrng, step, mode, numr, cnx, cny)\n\treturn angt,sxst, syst, mirrort, peakt", "def calcNadirAngle(ele):\n\n nadeg = np.arcsin(6378.0/26378.0 * np.cos(ele/180.*np.pi)) * 180./np.pi\n\n return nadeg", "def neighbors(pattern, d):\n tides = set([\"A\", \"C\", \"G\", \"T\"])\n if d == 0:\n return set([pattern])\n if len(pattern) == 1:\n return tides\n neighborhood = set([])\n suffix_neighbors = neighbors(pattern[1:], d)\n for text in suffix_neighbors:\n if ham_dist(pattern[1:], text) < d:\n for tide in tides:\n neighborhood.add(tide + text)\n else:\n neighborhood.add(pattern[0] + text)\n return neighborhood", "def test_terminal_rotamer_filtering(self):\n LIGAND_PATH = 'ligands/oleic_acid.pdb'\n\n ligand_path = get_data_file_path(LIGAND_PATH)\n molecule = Molecule(ligand_path, exclude_terminal_rotamers=True)\n\n rotamers_per_branch = molecule.rotamers\n\n assert len(rotamers_per_branch) == 2, \"Found an invalid number \" + \\\n \"of branches: {}\".format(len(rotamers_per_branch))\n\n atom_list_1 = list()\n atom_list_2 = list()\n rotamers = rotamers_per_branch[0]\n for rotamer in rotamers:\n atom_list_1.append(set([rotamer.index1, rotamer.index2]))\n\n rotamers = rotamers_per_branch[1]\n for rotamer in rotamers:\n atom_list_2.append(set([rotamer.index1, rotamer.index2]))\n\n EXPECTED_INDICES_1 = [set([9, 10]), set([8, 9]), set([7, 8]),\n set([6, 7]), set([5, 6]), set([2, 5]),\n set([0, 2]), set([0, 1])]\n\n EXPECTED_INDICES_2 = [set([12, 11]), set([12, 13]), set([13, 14]),\n set([14, 15]), set([15, 16]), set([16, 17]),\n set([17, 18])]\n\n 
where_1 = list()\n for atom_pair in atom_list_1:\n if atom_pair in EXPECTED_INDICES_1:\n where_1.append(1)\n elif atom_pair in EXPECTED_INDICES_2:\n where_1.append(2)\n else:\n where_1.append(0)\n\n where_2 = list()\n for atom_pair in atom_list_2:\n if atom_pair in EXPECTED_INDICES_1:\n where_2.append(1)\n elif atom_pair in EXPECTED_INDICES_2:\n where_2.append(2)\n else:\n where_2.append(0)\n\n assert (all(i == 1 for i in where_1)\n and all(i == 2 for i in where_2)) or \\\n (all(i == 2 for i in where_1)\n and all(i == 1 for i in where_2)), \"Invalid rotamer library \" + \\\n \"{}, {}\".format(where_1, where_2)\n\n assert (all(i == 1 for i in where_1)\n and all(i == 2 for i in where_2)\n and len(where_1) == len(EXPECTED_INDICES_1)\n and len(where_2) == len(EXPECTED_INDICES_2)) or \\\n (all(i == 2 for i in where_1)\n and all(i == 1 for i in where_2)\n and len(where_1) == len(EXPECTED_INDICES_2)\n and len(where_2) == len(EXPECTED_INDICES_1)), \"Unexpected \" + \\\n \"number of rotamers\"", "def _get_relevant_angle(self, atom1, atom2, atom3):\n atom1_angles = set(atom1.angles)\n atom2_angles = set(atom2.angles)\n atom3_angles = set(atom3.angles)\n relevant_angle_set = atom1_angles.intersection(atom2_angles, atom3_angles)\n\n # DEBUG\n if len(relevant_angle_set) == 0:\n print('atom1_angles:')\n print(atom1_angles)\n print('atom2_angles:')\n print(atom2_angles)\n print('atom3_angles:')\n print(atom3_angles)\n raise Exception('Atoms %s-%s-%s do not share a parmed Angle term' % (atom1, atom2, atom3))\n\n relevant_angle = relevant_angle_set.pop()\n if type(relevant_angle.type.k) != unit.Quantity:\n relevant_angle_with_units = self._add_angle_units(relevant_angle)\n else:\n relevant_angle_with_units = relevant_angle\n\n check_dimensionality(relevant_angle.type.theteq, unit.radians)\n check_dimensionality(relevant_angle.type.k, unit.kilojoules_per_mole/unit.radians**2)\n return relevant_angle_with_units", "def calc_dihedrals(points):\n\tpiter = iter(points)\n\n\tcalculator = dihedral_calculator()\n\tcalculator.send(None)\n\n\tfor i in range(3):\n\t\tcalculator.send(next(piter))\n\n\tfor point in piter:\n\t\tyield calculator.send(point)", "def find_path2(mol,atom0_index,atom1_index):\r\n atom0_index = atom0_index+1\r\n atom1_index = atom1_index+1\r\n atom_iter=ob.OBAtomAtomIter(mol.GetAtom(atom0_index))\r\n alist=[]\r\n \r\n index=0\r\n for a in atom_iter:\r\n alist.append(a.GetIdx())\r\n index=index+1\r\n #print('The list of bound atoms is:', alist)\r\n index=0\r\n depth=0\r\n finished=False\r\n for atom_index in alist:\r\n path=atom_index\r\n atom_iter=ob.OBAtomAtomIter(mol.GetAtom(atom_index))\r\n for a in atom_iter:\r\n #print(a.GetIdx())\r\n if a.GetIdx() ==atom1_index:\r\n finished=True\r\n break\r\n \r\n if finished:\r\n break\r\n if not finished:\r\n #print('Unable to find a path between atoms',atom0_index-1,' and ',atom1_index-1,'with a depth of 2')\r\n return -1\r\n path=path-1\r\n return path", "def calc_mainchain_bond_angle(self):\n aN = self.get_atom('N')\n aCA = self.get_atom('CA')\n aC = self.get_atom('C')\n aO = self.get_atom('O')\n aCB = self.get_atom('CB')\n\n naN = None\n naCA = None\n next_res = self.get_offset_residue(1)\n if next_res:\n naN = next_res.get_atom('N')\n naCA = next_res.get_atom('CA')\n\n N_CA_C = AtomMath.calc_angle(aN, aCA, aC)\n CA_C_O = AtomMath.calc_angle(aCA, aC, aO)\n N_CA_CB = AtomMath.calc_angle(aN, aCA, aCB)\n CB_CA_C = AtomMath.calc_angle(aCB, aCA, aC)\n CA_C_nN = AtomMath.calc_angle(aCA, aC, naN)\n C_nN_nCA = AtomMath.calc_angle(aC, naN, naCA)\n\n return 
(N_CA_C, N_CA_CB, CB_CA_C, CA_C_O, CA_C_nN, C_nN_nCA)", "def SetInitialRingBondAng(mol, ringpath):\n N = len(ringpath)\n atoms = [[ringpath[i], ringpath[(i+1)%N], ringpath[(i+2)%N]] for i in range(N)]\n first_ele = [mol.GetAtomWithIdx(x[0]).GetAtomicNum() for x in atoms]\n second_ele = [mol.GetAtomWithIdx(x[1]).GetAtomicNum() for x in atoms]\n third_ele = [mol.GetAtomWithIdx(x[2]).GetAtomicNum() for x in atoms]\n sorted_ele = list(zip(first_ele, second_ele, third_ele))\n bond = []\n for a in atoms:\n bond.append([int(mol.GetBondBetweenAtoms(a[i],a[i+1]).GetBondType()) for i in range(2)])\n elements_and_bond_order = [sorted_ele[i]+tuple(bond[i]) for i,j in enumerate(bond)]\n bondang = [bondtable.BONDANGLE_REF.get(x,\"Unknown bond length, please update BOND ANGLE table\") for x in elements_and_bond_order]\n return bondang", "def _adjust_refraction_to_boundary_angle(boundary: Line, new_angle: float) -> Tuple[float, float]:\n # TODO: verify this works with a non-vertical interface\n\n boundary_angle = boundary.angle % (2 * math.pi)\n if 0 <= boundary_angle < math.pi / 2: # in the first quadrant\n boundary_angle = boundary_angle\n new_angle = boundary_angle - math.pi / 2 + new_angle\n elif math.pi / 2 <= boundary_angle < math.pi: # in the second quadrant\n boundary_angle = math.pi - boundary_angle\n new_angle = math.pi / 2 - boundary_angle + new_angle\n elif math.pi <= boundary_angle < 3 * math.pi / 2: # in the third quadrant\n boundary_angle = math.pi - boundary_angle\n new_angle = boundary_angle - math.pi / 2 + new_angle\n elif 2 * math.pi / 3 <= boundary_angle < 2 * math.pi: # in the fourth quadrant\n boundary_angle = 2 * math.pi - boundary_angle\n new_angle = math.pi / 2 - boundary_angle - new_angle\n else:\n raise ValueError(f'Unexpected angle {boundary_angle}')\n return boundary_angle, new_angle", "def EuclidI9(self, angle: Angle, interesting=True) -> Line:\n a = angle.vertex_point\n line1, line2 = angle.line1, angle.line2\n # pick an arbitrary point on line1 that is not a.\n d: Point = line1.point1 if line1.point1 != a else line1.point2\n # Cut off point E from line2 with length AD\n e = self.EuclidI3(short_line=Line(a, d), long_line=line2, interesting=interesting).point2\n # We need to pick a point opposite DE from A to show which side to erect the equilateral triangle.\n # Start at D, and walk in the direction of D-A.\n side: Point = 2 * d - a\n line_de = self.add_line(d, e, interesting=interesting)\n f = self.EuclidI1(line_de, side, interesting=interesting)\n return self.add_line(a, f, interesting=interesting)", "def connect(ends):\n d = np.diff(ends, axis=0)[0]\n j = np.argmax(np.abs(d))\n D = d[j]\n aD = np.abs(D)\n return ends[0] + (np.outer(np.arange(aD + 1), d) + (aD >> 1)) // aD", "def is_dihedral(self):\n if self._is_dihedral is not None:\n return self._is_dihedral\n\n order = self.order()\n\n if order % 2 == 1:\n self._is_dihedral = False\n return False\n if order == 2:\n self._is_dihedral = True\n return True\n if order == 4:\n # The dihedral group of order 4 is the Klein 4-group.\n self._is_dihedral = not self.is_cyclic\n return self._is_dihedral\n if self.is_abelian:\n # The only abelian dihedral groups are the ones of orders 2 and 4.\n self._is_dihedral = False\n return False\n\n # Now we know the group is of even order >= 6, and nonabelian.\n n = order // 2\n\n # Handle special cases where there are exactly two generators.\n gens = self.generators\n if len(gens) == 2:\n x, y = gens\n a, b = x.order(), y.order()\n # Make a >= b\n if a < b:\n x, y, a, b = y, x, b, a\n # 
Using Theorem 2.1 of [Di3]:\n if a == 2 == b:\n self._is_dihedral = True\n return True\n # Using Theorem 1.1 of [Di3]:\n if a == n and b == 2 and y*x*y == ~x:\n self._is_dihedral = True\n return True\n\n # Proceed with algorithm of [Di1]\n # Find elements of orders 2 and n\n order_2, order_n = [], []\n for p in self.elements:\n k = p.order()\n if k == 2:\n order_2.append(p)\n elif k == n:\n order_n.append(p)\n\n if len(order_2) != n + 1 - (n % 2):\n self._is_dihedral = False\n return False\n\n if not order_n:\n self._is_dihedral = False\n return False\n\n x = order_n[0]\n # Want an element y of order 2 that is not a power of x\n # (i.e. that is not the 180-deg rotation, when n is even).\n y = order_2[0]\n if n % 2 == 0 and y == x**(n//2):\n y = order_2[1]\n\n self._is_dihedral = (y*x*y == ~x)\n return self._is_dihedral", "def myDihedralFunctionAirliner(Epsilon):\n BaseDihedral = 7\n\n # A simple model of a loaded wing shape:\n return BaseDihedral + Epsilon*Epsilon*10", "def angle_difference(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['angle_difference']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for label in self.uuid_map:\n if label == 'LSTATE' or 'MAG' in label:\n continue\n distillate_label = get_distillate_label([label])\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_ref_label = \"{0} {1}\".format(label, self.ref_name)\n dep_ref_name = fields['deps'][0]\n dep_ref_uuid = self.reference_uuid_map[label]\n dep_label = \"{0} {1}\".format(label, self.name)\n dep_name = fields['deps'][1]\n dep_uuid = self.uuid_map[label]\n deps = [[dep_ref_label, dep_ref_name, dep_ref_uuid], [dep_label, dep_name, dep_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}/{3}\".format(self.location, self.ref_name, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"ANGLE-DIFF\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[label] = emitted[-2][-36:]\n\n filename = \"{0}/ANG-DIFF_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def _hex_diamond_thing_indicies(half_height=3, half_width=3):\n return _populate_quadrants([(r,c) for r in range(half_height) for c in range(half_width) if r+c < (half_width+half_height)/2])", "def hbond(atoms, selection1=None, selection2=None, selection1_type='both',\n cutoff_dist=2.5, cutoff_angle=120,\n donor_elements=('O', 'N', 'S'), acceptor_elements=('O', 'N', 'S'),\n periodic=False):\n if not (atoms.element == \"H\").any():\n warnings.warn(\n \"Input structure does not contain hydrogen atoms, \"\n \"hence no hydrogen bonds can be identified\"\n )\n\n # Create AtomArrayStack from AtomArray\n if not isinstance(atoms, AtomArrayStack):\n atoms = stack([atoms])\n single_model = True\n else:\n single_model = False\n \n if periodic:\n box = atoms.box\n else:\n box = None\n \n # Mask for donor/acceptor elements\n donor_element_mask = np.isin(atoms.element, donor_elements)\n acceptor_element_mask = np.isin(atoms.element, acceptor_elements)\n\n if selection1 is None:\n selection1 = np.ones(atoms.array_length(), dtype=bool)\n if selection2 is None:\n selection2 = np.ones(atoms.array_length(), dtype=bool)\n\n if 
selection1_type == 'both':\n # The two selections are separated into three selections:\n # the original ones without the overlaping part\n # and one containing the overlap\n # This prevents redundant triplets and unnecessary computation \n overlap_selection = selection1 & selection2\n # Original selections without overlaping part\n exclusive_selection1 = selection1 & (~overlap_selection)\n exclusive_selection2 = selection2 & (~overlap_selection)\n \n # Put selections to list for cleaner iteration\n selections = [\n exclusive_selection1, exclusive_selection2, overlap_selection\n ]\n selection_combinations = [\n #(0,0), is not included, would be same selection\n # as donor and acceptor simultaneously\n (0,1),\n (0,2),\n (1,0),\n #(1,1), # same reason above\n (1,2),\n (2,0),\n (2,1),\n (2,2) # overlaping part, combination is necessary\n ]\n \n all_comb_triplets = []\n all_comb_mask = []\n for selection_index1, selection_index2 in selection_combinations:\n donor_mask = selections[selection_index1]\n acceptor_mask = selections[selection_index2]\n if np.count_nonzero(donor_mask) != 0 and \\\n np.count_nonzero(acceptor_mask) != 0:\n # Calculate triplets and mask\n triplets, mask = _hbond(\n atoms, donor_mask, acceptor_mask,\n donor_element_mask, acceptor_element_mask,\n cutoff_dist, cutoff_angle,\n box\n )\n all_comb_triplets.append(triplets)\n all_comb_mask.append(mask)\n # Merge results from all combinations\n triplets = np.concatenate(all_comb_triplets, axis=0)\n mask = np.concatenate(all_comb_mask, axis=1)\n\n elif selection1_type == 'donor':\n triplets, mask = _hbond(\n atoms, selection1, selection2,\n donor_element_mask, acceptor_element_mask,\n cutoff_dist, cutoff_angle,\n box\n )\n \n elif selection1_type == 'acceptor':\n triplets, mask = _hbond(\n atoms, selection2, selection1,\n donor_element_mask, acceptor_element_mask,\n cutoff_dist, cutoff_angle,\n box\n )\n \n else:\n raise ValueError(f\"Unkown selection type '{selection1_type}'\")\n\n if single_model:\n # For a atom array (not stack),\n # hbond_mask contains only 'True' values,\n # since all interaction are in the one model\n # -> Simply return triplets without hbond_mask\n return triplets\n else:\n return triplets, mask", "def get_contour(atom_list):\n initial = [atom for atom in atom_list if ((0 < len(identify_bonds(atom, atom_list)) < 3) and (check_connected(atom, identify_bonds(atom, atom_list)) == False))]\n \n extra_1 = []\n for atom in atom_list:\n neighbours = [bond[0] for bond in identify_bonds(atom, atom_list)]\n for i in neighbours:\n neighbours2 = [bond[0] for bond in identify_bonds(i, atom_list)]\n for j in neighbours2:\n if j in initial:\n extra_1.append(atom)\n\n extra_2 = []\n for atom in atom_list:\n neighbours = [bond[0] for bond in identify_bonds(atom, atom_list)]\n check = 0\n for i in neighbours:\n if i in initial:\n check += 1\n if ((check == 2) and (atom not in initial)):\n extra_2.append(atom) \n return (initial + extra_1 + extra_2)", "def find_4_dots(point_x, point_y):\r\n i = 0\r\n curve = [] #c**2 = a**2 + b**2 - 2ab*cos_alpha\r\n \r\n while i <= len(point_x) - 2: # except last angle\r\n x1 = point_x[i]\r\n y1 = point_y[i]\r\n x2 = point_x[i + 1]\r\n y2 = point_y[i + 1]\r\n x0 = point_x[i - 1]\r\n y0 = point_y[i - 1]\r\n \r\n a = ((x2 - x1)**2+(y2 - y1)**2)**0.5\r\n b = ((x1 - x0)**2+(y1 - y0)**2)**0.5\r\n c = ((x2 - x0)**2+(y2 - y0)**2)**0.5\r\n\r\n cos_alpha = (a**2 + b**2 - c**2)/(2*a*b)\r\n\r\n curve.append(abs(cos_alpha))\r\n i = i + 1\r\n # last angle \r\n x1 = point_x[-1]\r\n y1 = point_y[-1]\r\n 
x2 = point_x[0]\r\n y2 = point_y[0]\r\n x0 = point_x[-2]\r\n y0 = point_y[-2]\r\n \r\n a = ((x2 - x1)**2+(y2 - y1)**2)**0.5\r\n b = ((x1 - x0)**2+(y1 - y0)**2)**0.5\r\n c = ((x2 - x0)**2+(y2 - y0)**2)**0.5 \r\n\r\n cos_alpha = (a**2 + b**2 - c**2)/(2*a*b)\r\n curve.append(abs(cos_alpha))\r\n\r\n curve_out_of_repeats = []\r\n for j in range(len(curve)): # add delta for the reason of not mess in equal angles\r\n delta = 10**(-10)\r\n curve_out_of_repeats.append(curve[j] + delta*j)\r\n\r\n curve_out_of_edges = curve_out_of_repeats.copy()\r\n edge_angles = []\r\n edge_indexes = []\r\n edge_x = []\r\n edge_y = []\r\n for i in range(4):\r\n edge_angles.append(min(curve_out_of_edges))\r\n curve_out_of_edges.remove(min(curve_out_of_edges))\r\n\r\n for i in edge_angles:\r\n index = curve_out_of_repeats.index(i)\r\n edge_indexes.append(index)\r\n edge_indexes = sorted(edge_indexes)\r\n\r\n for i in edge_indexes:\r\n edge_x.append(point_x[i])\r\n edge_y.append(point_y[i])\r\n \r\n return edge_x, edge_y", "def _bond_dist(geom, a1, a2):\n if isinstance(geom, np.ndarray):\n geom = geom.flatten().tolist()\n a13 = a1 * 3\n a23 = a2 * 3\n\n xd = (geom[a13] - geom[a23])**2\n yd = (geom[a13 + 1] - geom[a23 + 1])**2\n zd = (geom[a13 + 2] - geom[a23 + 2])**2\n\n return (xd + yd + zd)**0.5", "def ecfp(mol,radius):\n #mol=Chem.AddHs(mol)\n bitInfo={}\n atoms_dict=invariants(mol)\n \n for idxs,i in atoms_dict.items():\n bitInfo[i]=bitInfo.get(i,())+((idxs,0),)\n \n neighborhoods=[]\n atom_neighborhoods=[len(mol.GetBonds())*bitarray('0') for a in mol.GetAtoms()]\n dead_atoms=len(mol.GetAtoms())*bitarray('0')\n \n for r in range(1,radius+1):\n round_ids={} #new bit ID this iteration\n round_atom_neighborhoods=copy.deepcopy(atom_neighborhoods) #bond to include under this r\n neighborhoods_this_round=[] #(round_atom_neighborhoods,round_ids,idxs)\n \n for idxs,a in enumerate(mol.GetAtoms()):\n if dead_atoms[idxs]:\n continue\n nbsr=[] #list to hash this iteration\n o_bond=bond(mol,idxs)\n for b in o_bond:\n round_atom_neighborhoods[idxs][b[2]] = True\n round_atom_neighborhoods[idxs] |= atom_neighborhoods[b[1]]\n nbsr.append((b[0],atoms_dict[b[1]]))\n nbsr=sorted(nbsr)\n nbsr=[item for sublist in nbsr for item in sublist]\n nbsr.insert(0,atoms_dict[idxs])\n nbsr.insert(0,r)\n \n round_ids[idxs]=get_hash(nbsr)\n neighborhoods_this_round.append((round_atom_neighborhoods[idxs],round_ids[idxs],idxs))\n for lst in neighborhoods_this_round:\n if lst[0] not in neighborhoods:\n bitInfo[lst[1]] = bitInfo.get(lst[1],())+((lst[2],r),)\n neighborhoods.append(lst[0])\n else:\n dead_atoms[lst[2]]=True\n atoms_dict=round_ids\n atom_neighborhoods=copy.deepcopy(round_atom_neighborhoods)\n return bitInfo", "def even_angles_cd(delta, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'P', phiEQpsi='Minus'):\n\tfrom math import pi, sqrt, cos, acos\n\tangles = []\n\tif (method == 'P'):\n\t\ttemp = Util.even_angles(delta, theta1, theta2, phi1, phi2)\n\t\t#\t\t phi, theta, psi\n\t\tfor i in xrange(len(temp)/3): angles.append([temp[3*i],temp[3*i+1],temp[3*i+2]]);\n\telse: #elif (method == 'S'):\n\t\tDeltaz = cos(theta2*pi/180.0)-cos(theta1*pi/180.0)\n\t\ts = delta*pi/180.0\n\t\tNFactor = 3.6/s\n\t\twedgeFactor = abs(Deltaz*(phi2-phi1)/720.0)\n\t\tNumPoints = int(NFactor*NFactor*wedgeFactor)\n\t\tangles.append([phi1, theta1, 0.0])\n\t\tz1 = cos(theta1*pi/180.0); \tphi=phi1 # initialize loop\n\t\tfor k in xrange(1,(NumPoints-1)):\n\t\t\tz=z1 + Deltaz*k/(NumPoints-1)\n\t\t\tr= sqrt(1-z*z)\n\t\t\tphi = phi1+(phi + delta/r 
-phi1)%(abs(phi2-phi1))\n\t\t\t#[k, phi,180*acos(z)/pi, 0]\n\t\t\tangles.append([phi, 180*acos(z)/pi, 0.0])\n\t\t#angles.append([p2,t2,0]) # This is incorrect, as the last angle is really the border, not the element we need. PAP 01/15/07\n\tif (phiEQpsi == 'Minus'):\n\t\tfor k in xrange(len(angles)): angles[k][2] = (720.0 - angles[k][0])%360.0\n\tif( theta2 == 180.0 ): angles.append( [0.0, 180.0, 0.0] )\n\n\treturn angles", "def pick_angle(self, angle_key: Union[EKT, str]) -> Optional[Union[Hedron, Dihedron]]:\n ...", "def are_torsions_same2(geo, geoi, idxs_lst):\n dtol = 0.09\n same_dihed = True\n for idxs in idxs_lst:\n val = dihedral_angle(geo, *idxs)\n vali = dihedral_angle(geoi, *idxs)\n valip = vali+2.*numpy.pi\n valim = vali-2.*numpy.pi\n vchk1 = abs(val - vali)\n vchk2 = abs(val - valip)\n vchk3 = abs(val - valim)\n if vchk1 > dtol and vchk2 > dtol and vchk3 > dtol:\n same_dihed = False\n return same_dihed", "def around(dist, ls):\n # at = system.GetAtom(oechem.OEHasAtomIdx(idx))\n\n # Atom set selection\n atom_set_around = set()\n\n # Create a OE bit vector mask for each atoms\n bv_around = oechem.OEBitVector(system.GetMaxAtomIdx())\n\n # Set the mask atom\n for at in system.GetAtoms():\n if at.GetIdx() in ls:\n bv_around.SetBitOn(at.GetIdx())\n\n # Predicate\n pred = oechem.OEAtomIdxSelected(bv_around)\n\n # Create the system molecule based on the atom mask\n molecules = oechem.OEMol()\n oechem.OESubsetMol(molecules, system, pred)\n\n # Create the Nearest neighbours\n nn = oechem.OENearestNbrs(system, float(dist))\n\n for nbrs in nn.GetNbrs(molecules):\n for atom in oechem.OEGetResidueAtoms(nbrs.GetBgn()):\n if atom.GetIdx() in ls:\n continue\n atom_set_around.add(atom.GetIdx())\n\n return atom_set_around", "def _get_reaction_path(self):\n ## check if the atoms are on the same side of the unit cell\n cell = self.atomsIS.get_cell() # same cell used in IS and FS hopefully\n # get the vector respresenting the difference of the two \n vector_all = self.atomsIS.get_positions() - self.atomsFS.get_positions()\n vectors = vector_all[self.indices]\n min_vec = []\n for v in vectors:\n vmin, vlen = geometry.find_mic(v, cell, pbc=True)\n min_vec.append(vmin)\n ravel_vec = np.ravel(min_vec)\n self.modes.append( ravel_vec / np.linalg.norm(ravel_vec) )", "def adjacent(C, D, b, rid_fac, abs_tol=1e-7):\n E = rid_fac.E_0\n af = rid_fac.af\n bf = rid_fac.bf\n #\n E_r = rid_fac.E_r\n ar = rid_fac.ar\n br = rid_fac.br\n # shape\n d = C.shape[1]\n k = D.shape[1]\n # E_r slices\n C_er = C[E_r, :]\n D_er = D[E_r, :]\n b_er = b[E_r]\n # stack\n c = -np.hstack([ar, np.zeros(k)])\n G = np.hstack([C_er, D_er])\n h = b_er\n A = np.hstack([af, np.zeros(k)])\n sol = solvers._solve_lp_using_cvxopt(\n c, G, h, A=A.T, b=bf * (1 - 0.01))\n if sol['status'] != \"optimal\":\n print(G)\n print(h)\n print(af)\n print(bf)\n print(ar)\n print(br)\n print(np.dot(af, ar))\n data = {}\n data[\"C\"] = C\n data[\"D\"] = D\n data[\"b\"] = b\n sio.savemat(\"matlabdata\", data)\n with open('polytope.p', 'wb') as f:\n pickle.dump(data, f)\n raise Exception(\n \"adjacent: Lp returned status \" + str(sol['status']))\n opt_sol = np.array(sol['x']).flatten()\n dual_opt_sol = np.array(sol['z']).flatten()\n x_opt = opt_sol[range(d)]\n y_opt = opt_sol[range(d, d + k)]\n if is_dual_degenerate(\n c.flatten(), G, h, A, bf * (1 - 0.01),\n opt_sol, dual_opt_sol, abs_tol=abs_tol):\n # If degenerate, compute affine hull and take preimage\n E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0]\n a_temp, b_temp = proj_aff(\n 
C_er[E_temp, :], D_er[E_temp, :], b_er[E_temp],\n expected_dim=1, abs_tol=abs_tol)\n E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol)\n if len(E_adj) == 0:\n data = {}\n data[\"C\"] = C\n data[\"D\"] = D\n data[\"b\"] = b\n data[\"Er\"] = E_r + 1\n data[\"ar\"] = ar\n data[\"br\"] = br\n data[\"Ef\"] = E + 1\n data[\"af\"] = af\n data[\"bf\"] = bf\n sio.savemat(\"matlabdata\", data)\n raise Exception(\n \"adjacent: equality set computation returned empty set\")\n else:\n r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol\n E_adj = np.nonzero(r)[0]\n C_eadj = C[E_adj, :]\n D_eadj = D[E_adj, :]\n b_eadj = b[E_adj]\n af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol)\n return E_adj, af_adj, bf_adj", "def coords_reachable(self, start, distance): # TODO: Accept a lambda that\n # determines blocked or not\n visited = set() # set of hexes\n visited.add(start)\n fringes = list() # array of arrays of hexes\n fringes.append([start])\n\n for idx in range(1, distance+1):\n fringes.append([])\n for coord in fringes[idx-1]:\n for direction in self.dirs:\n neighbor = coord+direction\n if neighbor not in visited: # TODO: add exemptions (impassable)\n #or mandatory neighbors (direct\n #connections)\n visited.add(neighbor)\n fringes[idx].append(neighbor)\n\n return visited", "def find_orbit4(ring: Lattice, dp: float = None, refpts: Refpts = None, *,\n dct: float = None,\n df: float = None,\n orbit: Orbit = None,\n keep_lattice: bool = False, **kwargs):\n if len([v for v in (dp, dct, df) if v is not None]) > 1:\n raise AtError(\"For off-momentum specification, only one of \"\n \"dp, dct and df may be specified\")\n if orbit is None:\n if df is not None:\n frf = ring.cell_revolution_frequency * ring.cell_harmnumber\n dct = -ring.cell_length * df / (frf+df)\n orbit = _orbit_dct(ring, dct, keep_lattice=keep_lattice, **kwargs)\n elif dct is not None:\n orbit = _orbit_dct(ring, dct, keep_lattice=keep_lattice, **kwargs)\n else:\n orbit = _orbit_dp(ring, dp, keep_lattice=keep_lattice, **kwargs)\n keep_lattice = True\n\n # bug in numpy < 1.13\n if ring.refcount(refpts) == 0:\n all_points = numpy.empty((0, 6), dtype=float)\n else:\n all_points = internal_lpass(ring, orbit.copy(order='K'), refpts=refpts,\n keep_lattice=keep_lattice)\n all_points = numpy.squeeze(all_points, axis=(1, 3)).T\n return orbit, all_points", "def GetRingBondAng(mol, ringpath):\n N = len(ringpath)\n atoms = [[ringpath[i], ringpath[(i+1)%N], ringpath[(i+2)%N]] for i in range(N)]\n molconf = mol.GetConformer()\n bondang =[rdMolTransforms.GetAngleRad(molconf, x[0], x[1], x[2]) for x in atoms]\n return bondang", "def match(ra1, dec1, ra2, dec2, tol, allmatches=False):\n \n ra1,ra2,dec1,dec2 = map(np.asarray, (ra1, ra2, dec1, dec2))\n\n abs = np.abs\n\n isorted = ra2.argsort()\n sdec2 = dec2[isorted]\n sra2 = ra2[isorted]\n\n LIM = tol * DEG_PER_ASEC\n\n match = []\n # use mean dec, assumes decs similar\n decav = np.mean(sdec2.mean() + dec1.mean())\n RA_LIM = LIM / cos(decav * RAD_PER_DEG)\n\n for ra,dec in zip(ra1,dec1):\n i1 = sra2.searchsorted(ra - RA_LIM)\n i2 = i1 + sra2[i1:].searchsorted(ra + RA_LIM)\n #print(i1,i2)\n close = []\n for j in xrange(i1,i2):\n if abs(dec - sdec2[j]) > LIM:\n continue\n else:\n # if ras and decs are within LIM arcsec, then\n # calculate actual separation:\n disq = ang_sep(ra, dec, sra2[j], sdec2[j])\n close.append((disq, j))\n\n close.sort()\n if not allmatches:\n # Choose the object with the closest separation inside the\n # requested tolerance, if one was found.\n if 
len(close) > 0:\n min_dist, jmin = close[0]\n if min_dist < LIM:\n match.append((isorted[jmin], min_dist))\n continue\n # otherwise no match\n match.append((-1,-1))\n else:\n # append all the matching objects\n jclose = []\n seps = []\n for dist,j in close:\n if dist < LIM:\n jclose.append(j)\n seps.append(dist)\n else:\n break\n match.append(fromarrays([isorted[jclose], seps],\n dtype=[(str('ind'),str('i8')),\n str(('sep'),str('f8'))\n ]))\n\n if not allmatches:\n # return both indices and separations in a recarray\n temp = np.rec.fromrecords(match, names=str('ind,sep'))\n # change to arcseconds\n temp.sep *= 3600.\n temp.sep[temp.sep < 0] = -1.\n return temp\n else:\n return match", "def calculate_angle(opp, adjacent):\n return math.degrees(math.atan((opp / adjacent)))", "def fangle_degr(self):\r\n\r\n return self._versor_1.angle_degr(self._versor_2)", "def getEdgeAngle():\n '''\n returns angle a\n a\n ◿\n b c\n '''\n ANGLE_OFFSET = 8 # How far off the angle measurements are in degrees.\n THRESHOLD = 220 # How much light must be reflected to 'notice' the desk.\n angle = 0\n while angle < panTilt.TLT_RANGE:\n angle += 1\n panTilt.tilt(int(angle))\n deskDetected = ir.readWithDelay()\n # print \"Angle:\", angle + ANGLE_OFFSET, \", ir reading:\", deskDetected\n if deskDetected > THRESHOLD or angle == panTilt.TLT_RANGE:\n # print \"-----------------------\"\n break # Break out of looking downwards loop\n panTilt.up() # Look up again\n return 90 - angle - ANGLE_OFFSET", "def find_rings(atom_list): \n CX_list = [atom0 for atom0 in atom_list if ((atom0.atom_name == \"CX\") or (atom0.atom_name == \"CY\"))]\n atom_dict = {}\n for atom0 in CX_list:\n if (len(identify_bonds(atom0, atom_list)) >= 2):\n atom_dict[atom0] = {}\n for atom1 in identify_bonds(atom0, atom_list):\n if ( ((atom1[0].atom_name == \"CX\") or (atom1[0].atom_name == \"CY\")) and (len(identify_bonds(atom1[0], atom_list)) >= 2) ):\n atom_dict[atom0][atom1[0]] = {}\n for atom2 in identify_bonds(atom1[0], atom_list):\n if ( ((atom2[0].atom_name == \"CX\") or (atom2[0].atom_name == \"CY\")) and (atom2[0] != atom0) and (len(identify_bonds(atom2[0], atom_list)) >= 2)):\n atom_dict[atom0][atom1[0]][atom2[0]] = {}\n for atom3 in identify_bonds(atom2[0], atom_list):\n if ( ((atom3[0].atom_name == \"CX\") or (atom3[0].atom_name == \"CY\")) and (atom3[0] != atom0) and (len(identify_bonds(atom3[0], atom_list)) >= 2)):\n atom_dict[atom0][atom1[0]][atom2[0]][atom3[0]] = [atom3[0].atom_number]\n rings = []\n for key in atom_dict.keys():\n for key2 in atom_dict[key].keys():\n for key3 in atom_dict[key][key2].keys():\n for key4 in atom_dict[key][key2][key3].keys():\n rings.append([key, key2, key3, key4])\n finite_rings = []\n for element in rings:\n for element2 in rings:\n if ((element[0] == element2[0]) and (element[3] == element2[3]) and (element[1] != element2[1]) and (element[1] != element2[2]) and (element[2] != element2[1]) and (element[2] != element2[2]) and (element[0] != element2[1] != element[3]) and (element[0] != element2[2] != element[3])):\n check = True\n for el in finite_rings:\n if ((element[0] in el) and (element[1] in el) and (element[2] in el) and (element[3] in el) and (element2[0] in el) and (element2[1] in el) and (element2[2] in el) and (element2[3] in el)):\n check = False\n if (check == True):\n finite_rings.append([element[0], element[1], element[2], element[3], element2[1], element2[2]])\n return finite_rings", "def find_degen(aln):\n\n codon_ind = find_aligned_codons(aln)\n aln2 = subalign(aln, codon_ind)\n\n pep_aln = 
mapalign(aln2, valfunc=seqlib.translate)\n pep = pep_aln.values()[0]\n identies = calc_conservation(pep_aln)\n\n degens = [-1] * aln.alignlen()\n\n for i in range(0, len(codon_ind), 3):\n if pep[i/3] == \"X\":\n continue\n degen = seqlib.AA_DEGEN[pep[i/3]]\n if identies[i/3] == 1.0:\n for j in range(3):\n degens[codon_ind[i+j]] = degen[j]\n\n return degens", "def points_in_circle(c, d):\n if d == 0:\n return set((c,))\n circle = set()\n x, y = (c[0] + d * directions[4][0], c[1] + d * directions[4][1])\n for m in directions:\n for i in range(1, d + 1):\n x, y = x + m[0], y + m[1]\n circle.add((x, y))\n return circle", "def get14Interactions(self):\n dihedralPointers = self._raw_data[\"DIHEDRALS_INC_HYDROGEN\"] \\\n +self._raw_data[\"DIHEDRALS_WITHOUT_HYDROGEN\"]\n returnList=[]\n charges=self.getCharges()\n length_conv = units.angstrom.conversion_factor_to(units.nanometers)\n ene_conv = units.kilocalories_per_mole.conversion_factor_to(\n units.kilojoules_per_mole)\n if self.chamber:\n parm_acoef = [float(x) for x in self._raw_data['LENNARD_JONES_14_ACOEF']]\n parm_bcoef = [float(x) for x in self._raw_data['LENNARD_JONES_14_BCOEF']]\n else:\n parm_acoef = [float(x) for x in self._raw_data['LENNARD_JONES_ACOEF']]\n parm_bcoef = [float(x) for x in self._raw_data['LENNARD_JONES_BCOEF']]\n nbidx = [int(x) for x in self._raw_data['NONBONDED_PARM_INDEX']]\n numTypes = self.getNumTypes()\n atomTypeIndexes=self._getAtomTypeIndexes()\n for ii in range(0, len(dihedralPointers), 5):\n if int(dihedralPointers[ii+2])>0 and int(dihedralPointers[ii+3])>0:\n iAtom = int(dihedralPointers[ii])//3\n lAtom = int(dihedralPointers[ii+3])//3\n iidx = int(dihedralPointers[ii+4]) - 1\n chargeProd = charges[iAtom]*charges[lAtom]\n typ1 = atomTypeIndexes[iAtom] - 1\n typ2 = atomTypeIndexes[lAtom] - 1\n idx = nbidx[numTypes*typ1+typ2] - 1\n if idx < 0: continue\n a = parm_acoef[idx]\n b = parm_bcoef[idx]\n try:\n epsilon = b * b / (4 * a) * ene_conv\n rMin = (2 * a / b) ** (1/6.0) * length_conv\n except ZeroDivisionError:\n rMin = 1\n epsilon = 0\n try:\n iScee = float(self._raw_data['SCEE_SCALE_FACTOR'][iidx])\n except KeyError:\n iScee = 1.0 if self.chamber else 1.2\n try:\n iScnb = float(self._raw_data['SCNB_SCALE_FACTOR'][iidx])\n except KeyError:\n iScnb = 1.0 if self.chamber else 2.0\n returnList.append((iAtom, lAtom, chargeProd, rMin, epsilon, iScee, iScnb))\n return returnList", "def validBond(index1, index2, direction):\n #print \"?valid bond: \", allAtoms[index1].pos, \" , \", allAtoms[index2].pos, direction\n cell1 = index1/numAtomsPerCell\n cell2 = index2/numAtomsPerCell\n #Find the coordinates of the cell in units of interaction cells\n posInX1 = int(cell1/(size*size))\n posInX2 = int(cell1/(size*size))\n leftover1 = cell1%(size*size)\n leftover2 = cell2%(size*size)\n posInY1 = int(leftover1/size)\n posInY2 = int(leftover2/size)\n posInZ1 = leftover1%size\n posInZ2 = leftover2%size\n \n #Now, a valid interaction can cross an interaction cell boundary in any direction,\n #but it has a maximum length of one interaction cell. However, I have made the minimum\n #size of this larger translated lattice equal to 3*3*3 interaction cells. 
Therefore,\n #when we hit an edge and get in invalid interaction, the cells will be at least 2\n #interaction cells apart in the direction of the interaction.\n if(direction[0]):\n if numpy.abs(posInX1 - posInX2)>1:\n #print \"false\"\n return False\n if(direction[1]):\n if numpy.abs(posInY1 - posInY2)>1:\n #print \"false\"\n return False\n if(direction[2]):\n if numpy.abs(posInZ1 - posInZ2)>1:\n #print \"false\"\n return False\n print #\"true\"\n return True\n\n #Old (incorrect) method:\n if 0:\r\n print \"?valid bond: \", allAtoms[index1].pos, \" , \", allAtoms[index2].pos, direction\r\n cell1 = index1/numAtomsPerCell\r\n cell2 = index2/numAtomsPerCell\r\n zRow1 = cell1/size#this relies on the list being created in the nested for loop that was used, z within y within x\r\n zRow2 = cell2/size\r\n if(zRow1 != zRow2 and direction[2]):\n print \"false\"\r\n return False\r\n xLayer1 = cell1/(size*size)\r\n xLayer2 = cell2/(size*size)\r\n if(xLayer1 != xLayer2 and direction[1]):\n print \"false\"\r\n return False\r\n #shouldn't have to check z, because if it's not valid in z direction, it would be off the list (>len(allAtoms))\n print \"true\"\r\n return True", "def findRings(graph):\n # TODO add a planarity check?\n rings5 = []\n rings6 = []\n if DEBUG: print \"- starting ring detection...\"\n for head in graph.keys():\n tier1 = graph[head]\n tier2 = []\n tier3 = []\n # populate tier2 \n for node1 in tier1:\n for tmp in graph[node1]:\n if not tmp == head and not tmp in tier2 and (not tmp in tier1) :\n tier2.append(tmp)\n # populate tier3\n for node2 in tier2:\n for tmp in graph[node2]:\n if (not tmp == head) and (not tmp in tier2) and (not tmp in tier1) and (not tmp in tier3):\n tier3.append(tmp)\n # 6 member rings\n for x in tier3:\n candidate = []\n for c in tier2:\n if x in graph[c]:\n if not c in candidate:\n candidate.append(c)\n if len(candidate) >1:\n r6 = [ head ] \n r6.append(x)\n r6 += candidate\n for c in candidate:\n r6 += intersect( graph[head], graph[c])\n r6.sort()\n if not r6 in rings6:\n rings6.append( r6 )\n if DEBUG: print \" 6member!\", r6\n break\n # 5 member rings\n for c1 in tier2:\n for c2 in tier2:\n if not c1 == c2:\n if (c2 in graph[c1]) and (c1 in graph[c2]):\n is_3_ring = False\n for k in graph[c1]:\n if k in graph[c2]: \n is_3_ring =True\n if DEBUG: print \" [ ...catched a cycle_3... 
]\"\n break\n if not is_3_ring :\n r5 = [ head ] \n r5.append(c1)\n r5.append(c2)\n r5 += intersect( graph[head], graph[c1])\n r5 += intersect( graph[head], graph[c2])\n r5.sort()\n if not r5 in rings5:\n if DEBUG: print \" 5member ring!\",r5\n rings5.append(r5)\n break\n return rings5, rings6", "def _get_ring_nodes(m, namin=3, namax=9, remove_redudant=T):\n # first search for rings\n sets = []\n for i in range(namin, namax+1):\n #if i in [3,4,5]:\n pat_i = '*~1' + '~*'*(i-2) + '~*1'\n #else:\n # pat_i = '*:1' + ':*'*(i-2) + ':*1'\n Qi = Chem.MolFromSmarts( pat_i )\n for tsi in m.GetSubstructMatches(Qi):\n set_i = set(tsi)\n if set_i not in sets:\n sets.append( set(tsi) )\n if remove_redudant:\n # now remove those rings that are union of smaller rings\n n = len(sets)\n sets_remove = []\n ijs = itl.combinations( list(range(n)), 2 )\n sets_u = []\n for i,j in ijs:\n set_ij = sets[i].union( sets[j] )\n if (set_ij in sets) and (set_ij not in sets_remove):\n sets_remove.append( set_ij )\n sets_u = cim.get_compl(sets, sets_remove)\n else:\n sets_u = sets\n return sets_u", "def test_8():\n answer_pdb_str = \"\"\"\nATOM 1 N ARG A 1 26.061 12.824 1.988 1.00 0.00 N\nATOM 2 CA ARG A 1 27.253 12.525 2.773 1.00 0.00 C\nATOM 3 C ARG A 1 28.520 12.882 2.003 1.00 0.00 C\nATOM 4 O ARG A 1 28.853 12.243 1.005 1.00 0.00 O\nATOM 5 CB ARG A 1 27.280 11.041 3.156 1.00 10.00 C\nATOM 6 CG ARG A 1 26.107 10.591 4.022 1.00 10.00 C\nATOM 7 CD ARG A 1 26.118 11.230 5.409 1.00 10.00 C\nATOM 8 NE ARG A 1 27.283 10.828 6.201 1.00 10.00 N\nATOM 9 CZ ARG A 1 27.735 11.441 7.298 1.00 10.00 C\nATOM 10 NH1 ARG A 1 27.146 12.525 7.803 1.00 10.00 N\nATOM 11 NH2 ARG A 1 28.808 10.956 7.908 1.00 10.00 N\nATOM 12 N ALA A 2 29.223 13.907 2.474 1.00 0.00 N\nATOM 13 CA ALA A 2 30.455 14.351 1.832 1.00 0.00 C\nATOM 14 C ALA A 2 31.652 14.171 2.758 1.00 0.00 C\nATOM 15 O ALA A 2 31.775 14.859 3.772 1.00 0.00 O\nATOM 16 CB ALA A 2 30.331 15.807 1.408 1.00 0.00 C\nATOM 17 N HIS A 3 32.534 13.242 2.403 1.00 0.00 N\nATOM 18 CA HIS A 3 33.724 12.970 3.202 1.00 0.00 C\nATOM 19 C HIS A 3 34.993 13.295 2.422 1.00 0.00 C\nATOM 20 O HIS A 3 35.327 12.618 1.450 1.00 0.00 O\nATOM 21 CB HIS A 3 33.744 11.503 3.640 1.00 0.00 C\nATOM 22 CG HIS A 3 32.618 11.130 4.554 1.00 0.00 C\nATOM 23 ND1 HIS A 3 32.586 11.494 5.882 1.00 0.00 N\nATOM 24 CD2 HIS A 3 31.485 10.424 4.330 1.00 0.00 C\nATOM 25 CE1 HIS A 3 31.481 11.029 6.437 1.00 0.00 C\nATOM 26 NE2 HIS A 3 30.795 10.375 5.517 1.00 0.00 N\nATOM 27 N ALA A 4 35.698 14.335 2.856 1.00 0.00 N\nATOM 28 CA ALA A 4 36.932 14.752 2.201 1.00 0.00 C\nATOM 29 C ALA A 4 38.127 14.604 3.136 1.00 0.00 C\nATOM 30 O ALA A 4 38.248 15.329 4.124 1.00 0.00 O\nATOM 31 CB ALA A 4 36.812 16.192 1.723 1.00 0.00 C\nATOM 32 N ASP A 5 39.007 13.660 2.818 1.00 0.00 N\nATOM 33 CA ASP A 5 40.194 13.415 3.630 1.00 0.00 C\nATOM 34 C ASP A 5 41.467 13.708 2.841 1.00 0.00 C\nATOM 35 O ASP A 5 41.801 12.995 1.896 1.00 0.00 O\nATOM 36 CB ASP A 5 40.211 11.966 4.122 1.00 0.00 C\nATOM 37 CG ASP A 5 41.346 11.691 5.089 1.00 0.00 C\nATOM 38 OD1 ASP A 5 41.256 12.134 6.254 1.00 0.00 O\nATOM 39 OD2 ASP A 5 42.327 11.032 4.685 1.00 0.00 O\nATOM 40 N ALA A 6 42.172 14.763 3.238 1.00 0.00 N\nATOM 41 CA ALA A 6 43.409 15.152 2.570 1.00 0.00 C\nATOM 42 C ALA A 6 44.601 15.036 3.514 1.00 0.00 C\nATOM 43 O ALA A 6 44.722 15.797 4.474 1.00 0.00 O\nATOM 44 CB ALA A 6 43.294 16.573 2.039 1.00 0.00 C\nATOM 45 N GLU A 7 45.480 14.079 3.234 1.00 0.00 N\nATOM 46 CA GLU A 7 46.665 13.862 4.057 1.00 0.00 C\nATOM 47 C GLU A 7 47.940 14.122 3.261 1.00 0.00 
C\nATOM 48 O GLU A 7 48.275 13.373 2.344 1.00 0.00 O\nATOM 49 CB GLU A 7 46.677 12.432 4.604 1.00 0.00 C\nATOM 50 CG GLU A 7 45.565 12.140 5.599 1.00 0.00 C\nATOM 51 CD GLU A 7 45.595 10.711 6.103 1.00 0.00 C\nATOM 52 OE1 GLU A 7 46.403 9.912 5.585 1.00 0.00 O\nATOM 53 OE2 GLU A 7 44.809 10.384 7.019 1.00 0.00 O\nATOM 54 N ALA A 8 48.647 15.189 3.620 1.00 0.00 N\nATOM 55 CA ALA A 8 49.886 15.550 2.941 1.00 0.00 C\nATOM 56 C ALA A 8 51.076 15.468 3.892 1.00 0.00 C\nATOM 57 O ALA A 8 51.196 16.264 4.823 1.00 0.00 O\nATOM 58 CB ALA A 8 49.776 16.951 2.356 1.00 0.00 C\nATOM 59 N ALA A 10 55.122 15.615 4.002 1.00 0.00 N\nATOM 60 CA ALA A 10 56.363 15.948 3.313 1.00 0.00 C\nATOM 61 C ALA A 10 57.551 15.898 4.269 1.00 0.00 C\nATOM 62 O ALA A 10 57.671 16.728 5.170 1.00 0.00 O\nATOM 63 CB ALA A 10 56.258 17.326 2.676 1.00 0.00 C\nATOM 64 N ASN A 11 58.427 14.919 4.065 1.00 0.00 N\nATOM 65 CA ASN A 11 59.606 14.759 4.908 1.00 0.00 C\nATOM 66 C ASN A 11 60.886 14.953 4.102 1.00 0.00 C\nATOM 67 O ASN A 11 61.222 14.136 3.244 1.00 0.00 O\nATOM 68 CB ASN A 11 59.609 13.379 5.562 1.00 0.00 C\nATOM 69 CG ASN A 11 58.532 13.236 6.620 1.00 0.00 C\nATOM 70 OD1 ASN A 11 58.296 14.149 7.410 1.00 0.00 O\nATOM 71 ND2 ASN A 11 57.872 12.083 6.640 1.00 0.00 N\nATOM 72 N ALA A 12 61.597 16.041 4.383 1.00 0.00 N\nATOM 73 CA ALA A 12 62.841 16.345 3.686 1.00 0.00 C\nATOM 74 C ALA A 12 64.025 16.328 4.646 1.00 0.00 C\nATOM 75 O ALA A 12 64.145 17.191 5.515 1.00 0.00 O\nATOM 76 CB ALA A 12 62.740 17.698 2.997 1.00 0.00 C\nATOM 77 N GLN A 13 64.899 15.340 4.481 1.00 0.00 N\nATOM 78 CA GLN A 13 66.076 15.209 5.332 1.00 0.00 C\nATOM 79 C GLN A 13 67.359 15.370 4.522 1.00 0.00 C\nATOM 80 O GLN A 13 67.695 14.521 3.697 1.00 0.00 O\nATOM 81 CB GLN A 13 66.071 13.849 6.037 1.00 0.00 C\nATOM 82 CG GLN A 13 67.212 13.651 7.023 1.00 0.00 C\nATOM 83 CD GLN A 13 67.140 12.317 7.739 1.00 0.00 C\nATOM 84 OE1 GLN A 13 66.251 11.506 7.477 1.00 0.00 O\nATOM 85 NE2 GLN A 13 68.078 12.082 8.650 1.00 0.00 N\nATOM 86 N ALA A 14 68.071 16.466 4.765 1.00 0.00 N\nATOM 87 CA ALA A 14 69.318 16.740 4.059 1.00 0.00 C\nATOM 88 C ALA A 14 70.500 16.757 5.022 1.00 0.00 C\nATOM 89 O ALA A 14 70.620 17.652 5.859 1.00 0.00 O\nATOM 90 CB ALA A 14 69.222 18.067 3.320 1.00 0.00 C\nATOM 91 N LEU A 15 71.372 15.761 4.897 1.00 0.00 N\nATOM 92 CA LEU A 15 72.547 15.660 5.755 1.00 0.00 C\nATOM 93 C LEU A 15 73.832 15.788 4.943 1.00 0.00 C\nATOM 94 O LEU A 15 74.168 14.907 4.151 1.00 0.00 O\nATOM 95 CB LEU A 15 72.541 14.325 6.508 1.00 0.00 C\nATOM 96 CG LEU A 15 71.415 14.114 7.526 1.00 0.00 C\nATOM 97 CD1 LEU A 15 71.462 12.699 8.081 1.00 0.00 C\nATOM 98 CD2 LEU A 15 71.487 15.136 8.654 1.00 0.00 C\nATOM 99 N ALA A 16 74.546 16.890 5.146 1.00 0.00 N\nATOM 100 CA ALA A 16 75.795 17.135 4.434 1.00 0.00 C\nATOM 101 C ALA A 16 76.975 17.185 5.399 1.00 0.00 C\nATOM 102 O ALA A 16 77.095 18.110 6.202 1.00 0.00 O\nATOM 103 CB ALA A 16 75.704 18.434 3.646 1.00 0.00 C\nATOM 104 N PHE A 17 77.845 16.184 5.313 1.00 0.00 N\nATOM 105 CA PHE A 17 79.018 16.111 6.177 1.00 0.00 C\nATOM 106 C PHE A 17 80.304 16.206 5.364 1.00 0.00 C\nATOM 107 O PHE A 17 80.640 15.296 4.606 1.00 0.00 O\nATOM 108 CB PHE A 17 79.005 14.807 6.980 1.00 0.00 C\nATOM 109 CG PHE A 17 77.889 14.721 7.981 1.00 0.00 C\nATOM 110 CD1 PHE A 17 77.992 15.351 9.210 1.00 0.00 C\nATOM 111 CD2 PHE A 17 76.735 14.009 7.693 1.00 0.00 C\nATOM 112 CE1 PHE A 17 76.966 15.272 10.133 1.00 0.00 C\nATOM 113 CE2 PHE A 17 75.706 13.927 8.612 1.00 0.00 C\nATOM 114 CZ PHE A 17 75.822 14.560 9.833 1.00 0.00 C\nATOM 115 N 
ALA A 18 81.021 17.314 5.528 1.00 0.00 N\nATOM 116 CA ALA A 18 82.272 17.529 4.810 1.00 0.00 C\nATOM 117 C ALA A 18 83.450 17.613 5.775 1.00 0.00 C\nATOM 118 O ALA A 18 83.570 18.567 6.543 1.00 0.00 O\nATOM 119 CB ALA A 18 82.186 18.797 3.973 1.00 0.00 C\nATOM 120 N TYR A 19 84.318 16.606 5.729 1.00 0.00 N\nATOM 121 CA TYR A 19 85.488 16.564 6.598 1.00 0.00 C\nATOM 122 C TYR A 19 86.777 16.625 5.785 1.00 0.00 C\nATOM 123 O TYR A 19 87.113 15.687 5.063 1.00 0.00 O\nATOM 124 CB TYR A 19 85.471 15.292 7.450 1.00 0.00 C\nATOM 125 CG TYR A 19 84.363 15.258 8.479 1.00 0.00 C\nATOM 126 CD1 TYR A 19 83.662 16.411 8.812 1.00 0.00 C\nATOM 127 CD2 TYR A 19 84.016 14.074 9.116 1.00 0.00 C\nATOM 128 CE1 TYR A 19 82.648 16.386 9.751 1.00 0.00 C\nATOM 129 CE2 TYR A 19 83.002 14.039 10.057 1.00 0.00 C\nATOM 130 CZ TYR A 19 82.323 15.197 10.369 1.00 0.00 C\nATOM 131 OH TYR A 19 81.313 15.166 11.305 1.00 0.00 O\nATOM 132 N ALA A 20 87.496 17.737 5.909 1.00 0.00 N\nATOM 133 CA ALA A 20 88.749 17.922 5.187 1.00 0.00 C\nATOM 134 C ALA A 20 89.925 18.039 6.151 1.00 0.00 C\nATOM 135 O ALA A 20 90.046 19.021 6.883 1.00 0.00 O\nATOM 136 CB ALA A 20 88.668 19.159 4.303 1.00 0.00 C\nATOM 137 N VAL A 21 90.791 17.030 6.145 1.00 0.00 N\nATOM 138 CA VAL A 21 91.959 17.017 7.018 1.00 0.00 C\nATOM 139 C VAL A 21 93.250 17.045 6.207 1.00 0.00 C\nATOM 140 O VAL A 21 93.585 16.079 5.521 1.00 0.00 O\nATOM 141 CB VAL A 21 91.967 15.769 7.925 1.00 0.00 C\nATOM 142 CG1 VAL A 21 93.241 15.722 8.760 1.00 0.00 C\nATOM 143 CG2 VAL A 21 90.735 15.749 8.820 1.00 0.00 C\nATOM 144 N ALA A 22 93.971 18.159 6.291 1.00 0.00 N\nATOM 145 CA ALA A 22 95.226 18.315 5.565 1.00 0.00 C\nATOM 146 C ALA A 22 96.400 18.465 6.527 1.00 0.00 C\nATOM 147 O ALA A 22 96.521 19.473 7.222 1.00 0.00 O\nATOM 148 CB ALA A 22 95.150 19.517 4.636 1.00 0.00 C\nTER\nATOM 149 N ARG B 1 27.961 0.504 1.988 1.00 0.00 N\nATOM 150 CA ARG B 1 29.153 0.205 2.773 1.00 0.00 C\nATOM 151 C ARG B 1 30.420 0.562 2.003 1.00 0.00 C\nATOM 152 O ARG B 1 30.753 -0.077 1.005 1.00 0.00 O\nATOM 153 CB ARG B 1 29.180 -1.279 3.156 1.00 10.00 C\nATOM 154 CG ARG B 1 28.007 -1.729 4.022 1.00 10.00 C\nATOM 155 CD ARG B 1 28.018 -1.090 5.409 1.00 10.00 C\nATOM 156 NE ARG B 1 29.183 -1.492 6.201 1.00 10.00 N\nATOM 157 CZ ARG B 1 29.635 -0.879 7.298 1.00 10.00 C\nATOM 158 NH1 ARG B 1 30.708 -1.364 7.908 1.00 10.00 N\nATOM 159 NH2 ARG B 1 29.046 0.205 7.803 1.00 10.00 N\nATOM 160 N ALA B 2 31.123 1.587 2.474 1.00 0.00 N\nATOM 161 CA ALA B 2 32.355 2.031 1.832 1.00 0.00 C\nATOM 162 C ALA B 2 33.552 1.851 2.758 1.00 0.00 C\nATOM 163 O ALA B 2 33.675 2.539 3.772 1.00 0.00 O\nATOM 164 CB ALA B 2 32.231 3.487 1.408 1.00 0.00 C\nATOM 165 N HIS B 3 34.434 0.922 2.403 1.00 0.00 N\nATOM 166 CA HIS B 3 35.624 0.650 3.202 1.00 0.00 C\nATOM 167 C HIS B 3 36.893 0.975 2.422 1.00 0.00 C\nATOM 168 O HIS B 3 37.227 0.298 1.450 1.00 0.00 O\nATOM 169 CB HIS B 3 35.644 -0.817 3.640 1.00 0.00 C\nATOM 170 CG HIS B 3 34.518 -1.190 4.554 1.00 0.00 C\nATOM 171 ND1 HIS B 3 34.311 -0.928 5.866 1.00 0.00 C\nATOM 172 CD2 HIS B 3 33.431 -1.925 4.134 1.00 0.00 N\nATOM 173 CE1 HIS B 3 33.113 -1.504 6.211 1.00 0.00 N\nATOM 174 NE2 HIS B 3 32.603 -2.100 5.148 1.00 0.00 C\nATOM 175 N ALA B 4 37.598 2.015 2.856 1.00 0.00 N\nATOM 176 CA ALA B 4 38.832 2.432 2.201 1.00 0.00 C\nATOM 177 C ALA B 4 40.027 2.284 3.136 1.00 0.00 C\nATOM 178 O ALA B 4 40.148 3.009 4.124 1.00 0.00 O\nATOM 179 CB ALA B 4 38.712 3.872 1.723 1.00 0.00 C\nATOM 180 N ASP B 5 40.907 1.340 2.818 1.00 0.00 N\nATOM 181 CA ASP B 5 42.094 1.095 3.630 1.00 0.00 
C\nATOM 182 C ASP B 5 43.367 1.388 2.841 1.00 0.00 C\nATOM 183 O ASP B 5 43.701 0.675 1.896 1.00 0.00 O\nATOM 184 CB ASP B 5 42.111 -0.354 4.122 1.00 0.00 C\nATOM 185 CG ASP B 5 43.246 -0.629 5.089 1.00 0.00 C\nATOM 186 OD1 ASP B 5 43.158 -0.186 6.253 1.00 0.00 O\nATOM 187 OD2 ASP B 5 44.227 -1.288 4.683 1.00 0.00 O\nATOM 188 N ALA B 6 44.072 2.443 3.238 1.00 0.00 N\nATOM 189 CA ALA B 6 45.309 2.832 2.570 1.00 0.00 C\nATOM 190 C ALA B 6 46.501 2.716 3.514 1.00 0.00 C\nATOM 191 O ALA B 6 46.622 3.477 4.474 1.00 0.00 O\nATOM 192 CB ALA B 6 45.194 4.253 2.039 1.00 0.00 C\nATOM 193 N GLU B 7 47.380 1.759 3.234 1.00 0.00 N\nATOM 194 CA GLU B 7 48.565 1.542 4.057 1.00 0.00 C\nATOM 195 C GLU B 7 49.840 1.802 3.261 1.00 0.00 C\nATOM 196 O GLU B 7 50.175 1.053 2.344 1.00 0.00 O\nATOM 197 CB GLU B 7 48.577 0.112 4.604 1.00 0.00 C\nATOM 198 CG GLU B 7 47.465 -0.180 5.599 1.00 0.00 C\nATOM 199 CD GLU B 7 47.495 -1.609 6.103 1.00 0.00 C\nATOM 200 OE1 GLU B 7 48.305 -2.409 5.584 1.00 0.00 O\nATOM 201 OE2 GLU B 7 46.711 -1.936 7.018 1.00 0.00 O\nATOM 202 N ALA B 8 50.547 2.869 3.620 1.00 0.00 N\nATOM 203 CA ALA B 8 51.786 3.230 2.941 1.00 0.00 C\nATOM 204 C ALA B 8 52.976 3.148 3.892 1.00 0.00 C\nATOM 205 O ALA B 8 53.096 3.944 4.823 1.00 0.00 O\nATOM 206 CB ALA B 8 51.676 4.631 2.356 1.00 0.00 C\nATOM 207 N ALA B 10 57.022 3.295 4.002 1.00 0.00 N\nATOM 208 CA ALA B 10 58.263 3.628 3.313 1.00 0.00 C\nATOM 209 C ALA B 10 59.451 3.578 4.269 1.00 0.00 C\nATOM 210 O ALA B 10 59.571 4.408 5.170 1.00 0.00 O\nATOM 211 CB ALA B 10 58.158 5.006 2.676 1.00 0.00 C\nATOM 212 N ASN B 11 60.327 2.599 4.065 1.00 0.00 N\nATOM 213 CA ASN B 11 61.506 2.439 4.908 1.00 0.00 C\nATOM 214 C ASN B 11 62.786 2.633 4.102 1.00 0.00 C\nATOM 215 O ASN B 11 63.122 1.816 3.244 1.00 0.00 O\nATOM 216 CB ASN B 11 61.509 1.059 5.562 1.00 0.00 C\nATOM 217 CG ASN B 11 60.432 0.916 6.620 1.00 0.00 C\nATOM 218 OD1 ASN B 11 60.252 1.957 7.425 1.00 0.00 N\nATOM 219 ND2 ASN B 11 59.769 -0.116 6.713 1.00 0.00 O\nATOM 220 N ALA B 12 63.497 3.721 4.383 1.00 0.00 N\nATOM 221 CA ALA B 12 64.741 4.025 3.686 1.00 0.00 C\nATOM 222 C ALA B 12 65.925 4.008 4.646 1.00 0.00 C\nATOM 223 O ALA B 12 66.045 4.871 5.515 1.00 0.00 O\nATOM 224 CB ALA B 12 64.640 5.378 2.997 1.00 0.00 C\nATOM 225 N GLN B 13 66.799 3.020 4.481 1.00 0.00 N\nATOM 226 CA GLN B 13 67.976 2.889 5.332 1.00 0.00 C\nATOM 227 C GLN B 13 69.259 3.050 4.522 1.00 0.00 C\nATOM 228 O GLN B 13 69.595 2.201 3.697 1.00 0.00 O\nATOM 229 CB GLN B 13 67.971 1.529 6.037 1.00 0.00 C\nATOM 230 CG GLN B 13 69.112 1.331 7.023 1.00 0.00 C\nATOM 231 CD GLN B 13 69.040 -0.003 7.739 1.00 0.00 C\nATOM 232 OE1 GLN B 13 68.046 -0.811 7.388 1.00 0.00 N\nATOM 233 NE2 GLN B 13 69.869 -0.305 8.598 1.00 0.00 O\nATOM 234 N ALA B 14 69.971 4.146 4.765 1.00 0.00 N\nATOM 235 CA ALA B 14 71.218 4.420 4.059 1.00 0.00 C\nATOM 236 C ALA B 14 72.400 4.437 5.022 1.00 0.00 C\nATOM 237 O ALA B 14 72.520 5.332 5.859 1.00 0.00 O\nATOM 238 CB ALA B 14 71.122 5.747 3.320 1.00 0.00 C\nATOM 239 N LEU B 15 73.272 3.441 4.897 1.00 0.00 N\nATOM 240 CA LEU B 15 74.447 3.340 5.755 1.00 0.00 C\nATOM 241 C LEU B 15 75.732 3.468 4.943 1.00 0.00 C\nATOM 242 O LEU B 15 76.068 2.587 4.151 1.00 0.00 O\nATOM 243 CB LEU B 15 74.441 2.005 6.508 1.00 0.00 C\nATOM 244 CG LEU B 15 73.315 1.794 7.526 1.00 0.00 C\nATOM 245 CD1 LEU B 15 72.426 0.619 7.136 1.00 0.00 C\nATOM 246 CD2 LEU B 15 72.491 3.063 7.674 1.00 0.00 C\nATOM 247 N ALA B 16 76.446 4.570 5.146 1.00 0.00 N\nATOM 248 CA ALA B 16 77.695 4.815 4.434 1.00 0.00 C\nATOM 249 C ALA B 16 78.875 4.865 
5.399 1.00 0.00 C\nATOM 250 O ALA B 16 78.995 5.790 6.202 1.00 0.00 O\nATOM 251 CB ALA B 16 77.604 6.114 3.646 1.00 0.00 C\nATOM 252 N PHE B 17 79.745 3.864 5.313 1.00 0.00 N\nATOM 253 CA PHE B 17 80.918 3.791 6.177 1.00 0.00 C\nATOM 254 C PHE B 17 82.204 3.886 5.364 1.00 0.00 C\nATOM 255 O PHE B 17 82.540 2.976 4.606 1.00 0.00 O\nATOM 256 CB PHE B 17 80.905 2.487 6.980 1.00 0.00 C\nATOM 257 CG PHE B 17 79.789 2.401 7.981 1.00 0.00 C\nATOM 258 CD1 PHE B 17 79.893 3.032 9.211 1.00 0.00 C\nATOM 259 CD2 PHE B 17 78.636 1.690 7.694 1.00 0.00 C\nATOM 260 CE1 PHE B 17 78.868 2.956 10.134 1.00 0.00 C\nATOM 261 CE2 PHE B 17 77.607 1.611 8.614 1.00 0.00 C\nATOM 262 CZ PHE B 17 77.724 2.244 9.835 1.00 0.00 C\nATOM 263 N ALA B 18 82.921 4.994 5.528 1.00 0.00 N\nATOM 264 CA ALA B 18 84.172 5.209 4.810 1.00 0.00 C\nATOM 265 C ALA B 18 85.350 5.293 5.775 1.00 0.00 C\nATOM 266 O ALA B 18 85.470 6.247 6.543 1.00 0.00 O\nATOM 267 CB ALA B 18 84.086 6.477 3.973 1.00 0.00 C\nATOM 268 N TYR B 19 86.218 4.286 5.729 1.00 0.00 N\nATOM 269 CA TYR B 19 87.388 4.244 6.598 1.00 0.00 C\nATOM 270 C TYR B 19 88.677 4.305 5.785 1.00 0.00 C\nATOM 271 O TYR B 19 89.013 3.367 5.063 1.00 0.00 O\nATOM 272 CB TYR B 19 87.371 2.972 7.450 1.00 0.00 C\nATOM 273 CG TYR B 19 86.263 2.938 8.479 1.00 0.00 C\nATOM 274 CD1 TYR B 19 85.564 4.090 8.814 1.00 0.00 C\nATOM 275 CD2 TYR B 19 85.918 1.753 9.118 1.00 0.00 C\nATOM 276 CE1 TYR B 19 84.550 4.063 9.756 1.00 0.00 C\nATOM 277 CE2 TYR B 19 84.907 1.716 10.059 1.00 0.00 C\nATOM 278 CZ TYR B 19 84.228 2.874 10.374 1.00 0.00 C\nATOM 279 OH TYR B 19 83.220 2.843 11.312 1.00 0.00 O\nATOM 280 N ALA B 20 89.396 5.417 5.909 1.00 0.00 N\nATOM 281 CA ALA B 20 90.649 5.602 5.187 1.00 0.00 C\nATOM 282 C ALA B 20 91.825 5.719 6.151 1.00 0.00 C\nATOM 283 O ALA B 20 91.946 6.701 6.883 1.00 0.00 O\nATOM 284 CB ALA B 20 90.568 6.839 4.303 1.00 0.00 C\nATOM 285 N VAL B 21 92.691 4.710 6.145 1.00 0.00 N\nATOM 286 CA VAL B 21 93.859 4.697 7.018 1.00 0.00 C\nATOM 287 C VAL B 21 95.150 4.725 6.207 1.00 0.00 C\nATOM 288 O VAL B 21 95.485 3.759 5.521 1.00 0.00 O\nATOM 289 CB VAL B 21 93.867 3.449 7.925 1.00 0.00 C\nATOM 290 CG1 VAL B 21 95.105 2.602 7.660 1.00 0.00 C\nATOM 291 CG2 VAL B 21 92.599 2.630 7.720 1.00 0.00 C\nATOM 292 N ALA B 22 95.871 5.839 6.291 1.00 0.00 N\nATOM 293 CA ALA B 22 97.126 5.995 5.565 1.00 0.00 C\nATOM 294 C ALA B 22 98.300 6.145 6.527 1.00 0.00 C\nATOM 295 O ALA B 22 98.421 7.153 7.222 1.00 0.00 O\nATOM 296 CB ALA B 22 97.050 7.197 4.636 1.00 0.00 C\nTER\n \"\"\"\n h = iotbx.pdb.input(source_info=None, lines=test_pdb_5).construct_hierarchy()\n anwer_h = iotbx.pdb.input(source_info=None,\n lines=answer_pdb_str).construct_hierarchy()\n h.write_pdb_file(\"test_8_before.pdb\")\n\n ncs_inp = iotbx.ncs.input(\n hierarchy=h,\n params=ncs_pars.ncs_search)\n ncs_groups = ncs_inp.get_ncs_restraints_group_list()\n\n nu.flip_atoms_in_ncs_groups(h, ncs_groups)\n h.write_pdb_file(\"test_8_result.pdb\")\n rmsd_smart = calculate_rmsd_smart(anwer_h, h)\n print(rmsd_smart)\n assert rmsd_smart < 0.01", "def matchCells(self, dTheta = 4, theta = None, n_max = 4, N = None,\\\n m_max = 4, M = None, max_strain = 1, max_atoms = 5000,\\\n limit = None, exp = 1, verbose = 1, min_angle = 10,\\\n remove_asd = True, asd_tol = 7, limit_asr = False,\\\n asr_tol = 1e-7, asr_iter = 350, asr_strain = \"eps_mas\",\\\n asr_endpoint = \"over\", target = None, favor = \"angle_same\"):\n\n if self.base_1 is None:\n string = \"No base structures exist\"\n ut.infoPrint(string)\n return\n\n \"\"\"Get number of atoms per 
area (xy) in base cell 1 and 2\"\"\"\n rhoA = self.pos_1.shape[0] / np.abs(np.cross(self.base_1[0:2, 0], self.base_1[0:2, 1]))\n rhoB = self.pos_2.shape[0] / np.abs(np.cross(self.base_2[0:2, 0], self.base_2[0:2, 1]))\n\n \"\"\"Cell rotation angles\"\"\"\n if theta is not None:\n if isinstance(theta, (int, np.integer)):\n angle = np.array([theta])\n else:\n angle = np.array(theta)\n else:\n angle = np.arange(0, 180, dTheta)\n\n if target is not None:\n if type(target) == list: target = np.array(target)\n angle = np.array([0])\n\n \"\"\"Repetions of the first cell vector, [-n_max,...,n_max],\n N takes president as a specific range of repititions\"\"\"\n if N is None:\n nR = np.arange(-n_max, n_max + 1)\n\n \"\"\"Repetions of the second cell vector, [0,...,m_max],\n M takes president as a specific range of repititions\"\"\"\n if M is None:\n mR = np.arange(0, m_max + 1)\n\n \"\"\"Create all permutations of nR and mR if M,N is specifed use only those\"\"\"\n if M is not None and N is not None:\n M = np.array(M)[:, None]; N = np.array(N)[:, None]\n dPerm = np.concatenate((M, N), axis = 1)\n else:\n dPerm = np.mgrid[nR[0]:nR[-1] + 1, mR[0]:mR[-1] + 1].reshape(2, nR.shape[0] * mR.shape[0])\n\n \"\"\"Convert angle to radians\"\"\"\n aRad = np.deg2rad(angle)\n\n \"\"\"Set up a Rotation matrix, move axis to work with shapes (X,2,2)\"\"\"\n R = np.moveaxis(np.array([[np.cos(aRad), -np.sin(aRad)],\n [np.sin(aRad), np.cos(aRad)]]), 2, 0)\n\n \"\"\"Rotate the B cell by the specified angles, e.g. C = R*B\"\"\"\n C = np.matmul(R, self.base_2[0:2, 0:2])\n\n \"\"\"Build all possible cell vectors given the permutations dPerm\n d = C*dPerm each row will be a possible cell vector\"\"\"\n d = np.matmul(C, dPerm)\n\n \"\"\"Express d in basis cell 1, d = A*e, find e -> A(-1)*d = e\"\"\"\n e = np.matmul(np.linalg.inv(self.base_1[0:2, 0:2]), d)\n\n \"\"\"Snap the e vectors to the A grid\"\"\"\n e = np.round(e, 0).astype(int)\n\n \"\"\"If target is supplied the matching is done against\n those specific repetitions. Supplied as a 2x2 matrix \n with basis vectors as columns. 
The righthanded version\n will be returned\"\"\"\n if target is not None:\n e = np.tile(np.array(target)[None, :, :], (R.shape[0], 1, 1))\n\n \"\"\"Caclculate the new (strained) d vectors (f), f = A * eInt\"\"\"\n f = np.matmul(self.base_1[0:2, 0:2], e)\n\n \"\"\"Create all permutations of the f vectors\"\"\"\n F = np.zeros((angle.shape[0], f.shape[2]**2, 2, 2))\n F[:, :, :, 0] = np.swapaxes(np.tile(f, f.shape[2]), 1, 2)\n F[:, :, :, 1] = np.swapaxes(np.repeat(f, f.shape[2], axis = 2), 1, 2)\n\n \"\"\"Flatten the first 2 dimensions\"\"\"\n F = F.reshape(-1, *F.shape[-2:])\n\n \"\"\"Create all the same permutations of the d vectors\"\"\"\n D = np.zeros((angle.shape[0], d.shape[2]**2, 2, 2))\n D[:, :, :, 0] = np.swapaxes(np.tile(d, d.shape[2]), 1, 2)\n D[:, :, :, 1] = np.swapaxes(np.repeat(d, d.shape[2], axis = 2), 1, 2)\n\n \"\"\"Flatten the first 2 dimensions\"\"\"\n D = D.reshape(-1, *D.shape[-2:])\n\n \"\"\"Create all the same permutations of the eInt vectors\"\"\"\n FRep = np.zeros((angle.shape[0], e.shape[2]**2, 2, 2))\n FRep[:, :, :, 0] = np.swapaxes(np.tile(e, e.shape[2]), 1, 2)\n FRep[:, :, :, 1] = np.swapaxes(np.repeat(e, e.shape[2], axis = 2), 1, 2)\n\n \"\"\"Flatten the first 2 dimensions\"\"\"\n FRep = FRep.reshape(-1, *FRep.shape[-2:])\n\n \"\"\"Create all the same permutations of the dPerm vectors\"\"\"\n dPerm = np.tile(dPerm[np.newaxis, :, :], (angle.shape[0], 1, 1))\n DRep = np.zeros((angle.shape[0], dPerm.shape[2]**2, 2, 2))\n DRep[:, :, :, 0] = np.swapaxes(np.tile(dPerm, dPerm.shape[2]), 1, 2)\n DRep[:, :, :, 1] = np.swapaxes(np.repeat(dPerm, dPerm.shape[2], axis = 2), 1, 2)\n\n \"\"\"Flatten the first 2 dimensions\"\"\"\n DRep = DRep.reshape(-1, *DRep.shape[-2:])\n\n \"\"\"Calculate the area of the F and D cells\"\"\"\n detF = np.linalg.det(F)\n detD = np.linalg.det(D)\n\n \"\"\"Remove all combinations where the determinant is 0 or <0\n i.e. linearly dependent or wrong handed. Do the same for \n the top cell\"\"\"\n keep = (detF > 1e-6) * (detD > 1e-6)\n detF = detF[keep]\n detD = detD[keep]\n\n if verbose > 0:\n string = \"Total basis pairs: %.0f | Lin dep/left handed: %.0f | Total kept: %.0f\"\\\n % (keep.shape[0], keep.shape[0] - np.sum(keep), np.sum(keep))\n ut.infoPrint(string)\n\n \"\"\"Return if no interfaces are found (if a specific match is wronghanded)\"\"\"\n if np.sum(keep) == 0:\n string1 = \"No Interfaces found to be linearly independedt or right handed. 
\"\n string2 = \"If a specific match is to be constructed swap M and N or target.\"\n ut.infoPrint(string1, sep_after = False)\n ut.infoPrint(string2, sep_before = False)\n return\n\n \"\"\"Remove the lin-dep/left handed combinations before calculating the strain\"\"\"\n F = F[keep]\n D = D[keep]\n FRep = FRep[keep]\n DRep = DRep[keep]\n\n \"\"\"Calculate the strain of the new cell vectors\"\"\"\n eps_11, eps_22, eps_12, eps_mas = ut.calcStrains(F, D)\n\n \"\"\"Create a matching vector with the original rotations\"\"\"\n ang = np.repeat(angle, f.shape[2]**2)\n ang = ang[keep]\n\n \"\"\"Calculate the number of atoms using the area and the area density\"\"\"\n rawAtoms = rhoA * detF + rhoB * detD\n atoms = np.round(rawAtoms)\n\n \"\"\"Check to make sure the calculated nr of atoms are integers, otherwise flag it\"\"\" \n tol = 7\n flag = (atoms != np.round(rawAtoms, tol))\n if np.sum(flag) != 0:\n index = np.arange(atoms.shape[0])[flag]\n string = \"None integer number of atoms calculated for the following interfaces\"\n ut.infoPrint(string, sep_before = False)\n for i in index:\n print(\"Index: %6i | Nr atoms: %14.10f\" % (i, rawAtoms[i]))\n\n \"\"\"Keep only unique entries. Found by checking for unique pairs of\n combinations for bottom and top surfaces\"\"\"\n full = np.zeros((atoms.shape[0], 4 * 2))\n full[:, 0:4] = FRep.reshape(*FRep.shape[0:1], -1)\n full[:, 4:8] = DRep.reshape(*DRep.shape[0:1], -1)\n\n ufi = np.unique(full, axis = 0, return_index = True)[1]\n keep = np.isin(np.arange(atoms.shape[0]), ufi)\n if verbose > 0:\n string = \"Non unique matches: %i | Total matches keept: %i\"\\\n % (atoms.shape[0] - np.sum(keep), np.sum(keep))\n ut.infoPrint(string)\n\n \"\"\"Assign values to class variables\"\"\"\n self.cell_1 = F[keep]\n self.cell_2 = D[keep]\n self.rep_1 = FRep[keep]\n self.rep_2 = DRep[keep]\n self.eps_11 = eps_11[keep]\n self.eps_22 = eps_22[keep]\n self.eps_12 = eps_12[keep]\n self.eps_mas = eps_mas[keep]\n self.atoms = atoms[keep]\n self.ang = ang[keep]\n self.e_int_c = np.zeros((self.atoms.shape[0], 1))\n self.w_sep_c = np.zeros((self.atoms.shape[0], 1))\n self.w_seps_c = np.zeros((self.atoms.shape[0], 1))\n self.e_int_d = np.zeros((self.atoms.shape[0], 1))\n self.w_sep_d = np.zeros((self.atoms.shape[0], 1))\n self.w_seps_d = np.zeros((self.atoms.shape[0], 1))\n self.order = np.arange(self.atoms.shape[0]) \n\n \"\"\"Further removal of interfaces based on specified critera follows below\"\"\"\n\n \"\"\"Reject interfaces based on criteria of strain * atoms^exp > limit\"\"\"\n if limit is not None:\n keep = ((self.eps_mas * (self.atoms ** exp)) < limit)\n ratio = np.sum(np.logical_not(keep))\n if verbose > 0:\n string = \"Matches with (strain * atoms^%s) > %s: %i | Total matches kept: %i\"\\\n % (exp, limit, ratio, np.sum(keep))\n ut.infoPrint(string)\n\n \"\"\"Remove interfaces with strain*atoms^exp > limit\"\"\"\n self.deleteInterfaces(keep, verbose = verbose - 1)\n\n \"\"\"Remove cells with to narrow cell angles, defined below\"\"\"\n ang_lim = np.deg2rad(min_angle)\n ang_1 = self.getBaseAngles(cell = 1)\n ang_2 = self.getBaseAngles(cell = 2)\n\n keep = (ang_1 > ang_lim) * (ang_1 < np.pi - ang_lim) *\\\n (ang_2 > ang_lim) * (ang_2 < np.pi - ang_lim)\n\n max_angle = np.sum(np.logical_not(keep))\n if verbose > 0:\n string = \"Cell angle outside limit (%.1f<X<%.1f): %i | Total kept: %i\"\\\n % (np.rad2deg(ang_lim), np.rad2deg(np.pi - ang_lim), max_angle, np.sum(keep))\n ut.infoPrint(string)\n\n \"\"\"Remove interfaces with angles outside specified range\"\"\"\n 
self.deleteInterfaces(keep, verbose = verbose - 1)\n\n \"\"\"Remove matches were any strain component is > max_strain\"\"\"\n keep = (np.abs(self.eps_11) < max_strain) *\\\n (np.abs(self.eps_22) < max_strain) *\\\n (np.abs(self.eps_12) < max_strain)\n\n max_strain = np.sum(np.logical_not(keep))\n if verbose > 0:\n string = \"Matches above max strain: %i | Total matches kept: %i\"\\\n % (max_strain, np.sum(keep))\n ut.infoPrint(string)\n\n \"\"\"Remove interfaces with abs(strains) above max_strain\"\"\"\n self.deleteInterfaces(keep, verbose = verbose - 1)\n\n \"\"\"Remove matches with the number of atoms > max_atoms\"\"\"\n keep = (self.atoms < max_atoms)\n max_atoms = np.sum(np.logical_not(keep))\n if verbose > 0:\n string = \"Matches with to many atoms: %i | Total matches kept: %i\"\\\n % (max_atoms, np.sum(keep))\n ut.infoPrint(string)\n\n \"\"\"Remove interfaces with more atoms than max_atoms\"\"\"\n self.deleteInterfaces(keep, verbose = verbose - 1)\n\n \"\"\"Find duplicates in the combo (nr_atoms, eps_mas) if specified\"\"\"\n if remove_asd:\n keep = self.getAtomStrainDuplicates(tol_mag = asd_tol, verbose = verbose, sort = favor)\n self.deleteInterfaces(keep, verbose = verbose - 1)\n\n if verbose > 0:\n string = \"Duplicate atoms/strain combinations: %i | Total matches kept: %i\"\\\n % (np.sum(np.logical_not(keep)), np.sum(keep))\n ut.infoPrint(string)\n\n \"\"\"Interfaces with |strains| < tol are slightly perturbed to avoid issues with log expressions\"\"\"\n tol = 1e-7\n exact_matches = np.abs(self.eps_mas) < tol\n self.eps_11[np.abs(self.eps_11) < tol] = tol\n self.eps_22[np.abs(self.eps_22) < tol] = tol\n self.eps_12[np.abs(self.eps_12) < tol] = tol\n self.eps_mas[np.abs(self.eps_mas) < tol] = tol\n if np.sum(exact_matches) > 0:\n string = \"Exact matches found: %i\" % np.sum(exact_matches)\n ut.infoPrint(string)\n\n \"\"\"Remove interfaces based on atom strain ratios, limiting the set to this number\"\"\"\n if limit_asr and self.atoms.shape[0] > 2:\n self.removeByAtomStrain(keep = limit_asr, tol = asr_tol, max_iter = asr_iter,\\\n strain = asr_strain, endpoint = asr_endpoint,\\\n verbose = verbose)\n\n \"\"\"Sort the interfaces based on number of atoms and reset the base order parameter\"\"\"\n self.sortInterfaces(sort = \"atoms\")\n self.setOrder(verbose = verbose)", "def _angle_of_attack(self, rel_wind, blade_chord):\n # blade_chord_vector - (relative_wind + pi)\n # rel_oposite = rel_wind.rotated(math.pi)\n aoa_rad = rel_wind.theta - blade_chord.theta\n aoa_rad = vec.normalize_angle(aoa_rad)\n aoa_360 = aoa_rad * 360 / math.tau\n return aoa_rad, aoa_360", "def find_all_ORFs_both_strands(dna):\n \n #reverse first \n #then take compliment \n #then find ORF \n reverseStrand = [] \n original = [] \n new = []\n reverseStrand.append(get_reverse_complement(dna)) \n original.append(find_all_ORFs(dna))\n new.append(find_all_ORFs(reverseStrand))\n return original + new", "def _get_bonded_h_via_distance(array, donor_mask, box):\n CUTOFF = 1.5\n\n coord = array.coord\n res_id = array.res_id\n hydrogen_mask = (array.element == \"H\")\n \n donor_hydrogen_mask = np.zeros(len(array), dtype=bool)\n associated_donor_indices = np.full(len(array), -1, dtype=int)\n\n donor_indices = np.where(donor_mask)[0]\n for donor_i in donor_indices:\n candidate_mask = hydrogen_mask & (res_id == res_id[donor_i])\n distances = distance(\n coord[donor_i], coord[candidate_mask], box=box\n )\n donor_h_indices = np.where(candidate_mask)[0][distances <= CUTOFF]\n for i in donor_h_indices:\n 
associated_donor_indices[i] = donor_i\n donor_hydrogen_mask[i] = True\n \n return donor_hydrogen_mask, associated_donor_indices", "def getChemAngles(self):\n dataDict = self.__dict__\n atomsIncluded = self.chemAtoms.issuperset\n result = frozenset(xx for xx in self.chemComp.chemAngles if atomsIncluded(xx.chemAtoms))\n return result", "def V_angles(atoms):\n \n Va = 0 # this is the variable we will store the sum of all the energies in\n N = len(atoms)\n for i in range(len(atoms)):\n j = (i+1) % N\n k = (i-1) % N\n x_ij = atoms.coords[j] - atoms.coords[i] # vector from atom i to j\n x_ik = atoms.coords[k] - atoms.coords[i] # vector from atom i to k\n theta = np.arccos(np.dot(x_ij, x_ik)/(norm(x_ij)*norm(x_ik))) # angle between the above two\n \n Va += (theta - TH0)**2\n \n return Va", "def test_set_rdkit_dihedrals(self):\n xyz0 = converter.str_to_xyz(\"\"\"O 1.17961475 -0.92725986 0.15472373\nC 0.45858928 0.27919340 -0.04589251\nC -1.02470597 -0.01894626 0.00226686\nH 0.73480842 0.69726202 -1.01850832\nH 0.73330833 0.98882191 0.74024781\nH -1.29861662 -0.45953441 0.96660817\nH -1.29713649 -0.74721756 -0.76877222\nH -1.61116041 0.89155300 -0.14917209\nH 2.12529871 -0.70387223 0.11849858\"\"\")\n spc0 = ARCSpecies(label='CCO', smiles='CCO', xyz=xyz0) # define with xyz for consistent atom order\n mol0 = spc0.mol\n\n torsion0 = (3, 2, 1, 9) # the OH rotor\n torsion0_list = [tor - 1 for tor in torsion0]\n new_dihedral = -60\n deg_increment = 240 # -180 + 240 = +60\n\n conf, rd_mol = converter.rdkit_conf_from_mol(mol0, xyz0)\n new_xyz1 = converter.set_rdkit_dihedrals(conf, rd_mol, torsion0_list, deg_abs=new_dihedral)\n\n conf, rd_mol = converter.rdkit_conf_from_mol(mol0, xyz0) # convert again to init the conf object\n new_xyz2 = converter.set_rdkit_dihedrals(conf, rd_mol, torsion0_list, deg_increment=deg_increment)\n\n expected_xyz1 = \"\"\"O 1.17961475 -0.92725986 0.15472373\nC 0.45858928 0.27919340 -0.04589251\nC -1.02470597 -0.01894626 0.00226686\nH 0.73480842 0.69726202 -1.01850832\nH 0.73330833 0.98882191 0.74024781\nH -1.29861662 -0.45953441 0.96660817\nH -1.29713649 -0.74721756 -0.76877222\nH -1.61116041 0.89155300 -0.14917209\nH 0.92345327 -1.27098714 1.02751540\n\"\"\"\n expected_xyz2 = \"\"\"O 1.17961475 -0.92725986 0.15472373\nC 0.45858928 0.27919340 -0.04589251\nC -1.02470597 -0.01894626 0.00226686\nH 0.73480842 0.69726202 -1.01850832\nH 0.73330833 0.98882191 0.74024781\nH -1.29861662 -0.45953441 0.96660817\nH -1.29713649 -0.74721756 -0.76877222\nH -1.61116041 0.89155300 -0.14917209\nH 0.92480849 -1.53430645 -0.56088835\n\"\"\"\n\n self.assertTrue(almost_equal_coords_lists(new_xyz1, converter.str_to_xyz(expected_xyz1)))\n self.assertTrue(almost_equal_coords_lists(new_xyz2, converter.str_to_xyz(expected_xyz2)))\n\n xyz1 = converter.str_to_xyz(\"\"\"N -0.29070308 0.26322835 0.48770927\nN 0.29070351 -0.26323281 -0.48771096\nN -2.61741263 1.38275080 2.63428181\nN 2.61742270 -1.38276006 -2.63427425\nC -1.77086206 0.18100754 0.43957605\nC 1.77086254 -0.18101028 -0.43957552\nC -2.22486176 -1.28143567 0.45202312\nC -2.30707039 0.92407663 -0.78734681\nC 2.30707074 -0.92407071 0.78735246\nC 2.22485929 1.28143406 -0.45203080\nC -2.23868798 0.85547218 1.67084736\nC 2.23869247 -0.85548109 -1.67084185\nH -1.90398693 -1.81060764 -0.45229645\nH -3.31681639 -1.35858536 0.51240600\nH -1.80714051 -1.81980551 1.31137107\nH -3.40300863 0.95379538 -0.78701415\nH -1.98806037 0.44494681 -1.71978670\nH -1.94802915 1.96005927 -0.81269573\nH 1.98805486 -0.44493850 1.71978893\nH 1.94803425 -1.96005464 
0.81270509\nH 3.40300902 -0.95378386 0.78702431\nH 1.90398036 1.81061002 0.45228426\nH 3.31681405 1.35858667 -0.51241516\nH 1.80713611 1.81979843 -1.31138136\"\"\")\n spc1 = ARCSpecies(label='AIBN', smiles='CC(C)(C#N)/N=N/C(C)(C)C#N', xyz=xyz1)\n mol1 = spc1.mol\n\n torsion1 = (1, 2, 6, 9)\n torsion1_list = [tor - 1 for tor in torsion1]\n new_dihedral = 118.2\n\n conf, rd_mol = converter.rdkit_conf_from_mol(mol1, xyz1)\n new_xyz3 = converter.set_rdkit_dihedrals(conf, rd_mol, torsion1_list, deg_abs=new_dihedral)\n\n expected_xyz3 = \"\"\"N -0.29070308 0.26322835 0.48770927\nN 0.29070351 -0.26323281 -0.48771096\nN -2.61741263 1.38275080 2.63428181\nN 2.48573367 1.01638899 -2.68295766\nC -1.77086206 0.18100754 0.43957605\nC 1.77086254 -0.18101028 -0.43957552\nC -2.22486176 -1.28143567 0.45202312\nC -2.30707039 0.92407663 -0.78734681\nC 2.38216062 -1.58430507 -0.39387342\nC 2.21983062 0.66527087 0.75509913\nC -2.23868798 0.85547218 1.67084736\nC 2.16482620 0.49023713 -1.69815092\nH -1.90398693 -1.81060764 -0.45229645\nH -3.31681639 -1.35858536 0.51240600\nH -1.80714051 -1.81980551 1.31137107\nH -3.40300863 0.95379538 -0.78701415\nH -1.98806037 0.44494681 -1.71978670\nH -1.94802915 1.96005927 -0.81269573\nH 2.11909310 -2.10839740 0.53181512\nH 2.02775663 -2.19945525 -1.22981644\nH 3.47613291 -1.54390687 -0.45350823\nH 1.95308217 0.19222185 1.70685860\nH 3.30593713 0.81467275 0.75113509\nH 1.74954927 1.65592664 0.73932447\n\"\"\"\n\n self.assertTrue(almost_equal_coords_lists(new_xyz3, converter.str_to_xyz(expected_xyz3)))\n\n rd_conf, rd_mol = converter.rdkit_conf_from_mol(mol1, converter.str_to_xyz(expected_xyz3))\n angle = rdMT.GetDihedralDeg(rd_conf, torsion1_list[0], torsion1_list[1], torsion1_list[2], torsion1_list[3])\n\n self.assertAlmostEqual(angle, 118.2, 5)\n\n xyz4 = \"\"\"O 1.28706525 0.52121353 0.04219198\nC 0.39745682 -0.35265044 -0.63649234\nC -0.98541845 0.26289370 -0.64801959\nH 0.76016885 -0.50111637 -1.65799025\nH 0.38478504 -1.31559717 -0.11722981\nH -0.96971239 1.23774091 -1.14654347\nH -1.69760597 -0.38642828 -1.16478035\nH -1.34010718 0.43408610 0.37373771\nH 2.16336803 0.09985803 0.03295192\"\"\"\n spc4 = ARCSpecies(label='ethanol', smiles='CCO', xyz=xyz4)\n rd_conf, rd_mol = converter.rdkit_conf_from_mol(mol=spc4.mol, xyz=converter.str_to_xyz(xyz4))\n torsion4 = [9, 1, 2, 3]\n torsion4_list = [tor - 1 for tor in torsion4]\n new_xyz4 = converter.set_rdkit_dihedrals(rd_conf, rd_mol, torsion4_list, deg_abs=60)\n expected_xyz4 = \"\"\"O 1.28706525 0.52121353 0.04219198\nC 0.39745682 -0.35265044 -0.63649234\nC 0.36441173 -1.68197093 0.08682400\nH -0.59818222 0.10068325 -0.65235399\nH 0.74799641 -0.48357798 -1.66461710\nH 0.03647269 -1.54932006 1.12314420\nH -0.31340646 -2.38081353 -0.41122551\nH 1.36475837 -2.12581592 0.12433596\nH 2.16336803 0.09985803 0.03295192\"\"\"\n self.assertEqual(converter.xyz_to_str(new_xyz4), expected_xyz4)", "def transform_angle_by_quadrant(self, initial_angle, x_diff, y_diff):\n\t\tif x_diff > 0 and y_diff > 0:\n\t\t\tprint(\"p1 in quadrant: {}\".format(1))\n\t\t\t# Point B in quadrant 1..\n\t\t\treturn degrees(initial_angle)\n\t\telif x_diff < 0 and y_diff > 0:\n\t\t\tprint(\"p1 in quadrant: {}\".format(2))\n\t\t\t# Point B in quadrant 2..\n\t\t\treturn 180 - degrees(initial_angle)\n\t\telif x_diff < 0 and y_diff < 0:\n\t\t\tprint(\"p1 in quadrant: {}\".format(3))\n\t\t\t# Point B in quadrant 3..\n\t\t\treturn 180 + degrees(initial_angle)\n\t\telif x_diff > 0 and y_diff < 0:\n\t\t\tprint(\"p1 in quadrant: {}\".format(4))\n\t\t\t# Point B in 
quadrant 4..\n\t\t\treturn 360 - degrees(initial_angle)\n\t\telse:\n\t\t\traise \"Error occurred in basic_drive_3/transform_angle_by_quadrant func..\"", "def arc_tangents(amount, start, stop, truncated, sequence):\n ratio = (start + stop) / 2\n for x in range(start, amount):\n y = abs(round(ratio * math.atan(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def get_excluded_pairs(self, max_exclusion = 3):\n\n excluded_pairs = []\n\n # construct a matrix of size n by n where n is the number of atoms in this fragment\n # a value of 1 in row a and column b means that atom a and b are bonded\n connectivity_matrix = [[0 for k in range(self.get_num_atoms())] for i in range(self.get_num_atoms())]\n\n # loop over each pair of atoms\n for index1, atom1 in enumerate(self.get_atoms()):\n for index2, atom2 in enumerate(self.get_atoms()[index1 + 1:]):\n index2 += index1 + 1\n\n # if these atoms are bonded, set their values in the connectivity matrix to 1.\n if atom1.is_bonded(atom2):\n connectivity_matrix[index1][index2] = 1\n connectivity_matrix[index2][index1] = 1\n\n # current matrix represents connectivity_matrix^x where x is the same as as in the excluded_1x pairs we are currently generating\n current_matrix = connectivity_matrix\n\n excluded_pairs_12 = set()\n\n # loop over each pair of atoms\n for index1, atom1 in enumerate(self.get_atoms()):\n for index2, atom2 in enumerate(self.get_atoms()[index1 + 1:]):\n index2 += index1 + 1\n\n # if the value in the current matrix is at least 1, then these atoms are 1 bond apart, and are added to the excluded_pairs_12 list.\n if current_matrix[index1][index2] > 0:\n excluded_pairs_12.add((index1, index2))\n\n # add the excluded_pairs_12 to the list of all excluded pairs\n excluded_pairs.append(excluded_pairs_12)\n\n for i in range(max_exclusion - 1):\n\n # current matrix is multiplied by connectivity_matrix so that each iteration of the loop current_matrix = connectivity_matrix^(i + 1)\n current_matrix = numpy.matmul(current_matrix, connectivity_matrix)\n\n excluded_pairs_1x = set()\n\n # loop over each pair of atoms\n for index1, atom1 in enumerate(self.get_atoms()):\n for index2, atom2 in enumerate(self.get_atoms()[index1 + 1:]):\n index2 += index1 + 1\n\n # if the value in the connectivity matrix is at least 1, then these atoms are x bonds apart, and are added to the excluded_pairs_1x list.\n if current_matrix[index1][index2] > 0:\n excluded_pairs_1x.add((index1, index2))\n\n # filter out all terms inside other excluded lists from the new excluded list\n for excluded_pairs_1y in excluded_pairs:\n excluded_pairs_1x -= excluded_pairs_1y\n\n # add the excluded_pairs_1x to the list of all excluded pairs\n excluded_pairs.append(excluded_pairs_1x)\n\n return [[list(pair) for pair in excluded_pairs_1x] for excluded_pairs_1x in excluded_pairs]", "def test_rotamer_library_builder(self):\n LIGAND_PATH = 'ligands/oleic_acid.pdb'\n\n ligand_path = get_data_file_path(LIGAND_PATH)\n molecule = Molecule(ligand_path, exclude_terminal_rotamers=False)\n\n # rotamer_library = RotamerLibrary(molecule)\n\n rotamers_per_branch = molecule.rotamers\n\n assert len(rotamers_per_branch) == 2, \"Found an invalid number \" + \\\n \"of branches: {}\".format(len(rotamers_per_branch))\n\n atom_list_1 = list()\n atom_list_2 = list()\n\n rotamers = rotamers_per_branch[0]\n for rotamer in rotamers:\n atom_list_1.append(set([rotamer.index1, rotamer.index2]))\n\n rotamers = 
rotamers_per_branch[1]\n for rotamer in rotamers:\n atom_list_2.append(set([rotamer.index1, rotamer.index2]))\n\n EXPECTED_INDICES_1 = [set([9, 10]), set([8, 9]), set([7, 8]),\n set([6, 7]), set([5, 6]), set([2, 5]),\n set([0, 2]), set([0, 1])]\n\n EXPECTED_INDICES_2 = [set([12, 11]), set([12, 13]), set([13, 14]),\n set([14, 15]), set([15, 16]), set([16, 17]),\n set([17, 18]), set([18, 19])]\n\n where_1 = list()\n for atom_pair in atom_list_1:\n if atom_pair in EXPECTED_INDICES_1:\n where_1.append(1)\n elif atom_pair in EXPECTED_INDICES_2:\n where_1.append(2)\n else:\n where_1.append(0)\n\n where_2 = list()\n for atom_pair in atom_list_2:\n if atom_pair in EXPECTED_INDICES_1:\n where_2.append(1)\n elif atom_pair in EXPECTED_INDICES_2:\n where_2.append(2)\n else:\n where_2.append(0)\n\n assert (all(i == 1 for i in where_1)\n and all(i == 2 for i in where_2)) or \\\n (all(i == 2 for i in where_1)\n and all(i == 1 for i in where_2)), \"Invalid rotamer library \" + \\\n \"{}, {}\".format(where_1, where_2)\n\n assert (all(i == 1 for i in where_1)\n and all(i == 2 for i in where_2)\n and len(where_1) == len(EXPECTED_INDICES_1)\n and len(where_2) == len(EXPECTED_INDICES_2)) or \\\n (all(i == 2 for i in where_1)\n and all(i == 1 for i in where_2)\n and len(where_1) == len(EXPECTED_INDICES_2)\n and len(where_2) == len(EXPECTED_INDICES_1)), \"Unexpected \" + \\\n \"number of rotamers\"", "def adjustToKnownAngle(self, paths):\n for seg in paths:\n a = seg.tempAngle()\n i = (abs(vec_in_mPi_pPi(knownAngle - a) )).argmin()\n seg.newAngle = knownAngle[i]\n debug( ' Known angle ', seg, seg.tempAngle(),' -> ', knownAngle[i]) \n ## if abs(knownAngle[i] - a) < 0.08:", "def BraggAngle(ID,hkl,E=None):\n E = eV(E)\n d = dSpace(ID,hkl)\n theta = asind(lam(E)/2/d)\n return theta", "def nearby():\n for i in ids:\n for j in ids:\n if i != j:\n if sum([1 for x,y in zip(i,j) if x!=y]) == 1:\n print(\"\".join([x for x,y in zip(i,j) if x==y]))\n return", "def _angle(u, v, w, d='+'):\n vu = np.arctan2(u[1] - v[1], u[0] - v[0])\n vw = np.arctan2(w[1] - v[1], w[0] - v[0])\n phi = vw - vu\n if phi < 0:\n phi += 2 * np.pi\n if d == '-':\n phi = 2 * np.pi - phi\n return np.round(phi, 6)", "def getAngleIndices(self):\n coord_types, atom_indices = self.force_field.getInternalCoordinateDefinitions()\n angle_indices = np.where((coord_types == 'A') | (coord_types == 'D') | (coord_types == 'I'))[0]\n return angle_indices", "def bond_atoms(atom_list):\n pass", "def adjustToNewAngle(self):\n\n self.a,self.b,self.c = parametersFromPointAngle( 0.5*(self.point1+self.pointN), self.newAngle)\n\n #print 'adjustToNewAngle ', self, self.angle, self.newAngle\n self.angle = self.newAngle\n self.normalv = numpy.array( [ self.a, self.b ])\n self.unitv = numpy.array( [ self.b, -self.a ])\n if abs(self.angle) > numpy.pi/2 :\n if self.b > 0: self.unitv *= -1\n elif self.b<0 : self.unitv *= -1\n\n self.point1 = self.projectPoint(self.point1) # reset point1 \n if self.next is None or not self.next.isSegment():\n # move the last point (no intersect with next)\n\n pN = self.projectPoint(self.pointN)\n dirN = pN - self.point1 \n lN = length(pN, self.point1)\n self.pointN = dirN/lN*self.length + self.point1\n #print ' ... adjusting last seg angle ',p.dump() , ' normalv=', p.normalv, 'unitv ', p.unitv\n else:\n self.setIntersectWithNext()", "def delta_angle(self, last):\n if last is None:\n return self.a0\n return self.signed_angle(last.straight(1, 1).v, self.straight(1, 0).v)", "def calculate_distance_edge(self):\n mu_star = -np.sqrt(1. 
- (self.cell_xl / self.x)**2)\n\n if self.mu <= mu_star:\n\n l_edge = (-self.mu * self.x -\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xl**2))\n self.next_cell_index = self.cell_index - 1\n\n else:\n\n l_edge = (-self.mu * self.x +\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xr**2))\n self.next_cell_index = self.cell_index + 1\n\n return l_edge", "def find_reference_radials(azi, vel, debug=False):\n pos_valid = get_valid_rays(vel)\n pos_static = get_static_rays(vel)\n\n # Finding intersects of criteria 1 to 3.\n weight_valid = np.arange(0, len(pos_valid), 1)\n weight_static = np.arange(0, len(pos_static), 1)\n\n total_weight = np.zeros(len(pos_valid)) + np.NaN\n for cnt, (one_valid, one_valid_weight) in enumerate(zip(pos_valid, weight_valid)):\n try:\n one_static_weight = weight_static[one_valid == pos_static][0]\n except IndexError:\n one_static_weight = 9999\n\n total_weight[cnt] = one_static_weight + one_valid_weight\n\n pos1 = pos_valid[np.argmin(total_weight)]\n\n# # Finding the 2nd radial of reference\n# pos2 = pos1 + len(azi) // 2\n# if pos2 >= len(azi):\n# pos2 -= len(azi)\n\n try:\n ref2_range_min, ref2_range_max = get_opposite_azimuth(azi[pos1])\n if ref2_range_min < ref2_range_max:\n goodpos = np.where((azi >= ref2_range_min) & (azi <= ref2_range_max))[0]\n else:\n goodpos = np.where((azi >= ref2_range_min) | (azi <= ref2_range_max))[0]\n\n rslt = [(a, total_weight[a == pos_valid][0]) for a in goodpos if a in pos_valid]\n opposite_pos, opposite_weight = zip(*rslt)\n pos2 = opposite_pos[np.argmin(opposite_weight)]\n except Exception:\n pos2 = pos1 + len(azi) // 2\n if pos2 > len(azi):\n pos2 -= len(azi)\n if debug:\n print(f\"References are azimuths {azi[pos1]} and {azi[pos2]}, i.e. azimuthal positions {pos1} and {pos2}.\")\n\n return pos1, pos2", "def GetBonds(Bonds):\n b = sorted([(min(x), max(x)) for x in Bonds])\n Bonds13, Bonds14 = [], []\n for (a1,b1) in b:\n #check for bonds with a1 at the center of a 1-3 interaction,\n #letting b1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == a1 and b2 < b1] + \\\n [a2 for (a2,b2) in b if b2 == a1 and a2 < b1]\n Bonds13.extend([(min(c,b1), max(c,b1)) for c in clist])\n #check for bonds with b1 at the center of a 1-3 interaction,\n #letting a1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == b1 and b2 < a1] + \\\n [a2 for (a2,b2) in b if b2 == b1 and a2 < a1]\n Bonds13.extend([(min(c,a1), max(c,a1)) for c in clist])\n #find atoms connected to a1\n clist = [b2 for (a2,b2) in b if a1==a2 and not b1==b2] +\\\n [a2 for (a2,b2) in b if a1==b2 and not b1==a2]\n #find atoms connected to b1\n dlist = [a2 for (a2,b2) in b if b1==b2 and not a1==a2] +\\\n [b2 for (a2,b2) in b if b1==a2 and not a1==b2]\n Bonds14.extend([(min(c,d), max(c,d)) for c in clist for d in dlist])\n Bonds1213 = b + Bonds13\n #sort\n Bonds1213.sort()\n Bonds14.sort()\n #get unique values in case of loops\n Bonds1213 = [x for (i,x) in enumerate(Bonds1213) if i == 0 or x != Bonds1213[i-1]]\n Bonds14 = [x for (i,x) in enumerate(Bonds14) if i == 0 or x != Bonds14[i-1]]\n #convert to arrays \n Bonds1213 = array(Bonds1213, int)\n Bonds14 = array(Bonds14, int)\n return Bonds1213, Bonds14", "def addRevArcSeg(self, x1, y1, x2, y2, cx, cy, arcDir):\n a = x1 - cx\n b = y1 - cy\n r = sqrt(a*a + b*b)\n arc = Arc.fromVectors(QVector2D(a, b),\n QVector2D(x2 - cx, y2 - cy),\n r,\n arcDir == 'cclw')\n # TODO: By halving the mesh segs ( * 0.5), fewer triangles are\n # created. 
Shading is ok but arc edges look blocky.\n # angstep = 360.0 / (self._mesh.segs * 0.5)\n angstep = 360.0 / self._mesh.segs\n # minimum 2 segments in the arc\n segs = max(int(abs(arc.span()) / angstep), 3)\n step = arc.span() / segs\n sa = arc.startAngle()\n a1 = radians(sa)\n sa1 = sin(a1)\n ca1 = cos(a1)\n for i in range(1, segs):\n a2 = radians(sa + step * i)\n sa2 = sin(a2)\n ca2 = cos(a2)\n x1 = cx + r * ca1\n y1 = cy + r * sa1\n x2 = cx + r * ca2\n y2 = cy + r * sa2\n self.addRevLineSeg(x1, y1, x2, y2)\n a1 = a2\n sa1 = sa2\n ca1 = ca2\n if i == 1:\n # only blend the first strip\n self._mesh.blendTangent(False)\n # last strip\n else:\n a2 = radians(arc.endAngle())\n x1 = cx + r * ca1\n y1 = cy + r * sa1\n x2 = cx + r * cos(a2)\n y2 = cy + r * sin(a2)\n self.addRevLineSeg(x1, y1, x2, y2)", "def Oneside( x, y0, y1, r):\n\n true = 1\n size_x = np.shape( x )\n if not size_x: size_x = [0]\n\n if size_x[ 0 ] == 0:\n if x == 0: return x\n elif abs( x ) >= r: return Arc( x, y0, y1, r )\n yh = sqrt( r*r - x*x )\n if ( y0 <= -yh ):\n if ( y1 <= -yh ) : return Arc( x, y0, y1, r )\n elif ( y1 <= yh ) : return Arc( x, y0, -yh, r ) \\\n + Chord( x, -yh, y1 )\n else : return Arc( x, y0, -yh, r ) \\\n + Chord( x, -yh, yh ) + Arc( x, yh, y1, r )\n \n elif ( y0 < yh ):\n if ( y1 <= -yh ) : return Chord( x, y0, -yh ) \\\n + Arc( x, -yh, y1, r )\n elif ( y1 <= yh ) : return Chord( x, y0, y1 )\n else : return Chord( x, y0, yh ) + Arc( x, yh, y1, r )\n\n else :\n if ( y1 <= -yh ) : return Arc( x, y0, yh, r ) \\\n + Chord( x, yh, -yh ) + Arc( x, -yh, y1, r )\n elif ( y1 <= yh ) : return Arc( x, y0, yh, r ) + Chord( x, yh, y1 )\n else : return Arc( x, y0, y1, r )\n\n else :\n ans2 = x\n t0 = where( x == 0)[0]\n count = len(t0)\n if count == len( x ): return ans2\n\n ans = x * 0\n yh = x * 0\n to = where( abs( x ) >= r)[0]\n tocount = len(to)\n ti = where( abs( x ) < r)[0]\n ticount = len(ti)\n if tocount != 0: ans[ to ] = Arc( x[to], y0[to], y1[to], r )\n if ticount == 0: return ans\n \n yh[ ti ] = sqrt( r*r - x[ti]*x[ti] )\n \n t1 = where( np.less_equal(y0[ti],-yh[ti]) )[0]\n count = len(t1)\n if count != 0:\n i = ti[ t1 ]\n\n t2 = where( np.less_equal(y1[i],-yh[i]))[0]\n count = len(t2)\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Arc( x[j], y0[j], y1[j], r )\n\n t2 = where( ( greater(y1[i],-yh[i]) ) &\n ( less_equal(y1[i],yh[i]) ))[0]\n count = len(t2)\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Arc( x[j], y0[j], -yh[j], r ) \\\n + Chord( x[j], -yh[j], y1[j] )\n\n t2 = where( greater(y1[i], yh[i]) )[0]\n count = len(t2)\n\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Arc( x[j], y0[j], -yh[j], r ) \\\n + Chord( x[j], -yh[j], yh[j] ) \\\n + Arc( x[j], yh[j], y1[j], r )\n \n t1 = where( ( greater(y0[ti],-yh[ti]) ) & \n ( less(y0[ti],yh[ti]) ))[0] \n count = len(t1)\n if count != 0:\n i = ti[ t1 ]\n\n t2 = where( np.less_equal(y1[i],-yh[i]))[0]\n count = len(t2)\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Chord( x[j], y0[j], -yh[j] ) \\\n + Arc( x[j], -yh[j], y1[j], r )\n \n\n t2 = where( ( greater(y1[i], -yh[i]) ) & \n ( less_equal(y1[i], yh[i]) ))[0]\n count = len(t2)\n\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Chord( x[j], y0[j], y1[j] )\n\n t2 = where( greater(y1[i], yh[i]))[0]\n count = len(t2)\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Chord( x[j], y0[j], yh[j] ) \\\n + Arc( x[j], yh[j], y1[j], r )\n\n t1 = where( greater_equal(y0[ti], yh[ti]))[0] \n count = len(t1)\n if count != 0:\n i = ti[ t1 ]\n\n t2 = where ( np.less_equal(y1[i], -yh[i]))[0] \n count = len(t2)\n if 
count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Arc( x[j], y0[j], yh[j], r ) \\\n + Chord( x[j], yh[j], -yh[j] ) \\\n + Arc( x[j], -yh[j], y1[j], r )\n\n t2 = where( ( greater(y1[i], -yh[i]) ) & \n ( less_equal(y1[i], yh[i]) ))[0]\n count = len(t2)\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Arc( x[j], y0[j], yh[j], r ) \\\n + Chord( x[j], yh[j], y1[j] )\n\n t2 = where( greater(y1[i], yh[i]))[0]\n count = len(t2)\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Arc( x[j], y0[j], y1[j], r )\n\n return ans", "def _adjust_refraction_to_direction_of_incidence(boundary_angle: float, new_angle: float,\n trajectory: TrajectoryBase) -> float:\n angle = trajectory.center.angle\n assert -math.pi / 2 <= boundary_angle <= math.pi / 2, \"boundary_angle should be in first or fourth quadrant\"\n # noinspection PyChainedComparisons\n if boundary_angle >= 0 and boundary_angle < angle % (2 * math.pi) < boundary_angle + math.pi:\n new_angle = math.pi - new_angle\n elif (boundary_angle < 0 and\n boundary_angle % (2 * math.pi) + math.pi < angle % (2 * math.pi) < boundary_angle % (\n 2 * math.pi)):\n new_angle = math.pi - new_angle\n return new_angle", "def checkForCircle(self, points, tangents):\n if len(points)<10:\n return False, 0\n\n if all(points[0]==points[-1]): # last exactly equals the first.\n # Ignore last point for this check\n points = points[:-1]\n tangents = tangents[:-1]\n #print 'Removed last ', points\n xmin,ymin, w, h = computeBox( points)\n diag2=(w*w+h*h)\n \n diag = sqrt(diag2)*0.5\n norms = numpy.sqrt(numpy.sum( tangents**2, 1 ))\n\n angles = numpy.arctan2( tangents[:,1], tangents[:,0] ) \n #debug( 'angle = ', repr(angles))\n N = len(angles)\n \n deltas = points[1:] - points[:-1] \n deltasD = numpy.concatenate([ [D(points[0],points[-1])/diag], numpy.sqrt(numpy.sum( deltas**2, 1 )) / diag] )\n\n # locate and avoid the point when swicthing\n # from -pi to +pi. 
The point is around the minimum\n imin = numpy.argmin(angles)\n debug(' imin ',imin)\n angles = numpy.roll(angles, -imin)\n deltasD = numpy.roll(deltasD, -imin)\n n=int(N*0.1)\n # avoid fluctuations by removing points around the min\n angles=angles[n:-n]\n deltasD=deltasD[n:-n]\n deltasD = deltasD.cumsum()\n N = len(angles)\n\n # smooth angles to avoid artificial bumps\n angles = smoothArray(angles, n=max(int(N*0.03),2) )\n\n deltaA = angles[1:] - angles[:-1]\n deltasDD = (deltasD[1:] -deltasD[:-1])\n deltasDD[numpy.where(deltasDD==0.)] = 1e-5*deltasD[0]\n dAdD = abs(deltaA/deltasDD)\n belowT, count = True,0\n for v in dAdD:\n if v>6 and belowT:\n count+=1\n belowT = False\n belowT= (v<6)\n\n self.temp = (deltasD,angles, tangents, dAdD )\n fracStraight = numpy.sum(deltasDD[numpy.where(dAdD<0.3)])/(deltasD[-1]-deltasD[0])\n curveLength = deltasD[-1]/3.14\n #print \"SSS \",count , fracStraight\n if curveLength> 1.4 or fracStraight>0.4 or count > 6:\n isCircle =False\n else: \n isCircle= (count < 4 and fracStraight<=0.3) or \\\n (fracStraight<=0.1 and count<5)\n\n if not isCircle:\n return False, 0\n \n # It's a circle !\n radius = points - numpy.array([xmin+w*0.5,ymin+h*0.5])\n radius_n = numpy.sqrt(numpy.sum( radius**2, 1 )) # normalize\n\n mini = numpy.argmin(radius_n) \n rmin = radius_n[mini]\n maxi = numpy.argmax(radius_n) \n rmax = radius_n[maxi]\n # void points around maxi and mini to make sure the 2nd max is found\n # on the \"other\" side\n n = len(radius_n)\n radius_n[maxi]=0 \n radius_n[mini]=0 \n for i in range(1,n/8+1):\n radius_n[(maxi+i)%n]=0\n radius_n[(maxi-i)%n]=0\n radius_n[(mini+i)%n]=0\n radius_n[(mini-i)%n]=0\n radius_n_2 = [ r for r in radius_n if r>0]\n rmax_2 = max(radius_n_2)\n rmin_2 = min(radius_n_2) # not good !!\n anglemax = numpy.arccos( radius[maxi][0]/rmax)*numpy.sign(radius[maxi][1])\n return True, (xmin+w*0.5,ymin+h*0.5, 0.5*(rmin+rmin_2), 0.5*(rmax+rmax_2), anglemax)", "def get_bond_connectivity(self):\n m, connectivity = self.owner, []\n for index, i in enumerate(self.rix):\n for j in self.rix[index + 1:]:\n b1 = m.rings[i].bix\n b2 = m.rings[j].bix\n if set(b1).intersection(b2):\n connectivity.append((i, j))\n return tuple(connectivity)", "def find_bond_groups(mol):\n rot_atom_pairs = mol.GetSubstructMatches(RotatableBondSmarts)\n rot_bond_set = set([mol.GetBondBetweenAtoms(*ap).GetIdx() for ap in rot_atom_pairs])\n rot_bond_groups = []\n while (rot_bond_set):\n i = rot_bond_set.pop()\n connected_bond_set = set([i])\n stack = [i]\n while (stack):\n i = stack.pop()\n b = mol.GetBondWithIdx(i)\n bonds = []\n for a in (b.GetBeginAtom(), b.GetEndAtom()):\n bonds.extend([b.GetIdx() for b in a.GetBonds() if (\n (b.GetIdx() in rot_bond_set) and (not (b.GetIdx() in connected_bond_set)))])\n connected_bond_set.update(bonds)\n stack.extend(bonds)\n rot_bond_set.difference_update(connected_bond_set)\n rot_bond_groups.append(tuple(connected_bond_set))\n return tuple(sorted(rot_bond_groups, reverse = True, key = lambda x: len(x)))" ]
[ "0.6646527", "0.63952506", "0.6240537", "0.5905683", "0.588075", "0.5806025", "0.5764874", "0.56130666", "0.5525293", "0.5508618", "0.5460365", "0.53958726", "0.535405", "0.53534824", "0.5297335", "0.5294864", "0.52534354", "0.52349055", "0.52014965", "0.51829153", "0.51585686", "0.5158113", "0.51318425", "0.5131614", "0.5120848", "0.51107645", "0.50867856", "0.5049683", "0.5030174", "0.49682838", "0.4958581", "0.49530023", "0.49477607", "0.4943899", "0.4938667", "0.4905577", "0.4905041", "0.4891315", "0.48877636", "0.48874956", "0.488567", "0.48737592", "0.48713693", "0.48705763", "0.4866071", "0.4861415", "0.48588172", "0.4856799", "0.4853416", "0.48532766", "0.4833994", "0.48248464", "0.48231012", "0.48161423", "0.4811012", "0.4809579", "0.47988373", "0.4796092", "0.47945508", "0.47934985", "0.47773427", "0.4768348", "0.476714", "0.47621265", "0.47611496", "0.4743789", "0.47292048", "0.47132188", "0.47097617", "0.4705918", "0.46943092", "0.4688248", "0.4688142", "0.46871608", "0.4682735", "0.46772626", "0.46741703", "0.4658396", "0.46525046", "0.4650702", "0.4648191", "0.46456194", "0.4644644", "0.46420172", "0.4636913", "0.46346253", "0.46326077", "0.46309876", "0.4621326", "0.46145877", "0.46112305", "0.4610014", "0.46099353", "0.4601056", "0.46008873", "0.459959", "0.459931", "0.4590903", "0.45893255", "0.45859605" ]
0.8237362
0
Indices of the first torsions in the BAT array
def getFirstTorsionInds(self, extended):
    offset = 6 if extended else 0
    torsionInds = np.array(range(offset + 5, self.natoms * 3, 3))
    primaryTorsions = sorted(list(set(self._firstTorsionTInd)))
    return list(torsionInds[primaryTorsions])
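For context, the document above returns the positions inside a BAT (bond/angle/torsion) internal-coordinate vector that hold the primary ("first") torsions. The following minimal sketch shows how such a method could be exercised; the wrapper class, its attributes (natoms, _firstTorsionTInd) and the assumed BAT layout (6 external coordinates when extended, then bond/angle/torsion triples per atom) are illustrative assumptions, not part of the source dataset.

# Minimal usage sketch under assumed attributes; not the original class.
import numpy as np

class BATExample:
    def __init__(self, natoms, firstTorsionTInd):
        self.natoms = natoms                        # number of atoms in the molecule
        self._firstTorsionTInd = firstTorsionTInd   # for each torsion, the index of its "first" torsion

    def getFirstTorsionInds(self, extended):
        # With the extended representation, 6 external degrees of freedom
        # (translation + rotation) precede the internal coordinates.
        offset = 6 if extended else 0
        # Torsions occupy every third slot, starting after the root bonds,
        # the root angle, and the first bond/angle pair.
        torsionInds = np.array(range(offset + 5, self.natoms * 3, 3))
        primaryTorsions = sorted(list(set(self._firstTorsionTInd)))
        return list(torsionInds[primaryTorsions])

# Hypothetical 5-atom molecule: its two torsions are both phased relative
# to torsion 0, so only the BAT slot of torsion 0 (index 11) is returned.
bat = BATExample(natoms=5, firstTorsionTInd=[0, 0])
print(bat.getFirstTorsionInds(extended=True))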
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_agent_indices(array):\t\n\tagent_indices = np.argwhere(array != 0)\n\treturn agent_indices", "def get_vacancy_indices(array):\t\n\tvacancy_indices = np.argwhere(array == 0)\n\treturn vacancy_indices", "def _get_indx(self, t):\n t = np.array(t)\n a = (t[:, np.newaxis] <= self._data['stop']) & (t[:, np.newaxis] >=\n self._data['start'])\n return np.array([np.where(row)[0][0] for row in a])", "def tc_index(*args):\n index = []\n x = check_tc_data(args[0])\n i = 0\n for line in args[0].Data.TCData.tc_data:\n i += 1\n if line != x[i - 1]:\n index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def get_main_points(neuron):\n (branch_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 2)\n (endpoint_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 0)\n selected_index = np.union1d(branch_index + neuron.n_soma,\n endpoint_index + neuron.n_soma)\n selected_index = np.append(range(neuron.n_soma), selected_index)\n return selected_index", "def pt_index(*args):\n index = []\n x = check_pt_data(args[0])\n i = 0\n for line in args[0].Data.PTData.pt_data:\n i += 1\n if line != x[i - 1]:\n index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def get_peak_ind(discrete_array):\n\n indexes = [j for j in range(discrete_array.size) if discrete_array[j-1]==0 and\\\n discrete_array[j]==1]\n\n return indexes", "def nonzero_indices(a):\n return (np.nonzero(a)[0])", "def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check", "def hoggar_indices():\n return list(product([0,1], repeat=6))", "def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices", "def indices(self):\n return tuple([slice(*r) for r in self.location])", "def mainIndices(self):\n return self.i1, self.i2", "def get_gt_hom_idxs(alt_num):\n last = -1\n hom_idxs = []\n for a in range(alt_num + 1):\n last = last + (a + 1)\n hom_idxs.append(last)\n return hom_idxs", "def indices(self):\n return range(len(self))", "def columnIndexes(a):\n nrows = (a.size-2)+1\n return a[1*np.arange(nrows)[:,None] + np.arange(2)]", "def starting_values(self, resids: NDArray) -> NDArray:", "def get_indexes(self, dataset):\n\n for i in range(self.max_iters):\n index = random.randint(0, len(dataset))\n gt_bboxes_i = dataset.get_ann_info(index)['bboxes']\n if len(gt_bboxes_i) != 0:\n break\n\n return index", "def get_agent_indices_of_type(array, agent_type):\t\n\tagent_indices = np.argwhere(array == agent_type)\n\treturn agent_indices", "def footprint_corner_indices():", "def atom_idxs(self):\n\n return np.array([atom.atom_idxs for atom in self])", "def _get_indexes(self, participants):\n tr_idx = int(np.floor(self.tr_size*len(participants)))\n j = self.val_size + self.tr_size\n val_idx = int(np.floor(j*len(participants)))\n return tr_idx, val_idx", "def get_pent_idx(pent):\n pidx = 0\n for i in range(pent.shape[0]):\n for j in range(pent.shape[1]):\n if pent[i][j] != 0:\n pidx = pent[i][j]\n break\n if pidx != 0:\n break\n if pidx == 0:\n return -1\n return pidx - 1", "def get_index(self, x, y):\n i = (y - self.y0) // self.dy\n j = (x - self.x0) // self.dx\n i = min(max(i, 0), self.n-1)\n j = min(max(j, 0), self.m-1)\n return [i, j]", "def get_active_register_indices(self):\n assert self.sketch.ndim == 1, 'Currently only 
support 1-dimensional sketch.'\n return np.flatnonzero(self.sketch)", "def get_indices(waves):\n prob_ = np.abs(waves)**2\n # batch\n prob = [np.sum(prob_[i:i+4,:], axis=0) for i in range(0, len(waves[:,0]), 4)]\n prob = np.asarray(prob)\n prob_tot = np.sum(prob, axis=0)\n \n # cutoff\n length = np.size(prob[:,0])\n len10 = int(length/10)\n flags = np.zeros((prob.shape[1]), dtype=int)\n # hinges\n # 50% within 10% of corners\n\n # surface\n # 50% within 10% of surfaces\n # not already labelled hinges\n prob_left = np.sum(prob[0:len10,:], axis=0)\n frac_left = prob_left/prob_tot\n\n prob_right = np.sum(prob[length-len10:length,:], axis=0)\n frac_right = np.divide(prob_right, prob_tot)\n\n for i in range(len(flags)):\n if frac_left[i]>0.5 or frac_right[i]>0.5:\n flags[i] = 1\n \n indices = [i for i, x in enumerate(flags) if x == 1]\n indices0 = [i for i, x in enumerate(flags) if x == 0]\n \n return indices, indices0", "def _get_chunk_indexer(self, array):\n if self.data.num_chunks == 1:\n return np.broadcast_to(0, len(array))\n return np.digitize(array, self.offsets[1:])", "def computeTopSurfaceIndices(top):\n itop = np.array([(top[i,j], j, i) \\\n for i in range(top.shape[0]) \\\n for j in range(top.shape[1]) \\\n if top[i,j] >= 0])\n return itop", "def index(self):\n # Check is multiple orders were given\n try:\n orders = list(iter(self.orders))\n except TypeError:\n orders = [self.orders]\n sites = self._epistasismap.sites\n x = [i for i in range(1, len(sites)) if len(sites[i]) in orders]\n # Add the zeroth element if included\n if 0 in orders:\n x = [0] + x\n return np.array(x)", "def get_main_branch_indices(self):\n\n assert self.halt is not None\n prog_main_index = self.halt_index\n prog_main_indices = self.halt.prop(\n 'progenitor.main.indices', self.halt_index)\n self.main_branch_indices = prog_main_indices\n return prog_main_indices", "def index_col(self, i0, i1, j0, j1):\n edges = self.h5['indexes']['bin1_offset'][i0:i1 + 1]\n index = []\n for lo1, hi1 in zip(edges[:-1], edges[1:]):\n if hi1 - lo1 > 0:\n bin2 = self.h5['pixels']['bin2_id'][lo1:hi1]\n mask = (bin2 >= j0) & (bin2 < j1)\n index.append(lo1 + np.flatnonzero(mask))\n if not index:\n return np.array([], dtype=int)\n else:\n return np.concatenate(index, axis=0)", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]", "def masked_indices(mask):\n\treturn np.nonzero(np.ravel(mask,order='C'))[0]", "def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices", "def indexarray(matrix_terms, which, var):\n mults = matrix_terms[which].copy()\n mults[:, var] += 1\n return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)", "def indices(self) -> np.ndarray:\n return self.impl.indices", "def get_img_indices():\n if K.image_dim_ordering() == 'th':\n return 0, 1, 2, 3\n else:\n return 0, 3, 1, 2", "def bool_2_indices(bool_array):\n if ~isinstance(bool_array, np.ndarray):\n bool_array = np.array(bool_array)\n if bool_array[-1]:\n bool_array[-1] = False\n lims = np.arange(bool_array.size)[bool_array ^ np.roll(bool_array, 1)]\n if len(lims) > 0:\n if lims[-1] == bool_array.size - 1:\n lims[-1] = bool_array.size\n return np.reshape(lims, (len(lims) // 2, 2))\n else:\n return [[np.nan, np.nan]]", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n index = [np.random.randint(0, len(dataset)) for _ in range(1)]\n\n return index", "def indices(self, fit):\r\n lam = self.lam_reeval 
if self.lam_reeval else 2 + len(fit) / 20\r\n reev = int(lam) + ((lam % 1) > np.random.rand())\r\n return np.argsort(array(fit, copy=False)[:2 * (reev + 1)])[:reev]", "def get_index(band_nums,chan_num):\n ch_index=np.searchsorted(band_nums,chan_num)\n return int(ch_index)", "def agent_locs_idx(self):\n return tuple(self.agent_locs.T)", "def non_masked_indices(mask):\n\treturn np.nonzero(np.ravel(mask-1,order='C'))[0]", "def index(self):\n a = self.array_form\n\n return sum([j for j in xrange(len(a) - 1) if a[j] > a[j+1]])", "def ordered_indices(self):\r\n return np.arange(len(self), dtype=np.int64)", "def mesh_span_indices(self):\n self._ensure_mesh()\n k2m = self._knots_to_mesh\n return np.where(k2m[1:] != k2m[:-1])[0]", "def getBreakIndices(self):\n for i in self.raw.index[:-1]:\n if self.raw['stress'][i+1] > self.raw['stress'][i] and \\\n self.raw['stress'][i+2] < self.raw['stress'][i+1]:\n brkIdx1 = i+1 # brkIdx1: start of the first unloading\n break\n if self.reloading:\n for i in self.raw.index[brkIdx1+1:-1]:\n if self.raw['stress'][i+1] < self.raw['stress'][i] and \\\n self.raw['stress'][i+2] > self.raw['stress'][i+1]:\n brkIdx2 = i+1 # brkIdx2: end of the first unloading\n break\n # brkIdx3: Point on the NCL after the first reloading\n brkIdx3 = self.raw.query(f'stress == stress[{brkIdx1}]').index[1]\n # brkIdx4: index of the last point on the NCL\n brkIdx4 = self.raw.query('stress == stress.max()').index[0]\n self.secondUnloading = False\n else:\n brkIdx2 = self.raw.index[-1]\n brkIdx3 = None\n brkIdx4 = None\n\n self.brkIdx1 = brkIdx1\n self.brkIdx2 = brkIdx2\n self.brkIdx3 = brkIdx3\n self.brkIdx4 = brkIdx4\n return", "def coor2idx(x, y):\r\n a = round(x/4000,0)*4000\r\n b = (round_down(y/4000,0)+0.5)*4000\r\n i = int((a - 24000)/4000) + 1\r\n j = int((b - 22000)/4000) + 1\r\n return i, j", "def _raveled_index(self):\n return np.r_[:self.size]", "def _raveled_index(self):\n return np.r_[:self.size]", "def cardinal_indices(self, index):\n cardinals = [\n self.north_index(index),\n self.east_index(index),\n self.south_index(index),\n self.west_index(index)\n ]\n return [i for i in cardinals if 0 < i < (self.size * self.size)]", "def batch_indices(self):\n b = self.batch_size\n return [np.arange(i*b, i*b+b) for i in range(self.num_batches)]", "def get_index_array(self):\n return self.region_pairs", "def refractive_index(self):\n wd = np.arange(80,820,10)\n nd = self.boundary.imat.refractive_index(wd) \n\n plt.plot(wd, nd)\n\n return wd, nd", "def get_adjacent_idxs(sample, array):\n state = sample >= array # boolean array\n # Find the index where the last \"True\"\n # This is the idx lower than the sample\n idx_lower = np.where(state)[0][-1]\n\n # Find the index where the last \"False\"\n # This is the first idx lower than the sample\n idx_higher = np.where(np.logical_not(state))[0][0]\n\n return idx_lower, idx_higher", "def indices(self):\n i, j, _edge = self.indicesAndEdge()\n return i, j", "def shortest_tips(neuron):\n (branch_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 2)\n (endpoint_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 0)\n selected_index = np.union1d(endpoint_index + 1,\n branch_index + 1)\n selected_index = np.append(0, selected_index)", "def xy(self, photons):\n flatbeam = self.beamImage.flatten()\n beamsorted = np.argsort(flatbeam)\n ind = np.searchsorted(flatbeam[beamsorted], photons[\"resID\"])\n return np.unravel_index(beamsorted[ind], self.beamImage.shape)", "def morton_idx(pts):\n lib = _initlib()\n p = require(pts, dtype=float64, 
requirements=['C']) \n inv_cell_width = 1.0/8192\n npts = len(p)\n out = empty(npts, dtype=int64)\n lib.get_morton_idx(p, npts, inv_cell_width, out)\n return out", "def min_indices(self):\n return {term.minterm_index for term in self.iter_minterms()}", "def index(self) -> int:", "def getBinIndices(self, linear_index):\n return linear_index / self.magic_array % self.nbins_across_dims", "def get_indices(self):\r\n return self._indices", "def matrix_idx(n_hist, n_req, n_rows):\n\n flat_idx = []\n for i in range(n_rows):\n flat_idx.extend(range(i * n_req, (i + 1) * n_req + n_hist))\n # idx = np.unravel_index(flat_idx, (n_rows, n_hist + n_req))\n\n idx_matrix = np.reshape(flat_idx, (n_rows, n_hist + n_req))\n idxX = idx_matrix[:, n_req:]\n idxY = idx_matrix[:, :n_req]\n\n return idxX, idxY", "def get_should_translate_index(arr):\n result = []\n for i, item in enumerate(arr):\n if item > 0.25:\n result.append(i)\n\n return result", "def _find_nonzero_runs(values):\n\n error_checking.assert_is_numpy_array_without_nan(values)\n error_checking.assert_is_numpy_array(values, num_dimensions=1)\n\n zero_flags = numpy.concatenate((\n [True], numpy.equal(values, 0), [True]\n ))\n\n nonzero_flags = numpy.invert(zero_flags)\n differences = numpy.abs(numpy.diff(nonzero_flags))\n index_matrix = numpy.where(differences == 1)[0].reshape(-1, 2)\n\n return index_matrix[:, 0], index_matrix[:, 1] - 1", "def get_source_indices(sent, dic):\n clean_sent = cleanup_sentence(sent)\n words = clean_sent.split(' ')\n n_words = len(words) + 1 # counting for the </s>\n indices = np.zeros(n_words)\n cnt = 0\n nsrc_unk = 0\n unk_idx = dic.symbol_to_index[\"<unk>\"]\n eos_idx = dic.symbol_to_index[\"</s>\"]\n for i, word in enumerate(words):\n wid = dic.symbol_to_index.get(word, None)\n if wid is None:\n indices[cnt] = unk_idx\n nsrc_unk += 1\n else:\n indices[cnt] = wid\n if wid == unk_idx:\n nsrc_unk += 1\n cnt += 1\n indices[cnt] = eos_idx\n cnt += 1\n return indices, indices.shape[0], nsrc_unk", "def _state_index(state):\n delta_y, delta_x, bird_lmh, pipe_lmh, is_flapping = state\n actions, height, width, _, _, _ = Q.shape\n\n y = int((height / 2) + (delta_y / step_r) - 1)\n x = int((width / 2) + (delta_x / step_c) - 1)\n\n return y, x, bird_lmh, pipe_lmh, is_flapping", "def getstartingshape(vals):\n \n return 1", "def indXtoJ(indX):\n return np.unravel_index(indX % xx.size, xx.shape)", "def tril_indices_from(arr,k=0):\r\n if not arr.ndim==2 and arr.shape[0] == arr.shape[1]:\r\n raise ValueError(\"input array must be 2-d and square\")\r\n return tril_indices(arr.shape[0],k)", "def getbaraidxij(self,idx_): # 3\n res,resargs = self.__obj.getbaraidxij(idx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _i_return_value,_j_return_value = resargs\n return _i_return_value,_j_return_value", "def i_index(self, coord):\n return coord + 1 if coord + 1 > self.dimensions - 1 else 0", "def get_final_pruned_indices(self):\n return self.final_pruned_indices", "def least_indices(array: np.ndarray, n: int) -> tuple:\n flat = array.ravel()\n indices = np.argpartition(flat, n)[:n]\n indices = indices[np.argsort(flat[indices])]\n return np.unravel_index(indices, array.shape)", "def _get_index_lists(self, mat):\n n_row, n_col = mat.shape\n \n col_ind_at_row, row_ind_at_col = [],[]\n for i in range(n_row):\n aux_ind = _np.where(mat[i]>0)[0]\n if len(aux_ind) == 0:\n raise Exception('Row {} is composed of zeros'.format(i))\n col_ind_at_row.append(aux_ind)\n \n for j in range(n_col):\n aux_ind = 
_np.where(mat[:,j]>0)[0]\n if len(aux_ind) == 0:\n raise Exception('Column {} is composed of zeros'.format(j))\n \n row_ind_at_col.append(aux_ind)\n \n return col_ind_at_row, row_ind_at_col", "def _iter_indices(self, frame, y):\n pass", "def first_in_first_out(table):\n min_order = table[0].fifo_order\n min_index = 0\n for index, table_line in enumerate(table):\n if table_line.fifo_order < min_order:\n min_order = table_line.fifo_order\n min_index = index\n\n return min_index", "def create_jackknife_indexes(data):\n from numpy import arange, delete\n\n index_range = arange(0, len(data))\n return (delete(index_range, i) for i in index_range)", "def get_test_index():\n return list(range(305, 435))", "def pndindex(*args):\r\n return np.ndindex(*args)", "def theta_start(self, ndim=2):\n return 10", "def get_segment_index(datadb):\n #match in time!!!!\n if cfg.use_saliency:\n segment_index_tar = util.get_time_for_visual(datadb)\n segment_index_tar_future = OrderedDict()\n for key in segment_index_tar.keys():\n segment_index_tar_future[key] = np.array(segment_index_tar[key])+max_encoder_seq_length\n return segment_index_tar,segment_index_tar_future", "def _get_trough_and_peak_idx(waveform):\n trough_idx = np.argmin(waveform, axis=1)\n peak_idx = -1 * np.ones(trough_idx.shape, dtype=int) # int, these are used for indexing\n for i, tridx in enumerate(trough_idx):\n if tridx == waveform.shape[1] - 1:\n trough_idx[i] = 0\n peak_idx[i] = 0\n continue\n idx = np.argmax(waveform[i, tridx:])\n peak_idx[i] = idx + tridx\n return trough_idx, peak_idx", "def first_and_last_index(arr, number):\n\n # TODO: Write your first_and_last function here\n # Note that you may want to write helper functions to find the start\n # index and the end index\n bin_occurence = find_number_recursively(arr, number, 0, len(arr) - 1)\n print(bin_occurence)\n first = find_first_index_recursively(arr, number, bin_occurence)\n last = find_last_index_recursively(arr, number, bin_occurence)\n print([first, last])\n return [first, last]", "def run_idxs(self):\n return list(range(len(self._h5[RUNS])))", "def run_traj_idxs(self, run_idx):\n return list(range(len(self._h5['{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES)])))", "def one_dim_index(self, i, j):\n return int(i + j * self.nx)", "def _code_indices(self) -> Tuple[int, ...]:\n return tuple(idx for idx, seg in enumerate(self.segments) if seg.is_code)", "def get_5index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==0]", "def find_index(self):\n current = self.from_grid\n #find index of \"*\"\n for x in range(len(current)):\n for y in range(len(current[x])):\n if current[x][y] == \"*\":\n index = (x,y)\n return index", "def find_indices(colorhs, centres):\n\n indices = np.zeros(colorhs.shape[0], dtype=np.uint8)\n i = 0\n\n for hs in colorhs:\n # Past Euclidian distance\n past_ed = float(\"inf\")\n for cluster in range(centres.shape[0]):\n # Current Euclidian distance\n curr_ed = (sum((hs - centres[cluster, :]) ** 2)) ** 1/2\n # A frame belongs to the cluster with the minimum ed value.\n if curr_ed <= past_ed:\n past_ed = curr_ed\n indices[i] = cluster\n i += 1\n return indices", "def _get_x_of_t(self, arr):\n\n t_max = self._get_max_t()\n arr = list(arr)\n\n if arr[-1][0] < t_max:\n arr.append([t_max, arr[-1][1]])\n\n arr = np.array(arr)\n return arr[:, 1], arr[:, 0]", "def relevant_indexes(data, min_threshold):\n\n start_index = 1\n end_index = len(data) - 1\n\n for i in range(len(data)):\n if data[i] > min_threshold:\n start_index = i\n break\n\n 
for i in range(len(data)):\n if data[::-1][i] > min_threshold:\n end_index = i\n break\n\n return start_index, end_index", "def tree_idx(tree,j1,J1,J2):\n j = j1\n for k in np.arange(J1+1,J2+1,1):\n j = tree[k]['IDX'][j]\n \n j2 = j\n return j2", "def wind_profile_indices_map(self):\n if self._hybrid_meta is None:\n return np.array([]), np.array([])\n\n idxs = self._hybrid_meta[self.__wind_rpi_n].astype(int)\n idxs = idxs[idxs >= 0]\n\n return idxs.index.values, idxs.values", "def getbaraidxij(self,idx_):\n i_ = ctypes.c_int32()\n j_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbaraidxij(self.__nativep,idx_,ctypes.byref(i_),ctypes.byref(j_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n i_ = i_.value\n _i_return_value = i_\n j_ = j_.value\n _j_return_value = j_\n return (_i_return_value,_j_return_value)", "def _get_met_indices(metric):\n letters = [f\"/{x}\" for x in range(0, len(metric.shape))]\n return letters", "def BatchCreator(self, j, n_batch):\n j_start = (j-1)*n_batch + 1\n j_end = j*n_batch + 1\n ind = np.arange(start= j_start, stop=j_end, step=1)\n return ind" ]
[ "0.6661381", "0.6593251", "0.62259746", "0.622321", "0.6037638", "0.60303926", "0.602448", "0.5961824", "0.59254426", "0.59253204", "0.58767104", "0.58369315", "0.58064735", "0.58055794", "0.579832", "0.5781476", "0.5770356", "0.57517385", "0.5733817", "0.5665305", "0.5646749", "0.56205446", "0.56146145", "0.56140596", "0.55619746", "0.55513585", "0.5549283", "0.55470526", "0.5541172", "0.5538824", "0.5538755", "0.55372024", "0.5529243", "0.55269766", "0.55188715", "0.54729575", "0.5449641", "0.5438343", "0.543744", "0.54362124", "0.54280156", "0.5417573", "0.54107094", "0.5407219", "0.54036653", "0.540042", "0.5389135", "0.53684837", "0.5366808", "0.53634", "0.53634", "0.5359883", "0.5352314", "0.53382844", "0.53358257", "0.53169835", "0.53034246", "0.52993613", "0.5297609", "0.5297371", "0.5292483", "0.52852213", "0.52843076", "0.5269045", "0.5265228", "0.5260781", "0.525626", "0.52549464", "0.52547586", "0.525244", "0.52524364", "0.5251949", "0.5235182", "0.52318853", "0.5230897", "0.5217088", "0.52131885", "0.52089643", "0.5199979", "0.5190639", "0.5189155", "0.5186154", "0.51852864", "0.51811606", "0.51745397", "0.5174416", "0.5173727", "0.5172201", "0.5168251", "0.5165375", "0.51577777", "0.51542246", "0.5149635", "0.5146387", "0.514223", "0.5139285", "0.5139188", "0.5136329", "0.51321626", "0.51243836" ]
0.57871866
15
Conversion from Cartesian to BondAngleTorsion coordinates
def BAT(self, XYZ, extended=False): root = [distance(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]]),\ distance(XYZ[self.rootInd[1]],XYZ[self.rootInd[2]]),\ angle(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]],XYZ[self.rootInd[2]])] import itertools internal = root + \ [val for val in itertools.chain.from_iterable([\ BAT4(XYZ[a1],XYZ[a2],XYZ[a3],XYZ[a4]) \ for (a1,a2,a3,a4) in self._torsionIndL])] torsions = internal[5::3] phase_torsions = [(torsions[n] - torsions[self._firstTorsionTInd[n]]) \ if self._firstTorsionTInd[n]!=n else torsions[n] \ for n in range(len(torsions))] internal[5::3] = phase_torsions if not extended: return np.array(internal) external = self.extended_coordinates(XYZ[self.rootInd[0]], \ XYZ[self.rootInd[1]], XYZ[self.rootInd[2]]) return np.array(list(external) + list(internal))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Cartesian(self, BAT):\n # Arrange BAT coordinates in convenient arrays\n offset = 6 if len(BAT) == (3 * self.natoms) else 0\n bonds = BAT[offset + 3::3]\n angles = BAT[offset + 4::3]\n phase_torsions = BAT[offset + 5::3]\n torsions = [(phase_torsions[n] + phase_torsions[self._firstTorsionTInd[n]]) \\\n if self._firstTorsionTInd[n]!=n else phase_torsions[n] \\\n for n in range(self.ntorsions)]\n\n p1 = np.array([0., 0., 0.])\n p2 = np.array([0., 0., BAT[offset]])\n p3 = np.array([BAT[offset+1]*np.sin(BAT[offset+2]), 0., \\\n BAT[offset]-BAT[offset+1]*np.cos(BAT[offset+2])])\n\n # If appropriate, rotate and translate the first three atoms\n if offset == 6:\n # Rotate the third atom by the appropriate value\n (phi, theta, omega) = BAT[3:6]\n co = np.cos(omega)\n so = np.sin(omega)\n Romega = np.array([[co, -so, 0], [so, co, 0], [0, 0, 1]])\n p3 = Romega.dot(p3)\n # Rotate the second two atoms to point in the right direction\n cp = np.cos(phi)\n sp = np.sin(phi)\n ct = np.cos(theta)\n st = np.sin(theta)\n Re = np.array([[cp * ct, -sp, cp * st], [ct * sp, cp, sp * st],\n [-st, 0, ct]])\n p2 = Re.dot(p2)\n p3 = Re.dot(p3)\n # Translate the first three atoms by the origin\n origin = np.array(BAT[:3])\n p1 += origin\n p2 += origin\n p3 += origin\n\n XYZ = np.zeros((self.natoms, 3))\n\n XYZ[self.rootInd[0]] = p1\n XYZ[self.rootInd[1]] = p2\n XYZ[self.rootInd[2]] = p3\n\n for ((a1,a2,a3,a4), bond, angle, torsion) in \\\n zip(self._torsionIndL,bonds,angles,torsions):\n sphere = Sphere(Vector(XYZ[a2]), bond)\n cone = Cone(Vector(XYZ[a2]), Vector(XYZ[a3] - XYZ[a2]), angle)\n plane123 = Plane(Vector(XYZ[a4]), Vector(XYZ[a3]), Vector(XYZ[a2]))\n points = sphere.intersectWith(cone).intersectWith(plane123)\n p = points[0] if (Plane(Vector(XYZ[a3]), Vector(\n XYZ[a2]), points[0]).normal * plane123.normal) > 0 else points[1]\n p = rotatePoint(Vector(p),\n Line(Vector(XYZ[a2]), Vector(XYZ[a2] - XYZ[a3])),\n torsion)\n XYZ[a1] = p.array\n\n return XYZ\n\n for ((a1,a2,a3,a4), bond, angle, torsion) in \\\n zip(self._torsionIndL,bonds,angles,torsions):\n\n p2 = XYZ[a2]\n p3 = XYZ[a3]\n p4 = XYZ[a4]\n\n # circle = sphere.intersectWith(cone)\n n23 = normalize(p3 - p2)\n\n # points = circle.intersectWith(plane123)\n # plane.intersectWith(Plane(circle.center, circle.normal)) is a line\n # line_direction = cross(normalize(cross(p4-p3,n23)),n23)\n\n # Rotate the point about the p2-p3 axis by the torsion angle\n v21 = (bond * np.cos(angle)) * n23 - (bond * np.sin(angle)) * cross(\n normalize(cross(p4 - p3, n23)), n23)\n s = np.sin(torsion)\n c = np.cos(torsion)\n XYZ[a1] = p2 - cross(n23, v21) * s + np.sum(\n n23 * v21) * n23 * (1.0 - c) + v21 * c", "def _internal_to_cartesian(self, bond_position, angle_position, torsion_position, r, theta, phi):\n # TODO: _cartesian_to_internal and _internal_to_cartesian should accept/return units and have matched APIs\n\n check_dimensionality(bond_position, unit.nanometers)\n check_dimensionality(angle_position, unit.nanometers)\n check_dimensionality(torsion_position, unit.nanometers)\n check_dimensionality(r, float)\n check_dimensionality(theta, float)\n check_dimensionality(phi, float)\n\n # Compute Cartesian coordinates from internal coordinates using all-dimensionless quantities\n # All inputs to numba must be in float64 arrays implicitly in md_unit_syste units of nanometers and radians\n from perses.rjmc import coordinate_numba\n xyz = coordinate_numba.internal_to_cartesian(\n bond_position.value_in_unit(unit.nanometers).astype(np.float64),\n 
angle_position.value_in_unit(unit.nanometers).astype(np.float64),\n torsion_position.value_in_unit(unit.nanometers).astype(np.float64),\n np.array([r, theta, phi], np.float64))\n # Transform position of new atom back into unit-bearing Quantity\n xyz = unit.Quantity(xyz, unit=unit.nanometers)\n\n # Compute abs det Jacobian using unitless values\n detJ = np.abs(r**2*np.sin(theta))\n\n check_dimensionality(xyz, unit.nanometers)\n check_dimensionality(detJ, float)\n return xyz, detJ", "def cartesian2polar(cartesian):\n cartesian = np.array(cartesian).squeeze()\n x, y = cartesian\n r = np.linalg.norm([x, y])\n azimuth = np.arctan2(y, x)\n return np.array([r, azimuth])", "def _cartesian_to_internal(self, atom_position, bond_position, angle_position, torsion_position):\n # TODO: _cartesian_to_internal and _internal_to_cartesian should accept/return units and have matched APIs\n\n check_dimensionality(atom_position, unit.nanometers)\n check_dimensionality(bond_position, unit.nanometers)\n check_dimensionality(angle_position, unit.nanometers)\n check_dimensionality(torsion_position, unit.nanometers)\n\n # Convert to internal coordinates once everything is dimensionless\n # Make sure positions are float64 arrays implicitly in units of nanometers for numba\n from perses.rjmc import coordinate_numba\n internal_coords = coordinate_numba.cartesian_to_internal(\n atom_position.value_in_unit(unit.nanometers).astype(np.float64),\n bond_position.value_in_unit(unit.nanometers).astype(np.float64),\n angle_position.value_in_unit(unit.nanometers).astype(np.float64),\n torsion_position.value_in_unit(unit.nanometers).astype(np.float64))\n # Return values are also in floating point implicitly in nanometers and radians\n r, theta, phi = internal_coords\n\n # Compute absolute value of determinant of Jacobian\n detJ = np.abs(r**2*np.sin(theta))\n\n check_dimensionality(r, float)\n check_dimensionality(theta, float)\n check_dimensionality(phi, float)\n check_dimensionality(detJ, float)\n\n return internal_coords, detJ", "def cartesianToPolar(x=0, y=0):\n\n radius = np.hypot(x, y)\n theta = np.arctan2(y, x)\n return theta, radius", "def cartesianToPolar(x,y):\n r = np.sqrt(x**2 + y**2)\n theta = np.arctan2(y,x)\n\n return r,theta", "def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos", "def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle", "def to_cartesian(dimensions, angles):\n return Operator(transform=np.transpose(np.array(_basis_vectors(dimensions, angles))))", "def polarToCartesian(theta=0, radius=0):\n\n x = radius * np.cos(theta)\n y = radius * np.sin(theta)\n return x, y", "def to_axang(self) -> Tuple[np.ndarray, float]:\n return self.to_axisangle()", "def polar2cartesian(polar):\n polar = np.array(polar).squeeze()\n r, azimuth = polar\n x = r * np.cos(azimuth)\n y = r * np.sin(azimuth)\n return np.array([x, y])", "def to_axang(self) -> Tuple[np.ndarray, float]:\n denom = np.linalg.norm(self.v)\n angle = 2.0*np.arctan2(denom, self.w)\n axis = np.zeros(3) if angle==0.0 else self.v/denom\n return axis, angle", "def _position_cylindrical2cartesian(pos):\n \n rho=pos[:,0]\n theta=pos[:,1]\n z=pos[:,2]\n\n x=rho*np.cos(theta)\n y=rho*np.sin(theta)\n z=z\n\n return np.dstack((x,y,z))[0]", "def polar_to_cartesian(radius, angle_deg):\n\n theta = np.deg2rad(angle_deg)\n x = radius * np.cos(theta)\n y = radius * np.sin(theta)\n return(x, y)", "def getAngle(self):\n x, y = 
self.components\n return math.atan2(y, x)", "def py_ang(self,v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)", "def angle(self):\n return angle(self.force, self.forceXYZ, self.excited_axis,\n self.distance, self.distanceXYZ)", "def angle(z):", "def polar_to_cartesian(r, theta):\n\n x = r * cos(theta)\n y = r * sin(theta)\n\n return x, y", "def calc_mainchain_bond_angle(self):\n aN = self.get_atom('N')\n aCA = self.get_atom('CA')\n aC = self.get_atom('C')\n aO = self.get_atom('O')\n aCB = self.get_atom('CB')\n\n naN = None\n naCA = None\n next_res = self.get_offset_residue(1)\n if next_res:\n naN = next_res.get_atom('N')\n naCA = next_res.get_atom('CA')\n\n N_CA_C = AtomMath.calc_angle(aN, aCA, aC)\n CA_C_O = AtomMath.calc_angle(aCA, aC, aO)\n N_CA_CB = AtomMath.calc_angle(aN, aCA, aCB)\n CB_CA_C = AtomMath.calc_angle(aCB, aCA, aC)\n CA_C_nN = AtomMath.calc_angle(aCA, aC, naN)\n C_nN_nCA = AtomMath.calc_angle(aC, naN, naCA)\n\n return (N_CA_C, N_CA_CB, CB_CA_C, CA_C_O, CA_C_nN, C_nN_nCA)", "def angle(self):\n self.convert_window(\"Angle\", \"degree\", [\"arcminute\", \"arcsecond\", \"circle\", \"degree\", \"gon\", \"gradian\", \"mil(Nato)\", \"mil(Soviet Union)\", \"mil(Sweden)\", \"octant\", \"quadrant\", \"radian\", \"revolution\", \"sextant\", \"sign\", \"turn\"])", "def _position_cartesian2cylindrical(pos):\n\n \n #save cartesian position of each particle\n x=pos[:,0]\n y=pos[:,1]\n z=pos[:,2]\n\n rho= np.sqrt(x**2+y**2)\n theta=np.arctan2(y,x)\n\n\n return np.dstack((rho,theta,z))[0]", "def cartesian2polar(x, y):\n r = (x**2+y**2)**.5\n phi = atan2(y, x)\n return phi, r", "def polarToCartesian(r,theta):\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n return x,y", "def cartesian_coordinates(self):\n # extract RA items\n ra_hours, ra_minutes, ra_seconds = RA_RE.match(str(self.ra)).groups()\n # then cast\n ra_hours = int(ra_hours)\n ra_minutes = int(ra_minutes)\n ra_seconds = float(ra_seconds)\n\n # extract DEC items\n dec_sign, dec_degrees, dec_minutes, dec_seconds = DEC_RE.match(str(self.dec)).groups()\n # then cast\n dec_sign = -1 if dec_sign == '-' else 1\n dec_degrees = int(dec_degrees)\n dec_minutes = int(dec_minutes)\n dec_seconds = float(dec_seconds)\n\n # to degrees\n a = (ra_hours*15) + (ra_minutes*0.25) + (ra_seconds*0.004166)\n b = abs(dec_degrees + dec_minutes/60 + dec_seconds/3600) * dec_sign\n\n # to radians\n a = math.radians(a)\n b = math.radians(b)\n\n distance = float(self.distance)\n\n x = (distance * math.cos(b)) * math.cos(a)\n y = (distance * math.cos(b)) * math.sin(a)\n z = distance * math.sin(b)\n\n return x, y, z", "def angle(*args):\n if len(args) < 1:\n return 0.0\n elif len(args) == 1:\n return np.arctan2(args[0][1], args[0][0])\n else:\n v1 = args[0].flatten()\n v2 = args[1].flatten()\n return np.arccos(np.dot(v1, v2) / (norm(v1) * norm(v2)))", "def cartesian(position):\n return [position[0] * cos(position[1]), position[0] * sin(position[1])]", "def jointAngles(self,x,y,z):\n #Converts from arm tics to mm\n x = self.ticToMm(x)\n y = self.ticToMm(y)\n z = self.ticToMm(z)\n \n\n a = sqrt(x**2 + y**2)\n b = sqrt(a**2 + z**2)\n\n phi = atan2(z,a)\n psy = atan2(a,z)\n theta3 = acos(2 - b**2/375**2)\n chi = (pi - theta3)/2\n\n theta3 = theta3*(180/pi) #Elbow\n theta1 = atan(x/y)*(180/pi) #Base\n theta2 = (chi + phi)*(180/pi)+90 #Shoulder\n theta4 = (chi + psy)*(180/pi) #Wrist\n\n return [theta1,theta2,theta3,theta4]", "def calculate_attitude_angle(self):\n return np.arctan(np.pi * (1 - 
self.eccentricity_ratio ** 2) / (4 * self.eccentricity_ratio))", "def to_circular(self):\n return quad_hybrid.dot(self.linear)", "def to_cartesian(self):\n\n if self.cartesian is None:\n theta = math.radians(self.lat)\n phi = math.radians(self.long)\n x = R_EARTH * math.cos(theta) * math.cos(phi)\n y = R_EARTH * math.cos(theta) * math.sin(phi)\n z = R_EARTH * math.sin(theta)\n self.cartesian = CartesianPoint(x, y, z)\n return self.cartesian", "def to_cartesian(self):\n w = 1.73205 # sqrt(3)\n h = 2\n dx = 0.5 * w if self.y % 2 == 1 else 0\n x = 0.5 * w + self.x * w + dx\n y = 0.5 * h + 0.75 * self.y * h\n return (x, y)", "def py_ang(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)", "def to_cartesian(r, phi):\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n return x, y", "def polar_decomposition(self):\n return self.polar_unit_vector, self.polar_angle", "def polar_to_cartesian(self, r, theta):\n # x = rcos(theta), y = rsin(theta)\n x, y = r*math.cos(theta), r*math.sin(theta)\n x, y = self.add((x, y), self.pole)\n return x, y", "def get_cartesian_coords(self):\n r = 1\n dec = self.dec + 90\n x = r * math.sin(np.deg2rad(dec)) * math.cos(np.deg2rad(self.ra))\n y = r * math.sin(np.deg2rad(dec)) * math.sin(np.deg2rad(self.ra))\n z = r * math.cos(np.deg2rad(dec))\n\n return [x, y, z]", "def cartesian_to_polar(self, x, y):\n # r = (x^2+y^2)^2, theta = tan^-1(y/x)\n # pole is the reference point of the coordinate system\n x, y = self.get_rel_to_pole(x, y)\n r = math.sqrt(pow(x, 2)+pow(y, 2))\n # set specific code for edge cases\n if x == 0 and y != 0:\n sign = lambda x: (1, -1)[x < 0]\n return r, sign(y)*math.pi/2\n if x == 0 and y == 0:\n return 0, 0\n else:\n theta = math.atan(y/x)\n return r, theta", "def angle(self):\n return arccos(dot((self.a - self.o) / self.r, (self.b - self.o) / self.r))", "def polar2cartesian(phi, r):\n phi_radians = radians(phi)\n x = r*cos(phi_radians)\n y = r*sin(phi_radians)\n return x, y", "def to_axisangle(self) -> Tuple[np.ndarray, float]:\n angle = np.arccos((self.A.trace()-1)/2)\n axis = np.zeros(3)\n if angle!=0:\n axis = np.array([self.A[2, 1]-self.A[1, 2], self.A[0, 2]-self.A[2, 0], self.A[1, 0]-self.A[0, 1]])/(2*np.sin(angle))\n return axis, angle", "def _spherical_to_cartesian(ra, dec):\n rar = np.radians(ra)\n decr = np.radians(dec)\n\n x = np.cos(rar) * np.cos(decr)\n y = np.sin(rar) * np.cos(decr)\n z = np.sin(decr)\n \n return x, y, z", "def polar_to_cartesian(dist, theta, phi):\n z = np.cos(phi)\n s = np.sin(phi)\n x = s * np.cos(theta)\n y = s * np.sin(theta)\n return np.stack((x, y, z), axis=-1) * np.expand_dims(dist, axis=-1)", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(2.0*(self.w*self.x + self.y*self.z), 1.0 - 2.0*(self.x**2 + self.y**2))\n theta = np.arcsin(2.0*(self.w*self.y - self.z*self.x))\n psi = np.arctan2(2.0*(self.w*self.z + self.x*self.y), 1.0 - 2.0*(self.y**2 + self.z**2))\n return np.c_[phi, theta, psi]", "def atan2 (cls, y, x) :\n return Angle_R (math.atan2 (y, x))", "def convert_coordinates_to_angle(x, y, center_x_pos, center_y_pos):\n\n dx = x - center_x_pos\n dy = y - center_y_pos\n rads = math.atan2(-dy, dx)\n rads %= 2 * math.pi\n return math.degrees(rads)", "def get_polar_coordinates(cup_position, bot_position):\n\n distance_x = cup_position[0] - bot_position[0]\n distance_y = cup_position[1] - bot_position[1]\n\n r = math.hypot(distance_x, distance_y)\n theta = math.degrees(math.atan(distance_y/distance_x))\n\n return r, theta", "def 
xyangle(self,xc=None,yc=None):\n if xc == None:\n xc = self.x1\n if yc == None:\n yc = self.y1\n dx = self.x-xc\n dy = self.y-yc\n self.angle = arctan2(dx,dy) # in radians\n self.sin = sin(self.angle)\n self.cos = cos(self.angle)", "def point_angle(cx, cy, px, py):\n return atan2(py - cy, px - cx)", "def angle_and_axis(basis):\n q = matrix.col(basis.orientation).normalize()\n return q.unit_quaternion_as_axis_and_angle(deg=True)", "def angle(self):\n self._normalise()\n norm = np.linalg.norm(self.vector)\n return self._wrap_angle(2.0 * atan2(norm,self.scalar))", "def IAngle(a, b, t):\n \n # http://www.engineersedge.com/material_science/moment-inertia-gyration-7.htm\n d = b - t \n y = b - (t*(2*d + a) + d**2)/(2*(d+a))\n I = 1/3 * (t*y**3 + a*(b-y)**3 - (a-t)*(b-y-t)**3)\n return I", "def angle(self) -> float:\n ...", "def to_barycentric(cartesian):\n s = [(corners[i] - mid_points[i]).dot(cartesian - mid_points[i]) / 0.75\n for i in range(3)]\n s_clipped = clip(a=s, a_min=0, a_max=1)\n return s_clipped / norm(s_clipped, ord=1)", "def to_polar_tuple(self):\r\n return (abs(self), self.arg())", "def map_ang(cart_pad):\n X, Y, Z = cart_pad\n cart_pad = np.square(cart_pad)\n R = cart_pad.sum(axis=0)\n R = np.sqrt(R)\n thth = np.arccos(Z / R)\n phph = np.arctan2(Y, X)\n return np.array([thth, phph, R])", "def gona(self):\n return GONAngle(dec2gon(self.dec_angle))", "def GetRingBondAng(mol, ringpath):\n N = len(ringpath)\n atoms = [[ringpath[i], ringpath[(i+1)%N], ringpath[(i+2)%N]] for i in range(N)]\n molconf = mol.GetConformer()\n bondang =[rdMolTransforms.GetAngleRad(molconf, x[0], x[1], x[2]) for x in atoms]\n return bondang", "def calculate_angles(self, x, y):\n Oimat = inv(self.Omat)\n Mat = self.pixel_size * inv(self.Dmat) * Oimat\n polar_angles = []\n azimuthal_angles = []\n for i in range(len(x)):\n peak = Oimat * (vec(x[i], y[i]) - self.Cvec)\n v = norm(Mat * peak)\n polar_angle = np.arctan(v / self.distance)\n polar_angles.append(polar_angle)\n azimuthal_angles.append(np.arctan2(-peak[1, 0], peak[2, 0]))\n return (np.array(polar_angles) * degrees,\n np.array(azimuthal_angles) * degrees)", "def angle(self, v1, v2):\r\n cosang = np.dot(v1, v2)\r\n sinang = np.linalg.norm(np.cross(v1, v2))\r\n return np.arctan2(sinang, cosang)", "def dec2gona(dec):\n return GONAngle(dec2gon(dec))", "def _position_spherical2cartesian(pos):\n \n r=pos[:,0]\n theta=pos[:,1]\n phi=pos[:,2]\n\n if any(theta>np.pi) or any(theta<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. 
Exiting.\"\n\n\n x=r*np.sin(theta)*np.cos(phi)\n y=r*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n\n return np.dstack((x,y,z))[0]", "def GalacticToCartesian(Galactic,SolarPosition): \n \n # l,b,s->x,y,z\n cl = np.cos(Galactic[:,0])\n sl = np.sin(Galactic[:,0])\n cb = np.cos(Galactic[:,1])\n sb = np.sin(Galactic[:,1])\n x = SolarPosition[0]-Galactic[:,2]*cb*cl\n y = Galactic[:,2]*cb*sl\n z = Galactic[:,2]*sb+SolarPosition[1]\n\n if(len(Galactic[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n else:\n # vlos,mu_lcos(b),mu_b -> vx,vy,vz\n vl = pm2vel*Galactic[:,2]*Galactic[:,4]\n vb = pm2vel*Galactic[:,2]*Galactic[:,5]\n tmp2 = cb*Galactic[:,3]-sb*vb\n vx = cl*tmp2-sl*vl+SolarPosition[2]\n vy = sl*tmp2+cl*vl+SolarPosition[3]\n vz = sb*Galactic[:,3]+cb*vb+SolarPosition[4]\n Cartesian = np.column_stack((x,y,z,-vx,vy,vz))\n \n return Cartesian", "def Cardioid(self, a):\n t = range(-180,180)\n a = float(a)\n x = []\n y = []\n for i in t:\n i = self.deg2rad(i)\n x.append(a*(2*math.cos(i) - math.cos(2*i)))\n y.append(a*(2*math.sin(i) - math.sin(2*i)))\n return x, y", "def _angle_of_attack(self, rel_wind, blade_chord):\n # blade_chord_vector - (relative_wind + pi)\n # rel_oposite = rel_wind.rotated(math.pi)\n aoa_rad = rel_wind.theta - blade_chord.theta\n aoa_rad = vec.normalize_angle(aoa_rad)\n aoa_360 = aoa_rad * 360 / math.tau\n return aoa_rad, aoa_360", "def _pole_to_cart(self,angles,distances):\n cart=[]\n for i in xrange(0,len(angles)-1):\n angle = angles[i]\n distance = distances[i] \n xs, ys = distance*cos(angle), distance*sin(angle)\n cart.append(tuple((xs,ys)))\n return cart", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(2.0*(self.w*self.x + self.y*self.z), 1.0 - 2.0*(self.x**2 + self.y**2))\n theta = np.arcsin(2.0*(self.w*self.y - self.z*self.x))\n psi = np.arctan2(2.0*(self.w*self.z + self.x*self.y), 1.0 - 2.0*(self.y**2 + self.z**2))\n return np.array([phi, theta, psi])", "def comp_angle_magnet(self):\n Rbo = self.get_Rbo()\n W0 = self.comp_W0m()\n Harc = self.comp_H_arc()\n if self.is_outwards():\n return float(2 * arctan(W0 / (2 * (Rbo + self.H1 - Harc))))\n else:\n return float(2 * arctan(W0 / (2 * (Rbo - self.H1 - Harc))))\n\n # if self.W0_is_rad:\n # return self.W0\n # else: # Convert W0 from m to rad\n # Rbo = self.get_Rbo()\n # return float(2 * arcsin(self.W0 / (2 * Rbo)))", "def setAngle(self, value):\n n, a = Vector.polar(self.components)\n self.components = Vector.cartesian([n, value])", "def convergence_angle(self):\n return np.arctan2(self.radius, self.focal_length)", "def angular_to_cartesian(theta, phi):\n return array([sin(theta) * cos(phi),\n sin(theta) * sin(phi),\n cos(theta)])", "def _angle(self, a, b, c):\n divid = (a ** 2 + b ** 2 - c ** 2)\n divis = (2 * a * b)\n if (divis) > 0:\n result = float(divid) / divis\n if result <= 1.0 and result >= -1.0:\n return acos(result)\n return 0\n else:\n return 0", "def wakeAngle(df, turbList):\n x1 = df.loc[turbList[0], 'x']\n x2 = df.loc[turbList[1], 'x']\n y1 = df.loc[turbList[0], 'y']\n y2 = df.loc[turbList[1], 'y']\n wakeAngle = np.arctan2(\n y2 - y1,\n x2 - x1) * 180.0 / np.pi # Angle in normal cartesian coordinates\n\n # Convert angle to compass angle\n wakeAngle = 270.0 - wakeAngle\n if wakeAngle < 0:\n wakeAngle = wakeAngle + 360.0\n if wakeAngle > 360:\n wakeAngle = wakeAngle - 360.0\n\n return wakeAngle", "def find_angle(p1, p2, p3):\n\n BAx = p1[0] - p2[0]\n BAy = p1[1] - p2[1]\n\n BCx = p3[0] - p2[0]\n BCy = p3[1] - p2[1]\n\n a = [BAx, BAy]\n b = [BCx, BCy]\n a_mag = np.linalg.norm(a)\n b_mag = 
np.linalg.norm(b)\n\n theta = np.arccos(np.dot(a, b) / (a_mag * b_mag))\n\n return math.degrees(theta)", "def cylindrical2cartesian(cylinder):\n cart = np.zeros(cylinder.shape)\n cart[:, 0] = cylinder[:, 0] * np.cos(cylinder[:, 1])\n cart[:, 1] = cylinder[:, 0] * np.sin(cylinder[:, 1])\n cart[:, 2] = cylinder[:, 2]\n return cart", "def atan2_vec(vector):\n return -np.arctan2(vector[1], vector[0])", "def angle(x, y, deg=False):\n rad_angle = np.arccos(np.dot(x, y)/ (norm(x)*norm(y)))\n if deg:\n return rad_angle*(180.0/np.pi)\n else:\n return rad_angle", "def pol2cart(distance, angle):\r\n r = np.array(distance)\r\n theta = np.deg2rad(360 - np.array(angle))\r\n \r\n x = r * np.cos(theta)\r\n y = r * np.sin(theta)\r\n \r\n return(x, y)", "def compute_angle(self, direction):\n scaled_cosine = self.w1.dot(direction) # ||direction|| cos(theta)\n scaled_sine = self.w2.dot(direction) # ||direction|| sin(theta)\n return np.arctan2(scaled_sine, scaled_cosine)", "def atan (cls, x) :\n return Angle_R (math.atan (x))", "def pol2cart(theta: float, rho: float) -> typing.Tuple[float, float]:\n return rho * cos(theta), rho * sin(theta)", "def atan(self, x):\n return self.arctan(x)", "def gon2deca(gon):\n return DECAngle(gon2dec(gon))", "def angle(self) -> int:", "def angle_to(self, target_pos):\n return angle_to(self.tonp(), target_pos.tonp())", "def getCartesian(self, phi, theta, radius):\n point_x = round(sin(theta) * cos(phi) * radius,4)\n point_y = round(sin(theta) * sin(phi) * radius,4)\n point_z = round(cos(theta) * radius,4)\n return [point_x, point_y, point_z]", "def vector_angle(v):\n assert len(v) == 2\n x, y = v\n return np.arctan2(y, x)", "def arc_to(self, position):\n ### EXTRA CREDIT\n # TODO\n pass # delete this when you implement your code", "def transform_angle_by_quadrant(self, initial_angle, x_diff, y_diff):\n\t\tif x_diff > 0 and y_diff > 0:\n\t\t\tprint(\"p1 in quadrant: {}\".format(1))\n\t\t\t# Point B in quadrant 1..\n\t\t\treturn degrees(initial_angle)\n\t\telif x_diff < 0 and y_diff > 0:\n\t\t\tprint(\"p1 in quadrant: {}\".format(2))\n\t\t\t# Point B in quadrant 2..\n\t\t\treturn 180 - degrees(initial_angle)\n\t\telif x_diff < 0 and y_diff < 0:\n\t\t\tprint(\"p1 in quadrant: {}\".format(3))\n\t\t\t# Point B in quadrant 3..\n\t\t\treturn 180 + degrees(initial_angle)\n\t\telif x_diff > 0 and y_diff < 0:\n\t\t\tprint(\"p1 in quadrant: {}\".format(4))\n\t\t\t# Point B in quadrant 4..\n\t\t\treturn 360 - degrees(initial_angle)\n\t\telse:\n\t\t\traise \"Error occurred in basic_drive_3/transform_angle_by_quadrant func..\"", "def angle(self):\n return atan2(self.v.y, self.v.x)", "def angle(force, forceXYZ, excited_axis, distance, distanceXYZ):\n\n Fabs = force\n Fz = forceXYZ[:, 2]\n if excited_axis == 'X':\n ea = 0\n else:\n ea = 1\n Fxy = forceXYZ[:, ea]\n\n dist = distance\n dist3D = distanceXYZ\n Dz = dist3D[:, 2]\n Dxy = dist3D[:, ea]\n\n AF = _angle(Fz, Fxy, Fabs, angle='A')\n BF = _angle(Fz, Fxy, Fabs, angle='B')\n CF = _angle(Fz, Fxy, Fabs, angle='C')\n AD = _angle(Dz, Dxy, dist, angle='A')\n BD = _angle(Dz, Dxy, dist, angle='B')\n CD = _angle(Dz, Dxy, dist, angle='C')\n\n angle = dict(list(zip(['AF', 'BF', 'CF', 'AD', 'BD', 'CD'],\n [AF, BF, CF, AD, BD, CD])))\n\n return angle", "def index_to_angle(i):\n return -135.0 + (i / 1081.0) * 0.25", "def _angle(*vectors):\n if len(vectors) == 1:\n return DubinsUAV2D._sawtooth(np.arctan2(vectors[0][1], vectors[0][0]))\n elif len(vectors) == 2:\n return DubinsUAV2D._sawtooth(np.arctan2(vectors[1][1], vectors[1][0]) - 
np.arctan2(vectors[0][1], vectors[0][0]))\n else:\n raise AttributeError()", "def from_cartesian(cls, cartesian):\n z = cartesian.z\n y = cartesian.y\n x = cartesian.x\n theta = math.asin(z / R_EARTH)\n phi = math.atan2(y, x)\n lat = math.degrees(theta)\n lon = math.degrees(phi)\n if lon < 0:\n lon += 360\n return cls(lat, lon)", "def to_cartesian(self): # TODO\n pass", "def getAngle(A, B, C):\n if A * B == 0:\n return 180\n else:\n return degrees(acos((A * A + B * B - C * C)/(2.0 * A * B)))", "def quat_angle(quat):\n return 2 * float(np.arccos(min(1, max(-1, quat[0]))))", "def to_polar(x, y):\n r = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n return r, phi", "def angle(cosT):\n for i in range(N):\n cosT = ((cosT + 1) / two) ** itwo\n sinT = (1 - cosT * cosT) ** itwo\n return sinT * (2 ** N)", "def coord_rotate_rad(x, y, z):\n #-- 1 --\n xt = math.asin ( math.sin(x) * math.sin(y) +\n math.cos(x) * math.cos(y) * math.cos(z) )\n #-- 2 --\n yt = math.acos ( ( math.sin(x) - math.sin(y) * math.sin(xt) ) /\n ( math.cos(y) * math.cos(xt) ) )\n #-- 3 --\n if math.sin(z) > 0.0:\n yt = TWO_PI - yt\n\n #-- 4 --\n return (xt, yt)" ]
[ "0.6777609", "0.6658217", "0.6547693", "0.6523295", "0.63220453", "0.6290984", "0.62786305", "0.6244212", "0.62430567", "0.6160538", "0.61536187", "0.6145556", "0.61396515", "0.61340433", "0.60545677", "0.60487115", "0.6047855", "0.6031376", "0.6025377", "0.6023894", "0.60146546", "0.6005549", "0.60022223", "0.59798217", "0.59605914", "0.59599423", "0.59494394", "0.5937717", "0.5934777", "0.59071326", "0.59066236", "0.58905405", "0.5875601", "0.58667094", "0.58652526", "0.58590555", "0.5854203", "0.58524", "0.5849701", "0.5849348", "0.5840782", "0.5813563", "0.5809686", "0.5806841", "0.57795876", "0.57793915", "0.5767061", "0.57570964", "0.57439", "0.5741279", "0.5731219", "0.5716683", "0.5705732", "0.5704187", "0.57003915", "0.5697061", "0.56944793", "0.56912893", "0.5685369", "0.56828773", "0.56825876", "0.56720644", "0.5653344", "0.5649978", "0.56439966", "0.56430876", "0.56361055", "0.5625579", "0.5623661", "0.56233865", "0.56209266", "0.5617967", "0.5617264", "0.56136006", "0.561294", "0.56104994", "0.56103736", "0.56020564", "0.55995715", "0.55927664", "0.55880296", "0.5582813", "0.55789006", "0.55754024", "0.55736387", "0.55713135", "0.55711377", "0.55694926", "0.556942", "0.55533427", "0.5553248", "0.5551278", "0.5550107", "0.55499536", "0.5547866", "0.5547591", "0.5536962", "0.55348", "0.55269927", "0.5518923", "0.5515146" ]
0.0
-1
Conversion from (internal or extended) BondAngleTorsion to Cartesian coordinates
def Cartesian(self, BAT): # Arrange BAT coordinates in convenient arrays offset = 6 if len(BAT) == (3 * self.natoms) else 0 bonds = BAT[offset + 3::3] angles = BAT[offset + 4::3] phase_torsions = BAT[offset + 5::3] torsions = [(phase_torsions[n] + phase_torsions[self._firstTorsionTInd[n]]) \ if self._firstTorsionTInd[n]!=n else phase_torsions[n] \ for n in range(self.ntorsions)] p1 = np.array([0., 0., 0.]) p2 = np.array([0., 0., BAT[offset]]) p3 = np.array([BAT[offset+1]*np.sin(BAT[offset+2]), 0., \ BAT[offset]-BAT[offset+1]*np.cos(BAT[offset+2])]) # If appropriate, rotate and translate the first three atoms if offset == 6: # Rotate the third atom by the appropriate value (phi, theta, omega) = BAT[3:6] co = np.cos(omega) so = np.sin(omega) Romega = np.array([[co, -so, 0], [so, co, 0], [0, 0, 1]]) p3 = Romega.dot(p3) # Rotate the second two atoms to point in the right direction cp = np.cos(phi) sp = np.sin(phi) ct = np.cos(theta) st = np.sin(theta) Re = np.array([[cp * ct, -sp, cp * st], [ct * sp, cp, sp * st], [-st, 0, ct]]) p2 = Re.dot(p2) p3 = Re.dot(p3) # Translate the first three atoms by the origin origin = np.array(BAT[:3]) p1 += origin p2 += origin p3 += origin XYZ = np.zeros((self.natoms, 3)) XYZ[self.rootInd[0]] = p1 XYZ[self.rootInd[1]] = p2 XYZ[self.rootInd[2]] = p3 for ((a1,a2,a3,a4), bond, angle, torsion) in \ zip(self._torsionIndL,bonds,angles,torsions): sphere = Sphere(Vector(XYZ[a2]), bond) cone = Cone(Vector(XYZ[a2]), Vector(XYZ[a3] - XYZ[a2]), angle) plane123 = Plane(Vector(XYZ[a4]), Vector(XYZ[a3]), Vector(XYZ[a2])) points = sphere.intersectWith(cone).intersectWith(plane123) p = points[0] if (Plane(Vector(XYZ[a3]), Vector( XYZ[a2]), points[0]).normal * plane123.normal) > 0 else points[1] p = rotatePoint(Vector(p), Line(Vector(XYZ[a2]), Vector(XYZ[a2] - XYZ[a3])), torsion) XYZ[a1] = p.array return XYZ for ((a1,a2,a3,a4), bond, angle, torsion) in \ zip(self._torsionIndL,bonds,angles,torsions): p2 = XYZ[a2] p3 = XYZ[a3] p4 = XYZ[a4] # circle = sphere.intersectWith(cone) n23 = normalize(p3 - p2) # points = circle.intersectWith(plane123) # plane.intersectWith(Plane(circle.center, circle.normal)) is a line # line_direction = cross(normalize(cross(p4-p3,n23)),n23) # Rotate the point about the p2-p3 axis by the torsion angle v21 = (bond * np.cos(angle)) * n23 - (bond * np.sin(angle)) * cross( normalize(cross(p4 - p3, n23)), n23) s = np.sin(torsion) c = np.cos(torsion) XYZ[a1] = p2 - cross(n23, v21) * s + np.sum( n23 * v21) * n23 * (1.0 - c) + v21 * c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cartesian_to_internal(self, atom_position, bond_position, angle_position, torsion_position):\n # TODO: _cartesian_to_internal and _internal_to_cartesian should accept/return units and have matched APIs\n\n check_dimensionality(atom_position, unit.nanometers)\n check_dimensionality(bond_position, unit.nanometers)\n check_dimensionality(angle_position, unit.nanometers)\n check_dimensionality(torsion_position, unit.nanometers)\n\n # Convert to internal coordinates once everything is dimensionless\n # Make sure positions are float64 arrays implicitly in units of nanometers for numba\n from perses.rjmc import coordinate_numba\n internal_coords = coordinate_numba.cartesian_to_internal(\n atom_position.value_in_unit(unit.nanometers).astype(np.float64),\n bond_position.value_in_unit(unit.nanometers).astype(np.float64),\n angle_position.value_in_unit(unit.nanometers).astype(np.float64),\n torsion_position.value_in_unit(unit.nanometers).astype(np.float64))\n # Return values are also in floating point implicitly in nanometers and radians\n r, theta, phi = internal_coords\n\n # Compute absolute value of determinant of Jacobian\n detJ = np.abs(r**2*np.sin(theta))\n\n check_dimensionality(r, float)\n check_dimensionality(theta, float)\n check_dimensionality(phi, float)\n check_dimensionality(detJ, float)\n\n return internal_coords, detJ", "def _internal_to_cartesian(self, bond_position, angle_position, torsion_position, r, theta, phi):\n # TODO: _cartesian_to_internal and _internal_to_cartesian should accept/return units and have matched APIs\n\n check_dimensionality(bond_position, unit.nanometers)\n check_dimensionality(angle_position, unit.nanometers)\n check_dimensionality(torsion_position, unit.nanometers)\n check_dimensionality(r, float)\n check_dimensionality(theta, float)\n check_dimensionality(phi, float)\n\n # Compute Cartesian coordinates from internal coordinates using all-dimensionless quantities\n # All inputs to numba must be in float64 arrays implicitly in md_unit_syste units of nanometers and radians\n from perses.rjmc import coordinate_numba\n xyz = coordinate_numba.internal_to_cartesian(\n bond_position.value_in_unit(unit.nanometers).astype(np.float64),\n angle_position.value_in_unit(unit.nanometers).astype(np.float64),\n torsion_position.value_in_unit(unit.nanometers).astype(np.float64),\n np.array([r, theta, phi], np.float64))\n # Transform position of new atom back into unit-bearing Quantity\n xyz = unit.Quantity(xyz, unit=unit.nanometers)\n\n # Compute abs det Jacobian using unitless values\n detJ = np.abs(r**2*np.sin(theta))\n\n check_dimensionality(xyz, unit.nanometers)\n check_dimensionality(detJ, float)\n return xyz, detJ", "def cartesian2polar(cartesian):\n cartesian = np.array(cartesian).squeeze()\n x, y = cartesian\n r = np.linalg.norm([x, y])\n azimuth = np.arctan2(y, x)\n return np.array([r, azimuth])", "def polarToCartesian(theta=0, radius=0):\n\n x = radius * np.cos(theta)\n y = radius * np.sin(theta)\n return x, y", "def to_cartesian(self):\n w = 1.73205 # sqrt(3)\n h = 2\n dx = 0.5 * w if self.y % 2 == 1 else 0\n x = 0.5 * w + self.x * w + dx\n y = 0.5 * h + 0.75 * self.y * h\n return (x, y)", "def to_cartesian(self):\n\n if self.cartesian is None:\n theta = math.radians(self.lat)\n phi = math.radians(self.long)\n x = R_EARTH * math.cos(theta) * math.cos(phi)\n y = R_EARTH * math.cos(theta) * math.sin(phi)\n z = R_EARTH * math.sin(theta)\n self.cartesian = CartesianPoint(x, y, z)\n return self.cartesian", "def get_cartesian_coords(self):\n r = 1\n dec = 
self.dec + 90\n x = r * math.sin(np.deg2rad(dec)) * math.cos(np.deg2rad(self.ra))\n y = r * math.sin(np.deg2rad(dec)) * math.sin(np.deg2rad(self.ra))\n z = r * math.cos(np.deg2rad(dec))\n\n return [x, y, z]", "def polar2cartesian(polar):\n polar = np.array(polar).squeeze()\n r, azimuth = polar\n x = r * np.cos(azimuth)\n y = r * np.sin(azimuth)\n return np.array([x, y])", "def cartesian_coordinates(self):\n # extract RA items\n ra_hours, ra_minutes, ra_seconds = RA_RE.match(str(self.ra)).groups()\n # then cast\n ra_hours = int(ra_hours)\n ra_minutes = int(ra_minutes)\n ra_seconds = float(ra_seconds)\n\n # extract DEC items\n dec_sign, dec_degrees, dec_minutes, dec_seconds = DEC_RE.match(str(self.dec)).groups()\n # then cast\n dec_sign = -1 if dec_sign == '-' else 1\n dec_degrees = int(dec_degrees)\n dec_minutes = int(dec_minutes)\n dec_seconds = float(dec_seconds)\n\n # to degrees\n a = (ra_hours*15) + (ra_minutes*0.25) + (ra_seconds*0.004166)\n b = abs(dec_degrees + dec_minutes/60 + dec_seconds/3600) * dec_sign\n\n # to radians\n a = math.radians(a)\n b = math.radians(b)\n\n distance = float(self.distance)\n\n x = (distance * math.cos(b)) * math.cos(a)\n y = (distance * math.cos(b)) * math.sin(a)\n z = distance * math.sin(b)\n\n return x, y, z", "def _position_cylindrical2cartesian(pos):\n \n rho=pos[:,0]\n theta=pos[:,1]\n z=pos[:,2]\n\n x=rho*np.cos(theta)\n y=rho*np.sin(theta)\n z=z\n\n return np.dstack((x,y,z))[0]", "def cartesianToPolar(x=0, y=0):\n\n radius = np.hypot(x, y)\n theta = np.arctan2(y, x)\n return theta, radius", "def polar_to_cartesian(radius, angle_deg):\n\n theta = np.deg2rad(angle_deg)\n x = radius * np.cos(theta)\n y = radius * np.sin(theta)\n return(x, y)", "def cartesianToPolar(x,y):\n r = np.sqrt(x**2 + y**2)\n theta = np.arctan2(y,x)\n\n return r,theta", "def polar_to_cartesian(r, theta):\n\n x = r * cos(theta)\n y = r * sin(theta)\n\n return x, y", "def cartesian(position):\n return [position[0] * cos(position[1]), position[0] * sin(position[1])]", "def polar2cartesian(phi, r):\n phi_radians = radians(phi)\n x = r*cos(phi_radians)\n y = r*sin(phi_radians)\n return x, y", "def _position_cartesian2cylindrical(pos):\n\n \n #save cartesian position of each particle\n x=pos[:,0]\n y=pos[:,1]\n z=pos[:,2]\n\n rho= np.sqrt(x**2+y**2)\n theta=np.arctan2(y,x)\n\n\n return np.dstack((rho,theta,z))[0]", "def polarToCartesian(r,theta):\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n return x,y", "def polar_to_cartesian(self, r, theta):\n # x = rcos(theta), y = rsin(theta)\n x, y = r*math.cos(theta), r*math.sin(theta)\n x, y = self.add((x, y), self.pole)\n return x, y", "def to_cartesian(dimensions, angles):\n return Operator(transform=np.transpose(np.array(_basis_vectors(dimensions, angles))))", "def to_cartesian(r, phi):\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n return x, y", "def polar_to_cartesian(dist, theta, phi):\n z = np.cos(phi)\n s = np.sin(phi)\n x = s * np.cos(theta)\n y = s * np.sin(theta)\n return np.stack((x, y, z), axis=-1) * np.expand_dims(dist, axis=-1)", "def _position_spherical2cartesian(pos):\n \n r=pos[:,0]\n theta=pos[:,1]\n phi=pos[:,2]\n\n if any(theta>np.pi) or any(theta<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. 
Exiting.\"\n\n\n x=r*np.sin(theta)*np.cos(phi)\n y=r*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n\n return np.dstack((x,y,z))[0]", "def cartesian2polar(x, y):\n r = (x**2+y**2)**.5\n phi = atan2(y, x)\n return phi, r", "def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos", "def getCartesian(self, phi, theta, radius):\n point_x = round(sin(theta) * cos(phi) * radius,4)\n point_y = round(sin(theta) * sin(phi) * radius,4)\n point_z = round(cos(theta) * radius,4)\n return [point_x, point_y, point_z]", "def _spherical_to_cartesian(ra, dec):\n rar = np.radians(ra)\n decr = np.radians(dec)\n\n x = np.cos(rar) * np.cos(decr)\n y = np.sin(rar) * np.cos(decr)\n z = np.sin(decr)\n \n return x, y, z", "def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle", "def to_axang(self) -> Tuple[np.ndarray, float]:\n return self.to_axisangle()", "def __cartesian2spherical(x: float, y: float, z: float) -> Tuple[float, float]:\n if x == 0 and y == 0:\n return 0, np.degrees(np.pi * 0.5 * np.sign(z))\n lat = np.arctan2(z, np.sqrt(x * x + y * y))\n lon = np.arctan2(y, x)\n return np.degrees(lon), np.degrees(lat)", "def to_axang(self) -> Tuple[np.ndarray, float]:\n denom = np.linalg.norm(self.v)\n angle = 2.0*np.arctan2(denom, self.w)\n axis = np.zeros(3) if angle==0.0 else self.v/denom\n return axis, angle", "def cartesian_to_polar(self, x, y):\n # r = (x^2+y^2)^2, theta = tan^-1(y/x)\n # pole is the reference point of the coordinate system\n x, y = self.get_rel_to_pole(x, y)\n r = math.sqrt(pow(x, 2)+pow(y, 2))\n # set specific code for edge cases\n if x == 0 and y != 0:\n sign = lambda x: (1, -1)[x < 0]\n return r, sign(y)*math.pi/2\n if x == 0 and y == 0:\n return 0, 0\n else:\n theta = math.atan(y/x)\n return r, theta", "def get_polar_coordinates(cup_position, bot_position):\n\n distance_x = cup_position[0] - bot_position[0]\n distance_y = cup_position[1] - bot_position[1]\n\n r = math.hypot(distance_x, distance_y)\n theta = math.degrees(math.atan(distance_y/distance_x))\n\n return r, theta", "def to_polar_tuple(self):\r\n return (abs(self), self.arg())", "def lon_lat_to_cartesian(lon, lat, R = 1):\n lon_r = np.radians(lon)\n lat_r = np.radians(lat)\n\n x = R * np.cos(lat_r) * np.cos(lon_r)\n y = R * np.cos(lat_r) * np.sin(lon_r)\n z = R * np.sin(lat_r)\n return x,y,z", "def GalacticToCartesian(Galactic,SolarPosition): \n \n # l,b,s->x,y,z\n cl = np.cos(Galactic[:,0])\n sl = np.sin(Galactic[:,0])\n cb = np.cos(Galactic[:,1])\n sb = np.sin(Galactic[:,1])\n x = SolarPosition[0]-Galactic[:,2]*cb*cl\n y = Galactic[:,2]*cb*sl\n z = Galactic[:,2]*sb+SolarPosition[1]\n\n if(len(Galactic[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n else:\n # vlos,mu_lcos(b),mu_b -> vx,vy,vz\n vl = pm2vel*Galactic[:,2]*Galactic[:,4]\n vb = pm2vel*Galactic[:,2]*Galactic[:,5]\n tmp2 = cb*Galactic[:,3]-sb*vb\n vx = cl*tmp2-sl*vl+SolarPosition[2]\n vy = sl*tmp2+cl*vl+SolarPosition[3]\n vz = sb*Galactic[:,3]+cb*vb+SolarPosition[4]\n Cartesian = np.column_stack((x,y,z,-vx,vy,vz))\n \n return Cartesian", "def _polar_to_cartesian(self, radius: float, radians: float) -> None:\n self.x = round(radius * math.cos(radians), EPSILON_EXP_MINUS_1)\n self.y = round(radius * math.sin(radians), EPSILON_EXP_MINUS_1)", "def polar_decomposition(self):\n return self.polar_unit_vector, self.polar_angle", "def make_cartesian(r: float, phi: float):\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n return x, y", "def 
cylindrical2cartesian(cylinder):\n cart = np.zeros(cylinder.shape)\n cart[:, 0] = cylinder[:, 0] * np.cos(cylinder[:, 1])\n cart[:, 1] = cylinder[:, 0] * np.sin(cylinder[:, 1])\n cart[:, 2] = cylinder[:, 2]\n return cart", "def corners_cartesian(self):\n x_corners, y_corners, z_corners = \\\n starwinds_magnetogram.coordinate_transforms.rectangular_coordinates_from_spherical(\n np.ones(self.polar_corners.shape),\n self.polar_corners,\n self.azimuthal_corners)\n\n return x_corners, y_corners, z_corners", "def from_cartesian(cls, cartesian):\n z = cartesian.z\n y = cartesian.y\n x = cartesian.x\n theta = math.asin(z / R_EARTH)\n phi = math.atan2(y, x)\n lat = math.degrees(theta)\n lon = math.degrees(phi)\n if lon < 0:\n lon += 360\n return cls(lat, lon)", "def cartesian_to_lon_lat(x, y, z, R = 1):\n lon = np.degrees(np.arctan2(y,x))\n lat = np.degrees(np.pi/2-np.arctan2((x**2+y**2)**0.5,z))\n\n return lon,lat", "def cartesian_To_Center(self, x, y, z):\n\n if x > 0.0 and -self.L_cap <= y <= 0.0:\n s = self.L_cap + y\n xc = x - self.rb\n yc = z\n else:\n theta = full_arctan2(y, x)\n if theta <= self.ang:\n s = theta * self.rb + self.L_cap\n xc = np.sqrt(x ** 2 + y ** 2) - self.rb\n yc = z\n elif self.ang < theta <= 2 * np.pi: # i'm being lazy here and not limiting the real end\n x0, y0 = np.cos(self.ang) * self.rb, np.sin(self.ang) * self.rb\n thetaEndPerp = np.pi - np.arctan(-1 / np.tan(self.ang))\n x, y = x - x0, y - y0\n deltaS, xc = np.cos(thetaEndPerp) * x + np.sin(-thetaEndPerp) * y, np.sin(thetaEndPerp) * x + np.cos(\n thetaEndPerp) * y\n yc = z\n xc = -xc\n s = (self.ang * self.rb + self.L_cap) + deltaS\n else:\n raise ValueError\n return s, xc, yc", "def Cardioid(self, a):\n t = range(-180,180)\n a = float(a)\n x = []\n y = []\n for i in t:\n i = self.deg2rad(i)\n x.append(a*(2*math.cos(i) - math.cos(2*i)))\n y.append(a*(2*math.sin(i) - math.sin(2*i)))\n return x, y", "def centers_cartesian(self):\n polar_centers, azimuthal_centers = self.centers()\n x_centers, y_centers, z_centers = \\\n starwinds_magnetogram.coordinate_transforms.rectangular_coordinates_from_spherical(\n np.ones(polar_centers.shape),\n polar_centers,\n azimuthal_centers)\n\n return x_centers, y_centers, z_centers", "def _pole_to_cart(self,angles,distances):\n cart=[]\n for i in xrange(0,len(angles)-1):\n angle = angles[i]\n distance = distances[i] \n xs, ys = distance*cos(angle), distance*sin(angle)\n cart.append(tuple((xs,ys)))\n return cart", "def getAngle(self):\n x, y = self.components\n return math.atan2(y, x)", "def cartesian(self):\n raise NotImplementedError(\"This is not implemented.\")\n return CartCoord()", "def to_cartesian(self): # TODO\n pass", "def spherical_to_cartesian(self, r, phi, theta):\n x = r*cos(phi)*sin(theta)\n y = r*sin(phi)*sin(theta)\n z = r*cos(theta)\n \n return Vector(float(x), float(y), float(z))", "def getCartesianPoints2(r, theta, center):\n x = r * np.cos(theta) + center[0]\n y = r * np.sin(theta) + center[1]\n\n return x, y", "def to_circular(self):\n return quad_hybrid.dot(self.linear)", "def convert_to_cartesian(grid: List[Tuple[float, float]], radius: float = 1.0) -> List[Tuple[float, float, float]]:\n\n # conversion radians -> degrees\n r2d = 180.0 / np.pi\n\n # calculate x/y/z coordinates, assuming r=1\n return [\n (\n radius * np.cos(lat / r2d) * np.cos(lon / r2d),\n radius * np.cos(lat / r2d) * np.sin(lon / r2d),\n radius * np.sin(lat / r2d),\n )\n for lon, lat in grid\n ]", "def point_angle(cx, cy, px, py):\n return atan2(py - cy, px - cx)", "def 
convert_coordinates_to_angle(x, y, center_x_pos, center_y_pos):\n\n dx = x - center_x_pos\n dy = y - center_y_pos\n rads = math.atan2(-dy, dx)\n rads %= 2 * math.pi\n return math.degrees(rads)", "def _rotate_coords(self, x, y, theta, ox, oy):\n s, c = self._pkgs['numpy'].sin(theta), self._pkgs['numpy'].cos(theta)\n x, y = self._pkgs['numpy'].asarray(x) - ox, self._pkgs['numpy'].asarray(y) - oy\n return x * c - y * s + ox, x * s + y * c + oy", "def cartesian_coordinates(self, *axes):", "def pol2cart(theta: float, rho: float) -> typing.Tuple[float, float]:\n return rho * cos(theta), rho * sin(theta)", "def to_polar(x, y):\n r = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n return r, phi", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(2.0*(self.w*self.x + self.y*self.z), 1.0 - 2.0*(self.x**2 + self.y**2))\n theta = np.arcsin(2.0*(self.w*self.y - self.z*self.x))\n psi = np.arctan2(2.0*(self.w*self.z + self.x*self.y), 1.0 - 2.0*(self.y**2 + self.z**2))\n return np.c_[phi, theta, psi]", "def to_axisangle(self) -> Tuple[np.ndarray, float]:\n angle = np.arccos((self.A.trace()-1)/2)\n axis = np.zeros(3)\n if angle!=0:\n axis = np.array([self.A[2, 1]-self.A[1, 2], self.A[0, 2]-self.A[2, 0], self.A[1, 0]-self.A[0, 1]])/(2*np.sin(angle))\n return axis, angle", "def coord_rotate_rad(x, y, z):\n #-- 1 --\n xt = math.asin ( math.sin(x) * math.sin(y) +\n math.cos(x) * math.cos(y) * math.cos(z) )\n #-- 2 --\n yt = math.acos ( ( math.sin(x) - math.sin(y) * math.sin(xt) ) /\n ( math.cos(y) * math.cos(xt) ) )\n #-- 3 --\n if math.sin(z) > 0.0:\n yt = TWO_PI - yt\n\n #-- 4 --\n return (xt, yt)", "def calculate_attitude_angle(self):\n return np.arctan(np.pi * (1 - self.eccentricity_ratio ** 2) / (4 * self.eccentricity_ratio))", "def from_angle_to_xy(args, angles):\n l1, l2, m1, m2, g = args\n time, theta1, theta2 = angles.T\n x1 = l1*np.sin(theta1)\n y1 = -l1*np.cos(theta1)\n x2 = l2*np.sin(theta2) + x1\n y2 = -l2*np.cos(theta2) + y1\n return np.array([time, x1, y1, x2, y2]).T", "def spherical2cartesian(v):\n \n x = np.cos(v[0]) * np.cos(v[1]) \n y = np.cos(v[0]) * np.sin(v[1]) \n z = np.sin(v[0]) \n \n return [x,y,z]", "def angular_to_cartesian(theta, phi):\n return array([sin(theta) * cos(phi),\n sin(theta) * sin(phi),\n cos(theta)])", "def cart2pol(x: float, y: float) -> typing.Tuple[float, float]:\n return atan2(y, x), hypot(x, y)", "def spherical2cartesian(phi, theta, depth):\n x = depth * np.sin(theta) * np.cos(phi)\n y = depth * np.cos(theta)\n z = depth * np.sin(theta) * np.sin(phi)\n\n return x, y, z", "def spherical_to_cartesian(r, lat, lon):\n import math\n\n if np.isscalar(r) and np.isscalar(lat) and np.isscalar(lon):\n x = r * math.cos(lat) * math.cos(lon)\n y = r * math.cos(lat) * math.sin(lon)\n z = r * math.sin(lat)\n else:\n x = r * np.cos(lat) * np.cos(lon)\n y = r * np.cos(lat) * np.sin(lon)\n z = r * np.sin(lat)\n\n return x, y, z", "def pol2cart(distance, angle):\r\n r = np.array(distance)\r\n theta = np.deg2rad(360 - np.array(angle))\r\n \r\n x = r * np.cos(theta)\r\n y = r * np.sin(theta)\r\n \r\n return(x, y)", "def polar_coord(point, center):\n x = point[0] - center[0]\n y = point[1] - center[1]\n rho = np.sqrt(x ** 2 + y ** 2)\n phi = np.arctan2(y, x)\n return np.array([phi, rho])", "def coordsxy(self, convert_to=False):\n if convert_to == 'rad':\n return (self.x*3.14159/180., self.y*3.14159/180.)\n elif convert_to == 'deg':\n return (self.x/3.14159*180., self.y/3.14159*180.)\n else:\n return (self.x, self.y)", "def py_ang(self,v1, v2):\n cosang = np.dot(v1, v2)\n sinang = 
la.norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)", "def to_barycentric(cartesian):\n s = [(corners[i] - mid_points[i]).dot(cartesian - mid_points[i]) / 0.75\n for i in range(3)]\n s_clipped = clip(a=s, a_min=0, a_max=1)\n return s_clipped / norm(s_clipped, ord=1)", "def CartesianToPolar(Cartesian):\n \n # x,y,z -> R,phi,z\n R = np.sqrt(Cartesian[:,0]*Cartesian[:,0]+Cartesian[:,1]*Cartesian[:,1])\n phi = np.arctan2(Cartesian[:,1],Cartesian[:,0])\n z = Cartesian[:,2]\n phi[phi<0.] += 2.*np.pi\n if (len(Cartesian[0,:])==3):\n Polar = np.column_stack((R,phi,z))\n else:\n # vx,vy,vz -> vR,vphi,vz\n cp = np.cos(phi)\n sp = np.sin(phi)\n vR = Cartesian[:,3]*cp+Cartesian[:,4]*sp\n vphi = Cartesian[:,4]*cp-Cartesian[:,3]*sp\n vz = Cartesian[:,5]\n Polar = np.column_stack((R,phi,z,vR,vphi,vz))\n\t\t\n return Polar", "def getCartesianPoints(rTheta, center):\n if rTheta.ndim == 2:\n x = rTheta[:, 0] * np.cos(rTheta[:, 1]) + center[0]\n y = rTheta[:, 0] * np.sin(rTheta[:, 1]) + center[1]\n else:\n x = rTheta[0] * np.cos(rTheta[1]) + center[0]\n y = rTheta[0] * np.sin(rTheta[1]) + center[1]\n\n return np.array([x, y]).T", "def xyangle(self,xc=None,yc=None):\n if xc == None:\n xc = self.x1\n if yc == None:\n yc = self.y1\n dx = self.x-xc\n dy = self.y-yc\n self.angle = arctan2(dx,dy) # in radians\n self.sin = sin(self.angle)\n self.cos = cos(self.angle)", "def angle(self):\n return arccos(dot((self.a - self.o) / self.r, (self.b - self.o) / self.r))", "def to_polar(center_coords, neighbors_coords):\n return cart2pol((neighbors_coords - center_coords)[:, 0],\n (neighbors_coords - center_coords)[:, 1])", "def spherical2cartesian(spherical):\n spherical = np.array(spherical).squeeze()\n distance, azimuth, elevation = spherical\n x = distance * np.sin(azimuth) * np.cos(elevation)\n y = distance * np.sin(azimuth) * np.sin(elevation)\n z = distance * np.cos(azimuth)\n return np.array([x, y, z])", "def angle(self):\n return angle(self.force, self.forceXYZ, self.excited_axis,\n self.distance, self.distanceXYZ)", "def sphericalToCartesian(altitude=0, azimuth=0, radius=0):\n\n rcos_theta = radius * np.cos(altitude)\n x = rcos_theta * np.cos(azimuth)\n y = rcos_theta * np.sin(azimuth)\n z = radius * np.sin(altitude)\n return x, y, z", "def atan2 (cls, y, x) :\n return Angle_R (math.atan2 (y, x))", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(2.0*(self.w*self.x + self.y*self.z), 1.0 - 2.0*(self.x**2 + self.y**2))\n theta = np.arcsin(2.0*(self.w*self.y - self.z*self.x))\n psi = np.arctan2(2.0*(self.w*self.z + self.x*self.y), 1.0 - 2.0*(self.y**2 + self.z**2))\n return np.array([phi, theta, psi])", "def polar_to_cart(angle: float, scalar: float):\n radians = math.radians(angle)\n change_y = math.cos(radians)\n change_x = math.sin(radians)\n return change_x * scalar, change_y * scalar", "def cartesian2polar(coords, inputshape, origin):\n\n r_index, theta_index = coords\n\n r = r_index * (rangeX[1] - rangeX[0])/2.0/inputshape[0]\n theta = theta_index * 2.0*np.pi/inputshape[1] + np.pi\n\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n \n i = np.round(x/Lx*inputshape[0]) + origin[0]\n j = np.round(y/Ly*inputshape[0]) + origin[1]\n \n return (i,j)", "def polar_to_xy(r, theta):\r\n x = r*np.cos(theta)\r\n y = r*np.sin(theta)\r\n return x, y", "def coord_polar(mat):\n x = mat[:, 0].copy()\n y = mat[:, 1].copy()\n\n r = np.sqrt(x**2 + y**2)\n theta = np.arctan2(y, x)\n\n return r, theta", "def calculate_angles(self, x, y):\n Oimat = inv(self.Omat)\n Mat = self.pixel_size * inv(self.Dmat) * Oimat\n 
polar_angles = []\n azimuthal_angles = []\n for i in range(len(x)):\n peak = Oimat * (vec(x[i], y[i]) - self.Cvec)\n v = norm(Mat * peak)\n polar_angle = np.arctan(v / self.distance)\n polar_angles.append(polar_angle)\n azimuthal_angles.append(np.arctan2(-peak[1, 0], peak[2, 0]))\n return (np.array(polar_angles) * degrees,\n np.array(azimuthal_angles) * degrees)", "def calc_mainchain_bond_angle(self):\n aN = self.get_atom('N')\n aCA = self.get_atom('CA')\n aC = self.get_atom('C')\n aO = self.get_atom('O')\n aCB = self.get_atom('CB')\n\n naN = None\n naCA = None\n next_res = self.get_offset_residue(1)\n if next_res:\n naN = next_res.get_atom('N')\n naCA = next_res.get_atom('CA')\n\n N_CA_C = AtomMath.calc_angle(aN, aCA, aC)\n CA_C_O = AtomMath.calc_angle(aCA, aC, aO)\n N_CA_CB = AtomMath.calc_angle(aN, aCA, aCB)\n CB_CA_C = AtomMath.calc_angle(aCB, aCA, aC)\n CA_C_nN = AtomMath.calc_angle(aCA, aC, naN)\n C_nN_nCA = AtomMath.calc_angle(aC, naN, naCA)\n\n return (N_CA_C, N_CA_CB, CB_CA_C, CA_C_O, CA_C_nN, C_nN_nCA)", "def _rect_to_cyl_coords(self, x, y):\n theta = (np.pi * y) / (self.arch_radius * 2)\n y = self.arch_radius * np.sin(theta)\n z = self.arch_radius * np.cos(theta)\n return np.array([x, y, z])", "def _geodetic_to_cartesian(cls, lat, lon, alt):\n C = Earth.r / np.sqrt(1 - (Earth.e * np.sin(lat)) ** 2)\n S = Earth.r * (1 - Earth.e ** 2) / np.sqrt(1 - (Earth.e * np.sin(lat)) ** 2)\n r_d = (C + alt) * np.cos(lat)\n r_k = (S + alt) * np.sin(lat)\n\n norm = np.sqrt(r_d ** 2 + r_k ** 2)\n return norm * np.array(\n [np.cos(lat) * np.cos(lon), np.cos(lat) * np.sin(lon), np.sin(lat)]\n )", "def jointAngles(self,x,y,z):\n #Converts from arm tics to mm\n x = self.ticToMm(x)\n y = self.ticToMm(y)\n z = self.ticToMm(z)\n \n\n a = sqrt(x**2 + y**2)\n b = sqrt(a**2 + z**2)\n\n phi = atan2(z,a)\n psy = atan2(a,z)\n theta3 = acos(2 - b**2/375**2)\n chi = (pi - theta3)/2\n\n theta3 = theta3*(180/pi) #Elbow\n theta1 = atan(x/y)*(180/pi) #Base\n theta2 = (chi + phi)*(180/pi)+90 #Shoulder\n theta4 = (chi + psy)*(180/pi) #Wrist\n\n return [theta1,theta2,theta3,theta4]", "def cartesian2cylindrical(coords):\n cyl = np.zeros(coords.shape)\n cyl[:, 0] = np.sqrt(coords[:, 0] ** 2 + coords[:, 1] ** 2)\n cyl[:, 1] = np.arctan2(coords[:, 1], coords[:, 0])\n cyl[:, 2] = coords[:, 2]\n return cyl", "def CartesianToGalactic(Cartesian,SolarPosition): \n\t \n # x,y,z->l,b,s\n tmp1 = SolarPosition[0]-Cartesian[:,0]\n tmp2 = Cartesian[:,1]\n tmp3 = Cartesian[:,2]-SolarPosition[1]\n s = np.sqrt(tmp1*tmp1+tmp2*tmp2+tmp3*tmp3)\n l = np.arctan2(tmp2,tmp1)\n b = np.arcsin(tmp3/s)\n l[l<0.] 
+= 2.*np.pi; \n\n if(len(Cartesian[0,:])==3):\n Galactic = np.column_stack((l,b,s))\n else:\n \t # vx,vy,vz -> vlos,mu_lcos(b),mu_b\n vx = -Cartesian[:,3]-SolarPosition[2]\n vy = Cartesian[:,4]-SolarPosition[3]\n vz = Cartesian[:,5]-SolarPosition[4]\n cl = np.cos(l)\n sl = np.sin(l)\n cb = np.cos(b)\n sb = np.sin(b)\n vlos = vx*cl*cb+vy*sl*cb+vz*sb;\n mul = (-vx*sl+vy*cl)/(pm2vel*s)\n mub = (-vx*cl*sb-vy*sl*sb+vz*cb)/(pm2vel*s)\n Galactic = np.column_stack((l,b,s,vlos,mul,mub))\n \n return Galactic", "def PolarToCartesian(Polar):\n\t \n # R,phi,z -> x,y,z\n cp = np.cos(Polar[:,1])\n sp = np.sin(Polar[:,1])\n x = Polar[:,0] * cp\n y = Polar[:,0] * sp\n z = Polar[:,2]\n\n if (len(Polar[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n else:\n # vR,vphi,vz -> vx,vy,vz\n vx = Polar[:,3]*cp-Polar[:,4]*sp\n vy = Polar[:,4]*cp+Polar[:,3]*sp\n vz = Polar[:,5]\n Cartesian = np.column_stack((x,y,z,vx,vy,vz))\n \n return Cartesian", "def cartesianToSpherical(x=0, y=0, z=0):\n\n hxy = np.hypot(x, y)\n radius = np.hypot(hxy, z)\n altitude = np.arctan2(z, hxy)\n azimuth = np.arctan2(y, x)\n return altitude, azimuth, radius", "def angle(self):\n self.convert_window(\"Angle\", \"degree\", [\"arcminute\", \"arcsecond\", \"circle\", \"degree\", \"gon\", \"gradian\", \"mil(Nato)\", \"mil(Soviet Union)\", \"mil(Sweden)\", \"octant\", \"quadrant\", \"radian\", \"revolution\", \"sextant\", \"sign\", \"turn\"])", "def ecliptic_latlon(self):\n vector = _ECLIPJ2000.dot(self.position.au)\n d, lat, lon = to_polar(vector)\n return (Angle(radians=lat, signed=True),\n Angle(radians=lon),\n Distance(au=d))" ]
[ "0.68272", "0.68232924", "0.6670016", "0.6668133", "0.6666709", "0.66622704", "0.6632937", "0.6616939", "0.6595138", "0.6567005", "0.6544198", "0.65291274", "0.6471032", "0.6468995", "0.63908124", "0.63474804", "0.63316333", "0.63281065", "0.631877", "0.6312516", "0.6256514", "0.6237257", "0.6228126", "0.62179005", "0.6212803", "0.6207163", "0.6195567", "0.61545914", "0.610239", "0.60956603", "0.6085277", "0.60794514", "0.60555583", "0.6052241", "0.60416764", "0.60200626", "0.6007307", "0.5945415", "0.59411275", "0.592603", "0.5917811", "0.59048265", "0.5902513", "0.5899148", "0.58701605", "0.5867562", "0.58280647", "0.5826497", "0.58172655", "0.5795465", "0.5781731", "0.5780374", "0.5777003", "0.57670504", "0.57616895", "0.574858", "0.5747188", "0.57471544", "0.5738561", "0.5711256", "0.57022804", "0.56996757", "0.56910187", "0.5688214", "0.5680279", "0.56754863", "0.56717694", "0.56684625", "0.566797", "0.56673235", "0.56646144", "0.5663905", "0.56442565", "0.5644089", "0.56414485", "0.56199974", "0.56193393", "0.56185967", "0.56165487", "0.5609131", "0.5596069", "0.5593049", "0.5581202", "0.5573643", "0.5562718", "0.5554569", "0.5552169", "0.5537279", "0.55350053", "0.5533976", "0.5529308", "0.55219424", "0.5519424", "0.5517765", "0.55154616", "0.5511811", "0.55099684", "0.5507944", "0.55070925", "0.55067813" ]
0.68568003
0
Opens the molecule in VMD
def showMolecule(self, colorBy=None, label=False, dcdFN=None): # Write PDB file # To set Occupancy, change atom.occupancy # To set Beta, change atom.temperature_factor import os.path pdbFN = os.path.join(MMTK.Database.molecule_types.directory, 'showMolecule.pdb') outF = MMTK.PDB.PDBOutputFile(pdbFN) outF.write(self.molecule) outF.close() # Write VMD script script = 'set ligand [mol new ' + pdbFN + ']\n' if colorBy is not None: script += 'mol modcolor 0 $ligand ' + colorBy + '\n' script += 'mol modstyle 0 0 CPK 1.000000 0.300000 10.000000 10.000000\n' if label: script += """ proc label_atoms { molid seltext } { set sel [atomselect $molid $seltext] set atomlist [$sel list] foreach {atom} $atomlist { set atomlabel [format "%d/%d" $molid $atom] label add Atoms $atomlabel } $sel delete } label_atoms 0 all """ if dcdFN is not None: script += 'animate delete all $ligand\n' script += 'mol addfile ' + dcdFN + ' type dcd waitfor all\n' scriptF = open('showMolecule.vmd', 'w') scriptF.write(script) scriptF.close() # Find and run vmd import AlGDock vmdCommand = AlGDock.findPath(AlGDock.search_paths['vmd']) import subprocess subprocess.call([vmdCommand, '-e', 'showMolecule.vmd']) # Remove files os.remove(pdbFN) os.remove('showMolecule.vmd')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def viewNMDinVMD(filename):\n\n vmd = pathVMD()\n if vmd:\n os.system('{0} -e {1}'.format(vmd, abspath(filename)))", "def _vmd_script_molecule(mole, filename=\"molecule.xyz\"):\n output = \"# load new molecule\\n\"\n if len(mole.atom) == 0:\n raise ValueError(\"Need at least one molecule file with coordinates.\")\n atoms = mole.atom\n natoms = len(mole.atom[0:, 0])\n f = open(filename, \"w\")\n f.write(str(natoms) + \"\\n\\n\")\n for i in range(0, natoms):\n symb = str(atoms[i, 0])\n coord = \" \".join(map(str, atoms[i, 1].tolist()))\n f.write(symb + \" \" + coord + \"\\n\")\n f.close()\n output += (\n \"mol {0} {1} type {2} first 0 last -1 step 1 filebonds 1 autobonds 1 waitfor all\"\n \"\\n\".format(\"new\", filename, \"{xyz}\")\n )\n output += \"#\\n\" \"# representation of the atoms\\n\"\n output += \"mol representation CPK 1.000000 0.300000 118.000000 131.000000\\n\"\n output += (\n \"mol delrep 0 top\\n\"\n \"mol color Element\\n\"\n \"mol selection {{all}}\\n\"\n \"mol material Opaque\\n\"\n \"mol addrep top\\n\"\n \"#\\n\"\n )\n return output", "def open(self):\n self._command = \"open\"", "def open(self):\n try:\n self.handle = self.rm.get_instrument(self.visaName)\n self.handle.write('*RST') #reset device to default\n time.sleep(.5)\n self.handle.write(':FORM:DATA ASC') #return ASCII\n except Exception:\n print('Dvm34411.open() failed !')\n raise\n return True", "def open_database(app):\n app.status.message(\"Opening DICOM folder..\")\n path = app.dialog.directory(\"Select a DICOM folder\")\n if path == '':\n app.status.message('') \n return\n app.status.cursorToHourglass()\n app.close()\n app.open(path)\n app.status.hide()\n app.status.cursorToNormal()", "def OpenDicomSerie(dirname=None):\n\tglobal volume, dim_x, dim_y, dim_z, spacing, origin, CT_open, filename_CT, dir_ini\n ct_swapY, ct_swapZ = False, False\n \n\tprint 'Opening DICOM serie ... 
'\n\n\t# Opening file\n\tif(dirname==None):\n\t\tfile_path = tkFileDialog.askopenfilename(initialdir = dir_ini, filetypes = [('DICOM files', '*.dcm')])\n\t\tfilelist = os.listdir(os.path.dirname(file_path))\n\telse:\n\t\tfilelist = os.listdir(dirname)\n\t\tfile_path = dirname + filelist[0]\n\n\tfilename_CT = file_path\n dir_ini = str(file_path.rsplit('/', 1)[0])+'/'\n\n\t# Getting dimensions\n\tds = pydicom.read_file(file_path)\n\tsp = ds.PixelSpacing\n\tds.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian\n\tct_swapZ =(ds.ImageOrientationPatient[0:3] == [1, 0, 0])\n\tct_swapY =(ds.ImageOrientationPatient[3:6] == [0, 1, 0])\n\n dim_x = 0\n for f in filelist:\n if f.endswith(\".dcm\"): dim_x = dim_x + 1 \n\n\tdim_y, dim_z = np.shape(ds.pixel_array)[1], np.shape(ds.pixel_array)[0]\n \n\tvolume = np.zeros((dim_x, dim_y,dim_z))\n slicelocation = np.zeros(dim_x)\n\n\t# creating volume\n\tfor f,i in zip(filelist,range(dim_x)):\n\t\tif f.endswith(\".dcm\"):\n\t\t\tds = pydicom.read_file(os.path.dirname(file_path)+'/'+f)\n\t\t\tds.file_meta.transfersyntaxuid = pydicom.uid.ImplicitVRLittleEndian \n\t\t\tvolume[i,:,:] = ds.pixel_array\n\t\t\tif('slicelocation' in ds):\tslicelocation[i] = ds.SliceLocation\n\t\t\telse:\tslicelocation[i] = ds.ImagePositionPatient[2]\n \n\torder = np.argsort(slicelocation)\n slicelocation = slicelocation[order] # slicelocation is now sorted\n \n\tspacing = [float(slicelocation[1] - slicelocation[0]),float(sp[1]), float(sp[0])]\n\torigin = [float(slicelocation[0]),float(ds.ImagePositionPatient[1]),float(ds.ImagePositionPatient[0])]\n\tvolume = volume[order,:,:] # volume is now sorted\n\n\tif (\"RescaleSlope\" in ds):\tvolume = float(ds.RescaleSlope)*volume\n\tif (\"RescaleIntercept\" in ds):\tvolume = volume + float(ds.RescaleIntercept)\n\n\t# Dealing with image orientation\n print ' ct_swapY, ct_swapZ :', ct_swapY, ct_swapZ\n\tif(ct_swapY == True):\n volume = np.flip(volume,1) # flip volume, Y direction\n origin[1] = origin[1] + dim_y*spacing[1] \n if(ct_swapZ == True):\n volume = np.flip(volume,2) # flip volume, Z direction\n origin[2] = origin[2] + dim_z*spacing[2] \n if(ct_swapZ == True)and(ct_swapY == True): spacing[1], spacing[2] = spacing[2], spacing[1]\n\n\tSet_axes_lim_init()\n\tSet_scales()\n\tCT_open = True\n\tUpdate_all()\n\n\tprint(' file successfully opened!')", "def open_idf(self):\n\n self.save()\n\n filepath = self.idfname\n\n import os\n import platform\n import subprocess\n\n if platform.system() == \"Darwin\": # macOS\n subprocess.call((\"open\", filepath))\n elif platform.system() == \"Windows\": # Windows\n os.startfile(filepath)\n else: # linux variants\n subprocess.call((\"xdg-open\", filepath))", "def on_open_uv_editor():\n cmds.TextureViewWindow()", "def open(self):\n super(Nodzgraph, self).open(dockable=self.configuration.maya.docked,\n area=self.configuration.maya.dock_area,\n allowedArea=self.configuration.maya.allowed_dock_areas,\n floating=self.configuration.maya.floating,\n width=self.configuration.maya.width,\n height=self.configuration.maya.height\n )", "def open(self) -> None:", "def open(self) -> None:", "def open(self) -> None:", "def open(self):\r\n pass", "def open(self):\r\n pass", "def dicom_cli():", "def open( self ):\n pass", "def open(self):", "def open(self):\n raise NotImplementedError(\"Implement this method in child class\")", "def Open(self):\n return True", "def Open(self):\n return True", "def open(file):\n args = {\"file\": file}\n send_command(\"open\", args)", "def open(self):\n pass", "def 
open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n self._isOpen = True", "def runOpenMM(parm, inpcrdFile, system, rad, K, Indices, solvate, out_dcd, out_csv, out_rst ):\n\n \n def newIntegrator():\n integrator = mm.LangevinIntegrator(\n 300.0 * u.kelvin,\n 10.0 / u.picosecond,\n 2.0 * u.femtosecond)\n return integrator\n\n def pmdStructureToOEMol(parm, resname):\n\n from oeommtools.utils import openmmTop_to_oemol\n mask = \"!(:%s)\" %resname\n structure_LIG = parmed.load_file( '../2gmx_wat.prmtop', xyz = '../equilibration/rst/step8.rst.125000' )\n structure_LIG.strip(mask)\n pos = structure_LIG.positions\n top = structure_LIG.topology\n molecule = openmmTop_to_oemol(top, pos, verbose=False)\n OEPerceiveBondOrders(molecule)\n OEAssignAromaticFlags(molecule)\n OEFindRingAtomsAndBonds(molecule)\n\n return molecule\n \n def getAtomIndices( structure, resname ):\n \"\"\"\n Get atom indices of a ligand from ParmEd Structure.\n Arguments\n ---------\n resname : str\n String specifying the resiue name of the ligand.\n structure: parmed.Structure\n ParmEd Structure object of the atoms to be moved.\n Returns\n -------\n atom_indices : list of ints\n list of atoms in the coordinate file matching lig_resname\n \"\"\"\n atom_indices_ligand = []\n topology = structure.topology\n for atom in topology.atoms():\n if str(resname) in atom.residue.name:\n atom_indices_ligand.append(atom.index)\n\n return atom_indices_ligand\n\n\n \"\"\"\n Rotate the torsion to an angle rad using openeye toolkits\n \"\"\" \n molecule = pmdStructureToOEMol( parm, \"LIG\" )\n atom_indices_ligand = getAtomIndices( parm, \"LIG\" )\n\n\n dihedral_atoms = [\"C10\", \"C9\", \"C3\", \"C2\" ]\n atom1 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[0]))\n atom2 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[1]))\n atom3 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[2]))\n atom4 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[3]))\n if OESetTorsion(molecule, atom1, atom2, atom3, atom4, rad ) == False :\n print(\"Torsional bond couldn't be rotated. 
Please enter correct atoms!\"); \n exit()\n\n # Update ligand positions in nc_sim\n updated_pos = molecule.GetCoords()\n\n for index, atomidx in enumerate(atom_indices_ligand): \n parm.positions[atomidx] = np.array(updated_pos[index])*u.nanometers\n\n \"\"\"\n harmonically restrain dihedral angle\n see units, http://docs.openmm.org/6.3.0/userguide/theory.html\n \"\"\"\n pi = np.pi\n harmonic = mm.CustomTorsionForce(\"k*min(dtheta, 2*pi-dtheta)^2; dtheta = abs(theta-theta0); pi = %.5f\" % pi);\n harmonic.addPerTorsionParameter(\"theta0\");\n harmonic.addPerTorsionParameter(\"k\");\n system.addForce(harmonic)\n harmonic.addTorsion(Indices[0], Indices[1], Indices[2], Indices[3], (rad, K))\n\n # Restraint non-moving part of the ligand\n restraintWt = 200 #kcal/mol/A2\n # define the custom force to restrain atoms to their starting positions\n force_restr = mm.CustomExternalForce('k_restr*periodicdistance(x, y, z, x0, y0, z0)^2')\n # Add the restraint weight as a global parameter in kcal/mol/A^2\n force_restr.addGlobalParameter(\"k_restr\", restraintWt*u.kilocalories_per_mole/u.angstroms**2)\n # Define the target xyz coords for the restraint as per-atom (per-particle) parameters\n force_restr.addPerParticleParameter(\"x0\")\n force_restr.addPerParticleParameter(\"y0\")\n force_restr.addPerParticleParameter(\"z0\")\n alch_list = ['C9', 'H92', 'H93', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H1', 'H2', 'H4', 'H5', 'H6']\n for idx, atom_crd in enumerate( parm.positions ):\n name=parm.atoms[idx].name;\n resname=parm.atoms[idx].residue.name;\n if resname == \"LIG\":\n if not name in alch_list:\n xyz = parm.positions[idx].in_units_of(u.nanometers)/u.nanometers\n force_restr.addParticle(idx, xyz)\n system.addForce( force_restr )\n\n # build simulaion\n platform = mm.Platform.getPlatformByName('CUDA')\n integ1 = newIntegrator()\n simulation = app.Simulation(parm.topology, system, integ1)\n simulation.context.setPositions( parm.positions )\n\n # Set Box dimensions\n inpcrd = app.AmberInpcrdFile( inpcrdFile );\n if inpcrd.boxVectors is not None:\n simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)\n\n print('RESTARTING simulation from a previous State..........%s' %inpcrdFile)\n velocities = parm.velocities \n simulation.context.setVelocities( inpcrd.velocities ) \n\n # perform minimization\n print('Minimizing...')\n simulation.minimizeEnergy( tolerance = 0.5 * kilojoule/mole )\n \n # adding simulation reporters\n simulation.context.setVelocitiesToTemperature(300*u.kelvin)\n simulation.reporters.append(app.DCDReporter(out_dcd, 1000))\n simulation.reporters.append(app.StateDataReporter(csv_file, 1000, step=True, potentialEnergy=True, totalEnergy=True, volume=True,temperature=True, separator='\\t'))\n restrt = RestartReporter( out_rst, 10000000, parm.ptr('natom') );\n state = simulation.context.getState(getPositions=True, getEnergy=True, getVelocities=True, enforcePeriodicBox=True)\n restrt.report(simulation, state)\n\n\n print('Production run at NVT...')\n simulation.step(5000000) # 10 ns\n \n # saving last restart\n state = simulation.context.getState(getPositions=True, getEnergy=True, getVelocities=True, enforcePeriodicBox=True)\n restrt.report(simulation, state)\n return", "def open(self):\n self.solenoid.set(self.OPEN)", "def _load_molecule(self):\n self.pymol = pybel.readstring(self.input_format, self.file_dic['input'])", "def OpenDosi(filename=None):\n\tglobal dosi, spacing_dosi, dim_x_dosi, dim_y_dosi, dim_z_dosi, dosi_open, isodose_show, origin_dosi, filename_dosi\n\tdosi_swapY,dosi_swapZ = 
False, False\n\n\ttypes = [('All files', '*.dcm *.mhd'), ('DCM files', '*.dcm'), ('MHD files', '*.mhd')]\n\n\tif(filename==None):\tfile_path = tkFileDialog.askopenfilename(initialdir = dir_ini, filetypes = types)\n\telse:\tfile_path = filename\n\n\tfilename_dosi = file_path\n\n\tprint('Opening RD file ...')\n\n\t### .dcm file ###\n\tif(file_path.endswith('.dcm')):\n\t\tds = pydicom.read_file(file_path)\n\t\tds.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian \n\t\tscaling_dosi = float(ds.DoseGridScaling)\n\t\tdosi = scaling_dosi*ds.pixel_array\n\t\tsp = ds.PixelSpacing\n\t\tspacing_dosi = [ float(ds.GridFrameOffsetVector[1] - ds.GridFrameOffsetVector[0]), float(sp[1]),float(sp[0])]\n\t\torigin_dosi = ds.ImagePositionPatient\n\t\torigin_dosi = [float(origin_dosi[2]),float(origin_dosi[1]),float(origin_dosi[0])]\n\t\tdosi_swapZ =(ds.ImageOrientationPatient[0:3] == [1, 0, 0])\n dosi_swapY =(ds.ImageOrientationPatient[3:6] == [0, 1, 0])\n\n\t\t#if ds.SeriesDescription=='PatientLETScorer [MeV/mm/(g/cm3)]':\tSetIntensityRange(dosi,0,15)\n\n\t### .mhd file ###\n\tif(file_path.endswith('.mhd')):\t\n \t\titkimage = sitk.ReadImage(file_path) \t\t\t\t# Reads the image using SimpleITK\n \t\tdosi = sitk.GetArrayFromImage(itkimage)\n\t\tspacing_dosi = np.array(list(reversed(itkimage.GetSpacing()))) \t# Read the spacing along each dimension\n\t\torigin_dosi = np.array(list(reversed((itkimage.GetOrigin()))))\t\t# Read the origin\n\t\ttext_file = open(file_path, \"r\")\n\t\ttmp = text_file.readlines()\n\t\tdosi_swap = (tmp[8][-4:-1] == 'RAI')\n\n\tif(len(np.shape(volume))==3):\tdim_x_dosi, dim_y_dosi, dim_z_dosi = np.shape(dosi)[0], np.shape(dosi)[1], np.shape(dosi)[2]\n\n\tif(len(np.shape(volume))==2):\tdim_x_dosi, dim_y_dosi, dim_z_dosi = np.shape(dosi)[0], np.shape(dosi)[1], 1\n\n\t#print 'dosi type', dosi.dtype\n\t\n\t# Dealing with image orientation\n\tif(dosi_swapY == True):\n\t\tdosi = np.flip(dosi,1) # flip volume\n\t\torigin_dosi[1] = origin_dosi[1] + dim_y_dosi*spacing_dosi[1]\t\t\n\tif(dosi_swapZ == True):\n\t\tdosi = np.flip(dosi,2) # flip volume\n\t\torigin_dosi[2] = origin_dosi[2] + dim_z_dosi*spacing_dosi[2]\n\tif(dosi_swapY == True)and(dosi_swapZ == True):\n\t\tspacing_dosi[1], spacing_dosi[2] = spacing_dosi[2], spacing_dosi[1]\n\n print ' dosi_swapY, dosi_swapZ :', dosi_swapY, dosi_swapZ\n\n\tdosi_open = True\n\tisodose_show = True\n\tcheck1.select()\n\tUpdate_all()\n\n\tprint(' file successfully opened!')", "def vol_open_path(volpath, open_flags=VMDK_OPEN_DEFAULT):\n dhandle = get_uint(0)\n ihandle = get_uint(0)\n key = c_uint32(0)\n\n res = lib.DiskLib_OpenWithInfo(volpath.encode(), open_flags,\n byref(key), byref(dhandle),\n byref(ihandle))\n if res != 0:\n logging.warning(\"Open %s failed - %x\", volpath, res)\n return dhandle", "def open(self) -> None:\n\n raise NotImplementedError", "def open(self) -> None:\n pass", "def open(self) -> None:\n if not self.__opened:\n if self.path is None:\n self.path = HID.enumerate_devices(self.vendor_id)[0]\n self.device.open_path(self.path)\n self.device.set_nonblocking(True)\n self.__opened = True", "def open(self):\n raise NotImplementedError( 'Needs implementation' )", "def open_new(cls,\n fname,\n natom,\n box,\n crds=True,\n vels=False,\n frcs=False,\n remd=None,\n remd_dimension=None,\n title='',\n protocolWork=False,\n alchemicalLambda=False):\n inst = cls(fname, 'w')\n ncfile = inst._ncfile\n if remd is not None:\n if remd[0] in 'Tt':\n inst.remd = 'TEMPERATURE'\n elif remd[0] in 'Mm':\n inst.remd = 'MULTI'\n if 
remd_dimension is None:\n raise ValueError('remd_dimension must be given ' 'for multi-D REMD')\n inst.remd_dimension = int(remd_dimension)\n else:\n raise ValueError('remd must be T[emperature] or M[ultiD]')\n else:\n inst.remd = None\n inst.hasbox = bool(box)\n inst.hasvels = bool(vels)\n inst.hascrds = bool(crds)\n inst.hasfrcs = bool(frcs)\n\n inst.hasprotocolWork = bool(protocolWork)\n inst.hasalchemicalLambda = bool(alchemicalLambda)\n\n # Assign the main attributes\n ncfile.Conventions = \"AMBER\"\n ncfile.ConventionVersion = \"1.0\"\n ncfile.application = \"AmberTools\"\n ncfile.program = \"ParmEd\"\n ncfile.programVersion = parmed.__version__\n ncfile.title = \"ParmEd-created trajectory\"\n inst.Conventions = \"AMBER\"\n inst.ConventionVersion = \"1.0\"\n inst.application = \"AmberTools\"\n inst.program = \"ParmEd\"\n inst.programVersion = parmed.__version__\n inst.title = ncfile.title\n # Create the dimensions\n ncfile.createDimension('frame', None)\n ncfile.createDimension('spatial', 3)\n ncfile.createDimension('atom', natom)\n if inst.remd == 'MULTI':\n ncfile.createDimension('remd_dimension', inst.remd_dimension)\n inst.frame, inst.spatial, inst.atom = None, 3, natom\n if inst.hasbox:\n ncfile.createDimension('cell_spatial', 3)\n ncfile.createDimension('cell_angular', 3)\n ncfile.createDimension('label', 5)\n inst.cell_spatial, inst.cell_angular, inst.label = 3, 3, 5\n # Create the variables and assign units and scaling factors\n v = ncfile.createVariable('spatial', 'c', ('spatial', ))\n v[:] = np.asarray(list('xyz'))\n if inst.hasbox:\n v = ncfile.createVariable('cell_spatial', 'c', ('cell_spatial', ))\n v[:] = np.asarray(list('abc'))\n v = ncfile.createVariable('cell_angular', 'c', (\n 'cell_angular',\n 'label',\n ))\n v[:] = np.asarray([list('alpha'), list('beta '), list('gamma')])\n v = ncfile.createVariable('time', 'f', ('frame', ))\n v.units = 'picosecond'\n if inst.hascrds:\n v = ncfile.createVariable('coordinates', 'f', ('frame', 'atom', 'spatial'))\n v.units = 'angstrom'\n inst._last_crd_frame = 0\n if inst.hasvels:\n v = ncfile.createVariable('velocities', 'f', ('frame', 'atom', 'spatial'))\n v.units = 'angstrom/picosecond'\n inst.velocity_scale = v.scale_factor = 20.455\n inst._last_vel_frame = 0\n if nc is not None:\n v.set_auto_maskandscale(False)\n if inst.hasfrcs:\n v = ncfile.createVariable('forces', 'f', ('frame', 'atom', 'spatial'))\n v.units = 'kilocalorie/mole/angstrom'\n inst._last_frc_frame = 0\n if inst.hasbox:\n v = ncfile.createVariable('cell_lengths', 'd', ('frame', 'cell_spatial'))\n v.units = 'angstrom'\n v = ncfile.createVariable('cell_angles', 'd', ('frame', 'cell_angular'))\n v.units = 'degree'\n inst._last_box_frame = 0\n if inst.remd == 'TEMPERATURE':\n v = ncfile.createVariable('temp0', 'd', ('frame', ))\n v.units = 'kelvin'\n inst._last_remd_frame = 0\n elif inst.remd == 'MULTI':\n ncfile.createVariable('remd_indices', 'i', ('frame', 'remd_dimension'))\n ncfile.createVariable('remd_dimtype', 'i', ('remd_dimension', ))\n inst._last_remd_frame = 0\n\n inst._last_time_frame = 0\n\n if inst.hasprotocolWork:\n v = ncfile.createVariable('protocolWork', 'f', ('frame', ))\n v.units = 'kT'\n inst._last_protocolWork_frame = 0\n\n if inst.hasalchemicalLambda:\n v = ncfile.createVariable('alchemicalLambda', 'f', ('frame', ))\n v.units = 'unitless'\n inst._last_alchemicalLambda_frame = 0\n\n return inst", "def open_new(cls,\n fname,\n natom,\n box,\n crds=True,\n vels=False,\n frcs=False,\n remd=None,\n remd_dimension=None,\n title='',\n 
protocolWork=False,\n alchemicalLambda=False):\n inst = cls(fname, 'w')\n ncfile = inst._ncfile\n if remd is not None:\n if remd[0] in 'Tt':\n inst.remd = 'TEMPERATURE'\n elif remd[0] in 'Mm':\n inst.remd = 'MULTI'\n if remd_dimension is None:\n raise ValueError('remd_dimension must be given ' 'for multi-D REMD')\n inst.remd_dimension = int(remd_dimension)\n else:\n raise ValueError('remd must be T[emperature] or M[ultiD]')\n else:\n inst.remd = None\n inst.hasbox = bool(box)\n inst.hasvels = bool(vels)\n inst.hascrds = bool(crds)\n inst.hasfrcs = bool(frcs)\n\n inst.hasprotocolWork = bool(protocolWork)\n inst.hasalchemicalLambda = bool(alchemicalLambda)\n\n # Assign the main attributes\n ncfile.Conventions = \"AMBER\"\n ncfile.ConventionVersion = \"1.0\"\n ncfile.application = \"AmberTools\"\n ncfile.program = \"ParmEd\"\n ncfile.programVersion = parmed.__version__\n ncfile.title = \"ParmEd-created trajectory\"\n inst.Conventions = \"AMBER\"\n inst.ConventionVersion = \"1.0\"\n inst.application = \"AmberTools\"\n inst.program = \"ParmEd\"\n inst.programVersion = parmed.__version__\n inst.title = ncfile.title\n # Create the dimensions\n ncfile.createDimension('frame', None)\n ncfile.createDimension('spatial', 3)\n ncfile.createDimension('atom', natom)\n if inst.remd == 'MULTI':\n ncfile.createDimension('remd_dimension', inst.remd_dimension)\n inst.frame, inst.spatial, inst.atom = None, 3, natom\n if inst.hasbox:\n ncfile.createDimension('cell_spatial', 3)\n ncfile.createDimension('cell_angular', 3)\n ncfile.createDimension('label', 5)\n inst.cell_spatial, inst.cell_angular, inst.label = 3, 3, 5\n # Create the variables and assign units and scaling factors\n v = ncfile.createVariable('spatial', 'c', ('spatial',))\n v[:] = np.asarray(list('xyz'))\n if inst.hasbox:\n v = ncfile.createVariable('cell_spatial', 'c', ('cell_spatial',))\n v[:] = np.asarray(list('abc'))\n v = ncfile.createVariable('cell_angular', 'c', (\n 'cell_angular',\n 'label',\n ))\n v[:] = np.asarray([list('alpha'), list('beta '), list('gamma')])\n v = ncfile.createVariable('time', 'f', ('frame',))\n v.units = 'picosecond'\n if inst.hascrds:\n v = ncfile.createVariable('coordinates', 'f', ('frame', 'atom', 'spatial'))\n v.units = 'angstrom'\n inst._last_crd_frame = 0\n if inst.hasvels:\n v = ncfile.createVariable('velocities', 'f', ('frame', 'atom', 'spatial'))\n v.units = 'angstrom/picosecond'\n inst.velocity_scale = v.scale_factor = 20.455\n inst._last_vel_frame = 0\n if nc is not None:\n v.set_auto_maskandscale(False)\n if inst.hasfrcs:\n v = ncfile.createVariable('forces', 'f', ('frame', 'atom', 'spatial'))\n v.units = 'kilocalorie/mole/angstrom'\n inst._last_frc_frame = 0\n if inst.hasbox:\n v = ncfile.createVariable('cell_lengths', 'd', ('frame', 'cell_spatial'))\n v.units = 'angstrom'\n v = ncfile.createVariable('cell_angles', 'd', ('frame', 'cell_angular'))\n v.units = 'degree'\n inst._last_box_frame = 0\n if inst.remd == 'TEMPERATURE':\n v = ncfile.createVariable('temp0', 'd', ('frame',))\n v.units = 'kelvin'\n inst._last_remd_frame = 0\n elif inst.remd == 'MULTI':\n ncfile.createVariable('remd_indices', 'i', ('frame', 'remd_dimension'))\n ncfile.createVariable('remd_dimtype', 'i', ('remd_dimension',))\n inst._last_remd_frame = 0\n\n inst._last_time_frame = 0\n\n if inst.hasprotocolWork:\n v = ncfile.createVariable('protocolWork', 'f', ('frame',))\n v.units = 'kT'\n inst._last_protocolWork_frame = 0\n\n if inst.hasalchemicalLambda:\n v = ncfile.createVariable('alchemicalLambda', 'f', ('frame',))\n v.units = 
'unitless'\n inst._last_alchemicalLambda_frame = 0\n\n return inst", "def test_cdrom(self):\n self.command.package = self.input_ovf\n self.command.file_path = \"input.iso\"\n self.command.run()\n self.command.finished()\n self.check_diff(\"\"\"\n <ovf:File ovf:href=\"input.vmdk\" ovf:id=\"file1\" ovf:size=\"{vmdk_size}\" />\n- <ovf:File ovf:href=\"input.iso\" ovf:id=\"file2\" ovf:size=\"{iso_size}\" />\n <ovf:File ovf:href=\"sample_cfg.txt\" ovf:id=\"textfile\" \\\novf:size=\"{cfg_size}\" />\n...\n <rasd:ElementName>CD-ROM 1</rasd:ElementName>\n- <rasd:HostResource>ovf:/file/file2</rasd:HostResource>\n <rasd:InstanceID>7</rasd:InstanceID>\n\"\"\".format(vmdk_size=self.FILE_SIZE['input.vmdk'],\n iso_size=self.FILE_SIZE['input.iso'],\n cfg_size=self.FILE_SIZE['sample_cfg.txt']))\n self.assertFalse(os.path.exists(os.path.join(self.temp_dir,\n \"input.iso\")),\n \"deleted file should not be exported\")", "def _vmd_script_start():\n return (\n \"#!/usr/local/bin/vmd\\n\"\n \"# VMD version: 1.8.6\\n\"\n \"#\\n\"\n \"# Display settings\\n\"\n \"display projection Perspective\\n\"\n \"display nearclip set 0.000000\\n\"\n \"display shadow off\\n\"\n \"color Element {C} silver\\n\"\n \"color Element {Cl} green\\n\"\n \"axes location Off\\n\"\n \"color Display Background white\\n\"\n \"light 2 on\\n\"\n \"light 3 on\\n\"\n \"#\\n\"\n )", "def _run_openmoc(self):\n\n # Segmentize over the geometry with a fine and coarse cmfd mesh\n for m in [3, 51]:\n\n # Overlay simple CMFD mesh\n self._result += '{0} x {0} CMFD mesh\\n'.format(m)\n geometry = self.input_set.geometry\n cmfd = openmoc.Cmfd()\n cmfd.setLatticeStructure(m, m)\n geometry.setCmfd(cmfd)\n geometry.initializeCmfd()\n\n # Track over the composite geometry\n super(TrackingGridCMFDTestHarness, self)._run_openmoc()", "def open_editor(event):\n event.current_buffer.open_in_editor(event.cli)", "def open(self):\n file = askopenfilename(\n initialdir=self.initial_directory,\n filetypes=(\n (\"Audio Video Interleave\", \"*.avi\"),\n (\"Matroska\", \"*.mkv\"),(\"MPEG-4 AVC\",\"*.mp4\"),\n )\n )\n if isinstance(file, tuple):\n return\n if os.path.isfile(file):\n self.play_film(file)", "def open_mix(remote, path):\n cmd = mmapi.StoredCommands()\n cmd.AppendSceneCommand_OpenMixFile(path)\n remote.runCommand(cmd)", "def open_last_simulation(self):\n\n filepath, *_ = self.simulation_dir.files(\"*.idf\")\n\n import os\n import platform\n import subprocess\n\n if platform.system() == \"Darwin\": # macOS\n subprocess.call((\"open\", filepath))\n elif platform.system() == \"Windows\": # Windows\n os.startfile(filepath)\n else: # linux variants\n subprocess.call((\"xdg-open\", filepath))", "def performOpen(self, options={}):\n self.switch = USB_Digital_Switch() # Create an instance of the switch class\n self.serial_number = str(self.getAddress())\n\n status = self.establish_connection() # Connect the switch (pass the serial number as an argument if required)\n if status > 0:\n resp = self.switch.Send_SCPI(\":MN?\", \"\") # Read model name\n self.model_number = str(resp[2])\n self.setModel(self.model_number)\n self.log(self.model_number, level = 30)", "def open_file(self):\n if not self.loaded:\n self.load()\n\n # call a plugin action to perform the open action\n from cviewer.plugins.cff2.actions.actions import OpenFile", "def do_open_dome(self, *arg):\n if not self.is_setup:\n return\n if not self.pocs.observatory.has_dome:\n print_warning('There is no dome.')\n return\n if not self.pocs.is_weather_safe():\n print_warning('Weather conditions are not good, not 
opening dome.')\n return\n try:\n if self.pocs.observatory.open_dome():\n print_info('Opened the dome.')\n else:\n print_warning('Failed to open the dome.')\n except Exception as e:\n print_warning('Problem opening the dome: {}'.format(e))", "def open(self):\n raise NotImplementedError", "def open(self):\n raise NotImplementedError", "def performOpen(self, options={}):\n \n self.establish_connection() \n self.setModel(self.model_number)\n\n self.log(self.model_number, level = 30)\n \n self.switch_id_lsb_map = {'A': 0, 'B': 1, 'C': 2, 'D': 3,\n 'E': 4, 'F': 5, 'G': 6, 'H': 7}\n #except Exception as e:\n #msg = str(e)\n #raise InstrumentDriver.CommunicationError(msg)", "def _openButton(self):\n #get the specified file\n selected_file = self.view.list.getSelected()\n\n if selected_file:\n self.model.open(selected_file)\n return\n\n #prompt if they really want to open maya\n dialogs = Dialogs(self.view)\n\n msg = 'No file selected!'\n msg += '\\n\\nAre you sure you want to open maya without a file?'\n dialogs.confirmPrompt(msg)\n\n self.model.open()", "def open(self) -> None:\n raise NotImplementedError()", "async def open(self):\n pass", "def action_open(self, file_location):\n tmp_in_file = file_location\n if not tmp_in_file:\n return\n self.in_file = tmp_in_file\n\n print(\"Current 360 video: %s\" % ntpath.basename(self.in_file))\n\n console = Console()\n parsed_metadata = metadata_utils.parse_metadata(self.in_file,\n console.append)\n\n metadata = None\n audio_metadata = None\n if parsed_metadata:\n metadata = parsed_metadata.video\n audio_metadata = parsed_metadata.audio\n\n for line in console.log:\n if \"Error\" in line:\n print(\"Failed to load file %s\"\n % ntpath.basename(self.in_file))\n self.var_spherical = False\n self.var_spatial_audio = False\n return\n\n if audio_metadata:\n print(audio_metadata.get_metadata_string())", "def chooseOpenFile(self):\n fname = QFileDialog.getOpenFileName(self, 'Open file',\n filter=\"Meshes (*.stl)\")\n if fname[0] == '':\n return\n name = fname[0][:-4].split('/')[-1]\n self.files[name] = AmpObject(fname[0], 'limb')\n amp = self.files[name]\n amp.addActor()\n amp.tform = vtk.vtkTransform()\n amp.tform.PostMultiply()\n amp.actor.SetUserTransform(amp.tform)\n# amp.centre()\n self.fileManager.addRow(name, amp)\n self.display()\n self.filesDrop.append(name)\n if hasattr(self, 'alCont'):\n self.alCont.getNames()\n if hasattr(self, 'regCont'):\n self.regCont.getNames()", "def bfm_open ( cid=0\n , rigor=False\n , verbose=False ):\n global _cosim\n global _cid\n _bfm_open = WrapFunction( _cosim\n , 'bfm_open'\n , ctypes.c_int\n ,[ctypes.c_int])\n ret = _bfm_open( cid )\n if ret: _cid = cid\n return ret", "def openmm_system(self):\n\n # Load the initial coords into the system and initialise\n pdb = app.PDBFile(self.pdb)\n forcefield = app.ForceField(self.xml)\n modeller = app.Modeller(pdb.topology, pdb.positions) # set the initial positions from the pdb\n self.system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.NoCutoff, constraints=None)\n\n # Check what combination rule we should be using from the xml\n xmlstr = open(self.xml).read()\n # check if we have opls combination rules if the xml is present\n try:\n self.combination = ET.fromstring(xmlstr).find('NonbondedForce').attrib['combination']\n except AttributeError:\n pass\n except KeyError:\n pass\n\n if self.combination == 'opls':\n print('OPLS combination rules found in xml file')\n self.opls_lj()\n\n temperature = constants.STP * unit.kelvin\n integrator = 
mm.LangevinIntegrator(temperature, 5 / unit.picoseconds, 0.001 * unit.picoseconds)\n\n self.simulation = app.Simulation(modeller.topology, self.system, integrator)\n self.simulation.context.setPositions(modeller.positions)", "def open(self, device_id):\n self._js[device_id].open()", "def draw_mol(mol, highlightAtoms, highlightColors):\n drawer = rdMolDraw2D.MolDraw2DSVG(400, 200)\n drawer.DrawMolecule(mol, highlightAtoms=highlightAtoms, highlightAtomColors=highlightColors)\n drawer.FinishDrawing()\n\n # TODO: return or save image, for inclusion in a PDF report or similar\n\n # To display in a notebook:\n # svg = drawer.GetDrawingText().replace('svg:', '')\n # display(SVG(svg))", "def open(self):\n try:\n self._ser = serial.Serial(self.device, 9600, timeout=1)\n\t self.log.info(u\"= = > Virtual Amp opened({}).\".format(self.device))\n except:\n error = u\"Error while opening device : {}\".format(self.device)\n raise Mprsg6zException(error)", "def connect_dmm2110():\n address = 'USB0::0x05E6::0x2110::8010814::INSTR'\n rm = visa.ResourceManager()\n return rm.open_resource(address)", "def open(self):\n self.log.debug('upm - in upm open()')\n # Add code here to be executed only when the resource is initially opened.", "def open(self):\n if self.comm is None:\n state, buffer_paths, buffers = _remove_buffers(self.get_state())\n\n args = dict(target_name='jupyter.widget',\n data={'state': state, 'buffer_paths': buffer_paths},\n buffers=buffers,\n metadata={'version': __protocol_version__}\n )\n if self._model_id is not None:\n args['comm_id'] = self._model_id\n\n self.comm = comm.create_comm(**args)", "def read_vmdas(self,):\n fd = self.f\n # The raw files produced by VMDAS contain a binary navigation data\n # block.\n self.cfg['sourceprog'] = 'VMDAS'\n ens = self.ensemble\n k = ens.k\n if self._source != 1 and self._debug_level >= 1:\n print(' \\n***** Apparently a VMDAS file \\n\\n')\n self._source = 1\n self.vars_read += ['time_gps',\n 'latitude_gps',\n 'longitude_gps',\n 'etime_gps',\n 'elatitude_gps',\n 'elongitude_gps',\n 'flags',\n 'ntime', ]\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])\n # This byte is in hundredths of seconds (10s of milliseconds):\n time = tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))\n fd.seek(4, 1) # \"PC clock offset from UTC\" - clock drift in ms?\n ens.time_gps[k] = tmlib.date2epoch(date + time)[0]\n ens.latitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.longitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.etime_gps[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) * 10)))[0]\n ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac\n fd.seek(12, 1)\n ens.flags[k] = fd.read_ui16(1)\n fd.seek(6, 1)\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])\n ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) / 10)))[0]\n fd.seek(16, 1)\n self._nbyte = 2 + 76", "def storeMolecule():\n pass", "def open_sonde(data_file, file_format=None, *args, **kwargs):\n return Sonde(data_file, file_format=file_format, *args, **kwargs)", "def _onOpen(self, event):\n self.openExperiment()", "def show(self):\n # Used for testing because there is obviously no way back\n # from VISU_Gen.SetCurrentStudy\n if not self.display:\n return\n\n # Desactivation : Load the med file in the PARAVIS component\n #import smeca_utils.visu_utils as VU\n #log.info(\"Loading Paravis module...\")\n 
#msg = VU.load_med_file(self.read_fname())\n #log.info(msg)", "def __init__(\n molfile,\n directory=\"/home/oohnohnoh1/Desktop/GIT/Chemiinformatics_work/Chemistry2quant/src/chemistry2quant/WIP\",\n sdf_file=\"bzr.sdf\",\n ):\n super().__init__(directory, sdf_file)\n \"\"\"\n\t\tInheriting from the rdkitProcessDf and initializng for the methods within there\n\t\t\"\"\"\n self.molfile = molfile", "def main():\r\n app = QtGui.QApplication(sys.argv)\r\n form = dIdVGui()\r\n form.show()\r\n app.exec_()", "def setup_pymol():\n pymol.finish_launching() # Prevent threading errors\n # Configure global settings\n cmd.set('scene_buttons', 1)\n cmd.set('matrix_mode', 1)\n cmd.set('movie_panel', 1)\n # Configure quality settings\n cmd.mset(\"1 x500\")\n cmd.set('ray_trace_frames', 1)\n cmd.viewport(800, 800)", "def open(self):\n\n self._key_generator = KeyGenerator()\n\n # A map from LOD to LODHistory instance for all LODs that have\n # been referenced so far:\n self._lod_histories = {}\n\n # This corresponds to the 'nodes' table in a Subversion fs. (We\n # don't need a 'representations' or 'strings' table because we\n # only track file existence, not file contents.)\n self._node_db = _NodeDatabase()\n\n # Start at revision 0 without a root node.\n self._youngest = 0", "def open(self):\n pn_link_open(self._impl)", "def open_viewer(self):\r\n choice = self.thoughts_lst.get(tk.ACTIVE)\r\n subject = self.refference[choice]\r\n tbl = self.home_table[subject]\r\n view = kit.SQL_pull('*', tbl, 'subject_id = \"{}\"'.format(subject))\r\n obj = kit.class_fill(tbl, view[0])\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jv.Viewer(self.session, obj)", "def open( self, filename ):\r\n #http://www.oooforum.org/forum/viewtopic.phtml?t=35344\r\n properties = []\r\n properties.append( OpenOfficeDocument._makeProperty( 'Hidden', True ) ) \r\n properties = tuple( properties )\r\n self.oodocument = self.openoffice.loadComponentFromURL( uno.systemPathToFileUrl( os.path.abspath( filename ) ), \"_blank\", 0, properties )", "def makeVideo(self):\n \n #from morphforge.morphology.util import TriMeshBuilderVerySimple\n import sys\n sys.path.append('/usr/share/pyshared/')\n \n #import morphforge\n from morphforge.morphology.mesh import MeshBuilderRings\n MonkeyPatchMayaVi()\n #import enthought.mayavi.mlab as mlab\n from mayavi import mlab\n \n assert len(self.morphs)==1\n mesh = MeshBuilderRings().build(self.morphs[0])\n \n \n #mlab.options.offscreen = True\n \n \n @mlab.show\n @mlab.animate(delay=100 )#, ui=False) #(delay=500, ui=False)\n def _showSimpleCylinders():\n \n f = mlab.figure( bgcolor=None, fgcolor=None, engine=None, size=(1024, 768))\n #f = mlab.gcf() \n #c = TriMeshBuilderVerySimple(self.morphs[0])\n #mlab.triangular_mesh(c.x, c.y, c.z, c.triangles, colormap=self.colormap)\n mlab.triangular_mesh(mesh.vertices[:,0], mesh.vertices[:,1], mesh.vertices[:,2], mesh.triangles, colormap=self.colormap)\n \n for i in itertools.count():\n print i\n f.scene.camera.azimuth(0.1)\n mlab.savefig('/home/michael/Desktop/out/O%04d.png'%i)#, size=(1024,768))\n f.scene.render()\n if i> 3600:\n break\n yield\n \n _showSimpleCylinders()", "def openMayaScene(self, *arg, **keys):\n mode = Mode(keys.get('show', None), keys.get('sequence', None))\n mayaSceneFile = keys.get(\"mayaSceneFile\")\n if not mayaSceneFile:\n recipePath = mode.get(Recipe.XML_FILE, keys)\n recipe = Recipe.recipeFromFile(recipePath)\n mayaSceneFile = recipe.getMayaFile()\n\n if not mayaSceneFile:\n return\n\n mayaCommand = 
mode.get(\"[mayaCommand]\", keys)\n mayaCommand += \" \" + mayaSceneFile + \"&\"\n OSUtils.run(mayaCommand)\n return", "def openController(self, name, parent):\n frame = ICS[name](parent)\n frame.Show()\n return frame", "def render_molecule(\n smiles: str,\n path: str,\n width: int = 320,\n height: int = 240,\n file_format: str = \"svg\",\n clearbackground: bool = False,\n force_regenerate: bool = False,\n) -> None:\n # Import the openeye toolkit\n from openeye import oechem, oedepict\n\n output_name = get_image_filename(smiles)\n output_path = os.path.join(path, os.extsep.join([output_name, file_format]))\n\n if not force_regenerate and os.path.exists(output_path):\n logging.info(\"Skipping already-rendered molecule: %s\", smiles)\n return\n\n # Generate OpenEye OEMol object from SMILES\n # see https://docs.eyesopen.com/toolkits/python/oechemtk/molctordtor.html?highlight=smiles#construction-from-smiles\n mol = oechem.OEGraphMol()\n\n if not oechem.OESmilesToMol(mol, smiles):\n raise ValueError(f\"Failed to convert SMILES string to molecule: {smiles}\")\n\n # Configure options (lots more are available)\n # see https://docs.eyesopen.com/toolkits/python/depicttk/OEDepictClasses/OE2DMolDisplayOptions.html\n opts = oedepict.OE2DMolDisplayOptions()\n opts.SetWidth(width)\n opts.SetHeight(height)\n\n # Render image\n oedepict.OEPrepareDepiction(mol)\n disp = oedepict.OE2DMolDisplay(mol, opts)\n oedepict.OERenderMolecule(output_path, disp, clearbackground)", "def open(self):\n self.state = True\n self.mainwindow.sendMessage('a')\n print(\"opening \" + self.name)", "def open_camera(self):\n camera_source = self.winOpenCam.camera_source_used()\n if camera_source:\n param_name = select_file(\n \"Select Parameter\", \"../\", \"Parameter Files (*.json)\")\n if param_name:\n self.moildev = Moildev(param_name)\n self.running_video(camera_source)\n self.cam = True", "def _doOpenTool(self):\n self._cmdOpenTool()", "def open(self):\n with self._not_full:\n self._closed = False", "def open_sync_folder(self):\n if platform.system() == 'Darwin':\n subprocess.call(['open', '--', self.sync_dir])\n elif platform.system() == 'Linux':\n subprocess.call(['gnome-open', self.sync_dir])\n elif platform.system() == 'Windows':\n subprocess.call(['explorer', self.sync_dir])", "def open(cls, name, gxapi_vox=None, dtype=None, mode=MODE_READ, depth=False):\n\n if gxapi_vox is None:\n gxapi_vox = gxapi.GXVOX.create(_vox_file_name(name))\n vox = cls(name, gxapi_vox, dtype=dtype, mode=mode)\n\n vox.is_depth = depth\n\n return vox", "def _open_remote(file_ref):\n _authenticate()\n return dxpy.bindings.dxfile.DXFile(_get_id_fname(file_ref)[0])", "def open_video(self):\n\n \n self.filename_temp, _ = QFileDialog.getOpenFileName(self, \"Open Video\")\n\n if self.filename_temp != '':\n if self.filename_temp[-3:] == \"mp4\" or self.filename_temp[-3:] == \"wav\" or self.filename_temp[-3:] == \"wmv\" or self.filename_temp[-3:] == \"mov\":\n self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(self.filename_temp)))\n self.playBtn.setEnabled(True)\n self.saveBtn.setEnabled(True)\n self.videoOpened = True\n self.clear_annotation()\n self.filename = self.filename_temp\n\n\n else:\n message = QMessageBox()\n message.setWindowTitle(\"Fail\")\n message.setText(\"Please choose a file with one of the following extensions:\\nmp4, wav, mov or wmv.\")\n x = message.exec_() # this will show our messagebox¨¨\n\n\n elif self.filename_temp == '' and self.videoOpened:\n self.filename = self.filename\n elif self.filename_temp == '' and not 
self.videoOpened:\n self.filename = None", "def gen_vmd_script(\n mole,\n grads,\n filename=\"molecule.xyz\",\n scriptname=\"molecule.vmd\",\n colorid=1,\n scalex=10,\n):\n if colorid > 15:\n colorid = 0\n output = _vmd_script_start()\n output += _vmd_script_molecule(mole, filename)\n output += _vmd_script_vectors(mole, grads, colorid, scalex)\n with open(scriptname, \"w\") as f:\n f.write(output)", "def newMolSystem(project):\n\n i = 1\n while project.findFirstMolSystem(code='MS%d' % (i)):\n i += 1\n molSystem = project.newMolSystem(code='MS%d' % (i))\n molSystem.name = molSystem.code\n \n return molSystem", "def OpenFile(filename=None):\n\tglobal volume, spacing, dim_x, dim_y, dim_z, origin, CT_open, filename_CT, dir_ini\n\tct_swapY, ct_swapZ = False, False\n\n\tif(filename==None):\n\t\ttypes = [('All files', '*.dcm *.mhd'), ('DCM files', '*.dcm'), ('MHD files', '*.mhd')]\n\t\tfile_path = tkFileDialog.askopenfilename(initialdir = dir_ini, filetypes = types)\n\telse:\tfile_path = filename\n\n\tfilename_CT = file_path\n dir_ini = str(file_path.rsplit('/', 1)[0])+'/'\n\tprint 'Opening file...'\n\n\t### .dcm file ###\n\tif(file_path.endswith('.dcm')):\n\t\tds = pydicom.read_file(file_path)\n\t\tds.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian \n\t\tvolume = ds.pixel_array\n\n try:\n\t\t spacing[0:1] = ds.PixelSpacing\n\t origin[0:1] = ds.ImagePositionPatient\n except Exception:\n\t\t spacing = ds.PixelSpacing\n\t origin = ds.ImagePositionPatient\n\n if (ds.Modality == 'RTDOSE'):\n\t\t if (\"DoseGridScaling\" in ds):\tvolume = float(ds.DoseGridScaling)*volume\n else:\n\t if (\"RescaleSlope\" in ds):\tvolume = float(ds.RescaleSlope)*volume\n\t if (\"RescaleIntercept\" in ds):\tvolume = volume + float(ds.RescaleIntercept)\n\n\t if(len(np.shape(volume))==3):\n\t\t spacing = [ float(ds.GridFrameOffsetVector[1] - ds.GridFrameOffsetVector[0]), float(spacing[1]),float(spacing[0])]\n\t\t origin = [float(origin[2]),float(origin[1]),float(origin[0])]\n\n ct_swapZ =(ds.ImageOrientationPatient[0:3] == [1, 0, 0])\n ct_swapY =(ds.ImageOrientationPatient[3:6] == [0, 1, 0])\n\n\t\t#if ds.SeriesDescription=='PatientLETScorer [MeV/mm/(g/cm3)]':\tSetIntensityRange(volume,0,15)\n\n\t### .mhd file ###\n\tif(file_path.endswith('.mhd')):\t\n \t\titkimage = sitk.ReadImage(file_path) \t\t\t# Reads the image using SimpleITK\n \t\tvolume = sitk.GetArrayFromImage(itkimage)\n\t\tspacing = np.array(list(reversed(itkimage.GetSpacing()))) \t# Read the spacing along each dimension\n\t\torigin = np.array(list(reversed((itkimage.GetOrigin()))))\t# Read the origin of the ct_scan\n\t\ttext_file = open(file_path, \"r\")\n\t\ttmp = text_file.readlines()\n\t\tct_swap = (tmp[8][-4:-1] == 'RAI')\n\n\tif(len(np.shape(volume))==3):\n \tdim_x, dim_y, dim_z = np.shape(volume)[0], np.shape(volume)[1], np.shape(volume)[2]\n\n\t# Dealing with image orientation\n print ' ct_swapY, ct_swapZ :', ct_swapY, ct_swapZ\n\tif(ct_swapY == True):\n volume = np.flip(volume,1) # flip volume, Y direction\n origin[1] = origin[1] + dim_y*spacing[1] \n if(ct_swapZ == True):\n volume = np.flip(volume,2) # flip volume, Z direction\n origin[2] = origin[2] + dim_z*spacing[2] \n if(ct_swapZ == True)and(ct_swapY == True): spacing[1], spacing[2] = spacing[2], spacing[1]\n\n\tif(len(np.shape(volume))==2):\tdim_x, dim_y, dim_z = np.shape(volume)[0], np.shape(volume)[1], 0\n\n\tSet_axes_lim_init()\n\tSet_scales()\n\tCT_open = True\n\tUpdate_all()\n\n\tprint ' file successfully opened!'", "def open_video(self):\n\n # start the stream on the bebop\n 
if (self.is_bebop):\n self.drone_object.start_video_stream()\n\n # we have bypassed the old opencv VideoCapture method because it was unreliable for rtsp\n\n # get the path for the config files\n fullPath = inspect.getfile(DroneVisionGUI)\n shortPathIndex = fullPath.rfind(\"/\")\n if (shortPathIndex == -1):\n # handle Windows paths\n shortPathIndex = fullPath.rfind(\"\\\\\")\n print(shortPathIndex)\n shortPath = fullPath[0:shortPathIndex]\n self.imagePath = join(shortPath, \"images\")\n self.utilPath = join(shortPath, \"utils\")\n print(self.imagePath)\n print(self.utilPath)\n\n if self.is_bebop:\n # generate the streaming-address for the Bebop\n self.utilPath = join(shortPath, \"utils\")\n self.stream_adress = \"%s/bebop.sdp\" % self.utilPath\n else:\n # generate the streaming-address for the Mambo\n self.stream_adress = \"rtsp://192.168.99.1/media/stream2\"\n\n # initialise the vlc-player with the network-caching\n self.player = vlc.MediaPlayer(self.stream_adress, \":network-caching=\" + str(self.network_caching))\n\n # start the buffering\n success = self._start_video_buffering()", "def open_launcher(self):\n vim.command('silent! botright split {0}'.format(self.name))\n self.setup_buffer()", "def open(self):\n if dev[self.id] != FLI_INVALID_DEVICE:\n raise FliError(\"Device already opened\")\n dev[self.id] = FLIDEVICE_CAMERA\n\n # set default parameters\n self.setTemperature(CCD_TEMP)\n self.setHBin(1)\n self.setVBin(1)\n self.setExpTime(0)\n self.setFrame(0, 0, 1072, 1033)\n with self.lock:\n self.status = READY\n self.visibleExpArea = (24, 9, 1048, 1033)\n self.defaultExpArea = (0, 0, 1072, 1033)\n self.expArea = (0, 0, 1072, 1033)\n self.regions = ((0, 0, 0), (0, 0, 0))", "def open_doi(doi):\n webbrowser.open_new_tab(DOI_URL % doi)", "def __invokeJmol(self, inputFile, outputFile, format, width, height):\n try:\n jmolCmd = self.__getJmolCmd()\n fd, name = tempfile.mkstemp(\".jmol\", \"ice\")\n os.write(fd, \"load %s\" % inputFile)\n os.close(fd)\n _, stderr = self.iceContext.system.execute2(\"java\", \"-jar\", jmolCmd, \"-n\", \n \"-g\", \"%sx%s\" % (width, height), \"-xios\", name, \"-w\", \n \"%s:%s\" % (format, outputFile.replace(\"\\\\\", \"/\")))\n if stderr != \"\":\n raise Exception(stderr)\n if len(stderr) > 0:\n raise Exception(stderr)\n except Exception, e:\n print \"Failed to create preview: %s\" % str(e)", "def molecule(self):\n return self._molecule", "def vacuum_cgmd(self):\n\n\t\texstring_dssp = 'except: cannot find dssp at '+gmxpaths['dssp']+\\\n\t\t\t'\\nconsider using the following syntax to download for 64-bit linux:'+\\\n\t\t\t'\\n\\twget ftp://ftp.cmbi.ru.nl/pub/software/dssp/dssp-2.0.4-linux-amd64'+\\\n\t\t\t'\\n\\tor navigate to ftp://ftp.cmbi.ru.nl/pub/software/dssp/'+\\\n\t\t\t'\\n\\tand make sure you add execute permissions'\n\t\t\t\n\t\texstring_martinize = 'except: cannot find martinize at '+gmxpaths['martinize']+\\\n\t\t\t'\\nconsider using the following syntax to download:'+\\\n\t\t\t'\\n\\twget http://md.chem.rug.nl/cgmartini/images/tools/martinize/martinize-2.4/martinize.py'+\\\n\t\t\t'\\n\\tor navigate to http://md.chem.rug.nl/cgmartini/index.php/tools2/proteins-and-bilayers'+\\\n\t\t\t'\\n\\tand make sure you add execute permissions'\n\t\n\t\t#---first test to see if executables are available\n\t\tif not os.path.isfile(os.path.expanduser(gmxpaths['dssp'])): raise Exception(exstring_dssp)\n\t\tif not os.path.isfile(os.path.expanduser(gmxpaths['martinize'])): raise Exception(exstring_martinize)\t\n\t\n\t\tcmd = [gmxpaths['martinize'],\n\t\t\t'-f 
system-input.pdb',\n\t\t\t'-o system-original.top',\n\t\t\t'-x protein-cg.pdb',\n\t\t\t'-ff martini22','-ed',\n\t\t\t'-dssp '+gmxpaths['dssp']]\n\t\tcall(cmd,logfile='log-martinize',cwd=self.rootdir)\n\t\t\n\t\twith open(self.rootdir+'system-original.top') as fp: lines = fp.readlines()\n\t\tself.itp_protein = [l.split()[0] for l in lines if l[:7] == 'Protein']\n\n\t\t#---note that this section leaves out lipids\n\t\tself.itp_lipid = []\n\t\t\n\t\t#---note that this method is currently set to only simulate one protein\n\t\tself.nprots = [1]\n\t\tself.write_topology_protein('vacuum.top')\n\t\t\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f protein-cg.pdb',\n\t\t\t'-o vacuum-alone.gro']\n\t\tcall(cmd,logfile='log-editconf-convert',cwd=self.rootdir)\n\t\n\t\tprint \"building box with \"+str(self.settings['wbuffer'])+'nm of water'\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f vacuum-alone.gro',\n\t\t\t'-d '+str(self.settings['wbuffer']),\n\t\t\t'-o vacuum.gro','-c']\n\t\tcall(cmd,logfile='log-editconf-vacuum',cwd=self.rootdir)\n\t\t\n\t\tself.minimization_method('vacuum')" ]
[ "0.612638", "0.6115121", "0.60250276", "0.6011934", "0.5911261", "0.57081985", "0.568981", "0.5678963", "0.5570185", "0.5564583", "0.5564583", "0.5564583", "0.5531796", "0.5531796", "0.54938924", "0.54809994", "0.5477402", "0.5396461", "0.5377517", "0.5377517", "0.5375009", "0.53636", "0.53636", "0.53636", "0.53636", "0.53636", "0.53636", "0.53636", "0.5300105", "0.5294562", "0.5293086", "0.5260659", "0.5258946", "0.5251451", "0.5247629", "0.5238155", "0.523471", "0.5233587", "0.52309364", "0.52309364", "0.5218213", "0.52090085", "0.51937133", "0.51935107", "0.519255", "0.5179976", "0.51769793", "0.5166749", "0.5163891", "0.51617795", "0.51424503", "0.51424503", "0.5137283", "0.5120103", "0.5116872", "0.51113176", "0.5111254", "0.5101961", "0.5095059", "0.5085253", "0.5050183", "0.50484985", "0.5034045", "0.5025991", "0.5017712", "0.50087684", "0.49948317", "0.49787122", "0.4976252", "0.49739966", "0.49732995", "0.49689284", "0.49688792", "0.49600053", "0.49569836", "0.49468398", "0.49411875", "0.49332425", "0.49039012", "0.49014658", "0.48960418", "0.48901805", "0.4887112", "0.4876553", "0.48589763", "0.4855579", "0.48539862", "0.4852868", "0.48468927", "0.48465452", "0.48312208", "0.48265424", "0.48242828", "0.48186493", "0.48138523", "0.48107144", "0.48042175", "0.4798956", "0.47974202", "0.47867864" ]
0.6395765
0
Test read and write ints.
def test_message_int():
    result = True
    message = msg.Message()
    for i in range(num_it):
        message.appendInt(i)
        if message.length != msg.HEADER_SIZE + (i+1)*msg.intStruct.size:
            print("Size is ", message.length, " but should be ", msg.HEADER_SIZE + (i+1)*msg.intStruct.size)
            print("Error : message.appendInt")
            result = False
    message.resetCursor()
    for i in range(num_it):
        r = message.readInt()
        if r != i:
            print(r, " vs ", i)
            print("Error : message.read/appendInt")
            result = False
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read(self) -> int:\n ...", "def test_int_field():", "def read(self) -> int:", "def test_numbers_roundtrip():\n for num in (0, 1, 2, 178, 300, BIG_NUMBER):\n num2 = UnsignedInt.read(UnsignedInt.to_bytes(num))\n assert num2 == num", "def test_integer(self):\n esnA = ESN(N_in,N_out,random_state=1)\n esnB = ESN(N_in,N_out,random_state=1)\n self._compare(esnA,esnB,should_be=\"same\")", "def testInt(self):\n self.assertEquals(20, int(Color.RED))\n self.assertEquals(2, int(Color.ORANGE))", "def test_toInt(self):\r\n self.assertEqual(self.black.toInt(), 0)\r\n self.assertEqual(self.red.toInt(), 16711680)\r\n self.assertEqual(self.pink.toInt(), 6553600)", "def test_int_to_int(self):\n @converters.wrap\n def inner_test(param: int):\n \"\"\"Make sure the parameter was converted correctly.\"\"\"\n self.assertEqual(param, 15)\n inner_test(param=15)", "def check_for_int(check):", "def test_int(self):\n from random import randint\n from ctypes import byref, c_int\n # back up array.\n a_orig = self.a.copy()\n # run FORTRAN subroutine.\n tval = randint(0,10000000)\n self.args[0] = byref(c_int(tval))\n self.lib_c_ctypes.ctypes_test(*self.args)\n # revert in Python and test.\n self.a -= tval\n for i in range(len(self.a)):\n self.assertEqual(self.a[i], a_orig[i])", "def test_toint(number, expected, cond):\n assert toInt(number, cond=cond) == expected", "def test_read_count(self):\n self.assertEqual(1, self.alice_storage.read_count)\n self.assertEqual(1, self.bob_storage.read_count)\n self.assertEqual(0, self.carol_storage.read_count)\n self.assertEqual(0, self.anonymous_storage.read_count)", "def test_integer(self):\n conn = self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_integer')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_integer ' \\\n '( value INTEGER NOT NULL )')\n cursor.execute(query)\n data = []\n query = 'INSERT INTO test_integer VALUES (%s)'\n for i in range(100):\n item = random.randrange(-sys.maxint, sys.maxint)\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_integer'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n assert isinstance(item, int) or isinstance(item, long)\n assert item in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_integer')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_integer')\n cursor.execute(query)\n conn.commit()", "def test_int_out_of_range(parallel, guess):\n imin = np.iinfo(int).min + 1\n imax = np.iinfo(int).max - 1\n huge = f\"{imax+2:d}\"\n\n text = f\"P M S\\n {imax:d} {imin:d} {huge:s}\"\n expected = Table([[imax], [imin], [huge]], names=(\"P\", \"M\", \"S\"))\n # NOTE: Warning behavior varies for the parameters being passed in.\n with pytest.warns() as w:\n table = ascii.read(\n text, format=\"basic\", guess=guess, fast_reader={\"parallel\": parallel}\n )\n if not parallel:\n assert len(w) == 1\n assert (\n \"OverflowError converting to IntType in column S, reverting to String\"\n in str(w[0].message)\n )\n assert_table_equal(table, expected)\n\n # Check with leading zeroes to make sure strtol does not read them as octal\n text = f\"P M S\\n000{imax:d} -0{-imin:d} 00{huge:s}\"\n expected = Table([[imax], [imin], [\"00\" + huge]], names=(\"P\", \"M\", \"S\"))\n with pytest.warns() as w:\n table = ascii.read(\n text, format=\"basic\", guess=guess, 
fast_reader={\"parallel\": parallel}\n )\n if not parallel:\n assert len(w) == 1\n assert (\n \"OverflowError converting to IntType in column S, reverting to String\"\n in str(w[0].message)\n )\n assert_table_equal(table, expected)", "def test_integers(self):\n for const in [\n SSL_ST_CONNECT,\n SSL_ST_ACCEPT,\n SSL_ST_MASK,\n SSL_CB_LOOP,\n SSL_CB_EXIT,\n SSL_CB_READ,\n SSL_CB_WRITE,\n SSL_CB_ALERT,\n SSL_CB_READ_ALERT,\n SSL_CB_WRITE_ALERT,\n SSL_CB_ACCEPT_LOOP,\n SSL_CB_ACCEPT_EXIT,\n SSL_CB_CONNECT_LOOP,\n SSL_CB_CONNECT_EXIT,\n SSL_CB_HANDSHAKE_START,\n SSL_CB_HANDSHAKE_DONE,\n ]:\n assert isinstance(const, int)\n\n # These constants don't exist on OpenSSL 1.1.0\n for const in [\n SSL_ST_INIT,\n SSL_ST_BEFORE,\n SSL_ST_OK,\n SSL_ST_RENEGOTIATE,\n ]:\n assert const is None or isinstance(const, int)", "def test_reading_counter(self):\n self._test_reading_counter_template()", "def test_roundtrip_signed_int():\n for num in (0, -0, -1, 2, -178, 300, -BIG_NUMBER, BIG_NUMBER):\n num2 = SignedInt.read(SignedInt.to_bytes(num))\n assert num2 == num", "def test_get_value_int(self):\n val = self.setting_int.get_value()\n self.assertIsInstance(val, int)\n self.assertEqual(val, 170)", "def test_get_value_int(self):\n val = self.setting_int.get_value()\n self.assertIsInstance(val, int)\n self.assertEqual(val, 170)", "def test_int(self):\n output, _err = self.executor.prepare('do-stuff', 'special', verbosity=5).batch()\n self.assertEqual(output, 'doing stuff very specially')", "def test_int(self):\n htype = h5t.py_create('i')\n self.assertIsInstance(htype, h5t.TypeIntegerID)", "def test_int(self, env: yaenv.Env):\n _val = env.int('INT_VAR')\n assert _val == 1 and type(_val) == int\n _val = env.int('MISSING', -2)\n assert _val == -2 and type(_val) == int\n with pytest.raises(yaenv.EnvError) as err:\n _ = env.int('LIST_VAR')\n assert 'Invalid integer' in str(err.value)\n assert env.int('MISSING') is None", "def getInt(self, int: int, int2: int) -> int:\n ...", "def test_create_valid_int(self):\n storage = FileStorage()\n tests = [9, 12, 10000]\n expected = [9, 12, 10000]\n\n for i in range(len(tests)):\n self.remove_all()\n with patch('sys.stdout', new=StringIO()) as f:\n self.console.onecmd(\n 'create BaseModel test_var={}'.format(tests[i]))\n attributes = list(storage.all().values())\n actual = attributes[0].test_var\n self.assertEqual(expected[i], actual)\n self.assertEqual(int, type(actual))", "def test_integer_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_integer_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_simple_index, 12345)\n\t)", "def test_devide_int(self):\n self.assertEqual(operations.devide(8,4), 2)", "def write(self, value: int, /) -> None:", "def test_safeGetInt(self):\n self.assertEqual(\n BMConfigParser().safeGetInt('nonexistent', 'nonexistent'), 0)\n self.assertEqual(\n BMConfigParser().safeGetInt('nonexistent', 'nonexistent', 42), 42)", "def test_bit_get_int(self):\n ops = [bitwise_operations.bit_get_int(self.five_255_bin, 0, 8, False)]\n\n _, _, result = self.as_connection.operate(self.test_key, ops)\n\n expected_result = 255\n assert result[\"255\"] == expected_result", "def test_op_one_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n offl_a = stream.bind(a)\n offl_a.one()\n offl_a.update_host()\n stream.sync()\n self.assertTrue((a == 1).all(),\n \"Array should be all one.\" + str(a))", "def test_getint(self):\n 
self.assertEqual(self.config.getint('advanced','n'),12)", "def test_read_count(self):\n self.assertEqual(1, self.alice_inbox.read_count)\n self.assertEqual(1, self.bob_inbox.read_count)\n self.assertEqual(0, self.carol_inbox.read_count)", "def test_read(self):\n string = \"1 10\\n\"\n num1, num2 = collatz_read(string)\n self.assertEqual(num1, 1)\n self.assertEqual(num2, 10)", "def write_readinto(self, write_buf: bytes, read_buf: bytes, /) -> Optional[int]:", "def write_readinto(self, write_buf: bytes, read_buf: bytes, /) -> Optional[int]:", "def test_add_integers(self):\n print(\"---running test_add_integers\")\n result = some_math.add(1, 2)\n assert result == 3", "def test_read_input_file(self):\n\n test_max_digit = 2\n tuple1 = self.test_raw_tuple\n tuple2, max_digit = read_input_file(self.test_drug_info_file)\n self.assertEqual(tuple1, tuple2)\n self.assertAlmostEqual(max_digit,test_max_digit)", "def test_op_setslice_scalar_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n b = -1\n\n expect = numpy.empty_like(a)\n expect[:] = b\n\n offl_a = stream.bind(a)\n offl_a[:] = b\n offl_a.update_host()\n stream.sync()\n\n self.assertTrue((a == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(a, expect))", "def test_device_read(self):\n test_adc = ads1115_single(assume_defaults)\n\n try:\n value = test_adc.read()\n self.assertIsInstance(value, int)\n except FileNotFoundError:\n # If this occurs, the I2C file was not found in /dev/, likely\n # because the ADC isn't actually attached to the dut. Pass the test.\n pass", "def test_integer_key():\n\tbackup_and_restore(\n\t\tlambda context: put_keys(lib.SET, INTEGER_KEYS, \"foobar\", False),\n\t\tNone,\n\t\tlambda context: check_keys(lib.SET, INTEGER_KEYS, \"foobar\", False)\n\t)", "def test_add_int(self):\n self.assertEqual(operations.add(3,4), 7)", "def test_integer_update(self):\r\n vm = Integer.value_manager(None, None, 5)\r\n assert not vm.changed\r\n vm.value = 4\r\n assert vm.changed", "def test_creation_int():\n value = 1\n\n num_a = param.Integer(value=value)\n assert num_a.value == value", "def test_integer_variable(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"int n = 5\")\n assert bb._var == {\"n\": 5}", "def _testReadWrite(self):\n self.shouldWrite = True\n\n def checkReadInput(fd):\n self.assertEquals(fd.read(1), b'x')\n self._reactor.stop()\n\n def writeOnce(fd):\n if self.shouldWrite:\n self.shouldWrite = False\n fd.write(b'x')\n self._reader = Reader(self._p1, checkReadInput)\n self._writer = Writer(self._p2, writeOnce)\n\n self._reactor.addWriter(self._writer)\n\n # Test that adding the reader twice adds it only once to\n # IOLoop.\n self._reactor.addReader(self._reader)\n self._reactor.addReader(self._reader)", "def test_int32_value():\n x = Int32Value(42)\n assert x == IntType(42)\n y = Int32Value(IntType(0))\n assert y == IntType(0)", "def test_read1(self):\n string = \"100 200\\n\"\n num1, num2 = collatz_read(string)\n self.assertEqual(num1, 100)\n self.assertEqual(num2, 200)", "def testSetOffsetWithInt(self):\n self.node.offset = 2\n\n self.assertEqual(\n (2, 2, 2),\n self.node.offset\n )", "def test_should_return_a_integer(self):\n\n tcp_flags = TCPControlBits(['SYN', 'ACK'])\n assert_is_instance(tcp_flags.to_int(), int)", "def test_add_integer(self):\n assert cr.add(3, 2) == 3 + 2", "def test_op_fillfrom_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a 
= numpy.arange(1, 4711 * 1024, dtype=int)\n offl_r = stream.empty_like(a)\n offl_r.fillfrom(a)\n r = offl_r.update_host().array\n stream.sync()\n self.assertTrue((a == r).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(a, r))", "def test_int(self):\n self.assertTrue(validate_measure_input('0', self.measures))\n self.assertTrue(validate_measure_input('1', self.measures))\n self.assertTrue(validate_measure_input(str(len(self.measures)), self.measures))\n self.assertFalse(validate_measure_input(str(len(self.measures) + 1), self.measures))", "def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarInt32()", "def test_read(self):\n with patch('RPi.GPIO.setmode') as mock_setmode:\n gpio = GPIODevice()\n with patch('RPi.GPIO.setup') as mock_setup:\n with patch('RPi.GPIO.input') as mock_input:\n mock_input.return_value = True\n value = gpio.read(0)\n with patch('RPi.GPIO.cleanup') as mock_cleanup:\n gpio.close()\n self.assertDictEqual(value, {\"value\": True})", "def test_read(self):\n self.reader._timing = [3, 2, 2, 1, 1, 1]\n score, time = self.reader.read(self.books[0], 0, 3)\n self.assertTrue(self.books[0].id_book not in self.reader._books)\n self.assertEqual(3, score)\n self.assertEqual(6, time)\n self.assertEqual([3, 3, 3, 2, 2, 2], self.reader._timing)\n score, time = self.reader.read(self.books[3], 4, 5)\n self.assertTrue(self.books[3].id_book not in self.reader._books)\n self.assertEqual(0, score)\n self.assertEqual(7, time)\n self.assertEqual([3, 3, 3, 2, 3, 3], self.reader._timing)", "def test_int_arg(self):\n obj = Base(9)\n self.assertTrue(obj.id is 9)", "def test_device_read(self):\n test_adc = ads1115_differential(assume_defaults)\n\n try:\n value = test_adc.read()\n self.assertIsInstance(value, int)\n except FileNotFoundError:\n # If this occurs, the I2C file was not found in /dev/, likely\n # because the ADC isn't actually attached to the dut. 
Pass the test.\n pass", "def test_int_log(self):\n htype = h5t.py_create('i', logical=True)\n self.assertIsInstance(htype, h5t.TypeIntegerID)", "def test_random_integers(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.random_integers((20, 20), -5, 5))\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.random_integers(-5, 5, size=(20,20))\r\n numpy_val1 = rng.random_integers(-5, 5, size=(20,20))\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def test_bit_get_int_accross_bytes(self):\n ops = [bitwise_operations.bit_get_int(self.test_bin_ones, 4, 8, False)]\n\n _, _, result = self.as_connection.operate(self.test_key, ops)\n\n expected_result = 16\n assert result[\"bitwise1\"] == expected_result", "def test_analog_read_successfull_read_operation(self):\n resp = json.loads(bolt.analogRead(self.ANALOG_READ_PIN))\n self.assertEqual(resp[\"success\"], self.SUCCESS_RESPONSE)\n self.assertTrue(0 <= int(resp[\"value\"]) <= 1024)", "def test_another_read(self):\n self.reader._timing = [3, 2, 3, 3, 1, 1]\n score, time = self.reader.read(self.books[0], 0, 6)\n self.assertTrue(self.books[0].id_book not in self.reader._books)\n self.assertEqual(0, score)\n self.assertEqual(9, time)", "def test_mod():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n new_value = value % 2\n num_a.value %= 2\n assert num_a.value == new_value", "def test_roll(self):\n pig = game.pig.Pig('PlayerA', 'PlayerB')\n for i in range(500):\n r = pig.roll()\n self.assertIsInstance(r, int)\n self.assertTrue(1 <= r <= 6)", "def test_read_with_no_locks(self):\n\n transaction = Transaction(\"T1\", TransactionType.READ_WRITE, 1)\n instruction = Instruction(\"R(T1, x2)\")\n\n self.assertEquals(self.data_manager.read(transaction, instruction), \"20\")", "def test_read4(self):\n string = \"1 1\\n\"\n num1, num2 = collatz_read(string)\n self.assertEqual(num1, 1)\n self.assertEqual(num2, 1)", "def test_bit_get_int_bit_offset_out_of_range(self):\n ops = [bitwise_operations.bit_get_int(self.test_bin_ones, 41, 1, False)]\n\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)", "def test_read2(self):\n string = \"201 210\\n\"\n num1, num2 = collatz_read(string)\n self.assertEqual(num1, 201)\n self.assertEqual(num2, 210)", "def test_list_int(self):\n result = add(2, 4)\n self.assertEqual(result, 6)", "def read(self, n=1):\n return 0", "def test_bytes_int(self):\n self.assertEqual(py23_bytes(5), b'\\x00\\x00\\x00\\x00\\x00')\n # Test using newint:\n self.assertEqual(py23_bytes(int(5)), b'\\x00\\x00\\x00\\x00\\x00')\n self.assertTrue(isinstance(py23_bytes(int(5)), bytes_types))", "def testSetPowerWithInt(self):\n self.node.power = 2\n\n self.assertEqual(\n (2, 2, 2),\n self.node.power\n )", "def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarInt()", "def test_op_fill_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n value = 73422137\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n expect = numpy.empty_like(a)\n expect[:] = value\n offl_a = stream.bind(a)\n offl_a.fill(value)\n offl_a.update_host()\n stream.sync()\n self.assertTrue((a == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be 
{1}\".format(a, expect))", "def test_bit_get_int_bad_argument_type(self):\n ops = [bitwise_operations.bit_get_int(self.test_bin_ones, 0, 1.5, False)]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(self.test_key, ops)", "def test_incrdecr(self):\n\t\tyield self.conn.set(\"an_integer\", 42)\n\n\t\tself.assertEqual((yield self.conn.incr(\"an_integer\", 1)), 43)\n\t\tself.assertEqual((yield self.conn.decr(\"an_integer\", 1)), 42)", "def test_integer_key_stored():\n\tbackup_and_restore(\n\t\tlambda context: put_keys(lib.SET, INTEGER_KEYS, \"foobar\", True),\n\t\tNone,\n\t\tlambda context: check_keys(lib.SET, INTEGER_KEYS, \"foobar\", True)\n\t)", "def readInt(self) -> int:\n return self._unpack('!i', 4)", "def test_unread_count(self):\n self.assertEqual(1, self.alice_storage.unread_count)\n self.assertEqual(2, self.bob_storage.unread_count)\n self.assertEqual(0, self.carol_storage.unread_count)\n self.assertEqual(0, self.anonymous_storage.unread_count)", "def main():\n\n open_read_write()", "def test_random_integers(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.random_integers((20,20), -5, 5))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.random_integers(-5, 5, size=(20,20))\r\n numpy_val1 = rng.random_integers(-5, 5, size=(20,20))\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def testIntegers(self):\n sdq1 = getattr(self.s1, 'sdq1')\n self.app.REQUEST.form['startingYear'] = 'string'\n self.app.REQUEST.form['endingYear'] = 'string'\n self.app.REQUEST.form['futureYears'] = 'string'\n app = self.app\n dummy_controller_state = ControllerState(\n id='base_edit',\n context=sdq1,\n button='submit',\n status='success',\n errors={},\n next_action=None,)\n controller = self.portal.portal_form_controller\n controller_state = controller.validate(dummy_controller_state, app.REQUEST, ['validate_base',])\n errors = controller_state.getErrors()\n errors = sdq1.post_validate(self.app.REQUEST, errors)\n assert errors != {}, \"Validation error not raised\"\n assert errors.has_key('startingYear')\n assert errors.has_key('endingYear')\n assert errors.has_key('futureYears')", "def test_right_twos_to_int(self):\n self.assertEqual(utils.twos_to_int('101'.zfill(8)), 5)", "def test():\n import doctest\n doctest.testmod(verbose=0)\n test_int()\n test_tuple()", "def output_integer(state, key, data):\n return int(state[key])", "def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarUInt32()", "def test_integer_length(doctest):", "def test_inc_rolls(self):\n computer1 = computer.Computer(1)\n computer1.inc_rolls()\n res = computer1.rolls\n exp = 1\n self.assertEqual(res, exp)", "def test_bit_get_int_multiple_bytes(self):\n ops = [bitwise_operations.bit_get_int(self.five_255_bin, 4, 17, False)]\n\n _, _, result = self.as_connection.operate(self.test_key, ops)\n\n expected_result = 131071\n assert result[\"255\"] == expected_result", "def test_io_statistics(self):\n import time\n from supvisors.statistics import instant_io_statistics, io_statistics\n # take 2 spaced instant cpu statistics\n ref_stats = instant_io_statistics()\n time.sleep(1)\n last_stats = instant_io_statistics()\n 
stats = io_statistics(last_stats, ref_stats, 1)\n # test keys\n self.assertListEqual(ref_stats.keys(), stats.keys())\n self.assertListEqual(last_stats.keys(), stats.keys())\n # test that values are pairs\n for intf, bytes in stats.items():\n self.assertEqual(2, len(bytes))\n for value in bytes:\n self.assertIs(int, type(value))", "def test_io_in_out_loop(self):\n self.l.output(conf_io=0x1, state_io=0x0)\n for i in range(10):\n state_d, state_io, count = self.l.output(state_io=0x1)\n self.assertTrue(state_io & 0x2)\n state_d, state_io, count = self.l.output(state_io=0x0)\n self.assertTrue(not state_io & 0x2)", "def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarInt64()", "def test_bind_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n library = get_library(device, \"libtests.so\")\n pattern = int(0xdeadbeefabbaabba)\n a = numpy.empty((4711 * 1024,), dtype=int)\n a[:] = pattern\n offl_a = stream.bind(a)\n r = numpy.empty((1,), dtype=int)\n offl_r = stream.bind(r)\n stream.invoke(library.test_check_pattern,\n offl_a, offl_a.size, offl_r, pattern)\n offl_r.update_host()\n stream.sync()\n\n self.assertEqual(r[0], a.shape[0])", "def test_output(self):\n good_value_pairs = INT_VALUE_PAIRS\n for pair in good_value_pairs:\n output = to_cardinal_number(pair[0])\n self.assertEqual(output, pair[1],\n f\"{pair[0]} should be {pair[1]}, not {output}\")", "def readinto(self, buf: bytes, write: int = 0x00, /) -> Optional[int]:", "def readinto(self, buf: bytes, write: int = 0x00, /) -> Optional[int]:", "def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarUInt()", "def testIntValue(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User',\n u'[email protected]')\n namespace = createNamespace(user, u'name')\n tag = createTag(user, namespace, u'tag')\n self.store.add(TagValue(user.id, tag.id, objectID, 42))", "def test_int_to_str(self):\n @converters.wrap\n def inner_test(param: str):\n \"\"\"Make sure the parameter was converted correctly.\"\"\"\n self.assertEqual(param, '256')\n inner_test(param=256)", "def test_list_int2(self):\n inp = [(0, 0), (10, 1), (1, 2)]\n expected = 19\n actual = get_num_steps(inp)\n self.assertEqual(expected, actual)" ]
[ "0.62519395", "0.6250594", "0.62442267", "0.61574054", "0.5960592", "0.59363145", "0.5931189", "0.5928802", "0.59218436", "0.587367", "0.5865747", "0.58532387", "0.57997644", "0.5783584", "0.57745636", "0.5768972", "0.5754527", "0.57348996", "0.57348996", "0.57081896", "0.56948346", "0.56756854", "0.5665358", "0.56630194", "0.5621748", "0.56077176", "0.5563673", "0.5561611", "0.55376834", "0.55287766", "0.5501422", "0.54978096", "0.5495314", "0.549531", "0.549531", "0.5491468", "0.5485377", "0.54809695", "0.5468015", "0.54651946", "0.5455302", "0.544123", "0.5437209", "0.5432861", "0.5431298", "0.54230756", "0.5418102", "0.5411968", "0.539313", "0.5392056", "0.5391759", "0.5391718", "0.5387238", "0.5381982", "0.53803325", "0.53686327", "0.5352779", "0.53414863", "0.53388804", "0.5333273", "0.53331494", "0.5331258", "0.5328248", "0.5320194", "0.530198", "0.5279639", "0.52757645", "0.5269274", "0.52663976", "0.5257537", "0.5257402", "0.5250666", "0.5248402", "0.52393526", "0.523396", "0.52326924", "0.5227285", "0.5217835", "0.52102405", "0.5208493", "0.5203628", "0.52030396", "0.5199826", "0.51904154", "0.51899296", "0.51874036", "0.5184014", "0.51780486", "0.5177027", "0.51725006", "0.5171829", "0.5170749", "0.51653486", "0.51562977", "0.5142901", "0.5142901", "0.51377845", "0.51364326", "0.513514", "0.51124036" ]
0.625918
0
Test read and write floats.
def test_message_float():
    result = True
    message = msg.Message()
    for i in range(num_it):
        message.appendFloat(i/128.789456)
        if message.length != msg.HEADER_SIZE + (i+1)*msg.floatStruct.size:
            print("Size is ", message.length, " but should be ", msg.HEADER_SIZE + (i+1)*msg.floatStruct.size)
            print("Error : message.appendFloat")
            result = False
    message.resetCursor()
    for i in range(num_it):
        r = message.readFloat()
        if abs(r - i/128.789456) > 0.000001:
            print(r, " vs ", i/128.789456)
            print("Error : message.read/appendFloat")
            result = False
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_float():\n floatify = fields.FloatField().adapt\n\n for input, expect in [\n (1.1, 1.1),\n (11, 11.0),\n (int(5.7), 5)\n ]:\n assert floatify(input) == expect", "def test_float_storage():\n values = [2.3434, 124012.2323209999, -12.39212445433389]\n\n for value in values:\n sign, exp, mantissa = to_sign_exponent_mantissa(value)\n restored_value = from_sign_exponent_mantissa(sign, exp, mantissa)\n print(restored_value)\n assert(value == restored_value)", "def test_float(self):\n htype = h5t.py_create('f')\n self.assertIsInstance(htype, h5t.TypeFloatID)", "async def test_float_data_type(hass, mock_hub):\n register_config = {\n CONF_COUNT: 2,\n CONF_REGISTER_TYPE: CALL_TYPE_REGISTER_HOLDING,\n CONF_DATA_TYPE: DATA_TYPE_FLOAT,\n CONF_SCALE: 1,\n CONF_OFFSET: 0,\n CONF_PRECISION: 5,\n }\n await run_test(\n hass,\n mock_hub,\n register_config,\n SENSOR_DOMAIN,\n register_words=[16286, 1617],\n expected=\"1.23457\",\n )", "def __test_float(self, bk):\n for arg in self.args['float']:\n print(\"\\nTesting:\", arg)\n ds = ArgoDataFetcher(backend=bk).float(arg).to_xarray()\n assert isinstance(ds, xr.Dataset) == True", "def test_float(self, env: yaenv.Env):\n _val = env.float('FLOAT_VAR')\n assert _val == 10.0 and type(_val) == float\n _val = env.float('MISSING', -3.1)\n assert _val == -3.1 and type(_val) == float\n with pytest.raises(yaenv.EnvError) as err:\n _ = env.float('LIST_VAR')\n assert 'Invalid numerical' in str(err.value)\n assert env.float('MISSING') is None", "def testfloat ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTup1, expRes in self.knownFloatValues:\r\n\t\t\tfrac1 = eval ( r.sub ( 'frac.frac', fracTup1 ) ) \r\n\t\t\tself.assertAlmostEqual ( float ( frac1 ), expRes )", "def test_create_valid_float(self):\n storage = FileStorage()\n tests = [9.124, 90.24, 90.0, 90.]\n expected = [9.124, 90.24, 90.0, 90.0]\n\n for i in range(len(tests)):\n self.remove_all()\n with patch('sys.stdout', new=StringIO()) as f:\n self.console.onecmd(\n 'create BaseModel test_var={}'.format(tests[i]))\n attributes = list(storage.all().values())\n actual = attributes[0].test_var\n self.assertEqual(expected[i], actual)\n self.assertEqual(float, type(actual))", "def test_op_fillfrom_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n offl_r = stream.empty_like(a)\n offl_r.fillfrom(a)\n r = offl_r.update_host().array\n stream.sync()\n self.assertTrue((a == r).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(a, r))", "def test_float(self):\n self.assertFalse(validate_measure_input('0.0', self.measures))\n self.assertFalse(validate_measure_input('1.0', self.measures))\n self.assertFalse(validate_measure_input('1.1', self.measures))", "def test_float_log(self):\n htype = h5t.py_create('f', logical=True)\n self.assertIsInstance(htype, h5t.TypeFloatID)", "def test_floats(self):\r\n problem_setup = [\r\n # [given_answer, [list of correct responses], [list of incorrect responses]]\r\n [1, [\"1\"], [\"1.1\"]],\r\n [2.0, [\"2.0\"], [\"1.0\"]],\r\n [4, [\"4.0\", \"4.00004\"], [\"4.00005\"]],\r\n [0.00016, [\"1.6*10^-4\"], [\"\"]],\r\n [0.000016, [\"1.6*10^-5\"], [\"0.000165\"]],\r\n [1.9e24, [\"1.9*10^24\"], [\"1.9001*10^24\"]],\r\n [2e-15, [\"2*10^-15\"], [\"\"]],\r\n [3141592653589793238., [\"3141592653589793115.\"], [\"\"]],\r\n [0.1234567, [\"0.123456\", \"0.1234561\"], [\"0.123451\"]],\r\n [1e-5, [\"1e-5\", \"1.0e-5\"], [\"-1e-5\", \"2*1e-5\"]],\r\n ]\r\n for given_answer, 
correct_responses, incorrect_responses in problem_setup:\r\n problem = self.build_problem(answer=given_answer)\r\n self.assert_multiple_grade(problem, correct_responses, incorrect_responses)", "def test_add_floats(self):\n print(\"---running test_add_floats\")\n result = some_math.add(10.5, 2)\n assert result == 12.5", "def test_op_one_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n offl_a = stream.bind(a)\n offl_a.one()\n offl_a.update_host()\n stream.sync()\n self.assertTrue((a == 1.0).all(),\n \"Array should be all one.\" + str(a))", "def check_for_float(check):", "def test_source_with_float_value():\n source = festim.Source(2.0, volume=1, field=\"solute\")\n assert isinstance(source.value, f.Constant)", "def write_float(self, f: float) -> None:\n self.write(STRUCT_FLOAT.pack(f))", "def test__get_float(self, value, result):\n test_result = _get_float(value)\n self.assertEqual(result, test_result)", "def testFloatValue(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User',\n u'[email protected]')\n namespace = createNamespace(user, u'name')\n tag = createTag(user, namespace, u'tag')\n self.store.add(TagValue(user.id, tag.id, objectID, 42.1))", "async def test_floats_get_rounded_correctly(hass, mock_hub):\n register_config = {\n CONF_COUNT: 1,\n CONF_DATA_TYPE: DATA_TYPE_INT,\n CONF_SCALE: 1.5,\n CONF_OFFSET: 0,\n CONF_PRECISION: 0,\n }\n await run_test(\n hass,\n mock_hub,\n register_config,\n SENSOR_DOMAIN,\n register_words=[1],\n expected=\"2\",\n )", "def test_op_fill_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n value = 7342.2137\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n expect = numpy.empty_like(a)\n expect[:] = value\n offl_a = stream.bind(a)\n offl_a.fill(value)\n offl_a.update_host()\n stream.sync()\n self.assertTrue((a == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(a, expect))", "def test_float():\n assert float(Quantity(1, unit('m'))) == float(1)", "def test_wiki_toc_isfloat_true(self):\n from .wiki_toc import isfloat\n value = isfloat(value='40.22222')\n self.assertTrue(value is True)", "def testSetWithFloat(self):\n self.node.sat = 100.1\n\n self.assertEqual(\n Decimal('100.1'),\n self.node.sat\n )", "def test_float_single_precision(self):\n data = service_call.encode_call(\"foo\", 1. 
+ 1e-8)\n name, args = service_call.decode_call(data)\n\n # Check that we have the marker for single precision float\n self.assertEqual(1., args)", "def test_float_variable(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"float alpha = -0.5432\")\n assert bb._var == {\"alpha\": -0.5432}", "def testtofloatString ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTup1, expRes in self.knownFloatStringValues:\r\n\t\t\tfrac1 = eval ( r.sub ( 'frac.frac', fracTup1 ) ) \r\n\t\t\tself.assertEqual ( frac1.tofloatString (), expRes )", "def test_wiki_toc_isfloat_false(self):\n from .wiki_toc import isfloat\n value = isfloat(value='test_float')\n self.assertTrue(value is False)", "def read_floats(filepointer):\n\tdata = read_strings(filepointer)\n\tif not data:\n\t\treturn None\n\ttry:\n\t\tdata = [float(x) for x in data]\n\t\treturn data\n\texcept:\n\t\t# try the next line\n\t\treturn read_floats(filepointer)", "def test_float_range_2():\n try:\n float_range('2.0')\n assert False # Should be unreachable\n except Exception:\n pass", "def readFloat(self) -> float:\n return self._unpack('!f', 4)", "def read_float(stream, size):\n\t\n\tif size not in (0, 4, 8):\n\t\traise IOError('Cannot read floating point values with lengths other than 0, 4, or 8 bytes.')\n\tvalue = 0.0\n\tif size in (4, 8):\n\t\tdata = stream.read(size)\n\t\tvalue = struct.unpack({\n\t\t\t4: '>f',\n\t\t\t8: '>d'\n\t\t}[size], data)[0]\n\treturn value", "def test_denominator_float(self):\n steps = save_divide(np.ones(2), 2)\n np.testing.assert_equal(steps, 0.5 * np.ones(2))", "def test_float_range():\n assert 0.5 == float_range('0.5')", "def test_op_setslice_scalar_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n b = 2.5\n\n expect = numpy.empty_like(a)\n expect[:] = b\n\n offl_a = stream.bind(a)\n offl_a[:] = b\n offl_a.update_host()\n stream.sync()\n\n self.assertTrue((a == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(a, expect))", "def read_float(stream, writer_schema=None, reader_schema=None): # noqa\n return unpack('<f', stream.read(4))[0]", "def test_create_maze_with_float(self):\n try:\n _ = Maze(4.0, 4)\n self.assertEqual(True, False, 'should not have got here: '\n 'maze created with float index.')\n except TypeError:\n self.assertEqual(True, True)", "def test_creation_float():\n with pytest.raises(ValueError) as __:\n value = 42.30474\n __ = param.Integer(value=value)", "def testSetPowerWithFloat(self):\n self.node.power = 100.1\n\n self.assertEqual(\n (Decimal('100.1'), Decimal('100.1'), Decimal('100.1')),\n self.node.power\n )", "def write_float(self, f):\n if not isinstance(f, float):\n raise TypeError(\"expected a float, got %r\" % (type(f),))\n\n self.write(self._packers[\"f\"].pack(f))", "def write_float(self, process_handle: int, address: int, value):\n self.__bufferSize = 4\n is_write = self.__write_bytes(process_handle, address, value)\n return True if is_write else False", "def writeFloat(self, value: float):\n self._pack('!f', value)", "def test_numerator_float(self):\n steps = save_divide(1, np.ones(2) * 2)\n np.testing.assert_equal(steps, 0.5 * np.ones(2))", "def test_op_zero_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n offl_a = stream.bind(a)\n offl_a.zero()\n offl_a.update_host()\n stream.sync()\n self.assertEqual(sum(a), 0.0,\n \"Array should be all zeros.\")", "def 
test_deserialize_float_value(self):\n from petstore_api.model import banana\n _response_for_200 = api_client.OpenApiResponse(\n content={\n self.json_content_type: api_client.MediaType(schema=banana.Banana),\n },\n )\n data = {\n 'lengthCm': 3.1415\n }\n response = self.__response(data)\n deserialized = _response_for_200.deserialize(response, self.configuration)\n body = deserialized.body\n self.assertTrue(isinstance(body, banana.Banana))\n self.assertTrue(isinstance(body.lengthCm, Decimal))\n self.assertEqual(body.lengthCm, 3.1415)\n\n \"\"\"\n Float value is serialized without decimal point\n The client receive it as an integer, which work because Banana.lengthCm is type number without format\n Which accepts int AND float\n \"\"\"\n data = {\n 'lengthCm': 3\n }\n response = self.__response(data)\n deserialized = _response_for_200.deserialize(response, self.configuration)\n body = deserialized.body\n self.assertTrue(isinstance(body, banana.Banana))\n self.assertTrue(isinstance(body.lengthCm, Decimal))\n self.assertEqual(body.lengthCm, 3)", "def test_getfloat(self):\n self.assertEqual(self.config.getfloat('advanced','m'),42.0)", "def checkFloat(comment, value, expected, tol=1e-10, update=True):\n if np.isnan(value) and np.isnan(expected):\n res = True\n elif np.isnan(value) or np.isnan(expected):\n res = False\n else:\n res = abs(value - expected) <= tol\n if update:\n if not res:\n print(\"checking float\",comment,'|',value,\"!=\",expected)\n results[\"fail\"] += 1\n else:\n results[\"pass\"] += 1\n return res", "def test_add_with_float_arg(self):\n\n a = Vec3(2, 3, 4)\n b = 5.0\n\n result = a + b\n\n expected_result = Vec3(7, 8, 9)\n\n self.assertEqual(result, expected_result)", "def read_float(data):\n s_type = \"=%s\" % get_type(\"float\")\n return struct.unpack(s_type, data.read(4))[0]", "def test_small_floats(self) -> None:\n f = open('test_files/track-with-small-floats.gpx')\n \n\n gpx = mod_gpxpy.parse(f)\n\n xml = gpx.to_xml()\n self.assertNotIn('e-', xml)", "def test_float32(self):\r\n start, stop, step = fscalars('start', 'stop', 'step')\r\n out = arange(start, stop, step)\r\n f = function([start, stop, step], out)\r\n\r\n if config.cast_policy == 'custom':\r\n assert out.dtype == start.type.dtype\r\n elif config.cast_policy == 'numpy':\r\n numpy_dtype = numpy.arange(numpy.array(0, dtype=start.dtype),\r\n numpy.array(1, dtype=stop.dtype),\r\n numpy.array(1, dtype=step.dtype)).dtype\r\n assert out.dtype == numpy_dtype\r\n elif config.cast_policy == 'numpy+floatX':\r\n assert out.dtype == config.floatX\r\n else:\r\n raise NotImplementedError(config.cast_policy)\r\n arg_vals = [(0, 5, 1), (2, 11, 4), (-5, 1.1, 1.2), (1.3, 2,\r\n -2.1), (10, 2, 2)]\r\n for arg_v in arg_vals:\r\n start_v, stop_v, step_v = arg_v\r\n start_v_, stop_v_, step_v_ = numpy.asarray(arg_v,\r\n dtype=start.type.dtype)\r\n f_val = f(start_v_, stop_v_, step_v_)\r\n if config.cast_policy == 'custom':\r\n expected_val = numpy.arange(start_v, stop_v, step_v,\r\n dtype=start.type.dtype)\r\n elif config.cast_policy in ('numpy', 'numpy+floatX'):\r\n expected_val = numpy.arange(start_v_, stop_v_, step_v_,\r\n dtype=out.dtype)\r\n else:\r\n raise NotImplementedError(config.cast_policy)\r\n assert numpy.all(f_val == expected_val)", "def test_float_range_3():\n try:\n float_range('foobar')\n assert False # Should be unreachable\n except Exception:\n pass", "def test_op_isub_scalar_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n s = 1.3\n\n 
old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a - s\n\n offl_a = stream.bind(a)\n offl_a -= s\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old).all(),\n \"Input array operand must be modified: \"\n \"{0} should be {1}\".format(r, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_data_out_of_range(parallel, fast_reader, guess):\n # Python reader and strtod() are expected to return precise results\n rtol = 1.0e-30\n\n # Update fast_reader dict; adapt relative precision for fast_converter\n if fast_reader:\n fast_reader[\"parallel\"] = parallel\n if fast_reader.get(\"use_fast_converter\"):\n rtol = 1.0e-15\n elif np.iinfo(np.int_).dtype == np.dtype(np.int32):\n # On 32bit the standard C parser (strtod) returns strings for these\n pytest.xfail(\"C parser cannot handle float64 on 32bit systems\")\n\n if parallel:\n if not fast_reader:\n pytest.skip(\"Multiprocessing only available in fast reader\")\n elif CI:\n pytest.xfail(\"Multiprocessing can sometimes fail on CI\")\n\n test_for_warnings = fast_reader and not parallel\n if not parallel and not fast_reader:\n ctx = nullcontext()\n else:\n ctx = pytest.warns()\n\n fields = [\"10.1E+199\", \"3.14e+313\", \"2048e+306\", \"0.6E-325\", \"-2.e345\"]\n values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf])\n # NOTE: Warning behavior varies for the parameters being passed in.\n with ctx as w:\n t = ascii.read(\n StringIO(\" \".join(fields)),\n format=\"no_header\",\n guess=guess,\n fast_reader=fast_reader,\n )\n if test_for_warnings: # Assert precision warnings for cols 2-5\n assert len(w) == 4\n for i in range(len(w)):\n assert f\"OverflowError converting to FloatType in column col{i+2}\" in str(\n w[i].message\n )\n read_values = np.array([col[0] for col in t.itercols()])\n assert_almost_equal(read_values, values, rtol=rtol, atol=1.0e-324)\n\n # Test some additional corner cases\n fields = [\n \".0101E202\",\n \"0.000000314E+314\",\n \"1777E+305\",\n \"-1799E+305\",\n \"0.2e-323\",\n \"5200e-327\",\n \" 0.0000000000000000000001024E+330\",\n ]\n values = np.array(\n [1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308]\n )\n with ctx as w:\n t = ascii.read(\n StringIO(\" \".join(fields)),\n format=\"no_header\",\n guess=guess,\n fast_reader=fast_reader,\n )\n if test_for_warnings: # Assert precision warnings for cols 4-6\n assert len(w) == 3\n for i in range(len(w)):\n assert f\"OverflowError converting to FloatType in column col{i+4}\" in str(\n w[i].message\n )\n read_values = np.array([col[0] for col in t.itercols()])\n assert_almost_equal(read_values, values, rtol=rtol, atol=1.0e-324)\n\n # Test corner cases again with non-standard exponent_style (auto-detection)\n if fast_reader and fast_reader.get(\"use_fast_converter\"):\n fast_reader.update({\"exponent_style\": \"A\"})\n else:\n pytest.skip(\"Fortran exponent style only available in fast converter\")\n\n fields = [\n \".0101D202\",\n \"0.000000314d+314\",\n \"1777+305\",\n \"-1799E+305\",\n \"0.2e-323\",\n \"2500-327\",\n \" 0.0000000000000000000001024Q+330\",\n ]\n with ctx as w:\n t = ascii.read(\n StringIO(\" \".join(fields)),\n format=\"no_header\",\n guess=guess,\n fast_reader=fast_reader,\n )\n if test_for_warnings:\n assert len(w) == 3\n read_values = np.array([col[0] for col in t.itercols()])\n assert_almost_equal(read_values, values, rtol=rtol, atol=1.0e-324)", "def test_sum_list_floats(self):\n\n list_of_floats = [1.2, 2.34, 
2.001]\n result = sum(list_of_floats)\n\n self.assertEqual(result, 5.541)", "def test_sub_float():\n with pytest.raises(ValueError) as __:\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n num_a.value -= 1.5", "def test_op_sub_scalar_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n s = 1.3\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a - s\n\n offl_a = stream.bind(a)\n offl_r = offl_a - s\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "async def test_floating_point_scale(hass, mock_hub):\n register_config = {\n CONF_COUNT: 1,\n CONF_DATA_TYPE: DATA_TYPE_INT,\n CONF_SCALE: 2.4,\n CONF_OFFSET: 0,\n CONF_PRECISION: 2,\n }\n await run_test(\n hass,\n mock_hub,\n register_config,\n SENSOR_DOMAIN,\n register_words=[1],\n expected=\"2.40\",\n )", "def test_mod_float():\n with pytest.raises(ValueError) as __:\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n num_a.value %= 1.5", "def test_primitives(logger, class_, raw_bytes, expected):\n # Set up the generator with the raw bytes:\n position = recover(raw_bytes)\n data = next(position)\n assert data == ''\n\n result = class_.load(position)\n\n if issubclass(class_, Float):\n assert round(result.value, 3) == expected\n\n else:\n assert result.value == expected", "def write_float_array(f, path, values, dtype='f8'):\n dset = f.create_dataset(path, (len(values),), dtype=dtype)\n dset[:] = values\n f.flush()", "async def test_floating_point_encoding(self, r):\n await r.flushdb()\n timestamp = 1349673917.939762\n await r.zadd('a', timestamp, 'a1')\n assert await r.zscore('a', 'a1') == timestamp", "def read_float(self, process_handle: int, address: int):\n self.__bufferSize = 4\n value = self.__read_bytes(process_handle, address)\n return None if value is None else unpack('<f', bytearray(value))", "def test_op_add_scalar_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n s = 1.3\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a + s\n\n offl_a = stream.bind(a)\n offl_r = offl_a + s\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_float_remoteness_allowed(self):\n\n # Two different classes with only slightly different storage_types\n class FrontentSlow(VerboseDataDir):\n storage_type = 10\n _verbose = True\n\n class FrontentSlightlySlower(VerboseDataDir):\n storage_type = 10.001\n raise_when_run = False\n _verbose = True\n\n def find(self, *args, **kwargs):\n print(self.raise_when_run)\n if self.raise_when_run:\n raise strax.testutils.SomeCrash\n return super().find(*args, **kwargs)\n\n storage_slow = FrontentSlow(self._sub_dir('slow'))\n storage_slightly_slow = 
FrontentSlightlySlower(self._sub_dir('slightlyslow'))\n storages = [storage_slow, storage_slightly_slow]\n st = strax.Context(storage=storages, **self.context_kwargs)\n\n # Make the data, it should be in both frontends\n st.make(self.run_id, self.target)\n self.assertTrue(st.is_stored(self.run_id, self.target))\n for sf in storages:\n self.assertTrue(st._is_stored_in_sf(self.run_id, self.target, sf), str(sf))\n\n # Now set the charge, if the slightly slower frontend is asked\n # for data, it will raise an error\n storage_slightly_slow.raise_when_run = True\n st.set_context_config({'forbid_creation_of': '*'})\n # No error raises because we get the storage_slow's data\n st.get_array(self.run_id, self.target)\n # just to be sure, we would have gotten an error if it would\n # have gotten data from storage_slightly_slow\n with self.assertRaises(strax.testutils.SomeCrash):\n st.storage = [storage_slightly_slow]\n print(st.storage)\n st.is_stored(self.run_id, self.target)", "def testFloatInput(self):\n nb.rescale_length(2.0)\n self.assertEqual(2.0, nb.rscale)", "def read_float(self):\n return self._packers[\"f\"].unpack(self.read(4))[0]", "def testSetOffsetWithFloat(self):\n self.node.offset = 100.1\n\n self.assertEqual(\n (Decimal('100.1'), Decimal('100.1'), Decimal('100.1')),\n self.node.offset\n )", "def read(reader: BitStreamReader, _index: int) -> float:\n\n return reader.readFloat64()", "def test_add_float():\n with pytest.raises(ValueError) as __:\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n num_a.value += 1.5", "def is_float(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_float)", "def write_float(self, registeraddress, value, numberOfRegisters=2):\n _checkNumerical(value, description='input value')\n _checkInt(numberOfRegisters, minvalue=2, maxvalue=4, description='number of registers')\n self._genericCommand(16, registeraddress, value, \\\n numberOfRegisters=numberOfRegisters, payloadformat='float')", "def read_floats(self, count=1, location=None):\n return_vals = []\n byteorder = {'little':'<f', 'big':'>f'}[self._byteorder]\n if self._tiff is not None:\n off = self._offset\n if location is not None:\n off = location\n for c in range(count):\n return_vals.append(unpack_from(byteorder, self._tiff[off:off+4])[0])\n off += 4# size\n if location is None:\n self._offset += (count * 4) #size)\n return return_vals", "def is_float(*args): \n try:\n for i in args:\n float(i)\n return True\n except Exception:\n return False", "def assert_floats_equal(expected, received,message=None):\n number = [float, int] # list of number types\n if type(expected) not in number:\n if message is None:\n message = ('assert_floats_equal: first argument %s is not a number' % repr(expected))\n elif type(received) not in number:\n if message is None:\n message = ('assert_floats_equal: second argument %s is not a number' % repr(received))\n elif (not isclose(expected,received)):\n if message is None:\n message = 'assert_floats_equal: expected %s but instead got %s' % (repr(expected),repr(received))\n else:\n message = None\n \n if not message is None:\n quit_with_error(message)", "def test_feature_format(X):\r\n print(\"test_feature_format()...\", end = \"\")\r\n for row in range(len(X)):\r\n for col in range(len(X[0])):\r\n assert (isinstance(X[row][col], float) == True)\r\n print(\"Passed!\")", "def testSetWithNegativeFloat(self):\n def setSat():\n self.node.sat = -20.1\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSat\n )\n\n 
cdl_convert.config.HALT_ON_ERROR = False\n\n setSat()\n\n self.assertEqual(\n Decimal('0.0'),\n self.node.sat\n )", "def _readFloat(self, rawData, offset=0):\n val, = unpack(\n self.floatFormat, rawData[\n offset:offset + self.floatFormatLen])\n \n return val", "def test_ParameterVariable_write_complex_float(self, mock_f):\n\n par = provide_parameter(\"double\", \"test\", value=5.4,\n comment=\"test comment\")\n\n with mock_f('test.txt', 'w') as m_fo:\n write_parameter(m_fo, parameter=par, stop_character=\")\")\n\n expected_writes = [unittest.mock.call(\"double test\"),\n unittest.mock.call(\" = 5.4\"),\n unittest.mock.call(\")\"),\n unittest.mock.call(\"// test comment\"),\n unittest.mock.call(\"\\n\")]\n\n mock_f.assert_called_with('test.txt', 'w')\n handle = mock_f()\n handle.write.assert_has_calls(expected_writes, any_order=False)", "def test_data(filename, col, day):\r\n\r\n extracted = extract_info(filename)\r\n formatted = format(extracted, col, day) # calling my funcations\r\n float_rep = float(formatted) # getting the float representation of the info\r\n return float_rep", "def test_mul_float():\n with pytest.raises(ValueError) as __:\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n num_a.value *= 1.5", "def test_float_type(self):\n\n input_ = 1.2\n expected = ValueError\n with self.assertRaises(expected):\n math.factorial(input_)", "def testSetPowerWithNegativeFloat(self):\n def setPower():\n self.node.power = -20.1\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setPower\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setPower()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('0.0'), Decimal('0.0')),\n self.node.power\n )", "def testSetSlopeWithFloat(self):\n self.node.slope = 100.1\n\n self.assertEqual(\n (Decimal('100.1'), Decimal('100.1'), Decimal('100.1')),\n self.node.slope\n )", "async def put_float( # pylint: disable=inconsistent-return-statements\n self, complex_body: IO, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:", "def ReadFloat(self, endian=\"<\"):\n return self.unpack(\"%sf\" % endian, 4)", "def write_float32(self, f: float) -> None:\n self.buffer += struct.pack(\"<f\", f)", "def read(reader: BitStreamReader, _index: int) -> float:\n\n return reader.readFloat32()", "def test_irr_read(irregular_written_data):\n\n fp, written = irregular_written_data\n with openEDF(fp) as reader:\n arr = reader.read(0)\n #imprecision due to 2-byte conversion so tolerance set to 1 unit\n assert np.allclose(written, arr, equal_nan=True, atol=1)", "def testConvertIntegerToFloat(self):\n message = protojson.decode_message(MyMessage, '{\"a_float\": 10}')\n\n self.assertTrue(isinstance(message.a_float, float))\n self.assertEquals(10.0, message.a_float)", "def test_op_setslice_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n b = a + 2.5\n expect = numpy.empty_like(b)\n old_b = numpy.empty_like(b)\n expect[:] = b[:]\n old_b[:] = b[:]\n\n offl_a = stream.bind(a)\n offl_a[:] = b\n offl_a.update_host()\n stream.sync()\n\n self.assertTrue((b == old_b).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_b))\n self.assertTrue((a == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(a, expect))", "def write(writer: BitStreamWriter, value: float) -> None:\n\n writer.writeFloat64(value)", "def isFloat(value): \n try:\n float(value)\n 
return True\n except ValueError:\n return False", "def write(writer: BitStreamWriter, value: float) -> None:\n\n writer.writeFloat32(value)", "def test_temperature(self):\r\n self.assertEqual(Converter.TemperatureCtoF(50), 122)\r\n self.assertEqual(Converter.TemperatureCtoF(-50), -58)\r\n self.assertEqual(Converter.TemperatureFtoC(50), 10)\r\n self.assertAlmostEqual(Converter.TemperatureFtoC(-50), -45.55, places=0)", "def test_renderer_works_correctly_with_numpy_floating(self):\n data = numpy.float32(0.0)\n rendered = self.renderer.render(\n data=data, media_type=\"application/json\", renderer_context={}\n )\n reloaded = orjson.loads(rendered)\n\n self.assertEqual(reloaded, data)", "def give_me_a_float():\n return 5.8\n pass", "def convertStringToFloat(xmlNode):\n try:\n val = float(xmlNode.text)\n return val\n except (ValueError,TypeError):\n raise IOError('Real value is required for content of node %s, but got %s' %(node.tag, node.text))", "def test_iadd_with_float_argument(self):\n\n a = Vec3(2, 3, 4)\n b = 1.0\n\n a += b\n\n expected_result = Vec3(3, 4, 5)\n\n self.assertEqual(a, expected_result)", "def test_op_mul_scalar_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n s = 1.3\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a * s\n\n offl_a = stream.bind(a)\n offl_r = offl_a * s\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))" ]
[ "0.71284944", "0.7088308", "0.6970931", "0.6921978", "0.68760276", "0.6846847", "0.68283427", "0.67947567", "0.67181623", "0.6711072", "0.66815597", "0.65003604", "0.6471494", "0.6447859", "0.6403583", "0.63836086", "0.6375022", "0.63572294", "0.629849", "0.62963325", "0.62845504", "0.62635535", "0.6256603", "0.62111646", "0.62075514", "0.61898947", "0.6181282", "0.6174005", "0.6160491", "0.61570406", "0.6154437", "0.61363816", "0.6134698", "0.6105179", "0.61050117", "0.6104329", "0.6103412", "0.60800457", "0.6066351", "0.60139775", "0.60034585", "0.596201", "0.5958375", "0.588691", "0.58766717", "0.5867646", "0.58641416", "0.5863294", "0.5859543", "0.58551043", "0.58522207", "0.58444774", "0.5844088", "0.5841523", "0.5830533", "0.58134145", "0.580674", "0.58067", "0.5806425", "0.5791023", "0.5778065", "0.576989", "0.57681", "0.57578284", "0.5734281", "0.5724873", "0.5718287", "0.5712098", "0.5708608", "0.5706068", "0.5703113", "0.569766", "0.56864274", "0.56832063", "0.56808335", "0.5680271", "0.5678295", "0.5671299", "0.56674725", "0.5647367", "0.5635664", "0.56348354", "0.56283927", "0.56265867", "0.56264484", "0.5588514", "0.5584674", "0.5581081", "0.5574695", "0.5574395", "0.55733466", "0.55727", "0.55579656", "0.5557643", "0.55570555", "0.5551454", "0.5550388", "0.55496365", "0.55477226", "0.55454373" ]
0.7005608
2
Test read and write booleans.
def test_message_boolean():
    result = True
    message = msg.Message()
    for i in range(num_it):
        message.appendBoolean(True if i % 2 == 0 else False)
        if message.length != msg.HEADER_SIZE + (i+1)*msg.boolStruct.size:
            print("Size is ", message.length, " but should be ", msg.HEADER_SIZE + (i+1)*msg.boolStruct.size)
            print("Error : message.appendBoolean")
            result = False
    message.resetCursor()
    for i in range(num_it):
        r = message.readBoolean()
        if r != (True if i % 2 == 0 else False):
            print(r, " vs ", (True if i % 2 == 0 else False))
            print("Error : message.read/appendBoolean")
            result = False
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_boolean_roundtrip():\n for b in (True, False):\n assert b == Boolean.read(Boolean.to_bytes(b))", "def read(reader: BitStreamReader, _index: int) -> bool:\n\n return reader.readBool()", "def test_bool_field():", "def test_for_bool(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"for bool b in [True, False]\\n\\tUnaryGate(b, 0) | 0\"\n )\n assert np.all(\n bb._forvar[\"b\"] == np.array([True, False])\n )", "def test_command_edit_info_boolean_flags():\n def f(inputfile):\n with tempfile.NamedTemporaryFile() as tmp:\n shutil.copy(inputfile, tmp.name)\n\n for flag in (\"write_protected\", \"synchronized\", \"cleaned\"):\n for true_value, false_value in ((\"1\", \"0\"),\n (\"yes\", \"no\"),\n (\"YES\", \"No\"),\n (\"true\", \"false\"),\n (\"tRuE\", \"FaLsE\")):\n wozardry.parse_args([\"edit\", \"-i\", \"%s:%s\" % (flag, true_value), tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[flag] == True\n wozardry.parse_args([\"edit\", \"-i\", \"%s:%s\" % (flag, false_value), tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[flag] == False\n f(kValid1)\n f(kValid2)", "def check_for_bool(check):", "def test_boolstyle_round_trip(self):\n def do_round_trip(trueval, falseval, invalid=False):\n logger.debug('Exporting to csv file: {} with bool style {},{}'.format(tempfile.name, trueval, falseval))\n _, err, _ = self.run_cqlsh(cmds=\"COPY ks.testbooleans TO '{}' WITH BOOLSTYLE='{}, {}'\"\n .format(tempfile.name, trueval, falseval))\n if invalid:\n expected_err = \"Invalid boolean styles [{}, {}]\".format(\n ', '.join([\"'{}'\".format(s.strip()) for s in trueval.split(',')]),\n ', '.join([\"'{}'\".format(s.strip()) for s in falseval.split(',')]))\n assert expected_err in err\n return\n\n tempfile_rows_as_list = list(csv_rows(tempfile.name))\n assert [['0', falseval], ['1', trueval]] == sorted(tempfile_rows_as_list)\n exported_results = list(self.session.execute(\"SELECT * FROM testbooleans\"))\n\n logger.debug('Importing from csv file: {}'.format(tempfile.name))\n self.session.execute('TRUNCATE ks.testbooleans')\n self.run_cqlsh(cmds=\"COPY ks.testbooleans FROM '{}' WITH BOOLSTYLE='{}, {}'\"\n .format(tempfile.name, trueval, falseval))\n\n imported_results = list(self.session.execute(\"SELECT * FROM testbooleans\"))\n assert sorted(exported_results) == sorted(imported_results)\n\n self.prepare()\n self.session.execute(\"\"\"\n CREATE TABLE testbooleans (\n a int PRIMARY KEY,\n b boolean\n )\"\"\")\n\n insert_statement = self.session.prepare(\"INSERT INTO testbooleans (a, b) VALUES (?, ?)\")\n self.session.execute(insert_statement, [0, False])\n self.session.execute(insert_statement, [1, True])\n tempfile = self.get_temp_file()\n\n do_round_trip('True', 'False')\n do_round_trip('TRUE', 'FALSE')\n do_round_trip('yes', 'no')\n do_round_trip('1', '0')\n do_round_trip('TRUE', 'no')\n do_round_trip('True', '0')\n\n do_round_trip('TRUE', 'TRUE', invalid=True)\n do_round_trip('TRUE', '', invalid=True)\n do_round_trip('', 'FALSE', invalid=True)\n do_round_trip('', '', invalid=True)\n do_round_trip('yes, no', 'maybe', invalid=True)", "def test_true_false_cases(file_with_true_and_false_value, result, inp):\n assert read_magic_number(file_with_true_and_false_value) == result", "def readBoolean(self) -> bool:\n return self.readByte() == 1", "def read_bool(self):\n return self.read_uint32() == 1", "def test_getboolean(self):\n 
self.assertEqual(self.config.getboolean('advanced','bool'),True)", "def test_bool_variable(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"bool b1 = True\\n bool b2 = False\")\n assert bb._var == {\"b1\": True, \"b2\": False}", "def test_get_value_bool(self):\n val = self.setting_bool.get_value()\n self.assertIsInstance(val, bool)\n self.assertEqual(val, True)", "def test_get_value_bool(self):\n val = self.setting_bool.get_value()\n self.assertIsInstance(val, bool)\n self.assertEqual(val, True)", "def read_bool(self):\n return bool(self.read_and_unpack('l')[0])", "def _testReadWrite(self):\n self.shouldWrite = True\n\n def checkReadInput(fd):\n self.assertEquals(fd.read(1), b'x')\n self._reactor.stop()\n\n def writeOnce(fd):\n if self.shouldWrite:\n self.shouldWrite = False\n fd.write(b'x')\n self._reader = Reader(self._p1, checkReadInput)\n self._writer = Writer(self._p2, writeOnce)\n\n self._reactor.addWriter(self._writer)\n\n # Test that adding the reader twice adds it only once to\n # IOLoop.\n self._reactor.addReader(self._reader)\n self._reactor.addReader(self._reader)", "def test_human_readable_boolean_false():\n # TODO: add a test case that follows the provided example", "def canwrite(self):\n return False", "def test_boolean_logic(self):\n\n self.assertTrue(True, \"True should be equal to True.\")\n self.assertFalse(False, \"False should not be equal to True.\")\n self.assertNotEqual(True, False, \"True should not be equal to False.\")", "def read_bool(data):\n s_type = \"=%s\" % get_type(\"bool\")\n return struct.unpack(s_type, data.read(1))[0]", "def test_read(self):\n with patch('RPi.GPIO.setmode') as mock_setmode:\n gpio = GPIODevice()\n with patch('RPi.GPIO.setup') as mock_setup:\n with patch('RPi.GPIO.input') as mock_input:\n mock_input.return_value = True\n value = gpio.read(0)\n with patch('RPi.GPIO.cleanup') as mock_cleanup:\n gpio.close()\n self.assertDictEqual(value, {\"value\": True})", "async def test_fan_read_state(hass: HomeAssistant, utcnow) -> None:\n helper = await setup_test_component(hass, create_fan_service)\n\n state = await helper.async_update(\n ServicesTypes.FAN, {CharacteristicsTypes.ON: False}\n )\n assert state.state == \"off\"\n\n state = await helper.async_update(\n ServicesTypes.FAN, {CharacteristicsTypes.ON: True}\n )\n assert state.state == \"on\"", "def test_isread_command(self):\r\n bm = BmarkMock()\r\n bm.tags['toread'] = True\r\n updated = IsRead.run(bm)\r\n self.assertTrue(\r\n 'toread' not in updated.tags,\r\n \"Updated bmark should not have 'toread' tag set\")", "def ReadBool(self):\n return self.unpack('?')", "def canread(self):\n return False", "def read_bool(stream, indent=INDENT):\n value = streambyte_to_int(stream, 1)[0]\n return str(bool(value)) + \" [HEX: \" + hex(value) + \"]\"", "def test_bool(self, env: yaenv.Env):\n _val = env.bool('BOOL_VAR')\n assert not _val and type(_val) == bool\n _val = env.bool('INT_VAR')\n assert _val and type(_val) == bool\n _val = env.bool('MISSING', True)\n assert _val and type(_val) == bool\n with pytest.raises(yaenv.EnvError) as err:\n _ = env.bool('FLOAT_VAR')\n assert 'Invalid boolean' in str(err.value)\n assert env.bool('MISSING') is None", "def test_boolean(self):\n endpoint = self.api.boolean\n assert endpoint.openapi_types['body'] == (bool,)\n assert endpoint.settings['response_type'] == (bool,)", "def _readBool(self, rawData, offset=0):\n val, = unpack(\n self.boolFormat, rawData[\n offset:offset + self.boolFormatLen])\n \n return val", "def writeable(self):\n return 
self._cc[14] == 0", "def __bool__(self):\n raise RuntimeError(\"Cannot evaluate CrypTensors to boolean values\")", "def test_read(self):\n class Test(pyperry.Base): pass\n Test.configure('read', adapter=TestAdapter)\n self.assertEqual(Test.adapter('read').mode, 'read')\n\n Test.configure('write', adapter=TestAdapter)\n self.assertEqual(Test.adapter('write').mode, 'write')", "def writable(self):\n return bool(self.buffer)", "def getBoolean(self, int: int, int2: int) -> bool:\n ...", "def test_python_bool(self):\n\n m = Mothur(**self.init_vars)\n self.set_current_dirs(m)\n m.pcr.seqs(fasta='test_fasta_1.fasta', start=20, keepdots=False)\n m.pcr.seqs(fasta='test_fasta_1.fasta', start=20, keepdots=True)\n\n return", "def _writeBool(self, val):\n self.__writeValue(self.boolFormat, val)", "def test_utils_to_bool(self, tcex, input_, expected):\n result = tcex.utils.to_bool(input_)\n assert result == expected, f'Input {input_} result of {result} != {expected}'", "async def test_fanv2_read_state(hass: HomeAssistant, utcnow) -> None:\n helper = await setup_test_component(hass, create_fanv2_service)\n\n state = await helper.async_update(\n ServicesTypes.FAN_V2, {CharacteristicsTypes.ACTIVE: False}\n )\n assert state.state == \"off\"\n\n state = await helper.async_update(\n ServicesTypes.FAN_V2, {CharacteristicsTypes.ACTIVE: True}\n )\n assert state.state == \"on\"", "def test_boolean_custom_values(self):\n true_values = ['YES', 'yes', 'Yes']\n false_values = ['NO', 'no', 'No']\n wrong_values = ['true', 'false', 'True', 'False', 'y', 'n', 'Y', 'N', 't', '1', 1, '0', 0]\n descriptor = self.base_field_descriptor\n descriptor['type'] = 'boolean'\n # only 'default' format\n descriptor['format'] = 'default'\n descriptor['trueValues'] = true_values\n descriptor['falseValues'] = false_values\n\n f = SchemaField(descriptor)\n for v in true_values:\n self.assertTrue(f.cast(v))\n for v in false_values:\n self.assertFalse(f.cast(v))\n for v in wrong_values:\n with self.assertRaises(Exception):\n f.cast(v)", "def _flagsTest(self, method, item):\n d = getattr(self.client, method)('3', ('\\\\Read', '\\\\Seen'), False)\n self.assertEqual(\n self.transport.value(),\n b'0001 STORE 3 ' + item + b' (\\\\Read \\\\Seen)\\r\\n')\n self.client.lineReceived(b'* 3 FETCH (FLAGS (\\\\Read \\\\Seen))')\n self.client.lineReceived(b'0001 OK STORE completed')\n self.assertEqual(\n self.successResultOf(d),\n {3: {'FLAGS': ['\\\\Read', '\\\\Seen']}})", "def test_bool(self):\n mute_map = MutableMap()\n\n assert not mute_map\n\n mute_map['str_val'] = 'test'\n\n assert mute_map", "def is_read_only(self):\n return (self.get_name().startswith(\"b\")\n or self.get_name() == \"jump_cond\" # meta-instruction\n or self.get_name() == \"j\"\n or self.get_name() == \"ld\"\n or self.get_name() == \"lw\"\n or self.get_name() == \"lb\")", "def test_roundtrip_list():\n assert [True, False, True, False, True] == (\n List(Boolean).read(\n List(Boolean).to_bytes(\n [True, False, True, False, True]))\n )", "def __bool__(self):\n raise RuntimeError(\"Cannot evaluate BinarySharedTensors to boolean values\")", "def test_int_to_bool_true(self):\n self.assertEqual(TransformList.int_to_bool({'varname': 1}, 'varname'), True)", "def test_write(self):\n with patch('RPi.GPIO.setmode') as mock_setmode:\n gpio = GPIODevice()\n with patch('RPi.GPIO.setup') as mock_setup:\n with patch('RPi.GPIO.output') as mock_output:\n value = gpio.write(0, False)\n mock_output.called_once_with(0, False)\n with patch('RPi.GPIO.cleanup') as mock_cleanup:\n gpio.close()", "def 
test_flags(self):\n d = self._examineOrSelect()\n self._response(\n b'* FLAGS (\\\\Answered \\\\Flagged \\\\Deleted \\\\Seen \\\\Draft)')\n self.assertEqual(\n self.successResultOf(d), {\n 'READ-WRITE': False,\n 'FLAGS': ('\\\\Answered', '\\\\Flagged', '\\\\Deleted', '\\\\Seen',\n '\\\\Draft')})", "def bool(self):\n return bool(self.int(2))", "def test_convert_logical():", "def boolean_func(experiment):", "def __bool__(self):\n raise ValueError(\"bool() not permitted\")", "def set_boolean(x):\n\n if x:\n return \"True\"\n else:\n return \"False\"", "def test_human_readable_boolean_true():\n true_value = True\n true_value_human_readable = util.get_human_readable_boolean(true_value)\n assert true_value_human_readable == \"Yes, it is!\"", "def unix_read_write(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"unix_read_write\")", "def test_verify_state_of_a_device():", "def write(writer: BitStreamWriter, value: bool) -> None:\n\n writer.writeBool(value)", "def test_getboolean_with_default(self):\n self.assertEqual(self.config.getboolean('advanced','p'),None)\n self.assertEqual(self.config.getboolean('advanced','p',True),True)", "def is_raw_write(command): \n if command.startswith('<WRITE') and command.endswith('>'):\n return True\n else: \n return False\n # end if", "def _validate_can_write(self):\n if self._mode not in WRITE_MODES:\n raise IOError(\"File is not writable\")\n if self.Writable == 'no':\n raise IOError(\"'Writable' flag is 'no'\")", "def give_me_a_boolean():\n return True\n pass", "def get_bool(self, x, y, name):\r\n\r\n value = self.get_tile(x, y).get(name)\r\n return value in (True, 1, 'true', 'yes', 'True', 'Yes', '1', 'on', 'On')", "def test_no_io_on_bool():\n file = get_image_cache_file()\n bool(file)\n assert not file.storage.exists.called\n assert not file.storage.open.called", "def test_data(self):\n\n # Boolean tests\n is_datas = [True, False]\n for is_data in is_datas:\n self.colorspace.setIsData(is_data)\n self.assertEqual(is_data, self.colorspace.isData())\n\n # Wrong type tests\n wrong_is_datas = [['test'], 'test']\n for wrong_is_data in wrong_is_datas:\n with self.assertRaises(TypeError):\n self.colorspace.setIsData(wrong_is_data)", "def get_bool2(self):\n pass", "def unpack_true(data):\n return True, 0", "def test_boolean_in_serializer() -> None:\n assert cv.custom_serializer(cv.boolean) == {\n \"type\": \"boolean\",\n }", "async def test_input_boolean(client, input_boolean_entities) -> None:\n body = await generate_latest_metrics(client)\n\n assert (\n 'input_boolean_state{domain=\"input_boolean\",'\n 'entity=\"input_boolean.test\",'\n 'friendly_name=\"Test\"} 1.0' in body\n )\n\n assert (\n 'input_boolean_state{domain=\"input_boolean\",'\n 'entity=\"input_boolean.helper\",'\n 'friendly_name=\"Helper\"} 0.0' in body\n )", "def testBoolValue(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User',\n u'[email protected]')\n namespace = createNamespace(user, u'name')\n tag = createTag(user, namespace, u'tag')\n self.store.add(TagValue(user.id, tag.id, objectID, True))", "def truth(self, line, cell = None, bools = (\"False\", \"True\")):\n if line:\n print(truth(*line.split(\", \"), bools = bools))\n elif cell:\n print(truth(*cell.strip().split(\"\\n\"), bools = bools))", "def test_successful_read(self):\r\n\t\tself.assertTrue(self._configuration_ is not None)", "def testAllWrite(self):\n import time,copy\n time.sleep(2)\n client = ModbusTCP(self.config['vdevs']['slave']['icsifaces'][0],\n 
self.config['vdevs']['slave']['points']) \n\n pts = copy.deepcopy(self.config['vdevs']['slave']['points'])\n pts = [pt for pt in pts if pt['name']!='pressure'] #Can't write to pres\n ptnames = [ pt['name'] for pt in pts ]\n pointsvalues = dict(zip(ptnames, [0]*len(ptnames)))\n reply = client.writePoints(pointsvalues)\n assert reply is None, \"Write returned value other than None: \" + str(reply)\n reply = client.readPoints(ptnames)\n #print \"Reply: \", reply\n for pt in ptnames:\n #assert value == reply[ptnames.index(pt)]\n if not 0 == reply[ptnames.index(pt)]: \n print pt, ' was not read properly.'", "def _flagsSilentlyTest(self, method, item):\n d = getattr(self.client, method)('3', ('\\\\Read', '\\\\Seen'), True)\n self.assertEqual(\n self.transport.value(),\n b'0001 STORE 3 ' + item + b' (\\\\Read \\\\Seen)\\r\\n')\n self.client.lineReceived(b'0001 OK STORE completed')\n self.assertEqual(self.successResultOf(d), {})", "def kerberos5i_read_write(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"kerberos5i_read_write\")", "def test_true_is_true(self):\n self.assertEqual(True, True)", "def __bool__(x):\n if x.value == 1:\n return True\n elif x.value == -1:\n return False\n else:\n raise ValueError('cannot determine boolean value of Unknown')", "def test_hooks_falsy_by_default():\n config = util.read_config(\"some-nonexistant-path\")\n assert not config[\"pre_write\"]\n assert not config[\"post_write\"]", "def true(self):\n val = self.read(4)\n if val != b'true':\n self.on_parser_error(\"true token expected\")\n return True", "def test_checkFlags(self):\n self.failUnlessEqual(self.nice.opts['aflag'], 1)\n self.failUnlessEqual(self.nice.opts['flout'], 0)", "def decide (self):\n if (self.msg == b'1') or (self.msg == b'2'): # reads bats\n self.bat = True\n # else: # if doesn't read\n elif self.msg == b'': # if reads \"no bat\"\n self.bat = False", "def is_bool(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_bool)", "def on_off_bool(value):\n return value == 'on'", "def main():\n\n open_read_write()", "def testConfigC(self):\n assert type(self.config['debug']) == bool, \"Not parsing string to boolean correctly\"", "def bool(x) -> bool:\n pass", "def writable(self):\n return True", "def _check_rw_flag(self, rw_flag):\n\n rw_flag = rw_flag.lower()\n if rw_flag == \"r\":\n pass\n elif rw_flag == \"w\":\n pass\n else:\n raise ValueError(\"rw_flag must be 'r' or 'w'\")\n return rw_flag", "def test_int_to_bool_false(self):\n self.assertEqual(TransformList.int_to_bool({'varname': 0}, 'varname'), False)", "def test_safeGetBoolean(self):\n self.assertIs(\n BMConfigParser().safeGetBoolean('nonexistent', 'nonexistent'),\n False\n )\n # no arg for default\n # pylint: disable=too-many-function-args\n with self.assertRaises(TypeError):\n BMConfigParser().safeGetBoolean(\n 'nonexistent', 'nonexistent', True)", "def __bool__(self):\n return bool(self.get_value())", "async def test_v2_oscillate_read(hass: HomeAssistant, utcnow) -> None:\n helper = await setup_test_component(hass, create_fanv2_service)\n\n state = await helper.async_update(\n ServicesTypes.FAN_V2, {CharacteristicsTypes.SWING_MODE: 0}\n )\n assert state.attributes[\"oscillating\"] is False\n\n state = await helper.async_update(\n ServicesTypes.FAN_V2, {CharacteristicsTypes.SWING_MODE: 1}\n )\n assert state.attributes[\"oscillating\"] is True", "def test_enable_enabled():\n config_info = read_config()\n config_info['enabled'] = True\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n 
conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['enabled'] is True", "def test_project_reader(project):\n if is_server_administrator():\n return True\n if is_project_administrator(project):\n return True\n if is_project_writer(project):\n return True\n if is_project_reader(project):\n return True\n return False", "def test_io_in_out_loop(self):\n self.l.output(conf_io=0x1, state_io=0x0)\n for i in range(10):\n state_d, state_io, count = self.l.output(state_io=0x1)\n self.assertTrue(state_io & 0x2)\n state_d, state_io, count = self.l.output(state_io=0x0)\n self.assertTrue(not state_io & 0x2)", "def test_checkread(self):\n user1 = {'uid': 1, 'gid': 1}\n self.assertTrue(self.m._checkread(user1, {}))\n mock_image = {\n 'userACL': None,\n 'groupACL': None\n }\n # Test a public image with ACLs set to None\n self.assertTrue(self.m._checkread(user1, mock_image))\n # Now empty list instead of None. Treat it the same way.\n mock_image['userACL'] = []\n mock_image['groupACL'] = []\n self.assertTrue(self.m._checkread(user1, mock_image))\n self.assertTrue(self.m._checkread(user1, {'private': False}))\n # Private false should trump other things\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'userACL': [2]}))\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'groupACL': [2]}))\n # Now check a protected image that the user should\n # have access to\n mock_image['userACL'] = [1]\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 2, 'gid': 1}, mock_image))\n # Now check by groupACL\n mock_image['groupACL'] = [1]\n self.assertTrue(self.m._checkread({'uid': 3, 'gid': 1}, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 3, 'gid': 2}, mock_image))\n # What about an image with a list\n mock_image = {\n 'userACL': [1, 2, 3],\n 'groupACL': [4, 5, 6]\n }\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 7, 'gid': 7}, mock_image))", "def __bool__(self):\r\n return self.valid", "def test_read_write_mode(self, mock_database):\n experiment = create_experiment(\n \"a\", space={\"x\": \"uniform(0, 10)\"}, storage=mock_database.storage\n )\n assert experiment.mode == \"x\"\n\n experiment = get_experiment(\"a\", 2, mode=\"r\", storage=mock_database.storage)\n assert experiment.mode == \"r\"\n\n with pytest.raises(UnsupportedOperation) as exc:\n experiment.insert({\"x\": 0})\n\n assert exc.match(\"ExperimentClient must have write rights to execute `insert()\")\n\n experiment = get_experiment(\"a\", 2, mode=\"w\", storage=mock_database.storage)\n assert experiment.mode == \"w\"\n\n trial = experiment.insert({\"x\": 0})\n\n with pytest.raises(UnsupportedOperation) as exc:\n experiment.reserve(trial)\n\n assert exc.match(\n \"ExperimentClient must have execution rights to execute `reserve()\"\n )", "def check():", "def get_bool(self, sect, opt):\r\n return self.get_safe(sect, opt) == \"True\"", "def testAllWrite(self):\n import time,copy\n time.sleep(2)\n to_config = self.config['vdevs']['slave']['icsifaces'][0]\n from_config = self.config['vdevs']['master']['clientifaces'][0]\n points = self.config['vdevs']['slave']['points']\n client = ModbusRTU(to_config, points, from_config)\n\n pts = copy.deepcopy(self.config['vdevs']['slave']['points'])\n pts = [pt for pt in pts if pt['name']!='pressure'] #Can't write to pres\n for i in xrange(50):\n ptnames = [ pt['name'] for pt in pts ]\n pointsvalues = dict(zip(ptnames, 
[0]*len(ptnames)))\n reply = client.writePoints(pointsvalues)\n assert reply is None, \"Write returned value other than None: \" + str(reply)\n reply = client.readPoints(ptnames)\n #print \"Reply: \", reply\n for pt in ptnames:\n #assert value == reply[ptnames.index(pt)]\n if not 0 == reply[ptnames.index(pt)]: \n print pt, ' was not read properly.'", "def writable(self):\n self._check_not_closed()\n return False" ]
[ "0.6882313", "0.66271466", "0.65398455", "0.63707685", "0.6339095", "0.62233245", "0.6203335", "0.6150687", "0.6148839", "0.61370564", "0.5970456", "0.5936587", "0.59159636", "0.59159636", "0.5886013", "0.5855655", "0.5835535", "0.58087593", "0.5781061", "0.5776412", "0.5771563", "0.5766694", "0.5723804", "0.57046247", "0.5692837", "0.5683103", "0.5661741", "0.56577915", "0.5651214", "0.5637148", "0.5612362", "0.56063765", "0.55756277", "0.55625147", "0.55622536", "0.5561949", "0.5559224", "0.55579084", "0.5526606", "0.5517519", "0.5496716", "0.5488177", "0.54686034", "0.54454434", "0.5410937", "0.54101074", "0.5404894", "0.5401312", "0.53901947", "0.53764594", "0.5375589", "0.53736895", "0.5373408", "0.53715223", "0.5356985", "0.5354646", "0.535392", "0.5337128", "0.5333922", "0.5331397", "0.53310025", "0.53277725", "0.53062505", "0.52936256", "0.5288128", "0.52835", "0.52668875", "0.52574164", "0.5253694", "0.52448314", "0.5241469", "0.52314335", "0.5228596", "0.5223248", "0.52146274", "0.5213026", "0.5201805", "0.51935357", "0.5192075", "0.51793414", "0.5175643", "0.51637655", "0.5158975", "0.5158369", "0.5154026", "0.51513076", "0.51487994", "0.51487184", "0.5148612", "0.51459694", "0.51422757", "0.51411045", "0.51381904", "0.51281005", "0.512699", "0.5123252", "0.512104", "0.5119492", "0.51117414", "0.5107471" ]
0.6360841
4
Test read and write strings.
def test_message_string():
    result = True
    message = msg.Message()
    size = 0
    for i in range(num_it):
        message.appendString(str(i) + "azertyuiopqsdfghjklmwxcvbn")
        size += len(str(i) + "azertyuiopqsdfghjklmwxcvbn")
        if message.length != msg.HEADER_SIZE + (i+1)*msg.intStruct.size + size:
            print("Size is ", message.length, " but should be ", msg.HEADER_SIZE + (i+1)*msg.intStruct.size + size)
            print("Error : message.appendString")
            result = False
    message.resetCursor()
    for i in range(num_it):
        r = message.readString()
        if r != str(i) + "azertyuiopqsdfghjklmwxcvbn":
            print(r, " vs ", str(i) + "azertyuiopqsdfghjklmwxcvbn")
            print("Error : message.read/appendString")
            result = False
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_file_ascii_readwrite(self):\n FileWriter(self.ascii_path).write(self.ascii_string) # file write\n ascii_text = FileReader(self.ascii_path).read() # file read\n self.assertEqual(ascii_text, self.ascii_string)", "def test_string():", "def test_file_ascii_readwrite_append(self):\n FileWriter(self.ascii_path).append(self.ascii_string) #append a second string of the ascii text\n ascii_text = FileReader(self.ascii_path).read()\n self.assertEqual(ascii_text, (self.ascii_string)*2) #confirm that it equals two of the ascii strings", "def test_text_roundtrip():\n for text in (\"\", \"a\", \"Hello, world!\", \"9\" * 1000):\n assert text == String.read(String.to_bytes(text))", "def check(self, s):\n bufferValue = self.f.getvalue()\n if isinstance(s, str):\n bufferValue = bufferValue.decode(\"utf-8\")\n self.assertEqual(bufferValue, s)", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def test_strings(self):\n topo = Topology()\n ud = []\n ud.append(u'⡍⠔⠙⠖ ⡊ ⠙⠕⠝⠰⠞ ⠍⠑⠁⠝ ⠞⠕ ⠎⠁⠹ ⠹⠁⠞ ⡊ ⠅⠝⠪⠂ ⠕⠋ ⠍⠹')\n ud.append(u'2H₂ + O₂ ⇌ 2H₂O, R = 4.7 kΩ, ⌀ 200 mm')\n ud.append(u'многоязычных')\n ud.append(\"Arsenal hammered 5-1 by Bayern again\")\n s = topo.source(ud, name=u'façade')\n sas = s.as_string()\n sd = s.map(lambda s : {'val': s + u\"_test_it!\"})\n tester = Tester(topo)\n tester.contents(s, ud)\n tester.contents(sas, ud)\n dud = []\n for v in ud:\n dud.append({'val': v + u\"_test_it!\"})\n tester.contents(sd, dud)\n\n tester.test(self.test_ctxtype, self.test_config)", "def test_001(compiler, temp_builds_dir):\n filepath = temp_builds_dir.join(\"compiler_write_001\")\n\n content = \"\"\"Some sample latin text\"\"\"\n\n compiler.write_content(content, filepath.strpath)\n\n # Read file to compare\n with io.open(filepath.strpath, \"r\", encoding=\"utf-8\") as f:\n result = f.read()\n\n assert content == result", "def test_stringToString(self):\n self.assertNativeString(\"Hello!\", \"Hello!\")", "def test_file_utf8_readwrite_noraise_unicodeerror(self):\n FileWriter(self.unicode_path).write(self.unicode_string)\n unicode_text = FileReader(self.unicode_path).read()\n self.assertEqual(self.unicode_string, unicode_text)", "def test_read_types():\n t1 = ascii.read(\"a b c\\n1 2 3\\n4 5 6\", format=\"fast_basic\", guess=False)\n # TODO: also read from file\n t2 = ascii.read(StringIO(\"a b c\\n1 2 3\\n4 5 6\"), format=\"fast_basic\", guess=False)\n t3 = ascii.read([\"a b c\", \"1 2 3\", \"4 5 6\"], format=\"fast_basic\", guess=False)\n assert_table_equal(t1, t2)\n assert_table_equal(t2, t3)", "def test_strings(self):\n # Message. 
Double-nested to ensure serializers are recursing properly.\n message = {\n \"values\": {\n # UTF-8 sequence for british pound, but we want it not interpreted into that.\n \"utf-bytes\": b\"\\xc2\\xa3\",\n # Actual unicode for british pound, should come back as 1 char\n \"unicode\": \"\\u00a3\",\n # Emoji, in case someone is using 3-byte-wide unicode storage\n \"emoji\": \"\\u1F612\",\n # Random control characters and null\n \"control\": b\"\\x01\\x00\\x03\\x21\",\n }\n }\n # Send it and receive it\n channel_layer.send(\"str_test\", message)\n _, received = channel_layer.receive_many([\"str_test\"])\n # Compare\n self.assertIsInstance(received[\"values\"][\"utf-bytes\"], six.binary_type)\n self.assertIsInstance(received[\"values\"][\"unicode\"], six.text_type)\n self.assertIsInstance(received[\"values\"][\"emoji\"], six.text_type)\n self.assertIsInstance(received[\"values\"][\"control\"], six.binary_type)\n self.assertEqual(received[\"values\"][\"utf-bytes\"], message[\"values\"][\"utf-bytes\"])\n self.assertEqual(received[\"values\"][\"unicode\"], message[\"values\"][\"unicode\"])\n self.assertEqual(received[\"values\"][\"emoji\"], message[\"values\"][\"emoji\"])\n self.assertEqual(received[\"values\"][\"control\"], message[\"values\"][\"control\"])", "def test_write_file():\n filename = 'test'\n content = 'hello!'\n\n write_file(content, filename)\n assert read_file(filename) == 'hello!'", "def test_file_ascii_safewrite(self):\n os.remove(self.ascii_path) #remove the existing text file for tests\n if os.path.exists(self.ascii_path):\n raise IOError(\"The ascii test file was not deleted. (test_IO.py.test_file_ascii_safewrite)\")\n else:\n safe_response = FileWriter(self.ascii_path).safe_write(self.ascii_string) # attempt safe_write when no preexisting file present\n ascii_text = FileReader(self.ascii_path).read()\n self.assertEqual(ascii_text, self.ascii_string) # assert that the correct text was written\n self.assertEqual(safe_response, True) # assert that returns True when file not present and writes\n\n if os.path.exists(self.ascii_path):\n self.assertEqual(FileWriter(self.ascii_path).safe_write(self.ascii_string), False) #confirm that returns False to calling function when there is a pre-existing file\n else:\n raise IOError(\"The ascii test file is not present (test_IO.py.test_file_ascii_safewrite)\")", "def test_002(compiler, temp_builds_dir):\n filepath = temp_builds_dir.join(\"compiler_write_002\")\n\n content = \"\"\"Some sample unicode text: フランス Furansu\"\"\"\n\n compiler.write_content(content, filepath.strpath)\n\n # Read file to compare\n with io.open(filepath.strpath, \"r\", encoding=\"utf-8\") as f:\n result = f.read()\n\n assert content == result", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def test_003(compiler, temp_builds_dir):\n filepath = temp_builds_dir.join(\"foo/bar/home.txt\")\n\n content = \"\"\"Some sample unicode text: フランス Furansu\"\"\"\n\n compiler.write_content(content, filepath.strpath)\n\n # Read file to compare\n with io.open(filepath.strpath, \"r\", encoding=\"utf-8\") as f:\n result = f.read()\n\n assert content == result", "def test_str(self, r, rep):\n assert str(r) == rep", "def test_read_file():\n filename = 'sample'\n assert read_file(filename) == 'hello!\\n'", "def test_string():\n pass", "def test_str_method(self):\n expected = \"[Square] (1) 0/0 - 2\\n\"\n output = io.StringIO()\n sys.stdout = output\n print(self.a)\n self.assertEqual(expected, output.getvalue())\n\n expected = \"[Square] (100) 1/2 - 3\\n\"\n output = io.StringIO()\n sys.stdout = output\n print(self.b)\n self.assertEqual(expected, output.getvalue())\n\n expected = \"[Square] (2) 0/0 - 5\\n\"\n output = io.StringIO()\n sys.stdout = output\n print(self.c)\n self.assertEqual(expected, output.getvalue())", "def test_read_strips(connection, reader, loop):\n reader.push(\" a b c | @#$ d \\n\")\n loop.run_until_complete(connection.connect())\n value = loop.run_until_complete(connection.read())\n assert value == \"a b c | @#$ d\"\n assert reader.has_read(\" a b c | @#$ d \\n\")", "def test_str(self, string, application):\n assert string == str(application)", "def test_write_file_to_disk_str(self):\r\n file_data = 'A' * 100\r\n write_file_to_disk(self.test_file3, file_data)\r\n self.file_contents_is_equal(self.test_file3, file_data)", "def test_create_valid_str(self):\n storage = FileStorage()\n tests = ['new', 'new\\\\\\\"', '\\\\\\\"', 'My_little_house', '\"\"', '____']\n expected = ['new', 'new\"', '\"', 'My little house', '', ' ']\n\n for i in range(len(tests)):\n self.remove_all()\n with patch('sys.stdout', new=StringIO()) as f:\n self.console.onecmd(\n 'create BaseModel test_var=\"{}\"'.format(tests[i]))\n attributes = list(storage.all().values())\n actual = attributes[0].test_var\n self.assertEqual(expected[i], actual)\n self.assertEqual(str, type(actual))", "def test_file_utf8_safewrite(self):\n os.remove(self.unicode_path) #remove the existing text file for tests\n if os.path.exists(self.unicode_path):\n raise IOError(\"The unicode test file was not deleted. 
(test_IO.py.test_file_utf8_safewrite)\")\n else:\n safe_response = FileWriter(self.unicode_path).safe_write(self.unicode_string) # attempt safe_write when no preexisting file present\n u_text = FileReader(self.unicode_path).read()\n self.assertEqual(u_text, self.unicode_string) # assert that the correct text was written\n self.assertEqual(safe_response, True) # assert that returns True when file not present and writes\n\n if os.path.exists(self.unicode_path):\n self.assertEqual(FileWriter(self.unicode_path).safe_write(self.unicode_string), False) #confirm that returns False to calling function when there is a pre-existing file\n else:\n raise IOError(\"The unicode test file is not present (test_IO.py.test_file_utf8_safewrite)\")", "def test_str(self):\n self.assertEqual(str(self.content), \"Test Content\")", "def test_read(self):\n string = \"1 10\\n\"\n num1, num2 = collatz_read(string)\n self.assertEqual(num1, 1)\n self.assertEqual(num2, 10)", "def test_strings_with_foo(self):\n write this test!", "def test_read_file():\n assert read_file('test_read_file.txt') == \"\"\"ABCDEFGHIJKLMNOPQRSTUVWXYZ?\nabcdefghijklmnopqrstuvwxyz.\n\"\"\"", "def test__read_scenario_files(self):\n test_str = '<sequence_demo><adaptivenumericinput />'\n test_result = _read_scenario_files()\n self.assertEqual(test_str, test_result[0:len(test_str)])", "def test_read():\n f = open('test', mode='r')\n line = f.read()\n f.close()", "def test_from_string(self):\n from pystarlab.starlab import Story\n king_output = \"king.out\"\n\n king_path = os.path.join(DATA_DIR, king_output)\n with open(king_path, 'r') as f:\n king_str = f.read()\n king_story = Story.from_string(king_str)\n self.assertEquals(king_str, str(king_story))", "def test_file_utf8_write_noraise_unicodeerror(self):\n FileWriter(self.unicode_path).write(self.unicode_string)\n unicode_text = FileReader(self.unicode_path).read_utf8()\n self.assertEqual(self.unicode_string, unicode_text)", "def test_read1(self):\n string = \"100 200\\n\"\n num1, num2 = collatz_read(string)\n self.assertEqual(num1, 100)\n self.assertEqual(num2, 200)", "def _test_text(self, url, content, buffering):\n # read(-1), readable(), seekable()\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n self.assertTrue(tf.readable())\n self.assertTrue(tf.seekable())\n self.assertEqual(tf.read(), content)\n self.assertEqual(tf.read(), \"\")\n\n # read(10)\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n result = \"\"\n while True:\n chunk = tf.read(10)\n result += chunk\n if len(chunk) < 10:\n break\n self.assertEqual(result, content)\n\n # readline(), seek(), tell()\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n result = \"\"\n while True:\n rpos = tf.tell()\n tf.seek(0)\n tf.seek(rpos)\n chunk = tf.readline()\n result += chunk\n if len(chunk) == 0:\n break\n self.assertEqual(result, content)", "def test_read_line(self):\n\n expected_data = ['\\\"lu, jr\\\"','ming-yuan','\\\"DRUG,1\\\"',135.999,True,3]\n input_string = '001,\\\"LU, JR\\\",MING-YUAN,\\\"DRUG,1\\\",135.999\\n'\n data = read_line(input_string)\n self.assertEqual(expected_data[0],data[0])\n self.assertEqual(expected_data[1],data[1])\n self.assertEqual(expected_data[2],data[2])\n self.assertAlmostEqual(expected_data[3],data[3])\n self.assertEqual(expected_data[4],data[4])\n self.assertAlmostEqual(expected_data[5],data[5])\n\n #Check for odd numers of quotation marks\n input_string = '001,\\\"LU\\\",\\\"MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n 
self.assertFalse(data[4])\n\n #Check for missing fields\n input_string = '001,,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for corrupted fields\n input_string = '001x,LU,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,1ag5\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def testUploadWrapper(self):\n # Check that small reads still work.\n encrypted_data = \"\"\n while 1:\n small_read = self.encrypt_wrapper.read(2)\n if not small_read:\n break\n\n # Make sure that the reads are not larger than requested.\n self.assertTrue(len(small_read) <= 2)\n\n encrypted_data += small_read\n\n self.decrypt_wrapper.write(small_read)\n\n self.assertEqual(self.test_string, self.outfd.getvalue())", "def test_add_text_str(self):\n with DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR) as archive:\n data_to_write = \"Hello, łorld!\"\n file_path = archive.add_text('testfile', data_to_write)\n self.assertTrue(os.path.isfile(file_path))\n\n valid_output_path = os.path.join(archive.data_dir_path, 'testfile.txt')\n self.assertEqual(file_path, valid_output_path)\n\n with open(file_path, 'r') as fp:\n saved_data = fp.read().strip()\n self.assertEqual(saved_data, data_to_write)", "def test_str(self):\n \n # Create a Resource object\n book = Book(\"Penguin Group\", \"New York\", \"fiction\", 1, \"White Noise\", \n Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\",\n [\"culture\", \"survival\", \"life\", \"society\"])\n \n # Assert expected result of the str function\n self.assertEqual(str(book), (\"ID: 1 \\nTitle: White Noise \"\\\n \"\\nCreator: Don DeLillo \\nSummary: Delillo's White Noise follows \"\\\n \"narrator Jack Gladney, a professor at a \\nsmall Liberal Arts \"\\\n \"college and describes an academic year. Jack teaches \\nat ... 
\"\\\n \"\\nGenre: sci-fi \\nLanguage: English \\nYear: 1985 \"\\\n \"\\nCountry: US \\nLength: 326p \\nType: book \"\\\n \"\\nKeywords: culture, life, society, survival\\nPublisher: \"\\\n \"Penguin Group \\nCity: New York \\nCategory: fiction\"))", "def test_read_file_populates_data_1():\n storage_manager = \"hello again\"\n word_list = storage_manager\n assert word_list is not None\n assert len(word_list) == 11", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def test_file_utf8_readwrite(self):\n FileWriter(self.unicode_path).write_utf8(self.unicode_string)\n unicode_text = FileReader(self.unicode_path).read_utf8()\n self.assertEqual(unicode_text, self.unicode_string)", "def test_str(self):\r\n map_obj = MetadataMap.parseMetadataMap(self.m1)\r\n string_rep = StringIO(map_obj)\r\n\r\n self.m1.seek(0)\r\n\r\n # The string representation of the map_obj is unpredictable, since\r\n # it is generated from a dict, so we have to get a little clever\r\n exp_headers = self.m1.readline().strip().split('\\t')\r\n obs_headers = string_rep.readline().strip().split('\\t')\r\n\r\n # make sure that they have the same columns\r\n self.assertEqual(set(exp_headers), set(obs_headers))\r\n\r\n # make sure they have the same values for the same columns\r\n for obs, exp in zip(string_rep, self.m1):\r\n obs_elements = obs.strip().split('\\t')\r\n exp_elements = exp.strip().split('\\t')\r\n\r\n for exp_i, exp_header in enumerate(exp_headers):\r\n obs_i = obs_headers.index(exp_header)\r\n\r\n self.assertEqual(obs_elements[exp_i],\r\n exp_elements[obs_i])", "def test_strings_are_equal(self):\n string1 = 'this is the first string'\n string2 = 'this is the first string' # ==, !is\n string3 = 'this is the second string'\n self.assertEqual(True, comparator.strings_are_equal(string1, string2))\n self.assertEqual(False, comparator.strings_are_equal(string1, string3))", "def write(self, str: str, /) -> None:", "def read(self, s):\n pass", "def test_for_str(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n 'for str s in [\"one\", \"two\"]\\n\\tMeasureFock() | 0'\n )\n assert np.all(\n bb._forvar[\"s\"] == np.array([\"one\", \"two\"])\n )", "def test_str(self):\n \n # Create a Resource object\n resource = Resource(1, \"White Noise\", Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. 
Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\",\n [\"culture\", \"survival\", \"life\", \"society\"])\n \n # Assert expected result of the str function\n self.assertEqual(str(resource), (\"ID: 1 \\nTitle: White Noise \"\\\n \"\\nCreator: Don DeLillo \\nSummary: Delillo's White Noise follows \"\\\n \"narrator Jack Gladney, a professor at a \\nsmall Liberal Arts \"\\\n \"college and describes an academic year. Jack teaches \\nat ... \"\\\n \"\\nGenre: sci-fi \\nLanguage: English \\nYear: 1985 \"\\\n \"\\nCountry: US \\nLength: 326p \\nType: book \"\\\n \"\\nKeywords: culture, life, society, survival\"))", "def test_file_utf8_readas_writeas(self):\n FileWriter(self.unicode2_path).write_as(self.unicode_string, \"utf-8\")\n unicode_text = FileReader(self.unicode2_path).read_as(\"utf-8\")\n self.assertEqual(unicode_text, self.unicode_string)", "def test_file_bin_readwrite(self):\n FileWriter(self.binary_path).write_bin(self.binary_string)\n bin_data = FileReader(self.binary_path).read_bin()\n self.assertEqual(bin_data, self.binary_string)", "def testWriteLine(self):\n file_writer = writers.FileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer.WriteLine('Line of text')\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n expected_output_data = b'Line of text\\r\\n'\n self.assertEqual(output_data, expected_output_data)", "def test_read_file_populates_data_0():\n storage_manager = \"hello\"\n word_list = storage_manager\n assert word_list is not None\n assert len(word_list) == 5", "def test_mutate(self):\n\t\tself.string_a.mutate()\n\t\tself.string_b.mutate()\n\t\tself.assertNotEqual(\"a\", self.string_a.value)\n\t\tself.assertNotEqual(\"b\", self.string_b.value)", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_read_delete(self):\n\n expected = \"Hello, World! 
This has been written by Fun Ilrys.\"\n File(\"hi\").write(expected)\n actual = File(\"hi\").read()\n\n self.assertEqual(expected, actual)\n\n expected = False\n File(\"hi\").delete()\n actual = PyFunceble.path.isfile(\"hi\")\n\n self.assertEqual(expected, actual)", "def test_str(self):\n for duration, _, str_ in self.test_cases:\n self.assertEqual(str(Rest(duration)), str_)", "def test_process_string():\n decode = StringProcessor()\n assert decode.process_string(\"ab\") == \"\"\n decode.output = \"\"\n\n assert decode.process_string(\"ab*\") == \"b\"\n decode.output = \"\"\n\n assert decode.process_string(\"ab^\") == \"ba\"\n decode.output = \"\"\n\n assert decode.process_string(\"^\") == \"\"", "def test_single_line_string(delimiter, fast_reader):\n expected = Table([[1], [2], [3.00]], names=(\"col1\", \"col2\", \"col3\"))\n text = \"1{0:s}2{0:s}3.0\".format(delimiter)\n\n if delimiter in (\"\\r\", \"\\n\"):\n t1 = ascii.read(\n text, format=\"no_header\", delimiter=delimiter, fast_reader=fast_reader\n )\n assert_table_equal(t1, expected)\n else:\n # Windows raises OSError, but not the other OSes.\n with pytest.raises((FileNotFoundError, OSError)):\n t1 = ascii.read(\n text, format=\"no_header\", delimiter=delimiter, fast_reader=fast_reader\n )\n\n t2 = ascii.read(\n [text], format=\"no_header\", delimiter=delimiter, fast_reader=fast_reader\n )\n assert_table_equal(t2, expected)", "def test_putStrin_with_Stringlit2(self):\n input = \"\"\"\n \n void main () {\n putStringLn(\"Tam nay thi ket thuc testcase 100 cho roi\");\n }\n \"\"\"\n expect = \"Tam nay thi ket thuc testcase 100 cho roi\\n\"\n self.assertTrue(TestCodeGen.test(input,expect,600))", "def test_str(self):\n temp = self.mktemp()\n os.makedirs(temp)\n os.makedirs(os.path.join(temp, \"mytestproj\"))\n\n with open(os.path.join(temp, \"mytestproj\", \"__init__.py\"), \"w\") as f:\n f.write(\"__version__ = '1.2.3'\")\n\n version = get_version(temp, \"mytestproj\")\n self.assertEqual(version, \"1.2.3\")", "def test_native_str(self):\n if PY2:\n import __builtin__\n builtin_str = __builtin__.str\n else:\n import builtins\n builtin_str = builtins.str\n\n inputs = [b'blah', u'blah', 'blah']\n for s in inputs:\n self.assertEqual(native_str(s), builtin_str(s))\n self.assertTrue(isinstance(native_str(s), builtin_str))", "def test_write(self):\n\n store = self.get_store(\n uri='http://localhost:9000/index.html', content=\"My message\")\n store.write('http://localhost:9000/index.html', \"New message\")\n\n content = store.read('http://localhost:9000/index.html')\n\n self.assertEquals(content, \"New message\")", "def test_string_conversion():\n ob = ConversionTest()\n\n assert ob.StringField == \"spam\"\n assert ob.StringField == u\"spam\"\n\n ob.StringField = \"eggs\"\n assert ob.StringField == \"eggs\"\n assert ob.StringField == u\"eggs\"\n\n ob.StringField = u\"spam\"\n assert ob.StringField == \"spam\"\n assert ob.StringField == u\"spam\"\n\n ob.StringField = u'\\uffff\\uffff'\n assert ob.StringField == u'\\uffff\\uffff'\n\n ob.StringField = System.String(\"spam\")\n assert ob.StringField == \"spam\"\n assert ob.StringField == u\"spam\"\n\n ob.StringField = System.String(u'\\uffff\\uffff')\n assert ob.StringField == u'\\uffff\\uffff'\n\n ob.StringField = None\n assert ob.StringField is None\n\n with pytest.raises(TypeError):\n ConversionTest().StringField = 1\n\n world = UnicodeString()\n test_unicode_str = u\"안녕\"\n assert test_unicode_str == str(world.value)\n assert test_unicode_str == str(world.GetString())\n assert test_unicode_str 
== str(world)", "def _testAssignData(self):\n key = ('foo', 'bar')\n data = r'text!\\nthere'\n\n with self.cache.Lookup(key) as ref:\n self.assertFalse(ref.Exists())\n ref.AssignText(data)\n self.assertTrue(ref.Exists())\n self.assertEqual(osutils.ReadFile(ref.path), data)\n\n with self.cache.Lookup(key) as ref:\n self.assertTrue(ref.Exists())\n self.assertEqual(osutils.ReadFile(ref.path), data)", "def test_string_output(self):\n got_str = yamlish.dumps(IN)\n got = yaml.load(got_str)\n self.assertEqual(got, self._expected, \"Result matches\")", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def _test_python_reader(basename):\n converted, expected = _test_reader(basename, 'python')\n assert _remove_output(converted) == _remove_output(expected)", "def test1_write():\n with open(FILE_DIR + FILE_NAME, mode='w', encoding='utf-8') as f:\n f.write(DATA)", "def test_putStrin_with_Stringlit(self):\n input = \"\"\"\n \n void main () {\n putStringLn(\"Tam nay thi ket thuc testcase 100 cho roi\");\n }\n \"\"\"\n expect = \"Tam nay thi ket thuc testcase 100 cho roi\\n\"\n self.assertTrue(TestCodeGen.test(input,expect,599))\n ##### dat ten trung nen chi chay ra 98 testcase", "def assertNativeString(self, original, expected):\n self.assertEqual(nativeString(original), expected)\n self.assertIsInstance(nativeString(original), str)", "def testText(self):\n lc = self.CreateConsole()\n contents = \"\"\n self.assertEqual(contents, lc.GetText())\n for str in ('a', 'foo', '\\n\\n\\n', 'bar\\nbaz\\n choke choke zapf'):\n contents += str\n lc.AppendText(str)\n self.assertEqual(contents, lc.GetText())", "def test_basic():\n line = \"test\"\n assert wrap_line(line) == \"test\"", "def test_noUnicode(self):\n s = proto_helpers.StringTransport()\n self.assertRaises(TypeError, s.write, \"foo\")", "def test_strings_without_foo(self):\n write this test!", "def test_write_string():\n buf = make_buffer()\n writer = XmlWriter(buf)\n writer.write_element('value', 'myvalue')\n writer.flush()\n assert_equals(decode_buffer(buf), '<value>myvalue</value>')", "def test_read_from_file():\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert \"\\n\" not in d.read_code_from_file()", "def test_file_gzip_ascii_readwrite(self):\n if state.py2:\n FileWriter(self.ascii_path).gzip(self.ascii_string)\n gzip_contents = FileReader(self.ascii_path + \".gz\").read_gzip()\n self.assertEqual(gzip_contents, self.ascii_string)\n elif state.py3:\n FileWriter(self.ascii_path).gzip(bytes(self.ascii_string, 'utf-8'))\n gzip_contents = FileReader(self.ascii_path + \".gz\").read_gzip()\n self.assertEqual(gzip_contents.decode('ascii'), self.ascii_string)", "def test_str(self):\n character = self.character\n\n self.assertEqual(str(character), self.character_raw['name'])", "def test_str(self):\r\n self.assertEqual(str(self.black), 'black:#000000')\r\n self.assertEqual(str(self.red), 'red:#ff0000')\r\n self.assertEqual(str(self.pink), 'pink:#640000')", "def fn2Test(pStrings, s, outputFile):\n with open(outputFile, 'w') as fH:\n fH.write(\" \".join(pStrings) + \" \" + s)\n return s", "def test_write_file_to_disk(self):\r\n file_data = u'ß' * 100\r\n write_file_to_disk(self.test_file2, file_data)\r\n self.file_contents_is_equal(self.test_file2, file_data)", "def test_read4(self):\n string = \"1 1\\n\"\n num1, num2 = collatz_read(string)\n self.assertEqual(num1, 1)\n self.assertEqual(num2, 1)", "def test_utils_random_string(self):\n for length in range(10, 
100, 10):\n random_1 = random_string(length)\n random_2 = random_string(length)\n self.assertEqual(len(random_1), length)\n self.assertEqual(len(random_2), length)\n self.assertNotEqual(random_1, random_2)", "def test_read_from_file():\n from scraper import read_from_file\n assert read_from_file(TEST_FILE) == (TEST_CONTENT, 'utf-8')", "def store_string(self, string: str) -> None:", "def testExampleStringGeneration(ref):\n actual = generate_string()\n ref.assertStringCorrect(actual, 'string_result.html',\n ignore_substrings=['Copyright', 'Version'])", "def test_io_path_string(args, string):\n assert deepr.io.Path(*args) == string", "def _testReadWrite(self):\n self.shouldWrite = True\n\n def checkReadInput(fd):\n self.assertEquals(fd.read(1), b'x')\n self._reactor.stop()\n\n def writeOnce(fd):\n if self.shouldWrite:\n self.shouldWrite = False\n fd.write(b'x')\n self._reader = Reader(self._p1, checkReadInput)\n self._writer = Writer(self._p2, writeOnce)\n\n self._reactor.addWriter(self._writer)\n\n # Test that adding the reader twice adds it only once to\n # IOLoop.\n self._reactor.addReader(self._reader)\n self._reactor.addReader(self._reader)", "def test_create_invalid_str(self):\n storage = FileStorage()\n tests = ['\"', 'Hi \"', '\"Hi', '\\\"']\n\n for test in tests:\n self.remove_all()\n with patch('sys.stdout', new=StringIO()) as f:\n self.console.onecmd(\n 'create BaseModel test_var={}'.format(test))\n attributes = list(storage.all().values())\n self.assertFalse('test_var' in attributes[0].to_dict())", "def test_string(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', data=\"string\")\n assert dset.data == \"string\"", "def test_str(self):\n ar = awstats_reader.AwstatsReader(test_file_dir, 'jjncj.com')\n self.assertEqual(str(ar), '<AwstatsReader: 2008, 2009>')", "def testUploadWrapperWithLargeWrites(self):\n # Read all the data at once.\n encrypted_data = self.encrypt_wrapper.read(1024 * 1024 * 100)\n\n # Write all the data at once.\n self.decrypt_wrapper.write(encrypted_data)\n\n self.assertEqual(self.test_string, self.outfd.getvalue())", "def test_string_update(self):\r\n vm = String.value_manager(None, None, 'str')\r\n assert not vm.changed\r\n vm.value = 'unicode'\r\n assert vm.changed", "def test_str_magic_method():\n LINES = (\n \"One morn before me were three figures seen,\",\n \"And once more came they by:-alas! wherefore?\",\n )\n for line in LINES:\n assert(str(LineBuilder(line)) == line)", "def test_string_variable(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata('str username = \"Josh\"')\n assert bb._var == {\"username\": \"Josh\"}", "def test_get_value_str(self):\n val = self.setting_str.get_value()\n self.assertIsInstance(val, str)\n self.assertEqual(val, 'test')", "def test_get_value_str(self):\n val = self.setting_str.get_value()\n self.assertIsInstance(val, str)\n self.assertEqual(val, 'test')", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")" ]
[ "0.6916156", "0.6884965", "0.65640235", "0.6562629", "0.6518813", "0.64406925", "0.6423337", "0.6257812", "0.62549824", "0.6238165", "0.62272376", "0.6220291", "0.6190515", "0.61704606", "0.6157264", "0.6138941", "0.6124542", "0.6122053", "0.61147743", "0.6111996", "0.605968", "0.6055502", "0.6040894", "0.6027196", "0.60266775", "0.59878695", "0.59791607", "0.59629846", "0.5954637", "0.5930086", "0.58883274", "0.58850574", "0.5884899", "0.58748686", "0.58731896", "0.58717835", "0.5866095", "0.58525825", "0.58364385", "0.58360624", "0.5833646", "0.5824126", "0.58234555", "0.58192515", "0.5811241", "0.57860607", "0.57770425", "0.5769417", "0.57339364", "0.57305723", "0.57289165", "0.57225627", "0.572005", "0.57113725", "0.57099926", "0.56969845", "0.5688919", "0.5676432", "0.5662971", "0.5658431", "0.5658342", "0.56516415", "0.56390136", "0.5612454", "0.5605905", "0.5594618", "0.55828094", "0.5580041", "0.55779636", "0.55634993", "0.55571795", "0.55559653", "0.555401", "0.55460566", "0.5539794", "0.55311656", "0.55249697", "0.552456", "0.5523171", "0.5519958", "0.5518126", "0.5517395", "0.5509854", "0.5505288", "0.5498261", "0.54942966", "0.5493072", "0.5486274", "0.5481827", "0.5463612", "0.54614025", "0.5457897", "0.54571915", "0.54515225", "0.54435074", "0.5438964", "0.5436999", "0.5432503", "0.5432503", "0.54244024" ]
0.56046695
65
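The test documents in this section (integers and floats earlier, then booleans, strings, and the mixed case below) all exercise the same msg.Message append/read API, together with msg.HEADER_SIZE and the intStruct, boolStruct and floatStruct packers. The msg module itself is not part of this dump; what follows is only a minimal sketch of a Message class that would satisfy those tests, assuming struct-based packing and a length-prefixed string encoding. Every detail beyond the names taken from the tests is a hypothetical choice, not the original implementation.

import struct

# Names below (HEADER_SIZE, intStruct, boolStruct, floatStruct) come from the
# tests; the concrete formats and the buffer layout are assumptions.
HEADER_SIZE = 4
intStruct = struct.Struct("!i")
boolStruct = struct.Struct("!?")
floatStruct = struct.Struct("!f")

class Message:
    """Length-tracked byte buffer with typed append/read helpers."""

    def __init__(self):
        self.buffer = bytearray(HEADER_SIZE)  # header bytes reserved up front
        self.cursor = HEADER_SIZE             # read position, skips the header

    @property
    def length(self):
        return len(self.buffer)

    def resetCursor(self):
        self.cursor = HEADER_SIZE

    def appendInt(self, value):
        self.buffer += intStruct.pack(value)

    def readInt(self):
        value, = intStruct.unpack_from(self.buffer, self.cursor)
        self.cursor += intStruct.size
        return value

    def appendBoolean(self, value):
        self.buffer += boolStruct.pack(value)

    def readBoolean(self):
        value, = boolStruct.unpack_from(self.buffer, self.cursor)
        self.cursor += boolStruct.size
        return value

    def appendFloat(self, value):
        self.buffer += floatStruct.pack(value)

    def readFloat(self):
        value, = floatStruct.unpack_from(self.buffer, self.cursor)
        self.cursor += floatStruct.size
        return value

    def appendString(self, value):
        data = value.encode("utf-8")
        # int length prefix followed by the raw bytes, matching the size
        # accounting (intStruct.size + len(...)) used by the string test
        self.buffer += intStruct.pack(len(data)) + data

    def readString(self):
        size = self.readInt()
        data = bytes(self.buffer[self.cursor:self.cursor + size])
        self.cursor += size
        return data.decode("utf-8")

Under these assumptions the size arithmetic in the tests lines up with what the buffer actually grows by: one boolStruct.size byte per boolean, and one intStruct.size length prefix plus the encoded bytes per string.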
Test read and write mixed datatypes.
def test_message_mixed():
    result = True
    message = msg.Message()
    size = 0
    for i in range(num_it):
        message.appendInt(8848)
        message.appendBoolean(True)
        message.appendFloat(128.789456)
        message.appendString(str(i) + "azertyuiopmlkjhgfdsqwxcvbn")
        size += msg.intStruct.size + msg.boolStruct.size + msg.floatStruct.size + msg.intStruct.size + len(str(i) + "azertyuiopqsdfghjklmwxcvbn")
        if message.length != msg.HEADER_SIZE + size:
            print("Size is ", message.length, " but should be ", msg.HEADER_SIZE + size)
            print("Error : message.appendMixed")
            result = False
    message.resetCursor()
    for i in range(num_it):
        a = message.readInt()
        b = message.readBoolean()
        c = message.readFloat()
        d = message.readString()
        if a != 8848:
            print("Error in int", i, a)
            result = False
        if b is not True:
            print("Error in boolean", i, b)
            result = False
        if abs(c - 128.789456) > 0.00001:
            print("Error in float", i, c)
            result = False
        if d != str(i) + "azertyuiopmlkjhgfdsqwxcvbn":
            print("Error in string", i, d)
            result = False
    return result

# // mixed
# message = new Message();
# for(int j = 0 ; j < 1024 ; j++){
#     message.resetCursor();
#     message.appendInt(8848);
#     message.appendBoolean(true);
#     message.appendFloat((float) 128.789456);
#     message.appendString("azertyuiopmlkjhgfdsqwxcvbn");
#     message.resetCursor();
#     if(message.readInt() != 8848){
#         System.out.println("Error in Int");
#         System.exit(0);
#     }
#     if(message.readBoolean() != true){
#         System.out.println("Error in Boolean");
#         System.exit(0);
#     }
#     if(message.readFloat() != (float) 128.789456){
#         System.out.println("Error in Float");
#         System.exit(0);
#     }
#     if(message.readString().compareTo("azertyuiopmlkjhgfdsqwxcvbn") != 0){
#         System.out.println("Error in String");
#         System.exit(0);
#     }
# }
# System.out.println("OK : mixed types");
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_datatype():\n\n assert isinstance(pf.get_datatype(), torch.dtype)\n assert pf.get_datatype() == torch.float32\n\n pf.set_datatype(torch.float64)\n assert isinstance(pf.get_datatype(), torch.dtype)\n assert pf.get_datatype() == torch.float64\n pf.set_datatype(torch.float32)\n\n with pytest.raises(TypeError):\n pf.set_datatype(\"lala\")", "def test_read_types():\n t1 = ascii.read(\"a b c\\n1 2 3\\n4 5 6\", format=\"fast_basic\", guess=False)\n # TODO: also read from file\n t2 = ascii.read(StringIO(\"a b c\\n1 2 3\\n4 5 6\"), format=\"fast_basic\", guess=False)\n t3 = ascii.read([\"a b c\", \"1 2 3\", \"4 5 6\"], format=\"fast_basic\", guess=False)\n assert_table_equal(t1, t2)\n assert_table_equal(t2, t3)", "def test_mixed_dtypes(suffix: str) -> None:\n path = rsc / mixed_dtypes_file\n df = read_ods(path.with_suffix(suffix), 1)\n\n assert isinstance(df, pd.DataFrame)\n assert len(df) == 10\n assert len(df.columns) == 5\n\n type_list = [float, object, float, float, object]\n assert df.dtypes.tolist() == type_list\n col_b_types = [type(v) for v in df.B.values]\n assert str in col_b_types and float in col_b_types", "def test_all_datatypes_write(self):\n self.all_datatypes_prepare()\n\n insert_statement = self.session.prepare(\n \"\"\"INSERT INTO testdatatype (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, za)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\"\")\n self.session.execute(insert_statement, self.data)\n\n def _test(prepared_statements):\n tempfile = self.get_temp_file()\n logger.debug('Exporting to csv file: {name}'.format(name=tempfile.name))\n self.run_cqlsh(cmds=\"COPY ks.testdatatype TO '{}' WITH PREPAREDSTATEMENTS = {}\"\n .format(tempfile.name, prepared_statements))\n\n out, err, _ = self.run_cqlsh(cmds=\"SELECT * FROM ks.testdatatype\")\n results = self.parse_cqlsh_query(out=out, num_cols=len(self.data), timestamps_to_be_rounded=[10, 17])\n\n self.assertCsvResultEqual(tempfile.name, results, 'testdatatype')\n\n _test(True)\n _test(False)", "def test_data_type(self):\n self.assertTrue(self.tester.data_type(), \"18S\")", "def test_all_datatypes_read(self):\n self.all_datatypes_prepare()\n\n tempfile = self.get_temp_file()\n\n with open(tempfile.name, 'w') as csvfile:\n writer = csv.writer(csvfile)\n # serializing blob bytearray in friendly format\n data_set = list(self.data)\n\n data_set[2] = self.format_blob(self.data[2])\n # Here we convert containers of blobs to strings that match exactly the output of the SELECT *\n # because otherwise the comparison fails due to extra quotes added by the csv writer around the blobs\n # that were converted to strings. 
White spaces do matter\n data_set[24] = '{3: ' + self.format_blob(self.data[24][3]) + '}'\n data_set[25] = '[' + ', '.join(self.format_blob(b) for b in self.data[25]) + ']'\n data_set[26] = '{' + ', '.join(self.format_blob(b) for b in self.data[26]) + '}'\n writer.writerow(data_set)\n\n def _test(prepared_statements):\n logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))\n out, err, _ = self.run_cqlsh(cmds=\"COPY ks.testdatatype FROM '{}' WITH PREPAREDSTATEMENTS = {}\"\n .format(tempfile.name, prepared_statements))\n\n out, err, _ = self.run_cqlsh(cmds=\"SELECT * FROM ks.testdatatype\")\n results = self.parse_cqlsh_query(out=out, num_cols=len(self.data), timestamps_to_be_rounded=[10, 17])\n\n self.assertCsvResultEqual(tempfile.name, results, 'testdatatype')\n\n _test(True)\n _test(False)", "def test_get_datatypes(self):\n obs = _get_datatypes(self.metadata_map.ix[:, self.headers])\n exp = ['float8', 'varchar', 'integer']\n self.assertEqual(obs, exp)", "def test_datatype(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n\n assert hist1.datatype == str\n np.testing.assert_array_equal(hist2.datatype, [numpy.number, str])\n np.testing.assert_array_equal(hist3.datatype, [numpy.datetime64, numpy.number, str])", "def test_types(self):\n \n self.assertIsInstance(self.tx_data_in, numpy.ndarray)\n self.assertIsInstance(self.circuit_simulation, bool)\n self.assertIsInstance(self.bypass, bool)\n \n pass", "def test_all_datatypes_round_trip(self):\n self.all_datatypes_prepare()\n\n insert_statement = self.session.prepare(\n \"\"\"INSERT INTO testdatatype (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, za)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\"\")\n self.session.execute(insert_statement, self.data)\n\n tempfile = self.get_temp_file()\n logger.debug('Exporting to csv file: {name}'.format(name=tempfile.name))\n self.run_cqlsh(cmds=\"COPY ks.testdatatype TO '{}'\".format(tempfile.name))\n\n exported_results = list(self.session.execute(\"SELECT * FROM testdatatype\"))\n\n def _test(prepared_statements):\n self.session.execute('TRUNCATE ks.testdatatype')\n\n self.run_cqlsh(cmds=\"COPY ks.testdatatype FROM '{}' WITH PREPAREDSTATEMENTS = {}\"\n .format(tempfile.name, prepared_statements))\n\n imported_results = list(self.session.execute(\"SELECT * FROM testdatatype\"))\n\n assert exported_results == imported_results\n\n _test(True)\n _test(False)", "def test_datatype_detection():\n\n grammar = \"\"\"\n IsObjectDatatype: INT | STRING | ID;\n IsIntDatatype: INT;\n IsIdDatatype: ID;\n IsAlsoDatatype: SubDT1 | SubDT2;\n SubDT1: INT;\n SubDT2: STRING;\n \"\"\"\n\n mm = metamodel_from_str(grammar)\n\n IsObjectDatatype = mm['IsObjectDatatype']\n assert isinstance(IsObjectDatatype, ecore.EDataType)\n assert IsObjectDatatype.name == 'IsObjectDatatype'\n assert IsObjectDatatype.eType == object\n\n IsIntDatatype = mm['IsIntDatatype']\n assert isinstance(IsIntDatatype, ecore.EDataType)\n assert IsIntDatatype.name == 'IsIntDatatype'\n assert IsIntDatatype.eType == int\n\n IsIdDatatype = mm['IsIdDatatype']\n assert isinstance(IsIdDatatype, ecore.EDataType)\n assert IsIdDatatype.name == 'IsIdDatatype'\n assert IsIdDatatype.eType == str\n\n IsAlsoDatatype = mm['IsAlsoDatatype']\n assert isinstance(IsAlsoDatatype, ecore.EDataType)\n IsAlsoDatatype = mm['IsAlsoDatatype']\n 
assert IsAlsoDatatype.eType == object", "def test_rdb(parallel, read_rdb):\n text = \"\"\"\n\nA\\tB\\tC\n1n\\tS\\t4N\n1\\t 9\\t4.3\n\"\"\"\n table = read_rdb(text, parallel=parallel)\n expected = Table([[1], [\" 9\"], [4.3]], names=(\"A\", \"B\", \"C\"))\n assert_table_equal(table, expected)\n assert_equal(table[\"A\"].dtype.kind, \"i\")\n assert table[\"B\"].dtype.kind in (\"S\", \"U\")\n assert_equal(table[\"C\"].dtype.kind, \"f\")\n\n with pytest.raises(ValueError) as e:\n text = \"A\\tB\\tC\\nN\\tS\\tN\\n4\\tb\\ta\" # C column contains non-numeric data\n read_rdb(text, parallel=parallel)\n assert \"Column C failed to convert\" in str(e.value)\n\n with pytest.raises(ValueError) as e:\n text = \"A\\tB\\tC\\nN\\tN\\n1\\t2\\t3\" # not enough types specified\n read_rdb(text, parallel=parallel)\n assert \"mismatch between number of column names and column types\" in str(e.value)\n\n with pytest.raises(ValueError) as e:\n text = \"A\\tB\\tC\\nN\\tN\\t5\\n1\\t2\\t3\" # invalid type for column C\n read_rdb(text, parallel=parallel)\n assert \"type definitions do not all match [num](N|S)\" in str(e.value)", "def test_incorrect_data_type():\n \n test_object = fa.read_in_envision(data_csv=list_A, platemap_csv=plate_map_file, data_type='typo', size=384)", "def test_datatype(self):\n dates = pd.date_range(start=\"2007-01-01\", end=\"2007-02-01\")\n\n ts = pd.DataFrame(\n {\n \"var1\": np.arange(len(dates), dtype=np.int8),\n \"var2\": np.arange(len(dates), dtype=np.int16),\n \"var3\": np.arange(len(dates), dtype=np.int32),\n \"var4\": np.arange(len(dates), dtype=np.int64)\n },\n index=dates)\n\n dataset_w = GriddedNcContiguousRaggedTs(self.testdatapath,\n self.grid,\n mode=\"w\")\n\n for gpi in self.gpis:\n dataset_w.write(gpi, ts)\n\n dataset_r = GriddedNcContiguousRaggedTs(self.testdatapath,\n self.grid,\n mode=\"r\")\n\n for gpi in self.gpis:\n arr = dataset_r.read(gpi)\n assert (arr[\"var1\"].dtype == np.int8)\n assert (arr[\"var2\"].dtype == np.int16)\n assert (arr[\"var3\"].dtype == np.int32)\n assert (arr[\"var4\"].dtype == np.int64)", "def test_datatypes(self):\n conn = self.connect()\n c = conn.cursor()\n c.execute(\n \"\"\"\ncreate table test_datatypes (\n b bit,\n i int,\n l bigint,\n f real,\n s varchar(32),\n u varchar(32),\n bb blob,\n d date,\n dt datetime,\n ts timestamp,\n td time,\n t time,\n st datetime)\n\"\"\"\n )\n try:\n # insert values\n\n v = (\n True,\n -3,\n 123456789012,\n 5.7,\n \"hello'\\\" world\",\n \"Espa\\xc3\\xb1ol\",\n \"binary\\x00data\".encode(conn.encoding),\n datetime.date(1988, 2, 2),\n datetime.datetime(2014, 5, 15, 7, 45, 57),\n datetime.timedelta(5, 6),\n datetime.time(16, 32),\n time.localtime(),\n )\n c.execute(\n \"insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values\"\n \" (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\",\n v,\n )\n c.execute(\"select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes\")\n r = c.fetchone()\n self.assertEqual(b\"\\x01\", r[0])\n self.assertEqual(v[1:10], r[1:10])\n self.assertEqual(\n datetime.timedelta(0, 60 * (v[10].hour * 60 + v[10].minute)), r[10]\n )\n self.assertEqual(datetime.datetime(*v[-1][:6]), r[-1])\n\n c.execute(\"delete from test_datatypes\")\n\n # check nulls\n c.execute(\n \"insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st)\"\n \" values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\",\n [None] * 12,\n )\n c.execute(\"select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes\")\n r = c.fetchone()\n self.assertEqual(tuple([None] * 12), r)\n\n c.execute(\"delete from test_datatypes\")\n\n # check sequences type\n 
for seq_type in (tuple, list, set, frozenset):\n c.execute(\n \"insert into test_datatypes (i, l) values (2,4), (6,8), (10,12)\"\n )\n seq = seq_type([2, 6])\n c.execute(\n \"select l from test_datatypes where i in %s order by i\", (seq,)\n )\n r = c.fetchall()\n self.assertEqual(((4,), (8,)), r)\n c.execute(\"delete from test_datatypes\")\n\n finally:\n c.execute(\"drop table test_datatypes\")", "def test_write_types(dtype, tmp_path):\n path = tmp_path / \"test_type_writing.hdf5\"\n\n grid = UnitGrid([32])\n c = ScalarField.random_uniform(grid).copy(dtype=dtype)\n if dtype == complex:\n c += 1j * ScalarField.random_uniform(grid)\n\n storage = FileStorage(path, keep_opened=False)\n storage.start_writing(c)\n assert len(storage) == 0\n storage.append(c, 0)\n assert storage._file_state == \"closed\"\n assert len(storage) == 1\n assert storage._file_state == \"reading\"\n storage.append(c, 1)\n assert len(storage) == 2\n assert storage.dtype == np.dtype(dtype)\n\n storage2 = FileStorage(path, write_mode=\"append\")\n assert storage.times == storage2.times\n assert storage.data == storage2.data\n storage.close() # close the old storage to enable writing here\n storage2.start_writing(c)\n storage2.append(c, 2)\n storage2.close()\n\n assert len(storage2) == 3\n np.testing.assert_allclose(storage2.times, np.arange(3))\n assert storage2.dtype == np.dtype(dtype)\n\n storage3 = FileStorage(path, write_mode=\"reading\")\n assert len(storage3) == 3\n for field in storage3:\n np.testing.assert_allclose(field.data, c.data)\n assert storage3.dtype == np.dtype(dtype)", "def test_convert_logical():", "def test_read_raw_datatype():\n bids_root = _TempDir()\n bids_path = _bids_path.copy().update(root=bids_root, datatype='meg')\n raw = _read_raw_fif(raw_fname, verbose=False)\n write_raw_bids(raw, bids_path, overwrite=True, verbose=False)\n\n raw_1 = read_raw_bids(bids_path=bids_path)\n bids_path.update(datatype=None)\n raw_2 = read_raw_bids(bids_path=bids_path)\n raw_3 = read_raw_bids(bids_path=bids_path)\n\n raw_1.crop(0, 2).load_data()\n raw_2.crop(0, 2).load_data()\n raw_3.crop(0, 2).load_data()\n\n assert raw_1 == raw_2\n assert raw_1 == raw_3", "def test_as_python_types(self):\n obs = _as_python_types(self.metadata_map, self.headers)\n exp = [[2.1, 3.1, 3],\n ['str1', '200', 'string30'],\n [1, 2, 3]]\n self.assertEqual(obs, exp)", "def test_snmpset_wrong_type():\n with pytest.raises(SNMPWriteError) as excinfo:\n snmpset(ipaddress=SNMP_SRV_ADDR, oid='SNMPv2-MIB::sysName.0',\n value_type='a', value='255.255.255.255', port=SNMP_SRV_PORT)\n assert 'Bad variable type' in str(excinfo.value)", "def test_match_types(self):\n f = lws.match_types\n # assert f(str, u'test') is True\n assert f(str, 'test') is True\n assert f(int, 123) is True\n assert f(int, 123.00) is False\n assert f(bool, [1, 2, 3]) is False", "def test_story_data_types(self):\n assert type(self.story.rank) == int\n assert type(self.story.story_id) == int\n assert type(self.story.title) in self.text_type\n assert type(self.story.link) in self.text_type\n assert type(self.story.domain) in self.text_type\n assert type(self.story.points) == int\n assert type(self.story.submitter) in self.text_type\n assert type(self.story.published_time) in self.text_type\n assert type(self.story.submitter_profile) in self.text_type\n assert type(self.story.num_comments) == int\n assert type(self.story.comments_link) in self.text_type\n assert type(self.story.is_self) == bool", "def test_invalid_data_types(self):\n response=self.check_invalid_data_type()\n result = 
json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],\"Require int or float type\")\n self.assertEqual(response.status_code, 200)", "def test_conversion(parallel, read_basic):\n text = \"\"\"\nA B C D E F G H\n1 a 3 4 5 6 7 8\n2. 1 9 -.1e1 10.0 8.7 6 -5.3e4\n4 2 -12 .4 +.e1 - + six\n\"\"\"\n table = read_basic(text, parallel=parallel)\n assert_equal(table[\"A\"].dtype.kind, \"f\")\n assert table[\"B\"].dtype.kind in (\"S\", \"U\")\n assert_equal(table[\"C\"].dtype.kind, \"i\")\n assert_equal(table[\"D\"].dtype.kind, \"f\")\n assert table[\"E\"].dtype.kind in (\"S\", \"U\")\n assert table[\"F\"].dtype.kind in (\"S\", \"U\")\n assert table[\"G\"].dtype.kind in (\"S\", \"U\")\n assert table[\"H\"].dtype.kind in (\"S\", \"U\")", "def test_data(self):\n\n # Boolean tests\n is_datas = [True, False]\n for is_data in is_datas:\n self.colorspace.setIsData(is_data)\n self.assertEqual(is_data, self.colorspace.isData())\n\n # Wrong type tests\n wrong_is_datas = [['test'], 'test']\n for wrong_is_data in wrong_is_datas:\n with self.assertRaises(TypeError):\n self.colorspace.setIsData(wrong_is_data)", "def test_writing_unsupported_types_to_hdf5(self):\n some_dict = {}\n some_dict['list_of_ints'] = list(np.arange(5))\n some_dict['list_of_floats'] = list(np.arange(5.1))\n some_dict['weird_dict'] = {'a': 5}\n data1 = new_data(formatter=self.formatter, location=self.loc_provider,\n name='test_missing_attr')\n some_dict['nested_dataset'] = data1\n\n some_dict['list_of_dataset'] = [data1, data1]\n some_dict['list_of_mixed_type'] = ['hello', 4, 4.2]\n\n fp = self.loc_provider(\n io=DataSet.default_io,\n record={'name': 'test_dict_writing'})+'.hdf5'\n F = h5py.File(fp, mode='a')\n self.formatter.write_dict_to_hdf5(some_dict, F)\n new_dict = {}\n self.formatter.read_dict_from_hdf5(new_dict, F)\n # objects are not identical but the string representation should be\n self.assertEqual(str(some_dict['nested_dataset']),\n new_dict['nested_dataset'])\n self.assertEqual(str(some_dict['list_of_dataset']),\n new_dict['list_of_dataset'])\n self.assertEqual(str(some_dict['list_of_mixed_type']),\n new_dict['list_of_mixed_type'])\n\n F['weird_dict'].attrs['list_type'] = 'unsuported_list_type'\n with self.assertRaises(NotImplementedError):\n self.formatter.read_dict_from_hdf5(new_dict, F)", "def test_load_from_msg_type_check(self):\n for msg in self.cases.keys():\n\n cr = CloudRecord()\n cr.load_from_msg(msg)\n\n for key in cr._int_fields:\n value = cr._record_content[key]\n # Check the value we are going to be passing to MySQL\n # is an integer or None. MySQL 5.6.x rejects the value\n # otherwise, whereas 5.1.x interprets it as integer 0.\n valid_value = isinstance(value, int) or value is None\n # Use 'repr' to show quote marks if value is a string.\n self.assertTrue(valid_value, 'Integer %s with value: %s\\n%s' %\n (key, repr(value), msg))\n\n for key in cr._float_fields:\n value = cr._record_content[key]\n # Check the value we are going to be passing to MySQL\n # is a float or None. MySQL 5.6.x rejects the value\n # otherwise, whereas 5.1.x interprets it as 0.00.\n valid_value = isinstance(value, float) or value is None\n # Use 'repr' to show quote marks if value is a string.\n self.assertTrue(valid_value, 'Decimal %s with value: %s\\n%s' %\n (key, repr(value), msg))\n\n for key in cr._datetime_fields:\n value = cr._record_content[key]\n # Check the value we are going to be passing to MySQL\n # is a datetime or None. 
MySQL 5.6.x rejects the value\n # otherwise, whereas 5.1.x interprets it as a zero timestamp.\n valid_value = isinstance(value, datetime) or value is None\n # Use 'repr' to show quote marks if value is a string.\n self.assertTrue(valid_value, 'Datetime %s with value: %s\\n%s' %\n (key, repr(value), msg))", "def testTypeSingle(self):\n prop = make_prop(kind=bool)\n with self.assertRaises(TypeError):\n prop.interpret(1, {})\n\n self.assertEqual(True, prop.interpret(True, {}))", "def isdatatype(object):\n return isinstance(object, (str, int, bool, float, type(None)))", "def test_types(question):\n instance = question[\"instance\"]\n for name, data in instance.get(\"variables\", {}).items():\n assert \"optional\" not in data or isinstance(data[\"optional\"], bool)\n if data.get(\"type\") == \"boolean\":\n assert \"value\" not in data or isinstance(data[\"value\"], bool)\n elif data.get(\"type\") in [\"integer\", \"long\"]:\n assert \"value\" not in data or isinstance(data[\"value\"], int)", "def test_inputs():\n out_dir = _TempDir()\n # test tsv\n beh = dict(test=[1, 2], test2=[2, 1])\n _to_tsv(op.join(out_dir, 'test.tsv'), beh)\n assert beh == _read_tsv(op.join(out_dir, 'test.tsv'))\n with pytest.raises(ValueError, match='Unable to read'):\n _read_tsv('test.foo')\n with pytest.raises(ValueError, match='Error in reading tsv'):\n with open(op.join(out_dir, 'test.tsv'), 'w') as _:\n pass\n _read_tsv(op.join(out_dir, 'test.tsv'))\n with pytest.raises(ValueError, match='contains no data'):\n with open(op.join(out_dir, 'test.tsv'), 'w') as f:\n f.write('test')\n _read_tsv(op.join(out_dir, 'test.tsv'))\n with pytest.raises(ValueError, match='different lengths'):\n with open(op.join(out_dir, 'test.tsv'), 'w') as f:\n f.write('test\\ttest2\\n1\\t1\\n1')\n _read_tsv(op.join(out_dir, 'test.tsv'))\n with pytest.raises(ValueError, match='Empty data file, no keys'):\n _to_tsv(op.join(out_dir, 'test.tsv'), dict())\n with pytest.raises(ValueError, match='Unable to write'):\n _to_tsv('foo.bar', dict(test=1))\n # test read\n raw, beh, events, corrupted_indices = pd_parser.simulate_pd_data()\n with pytest.raises(ValueError, match='must be loaded from disk'):\n _read_raw(raw, preload=True)\n raw.save(op.join(out_dir, 'test-raw.fif'), overwrite=True)\n with pytest.raises(ValueError, match='not recognized'):\n _read_raw('foo.bar')\n raw2 = _read_raw(op.join(out_dir, 'test-raw.fif'), preload=True)\n np.testing.assert_array_almost_equal(raw._data, raw2._data, decimal=3)\n # test load beh\n with pytest.raises(ValueError, match='not in the columns'):\n _load_beh(op.join(basepath, 'pd_events.tsv'), 'foo')\n # test get pd data\n with pytest.raises(ValueError, match='in raw channel names'):\n _get_data(raw, ['foo'])\n with pytest.raises(ValueError, match='in raw channel names'):\n _get_channel_data(raw, ['foo'])\n with pytest.raises(ValueError, match='baseline must be between 0 and 1'):\n pd_parser.parse_pd(raw, beh=beh, baseline=2)\n with pytest.raises(FileNotFoundError, match='fname does not exist'):\n _load_data('bar/foo.fif')\n with pytest.raises(ValueError, match='pd-parser data not found'):\n raw.save(op.join(out_dir, 'foo.fif'))\n _load_data(op.join(out_dir, 'foo.fif'))\n # test i/o\n raw3 = _read_raw(op.join(out_dir, 'test-raw.fif'))\n _save_data(raw3, events=np.arange(10), event_id='Fixation',\n ch_names=['pd'], beh=beh, add_events=False)\n with pytest.raises(ValueError, match='`pd_parser_sample` is not allowed'):\n _save_data(raw3, events=events, event_id='Fixation', ch_names=['pd'],\n beh=beh, 
add_events=False)\n annot, pd_ch_names, beh2 = _load_data(raw3)\n raw.set_annotations(annot)\n events2, event_id = mne.events_from_annotations(raw)\n np.testing.assert_array_equal(events2[:, 0], np.arange(10))\n assert event_id == {'Fixation': 1}\n assert pd_ch_names == ['pd']\n np.testing.assert_array_equal(beh2['time'], beh['time'])\n np.testing.assert_array_equal(beh2['pd_parser_sample'], np.arange(10))\n # check overwrite\n behf = op.join(out_dir, 'behf-test.tsv')\n _to_tsv(behf, beh)\n with pytest.raises(ValueError, match='directory already exists'):\n pd_parser.parse_pd(raw3, beh=behf)\n pd_parser.parse_pd(raw3, beh=None, pd_ch_names=['pd'], overwrite=True)\n annot, pd_ch_names, beh = _load_data(raw3)\n raw3.set_annotations(annot)\n events2, _ = mne.events_from_annotations(raw3)\n assert all([event in events2[:, 0] for event in events[:, 0]])\n assert pd_ch_names == ['pd']\n assert beh is None\n # test overwrite\n raw = _read_raw(op.join(out_dir, 'test-raw.fif'))\n with pytest.raises(ValueError, match='data directory already exists'):\n _check_overwrite(raw, add_events=False, overwrite=False)", "def test_import_type_dense():\n x = np.random.rand(7, 11)\n export_data('/tmp/test.dense', x)\n assert x.dtype == import_data('/tmp/test.dense').dtype", "def test_read(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dt = np.dtype('(3,)f8')\n dset = f.create_dataset('x', (10,), dtype=dt)\n # TODO implement this\n # assert dset.shape == (10,)\n # assert dset.dtype == dt\n\n # Full read\n out = dset[...]\n assert out.dtype == np.dtype('f8')\n assert out.shape == (10, 3)\n\n # Single element\n out = dset[0]\n assert out.dtype == np.dtype('f8')\n assert out.shape == (3,)\n\n # Range\n out = dset[2:8:2]\n assert out.dtype == np.dtype('f8')\n assert out.shape == (3, 3)", "def test_get_type():\n formatter = TabularOutputFormatter()\n\n tests = (\n (1, int),\n (2.0, float),\n (b\"binary\", binary_type),\n (\"text\", text_type),\n (None, type(None)),\n ((), text_type),\n )\n\n for value, data_type in tests:\n assert data_type is formatter._get_type(value)", "def test_snmpset_non_existant_type():\n with pytest.raises(SNMPWriteError) as excinfo:\n snmpset(ipaddress=SNMP_SRV_ADDR, community='public',\n oid='SNMPv2-MIB::sysName.0', value_type='z',\n value='Test Description', port=SNMP_SRV_PORT)\n assert str(excinfo.value) == 'The type value you specified does not ' \\\n 'match one of the accepted type codes.\\n' \\\n 'Valid type codes are one of ' \\\n '(i|u|t|a|o|s|x|d|b)'", "def test_import_type_densetxt():\n x = np.random.rand(7, 11)\n export_data('/tmp/test.densetxt', x)\n assert x.dtype == import_data('/tmp/test.densetxt').dtype", "def test_type_errors():\n\n\ttry:\n\t\ttransmissions = compute_transmissions(cal_directory, lines = 3.0)\n\texcept TypeError:\n\t\ttry:\n\t\t\ttransmissions = compute_transmissions(cal_directory, calibrator = 300.0)\n\t\texcept TypeError:\n\t\t\tassert True\n\t\telse:\n\t\t\tassert False\n\telse:\n\t\tassert False", "def test_default_read():\n # If new data formats are added to preprocess, they need to be tested\n tested_data_formats = [\"ASCII\", \"SU\", \"SAC\"]\n\n preprocess = Default()\n assert(set(tested_data_formats) ==\n set(preprocess._obs_acceptable_data_formats))\n\n st1 = preprocess.read(os.path.join(TEST_DATA, \"AA.S0001.BXY.semd\"),\n data_format=\"ascii\")\n\n st2 = preprocess.read(os.path.join(TEST_DATA, \"Uy_file_single_d.su\"),\n data_format=\"su\")\n\n st3 = preprocess.read(os.path.join(TEST_DATA, \"AA.S0001.BXY.sac\"),\n data_format=\"sac\")\n\n 
assert(st1[0].stats.npts == st2[0].stats.npts)\n assert(st3[0].stats.npts == st2[0].stats.npts)", "def test_data_types(sdc_builder, sdc_executor, database, sql_type, insert_fragment, expected_type, expected_value, keep_data):\n table_name = get_random_string(string.ascii_lowercase, 20)\n connection = database.engine.connect()\n try:\n # Create table\n connection.execute(f\"\"\"\n CREATE TABLE {table_name}(\n id int primary key,\n data_column {sql_type} NULL\n )\n \"\"\")\n\n # And insert a row with actual value\n connection.execute(f\"INSERT INTO {table_name} VALUES(1, {insert_fragment})\")\n # And a null\n connection.execute(f\"INSERT INTO {table_name} VALUES(2, NULL)\")\n\n builder = sdc_builder.get_pipeline_builder()\n\n origin = builder.add_stage('MySQL Query Consumer')\n origin.sql_query = 'SELECT * FROM {0}'.format(table_name)\n origin.incremental_mode = False\n origin.on_unknown_type = 'CONVERT_TO_STRING'\n\n wiretap = builder.add_wiretap()\n\n origin >> wiretap.destination\n\n pipeline = builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n sdc_executor.start_pipeline(pipeline)\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 2)\n sdc_executor.stop_pipeline(pipeline)\n\n assert len(wiretap.output_records) == 2\n record = wiretap.output_records[0]\n null_record = wiretap.output_records[1]\n\n # Since we are controlling types, we want to check explicit values inside the record rather the the python\n # wrappers.\n # TLKT-177: Add ability for field to return raw value\n\n assert record.field['data_column'].type == expected_type\n assert null_record.field['data_column'].type == expected_type\n\n assert record.field['data_column']._data['value'] == expected_value\n assert null_record.field['data_column'] == None\n finally:\n if not keep_data:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n connection.execute(f\"DROP TABLE {table_name}\")", "def test_raw_data(self):\n self.assertEqual(self.tester.raw_data, 1)", "def test_domain_and_target_type(self):\n t = OneHotEncode(3)\n assert t.domain_type == \"integer\"\n assert t.target_type == \"real\"", "def test_cast(self):\n dim = Fidelity(\"epoch\", 1, 10)\n with pytest.raises(NotImplementedError):\n dim.cast()", "def test_types(self):\n \n self.assertIsInstance(self.c, int)\n self.assertIsInstance(self.dX, int)\n self.assertIsInstance(self.dY, int)\n self.assertIsInstance(self.dXg, int)\n self.assertIsInstance(self.dYg, int)\n self.assertIsInstance(self.dXqg, int)\n self.assertIsInstance(self.dYqg, int)\n self.assertIsInstance(self.Xr, int)\n self.assertIsInstance(self.Yr, int)\n self.assertIsInstance(self.dXq, int)\n self.assertIsInstance(self.dYq, int)\n self.assertIsInstance(self.outx_cal, int)\n self.assertIsInstance(self.outy_cal, int)\n self.assertIsInstance(self.dXql, int)\n self.assertIsInstance(self.dYql, int)\n self.assertIsInstance(self.dWx, int)\n self.assertIsInstance(self.dWy, int)\n self.assertIsInstance(self.Wf1, int)\n self.assertIsInstance(self.W, int)\n self.assertIsInstance(self.Wrms, int)\n self.assertIsInstance(self.delta, int)\n self.assertIsInstance(self.yb, int)\n self.assertIsInstance(self.x_edge, int)\n self.assertIsInstance(self.z_basis, int)\n self.assertIsInstance(self.coeff, int)\n self.assertIsInstance(self.nz, int)\n self.assertIsInstance(self.mz, int)\n self.assertIsInstance(self.nn, int)\n self.assertIsInstance(self.a, int)\n self.assertIsInstance(self.b, int)\n self.assertIsInstance(self.a1, int)\n 
self.assertIsInstance(self.b1, int)\n self.assertIsInstance(self.theta, int)\n self.assertIsInstance(self.jx, int)\n self.assertIsInstance(self.jy, int)\n self.assertIsInstance(self.ma, int)\n self.assertIsInstance(self.xx, int)\n self.assertIsInstance(self.outx_l, int)\n \n pass", "def test_encode_data(self):\n if self._cls == 'MetaschemaType':\n for x in self._valid_decoded:\n self.assert_raises(NotImplementedError, self.import_cls.encode_type, x)\n self.assert_raises(NotImplementedError, self.import_cls.encode_data,\n x, self.typedef)\n self.assert_raises(NotImplementedError, self.import_cls.decode_data, None,\n self.typedef)\n else:\n for x in self._valid_decoded:\n y = self.import_cls.encode_type(x, **self._encode_type_kwargs)\n z = self.import_cls.encode_data(x, y, **self._encode_data_kwargs)\n self.import_cls.encode_data_readable(x, None)\n self.import_cls.encode_data_readable(x, y)\n x2 = self.import_cls.decode_data(z, y)\n self.assert_result_equal(x2, x)\n if self._cls not in ['JSONNullMetaschemaType', 'AnyMetaschemaType']:\n self.assert_raises(MetaschemaTypeError,\n self.import_cls.encode_type, None)", "def test_casting_without_iterable(test_fixture, test_input, expected):\n test_fixture.cast_prop = test_input\n assert test_input == test_fixture.cast_prop == expected\n assert type(test_fixture.cast_prop) == type(expected)", "def test_types(self):\n assert types.typeClass(\"str\") == str\n\n assert types.isBuiltinType(\"str\")\n\n assert types.isCollectionType(\"map\")\n assert types.isCollectionType(\"seq\")\n assert not types.isCollectionType(\"str\")\n\n assert types.isScalarType(\"str\")\n assert not types.isScalarType(\"seq\")\n assert not types.isScalarType(\"map\")\n\n assert types.isCollection([])\n assert types.isCollection({})\n assert not types.isCollection(\"foo\")\n\n assert types.isScalar(\"\")\n assert types.isScalar(True)\n assert not types.isScalar([])\n\n assert types.isCorrectType(\"\", str)\n assert types.isCorrectType({}, dict)\n\n assert types.isString(\"foo\")\n assert not types.isString([])\n\n assert types.isInt(1)\n assert not types.isInt(\"foo\")\n\n assert types.isBool(True)\n assert not types.isBool(1)\n assert not types.isBool(\"true\")\n\n assert types.isFloat(1.0)\n assert not types.isFloat(\"foo\")\n\n assert types.isNumber(1)\n assert types.isNumber(1.0)\n assert not types.isNumber(\"foo\")\n\n assert types.isText(\"foo\")\n assert types.isText(1)\n assert types.isText(1.0)\n assert not types.isText([])\n assert not types.isText(True)\n\n assert types.isAny(\"foo\")\n assert types.isAny(True)\n assert types.isAny(1)\n assert types.isAny(1.0)\n assert types.isAny({})\n assert types.isAny([])\n\n assert types.isEnum(\"foo\")\n assert not types.isEnum(1)\n\n assert types.isNone(None)\n assert not types.isNone(\"foo\")", "def test_import_dense_type_mat():\n x = np.random.rand(3, 2)\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype", "def test_dtype_equality(self):\r\n dtypes = get_numeric_types(with_complex=True)\r\n # Perform all pairwise comparisons of dtypes, making sure comparing\r\n # their string representation yields the same result.\r\n for dtype1_idx, dtype1 in enumerate(dtypes):\r\n for dtype2 in dtypes[dtype1_idx + 1:]:\r\n assert (dtype1 == dtype2) == (str(dtype1) == str(dtype2))", "def test_default_write(tmpdir):\n # If new data formats supported by SPECFEM are added to preprocess,\n # they need to be tested\n tested_data_formats = [\"ASCII\", \"SU\"]\n\n preprocess = Default()\n 
assert(set(tested_data_formats) ==\n set(preprocess._syn_acceptable_data_formats))\n\n st1 = preprocess.read(os.path.join(TEST_DATA, \"AA.S0001.BXY.semd\"),\n data_format=\"ascii\")\n\n preprocess.syn_data_format = \"ASCII\"\n preprocess.write(st1, fid=os.path.join(tmpdir, \"test_stream_ascii\"))\n\n preprocess.syn_data_format = \"SU\"\n preprocess.write(st1, fid=os.path.join(tmpdir, \"test_stream_su\"))", "def test_match_valid_data_val(self):\n f = lws.valid_data_val\n schema_val = ('some text', str, 'text')\n assert f(schema_val, 'text') is True\n schema_val = ('some number', float, 7.00)\n assert f(schema_val, 7) is False\n assert f(schema_val, 7.00) is True\n schema_val = ('True', bool, True)\n assert f(schema_val, True) is True\n assert f(schema_val, False) is False\n schema_val = ('even', int, lambda x: x % 2 == 0)\n assert f(schema_val, 2) is True\n assert f(schema_val, 257) is False", "def test_read_write(dset):\n file_name = \"test.hdf5\"\n dset.write(file_name)\n\n dset_new = dataset.Dataset.read(file_name)\n\n for field_name, field in dset._fields.items():\n print(f\"Testing {field_name}\")\n try:\n if field.data.dtype.type is np.str_:\n assert np.char.equal(field.data, dset_new._fields[field_name].data).all()\n else:\n assert np.equal(field.data, dset_new._fields[field_name].data).all()\n except AttributeError:\n for group_field_name, group_field in field.data._fields.items():\n if group_field.data.dtype.type is np.str_:\n assert np.char.equal(\n group_field.data, dset_new._fields[field_name].data._fields[group_field_name].data\n ).all()\n else:\n assert np.equal(\n group_field.data, dset_new._fields[field_name].data._fields[group_field_name].data\n ).all()\n\n os.remove(file_name)", "def test_df_all_types():\n return pd.DataFrame({\n 'intcol': [1, 2],\n 'strcol': ['three', 'four'],\n 'floatcol': [5.0, 6.0],\n 'boolcol': [True, False],\n 'datetimecol': [\n np.datetime64('2020-01-01'), np.datetime64('2020-01-02')],\n })", "def test_types(self):\n \n self.assertIsInstance(self.detector_type, str)\n self.assertIsInstance(self.psd, dict)\n self.assertIsInstance(self.intensity, dict)\n self.assertIsInstance(self.database, str)\n self.assertIsInstance(self.position, list)\n self.assertIsInstance(self.angle, list)\n self.assertIsInstance(self.linearity_curve, dict)\n self.assertIsInstance(self.FOV, float)\n \n pass", "def test_pandas_dtypes():\n assert pd.DataFrame([1, 2]).dtypes.values[0] == np.dtype('int64') == np.int64\n assert pd.DataFrame([1, 2, None]).dtypes.values[0] == np.dtype('float64') == np.float64\n\n assert pd.DataFrame([1.0, 2.0]).dtypes.values[0] == np.dtype('float64') == np.float64\n assert pd.DataFrame([1.0, 2.0, None]).dtypes.values[0] == np.dtype('float64') == np.float64\n\n assert pd.DataFrame([True, False]).dtypes.values[0] == np.dtype('bool') == np.bool\n assert pd.DataFrame([True, False, None]).dtypes.values[0] == np.dtype('object') == np.object\n\n assert pd.DataFrame([\"A\", \"B\"]).dtypes.values[0] == np.dtype('object') == np.object\n assert pd.DataFrame([\"A\", \"B\", None]).dtypes.values[0] == np.dtype('object') == np.object", "def test_simple(self):\n dt = np.dtype([('a','i'), ('b','f'),('c','f8')])\n htype = h5t.py_create(dt)\n self.assertEqual(htype.dtype, dt)", "def checktypestest(chosen_df):\n for i in chosen_df:\n if not chosen_df.dtypes[1] == chosen_df.dtypes[i]:\n raise ValueError('Types do not match')", "def _type_check_double(self, data):\n if type(data) not in self._VALID_TYPES:\n return False\n return True", "def test_compound(self):\n dt = 
np.dtype([('a','i4'),('b','f8')])\n v = np.ones((4,), dtype=dt)\n dset = self.f.create_dataset('foo', (4,), data=v)\n self.assertEqual(dset[0], v[0])\n self.assertIsInstance(dset[0], np.void)", "def test_read(self):\n dt = np.dtype('(3,)f8')\n dset = self.f.create_dataset('x',(10,),dtype=dt)\n self.assertEqual(dset.shape, (10,))\n self.assertEqual(dset.dtype, dt)\n\n # Full read\n out = dset[...]\n self.assertEqual(out.dtype, np.dtype('f8'))\n self.assertEqual(out.shape, (10,3))\n\n # Single element\n out = dset[0]\n self.assertEqual(out.dtype, np.dtype('f8'))\n self.assertEqual(out.shape, (3,))\n\n # Range\n out = dset[2:8:2]\n self.assertEqual(out.dtype, np.dtype('f8'))\n self.assertEqual(out.shape, (3,3))", "def test_upcast(self):\r\n if config.cast_policy == 'custom':\r\n assert arange(iscalar()).dtype == iscalar().dtype\r\n assert arange(fscalar()).dtype == fscalar().dtype\r\n assert arange(dscalar()).dtype == dscalar().dtype\r\n\r\n # int32 + float32 -> float64\r\n assert arange(iscalar(), fscalar()).dtype == dscalar().dtype\r\n assert arange(iscalar(), dscalar()).dtype == dscalar().dtype\r\n assert arange(fscalar(), dscalar()).dtype == dscalar().dtype\r\n\r\n assert arange(iscalar(), fscalar(), dscalar()).dtype == \\\r\n dscalar().dtype\r\n elif config.cast_policy in ('numpy', 'numpy+floatX'):\r\n for dtype in get_numeric_types():\r\n # Test with a single argument.\r\n arange_dtype = arange(scalar(dtype=str(dtype))).dtype\r\n numpy_dtype = numpy.arange(numpy.array(1, dtype=dtype)).dtype\r\n if (dtype != 'float64' and\r\n numpy_dtype == 'float64' and\r\n config.cast_policy == 'numpy+floatX' and\r\n config.floatX == 'float32'):\r\n # We want a float32 arange.\r\n assert arange_dtype == 'float32'\r\n else:\r\n # Follow numpy.\r\n assert arange_dtype == numpy_dtype\r\n\r\n # Test with two arguments.\r\n for stop_dtype in get_numeric_types():\r\n arange_dtype = arange(\r\n start=scalar(dtype=str(dtype)),\r\n stop=scalar(dtype=str(stop_dtype))).dtype\r\n numpy_dtype = numpy.arange(\r\n start=numpy.array(0, dtype=dtype),\r\n stop=numpy.array(1, dtype=stop_dtype)).dtype\r\n if (dtype != 'float64' and\r\n stop_dtype != 'float64' and\r\n numpy_dtype == 'float64' and\r\n config.cast_policy == 'numpy+floatX' and\r\n config.floatX == 'float32'):\r\n # We want a float32 arange.\r\n assert arange_dtype == 'float32'\r\n else:\r\n # Follow numpy.\r\n assert arange_dtype == numpy_dtype\r\n\r\n # Test with three arguments.\r\n for step_dtype in get_numeric_types():\r\n arange_dtype = arange(\r\n start=scalar(dtype=str(dtype)),\r\n stop=scalar(dtype=str(stop_dtype)),\r\n step=scalar(dtype=str(step_dtype))).dtype\r\n numpy_dtype = numpy.arange(\r\n start=numpy.array(0, dtype=dtype),\r\n stop=numpy.array(1, dtype=stop_dtype),\r\n step=numpy.array(1, dtype=step_dtype)).dtype\r\n if (dtype != 'float64' and\r\n stop_dtype != 'float64' and\r\n step_dtype != 'float64' and\r\n numpy_dtype == 'float64' and\r\n config.cast_policy == 'numpy+floatX' and\r\n config.floatX == 'float32'):\r\n # We want a float32 arange.\r\n assert arange_dtype == 'float32'\r\n else:\r\n # Follow numpy.\r\n assert arange_dtype == numpy_dtype\r\n else:\r\n raise NotImplementedError(config.cast_policy)", "def test_read(self):\n self.assertArrayEqual(self.dset['a'], self.data['a'])", "def test_union_simple():\n valid_test_cases = [\n {\"one\": 1},\n {\"one\": \"1\"},\n ]\n\n invalid_test_cases = [\n {\"one\": 3.1415},\n {\"one\": None},\n {\"one\": BasicUnionClass()},\n ]\n\n for test_case in valid_test_cases:\n instance = 
deserialize.deserialize(BasicUnionClass, test_case)\n assert test_case[\"one\"] == instance.one\n\n for test_case in invalid_test_cases:\n with pytest.raises(deserialize.DeserializeException):\n _ = deserialize.deserialize(BasicUnionClass, test_case)", "def test_basic_types(self):\n\t\tyield self.check_setget(\"a_string\", \"some random string\")\n\t\tyield self.check_setget(\"an_integer\", 42)\n\t\tyield self.check_setget(\"a_long\", long(1<<30))\n\t\tyield self.check_setget(\"a_dict\", { \"foo\" : \"bar\", \"baz\" : \"quux\" })", "def test_data_types_raises_error_if_all_type_in_in_are_not_present(self, iris_binary):\n condition = Conditions.IN\n value = Values.data_values()\n\n yaml_str = input_requirements_yaml(Fields.DATA_TYPES, condition, value)\n schema_dict = self.yaml_str_to_schema_dict(yaml_str)\n validator = SchemaValidator(schema_dict)\n\n with pytest.raises(DrumSchemaValidationException):\n validator.validate_inputs(iris_binary)", "def test_conversion_dispatch(self):\n import System\n from IronPythonTest import Cmplx, Cmplx2, ConversionDispatch, FieldTest, MixedDispatch\n cd = ConversionDispatch()\n\n ###########################################\n # checker functions - verify the result of the test\n\n def Check(res, orig):\n if hasattr(res, \"__len__\"):\n self.assertEqual(len(res), len(orig))\n i = 0\n for a in res:\n self.assertEqual(a, orig[i])\n i = i+1\n self.assertEqual(i, len(orig))\n\n def len_helper(o):\n if hasattr(o, 'Count'): return o.Count\n return len(o)\n\n def clear_helper(o):\n if hasattr(o, 'Clear'):\n o.Clear()\n else:\n del o[:]\n\n def CheckModify(res, orig):\n Check(res, orig)\n\n index = len_helper(res)\n res.Add(orig[0])\n Check(res, orig)\n\n res.RemoveAt(index)\n Check(res, orig)\n\n x = res[0]\n res.Remove(orig[0])\n Check(res, orig)\n\n res.Insert(0, x)\n Check(res, orig)\n\n if hasattr(res, \"sort\"):\n try:\n res.sort()\n except TypeError: # unorderable types\n pass\n else:\n Check(res, orig)\n\n clear_helper(res)\n Check(res, orig)\n\n def keys_helper(o):\n if hasattr(o, 'keys'): return list(o.keys())\n\n return o.Keys\n\n def CheckDict(res, orig):\n if hasattr(res, \"__len__\"):\n self.assertEqual(len(res), len(orig))\n i = 0\n\n for a in keys_helper(res):\n self.assertEqual(res[a], orig[a])\n i = i+1\n self.assertEqual(i, len(orig))\n\n\n ###################################\n # test data sets used for all the checks\n\n\n # list/tuple data\n inttuple = (2,3,4,5)\n strtuple = ('a', 'b', 'c', 'd')\n othertuple = (['a', 2], ['c', 'd', 3], 5)\n\n intlist = [2,3,4,5]\n strlist = ['a', 'b', 'c', 'd']\n otherlist = [('a', 2), ('c', 'd', 3), 5]\n\n intdict = {2:5, 7:8, 9:10}\n strdict = {'abc': 'def', 'xyz':'abc', 'mno':'prq'}\n objdict = { (2,3) : (4,5), (1,2):(3,4), (8,9):(1,4)}\n mixeddict = {'abc': 2, 'def': 9, 'qrs': 8}\n\n objFunctions = [cd.Array,cd.ObjIList, cd.Enumerable]\n objData = [inttuple, strtuple, othertuple]\n\n intFunctions = [cd.IntEnumerable, cd.IntIList]\n intData = [inttuple, intlist]\n\n intTupleFunctions = [cd.IntArray]\n intTupleData = [inttuple]\n\n strFunctions = [cd.StringEnumerable, cd.StringIList]\n strData = [strtuple, strlist]\n\n strTupleFunctions = [cd.StringArray]\n strTupleData = [strtuple]\n\n # dictionary data\n\n objDictFunctions = [cd.DictTest]\n objDictData = [intdict, strdict, objdict, mixeddict]\n\n intDictFunctions = [cd.IntDictTest]\n intDictData = [intdict]\n\n strDictFunctions = [cd.StringDictTest]\n strDictData = [strdict]\n\n mixedDictFunctions = [cd.MixedDictTest]\n mixedDictData = [mixeddict]\n\n 
modCases = [ (cd.ObjIList, (intlist, strlist, otherlist)),\n ( cd.IntIList, (intlist,) ),\n ( cd.StringIList, (strlist,) ),\n ]\n\n testCases = [ [objFunctions, objData],\n [intFunctions, intData],\n [strFunctions, strData],\n [intTupleFunctions, intTupleData],\n [strTupleFunctions, strTupleData] ]\n\n dictTestCases = ( (objDictFunctions, objDictData ),\n (intDictFunctions, intDictData ),\n (strDictFunctions, strDictData),\n (mixedDictFunctions, mixedDictData) )\n\n ############################################3\n # run the test cases:\n\n # verify all conversions succeed properly\n\n for cases in testCases:\n for func in cases[0]:\n for data in cases[1]:\n Check(func(data), data)\n\n\n # verify that modifications show up as appropriate.\n\n for case in modCases:\n for data in case[1]:\n newData = list(data)\n CheckModify(case[0](newData), newData)\n\n\n # verify dictionary test cases\n\n for case in dictTestCases:\n for data in case[1]:\n for func in case[0]:\n newData = dict(data)\n CheckDict(func(newData), newData)\n\n\n x = FieldTest()\n y = System.Collections.Generic.List[System.Type]()\n x.Field = y\n\n # verify we can bind w/ add & radd\n self.assertEqual(x.Field, y)\n\n a = Cmplx(2, 3)\n b = Cmplx2(3, 4)\n\n x = a + b\n y = b + a\n\n\n #############################################################\n # Verify combinaions of instance / no instance\n\n a = MixedDispatch(\"one\")\n b = MixedDispatch(\"two\")\n c = MixedDispatch(\"three\")\n d = MixedDispatch(\"four\")\n\n x= a.Combine(b)\n y = MixedDispatch.Combine(a,b)\n\n self.assertEqual(x.called, \"instance\")\n self.assertEqual(y.called, \"static\")\n\n x= a.Combine2(b)\n y = MixedDispatch.Combine2(a,b)\n z = MixedDispatch.Combine2(a,b,c,d)\n v = a.Combine2(b,c,d)\n\n self.assertEqual(x.called, \"instance\")\n self.assertEqual(y.called, \"static\")\n self.assertEqual(z.called, \"instance_three\")\n self.assertEqual(v.called, \"instance_three\")\n\n\n ###########################################################\n # verify non-instance built-in's don't get bound\n\n class C:\n mymax = max\n\n a = C()\n self.assertEqual(a.mymax(0,0), 0)", "def test_canConvert(string, cast, expected):\n assert canConvert(string, cast) == expected", "def test_type_conversion(registry: AdapterLoader) -> None:\n registry.add(\"dummy\", FakeAdapterWithDateTime)\n\n connection = connect(\":memory:\", [\"dummy\"], isolation_level=\"IMMEDIATE\")\n cursor = connection.cursor()\n\n cursor.execute('SELECT * FROM \"dummy://\"')\n assert cursor.fetchall() == []\n\n cursor.execute(\n 'INSERT INTO \"dummy://\" (birthday) VALUES (?)',\n (datetime(2021, 1, 1, 0, 0),),\n )\n cursor.execute('SELECT * FROM \"dummy://\"')\n assert cursor.fetchall() == [\n (\n None,\n datetime(2021, 1, 1, 0, 0),\n None,\n None,\n ),\n ]\n\n # make sure datetime is stored as a datetime\n assert FakeAdapterWithDateTime.data == [\n {\n \"age\": None,\n \"birthday\": datetime(2021, 1, 1, 0, 0),\n \"name\": None,\n \"pets\": None,\n \"rowid\": 1,\n },\n ]\n assert isinstance(FakeAdapterWithDateTime.data[0][\"birthday\"], datetime)\n\n cursor.execute(\n 'SELECT * FROM \"dummy://\" WHERE birthday > ?',\n (datetime(2020, 12, 31, 0, 0),),\n )\n assert cursor.fetchall() == [\n (None, datetime(2021, 1, 1, 0, 0), None, None),\n ]", "def test_types(self):\n field_types = (\n ('clip_id', int), ('created_at', datetime.datetime),\n ('description', str), ('filename', str),\n ('format', smscsv.MediaFormat), ('media_id', int), ('title', str)\n )\n for item in self.items:\n for name, type_ in field_types:\n 
self.assertIsInstance(getattr(item, name), type_)", "def test_basic_numpy_dtypes():\n assert np.int != np.int8\n assert np.int != np.int16\n assert np.int != np.int32\n assert np.int != np.int64\n\n assert np.int == int\n assert np.int8 != int\n assert np.int16 != int\n assert np.int32 != int\n assert np.int64 != int\n\n assert np.dtype(np.int) == np.dtype('int') == np.dtype(int)\n assert np.dtype(np.int8) == np.dtype('int8') == np.int8\n assert np.dtype(np.int16) == np.dtype('int16') == np.int16\n assert np.dtype(np.int32) == np.dtype('int32') == np.int32\n assert np.dtype(np.int64) == np.dtype('int64') == np.int64", "def test_read_raw_unsupported_multi(fname, tmp_path):\n fname = tmp_path / fname\n fname.write_text('')\n with pytest.raises(RuntimeError, match='Could not read.*using any'):\n read_raw(fname)", "def parse_type(fobj, data_type):\n if data_type == \"Boolean\": ## False if 0x00 else True\n return bool(fobj.read(1))\n elif data_type == \"Byte\": ## 1 byte int\n return fobj.read(1)[0]\n elif data_type == \"DateTime\": ## 8 bytes signed int\n return struct.unpack(\"<q\", fobj.read(8))[0]\n elif data_type == \"Double\": ## 8 bytes floating point\n return struct.unpack(\"<d\", fobj.read(8))[0]\n elif data_type == \"Int\": ## 4 bytes unsigned int\n return struct.unpack(\"<I\", fobj.read(4))[0]\n elif data_type == \"Int-Double pair\": ## 0x08-Int-0x0d-Double with AssertionError\n bb = fobj.read(1)[0]\n if bb != 0x08:\n raise AssertionError('parse_type(fobj, data_type): '\n '1st byte(%s) of \"Int-Double pair\" != 0x08' % bb)\n first_int = parse_type(fobj, \"Int\")\n bb = fobj.read(1)[0]\n if bb != 0x0d:\n raise AssertionError('parse_type(fobj, data_type): '\n '6th byte(%s) of \"Int-Double pair\" != 0x0d' % bb)\n return [first_int, parse_type(fobj, \"Double\")]\n elif data_type == \"Int-Double pair*\": ## int(n) - \"Int-Double pair\"*n\n return [parse_type(fobj, \"Int-Double pair\") for i in range(parse_type(fobj, \"Int\"))]\n elif data_type == \"Long\": ## 8 bytes unsigned int\n return struct.unpack(\"<Q\", fobj.read(8))[0]\n elif data_type == \"Short\": ## 2 bytes unsigned int\n return struct.unpack(\"<H\", fobj.read(2))[0]\n elif data_type == \"Single\": ## 4 bytes floating point\n return struct.unpack(\"<f\", fobj.read(4))[0]\n elif data_type == \"String\": ## 0x00 or 0x0b - ULE128(n) - UTF-8(length=n)\n bb = fobj.read(1)[0]\n if bb == 0x00:\n return None\n elif bb != 0x0b:\n ## TODO: show integers in assertion error in hexadecimal and decimal\n ## to make debug more convenient (cause I may inspect the file in a byte reader.\n raise AssertionError('parse_type(fobj, data_type): '\n '1st byte(%s) of \"String\" not in {0x00, 0x0b}' % bb)\n strlen = parse_type(fobj, \"ULEB128\")\n return fobj.read(strlen).decode(\"utf-8\")\n elif data_type == \"ULEB128\": ## https://en.wikipedia.org/wiki/LEB128#Decode_unsigned_integer\n i = 0 ## derived from the wiki psuedo code\n res = 0\n shift = 0\n while True:\n bb = fobj.read(1)[0]\n i += 1\n res |= ((bb & 0b1111111) << shift)\n if (bb & 0b10000000) == 0:\n break\n shift += 7\n return res\n elif data_type == \"Timing point\": ## Double - Double - Boolean\n return parse_types(fobj, [\"Double\", \"Double\", \"Boolean\"])\n elif data_type == \"Timing point+\": ## int(n) - \"Timing point\"*n\n return [parse_type(fobj, \"Timing point\") for i in range(parse_type(fobj, \"Int\"))]\n else:\n raise NotImplementedError('parse_type(fobj, data_type): Unknown data type: \"%s\".' 
% data_type)", "def test_data_type(self):\n self.assertRaises(TypeError, Square, 'hello', 3, 2)\n self.assertRaises(TypeError, Square, 3, True, 2)\n self.assertRaises(TypeError, Square, 3, 2, 3.45)", "def test_fortran_reader_notbasic():\n\n tabstr = dedent(\n \"\"\"\n a b\n 1 1.23D4\n 2 5.67D-8\n \"\"\"\n )[1:-1]\n\n t1 = ascii.read(tabstr.split(\"\\n\"), fast_reader={\"exponent_style\": \"D\"})\n\n assert t1[\"b\"].dtype.kind == \"f\"\n\n tabrdb = dedent(\n \"\"\"\n a\\tb\n # A simple RDB table\n N\\tN\n 1\\t 1.23D4\n 2\\t 5.67-008\n \"\"\"\n )[1:-1]\n\n t2 = ascii.read(\n tabrdb.split(\"\\n\"), format=\"rdb\", fast_reader={\"exponent_style\": \"fortran\"}\n )\n\n assert t2[\"b\"].dtype.kind == \"f\"\n\n tabrst = dedent(\n \"\"\"\n = =======\n a b\n = =======\n 1 1.23E4\n 2 5.67E-8\n = =======\n \"\"\"\n )[1:-1]\n\n t3 = ascii.read(tabrst.split(\"\\n\"), format=\"rst\")\n\n assert t3[\"b\"].dtype.kind == \"f\"\n\n t4 = ascii.read(tabrst.split(\"\\n\"), guess=True)\n\n assert t4[\"b\"].dtype.kind == \"f\"\n\n # In the special case of fast_converter=True (the default),\n # incompatibility is ignored\n t5 = ascii.read(tabrst.split(\"\\n\"), format=\"rst\", fast_reader=True)\n\n assert t5[\"b\"].dtype.kind == \"f\"\n\n with pytest.raises(ParameterError):\n ascii.read(tabrst.split(\"\\n\"), format=\"rst\", guess=False, fast_reader=\"force\")\n\n with pytest.raises(ParameterError):\n ascii.read(\n tabrst.split(\"\\n\"),\n format=\"rst\",\n guess=False,\n fast_reader={\"use_fast_converter\": False},\n )\n\n tabrst = tabrst.replace(\"E\", \"D\")\n\n with pytest.raises(ParameterError):\n ascii.read(\n tabrst.split(\"\\n\"),\n format=\"rst\",\n guess=False,\n fast_reader={\"exponent_style\": \"D\"},\n )", "def test_incorrect_data_type_plate():\n \n test_object = fa.read_in_envision(data_csv=list_A, platemap_csv=plate_map_file, data_type='plate', size=384)", "def test_user_type_simple_attributes_with_roundtrip():\n Person = Map.from_file(\"definitions/Person.buf\")\n me = Person(name=\"Bede Kelly\", age=20)\n bytestream = me.to_bytes()\n new_me = Person.read(bytestream)\n assert \"Bede Kelly\" == new_me.name\n assert 20 == new_me.age", "def testTypeFancy(self):\n prop = make_prop(kind=config.List(int))\n for value in (1, 'hi', [3, 'test']):\n with self.assertRaises(TypeError):\n prop.interpret(value, {})\n\n self.assertEqual([2, 3], prop.interpret([2, 3], {}))", "def test_boolean_custom_values(self):\n true_values = ['YES', 'yes', 'Yes']\n false_values = ['NO', 'no', 'No']\n wrong_values = ['true', 'false', 'True', 'False', 'y', 'n', 'Y', 'N', 't', '1', 1, '0', 0]\n descriptor = self.base_field_descriptor\n descriptor['type'] = 'boolean'\n # only 'default' format\n descriptor['format'] = 'default'\n descriptor['trueValues'] = true_values\n descriptor['falseValues'] = false_values\n\n f = SchemaField(descriptor)\n for v in true_values:\n self.assertTrue(f.cast(v))\n for v in false_values:\n self.assertFalse(f.cast(v))\n for v in wrong_values:\n with self.assertRaises(Exception):\n f.cast(v)", "def test_dummydb_add_data_to_table_wrong_column_type(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)\n result = db.select(\"new_table\", two=1)", "def test_read_raw_supported(fname):\n read_raw(fname)\n read_raw(fname, verbose=False)\n raw = read_raw(fname, preload=True)\n assert \"data loaded\" in str(raw)", "def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True", "def 
data_types(self):", "def testTheType(self, theTestType):\n \n pass", "def test_inspect_semi_structured_datatypes(engine_testaccount):\n table_name = \"test_variant2\"\n metadata = MetaData()\n test_variant = Table(\n table_name,\n metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"va\", VARIANT),\n Column(\"ar\", ARRAY),\n )\n metadata.create_all(engine_testaccount)\n try:\n with engine_testaccount.connect() as conn:\n with conn.begin():\n sql = textwrap.dedent(\n f\"\"\"\n INSERT INTO {table_name}(id, va, ar)\n SELECT 1,\n PARSE_JSON('{{\"vk1\":100, \"vk2\":200, \"vk3\":300}}'),\n PARSE_JSON('[\n {{\"k\":1, \"v\":\"str1\"}},\n {{\"k\":2, \"v\":\"str2\"}},\n {{\"k\":3, \"v\":\"str3\"}}]'\n )\n \"\"\"\n )\n conn.exec_driver_sql(sql)\n inspecter = inspect(engine_testaccount)\n columns = inspecter.get_columns(table_name)\n assert isinstance(columns[1][\"type\"], VARIANT)\n assert isinstance(columns[2][\"type\"], ARRAY)\n\n s = select(test_variant)\n results = conn.execute(s)\n rows = results.fetchone()\n results.close()\n assert rows[0] == 1\n data = json.loads(rows[1])\n assert data[\"vk1\"] == 100\n assert data[\"vk3\"] == 300\n assert data is not None\n data = json.loads(rows[2])\n assert data[1][\"k\"] == 2\n finally:\n test_variant.drop(engine_testaccount)", "def test_esdumper_sa_datatypes(testapp, database):\n db = database\n\n class Model(db.Model, RecordMetadataBase):\n string = db.Column(db.String(255))\n text = db.Column(db.Text)\n biginteger = db.Column(db.BigInteger)\n integer = db.Column(db.Integer)\n boolean = db.Column(db.Boolean(name=\"boolean\"))\n text_variant = db.Column(db.Text().with_variant(mysql.VARCHAR(255), \"mysql\"))\n\n assert SearchDumper._sa_type(Model, \"biginteger\") == int\n assert SearchDumper._sa_type(Model, \"boolean\") == bool\n assert SearchDumper._sa_type(Model, \"created\") == datetime\n assert SearchDumper._sa_type(Model, \"id\") == UUID\n assert SearchDumper._sa_type(Model, \"integer\") == int\n assert SearchDumper._sa_type(Model, \"json\") == dict\n assert SearchDumper._sa_type(Model, \"text_variant\") == str\n assert SearchDumper._sa_type(Model, \"text\") == str\n assert SearchDumper._sa_type(Model, \"updated\") == datetime\n assert SearchDumper._sa_type(Model, \"invalid\") is None", "def test_with_valid_input(self):\n for dataset_type in ['regular', 'raw', 'REGULAR', 'RAW']:\n try:\n check_dataset_type(dataset_type)\n except ValueError:\n self.fail(\"Dataset {0} should be valid\".format(dataset_type))", "def test_stochatreat_output_treat_col_dtype(treatments_dict):\n treatments_df = treatments_dict[\"treatments\"]\n assert treatments_df[\"treat\"].dtype == np.int64, \"Treatment column is missing\"", "def test(type, value):\n if isinstance(value, (list, tuple)):\n values = value\n else:\n values = [value]\n stream = StringIO()\n codec = Codec(stream, SPEC)\n for v in values:\n codec.encode(type, v)\n codec.flush()\n enc = stream.getvalue()\n stream.reset()\n dup = []\n for i in range(len(values)):\n dup.append(codec.decode(type))\n if values != dup:\n raise AssertionError(\"%r --> %r --> %r\" % (values, enc, dup))", "def test_read_raw_unsupported_single(fname):\n with pytest.raises(ValueError, match='Unsupported file type'):\n read_raw(fname)", "def test_casting_with_iterable(test_fixture, test_input, expected):\n test_fixture.cast_prop = test_input\n assert test_fixture.cast_prop == expected\n assert type(test_fixture.cast_prop) is tuple\n for val in test_fixture.cast_prop:\n assert type(val) is float", "def test_fileobj(self, ext, 
dtype):\n sample_rate = 16000\n num_frames = 3 * sample_rate\n num_channels = 2\n with self.assertRaisesRegex(ValueError, \"SoX backend does not support reading\"):\n self._query_fileobj(ext, dtype, sample_rate, num_channels, num_frames)", "def test_match_stype():\n if backwards.PY2: # pragma: Python 2\n slist = ['hello', bytearray('hello'), unicode('hello')]\n else: # pragma: Python 3\n slist = ['hello', b'hello', bytearray('hello', 'utf-8')]\n for s1 in slist:\n for s2 in slist:\n nt.assert_equal(backwards.match_stype(s1, s2), s1)\n nt.assert_raises(TypeError, backwards.match_stype, 1, 'hello')", "def test_multiple_types() -> None:\n soup = generate_case(\"multiple_types\")\n\n tests.html_schema_doc_asserts.assert_types(\n soup, [\"object\", \"string\", \"string or null\", \"integer or number\", \"integer, string, number or null\"]\n )", "def test_input_type_conv(self):\n # Should work with strings instead of dates\n birth_date_str = \"1 January 2000\"\n birth_date = datetime(2000, 1, 1)\n person = Person(\n self.initial_year, self.name, birth_date_str,\n retirement_date=self.retirement_date)\n self.assertEqual(person.birth_date, birth_date)\n self.assertIsInstance(person.birth_date, datetime)\n\n # Should work with non-str/non-datetime values as well\n birth_date = 2000\n retirement_date = birth_date + 65\n person = Person(\n self.initial_year, self.name, birth_date,\n retirement_date=retirement_date)\n self.assertEqual(person.birth_date.year,\n datetime(2000, 1, 1).year)\n self.assertEqual(person.birth_date.year + 65,\n person.retirement_date.year)\n self.assertEqual(person.birth_date.month,\n person.retirement_date.month)\n self.assertEqual(person.birth_date.day,\n person.retirement_date.day)\n\n # Let's mix different types of non-datetime inputs. Should work.\n birth_date = \"3 February 2001\"\n retirement_date = 2002\n person = Person(\n self.initial_year, self.name, birth_date,\n retirement_date=retirement_date)\n birth_date = datetime(2001, 2, 3)\n self.assertEqual(person.birth_date, birth_date)\n self.assertEqual(person.retirement_date.year, retirement_date)\n self.assertEqual(person.birth_date.month,\n person.retirement_date.month)\n self.assertEqual(person.birth_date.day,\n person.retirement_date.day)\n\n # Let's mix datetime and non-datetime inputs. 
Should work.\n birth_date = \"3 February 2001\"\n retirement_date = datetime(2002, 1, 1)\n person = Person(\n self.initial_year, self.name, birth_date,\n retirement_date=retirement_date)\n birth_date = datetime(2001, 2, 3)\n self.assertEqual(person.birth_date, birth_date)\n self.assertEqual(person.retirement_date.year, retirement_date.year)\n self.assertEqual(person.birth_date.month, birth_date.month)\n self.assertEqual(person.retirement_date.month,\n retirement_date.month)\n self.assertEqual(person.birth_date.day, birth_date.day)\n self.assertEqual(person.retirement_date.day, retirement_date.day)", "def test_gets_different_data(self):\n print(\"Testing that get_region_data can return different data types\")\n\n test_ctd = get_region_data(np.array([[3505, 1, 0, 0]]), self.float_name, self.config,\n self.index, self.pres)\n test_bot = get_region_data(np.array([[3505, 0, 1, 0]]), self.float_name, self.config,\n self.index, self.pres)\n test_argo = get_region_data(np.array([[3505, 0, 0, 1]]), self.float_name, self.config,\n self.index, self.pres)\n\n self.assertTrue(test_ctd[0].shape[1] != test_argo[0].shape[1],\n \"Should get a different data set, if we have specified it\")\n self.assertTrue(test_bot[0].shape[1] != test_argo[0].shape[1],\n \"Should get a different data set, if we have specified it\")", "def test_check_type_1():\r\n hl = hotlist.HotList()\r\n hl._validate_value(1)\r\n hl._validate_value(1L)\r\n hl._validate_value(1.5)\r\n hl._validate_value(\"abc\")\r\n hl._validate_value(u\"abc\")\r\n hl._validate_value((1, 2, 3,))\r\n hl._validate_value((1, \"AAA\", 3,))\r\n hl._validate_value((1, (\"AAA\", 2, 3,) , 3,))\r\n hl._validate_value((1, frozenset([\"AAA\", 2, 3,]) , 3,))\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value([ 1, 2, 3,])\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value(( 1, 2, [ 3, 4, 5,],))\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value({})\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value(hotlist.HotList())", "def valid_dtype(expected, found):\n if expected not in ('bool', 'byte', 'text', 'number', 'int', 'float', 'any'):\n raise SystemError((\"** Error: invalid value (%s) in definition file \"\n \"for expected data type\") % expected)\n if expected == 'any':\n return True\n if found in ('str', 'unicode') or re.match( r'^\\|S\\d+$', found) or 'byte' in found:\n # print \"found dtype '%s', interpreting as string\" % dtype\n dtype = 'text'\n elif 'bool' in found:\n dtype = 'bool'\n elif 'int' in found or 'long' in found:\n dtype = 'int'\n elif 'float' in found or 'double' in found:\n dtype = 'float'\n else:\n raise ValueError((\"** Error: unable to recognize data type (%s) for validation.\"\n \"expecting compatable with '%s'\") % (found, expected))\n valid = (dtype == expected or (dtype in ('int', 'float', 'bool', ) and expected == 'number'))\n return valid", "def test_incorrect_data_type_list():\n \n test_object = fa.read_in_envision(data_csv=plate_1, platemap_csv=plate_map_file, data_type='list', size=384)", "def test_primitives(logger, class_, raw_bytes, expected):\n # Set up the generator with the raw bytes:\n position = recover(raw_bytes)\n data = next(position)\n assert data == ''\n\n result = class_.load(position)\n\n if issubclass(class_, Float):\n assert round(result.value, 3) == expected\n\n else:\n assert result.value == expected", "def test_type_inference_lens(self):\n # Create new work trail and retrieve the HEAD workflow of the default\n # branch\n f_handle = 
self.filestore.upload_file(INCOMPLETE_CSV_FILE)\n ds = self.datastore.load_dataset(f_handle=f_handle)\n # Infer type\n command = cmd.mimir_type_inference(DATASET_NAME, 0.6)\n result = self.compute_lens_result(ds, command)\n self.assertTrue(result.is_success)\n # Get dataset\n ds2 = self.datastore.get_dataset(result.provenance.write[DATASET_NAME].identifier)\n self.assertEqual(len(ds2.columns), 3)\n self.assertEqual(ds2.row_count, 7)\n ds1_rows = ds.fetch_rows()\n ds2_rows = ds2.fetch_rows()\n for i in range(ds2.row_count):\n self.assertEqual(ds1_rows[i].values, ds2_rows[i].values)", "def test_write(self):\n dset = self.f.create_dataset('x2', (10, 2))\n\n x = np.zeros((10, 1))\n dset[:, 0] = x[:, 0]\n with self.assertRaises(TypeError):\n dset[:, 1] = x" ]
[ "0.67663014", "0.6662102", "0.66280866", "0.65572697", "0.6511266", "0.64390135", "0.6435044", "0.63185567", "0.63012403", "0.62329113", "0.6192442", "0.6122348", "0.6109678", "0.606131", "0.6042542", "0.6038543", "0.59518325", "0.5944244", "0.5932898", "0.5906418", "0.5901045", "0.58891606", "0.58769494", "0.58381146", "0.58225614", "0.58184624", "0.579893", "0.579569", "0.57532173", "0.5724753", "0.57031095", "0.5687187", "0.5684048", "0.5683354", "0.56736046", "0.5641963", "0.5641252", "0.5638495", "0.56331176", "0.5623203", "0.56116325", "0.56023353", "0.55968416", "0.55953765", "0.5582451", "0.55823576", "0.55812824", "0.5579496", "0.55704474", "0.5567092", "0.55653965", "0.5564821", "0.556437", "0.5553773", "0.55376774", "0.5537669", "0.55208045", "0.5513532", "0.55102897", "0.549763", "0.54907346", "0.5486177", "0.54836243", "0.54754925", "0.54721475", "0.54656297", "0.546552", "0.54620445", "0.54609174", "0.5454592", "0.5448173", "0.543808", "0.54332733", "0.5431559", "0.5425161", "0.5424589", "0.541717", "0.54142714", "0.5402571", "0.53956586", "0.53942233", "0.53939813", "0.5384748", "0.53814644", "0.5380562", "0.53795904", "0.5376136", "0.5364774", "0.53614897", "0.53516656", "0.5343466", "0.5340207", "0.533585", "0.5334938", "0.5333797", "0.53319037", "0.53286606", "0.53203416", "0.5318188", "0.53165597" ]
0.53594905
89
Handles a join game request. Adds the user to the game if it is not full. Otherwise, rejects the user's request to join.
def join_game(players_cursor, states_cursor, user, room_id):
    # Make sure player isn't already in the game
    joined_query = '''SELECT * FROM players_table WHERE user = ? AND room_id = ?;'''
    joined = players_cursor.execute(joined_query, (user, room_id)).fetchall()
    if len(joined) > 0:
        # TODO: Return proper message for already in game
        raise KeyError

    # Check if the game is already full
    players_query = '''SELECT * FROM players_table WHERE room_id = ?;'''
    players = players_cursor.execute(players_query, (room_id,)).fetchall()
    if len(players) == MAX_PLAYERS:
        # TODO: Return proper message for joining full game
        raise ValueError

    # Since the game is not full, add the player to the game
    insert_player = '''INSERT into players_table VALUES (?,?,?,?,?,?,?);'''
    players_cursor.execute(insert_player, (user, STARTING_STACK, 0, 0, "", len(players), room_id))

    FRAMES.append(display_game(players_cursor, states_cursor, user, room_id))
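A minimal, self-contained sketch (not the project's actual setup) of how the join flow above can be exercised against an in-memory SQLite database. The players_table column layout, MAX_PLAYERS, and STARTING_STACK values here are assumptions made for illustration, and the original's FRAMES/display_game bookkeeping is omitted.

import sqlite3

# Assumed constants; the real module defines MAX_PLAYERS and STARTING_STACK elsewhere.
MAX_PLAYERS = 6
STARTING_STACK = 1000

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
# Assumed column order, matching the seven-value INSERT above:
# user, stack, bet, invested, cards, position, room_id
cur.execute("""CREATE TABLE players_table
               (user TEXT, stack INTEGER, bet INTEGER, invested INTEGER,
                cards TEXT, position INTEGER, room_id INTEGER)""")

def join_game_sketch(cur, user, room_id):
    # Same two checks as join_game above: reject duplicate seats and full rooms.
    joined = cur.execute(
        "SELECT * FROM players_table WHERE user = ? AND room_id = ?",
        (user, room_id)).fetchall()
    if joined:
        raise KeyError(f"{user} already joined room {room_id}")
    players = cur.execute(
        "SELECT * FROM players_table WHERE room_id = ?", (room_id,)).fetchall()
    if len(players) == MAX_PLAYERS:
        raise ValueError(f"room {room_id} is full")
    cur.execute("INSERT INTO players_table VALUES (?,?,?,?,?,?,?)",
                (user, STARTING_STACK, 0, 0, "", len(players), room_id))

join_game_sketch(cur, "alice", room_id=1)
join_game_sketch(cur, "bob", room_id=1)
print(cur.execute("SELECT user, position FROM players_table").fetchall())
# [('alice', 0), ('bob', 1)]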
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def join(self, ctx):\n if lobby.count(f\"{ctx.author.mention}\") == 0:\n add(lobby, ctx.author.mention)\n await ctx.channel.send(\"You've been added to the queue!\")\n else:\n await ctx.channel.send(\"You're already queued for a match!\")\n await ctx.channel.send(embed=lobby_list())\n if len(lobby) == teamSizeMax:\n if roster:\n await ctx.channel.send(\n \"There is currently a match being picked right now, please try again after picking is finished\")\n else:\n assign_captains()", "def _join(self, req):\n orig_game = None\n if self.game:\n orig_game = self.game\n game_id = req.pop(0)\n self.game, self.player = self.server.join_game(game_id, self)\n if orig_game:\n orig_game.leave(self)", "async def join(self, interaction: discord.Interaction, button: discord.ui.Button):\n\t\tif interaction.user.id == self.ctx.author.id:\n\t\t\tawait interaction.response.send_message(\n\t\t\t\tcontent='You have already joined the game. You can add AI players or start the game early with the other two buttons.',\n\t\t\t\tephemeral=True,\n\t\t\t)\n\t\t\treturn\n\t\tself.players.append(interaction.user)\n\t\tself.start.disabled = False\n\t\tif len(self.players) >= self.max_players:\n\t\t\tview = None\n\t\t\tself.stop()\n\t\telse:\n\t\t\tview = self\n\t\tawait interaction.response.edit_message(content=self.generate_message(), view=view)", "def do_join_game(self):\n\t\titem = self.li_servers.get_selected()[0]\n\n\t\tself.nickname = self.e_nickname.text\n\t\tself.server_uuid = item.server.uuid\n\t\tself.game_name = item.server.name\n\t\tself.num_players = item.server.num_players\n\t\tself.boardsize = item.server.boardsize\n\n\t\td = {\"state\": be.S_JOIN,\n\t\t\t\t\"uuid\": self.server_uuid,\n\t\t\t\t\"name\": self.game_name,\n\t\t\t\t\"nickname\": self.nickname}\n\t\tevent = pygame.event.Event(be.E_STATE, d)\n\t\tpygame.event.post(event)\n\n\t\tself.hide_all()\n\t\tself.renderer.color = (0, 0, 0, 0)", "def on_joinuser(self, data):\n user_data = {\n 'un': data[3], # nick\n 'ml': data[4], # mod level\n 'st': data[5], # status related\n 'id': data[6], # ezcapechat user id\n 'su': data[7] # ?\n }\n if data[3] == self.users.client.nick:\n self.users.add_client_data(user_data)\n else:\n _user = self.users.add(data[3], user_data)\n print ('%s Joined the room.' 
% _user.nick)\n\n #BOT\n if (_user.nick.lower() in self.autogreet):\n self.send_public(\"%s, %s\" % (_user.nick, self.autogreet[_user.nick.lower()]))", "def join_game(self, request):\n player = Player.query(Player.name == request.player_name).get()\n print player\n if not player:\n raise endpoints.NotFoundException(\n 'A Player with that name does not exist!, '\n 'we need a second player in order to join the game')\n try:\n game = gameutils.get_by_urlsafe(request.urlsafe_key, Game)\n game.player2 = player.key\n game.put()\n except ValueError:\n raise endpoints.BadRequestException('please verify the information '\n 'of the second player')\n\n # Use a task queue to update the average attempts remaining.\n # This operation is not needed to complete the creation of a new game\n # so it is performed out of sequence.\n\n return game.to_form('Second Player Joined the Game, we are ready to start the game!', player.name)", "def handle_join_room(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n words = lobby_command.split()\n roomname = words[1]\n print(f\"Handling join room {roomname} for {user}\")\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Requested roomname found..\")\n if user in _room.room_attrbts['members']:\n msg = f\"Client {user} is already a member of room {_room.name}\"\n self.log_and_send(client_socket, msg)\n return\n else:\n _room.room_attrbts['members'].add(user)\n msg = f\"{user} successfully joined membership of room {roomname}\"\n self.log_and_send(client_socket, msg)\n return\n msg = f'Client {user} passed invalid room. Could not join room {roomname}'\n self.log_and_send(client_socket, msg)\n return", "def join_game(sid, msg):\n if (msg != None and 'uuid' not in msg):\n games = Game.objects.filter(full=False)\n elif (msg != None and 'uuid' in msg):\n games = Game.objects.filter(uuid=msg['uuid'])\n if (len(games) > 0):\n game = games[0]\n game.full = True\n game.save()\n sio.emit('join_game', {'data': serializers.serialize(\n 'json', [game], fields=('created', 'uuid'))}, room=sid)\n join_room(sid, game.uuid)\n else:\n sio.emit('error', {\n 'data': 'No currently joinable game'\n }, room=sid)", "def on_join(data):\n username = request.sid\n room = data\n join_room(room)\n logging.info(username + ' has entered the room.')\n send(username + ' has entered the room.', room=room)", "def join_game (self, game_name):\n r = requests.post (self.url_endpoint,\n data = {\"join_game\": True, \"player_secret\": self.secret, \"game_name\": game_name})\n if (r.status_code != 201):\n print (f\"ERROR: Failed to join game <{game_name}>:\\n\", r.text)\n return r\n\n join_data = json.loads (r.text)\n self.cur_game_name = game_name\n self.cur_game_secret = join_data ['game_name']", "def join(var, wrapper, message):\n # keep this and the event in fjoin() in sync\n evt = Event(\"join\", {\n \"join_player\": join_player,\n \"join_deadchat\": join_deadchat,\n \"vote_gamemode\": vote_gamemode\n })\n if not evt.dispatch(var, wrapper, message, forced=False):\n return\n if var.PHASE in (\"none\", \"join\"):\n if wrapper.private:\n return\n if var.ACCOUNTS_ONLY:\n if wrapper.source.account is None:\n wrapper.pm(messages[\"not_logged_in\"])\n return\n if evt.data[\"join_player\"](var, wrapper) and message:\n evt.data[\"vote_gamemode\"](var, wrapper, message.lower().split()[0], doreply=False)\n\n else: # join deadchat\n if wrapper.private and wrapper.source is not wrapper.target:\n evt.data[\"join_deadchat\"](var, wrapper.source)", "def 
fjoin(var, wrapper, message):\n # keep this and the event in def join() in sync\n evt = Event(\"join\", {\n \"join_player\": join_player,\n \"join_deadchat\": join_deadchat,\n \"vote_gamemode\": vote_gamemode\n })\n\n if not evt.dispatch(var, wrapper, message, forced=True):\n return\n noticed = False\n fake = False\n if not message.strip():\n evt.data[\"join_player\"](var, wrapper, forced=True)\n\n parts = re.split(\" +\", message)\n possible_users = {u.lower().nick for u in wrapper.target.users}\n to_join = []\n if not botconfig.DEBUG_MODE:\n match = complete_one_match(users.lower(parts[0]), possible_users)\n if match:\n to_join.append(match)\n else:\n for i, s in enumerate(parts):\n match = complete_one_match(users.lower(s), possible_users)\n if match:\n to_join.append(match)\n else:\n to_join.append(s)\n for tojoin in to_join:\n tojoin = tojoin.strip()\n # Allow joining single number fake users in debug mode\n if users.predicate(tojoin) and botconfig.DEBUG_MODE:\n user = users._add(wrapper.client, nick=tojoin) # FIXME\n evt.data[\"join_player\"](var, type(wrapper)(user, wrapper.target), forced=True, who=wrapper.source)\n continue\n # Allow joining ranges of numbers as fake users in debug mode\n if \"-\" in tojoin and botconfig.DEBUG_MODE:\n first, hyphen, last = tojoin.partition(\"-\")\n if first.isdigit() and last.isdigit():\n if int(last)+1 - int(first) > var.MAX_PLAYERS - len(list_players()):\n wrapper.send(messages[\"too_many_players_to_join\"].format(wrapper.source.nick))\n break\n fake = True\n for i in range(int(first), int(last)+1):\n user = users._add(wrapper.client, nick=str(i)) # FIXME\n evt.data[\"join_player\"](var, type(wrapper)(user, wrapper.target), forced=True, who=wrapper.source)\n continue\n if not tojoin:\n continue\n\n maybe_user = None\n\n for user in wrapper.target.users:\n if users.equals(user.nick, tojoin):\n maybe_user = user\n break\n else:\n if not users.predicate(tojoin) or botconfig.DEBUG_MODE:\n if not noticed: # important\n wrapper.send(\"{0}{1}\".format(wrapper.source, messages[\"fjoin_in_chan\"]))\n noticed = True\n continue\n\n if maybe_user is not None:\n if not botconfig.DEBUG_MODE and var.ACCOUNTS_ONLY:\n if maybe_user.account is None:\n wrapper.pm(messages[\"account_not_logged_in\"].format(maybe_user))\n return\n elif botconfig.DEBUG_MODE:\n fake = True\n\n if maybe_user is not users.Bot:\n if maybe_user is None and users.predicate(tojoin) and botconfig.DEBUG_MODE:\n maybe_user = users._add(wrapper.client, nick=tojoin) # FIXME\n evt.data[\"join_player\"](var, type(wrapper)(maybe_user, wrapper.target), forced=True, who=wrapper.source)\n else:\n wrapper.pm(messages[\"not_allowed\"])\n if fake:\n wrapper.send(messages[\"fjoin_success\"].format(wrapper.source, len(list_players())))", "def lobbyUserJoin(self, __userID):\n\n\t\t# Make sure the user is not already in mp lobby\n\t\tif (__userID not in self.usersInLobby):\n\t\t\t# We don't need to join #lobby, client will automatically send a packet for it\n\t\t\tself.usersInLobby.append(__userID)", "def join_game(game):\n game = int(game)\n if 0 > game or game > len(games):\n return \"Not a valid gameBike\"\n if games.join_game(game):\n return \"Registration done\"\n else:\n return \"Not valid registration\"", "def player_join(self, player_ip, *args):\r\n\t\ttry:\r\n\t\t\tplayer_ID = args[0] # IndexError\r\n\t\t\tteam_name = args[1] # IndexError\r\n\t\t\tteam_type = self.team_get_type_by_name(team_name) # ValueError\r\n\t\texcept IndexError:\t# Invaild 
arguments\r\n\t\t\tself._comm_server.send_message(player_ip, \"join fail\")\r\n\t\t\t_logger.error(\"player-join: \" \\\r\n\t\t\t\t\"The arguments for join the game are invaild.\")\r\n\t\texcept ValueError:\t# Invaild team name\r\n\t\t\tself._comm_server.send_message(player_ip, \"join fail\")\r\n\t\t\t_logger.error(\"player-join: \" \\\r\n\t\t\t\t\"Specified team name {0} is not found.\".format(team_name))\r\n\t\telse:\r\n\t\t\t# If the player has already joined\r\n\t\t\tif self._teammates.get(player_ip) is not None:\r\n\t\t\t\tself._comm_server.send_message(player_ip, \"join fail\")\r\n\t\t\t\t_logger.error(\"player-join: \" \\\r\n\t\t\t\t\t\"IP {0} has already joined the game.\".format(player_ip))\r\n\t\t\t\treturn\r\n\r\n\t\t\t# Check if the player ID is used in the team\r\n\t\t\tplayer_info = self._teams[team_type].get_player_info_by_ID(player_ID)\r\n\t\t\tif player_info is not None:\r\n\t\t\t\tself._comm_server.send_message(player_ip, \"join fail\")\r\n\t\t\t\t_logger.error(\"player-join: \" \\\r\n\t\t\t\t\t\"Player \\\"{0}\\\" is already in the team.\".format(player_ID))\r\n\t\t\t\treturn\r\n\r\n\t\t\tplayer_info = self._teams[team_type] \\\r\n\t\t\t\t.add_player_info(player_ip, player_ID, team_name)\r\n\r\n\t\t\tself._teammates[player_ip] = team_type\r\n\t\t\tself._handlers[\"player-join\"].invoke(player_info, team_type)\r\n\r\n\t\t\tself._comm_server.send_message(player_ip, \"join ok\")\r\n\r\n\t\t\t_logger.info(\"Player \\\"{0}\\\" from {1} joins the team \\\"{2}\\\".\" \\\r\n\t\t\t\t.format(player_info.ID, player_info.IP, player_info.team_name))", "async def join(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n player = ctx.message.author.name\n if player.lower() in list(tod_games[room]['participants'].keys()):\n await amor_manager.say(\"{}... 
you're already playing Truth or Dare here!\".format(room))\n else:\n tod_games[room]['participants'][player.lower()] = {'spins': 0}\n await amor_manager.say(\"{} has joined Truth or Dare!\".format(player))", "def send_join_request(self, roomname: str, username: str) -> None:\n\n get_members_query = Query.get_room_members(roomname)\n room_members: str = self.db.read_execute_query(get_members_query)[0][0]\n room_members_list = room_members.split()\n\n if username in room_members_list:\n return\n\n try:\n get_pending_requests_query = Query.get_pending_requests(roomname)\n pending_requests: str = self.db.read_execute_query(get_pending_requests_query)[0][0]\n room_request_query = Query.room_request(roomname, username, pending_requests)\n self.db.execute_query(room_request_query)\n\n except IndexError:\n room_request_query = Query.room_request(roomname, username, \"\")\n self.db.execute_query(room_request_query)", "def join_server(self, data, user):\n # User will spawn in one of following rooms\n user.room = choice((\"100\", \"300\", \"800\", \"804\"))\n user.send([\"js\", \"-1\", \"1\", \"1\", \"0\", \"0\"])\n self.add(user)", "async def chat_join(self, event):\n await self.send_json(\n return_value(\n ACTION_JOIN,\n event['label'],\n event['username'],\n MSG_JOIN,\n NO_MESSAGE\n )\n )", "def on_join(data):\n print(str(data))\n if models.Leaderboard.query.filter_by(\n username=data['user']).first() is None:\n add_user(data['user'])\n users, scores = calculate_scores()\n socketio.emit('leaderboard_info', {'users': users, 'scores': scores})", "def on_join(data):\r\n\r\n username = data[\"username\"]\r\n room = data[\"room\"]\r\n join_room(room)\r\n\r\n # Broadcast that new user has joined\r\n send({\"msg\": username + \" has joined the \" + room + \" room.\"}, room=room)", "def evaluate_join_request(self, roomname: str, username: str, action: str) -> bool:\n\n if action not in [\"accept\", \"decline\"]: \n raise ValueError(\"Action must be either one of 'accept' or 'decline'\")\n\n def _remove(updated_pending_requests):\n \"\"\"Remove username from pending request list\"\"\"\n remove_member_from_pending_query = Query.room_request(roomname, \"\", updated_pending_requests)\n self.db.execute_query(remove_member_from_pending_query)\n\n def _accept():\n \"\"\"Accept the request\"\"\"\n get_members_query = Query.get_room_members(roomname)\n room_members: str = self.db.read_execute_query(get_members_query)[0][0]\n add_member_query = Query.add_member(roomname, username, room_members)\n self.db.execute_query(add_member_query)\n\n room_exsists = self.room_exists(roomname)\n get_pending_requests_query = Query.get_pending_requests(roomname)\n pending_requests: str = self.db.read_execute_query(get_pending_requests_query)[0][0]\n request_is_pending = bool(re.findall(rf'\\b{username}\\b', pending_requests))\n \n if room_exsists and request_is_pending:\n updated_pending_requests = re.sub(rf'\\b{username}\\b', \"\", pending_requests)\n if action == \"accept\":\n _accept()\n _remove(updated_pending_requests)\n else:\n _remove(updated_pending_requests)\n\n self.logger.debug(\"Join request evaluated\")\n return True\n\n else:\n self.logger.error(f\"User '{username}' has sent no request to Room {roomname}\")\n return False", "async def react_join(a: Message):\n if a.action.member_id == club_id:\n await a.answer(r_register_help)\n stats.jincr()", "async def _99join(ctx):\n NNB.add_player(ctx.message.author)\n await ctx.send(\"Welcome to the game, {}\".format(ctx.message.author.name))\n print(\"Added {} to the 
game.\".format(ctx.message.author.name))", "def join_team(request):\n template = loader.get_template('team/join_team.html')\n if request.method == 'POST':\n form = UserRequestForm(request.POST)\n user = request.user\n\n if form.is_valid():\n for team in form.cleaned_data['teams']:\n request_user_team_obj = RequestUserTeam(team=team, user=user, message=request.POST.get('message'))\n request_user_team_obj.save()\n messages.success(request, _('Your requests have been sent.'))\n else:\n team = request.user.profile.team\n if team is None:\n form = UserRequestForm()\n else:\n return redirect('/') # User which has a team can not see this template\n\n context = {'form': form}\n return CustomHttpResponse.send(template, context, request)", "def join(self, request, pk=None, *args, **kwargs):\n course = self.get_object()\n user = request.user\n serializer = self.serializer_class(course)\n res = Course.objects.join(course.id, user)\n\n if res:\n return Response({'status': True,\n 'message': 'Success Join Course',\n 'data': serializer.data}, status=status.HTTP_200_OK)\n else:\n return Response({'status': False,\n 'message': 'You have joined Course, Please check your dashboard',\n 'data': serializer.data},\n status=status.HTTP_200_OK)", "def join_player(self, data, user):\n self.remove(user)\n\n user.room = \"100\"\n user.x = \"0\"\n user.y = \"0\"\n user.frame = \"0\"\n\n self.add(user)", "def check_can_join(self, user):\n if not user.is_active or user.is_anonymous():\n return False\n\n membership = self.check_membership(user)\n\n if membership is not None and not membership.is_left():\n return False # Already joined\n\n if self.join_condition == 'A':\n return True\n elif self.join_condition == 'K':\n return user.profile.karma >= self.join_karma_threshold\n elif self.join_condition == 'I':\n return True # Can send a request\n else:\n return False", "def join_server(self, request):\n\n token = request.form['token']\n client = request.form['client']\n server = request.form['server']\n\n if len(client) == 0 or len(server) == 0:\n return (400, 'Bad Request')\n\n if not RestClient.instance().validate_token(token):\n return (401, 'Unauthorized')\n\n chat_rooms = ChatRooms.instance()\n game_server = GameServers.instance().get_servers().get(server)\n if game_server is None:\n return (404, 'Not Found')\n\n chat_rooms.join_room(server, client)\n\n result = {\n 'name': game_server.get_name(),\n 'host': game_server.get_host(),\n 'port': game_server.get_port(),\n 'owner': game_server.get_owner()\n }\n\n return (200, result)", "def userJoin(self, __userID):\n\n\t\tif (__userID not in self.connectedUsers):\n\t\t\tself.connectedUsers.append(__userID)", "def join(request):\n c = {}\n c.update(csrf(request))\n if request.method == 'POST': # If the form has been submitted...\n form = TeamJoinForm(request.POST)\n if form.is_valid(): # All validation rules pass\n team = Team.objects.get(pk=form.cleaned_data['team'])\n if team.secret == form.cleaned_data['secret']:\n member = request.user.member\n member.team = team\n member.save()\n messages.add_message(request, messages.SUCCESS, 'Team joined!')\n return HttpResponseRedirect(reverse('team_details', args=(team.id,)))\n else:\n #TODO: Redirect\n form._errors[\"secret\"] = form.error_class(\n ['Secret does not match!'])\n else:\n print 'form not valid'\n else: \n form = TeamJoinForm()\n\n return render_to_response(\"teams/join.html\", {'form': form, 'c':c},\n context_instance=RequestContext(request))", "def joinGame(self, playerID, startFreshP):\n\n # Log the join attempt\n 
logStrF = \"joinGame called w/ playerID %d (fresh game requested?: %s)\"\n TournamentSystem._logger.debug(logStrF, playerID, str(startFreshP))\n\n # Add the player to a pending game if one exists\n for gameID, game in self.games.iteritems():\n if game.status == ChessMatch.STATUS_PENDING:\n color = game.join(playerID, p2ReqFreshStart=startFreshP)\n if color:\n logStrF = \"Added player %d to existing game %d (sfP=%s)\"\n TournamentSystem._logger.debug(logStrF,\n playerID,\n gameID,\n str(startFreshP))\n return (True, {\"gameID\": gameID,\n \"startFreshP\": startFreshP})\n\n # Add a player to a new game otherwise\n newMatch = ChessMatch(firstPlayerID=playerID,\n p1ReqFreshStart=startFreshP)\n newID = _getUniqueInt(self.games.keys())\n self.games[newID] = newMatch\n TournamentSystem._logger.debug(\"Added player %d to new game %d\",\n playerID, newID)\n return (True, {\"gameID\": newID})", "def join_game(game_id, name):\n name = name or generate_player_name()\n game_data = load_state(game_id)\n if not game_data:\n return None\n if game_data['active']:\n return None\n if game_data['ended_at']:\n return None\n player = add_player_to_game(game_data, name)\n if player:\n msg = make_info_message('You have joined the game')\n alt_msg = make_info_message(\n '{} has joined the game'.format(player['name']))\n flash_player(game_data, player, msg, alt_msg)\n save_state(game_data)\n return player", "def join_team(request, team_pk):\n\trequested_team = ChallengeTeam.objects.get(pk = team_pk)\n\t\n\tif(requested_team.challenge.invite_only and not request.user in requested.team.challenge.invited):\n\t\tmessages.error(request, \"You need an invite to join that Challenge. Please ask the administrator for an invite\")\n\t\treturn redirect(\"/challenge\")\n\t\t\n\trequest.user.joined_teams.add(requested_team)\n\tmessages.success(request, \"You have successfully joined the team: \" + requested_team.team_name)\n\treturn redirect(\"/challenge/team/view/\" + str(team_pk))", "def join_room(self, room_name): \r\n logging.debug('Joining room {ro}'.format(ro=room_name))\r\n\r\n for room in self.rooms:\r\n if room.name == room_name:\r\n room.add_user(self)\r\n self._rooms[room_name] = room\r\n room.welcome(self)\r\n break\r\n else:\r\n room = Room(room_name)\r\n self.rooms.append(room)\r\n self._rooms[room_name] = room\r\n room.add_user(self)", "def join(self, message, db_session):\n username = self.ts.get_user(message)\n user = db_session.query(db.User).filter(db.User.name == username).one_or_none()\n if not user:\n user = db.User(name=username)\n db_session.add(user)\n try:\n self.player_queue.push(username, user.times_played)\n self._add_to_whisper_queue(username, \"You've joined the queue.\")\n user.times_played += 1\n except RuntimeError:\n self._add_to_whisper_queue(username, \"You're already in the queue and can't join again.\")\n\n # queue_snapshot = copy.deepcopy(self.player_queue.queue)\n # self.command_queue.appendleft(('_insert_into_player_queue_spreadsheet',\n # {'username': username, 'times_played':user.times_played, 'player_queue': queue_snapshot}))", "async def tod_join(self, ctx, *args):\n if ctx.author not in self.players:\n self.players.append(ctx.author)\n message = f\"{ctx.author.mention} has been added to the game!\"\n await ctx.send(message)\n else:\n message = f\"{ctx.author.mention} has already joined!\"\n await ctx.send(message)\n\n # Updates the role if channel exists\n for channel in ctx.guild.channels:\n if channel.name.startswith(\"truth-or-dare\"):\n role = discord.utils.get(ctx.guild.roles, 
name=\"Player\")\n await ctx.author.add_roles(role)\n return\n\n # Creates the channel if it doesn't exist\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n bots = discord.utils.get(ctx.guild.roles, name=\"Bots\")\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False, send_messages=False),\n bots: discord.PermissionOverwrite(read_messages=True, send_messages=True),\n role: discord.PermissionOverwrite(read_messages=True, send_messages=True, connect=True, speak=True)\n }\n await ctx.guild.create_text_channel('truth-or-dare', overwrites=overwrites)\n await ctx.guild.create_voice_channel('secret-voice', overwrites=overwrites)\n\n # Adds the role\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.add_roles(role)", "async def on_member_join(self, member: discord.Member) -> None:\n\n await add_user_in_db(member, member.guild)\n\n guild_from_db = await Guilds.get(guild_id=member.guild.id)\n role_saver = guild_from_db.role_saver\n if role_saver:\n user_roles = await UserRoles.get_many(guild_id=member.guild.id, user_id=member.id)\n if user_roles:\n for rol in user_roles:\n role = discord.utils.get(member.guild.roles, id=rol.role_id)\n if role.name == '@everyone':\n continue\n else:\n await member.add_roles(role)\n\n await Profiles.update(user_id=member.id,\n guild_id=member.guild.id,\n set=[\"joins = joins + 1\"])\n await Guilds.update(guild_id=member.guild.id,\n set=[\"day_joins = day_joins + 1\"])\n\n await self.refresh_user_count_channel(member.guild)", "async def join(self, ctx, invite : discord.Invite):\r\n if ctx.message.author.id == \"481270883701358602\":\r\n await self.client.accept_invite(invite)\r\n await self.client.say(\"Joined the server.\")\r\n else:\r\n await self.client.say(\"**Owner only command.**\")", "def test_join_after_invite(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n u2token = self.login(\"u2\", \"pass\")\n\n self.helper.invite(r1, u1, u2, tok=u1token)\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.join(r1, u2, tok=u2token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 0,\n )\n self.assertEqual(\n r1stats_post[\"joined_members\"] - r1stats_ante[\"joined_members\"], +1\n )\n self.assertEqual(\n r1stats_post[\"invited_members\"] - r1stats_ante[\"invited_members\"], -1\n )", "def on_join(data):\n quiz = store.get_quiz_by_user_id(data['user_id'])\n room = quiz.quiz_id\n\n # get and clean the users (no score)\n users = store.get_users_by_id(store.get_quiz_by_id(room).users)\n users_cleaned = [user.name for user in users]\n\n # emit the new users the to the room\n if room is not None:\n join_room(room)\n emit(\"current_players\", {\"users\": users_cleaned}, room=room)", "def join_room(self, client, room):\n if room.verify_if_is_invited(client):\n room.add_member(client)\n self.send_message('Te has unido a la sala {}'.format(room.get_name()), client)\n else:\n self.send_message('No estas invitado a la sala.', client)", "async def on_member_join(member):\n if boterate.has_member(member):\n boterate.update_member(member)\n else:\n boterate.insert_user(member)", "def 
join_game(gameid,hostplayer):\n\tuser_db = auth.current_user_db()\n\tname = user_db.name\n\ttoken = channel.create_channel(name + gameid) \n\ttemplate_values = {\n\t\t\t\t\t\t\"gameid\":gameid,\n\t\t\t\t\t\t\"token\": channel.create_channel(name + gameid),\n\t\t\t\t\t\t\"yourname\": name,\n\t\t\t\t\t\t\"hostplayer\":hostplayer\n\t\t\t\t\t\t}\n\treturn render_template(\"player.html\", values=template_values)", "def joined(message):\n room = session.get('room')\n join_room(room)\n emit('status', {'msg': session.get('name') + ' has entered the room.'}, room=room)", "def joined(message):\n room = session.get('room')\n join_room(room)\n emit('status', {'msg': session.get('name') + ' has entered the room.'}, room=room)", "def process_join(message):\n try:\n # If the user is already registered process as an update instead\n Resident.objects.get(phone_number=message.sender)\n return process_update(message)\n except Resident.DoesNotExist:\n resident = Resident.objects.create(\n phone_number=message.sender,\n location=message.location.location)\n \n message.respond('Thank you for registering. You will now receive messages for your area.')\n\n return TropoOkResponse()", "def validate_can_enter(self, user, contest_pool):\n\n # the contest attempting to be joined\n target_skill_level = contest_pool.skill_level\n if target_skill_level.enforced == False:\n return # the skill level of this contest is not enforced -- anyone can join no matter what\n\n # find any enforced skill_levels we have an entry in not matching our target.\n # if any are found, that means we cant join and must raise exception\n entries = Entry.objects.filter(\n user=user,\n contest_pool__draft_group=contest_pool.draft_group,\n contest_pool__skill_level__enforced=True\n ).exclude(contest_pool__skill_level=target_skill_level)\n\n if entries.count() > 0:\n raise self.CanNotEnterSkillLevel()", "async def join(self, ctx):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if player.is_connected:\n return await ctx.send(\"I'm already in a voice channel :no_entry:\")\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n return await ctx.send(\"You are not in a voice channel :no_entry:\")\n player.store('sessionowner', ctx.author.id)\n player.store('channel', ctx.channel.id)\n await player.connect(ctx.author.voice.channel.id)\n await ctx.send(\"Summoned to `{}` <:done:403285928233402378>\".format(ctx.author.voice.channel.name))", "def join(self, user, role=None):\n if self.check_can_join(user) or role is not None:\n membership, c = Membership.objects.get_or_create(user=user, blog=self)\n if c:\n post_rating = PostVote.objects.filter(object__author=user, object__blog=self).score()\n membership.overall_posts_rating = post_rating\n comment_rating = CommentVote.objects.filter(object__author=user, object__post__blog=self).score()\n membership.overall_comments_rating = comment_rating\n\n if role is not None:\n membership.role = role\n membership.save()\n return\n if membership.role == 'LB':\n membership.role = 'B'\n membership.save()\n return _(\"Success. 
You are still banned, though\")\n elif membership.role != 'L':\n return _(\"You've already joined to the=is blog\")\n elif self.join_condition == 'I':\n membership.role = 'W'\n membership.save()\n return _(\"A request has been sent\")\n else:\n membership.role = 'M'\n membership.save()\n return _(\"Success\")\n else:\n raise PermissionCheckFailed(_(\"You can't join this blog\"))", "async def user_joined_button(self, payload: discord.RawReactionActionEvent) -> None:\n\n self.bits = flip_action_bits(LoggingActions.USER_JOINED, self.bits)\n await self.update_embed()", "async def join(self, gid):\n\t\tif self.group != None:\n\t\t\tif self.group.gid == gid:\n\t\t\t\traise exceptions.ClientError('IN_GROUP')\n\n\t\tif gid and not utilities.validate_string(gid):\n\t\t\traise exceptions.ClientError('INVALID_STRING')\n\n\t\tif gid:\n\t\t\tgroup = Group.register(gid)\n\t\telse:\n\t\t\ttries = 0\n\t\t\twhile 1:\n\t\t\t\tif tries >= 5:\n\t\t\t\t\traise exceptions.ClientError('INVALID_GROUP')\n\t\t\t\tgid = utilities.random_string(16)\n\t\t\t\tgroup = Group.register(gid)\n\t\t\t\tif len(group.members) == 0:\n\t\t\t\t\tbreak\n\t\t\t\ttries += 1\n\n\t\tif group.in_game:\n\t\t\traise exceptions.ClientError('IN_GAME')\n\n\t\tawait group.add(self)", "def api_contests_join():\n if request.method == 'GET':\n user = get_queryparam('user')\n returnJSON = models.select_joined_contest(\n params=('*'),\n conditions=('{}=\\\"{}\\\"'.format(\n settings.DB_COLUMNS.JOINED_CONTEST_USER,\n user\n )\n )\n )\n return jsonify(returnJSON)\n elif request.method == 'POST':\n postJSON = request.get_json()\n models.insert_joined_contest(\n postJSON[settings.DB_COLUMNS.JOINED_CONTEST_USER],\n postJSON[settings.DB_COLUMNS.JOINED_CONTEST_CONTEST]\n )\n return ('', 204)\n elif request.method == 'DELETE':\n postJSON = request.get_json()\n models.delete_joined_contest(\n postJSON[settings.DB_COLUMNS.JOINED_CONTEST_USER],\n postJSON[settings.DB_COLUMNS.JOINED_CONTEST_CONTEST]\n )\n return ('', 204)", "def join_farm(self, request, pk):\n farm = self.get_object()\n user = request.user\n farm.add_member(user)\n return Response({}, status=status.HTTP_202_ACCEPTED)", "def join(self):\n channel = self.data[0]\n user_pseudonym = VALIDATED_USERS.get_pseudonym(SOCKET_TO_USERID.get(self.source, None))\n\n if user_pseudonym and self.target:\n target_server = self.target[1]\n if(BANHANDLER.is_banned_from_channel(user_pseudonym, target_server, channel)):\n self.source[0].send(\":orcbot!~@localhost PRIVMSG \"+SOCKET_TO_USERID[self.source]+\" :You're banned from \"+channel+\"\\r\\n\")\n elif(self.target):\n self.message = self.message +\"\\r\\n\"\n self.target[0].sendall(self.message)\n self.send()", "async def join(self, context):\n try:\n if Tournament.persons < 32:\n name_participant = self.get_params_as_text(context)\n self.tournament.register_participant(name_participant[:20])\n await context.send(f'{name_participant} se anoto en el torneo. Participante N°{Tournament.persons}')\n else:\n await context.send('No hay mas lugar. El que se fue a la villa perdio su silla. 
32/32')\n except Exception as error:\n print(error)", "def join(data):\n username, room = data['username'], data['room']\n join_room(room)", "def on_join(data):\n logger.info(f\"Joining: {data}\")\n to = data[\"to\"]\n if to in TO_OPTIONS.keys():\n join_room(to)\n logger.info(f\"Rooms: {rooms()}\")\n else:\n logger.warning(f\"{to} not in TO_OPTIONS\")", "def add_player(self, user):\n # Make sure the user can play\n user_profile = user.get_profile()\n if user_profile.credits < self.entrance_fee:\n raise ValueError(\"Not enough credits to pay entrance fee.\")\n if self.is_user_playing(user):\n raise ValueError(\"User already in tournament.\")\n \n # Handle the money transfer to join the tournament\n user_profile.credits = user_profile.credits - self.entrance_fee\n user_profile.save()\n self.prize_pool = self.prize_pool + self.entrance_fee\n self.save()\n \n # Join the tournament\n new_player = Player(user=user,\n tournament=self,\n credits=self.starting_credits)\n new_player.save()\n return True", "async def join_room(self, room_id):\n print(\"PublicChatConsumer\", \"join_room\", self.scope[\"user\"])\n if self.scope[\"user\"].is_authenticated:\n try:\n room: PublicChatRoom = await get_room_or_error(room_id)\n except ClientError as e:\n await self.handle_client_error(e)\n else:\n # Add user to the room\n await connect_user(room, self.scope[\"user\"])\n\n # Set the room_id with the current room\n self.room_id = room_id\n\n # Add user to the group\n await self.channel_layer.group_add(\n room.group_name,\n self.channel_name\n )\n\n # Send acknowledgement to client\n await self.send_json({\n \"join\": str(room_id),\n \"username\": self.scope[\"user\"].username\n })\n\n # Send the total number of connected users to client\n connected_users_count = await get_connected_users_count(room)\n await self.channel_layer.group_send(\n room.group_name,\n {\n \"type\": \"connected.users.count\",\n \"connected_users_count\": connected_users_count\n }\n )", "def join(self, name):\n \n if name in self.roomList:\n pass\n else:\n self.sendCommand(\"global /join\",name)", "def join(self, game):\n self.game = game\n self.game.join(self)\n return self.game", "def join(self, game):\n self.game = game\n self.game.dealer_join(self)\n return self.game", "def userJoined(self, user, channel):\n self.dispatch('population', 'userJoined', user, channel)", "def userJoined(self, user, channel):\n # Send messasge to Server bot.\n self.data_in(text=\"\", type=\"joined\", user=\"server\", channel=channel,\n nicklist=[user])", "def on_player_join(self, func):\n self.loop.create_task(self._rpc.register_event('ACTIVITY_JOIN', func))\n return func", "def join_in_play(self, join_in_play):\n\n self._join_in_play = join_in_play", "def test_logged_user_can_join(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_can_access(self, self.url,\n post_redirect_url=expected_url)\n\n self.assertIn(logged_user, self.group.users.all())\n self.assertIn(self.group, logged_user.joined_groups.all())", "async def join_leaderboard(self, ctx: commands.Context) -> None:\n if ctx.channel.id != settings.aoc.channel_id:\n await ctx.send(f\"Please use the <#{settings.aoc.channel_id}> channel\")\n return\n\n author = ctx.message.author\n\n info_str = (\n \"Head over to https://adventofcode.com/leaderboard/private \"\n \"with code `975452-d90a48b0` to join the TWT private leaderboard!\"\n )\n try:\n await author.send(info_str)\n except discord.errors.Forbidden:\n await ctx.send(f\":x: 
{author.mention}, please (temporarily) enable DMs to receive the join code\")\n else:\n await ctx.message.add_reaction(\"\\U0001F4E8\")", "def joined(message):\n room = session.get('room')\n join_room(room)\n emit('status', {'msg': session.get('name') + ' joined'}, room=room)", "def joined(message):\n\tglobal GLOBAL_NUM_USERS\n\tGLOBAL_NUM_USERS = GLOBAL_NUM_USERS + 1\n\tprint(message)\n\tsession['name'] = message['name']\n\tsession['room'] = message['room']\n\troom = session.get('room')\n\tjoin_room(room)\n\tprint('%s : joined' % session)\n\temit('_joined', {'user_name': session.get('name'), 'num_users' : GLOBAL_NUM_USERS}, room=room)", "def join_room(room):\n return request.namespace.join_room(room)", "def join_session(self, information, player):\n try: # if input of int() is not convertible to integer it throws an error\n req_ses_id = int(information.split(protocol._MSG_FIELD_SEP)[1])\n except ValueError:\n print(\"session id is not int convertible: %s\" % information.split(protocol._MSG_FIELD_SEP))\n return # TODO: appropriate error to user\n\n for session in self.current_sessions:\n if session.game_id == req_ses_id:\n break\n self.__lock.acquire()\n player.current_session_id = session.game_id\n joined_session = session.add_player(player)\n # TODO: some mysterious behavior observed here. couldn't reproduce it [Novin]\n print(\"player added to current session!\")\n self.__lock.release()\n if joined_session:\n return session\n else:\n return None", "async def on_member_join(member: Member):\n await member_handler.member_joined(member)", "def joined(self, channel):\n log.info(\"Joined %s.\", channel)\n # ask for the current list of users in the channel\n self.dispatch('presence', 'joined', channel)", "async def chat_usercompletechallenge(self, event):\n await self.send_json(\n return_value(\n ACTION_USER_COMPLETE,\n event['label'],\n event['username'],\n MSG_ALERT,\n NO_MESSAGE\n )\n )\n\n # TODO send all users in room that game start and that 's a new challenge to complete", "def join_room(self, data, user):\n # Filters out | to prevent string injection\n data[\"args\"] = [i.replace(\"|\", \"\") for i in data[\"args\"]]\n\n self.remove(user)\n\n user.room = data[\"args\"][1]\n user.x = data[\"args\"][2]\n user.y = data[\"args\"][3]\n user.frame = \"0\"\n\n self.add(user)", "def add_game(username):\n\n if 'username' in session:\n # As above, this checks if the session cookie has a username key\n # if not, send the user to a login screen.\n form = AddGameForm()\n # is this try...except block necessary?\n \n current_user = User.from_mongo(**mongo.db.users.find_one({\"name\": session.get('username')}))\n if current_user is None:\n flash('Oops. We encountered a problem. 
Please log in to continue')\n session.pop('username')\n session.pop('email')\n session.pop('_id')\n return redirect(url_for('login'))\n else:\n if form.validate_on_submit():\n label = form.title.data\n platform = form.platform.data\n year = int(form.year.data)\n genre = form.genre.data\n publisher = form.publisher.data\n # ensure publisher is selectfield with id as values \n new_game_name = label.replace(\" \", \"_\").lower() \n new_game = Game.add_game(label=label, \n platform=platform, \n year=year, \n genre=genre, \n publisher=publisher)\n new_game_ref = new_game.create_game_ref() \n\n # Add new game to Users list of games\n current_user.game_list.append(new_game_ref) \n # Update User on the database\n current_user.update_user() \n flash('Game Added Successfully')\n return redirect(url_for('profile', \n username=session['username']))\n else:\n return render_template('add_game.html', \n username=username, \n form=form)\n else:\n flash('Please log in to continue')\n return redirect(url_for('login'))", "def do_start_joined(self):\n\t\td = {\"state\": be.S_GAME,\n\t\t\t\t\"hosting\": False,\n\t\t\t\t\"uuid\": None,\n\t\t\t\t\"name\": self.game_name,\n\t\t\t\t\"nickname\": self.nickname,\n\t\t\t\t\"num_players\": self.num_players,\n\t\t\t\t\"boardsize\": self.boardsize}\n\t\tevent = pygame.event.Event(be.E_STATE, d)\n\t\tpygame.event.post(event)\n\n\t\tself.hide_all()\n\t\tself.renderer.color = (0, 0, 0, 0)", "async def join_leaderboard(self, ctx: commands.Context) -> None:\n author = ctx.message.author\n log.info(f\"{author.name} ({author.id}) has requested the PyDis AoC leaderboard code\")\n\n info_str = (\n \"Head over to https://adventofcode.com/leaderboard/private \"\n f\"with code `{AocConfig.leaderboard_join_code}` to join the PyDis private leaderboard!\"\n )\n try:\n await author.send(info_str)\n except discord.errors.Forbidden:\n log.debug(f\"{author.name} ({author.id}) has disabled DMs from server members\")\n await ctx.send(f\":x: {author.mention}, please (temporarily) enable DMs to receive the join code\")\n else:\n await ctx.message.add_reaction(Emojis.envelope)", "async def on_member_join(self, member: Member):\n\n if not self._is_tracked(member.guild, EventPriority.join):\n return\n\n em = self.em_base(\n member,\n f\"User {member.mention} ({member.name}) joined\",\n EventColors.join.value\n )\n\n em.add_field(\n name=\"Account Creation Timestamp\",\n value=self._get_timestamp()\n )\n\n await self.log_event(em, member.guild, priority=EventPriority.join)", "def join_course(request, username, course_code):\n\n #get data from post \n if request.method == 'GET':\n \n # if user log in \n try:\n user = User.objects.get(username=username)\n if ensure_login(user) == False:\n return JsonResponse({'login': 'User must login'}, status=403) \n except:\n return JsonResponse({'login': 'User must login'}, status=403)\n\n try:\n course_to_join = Course.objects.get(course_code=course_code)\n except:\n return JsonResponse({'error': 'Soemthing went Wrong in frontend, No such Course'}, status=404)\n\n if user.is_staff:\n return JsonResponse({'error': 'Staff not allow to join course'}, status=404)\n \n if user.user_info.level != course_to_join.level:\n return JsonResponse({'error': 'must be in the same level'}, status=404)\n \n\n # check if the user didn't yet enrolled\n try:\n is_enrolled = user.enrolled_courses.get(course_code=course_code)\n return JsonResponse({'error': 'Student Already Enrolled in this course'}, status=403)\n except:\n is_enrolled = False\n\n if not is_enrolled:\n # log the 
user into the course\n course_to_join.students.add(user)\n course_to_join.save()\n\n return JsonResponse({'success': True}, status=200)\n else:\n return JsonResponse({'error': 'Method not Allowed'}, status=405)", "def on_join_data(self, data):\n self.users.client.key = data[7] # unique user identifier ?\n self.users.client.join_time = data[11] # join time as unix including milliseconds ?\n self._room_id = data[13] # room id\n\n self.send_connection_ok()\n\n if config.DEBUG_TO_CONSOLE:\n print ('Join Data:')\n for i, v in enumerate(data):\n print ('\\t[%s] - %s' % (i, v))", "def join_team_action(request):\n # Find the team corresponding to the provided code.\n data = request.json_body\n team_id = None\n if request.by_admin:\n # Accept a team_id if the authenticated user is an admin.\n if 'team_id' in data:\n team_id = data['team_id']\n if team_id is None:\n code = data['code']\n team_id = find_team_by_code(request.db, code)\n if team_id is None:\n raise ApiError('unknown team code')\n user_id = request.context.user_id\n # Add the user to the team.\n join_team(request.db, user_id, team_id, now=datetime.utcnow())\n # Ensure the user gets team credentials.\n reset_user_principals(request)\n return {'success': True}", "async def on_guild_join(self, guild):\n l.info(f\"Joined {guild.name} with {guild.member_count} users!\")", "async def check_in_game(user_id, ctx): # this is meant for when it is accessed by commands outside of BlackJack.\n check = ex.first_result(await ex.conn.fetchrow(\"SELECT COUNT(*) From blackjack.games WHERE player1 = $1 OR player2 = $1\", user_id))\n if check:\n await ctx.send(f\"> **{ctx.author}, you are already in a pending/active game. Please type {await ex.get_server_prefix_by_context(ctx)}endgame.**\")\n return True", "async def on_member_join(self, member):\n verified = get(member.guild.roles, name='verified')\n verify_channel = get(member.guild.channels, name='verify')\n db_discord_user = PostgreSQL.get_discord_user(member.id)\n # Checks if the verified role exists, if it doesn't a DM is sent to the server owner to configure it\n if verified is None:\n await verify_channel.send(f'{member.guild.owner.mention} The verified role doesn\\'t exist in the server `{member.guild.name}`. Please type `!build` in one of the text channels in that server')\n return\n\n # Checks if the user exists in the database, if it doesn't a DM is sent to the user to tell them to get verified\n if db_discord_user is None:\n await verify_channel.send(f'{member.mention} You have not been verified yet. Please visit {WEBSITE} to get verified (VPN is required)')\n return\n \n db_openid_user = PostgreSQL.get_openid_user(db_discord_user[\"openidc_id\"])\n email = db_openid_user[\"username\"]\n await member.add_roles(verified, reason='Assigning user the verified role')\n\n if check_shelve_file(member.guild.id):\n await member.edit(nick=f'{member.name} [{email}]', reason=\"Changing users\\'s nickname\")", "def handle_enter_room_session(self, lobby_command, client_socket):\n words = lobby_command.split()\n sent_name = words[1]\n user = self.clients[client_socket]['data'].decode('utf-8')\n for room in self.rooms:\n if room.name == sent_name and user in room.room_attrbts['members']:\n room.room_attrbts['active'].add(user)\n msg = f'User {user} is a member of room {sent_name}. Entering user into active mode for this room. ACTIVE'\n print(msg)\n return\n msg = f'Room {sent_name} not found or user {user} is not yet a member. 
NONACTIVE'\n self.log_and_send(client_socket, msg)\n return", "def join(self, username=None, password=None):\n if username is not None:\n logging.debug(\"Ignored username parameter on join(), it is unsupported on this back-end.\")\n if password is None:\n password = \"\"\n room = str(self)\n\n self.connection.join(room, key=password)\n holder.bot.callback_room_joined(self)\n logging.info(\"Joined room {}\".format(room))", "async def on_guild_join(self, guild):\n\t\tglobal defaultLeaderboard\n\n\t\tif str(guild.id) not in self.leaderboards:\n\t\t\tself.leaderboards[str(guild.id)] = defaultLeaderboard\n\t\t\tawait self.update_state()", "def joined(self, channel):\n # Return user list to Server bot.\n self.get_nicklist()", "def on_join(self, event):\n self.pre_check(event)\n state = event.guild.get_member(event.author).get_voice_state()\n if not state:\n return api_loop(\n event.channel.send_message,\n \"You must be in a voice channel to use that command.\",\n )\n if event.guild.id not in self.guilds:\n try:\n client = state.channel.connect(mute=False)\n except VoiceException as e:\n return api_loop(\n event.channel.send_message,\n \"Failed to connect to voice: `{}`\".format(e),\n )\n else:\n self.guilds[event.guild.id] = psuedo_queue(\n self,\n player=Player(client),\n guild_id=event.guild.id,\n )\n return", "def joined(message):\n #room = session.get('room')\n room='abc'\n join_room(room)\n #emit('status', {'msg': session.get('name') + ' has entered the room.' + message['msg']}, room=room)\n emit('status', {'msg': 'Yao has entered the room.'}, room=room)\n #emit('status', {'msg': 'Yao has entered the room.'}, room='room1')", "async def signups_helper(self, ctx, game: str, minimum: int=2, maximum: int=50, rounds: int=1) -> bool:\n guild = ctx.guild.id #`guild` is actually the guild's id, but using guild to shorten the variable\n # Check if there is an existing game\n self._existing_game(ctx)\n\n # Creation of embed to start signups\n embed = discord.Embed(title=f\"Game of '{game.capitalize()}' by {ctx.author}\",\n description=f\"Sign up by reacting 🙋‍♂️ to this message!\\n{rounds} Rounds\\nMinimum Players: {minimum}\\nMaximum Players: {maximum}\",\n color=discord.Colour(random.randint(0, 16777215)))\n embed.add_field(name=\"Current Signups\", value='None', inline=True)\n embed.set_footer(text=f\"React ▶️ to close signups and start the game or react ⏹️ to cancel the game.\\nOnly the host or server moderators can start or cancel the game.\")\n self.games_info[guild][0] = await ctx.send(embed=embed)\n\n reactions = ('🙋‍♂️', '▶️', '⏹️')\n for emoji in reactions:\n await self.games_info[guild][0].add_reaction(emoji)\n self.games_info[guild][1] = True\n \n # Not sure if it is a bug, but somehow the bot when it reacts the stop button,\n # can stop the game. 
No idea how, but just to resolve it:\n await asyncio.sleep(1)\n\n # Wait for signal to start or cancel game\n def stop_signups_check(reaction, user:discord.Member):\n return (reaction.emoji in ['▶️', '⏹️']\n and reaction.message.id == self.games_info[guild][0].id\n and (user.id == ctx.author.id \n or ctx.channel.permissions_for(user).manage_guild))\n while True:\n signal, user = await self.bot.wait_for('reaction_add', check=stop_signups_check)\n if signal.emoji == '▶️':\n player_count = len(self.games_info[guild][2])\n # Check if number of players fits the requirement\n if player_count >= minimum and player_count <= maximum:\n self.games_info[guild][1] = False # Ensure that number of players don't change\n await ctx.send(f\"Request by {user}: Starting Game\")\n return True\n else:\n await ctx.send(f\"Recevied request to start game by {user}, but number of players does not meet requirement.\")\n elif signal.emoji == '⏹️':\n await ctx.send(f\"Game cancelled by {user}.\")\n self.games_info[guild] = gamesDict()\n return False\n else:\n raise Exception # Shouldn't happen by the nature of the above code", "async def addto(self, ctx, game, user):\n\n if add(game, user.id):\n await self.bot.say(\"{} was added to {}'s' library.\".format(game, user.nick))\n else:\n await self.bot.say(\"{} already has this game in their library.\".format(user.nick))", "def irc_JOIN(self, prefix, params):\n user = re.match(self.user_regex, prefix)\n channel = params[0]\n\n self.logger.debug(\n \"%s!%s@%s joined %s\" %\n (user.group(1), user.group(2), user.group(3), channel)\n )\n\n self.event_manager.fire(\"irc.join\", user, channel)", "def test_join_first_time(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n u2token = self.login(\"u2\", \"pass\")\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.join(r1, u2, tok=u2token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 1,\n )\n self.assertEqual(\n r1stats_post[\"joined_members\"] - r1stats_ante[\"joined_members\"], 1\n )", "def new(user, requested_players):\n main_grid = [[0]*9 for _ in range(9)]\n wallv_grid = [[0]*8 for _ in range(9)]\n wallh_grid = [[0]*9 for _ in range(8)]\n wallfills_grid = [[0]*8 for _ in range(8)]\n game_id = randint(0, 1000000)\n last_status = json.dumps({\"status\": \"Waiting for other players to join...\",\n \"waiting\": True})\n if requested_players == \"two\":\n# while TwoPlayerGame.objects.filter(game_id=game_id):\n# game_id = randint(0, 10000)\n user.two_player_game_id = game_id\n main_grid[0][4] = 1\n main_grid[8][4] = 2\n TwoPlayerGame.objects.create(\n game_id=game_id,\n player1=user,\n player1_walls=10,\n player2_walls=10,\n main_grid=json.dumps(main_grid),\n wallv_grid=json.dumps(wallv_grid),\n wallh_grid=json.dumps(wallh_grid),\n wallfills_grid=json.dumps(wallfills_grid),\n last_status=last_status,\n turn=user)\n if requested_players == \"four\":\n# while FourPlayerGame.objects.filter(game_id=game_id):\n# game_id = randint(0, 10000)\n user.four_player_game_id = game_id\n main_grid[0][4] = 1\n main_grid[8][4] = 2\n main_grid[4][0] = 3\n main_grid[4][8] = 4\n FourPlayerGame.objects.create(\n game_id=game_id,\n player1=user,\n 
player1_walls=5,\n player2_walls=5,\n player3_walls=5,\n player4_walls=5,\n main_grid=json.dumps(main_grid),\n wallv_grid=json.dumps(wallv_grid),\n wallh_grid=json.dumps(wallh_grid),\n wallfills_grid=json.dumps(wallfills_grid),\n last_status=last_status,\n turn=user)\n user.save()\n return", "def test_join_after_leave(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n u2token = self.login(\"u2\", \"pass\")\n\n self.helper.join(r1, u2, tok=u2token)\n self.helper.leave(r1, u2, tok=u2token)\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.join(r1, u2, tok=u2token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 0,\n )\n self.assertEqual(\n r1stats_post[\"joined_members\"] - r1stats_ante[\"joined_members\"], +1\n )\n self.assertEqual(\n r1stats_post[\"left_members\"] - r1stats_ante[\"left_members\"], -1\n )", "async def _join_money(ctx):\n await join_money(bot, ctx.message.author)" ]
[ "0.6783792", "0.67832756", "0.664182", "0.6565777", "0.653179", "0.6504052", "0.6492886", "0.6466361", "0.643423", "0.6424354", "0.63196415", "0.6313337", "0.63049585", "0.6297384", "0.6251035", "0.6194082", "0.6171204", "0.61612934", "0.61388975", "0.61386853", "0.6117116", "0.6084342", "0.5988996", "0.5933416", "0.59290606", "0.58987814", "0.5862914", "0.58532655", "0.5847674", "0.5833886", "0.5832843", "0.58227134", "0.5795606", "0.5793646", "0.5785229", "0.57765913", "0.5770591", "0.5768056", "0.5759081", "0.5745206", "0.5736092", "0.57285976", "0.57161236", "0.57100904", "0.5696705", "0.5696705", "0.5681795", "0.56787044", "0.56556773", "0.56450015", "0.5631137", "0.5629915", "0.56147647", "0.5607501", "0.55784154", "0.5561631", "0.5561063", "0.5548591", "0.55404687", "0.55393916", "0.55386144", "0.5534787", "0.55261123", "0.5515287", "0.550376", "0.5486832", "0.5479865", "0.5468182", "0.54632133", "0.54505885", "0.5446756", "0.54436225", "0.54414237", "0.542199", "0.54107344", "0.54029775", "0.54018784", "0.54006594", "0.5361197", "0.53298163", "0.53256017", "0.5306112", "0.5305458", "0.53024095", "0.52912086", "0.5277723", "0.52637124", "0.52542907", "0.52519035", "0.5225774", "0.52205896", "0.5206893", "0.5205398", "0.51699966", "0.51699257", "0.5165382", "0.5155026", "0.51531136", "0.51461583", "0.51321393" ]
0.6428327
9
Handles a start game request. Starts the game if the request was sent by the host, and there are at least two players.
def start_game(players_cursor, states_cursor, user, room_id): users_query = '''SELECT * FROM players_table WHERE room_id = ?;''' users = players_cursor.execute(users_query, (room_id,)).fetchall() if users[0][USERNAME] == user: # Insert a game state entry into the states_table deck = ",".join(cards) board = "" dealer = random.randint(0, len(users) - 1) action = (dealer + 3) % len(users) pot = 0 new_state = '''INSERT into states_table VALUES (?,?,?,?,?,?);''' states_cursor.execute(new_state, (deck, board, dealer, action, pot, room_id)) start_new_hand(players_cursor, states_cursor, dealer, user, room_id) else: raise ValueError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def game_start(self):\r\n\t\tself._comm_server.broadcast_message(\"game-start\")\r\n\t\tself._is_game_started = True\r\n\t\tself._handlers[\"game-start\"].invoke()\r\n\t\t_logger.info(\"Game is started.\")", "def start_game(self, **kwargs):\n\n success, info = self.gms.start_game(\n player=kwargs.get('player', 'x'),\n first_turn=raw_input('Would you like to go first? y/n\\n') == 'y'\n )\n if success:\n if info['status_code'] == core_constants.GAME_STATUS_HUMAN_MOVE_REQUIRED:\n print(self.gms.game.get_board_state_pretty())\n self.play_human_move()\n else:\n print(info['messages'][0])", "def bcp_game_start(self, **kargs):\n self.bcp_player_add(number=1)\n self.bcp_player_turn_start(player=1)\n self.events.post('game_started', **kargs)", "def startGame(self):\n\n\t\tfor name in self.players.keys():\n\t\t\tself.startPlayerGame((name, 0))\n\t\tself.setupGuiSignals()", "def start_game(self):\n self.code = code.get_random_num()\n self.Player1 = self.get_player(1)\n self.Player2 = self.get_player(2)\n attempt = self.Player1.make_guess()\n guess.guess_lists(attempt, self.code)\n right_answer_list = guess.return_answer()\n num_guessed_list = guess.return_player_guess()\n check.check(num_guessed_list, right_answer_list)\n attempt = self.Player2.make_guess()\n guess.guess_lists(attempt, self.code)\n right_answer_list = guess.return_answer()\n num_guessed_list = guess.return_player_guess()\n output = check.check(num_guessed_list, right_answer_list)\n play = end_game.end_game(output)\n if play == True:\n self.keep_playing()", "def start_game(self):\n\n\t\tpass", "def start_game(self):\n return self.do_actions('before_game')", "def start(self):\n self.save_checkpoint(\"setup\")\n\n logging.info(\"Starting game...\")\n body = render_message(\n \"welcome.html\",\n game_name=self.name,\n night_end=self.night_end.strftime(\"%I:%M %p\"),\n day_end=self.day_end.strftime(\"%I:%M %p\"),\n players=self.game.players,\n )\n self.send_message(mafia.events.PUBLIC, \"%s: Start\" % self.name, body)\n self.game.begin()\n self.started = True\n\n self.save_checkpoint(\"start\")", "def do_start_new_game(request_json, this_player_user_id):\n game = Game(this_player_user_id)\n logger.debug(f\"do_start_new_game with {request_json}\")\n parsed_values, message = game.parse_requested_config(request_json)\n if parsed_values:\n # initalise a session\n logger.debug(f\"do_start_new_game parsed values, creating session to commit save.\")\n c = common_db.Common_DB()\n this_session = c.common_Sessionmaker()\n result, message = game.save(this_session)\n if not result:\n logger.error(\"do_start_new_game save failed, rolling back\")\n this_session.rollback()\n response = {\"startnewgame\": False,\n \"message\": message}\n else:\n logger.info(\"do_start_new_game save ok, committing\")\n this_session.commit()\n msg = quote_plus(f\"Game created successfully with ID {game.state.game_id}\"\n \". 
Now let's wait for some other players to join.\")\n response = {\"startnewgame\": True,\n \"new_game_id\": game.state.game_id,\n \"message\": message,\n \"redirect_querystring\": f\"?msg={msg}\"}\n this_session.close()\n logger.debug(\"do_start_new_game completed successfully - returning: %s\",\n jsonpickle.encode(response, unpicklable=False))\n return response, game\n else:\n logger.error(\"do_start_new_game unable to parse values: %s\", message)\n response = {\"startnewgame\": False,\n \"message\": message}\n return response, None", "def admin_start_game(game_id, player_id):\n game_data = load_state(game_id)\n if not game_data:\n return False\n players = game_data.get('players')\n if player_id not in [p['id'] for p in players]:\n return False\n player = [p for p in players if p['id'] == player_id][0]\n if game_data['active']:\n return False\n if game_data['ended_at']:\n return False\n if len(game_data['players']) < game_data['min_players']:\n return False\n if not player['admin']:\n return False\n start_game(game_data)\n msg = make_info_message('The game has started')\n alt_msg = make_info_message('{} started the game'.format(player['name']))\n flash_player(game_data, player, msg, alt_msg)\n msg = make_info_message(\n 'The first player to reach {} points wins!'.format(POINTS_TO_WIN))\n flash_broadcast(game_data, msg)\n save_state(game_data)\n return True", "def requestToStartGame(self):\n obj = {\"worldName\": self.worldInfo.worldName}\n main.msgQ.addToPendingObj(Constants.WORLD_TYPE, 1)\n main.msgQ.addToPendingObj(Constants.WORLD_NAME, self.worldInfo.worldName)\n main.cManager.sendRequest(Constants.CMSG_JOIN_PVP_WORLD, obj)", "def start(self):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # open socket, AF_INET=get IPv4 address, SOCK_STREAM=protocol TCP\n s.bind((HOST, PORT)) # bind socket to the address (ip,port)\n print(\"[STARTING] server is starting...\")\n s.listen() # start listening to clients\n print(f\"[LISTENING] Server is listening on {HOST, PORT}\")\n while True:\n cl_socket, addr = s.accept() # accept client\n # if there are less than 3 active players - accept, else - continue (wait for next request)\n if threading.activeCount() == MAX_GAMES_LIVE + 1: # this thread + max amount of games\n # 3 active players, request denied\n cl_socket.send('[1]Game manager is full, please try again later'.encode())\n cl_socket.close()\n print(f\"[CONNECTION DENIED] {addr}\")\n else: # make new thread and start the game\n cl_socket.send(\"[0]OK let's play\".encode()) # verification msg for client\n print(f\"[NEW CONNECTION] {addr} connected.\")\n thread = threading.Thread(target=self.new_client, args=(cl_socket, addr)) # open thread with 'new_client' function\n thread.start()\n print(f\"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}\")", "def handle_game_start_event(methods=[\"GET\", \"POST\"]):\n if not game.round_already_started:\n game.round_already_started = True\n print(\"start the round\")\n game.start_round()\n for player in game.ordered_players:\n hand_dict = {\n \"trump\": [game.trump_value, game.trump]\n }\n for i, card in enumerate(player.hand):\n hand_dict[i]=[card.value, card.suit]\n socketio.emit(\"deal hand\", hand_dict, room=player.sid)", "def startGame():\n session[\"game\"] = clueLogic.game()\n return render_template(\n \"StartGame.html\",\n suspects=clueLogic.cards[\"suspects\"],\n weapons=clueLogic.cards[\"weapons\"],\n rooms=clueLogic.cards[\"rooms\"]\n )", "def start_game(self) -> None:\n if self.started and not self.finished:\n self.finish_game()\n \n 
self.started = True\n self.finished = False\n\n self.game_count += 1\n self.games_list[self.game_index] = {\n \"total_kills\": 0,\n \"players\": [],\n \"kills\": {}\n }\n\n return", "def maybe_start(self):\r\n\t\tif not [p for p in self.players if not p.ready]\\\r\n\t\t and len(self.players) == self.max_players \\\r\n\t\t and not self.started:\r\n\t\t\tself.start()", "def start_game(request, game_id):\n try: \n game = Game.objects.get(pk=game_id)\n except Game.DoesNotExist:\n return HttpResponse(\"Game can't be found\")\n if SMSing:\n game.is_closed = True\n game.save()\n logger.debug(\"Going to shuffle for game %s\" % game)\n shuffle_for_game(game)\n logger.debug(\"Done shuffling for game %s. Going to send welcome SMSes.\" % game)\n if SMSing:\n send_game_welcome(game)\n activePlayers = game.active_count()\n logger.debug(\"Sent to %i players for game %s.\" % ( activePlayers, game))\n return HttpResponseRedirect('/m/status/%s' % game.id)", "def start_game(self) -> None:\n self.init_game()\n self.play()", "def start_game_check(self):\n if len(self.pending_players) > 0:\n return False\n else:\n return True", "async def start(self):\n await self.on_start()\n valid = await self.get_participants()\n\n if valid:\n await asyncio.sleep(1)\n await self.prepare()\n await self.game()\n\n del started[started.index(self.channel.id)]", "def start_game(self, first_player: players.Player, second_player: players.Player) -> int:\n from itertools import cycle\n self.game_running = True\n players_queue = cycle([first_player, second_player])\n status_after_play = 0\n\n # Player moves one by one\n for player in players_queue:\n if not self.game_running:\n break\n\n status_after_play = self.play(player)\n\n if status_after_play is not None:\n self.game_running = False\n\n self.switch_player()\n\n return status_after_play", "def gameStarted(self):\n\t\tself.server.gamesToStart -= 1\n\t\tif self.server.gamesToStart == 0:\n\t\t\tself.server.allGamesStarted.emit()", "async def start_game(self, game_id):\n game = await self.get_game(game_id)\n player1 = game[1]\n player2 = game[2]\n await self.add_player_status(player1)\n await self.add_player_status(player2)\n # Add Two Cards to both players [ Not in a loop because the messages should be in order on discord ]\n await self.add_card(player1)\n await self.add_card(player1)\n await self.add_card(player2)\n await self.add_card(player2)", "def on_launch(event_request, session):\n print(\"=====on_launch requestId: \" + event_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n return play_new_game(False)", "def start(self):\n running = True\n while running:\n k=self.Game.playgame()\n if k=='Exit':\n running = False\n continue\n elif k=='resume':\n continue\n elif k=='GameOver':\n o=self.gameover()\n if o=='newgame':\n self.Game=Game(self.Display)\n else:\n running = False\n while k=='Won':\n o=self.won()\n if o=='newgame':\n self.Game=Game(self.Display)\n break\n elif o==\"Exit\":\n output = self.Game.popup()\n if output == 'resume':\n self.Game.GameBoard.display()\n continue\n else:\n running = True\n break", "def start_of_game(self):\n pass", "def start_game(self):\n while self.can_deal:\n self.take_turn()", "def start(self):\n\n # Call the protected _turn method to start the game\n self._turn()", "def do_start_hosted(self):\n\t\tself.game_name = self.e_gamename.text\n\t\tself.num_players = (1, int(self.e_players.text))\n\t\tself.boardsize = (int(self.e_boardw.text), int(self.e_boardh.text))\n\n\t\td = {\"state\": be.S_GAME,\n\t\t\t\t\"hosting\": 
True,\n\t\t\t\t\"uuid\": None,\n\t\t\t\t\"name\": self.game_name,\n\t\t\t\t\"nickname\": self.nickname,\n\t\t\t\t\"num_players\": self.num_players,\n\t\t\t\t\"boardsize\": self.boardsize}\n\t\tevent = pygame.event.Event(be.E_STATE, d)\n\t\tpygame.event.post(event)\n\n\t\tself.hide_all()\n\t\tself.renderer.color = (0, 0, 0, 0)", "def receive_game_start_message(self, game_info: Dict[str, Union[int, Dict, List]]) -> None:\n self.__stack_size = game_info['rule']['initial_stack']\n self.__game_finished = False", "def requestReady(self):\n if self.team[self.team_num][self.map_pos].avatarLabel['text'] == \"\":\n return;\n \n if self.isHost:\n obj = {\"worldName\":self.worldInfo.worldName}\n main.cManager.sendRequest(Constants.CMSG_START_TO_READY_GAME, obj)\n \n else:\n obj ={\"worldName\": self.worldInfo.worldName}\n main.cManager.sendRequest(Constants.CMSG_READY, obj)\n self.isReady = 1", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def start_multiplayer(self, host: bool):\n if host:\n server_game = tetris_game.TetrisGame(500 + 200, 1000, \"multiplayer\", 75)\n server = TetrisServer(server_game)\n\n server.run()\n else:\n client_game = tetris_game.TetrisGame(500 + 200, 1000, \"multiplayer\", 75)\n client = TetrisClient(client_game)\n client.run()", "def start_game(self):\n\n player1 = self.lb1.get(ACTIVE)\n player2 = self.lb2.get(ACTIVE)\n\n self.master.destroy()\n self.master = None\n\n win = 0\n draw = 0\n loss = 0\n\n for i in range (1):\n\n print(\"Game \"+str(i+1) + \"\\n\")\n\n game = Game.Game(player1, player2, self.ui_draw.get())\n\n status = game.play()\n\n if status == Status.white_win:\n win += 1\n elif status == Status.black_win:\n loss += 1\n else:\n draw += 1\n\n print(\"White wins: \" + str(win) + \"\\nDraws: \" + str(draw) + \"\\nBlack Wins: \"\n + str(loss))", "def startGame(self, window, ids):\n self.application = Application(master = window, bot = self)\n self.gameThread = self.createGroup(\"Welcome to poker.\", ids)\n\n for i in range(self.application.table.allPlayers):\n self.application.table.allPlayers[i].onlineId = ids[i]\n\n window.mainloop()\n \n self.application.proceed()", "def at_start(self):\n if not self.db.started:\n self.player.start()\n self.db.started = True", "def is_player_game_started(self, player_id):\n return self.starter_map[player_id] < self.PLAYER_NOT_STARTED", "def tellIfStarted(self):\n if self.game_number == 1:\n self.welcome()\n else:\n self.tellGameNumber()", "def startGame():\n #roundnumber\n eel.updateRoundNumber()\n # start page\n eel.updateStartPage([startPage.getTitle(), startPage.getUrl()])\n eel.updateStartPageDescription(startPage.getFirstSentence())\n # goal page\n eel.updateGoalPage([goalPage.getTitle(), goalPage.getUrl()])\n eel.updateGoalPageDescription(goalPage.getFirstSentence())\n # ui updates\n eel.updateCurrentPage(\n [wikiPageStackTrace[-1].getTitle(), wikiPageStackTrace[-1].getUrl()])\n eel.updateCurrentPageDescription(wikiPageStackTrace[-1].getFirstSentence())\n eel.printInPageList(wikiPageStackTrace[-1].getOnlyLinksListJS())\n # loader\n time.sleep(0.5)\n eel.hideLoader()", "def start(self):\n self.player = Player()\n self.dealer = Dealer()\n self.pot = 0\n self.side_bet = 0\n start_game()", "def start_play(self, player1, player2, start_player=0, is_shown=1):\n if start_player not in (0, 1):\n raise Exception('start_player should be either 0 (player1 first) '\n 'or 1 (player2 first)')\n 
self.board.init_board(start_player)\n p1, p2 = self.board.players\n player1.set_player_ind(p1)\n player2.set_player_ind(p2)\n players = {p1: player1, p2: player2}\n if is_shown:\n self.graphic(self.board, player1.player, player2.player)\n while True:\n move = -1\n current_player = self.board.current_player\n player_in_turn = players[current_player]\n if len(self.board.availables) != 0:\n move = player_in_turn.get_action(self.board)\n self.board.do_move(move)\n if is_shown:\n self.graphic(self.board, player1.player, player2.player)\n end, winner = self.board.game_end()\n if end:\n if is_shown:\n if winner != -1:\n print(\"Game end. Winner is\", players[winner])\n else:\n print(\"Game end. Tie\")\n return winner", "def prepare_start_game(self, game, selected_decks, users, **kwargs):\n\n def _cb_event_animations(event):\n run_event_animations(self, event)\n\n def _cb_trigger_animations(trigger, current_event):\n run_trigger_animations(self, trigger, current_event)\n\n if C.UI.Cocos.RunAnimations:\n game.add_callback(_cb_event_animations, when='event')\n game.add_callback(_cb_trigger_animations, when='trigger')\n\n game.add_callback(self._update_content, when='resolve')\n game.add_callback(self._log_update_time, when='resolve')\n game.add_callback(self._game_end_dialog, when='game_end')\n game.start_game(selected_decks, mode='standard',\n class_hero_maps=[user.class_hero_map for user in users])\n self.users[0], self.users[1] = users[0], users[1]\n self.main_player_id = kwargs.pop('main_player_id', None)\n self.where_come_from = kwargs.pop('where_come_from', None)", "def start(self, player: Player) -> Game:\n\n board_payload = dict(rows=self.rows, cols=self.cols)\n initial_slots = self._get_initial_slots(**board_payload)\n board_db = self.repo.boards.add(\n {**board_payload, \"slots\": initial_slots, \"mines\": 0}\n )\n board = Board.from_orm(board_db)\n\n board.set_mines(mines=self.mines)\n board_db = self.repo.boards.update(board_db, board)\n\n game_payload = dict(\n player_id=player.id,\n board_id=board.id,\n status=GameStatusEnum.ongoing,\n start_time=datetime.utcnow(),\n )\n game_db = self.repo.games.add(game_payload)\n game = Game.from_orm(game_db)\n return game", "async def on_start(self):\n m = \"**{}** has started a game of {}! To participate, say `I`! 
**{} players needed.**\".format(\n self.message.author.display_name, self.name, self.num)\n await client.say(self.message, m)", "def test_start(self):\n magic_hat = Game()\n result = Game.start(magic_hat)\n self.assertEqual(result, game.STATUS_PLAYING)", "def request_pygame(self):\n\n if pygame and not self.pygame_requested:\n self.events.add_handler('init_phase_3', self._pygame_init)\n self.pygame_requested = True\n return True\n\n else:\n return False", "def start_game(self):\n env = os.environ.copy()\n hook_path = os.path.join('hook', 'libhook.so')\n game_path = os.path.join(env.get('HOME'), '.local', 'share', 'Steam',\n 'steamapps', 'common', 'Super Hexagon',\n 'SuperHexagon')\n\n env['LD_PRELOAD'] = os.path.abspath(hook_path)\n args = [\"bash\", game_path]\n\n self.controller.handle_keys([])\n\n self.frame_counter = 0\n self.dead_until = None\n\n self.game_process = subprocess.Popen(\n args,\n env=env,\n # stdout=subprocess.DEVNULL,\n )", "async def on_world_start(self, data, connection):\n player = self.plugins['player_manager'].get_player_by_name(\n connection.player.name)\n if hasattr(player, 'seen_before'):\n return True\n else:\n self.background(self._new_player_greeter(connection))\n self.background(self._new_player_gifter(connection))\n player.seen_before = True\n return True", "def post(self, request, **kwargs):\n room = kwargs.get(\"room\")\n player = get_player_from_request(request)\n if player is None or player.room != room:\n return JsonResponse({\"error\": True, \"errormsg\": \"Not authorized\"})\n\n game = request.POST.get(\"game\", None)\n try:\n room.start_game(game)\n return JsonResponse({\"error\": False})\n except RoomStateException:\n return JsonResponse({\"error\": True, \"errormsg\": \"Room state error, please refresh this page.\"})\n except InvalidAmountOfPlayersException as e:\n return JsonResponse({\"error\": True, \"errormsg\": str(e)})\n except ValueError:\n return JsonResponse({\"error\": True, \"errormsg\": \"Unknown error occurred\"})", "def start(self, timed):\n\n if timed:\n self._game = TimedGame(self._players)\n else:\n self._game = Game(self._players)\n \n self._game.start()", "def handle_start_bidding_event(methods=[\"GET\", \"POST\"]):\n if game.active_player_index == len(game.ordered_players):\n game.active_player_index = 0\n active_player = game.ordered_players[game.active_player_index]\n game.state = \"playing\"\n socketio.emit(\"your turn\", room=active_player.sid)\n else:\n active_player = game.ordered_players[game.active_player_index]\n if not active_player.bid_active:\n socketio.emit(\"make bid field\", room=active_player.sid)\n active_player.bid_active = True", "def startGame():\n\n\tprint(\"\\nOK! 
Let's play!\")\n\tprint(\"--------------------------------------------------------------------------------------\")\n\tprint(\"Note:\")\n\tprint(\"\\tNow you must be kept in your mind a random integer from specific range and I must be guessing that number!\")\n\tprint(\"\\tIf you answer honestly all of my questions I certainly will guess that number!\")\n\tprint(\"--------------------------------------------------------------------------------------\\n\")\n\tgameLogic()", "def _prepare_start(self, client, player_id, game_info, game_data, realtime: bool = False):\n self._client: Client = client\n self.player_id: int = player_id\n self._game_info: GameInfo = game_info\n self._game_data: GameData = game_data\n self.realtime: bool = realtime", "def start():\n commands = {\"new tournament\": Controller.new_tournament,\n \"new round\": Controller.new_round,\n \"new player\": Controller.new_player,\n\n \"set round\": Controller.set_round,\n \"set player\": Controller.set_player,\n\n \"get players -all -alpha\": Controller.get_all_players_alpha,\n \"get players -all -rank\": Controller.get_all_players_rank,\n \"get players -alpha\": Controller.get_players_alpha,\n \"get players -rank\": Controller.get_players_rank,\n\n \"get tournament -all\": Controller.get_all_tournaments,\n \"get tournament\": Controller.get_tournament,\n\n \"get round -all\": Controller.get_all_rounds,\n \"get round\": Controller.get_round,\n\n \"get match -all\": Controller.get_all_matches,\n \"get match\": Controller.get_match,\n\n \"load\": Controller.load,\n\n \"exit\": Controller.close_app\n }\n\n # At the beginning of the program, load all data from a data_base.\n Controller.load()\n print(\"Need help? Type 'commands' to see all commands and there purposes.\")\n\n while True:\n instruction = str(input(\"ChessManager >>> \"))\n try:\n commands[instruction]()\n except KeyError:\n print(\"Wrong Command.\")", "def __handle_gamestart(self, peerconn,data,peername):\n #-------------------------------------------------------------------------- \n if data in self.rejoin_thread_dict:\n temp_list=self.rejoin_thread_dict.pop(data)\n game_id=temp_list[1]\n player_number=temp_list[2]\n self.game_dict[int(game_id)].append(data)\n peerconn.send_data(DETAILS,'%d %s %d %d' % (int(game_id),self.gameid_map_dict[int(game_id)],len(self.game_dict[int(game_id)])-1,int(player_number)))\n for peer_list in self.game_dict[int(game_id)]:\n if peer_list!=data:\n peerconn.send_data(PLAYER_LIST,'%s %s %d' % (peer_list,peer_list.split(\":\")[0],int(peer_list.split(\":\")[1])))\n else:\n # This condition is hit when nodes require initial set up details\n print \"in else of game start\" , data\n if not \"STARTED\" in data:\n self.game_dict_lock.acquire()\n #Check if there is already a game with lesser than 4 users. If so add the user to it. 
If not create new game\n if(self.game_id in self.game_dict):\n player_number = len(self.game_dict[self.game_id])+1\n if(len(self.game_dict[self.game_id])<=MAX_PLAYER_NUMBER-1):\n peerconn.send_data(DETAILS,'%d %s %d %d' % (self.game_id,self.gameid_map_dict[self.game_id],len(self.game_dict[self.game_id]),player_number))\n for peer_list in self.game_dict[self.game_id]:\n peerconn.send_data(PLAYER_LIST,'%s %s %d' % (peer_list,peer_list.split(\":\")[0],int(peer_list.split(\":\")[1])))\n self.game_dict[self.game_id].append(data)\n if(len(self.game_dict[self.game_id])==MAX_PLAYER_NUMBER):\n self.game_id=self.game_id+1\n print \"Game dictionary is :\"\n print self.game_dict[self.game_id]\n \n #create new game for the given game-id and add user to it\n else:\n map_id=random.randint(1, 4)\n print self.available_maps_dict[map_id] \n self.game_dict[self.game_id]=[]\n player_number = len(self.game_dict[self.game_id])+1\n peerconn.send_data(DETAILS,'%d %s %d %d' % (self.game_id,self.available_maps_dict[map_id],len(self.game_dict[self.game_id]), player_number))\n self.game_dict[self.game_id].append(data)\n self.gameid_map_dict[self.game_id]=self.available_maps_dict[map_id]\n print \"Game dictionary is :\"\n print self.game_dict[self.game_id]\n self.game_dict_lock.release()\n #this condition is hit when a game is started with < 4 players\n else:\n message,game_id = data.split(\" \")\n if int(game_id)==int(self.game_id) and len(self.game_dict[self.game_id])!=MAX_PLAYER_NUMBER:\n self.game_id=self.game_id+1\n print \"GAME ID\" , self.game_id\n peerconn.send_data(REPLY,'OK')", "def start(self):\n for game_object in self.game_objects:\n game_object.start()\n # end for\n self.time = time.time()\n self.paused = False\n self.running = True\n print 'GAME STARTED'", "def start(update, context):\n # Get user that sent /start and log his name\n global USERS\n user = update.message.from_user\n logger.info(\"User %s started the conversation.\", user.first_name)\n PLAYER_TEMPLATE = {\n \"round\": 1,\n \"username\": user.username,\n \"player_cards\": [],\n \"player_total\": 0,\n \"dealer_card\": \"\",\n \"strategy\": \"\"\n }\n USERS[user.username] = PLAYER_TEMPLATE\n CURRENT_CONTEXT = USERS[user.username]\n\n # New Round message\n message = f'New Round! Round: {CURRENT_CONTEXT[\"round\"]} ({CURRENT_CONTEXT[\"username\"]}) \\nDealers Card: {CURRENT_CONTEXT[\"dealer_card\"]} \\nYour Cards: {CURRENT_CONTEXT[\"player_cards\"]} \\nYour total: {CURRENT_CONTEXT[\"player_total\"]} \\n\\nReady? '\n\n # Send message with text and appended InlineKeyboard\n update.message.reply_text(\n message,\n reply_markup=InlineKeyboardMarkup([[inline('New Round')]])\n )\n\n # Tell ConversationHandler that we're in state `DEALER` now\n return PLAYER_CARD_ONE", "def handle_start(self, msg):\n\n chat_id = msg[\"chat\"][\"id\"]\n\n # sending start message\n message_text = \"Hey there! I am PolyglotBot. Do you want to play a game? You need to guess language of audio I send to you. 
Let's start.\"\n self.bot.sendMessage(chat_id, message_text)\n\n # creating new user\n username = msg[\"chat\"][\"username\"]\n self.user = User(username)\n\n # sending track to guess\n self.send_track(chat_id)", "def start_new_game(self):\r\n\r\n self.initialize_game_params()\r\n self.timer = Timer(self.screen)\r\n self.mine_counter = MineCounter(self.num_of_mines, self.screen)\r\n self.reset_button = ResetButton(self.screen)\r\n self.high_score = HighScore(self.rows, self.cols, self.num_of_mines, self.screen)\r\n self.board = Board(self.rows, self.cols, self.num_of_mines, self.screen)\r\n self.play_game()", "def start_game(self):\n controller = self.controller\n controller.on_init()\n\n self.game_running = True\n\n while self.game_running and controller.is_character_alive():\n controller.keyboard_game_control(self)\n\n self.game_running = False\n\n self.end_game()", "def do_start_joined(self):\n\t\td = {\"state\": be.S_GAME,\n\t\t\t\t\"hosting\": False,\n\t\t\t\t\"uuid\": None,\n\t\t\t\t\"name\": self.game_name,\n\t\t\t\t\"nickname\": self.nickname,\n\t\t\t\t\"num_players\": self.num_players,\n\t\t\t\t\"boardsize\": self.boardsize}\n\t\tevent = pygame.event.Event(be.E_STATE, d)\n\t\tpygame.event.post(event)\n\n\t\tself.hide_all()\n\t\tself.renderer.color = (0, 0, 0, 0)", "def start_game(self):\n self._add_mines()", "def start_game(self):\n print(\"hi there, game started!\")\n self.draw()", "def start_game(self):\n player_count = 0\n confirmed_count = 0\n while confirmed_count < 15:\n new = self._next_frame()\n if new:\n percents = self._find_percents()\n if percents:\n self.logger.debug(\"%d players found, confirming %d/15\", len(percents), confirmed_count)\n if player_count == len(percents):\n confirmed_count += 1\n else:\n confirmed_count = 0\n player_count = len(percents)\n self.games.append(Game([Player(self.frame, point) for point in percents]))\n self.game_in_progress = True\n self.logger.info(\"Starting game with %d players\", player_count)", "def start_new_game(player1, player2):\n return {\n 'player1': \"X\",\n 'player2': \"O\",\n 'board': [\n [\"-\", \"-\", \"-\"],\n [\"-\", \"-\", \"-\"],\n [\"-\", \"-\", \"-\"],\n ],\n 'next_turn': \"X\",\n 'winner': None\n }", "def start_game(self):\n p1_move = True\n is_all_moves_over = False\n while not is_all_moves_over:\n\n while p1_move and not is_all_moves_over:\n p1 = int(input(\"Player 1 pos:\"))\n is_all_moves_over, p1_move = self.play('p1', p1, p1_move)\n\n while not p1_move and not is_all_moves_over:\n p2 = int(input(\"Player 2 pos:\"))\n is_all_moves_over, p1_move = self.play('p2', p2, p1_move)\n\n print(\"Game Ended in Draw\")", "def start(self):\n\n p = Parser()\n if self.event_status < 1:\n print(\"\\n\" * 100)\n self.game_intro()\n print(\"\\n\" * 100)\n\n playing = True\n while playing:\n self.check_upgrades()\n self.check_energy()\n self.check_event_status()\n cur_location = self.player.get_location()\n cur_location.print_description(self.event_status)\n cur_location.print_details(self.event_status)\n print_player_info(self.player)\n cur_location.set_visited(True)\n\n player_command = get_command()\n cmd_action, cmd_exit, cmd_direction, cmd_item, cmd_character = Parser.action_requested(player_command)\n\n print(\"\\n\" * 100)\n if cmd_action == GO:\n self.player.go_exit(self.event_status, direction=cmd_direction, exit_name=cmd_exit)\n\n elif cmd_action == TAKE:\n if cmd_item is None:\n print(\"You can't take that.\")\n else:\n self.player.take(cmd_item)\n\n elif cmd_action == DROP:\n if cmd_item is None:\n print(\"You can't 
drop that.\")\n else:\n self.player.drop(cmd_item)\n\n elif cmd_action == TALK:\n if cmd_character is None:\n print(\"You can't do talk to that.\")\n else:\n self.player.talk(cmd_character, self.event_status)\n\n elif cmd_action == LOOK:\n self.player.look(self.event_status)\n\n elif cmd_action == SAVEGAME:\n tmp_save_dir = input(\"Enter the save name\\n> \")\n if tmp_save_dir:\n save_dir = tmp_save_dir\n else:\n save_dir = None\n self.save(save_dir)\n\n elif cmd_action == QUIT:\n print(\"Exiting the game...\")\n return\n\n elif cmd_action == LOOK_AT:\n if cmd_item is None:\n print(\"You can't look at that.\")\n else:\n self.player.look_at(cmd_item)\n\n elif cmd_action == LISTEN:\n self.player.listen()\n\n elif cmd_action == PULL:\n if cmd_item is None:\n print(\"You can't pull that.\")\n else:\n self.pull(cmd_item)\n\n elif cmd_action == PUSH:\n if cmd_item is None:\n print(\"You can't push that.\")\n else:\n self.push(cmd_item)\n\n elif cmd_action == CHARGE:\n self.player.charge()\n\n elif cmd_action == USE:\n if cmd_item is None:\n print(\"You can't use that.\")\n else:\n self.use(cmd_item)\n\n elif cmd_action == WAIT:\n sleep_rate = 0.2\n print(\"You wait for a few moments...\")\n time.sleep(2)\n duration = time.time() + 5\n while time.time() < duration:\n print(\"\\n\" * 100)\n print(\"*\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"**\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"***\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"****\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"*****\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"****\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"***\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"**\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"*\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"Nothing happened...\")\n time.sleep(2)\n print(\"\\n\" * 100)\n\n elif cmd_action == HELP:\n self.help()\n # wait for user to finish reading\n input(\"Press 'enter' to continue.\")\n\n elif cmd_action == INVENTORY:\n self.player.print_inventory()\n\n elif cmd_action == LOADGAME:\n saved_games_dir = os.path.join(os.getcwd(), \"saved_games\")\n\n # Print Available Saved Games\n print(\"Enter the number of the game you want to load.\")\n saved_games = [game for game in os.listdir(saved_games_dir)]\n for index, sg in enumerate(saved_games):\n print(\"{0}. {1}\".format(index + 1, sg))\n\n # TODO error checking on user input\n user_game_selection = input(\">\")\n user_game = saved_games[int(user_game_selection) - 1]\n print(\"Loading game: {0}\".format(user_game))\n print(\"\\n\" * 100)\n self.load_game(os.path.join(saved_games_dir, user_game))\n else:\n print(\"Huh? 
That doesn't make any sense.\")", "def start_gameloop(self):\n print(\"Game Loop starting...\")\n while True:\n current_turn = self.who_goes_first()\n print('The ' + current_turn + ' will go first.')\n while self.is_active:\n if current_turn == \"player\":\n self.board.draw()\n move = get_player_move(\n self.board.positions, self.board.is_position_availible)\n self.board.make_move(move, self.player_letter)\n current_turn = \"computer\"\n else:\n move = self.npc.get_move(self.board)\n self.board.make_move(move, self.npc.letter)\n current_turn = \"player\"\n if self.board.is_winner(self.player_letter):\n self.board.draw()\n print(\"You won!\")\n self.is_active = False\n if self.board.is_winner(self.npc.letter):\n self.board.draw()\n print(\"You lost!\")\n self.is_active = False\n if self.board.is_board_full():\n self.board.draw()\n print(\"Tie\")\n self.is_active = False\n if request_play_again() is False:\n break\n self.is_active = True\n self.board = Board(request_board_size())", "def start_game(self):\n self.board = Board(num_tableaus=self.tableau_qty, num_decks=self.decks, deal_3=self.deal_3)\n self.board.init_move_dict()\n self.board.deal(self.deck)\n\n if self.api_use:\n self.init_game_api()\n elif self.commandline:\n self.init_cl_game()\n else:\n self.init_pygame()", "def join_game(self, request):\n player = Player.query(Player.name == request.player_name).get()\n print player\n if not player:\n raise endpoints.NotFoundException(\n 'A Player with that name does not exist!, '\n 'we need a second player in order to join the game')\n try:\n game = gameutils.get_by_urlsafe(request.urlsafe_key, Game)\n game.player2 = player.key\n game.put()\n except ValueError:\n raise endpoints.BadRequestException('please verify the information '\n 'of the second player')\n\n # Use a task queue to update the average attempts remaining.\n # This operation is not needed to complete the creation of a new game\n # so it is performed out of sequence.\n\n return game.to_form('Second Player Joined the Game, we are ready to start the game!', player.name)", "def bcp_player_turn_start(self, player, **kwargs):\n\n if ((self.player and self.player.number != player) or\n not self.player):\n\n self.player = self.player_list[int(player)-1]", "def start_game(self):\n self._puzzle.get_puzzle()\n self._do_outputs()\n\n while self._keep_playing:\n print(\"\")\n print(\"+-----+-----+-----\")\n print(\"\")\n self._get_inputs()\n self._do_updates()\n self._do_outputs()\n print(\"+-----+-----+-----\")", "def basic_begin_game(game_context) :\n game_context.world.set_game_defined()\n execute_context(game_context)", "async def start_game_loop(self, frame: tk.Frame) -> None:\r\n try:\r\n results = await self.recv_doc_manager()\r\n\r\n if results is None: # Nothing was sent back, something broke on the server (disconnected)\r\n self.is_waiting = False\r\n return \"Error: no connection to the socket\"\r\n\r\n while results[\"action\"] == \"[GAME - TURN]\":\r\n self.game_data = results[\"data\"]\r\n frame.render()\r\n results = await self.recv_doc_manager()\r\n if results is None:\r\n self.is_in_game = False\r\n return \"Error: no connection to the socket\"\r\n\r\n if results[\"action\"] == \"[GAME - END]\":\r\n self.game_data = results[\"data\"]\r\n self.user_data = self.game_data[\"updated_user_data\"]\r\n # Game ended\r\n frame.MSG.set(\"Player \" + str(self.game_data[\"winner\"]) + \" has won!\")\r\n frame.render()\r\n frame.msg_label.grid()\r\n frame.end_game_btn.grid()\r\n self.is_in_game = False\r\n return\r\n\r\n except:\r\n 
self.is_in_game = False\r\n self.game_data = {}\r\n raise", "def run_game(self) -> None:\n decision = 0\n if self._initial:\n self._initial = False\n while decision != 1:\n try:\n display_no_combat_init(self.hero)\n decision = get_user_input([1, 2, -1])\n if decision == -1:\n self._quit()\n elif decision == 2:\n self._show_bag()\n else:\n break\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")\n\n while not self.hero.is_dead:\n try:\n self._load_map()\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")", "async def new_game():\n if enough_players():\n GAME.new_game()\n await update_players()", "def init_game(self):\n self.view.carregar_jogadores_possiveis(self._possible_players_list())\n self.view.put_view_in_main_loop()", "def playSinglePlayerGame(self):\n player = 1\n board = self.game.getInitBoard()\n while self.game.getGameEnded(board, player) == 0:\n action = self.player1(self.game.getCanonicalForm(board, player))\n board, player = self.game.getNextState(board, player, action)\n return self.game.getGameEnded(board, player)", "def is_game_started(self):\r\n\t\treturn self._is_game_started", "async def accept_replay(request, game):\n game = game.lower()\n replay_file = request.files.get('replay')\n if replay_file:\n if game == STARCRAFT:\n load_map = request.args.get(\"load_map\", False)\n result = await SC2Replay.analyze_replay(replay_file, load_map)\n if result:\n data = {\"result\": result,\n \"game\": STARCRAFT,\n \"success\": True}\n return sanic.response.json(data, status=200)\n else:\n data = {\n \"error\": \"Unable to parse game file.\",\n \"success\": False,\n \"game\": game\n }\n return sanic.response.json(data, status=500)\n else:\n data = {\n \"error\": \"Game not in list of games.\",\n \"success\": False,\n \"game\": game\n }\n return sanic.response.json(data, status=404)\n else:\n data = {\n \"error\": \"No replay file given.\",\n \"success\": False,\n \"game\": game\n }\n return sanic.response.json(data, status=500)", "def lobbyStage(self):\n # Organizes outbound data to clients into a dict\n print(\"(\" + str(self.HOST) + \", \" + str(self.PORT) +\"):: Starting lobby stage.\", file=self.logs)\n gameState = {\n \"connection\": str(self.PORT), \n \"ready\":False,\n \"start\":False,\n \"opponentPort\": None,\n }\n\n counter = 0\n while True:\n inboundData = self.socket.recvfrom(1024) # Gets bundle of data from clients\n data = inboundData[0] # Separates data from address\n \n ########\n self.bitsIn += sys.getsizeof(data)\n\n address = inboundData[1] # Separates address from data\n data = pickle.loads(data) # Unpickles data back into a python dict\n \n # Keeps track of how often the server recieves information from each client.\n updatedTime = time.time() \n self.clientUpdateTimes[str(address)] = updatedTime\n\n # If a new address connects, add it to the list of clients\n if address not in self.clients:\n self.clients.append(address)\n print(str(address)+ \":: New connection.\", file=self.logs)\n \n # If there are two players, the game is ready to start.\n if len(self.clients) == 2:\n gameState['ready'] = True \n for client in self.clients:\n if client != address:\n gameState['opponentPort'] = client \n \n if len(self.clients) == 1:\n gameState['ready'] = False\n gameState['opponentPort'] = None\n\n else:\n if data['command'] != \"\":\n print(str(address) +\"::\", data, file=self.logs) # Only prints out non-trivial data from clients \n \n # Handle commands from other servers\n if 
data['command'] == \"close\": # Ends the server\n break\n\n if data['command'] == 'ping': # Confirms connection to client servers\n print(\"(\" + str(address) +\")::\", self.clients, file=self.logs)\n \n if data['command'] == 'start':\n for client in self.clients: # Tells both player views to move on\n gameState['start'] = True # to the next stage\n outboundData = pickle.dumps(gameState)\n self.socket.sendto(outboundData, client)\n break\n\n # Packages up data and sends it back to the client\n outboundData = pickle.dumps(gameState)\n\n ######\n self.bitsOut += sys.getsizeof(outboundData)\n \n self.socket.sendto(outboundData, address)\n\n # Continuously saves logging information to a text file:\n self.logs.close()\n self.logs = open(str(self.filepath)+\"/_logs/\"+ str(self.PORT) + \".txt\", \"a+\")\n\n # Check client connections here\n self.checkClientConnections(time.time())", "def start_game(self):\n self.init_card = self.next_card.throw_card()\n self.current_card = \"\"\n self.final_score = 0\n\n while self.keep_playing:\n \"\"\"Functions to start the game\"\"\"\n # Calls the output functions, making the game start\n self.output(self.init_card) \n\n #Evaluating if the score is zero or not\n if self.score == 0:\n #if the score is zero, it is game over and end of the game.\n print(\"=============================================================================\")\n print(\"GAME OVER! \\nThank you for you time playing this game. \\nWe hope you have fun\")\n print(\"=============================================================================\")\n break\n else:\n #Else we will ask for the user if they wanted to play or not.\n play = input(\"Keep Playing? [y/n]\")\n if \"n\" in play:\n print(\"Thank you for playing with us. Have a nice day!\")\n break", "def start(self):\n start_time = time()\n\n def callback(res):\n env, data, game_id = res.result()\n if env is None:\n logger.info('invalid data: {}'.format(game_id))\n return\n\n self.save_data(data, game_id)\n logger.debug(f\"game {game_id}\"\n f\"halfmoves={env.num_halfmoves:3} {env.winner:12}\"\n f\"{' by resign ' if env.resigned else ' '}\"\n f\"{env.observation.split(' ')[0]}\")\n\n with ProcessPoolExecutor(max_workers=3) as executor:\n games = self.get_games_from_all_files()\n\n # poisoned reference (memleak)\n for i, game in enumerate(games):\n job = executor.submit(get_buffer, self.config, game, len(games), i)\n job.add_done_callback(callback)\n # for res in as_completed([executor.submit(get_buffer, self.config, game, len(games), i) for i, game in enumerate(games)]):", "def setupStage(self):\n print(\"(\" + str(self.HOST) + \", \" + str(self.PORT) +\"):: Initiating setup stage\", file=self.logs)\n\n # Holds player map and name information here. Will be used to create objects later.\n mapVotes = []\n playerNames = {}\n colors = [\"red\", \"blue\"]\n\n gameState = {\n \"ready\": False,\n \"game\": None \n }\n while True:\n # Continuously saves logging information to a text file:\n self.logs.close()\n self.logs = open(str(self.filepath)+\"/_logs/\"+ str(self.PORT) + \".txt\", \"a+\")\n\n # Gets all the events from the game window. 
A.k.a., do stuff here.\n inboundData = self.socket.recvfrom(1024) # Gets bundle of data from clients\n data = inboundData[0] # Separates data from address\n address = inboundData[1]\n\n # Keeps track of how often the server recieves information from each client.\n updatedTime = time.time() \n self.clientUpdateTimes[str(address)] = updatedTime\n\n ########\n self.bitsIn += sys.getsizeof(data)\n\n address = inboundData[1] # Separates address from data\n data = pickle.loads(data) # Unpickles data back into a python dict\n\n command = data['command']\n if command != None: \n # Takes in information from both players\n if command == \"SUBMIT\":\n pName = data['playerName']\n mVote = data['mapVote']\n\n mapVotes.append(mVote)\n playerNames[str(address)] = pName\n \n # Both votes are in. Chooses a map, builds the Board object.\n if len(mapVotes) == 2:\n # Only chooses one map for both players\n if self.map == None:\n mapTuple = random.choice(mapVotes)\n size = mapTuple[0]\n m = mapTuple[1]\n\n if size == \"SMALL\":\n randomMap = MapGenerator((5,7), m)\n self.map = randomMap.getMap()\n mapString = self.map\n width = randomMap.getDimensions()[0]\n height = randomMap.getDimensions()[1]\n tokens = randomMap.getTokens()\n if size == \"MEDIUM\":\n randomMap = MapGenerator((7,9), m)\n self.map = randomMap.getMap()\n mapString = self.map\n width = randomMap.getDimensions()[0]\n height = randomMap.getDimensions()[1]\n tokens = randomMap.getTokens()\n if size == \"BIG\":\n randomMap = MapGenerator((10,12), m)\n self.map = randomMap.getMap()\n mapString = self.map\n width = randomMap.getDimensions()[0]\n height = randomMap.getDimensions()[1]\n tokens = randomMap.getTokens()\n if size == \"HUGE\":\n randomMap = MapGenerator((12,15), m)\n self.map = randomMap.getMap()\n mapString = self.map\n width = randomMap.getDimensions()[0]\n height = randomMap.getDimensions()[1]\n tokens = randomMap.getTokens()\n if size == \"RANDOM\":\n randWidth = random.randint(5, 13)\n randHeight = random.randint(5, 13)\n\n randomMap = MapGenerator((randWidth,randHeight), m)\n self.map = randomMap.getMap()\n mapString = self.map\n width = randomMap.getDimensions()[0]\n height = randomMap.getDimensions()[1]\n tokens = randomMap.getTokens()\n\n # Builds the game board\n self.board = Board(width, height, mapString)\n\n # Both players' names have been entered, creates Player objects.\\\n # Appends player objects to state variable. 
\n if len(playerNames) == 2 and len(colors) > 0:\n p = Player(playerNames[str(address)], colors.pop(), None, tokens, address)\n self.players.append(p)\n \n # Player objects and Board object have both been created.\n # Builds the Game object, stores it, then tells the PlayerViews its ready.\n if len(self.players) == 2 and self.board != None:\n self.game = Game(self.board, self.players[0], self.players[1])\n gameState['game'] = self.game\n gameState['ready'] = True\n\n # Sends data to both players simultaneously\n for client in self.clients:\n outboundData = pickle.dumps(gameState)\n self.socket.sendto(outboundData, client)\n break\n\n # Packages up data and sends it back to the client\n outboundData = pickle.dumps(gameState)\n\n ######\n self.bitsOut += sys.getsizeof(outboundData)\n\n self.socket.sendto(outboundData, address)\n \n # Check client connections here\n self.checkClientConnections(time.time())", "def game_start_menu():\n\tprint(\"Welcome to Star Game!\")\n\tplayer_location = None # changed if loading from file\n\tif os.path.isdir(\"save_files\") and os.listdir(\"save_files\"): # if saved files exist\n\t\tfiles = os.listdir('save_files')\n\t\tnew_or_load = input(\"\\n1. Create a new cluster\\n2. Warp back to a previous cluster\\n\")\n\t\twhile new_or_load not in [\"1\", \"2\"]: #user did not enter \"1\" or \"2\"\n\t\t\tnew_or_load = input('Invalid selection. Please enter \"1\" or \"2\":\\n') \n\t\tif new_or_load == \"1\": # new game\n\t\t\tstar_cluster, player_location = create_new_game()\n\t\telse: # load game\n\t\t\tprint(\"Choose a cluster to warp to:\")\n\t\t\tfor file in files:\n\t\t\t\tif file.endswith(\".txt\"):\n\t\t\t\t\tprint(os.path.splitext(file)[0])\n\t\t\tfilename = input()\n\t\t\twhile not _test_filename_existence(filename, files): # must choose a valid save file\n\t\t\t\tfilename = input(\"That cluster does not exist. Choose again:\\n\")\n\t\t\tsave_data = {}\n\t\t\twith open(os.path.normpath(\"save_files/\" + filename + \".txt\"), \"r\") as json_file:\n\t\t\t\tsave_data = json.load(json_file)\n\t\t\tplayer_location = save_data[\"location\"]\n\t\t\tstar_cluster = cluster.Cluster(filename)\n\telse: # No previous games exists. 
Start new game.\n\t\tif not os.path.isdir(\"save_files\"):\n\t\t\tos.mkdir(\"save_files\")\n\t\tstar_cluster, player_location = create_new_game()\n\treturn star_cluster, player_location", "def start_21game(self):\n self.is_game_start = True\n self.already_has_a_winner = False\n self.player_point = {}\n self.generate_21game_number()\n self.boardcast(self.game_msg)", "def game_start():\n herolist = Hero_List(hots_db)\n heroclasses = []\n for item in herolist:\n heroclasses.append(Item(item, 'hero'))\n curgame = Game(Team('home'), Team('enemy'), Team('hero_pool', heroclasses), '')\n return curgame", "def admin_start(request):\n games = Game.objects.filter(is_done=False, is_closed=False)\n return render_to_response('ms/admin_start.html', RequestContext(request, {'games': games}))", "def server_play(board):\n\tbest_move = minimax(convert_board(board), server_player)['index']\n\tserver_move = play_move(board, best_move)\n\n\tif server_move:\n\n\t\t# check if server has won the current game\n\t\tif is_game_won(server_move, server_player):\n\t\t\treturn response(\n\t\t\t\t'Player o has won the game.',\n\t\t\t\tserver_move,\n\t\t\t\t200\n\t\t\t)\n\n\t\t# check if user player has won the current game\n\t\telif is_game_won(server_move, user_player):\n\t\t\treturn response(\n\t\t\t\t'Player x has won the game.',\n\t\t\t\tserver_move,\n\t\t\t\t200\n\t\t\t)\n\n\t\t# check if the current game is a draw\n\t\telif is_draw(convert_board(server_move)):\n\t\t\treturn response(\n\t\t\t\t'Draw!!!',\n\t\t\t\tserver_move,\n\t\t\t\t200\n\t\t\t)\n\n\t\t# next turn\n\t\telse:\n\t\t\treturn response(\n\t\t\t\t'Your turn',\n\t\t\t\tserver_move,\n\t\t\t\t200\n\t\t\t)", "def start(self):\n # Call the protected _turn method to start the game\n self._end_time = time.time() + 60\n self._turn()", "def start():\n context = zmq.Context()\n socket = context.socket(zmq.REP)\n socket.bind(\"tcp://%s:%s\" % (HOST, PORT))\n print \"server started..\"\n\n while True:\n # Wait for next request from client\n gameData = json.loads(socket.recv())\n print \"data recieved %s\"%gameData\n score = gameData.get('score')\n try:\n computerData = getComputerChoice(gameData)\n except utils.ServerException, e:\n errorData = {\"Error\":e.message}\n socket.send(json.dumps(errorData))\n continue\n score[computerData['computerColor']] += computerData['points']\n computerData['score'] = score\n socket.send(json.dumps(computerData))\n print \"responding with %s\"%computerData", "def start(update, context):\n # store user data\n put_in_all_user_data(update.effective_user)\n # getting the start menu InlineKeyboardMarkup\n reply_markup = get_start_menu()\n # reply with menu markup\n update.message.reply_text(\"What do you want to do?\", reply_markup=reply_markup)\n # telling the ConversationHandler we are in the FIRST Stage\n return FIRST", "def start_game(self):\n user_name = self.user_name.get()\n self.row, self.column = self.get_size_of_grid()\n self.user_symbol, self.cpu_symbol = self.get_symbols()\n if user_name == \"\" or self.row == 0 or self.user_symbol is None:\n messagebox.showwarning(\"Warning!\", \"Please complete all the fields!\")\n return\n # The connection to the database in order to rtetrieve the data is done.\n is_registered = False\n connection, cursor = GUI.open_database_connection()\n cursor.execute(\"select * from player\")\n for row in cursor:\n if row[0] == user_name:\n is_registered = True\n self.user_score = int(row[1])\n self.cpu_score = int(row[2])\n\n if is_registered is False:\n cursor.execute(\"insert into player values 
(%s, %s, %s)\", (user_name, 0, 0))\n GUI.close_database_connection(connection, cursor)\n # After checking the case if the current user hadn't played the game before, it is added to the database\n\n self.frame_gameplay = Frame(self.root, bd=4)\n self.frame_game_entry.pack_forget()\n self.frame_gameplay.pack()\n\n GUI.insert_empty_space(self.frame_gameplay, 0, 10)\n # The purpose of the below labels is for \"design\n score_label = Label(self.frame_gameplay, text=\"Score:\")\n score_label.grid(row=0, column=11)\n\n label_user = Label(self.frame_gameplay, text=user_name)\n label_cpu = Label(self.frame_gameplay, text=\"CPU\")\n label_cpu.grid(row=1, column=12)\n label_user.grid(row=0, column=12)\n\n lbl_user_score = Label(self.frame_gameplay, text=str(self.user_score))\n lbl_cpu_score = Label(self.frame_gameplay, text=str(self.cpu_score))\n lbl_user_score.grid(row=0, column=13)\n lbl_cpu_score.grid(row=1, column=13)\n\n funny_label = Label(self.frame_gameplay, text=\"Play Obstruction!\")\n funny_label.grid(row=0, column=0, columnspan=3)\n\n GUI.insert_empty_space(self.frame_gameplay, 1, 0)\n # The true gameplay starts now!\n self.upload_board()", "def play_game(self, early_finish=False):\n automated = self.game_type == self.game_types['vs_ai']\n while not (self.check_for_end_of_game() or (early_finish and self.check_for_early_finish())):\n self.play_single_turn()\n if automated:\n input('Press any key to continue.\\n')\n\n if self.player_1.score > self.player_2.score:\n print(\"player 1 wins!\" if automated else \"you win!\")\n elif self.player_2.score > self.player_1.score:\n print(\"player 2 wins!\" if automated else \"you lose!\")\n else:\n print(\"it was a tie!\")\n self.game_board.graphical_output(block=True)", "def start_game():\n logger.info(\"Clicking play button\")\n mouseclick(coords_play_final_button[0], coords_play_final_button[1])", "def handle_clients_connection():\n global server_socket\n global players\n # setting client's queue length\n server_socket.listen(5)\n print \"server online. waiting for players\"\n\n while len(players) < 2:\n _accept_client(server_socket)\n\n print \"there are 2 players. waiting for more...?\"\n\n while len(players) < 4:\n send_msg_to_players(players, \"we are %d players. more players?\" % len(players))\n print \"we are %d players. more players?\" % len(players)\n if \"yes\" in receive_msg_from_players():\n _accept_client(server_socket)\n else:\n send_msg_to_players(players, \"the final number of players is %d\" % len(players))\n print \"the final number of players is %d\" % len(players)\n break\n\n send_msg_to_players(players,\"start game\")\n print \"start game\"", "def start(self):\n while self.turns <= 7:\n # print()\n # print(\"This is turn {}.\".format(self.turns))\n turn = Turn(self.current_player, self.die)\n turn.run()\n self.current_player.score += turn.score\n # print(\"{}'s score is now {}\".format(self.current_player, self.current_player.score))\n self.turns += 1\n # print()\n # print(\"You have reached 7 turns. 
Game over.\")\n # print(\"Your total score is {}.\".format(self.current_player.score))", "def play(self):\n print(\"Board size: {}x{} with {} games using pieces: {}\".format(self.size[0], self.size[1], self.num_games, self.pieces))\n print(\"Player 1 using layout '{}' and play strategy '{}'\".format(self.layouts[0], self.plays[0]))\n print(\"Player 2 using layout '{}' and play strategy '{}'\".format(self.layouts[1], self.plays[1]))\n print(\"Running...\")\n self.start_time = time.time()\n\n for game in range(self.num_games):\n if self.verbose: print(\"Playing game {}:\".format(game))\n players = (Player(\"Player 1\", self.size[0], self.size[1], self.pieces, self.layouts[0], self.plays[0], self.verbose),\n Player(\"Player 2\", self.size[0], self.size[1], self.pieces, self.layouts[1], self.plays[1], self.verbose))\n\n finished = False\n game_round = 0\n\n while not finished:\n game_round += 1\n for i in range(2):\n player = players[i]\n opponent = players[0] if i == 1 else players[1]\n\n attack_pos = player.get_next_attack()\n player.set_attack_result(attack_pos, *opponent.is_hit(attack_pos))\n\n if opponent.is_player_dead() is True:\n self.wins[i] += 1\n self.tries[i] += game_round\n finished = True\n if self.verbose: print(\"Player {} won the game on round {}\\n\".format(i+1, game_round))\n break", "def wait_for_start(self):\n while True:\n ev = self.scene.waitfor('click')\n game_type = self.on_click(ev)\n if game_type:\n return game_type", "def game_started(self, pname, matchid):\n g = self.games.pop(matchid, None)\n if (g):\n # It's an automatch game. Save to history.\n logging.info('Automatch game started: %s' % matchid)\n self.history.append(g) \n else:\n # It's not an automatch game. Remove player from seek queue, etc.\n msg = 'Player %s started a non-automatch game.' % pname\n logging.debug(msg)\n self._rem_player(pname, msg, False)" ]
[ "0.7171834", "0.7154305", "0.68895155", "0.67457765", "0.6644632", "0.6629957", "0.66196835", "0.66131645", "0.6558208", "0.652107", "0.64768547", "0.6432589", "0.64246315", "0.6315022", "0.6312065", "0.621343", "0.6191547", "0.6181988", "0.61723584", "0.6163077", "0.6129981", "0.6123602", "0.602852", "0.6002622", "0.59920764", "0.59225917", "0.5874064", "0.5867342", "0.5843497", "0.58215064", "0.58198416", "0.58190763", "0.58162886", "0.58151865", "0.58082974", "0.57996786", "0.57911444", "0.5781355", "0.57601583", "0.57574993", "0.5743468", "0.57260835", "0.5722528", "0.57215303", "0.5720475", "0.57203096", "0.5713783", "0.5707936", "0.56967556", "0.56890464", "0.56887794", "0.5686043", "0.5680639", "0.567733", "0.5659606", "0.5651294", "0.56378603", "0.5623141", "0.56192094", "0.56190073", "0.56083024", "0.560375", "0.55978376", "0.5596997", "0.5594006", "0.55830276", "0.5580467", "0.55788916", "0.5566832", "0.5562485", "0.55618745", "0.55352664", "0.551989", "0.5500971", "0.54955834", "0.549338", "0.54895645", "0.5485428", "0.54485637", "0.54401976", "0.543287", "0.54234594", "0.5398168", "0.53811795", "0.5370132", "0.5368024", "0.5366514", "0.5362448", "0.5360488", "0.53603745", "0.5358998", "0.5353512", "0.5348227", "0.5345814", "0.5345081", "0.5328102", "0.5318724", "0.53171945", "0.53153116", "0.53148466" ]
0.5490905
76
Handles a leave game request. Deletes the user from the game.
def leave_game(players_cursor, states_cursor, user, room_id):
    leave_query = '''DELETE FROM players_table WHERE user = ? AND room_id = ?'''
    players_cursor.execute(leave_query, (user, room_id))
    FRAMES.append(display_game(players_cursor, states_cursor, user, room_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def leave(msg: telebot.types.Message):\n if utils.in_menu(msg.from_user):\n bot.reply_to(\n msg,\n 'This command outside of game is useless.'\n )\n return\n\n game, user, opponent = utils.get_game_user_opponent(msg.from_user)\n if not game or not user:\n # todo log something\n return\n\n user.state = states.USER_IN_MENU\n user.losses += 1\n utils.update_user(user)\n bot.send_message(\n user.user_id,\n 'You surrendered.'\n )\n\n if opponent:\n opponent.state = states.USER_IN_MENU\n opponent.wins += 1\n utils.update_user(opponent)\n bot.send_message(\n opponent.user_id,\n 'Your opponent surrendered'\n )\n\n field = json.loads(game.field)\n sig = 1 if user == game.user1 else 2\n\n # changes users emojis to poop\n for i in range(len(field)):\n for j in range(len(field[i])):\n if field[i][j] == sig:\n field[i][j] = 4\n\n if opponent:\n utils.send_updated_field(bot, field, game, opponent)\n Game.delete_by_id(game.id)", "def leaveGame(game, player): # is also called in register player if THE UNPROBABLE happens (e.g. there was a crash and bobby can't come in again)\n\t#check if player is in game and game exists, if the player is the creator close the game\n\tgame_key = game.key()\n\tplayer_key = player.key()\n\n\tif game != None and player != None:\t\t\t\n\t\tif game.creator.key() == player.key():\n\t\t\t#TODO: close game\n\n\t\t\tplayer.currentGame = None\n\t\t\tplayer.put()\n\n\t\t\tgame.status = 2\n\t\t\tgame.players.remove(player.key())\n\t\t\tgame.playerCount -= 1\n\t\t\tgame.put()\n\n\t\t\tlogging.info('Creator %s left game %s, game stopped'%(player_key,game_key))\n\t\t\tvalue = \"done\"\n\t\telif player.key() in game.players:\n\t\t\tplayer.currentGame = None\n\t\t\tplayer.put()\n\n\t\t\tgame.players.remove(player.key())\n\t\t\tgame.playerCount -= 1\n\t\t\tgame.put()\n\n\t\t\tlogging.info('Player %s left game %s, game has now %s players left'%(player_key,game_key,game.playerCount))\n\n\t\t\t#TODO: deal with the horrible aftermath\n\t\t\t#maybe if only 2 left start showdown, give 2 minutes then set marker in between them\n\t\t\tvalue = \"done\"\n\t\telse:\n\t\t\tlogging.error('Attempt to leave game %s by player %s failed, not in list apparently and not creator'%(game_key,player_key))\t\t\t\n\t\t\tvalue = \"error\"\t\t\n\telse:\n\t\tlogging.error('Attempt to leave game %s by player %s failed, no game or player'%(game_key,player_key))\t\t\t\n\t\tvalue = \"error\"\n\n\treturn value", "def on_leave(self, event):\n self.pre_check(event)\n self.remove_player(event.guild.id)", "def _leave(self, *args):\n if not self.game:\n raise ServerException('not playing a game')\n self.game.leave(self)\n self.game = self.player = None", "def on_leave(data):\n username = request.sid\n room = data\n leave_room(room)\n logging.info(username + ' has left the room.')\n send(username + ' has left the room.', room=room)", "async def tod_leave(self, ctx, *args):\n try:\n self.players.remove(ctx.author)\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.remove_roles(role)\n except ValueError:\n pass\n message = f\"{ctx.author.mention} has been removed from the game!\"\n await ctx.send(message)", "async def leave_room(self, label):\n user = self.user\n room = await self.get_room(label)\n\n await self.channel_layer.group_send(\n room.group_name,\n {\n 'type': 'chat.leave',\n 'label': label,\n 'username': user.username,\n 'title': room.name,\n }\n )\n # Remove that we're in the room\n self.rooms.discard(label)\n\n # Remove client from the group so he no longer get room messages\n await 
self.channel_layer.group_discard(\n room.group_name,\n self.channel_name\n )\n\n await self.send_json(\n return_value(\n ACTION_LEAVE, room.label, TO_ME, MSG_LEAVE, NO_MESSAGE\n )\n )", "def leave(self, message, db_session):\n username = self.ts.get_user(message)\n user = db_session.query(db.User).filter(db.User.name == username).one_or_none()\n if not user:\n user = db.User(name=username)\n db_session.add(user)\n for tup in self.player_queue.queue:\n if tup[0] == username:\n self.player_queue.queue.remove(tup)\n self._add_to_whisper_queue(username, \"You've left the queue.\")\n user.times_played -= 1\n break\n else:\n self._add_to_whisper_queue(username, \"You're not in the queue and must join before leaving.\")", "async def leave(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n else:\n player = ctx.message.author.name\n if player.lower() not in list(tod_games[room]['participants'].keys()):\n await amor_manager.say(\"{}, you cannot leave the game if you have not joined\".format(player))\n elif player == tod_games[room]['host']:\n await amor_manager.say(\"{}, you cannot leave the game you're the host\".format(player))\n else:\n del tod_games[room]['participants'][player.lower()]\n await amor_manager.say(\"{} has left Truth or Dare.\".format(player))", "def on_leave(data):\r\n\r\n username = data['username']\r\n room = data['room']\r\n leave_room(room)\r\n send({\"msg\": username + \" has left the room\"}, room=room)", "def leave_group():\n incoming = request.get_json()\n Participant.delete_participant_with_user_id_and_room_id(session['user_id'], incoming['room_id'])\n return jsonify(results = incoming['room_id'])", "def handle_leave_room(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n words = lobby_command.split()\n roomname = words[1]\n print(f\"Handling leave room {roomname} for {user}\")\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Requested roomname found..\")\n if user not in _room.room_attrbts['members']:\n msg = f\"Client {user} is already NOT a member of room {_room.name}\"\n self.log_and_send(client_socket, msg)\n return\n else:\n _room.room_attrbts['members'].remove(user)\n msg = f\"User {user} successfully removed from room {roomname}\"\n self.log_and_send(client_socket, msg)\n return\n msg = f'Client {user} passed invalid room. 
Could not join room {roomname}'\n self.log_and_send(client_socket, msg)\n return", "def leave_farm(self, request, pk):\n farm = self.get_object()\n user = request.user\n farm.remove_member(user)\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def leave(self):\n self.game.dealer_leave(self)\n return self.game", "def leave(ctx, network):\n return _leave(ctx.obj['client'], network)", "def leave(self, *args, **kwargs):\n return self.bot.leave_chat(self.id, *args, **kwargs)", "def leave(self):\n self.remove(\n self.subreddit._reddit.config.username or self.subreddit._reddit.user.me()\n )", "def leave(self):\n self.game.leave(self)\n return self.game", "def cancel_game(self, request):\n game = get_by_urlsafe(request.urlsafe_key, Game)\n if game and not game.game_over:\n game.key.delete()\n return StringMessage(message='Game with key: {} deleted.'.\n format(request.urlsafe_key))\n elif game and game.game_over:\n raise endpoints.BadRequestException('Game is already over!')\n else:\n raise endpoints.NotFoundException('Game not found!')", "def cancel_game(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key,Game)\n if not game:\n raise endpoints.NotFoundException('A Game with that key does not exist!')\n if game.game_over:\n raise endpoints.ForbiddenException('Game has ended.')\n else:\n game.key.delete()\n return StringMessage(message = 'Game Cancelled!')", "async def chat_leave(self, event):\n await self.send_json(\n return_value(\n ACTION_WENT_OFFLINE,\n event['label'],\n event['username'],\n MSG_LEAVE,\n NO_MESSAGE\n )\n )", "async def leave(ctx, *, check=\"\"):\r\n # if botv.isAdmin(ctx.message.author) and check == \"now, bot\":\r\n # if necessary, save checks can go here; check presently commented out because botv can\r\n # fail to initialize in testing\r\n await bot.say(\"Allan, please add dialogue!\")\r\n quit()", "def user_logged_out(self, sender, request, user, **kwargs):", "def on_client_exit(self, game) -> None:\n pass", "def delete_board(request):\n required_fields = ['user_id', 'game_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # check for not allowed characters\n if check_special_characters(str(data['user_id'])) or check_special_characters(str(data['game_id'])) \\\n or check_special_characters(str(data['token'])):\n return Response({'error': str('Unaccepted character passed!')},\n status=status.HTTP_400_BAD_REQUEST)\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Here delete the game board from user's saved profile\n if not db.delete_game(data['user_id'], data['game_id']):\n return Response({'error': str('Error when deleting the game!')}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", "def leave(self):\n p = GameOverPopup(self)\n p.open()", "def leave_room(room):\n return request.namespace.leave_room(room)", "def leaveEvent(self, event):\n self.destroy()", "def cancel_game(self):\n ndb.delete_multi(UserGame.query(UserGame.game_key == self.key).fetch(keys_only=True)) \n\n k = self.key\n k.delete()", "def leave_loose_game(self):\n self.update_json_file()\n self.end = True\n self.root.destroy()\n GameOver()", "async def 
cancel_game(self) -> None:\r\n # Checks if the client is already authenticated\r\n if self.is_auth is True and self.is_waiting is True and self.is_in_game is False:\r\n packaged_leave_game_queue_document = self.pkg_doc_manager(\"[CANCEL GAME]\", self.user_data[0])\r\n self.send(packaged_leave_game_queue_document)", "def destroy(self, request, pk=None):\n try:\n game = Game.objects.get(pk=pk)\n game.delete()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n\n except Game.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n\n except Exception as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def delete_game(request, game_id):\n user = request.user\n game = Game.objects.get(id=game_id)\n\n if not user.is_staff and not user in game.moderators.all():\n return HttpResponseRedirect('/game_details/' + game_id + '/')\n\n # Not only we have to delete object from database, but also all files related to it\n gamename = game.name\n path = settings.MEDIA_ROOT\n game.delete()\n \n system('rm -rf ' + path + settings.JUDGES_SOURCES_DIR + '/' + gamename + '/')\n system('rm -rf ' + path + settings.JUDGES_BINARIES_DIR + '/' + gamename + '/')\n system('rm -rf ' + path + settings.RULES_DIR + '/' + gamename + '/')\n\n return HttpResponseRedirect('/')", "def logout():\n flash('You were logged out')\n session.pop('user_id', None)\n return redirect(url_for('leaderboard'))", "def logout(request):\n # user_name == user_id\n required_fields = ['user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # check for not allowed characters\n if check_special_characters(str(data['user_id'])) or check_special_characters(str(data['token'])):\n return Response({'error': str('Unaccepted character passed!')},\n status=status.HTTP_400_BAD_REQUEST)\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Here let db know we are logging out by removing user's token\n if not db.remove_token(data['user_id']):\n return Response({'error': str('Error when logging out!')}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", "def player_disconnect(game_event):\n print(\"Player Disconnect\")\n userid = game_event.get_int('userid')\n print(\"userid: %s\" % userid)\n playerinfo = playerinfo_from_userid(userid)\n print(\"playerinfo: %s\" % playerinfo)\n steamid = playerinfo.get_networkid_string()\n print(\"player steamid: %s\" % steamid)\n \n if not steamid == \"BOT\":\n print(\"REAL PLAYER FOUND!\")\n steam64 = convertSteamIDToCommunityID(steamid)\n print(\"steam64: %s\" % steam64)\n \n deactivated_result = leetcoin_client.deactivatePlayer(steam64)", "async def delete_game(self, game_id):\n game = await self.get_game(game_id)\n await ex.conn.execute(\"DELETE FROM blackjack.games WHERE gameid = $1\", game_id)\n await ex.conn.execute(\"DELETE FROM blackjack.currentstatus WHERE userid = $1\", game[1])\n await ex.conn.execute(\"DELETE FROM blackjack.currentstatus WHERE userid = $1\", game[2])\n log.console(f\"Game {game_id} deleted.\")", "async def leave(self):\n return await self._state.leave_team(self.id)", "def 
process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def delete_game(game_id):\n try:\n is_admin = True if \"admin\" in session else False\n if is_admin:\n mongo.db.terms.remove({\"game_fk\": ObjectId(game_id)})\n mongo.db.games.remove({\"_id\": ObjectId(game_id)})\n flash(\"Game successfully deleted\", category=\"success\")\n return redirect(url_for(\"get_games\"))\n else:\n flash(\"You do not have permission to manage supported games\",\n category=\"error\")\n return redirect(url_for(\"get_terms\"))\n except KeyError:\n flash(Markup(\"Please <a href='login'>\"\n \"login</a> to delete a game\"), category=\"error\")\n return redirect(url_for(\"get_terms\"))", "def deleted_user(request):\n auth.logout(request)\n messages.success(request, \"Your profile has been deleted. Please contact us if you want to undo this.\")\n return redirect(reverse('index'))", "def closeaccount(request):\n get_user_model().objects.get(username=request.user.get_username()).delete()\n return Response({}, status=status.HTTP_200_OK)", "def on_removeuser(self, username):\n self.users.remove(username)\n print ('%s left the room.' % username)", "def stop(game, requested_players, username, already_stopped=False):\n user = User.objects.get(username=username)\n user.two_player_game_id = None\n user.four_player_game_id = None\n user.save()\n if game.player1 == user:\n game.player1 = None\n elif game.player2 == user:\n game.player2 = None\n elif game.player3 == user:\n game.player3 = None\n elif game.player4 == user:\n game.player4 = None\n if not already_stopped:\n game.turn = None\n game.last_status = json.dumps({\"stopped\": True,\n \"status\": (\"The game has been stopped by %s\"\n % username)})\n last_status = game.last_status\n if ((requested_players == \"two\" and game.player1 == None and\n game.player2 == None) or\n (requested_players == \"four\" and game.player1 == None and\n game.player2 == None and game.player3 == None and\n game.player4 == None)):\n game.delete()\n else:\n game.save()\n return HttpResponse(last_status)", "def delete(self, channel_id, username):\n\n if self.username != None and len(self.username) != 0:\n\n ## remove our user and alert others in the chat room\n user = self.channel.find_user_by_username(self.username)\n\n if user != None:\n message = self.get_argument('message', '%s has left the room.' 
% self.nickname)\n #remove_user(user)\n\n ## respond to the client our success\n self.set_status(200, 'OK')\n self.delete_cookie('username')\n self.delete_cookie('nickname')\n self.add_to_payload('message',unquote(self.nickname) + ' has left the chat room')\n\n else:\n ## let the client know we failed because they were not found\n self.set_status(403, 'Authentication failed')\n\n else:\n ## let the client know we failed because they didn't ask nice\n self.set_status(403, 'missing nickname argument')\n return self.render()", "async def leave(self):\n\t\tif self.group == None:\n\t\t\traise exceptions.ClientError('NO_GROUP')\n\n\t\tawait self.group.remove(self)\n\n\t\tself.group = None", "def delete(self, user, _id=None, match=None):\n if _id is None and match is None:\n\n LOGGER.debug('Removing all games and metagames.')\n for user in self.user_dao.search():\n for i, game in enumerate(user['owns']):\n del user['owns'][i]\n user = {k: v for k, v in user.items() if k != '_id'}\n print(user)\n self.user_dao.update(user['_id'], user)\n self.meta_game_dao.delete()\n rest.database.db.games.remove()\n\n elif _id is not None:\n\n LOGGER.debug(f'Removing games and metagames with id and game_id of {_id}.')\n for user in self.user_dao.search(**{'owns.game': ObjectId(_id)}):\n print(user)\n for i, game in enumerate(user['owns']):\n if game['game'] == _id:\n del user['owns'][i]\n self.user_dao.update(user['_id'], user)\n self.meta_game_dao.delete(match={'game_id': ObjectId(_id)})\n rest.database.db.games.remove({'_id': ObjectId(_id)})\n\n else:\n\n rest.database.db.games.remove(match)\n # TODO: Implement removing users data as well if game is removed.", "def test_not_logged_user_cannot_leave(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def handle_exit_room_session(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n for room in self.rooms:\n if user in room.room_attrbts['active']:\n room.room_attrbts['active'].remove(user)\n msg = f'User {user} is no longer active in room {room.name}.'\n print(msg)\n return\n msg = f'Room {room.name} not found or user {user} is not yet a member. 
NONACTIVE'\n self.log_and_send(client_socket, msg)\n return", "async def leave(self, ctx):\n if ctx.guild is None:\n await ctx.reply(\"This command can only be used in a server, not in DMs.\")\n raise commands.CommandError(\"Invoker not in a guild.\")\n\n if ctx.author.voice is None or ctx.author.voice.channel is None:\n await ctx.reply(\"You need to be in a voice channel to use this command.\")\n raise commands.CommandError(\"Invoker not connected to a voice channel.\")\n\n if ctx.voice_client is not None and ctx.author.voice.channel != ctx.voice_client.channel:\n await ctx.reply(\"You need to be in the same voice channel as the bot to use this command.\")\n raise commands.CommandError(\"Invoker not in same voice channel as bot.\")\n\n if ctx.voice_client is not None:\n SpotifyController.stop_for_channel(ctx.voice_client.channel.id)\n await ctx.voice_client.disconnect()\n return\n await ctx.send('I am not connected to a voice channel...')", "def delete_upload(request):\r\n\tgame_id = request.GET['id']\r\n\tgame = Game.objects.get(id = game_id)\r\n\tif(request.user.profile == game.developer):\r\n\t\tif request.method == 'POST':\r\n\t\t\tgame.delete()\r\n\t\t\tprint('game deleted')\r\n\t\t\treturn redirect('developer_dashboard')\r\n\t\telse:\r\n\t\t\treturn render(request, 'confirm_delete.html', {'game':game})\r\n\telse:\r\n\t\treturn redirect('home')", "def user_logout(request):\n\n # Since we know the user is logged in, we can now just log them out.\n\tlogout(request)\n\n # Take the user back to the homepage. \n\treturn HttpResponseRedirect(reverse('website:index'))", "async def leave(self, ctx):\n\n if ctx.voice_client is not None:\n return await ctx.voice_client.disconnect()", "def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200", "def cancel_game(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n if game and not game.game_over:\n game.end_game(won=False)\n game.key.delete()\n return StringMessage(\n message='Game {} has been cancelled'.format(\n request.urlsafe_game_key))\n elif game and game.game_over:\n return StringMessage(\n message='Game {} is already over!'.format(\n request.urlsafe_game_key))\n else:\n raise endpoints.NotFoundException('Game not found.')", "def leave_group(self):\n\t\tself.sendMessage(ID_CTRL + \"LEAVE\", True)\n\t\tself.joinstate = 0\n\t\tself.createstate = 0\n\t\tself.__key = None", "def delete_user():", "def leave(self):\n self.pleaseQuit=1", "def leave(self, user):\n membership = self.check_membership(user)\n if membership is not None and membership.role != 'O':\n if membership.role == 'B':\n membership.role = 'LB'\n else:\n membership.role = 'L'\n membership.save()", "def leave_game(game_id, player_id):\n game_data = load_state(game_id)\n if not game_data:\n return False\n players = game_data.get('players')\n if player_id not in [p['id'] for p in players]:\n return False\n if game_data['ended_at']:\n return False\n quitter = [p for p in players if p['id'] == player_id][0]\n msg = make_info_message('You have left the game')\n alt_msg = make_info_message('{} has left the game'.format(quitter['name']))\n flash_player(game_data, quitter, msg, alt_msg)\n if quitter['active']:\n activate_next_player(game_data, player_quit=True)\n game_data['players'].remove(quitter)\n # If the quitter was the admin and there is at least one player\n # remaining, reassign the admin role to the first position.\n new_admin = None\n if quitter['admin'] and game_data['players']:\n new_admin = 
game_data['players'][0]\n new_admin['admin'] = True\n # If one player remaining in an active game, end the game now.\n if game_data['active'] and len(game_data['players']) == 1:\n game_data['active'] = False\n game_data['players'][0]['active'] = False\n game_data['ended_at'] = serialize_datetime(datetime.utcnow())\n msg = make_info_message('The game has ended')\n flash_broadcast(game_data, msg)\n if new_admin and not (game_data['started_at'] or game_data['ended_at']):\n msg = make_info_message('You are now the game administrator')\n flash_player(game_data, new_admin, msg)\n # If no players remaining, end the game now.\n if not game_data['players']:\n game_data['ended_at'] = serialize_datetime(datetime.utcnow())\n # If game is still active at this point, reclaim the quitter's cards.\n if game_data['active']:\n reclaim_player_cards(game_data, quitter)\n\n save_state(game_data)\n return True", "async def leave_room(self, room_id):\n # Зарегистрированный пользователь находится в нашей области благодаря аутентификации ASGI middleware\n room = await get_room_or_error(room_id, self.scope[\"user\"])\n # Отправить сообщение, если оно включено\n if settings.NOTIFY_USERS_ON_ENTER_OR_LEAVE_ROOMS:\n await self.channel_layer.group_send(\n room.group_name,\n {\n \"type\": \"chat.leave\",\n \"room_id\": room_id,\n \"username\": self.scope[\"user\"].first_name,\n }\n )\n # Remove that we're in the room\n self.rooms.discard(room_id)\n # Remove them from the group so they no longer get room messages\n await self.channel_layer.group_discard(\n room.group_name,\n self.channel_name,\n )\n # Instruct their client to finish closing the room\n await self.send_json({\n \"leave\": str(room.id),\n })", "async def leaveserver(self, ctx, guild: int):\n guild = self.bot.get_guild(guild)\n await guild.leave()\n embed = discord.Embed(title=f\"left {guild.name} owned by: {guild.owner.name}\")\n embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)\n await ctx.message.delete()\n await ctx.send(embed=embed)", "def sign_out_action(request, course_id):\n course = get_object_or_404(Course, id=course_id)\n try:\n student = Student.objects.get(id=request.POST['student'])\n except (KeyError, Student.DoesNotExist):\n return render(request, 'hall_pass/sign_out_page.html', {\n 'course': course,\n 'error_message': str(request.POST),\n })\n else:\n if student.current_absence is None:\n # create a new absence\n absence = Absence(\n student=student,\n course=course,\n time_out=datetime.now(),\n reason=request.POST['reason']\n )\n absence.save()\n\n student.current_absence = absence\n student.save()\n else:\n # complete current absence\n absence = student.current_absence\n absence.time_in = datetime.now()\n absence.save()\n\n student.current_absence = None\n student.save()\n return HttpResponseRedirect(\n reverse('hall_pass:sign_out_result', args=(course_id, student.id))\n )", "def logout():\n\n session.pop(\"leader_logged_in\", False)\n session.pop(\"leader_id\", None)\n session.pop(\"leader_email\", None)\n\n return redirect(f\"{BASEPATH}/login\")", "def gdisconnect():\n try:\n access_token = login_session['credentials']\n except KeyError:\n flash('Failed to get access token')\n return redirect(url_for('home'))\n print(\"User's name was {}.\".format(login_session['name']))\n if access_token is None:\n print('Access Token is None')\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return 
response\n del login_session['credentials']\n del login_session['user_id']\n del login_session['name']\n del login_session['email']\n print('Successfully logged out.')\n flash('Successfully logged out.')\n return redirect(url_for('home'))", "def delete(request):\n # user_name == user_id\n required_fields = ['user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # check for not allowed characters\n if check_special_characters(str(data['user_id'])) or check_special_characters(str(data['token'])):\n return Response({'error': str('Unaccepted character passed!')},\n status=status.HTTP_400_BAD_REQUEST)\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Here remove the user's account from the database\n if not db.remove_user(data['user_id']):\n return Response({'error': str('Error when removing the user account!')}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", "def logout():\n user = g.user\n do_logout(user)\n\n flash(\"You have successfully logged out.\", 'success')\n return redirect(\"/login\")", "async def leave(self):\n request = self._state.leave_thread(self.id)\n await request", "async def remove(self, ctx, game):\n\n user = ctx.message.author\n\n if remove(game, user.id):\n await self.bot.say(\"{}, {} was removed from your library.\".format(user.mention, game))\n else:\n await self.bot.say(\"{}, you do not have this game in your library.\".format(user.mention))", "async def endGame(self, ctx):\n print(\"Ending game ...\")\n await self.movePlayer(ctx=ctx, voiceChannel=self.lastVoiceChannel, reason=\"Fin de partie.\")\n await self.deleteCategory(ctx=ctx, reason=\"Fin de partie.\")\n await self.deleteRole(ctx=ctx, reason=\"Fin de partie.\")\n print(\"Game ended\")\n await self.delete()", "def leave_page(self):\n self.window.destroy()", "def end_game(self, user):\r\n self.game_over = True\r\n self.put()\r\n # Add the game to the score 'board'\r\n score = Score(user=user, date=date.today(), won=True,\r\n guesses=self.turn)\r\n score.put()\r\n winner = user.get()\r\n print \"winner: \", winner.name\r\n winner.wins += 1\r\n winner.winloss_ratio = float(\r\n winner.wins / (winner.wins + winner.losses))\r\n winner.total_guesses += self.turn\r\n winner.put()\r\n if self.player1 == user:\r\n loser_key = self.player2\r\n else:\r\n loser_key = self.player1\r\n score = Score(user=loser_key, date=date.today(), won=False,\r\n guesses=self.turn)\r\n score.put()\r\n loser = loser_key.get()\r\n loser.losses += 1\r\n loser.winloss_ratio = float(loser.wins / (loser.wins + loser.losses))\r\n loser.total_guesses += self.turn\r\n loser.put()", "def user_logout():\n\n session.pop('logged_in', None)\n flash('You are now logged out')\n\n return redirect('/')", "def cancel_game(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n if game:\n if game.game_over:\n return game.to_form('Game is over. 
Cannot cancel game.')\n else:\n game.history.append('Game canceled!')\n game.end_game(False)\n return game.to_form('Game canceled!')\n else:\n raise endpoints.NotFoundException('Game not found!')", "def user_delete(self, request):\n\n try:\n if request.method == \"POST\":\n flash(\"Be careful you are about to delete all of your data\")\n self._student_handler.delete_students(current_user.scheme_id, current_user.k_number)\n return redirect(url_for(\"user.user\"))\n else:\n return render_template(\"user/delete_page.html\")\n\n except Exception as e:\n self._log.exception(\"Could not delete student\")\n return abort(500)", "def exit_survey(request):\n form = ExitSurvey(request.POST or None)\n u = User.objects.get(username=request.user.username)\n u.exited = True\n u.save()\n if request.method == 'POST':\n if form.is_valid():\n try:\n game = InteractiveStatic.objects.get(users=u)\n game_id = game.id\n except InteractiveStatic.DoesNotExist:\n game_id = -1\n instance = form.save(commit=False)\n instance.username = u.username\n instance.game = game_id\n instance.save()\n return redirect('static_mode:done')\n else:\n print('NOT Valid')\n return render(request, 'control/survey.html', {'form': form, 'score': round(request.user.get_score * .25, 2)})", "async def logout(self):\n try:\n user = self.request.session.get(\"user\")\n chat = self.request.session.get(\"chat\")\n active_sockets = self.request.app.active_sockets\n active_sockets.get_chat(chat).del_user(user)\n\n self.request.session.pop(\"user\")\n self.request.user = None\n self.request.chat = None\n\n return {\n \"Type\": \"account\",\n \"Command\": \"logout\",\n \"Status\": \"success\"\n }\n except KeyError:\n return {\"Type\": \"account\", \"Command\": \"logout\", \"Status\": \"error\"}", "def game_exit(self):\n self.set_state(GameState.EXITING)\n self.game_stop()\n self.game_log_statistics()", "async def leave(self, msg):\n if msg.author.voice is not None and msg.voice_client is not None:\n if msg.voice_client.is_playing() is True or self.player[msg.guild.id]['queue']:\n self.player[msg.guild.id]['queue'].clear()\n msg.voice_client.stop()\n return await msg.voice_client.disconnect(), await msg.message.add_reaction(emoji='✅')\n\n return await msg.voice_client.disconnect(), await msg.message.add_reaction(emoji='✅')\n\n if msg.author.voice is None:\n return await msg.send(\"You must be in the same voice channel as bot to disconnect it via command\")", "async def _leave(self, ctx: commands.Context):\n\n await ctx.voice_state.stop()\n del self.voice_states[ctx.guild.id]", "def logout_user():\n\n # Delete session data to log out\n del session[\"user_id\"]\n flash(\"Successfully logged out!\")\n\n return redirect(\"/\")", "def endGame(self):\n pass", "def endGame():\n return render_template(\"endGame.html\")", "def leaveMUC(self, room, nick, msg='', pfrom=None):\n if msg:\n self.xmpp.sendPresence(pshow='unavailable', pto=\"%s/%s\" % (room, nick), pstatus=msg, pfrom=pfrom)\n else:\n self.xmpp.sendPresence(pshow='unavailable', pto=\"%s/%s\" % (room, nick), pfrom=pfrom)\n del self.rooms[room]", "def leave(self):\n self.subreddit._reddit.post(\n API_PATH[\"leavecontributor\"], data={\"id\": self.subreddit.fullname}\n )", "def exit_the_site(guest_name):\n auth.remove_guest(guest_name)\n return user_handler.exit_the_site(guest_name)", "def remove_guest(eid, gid):\n check_admin()\n\n guestList = GuestList.query.filter_by(event_id=eid).all()\n for guest in guestList:\n print(\"guest.guest_id: \" + str(guest.guest_id))\n print(\"gid: \" + str(gid))\n if 
guest.guest_id == gid:\n db.session.delete(guest)\n db.session.commit()\n \n flash('You have successfully removed a user from the event.')\n\n # redirect to the events page\n return redirect(url_for('admin.event_guestlist', id=eid))\n\n return render_template(title=\"Removed Guest\")", "def logout():\n flash(\"You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"login\"))", "def delete(self, channel_id, username):\n username = unquote(username).decode('utf-8')\n\n if self.username != None and len(self.username) != 0 and self.username == username:\n\n ## remove our user and alert others in the chat room\n user = self.channel.find_user_by_username(self.username)\n\n if user != None:\n message = self.get_argument('message', '%s has left the room.' % user.nickname)\n self.channel.remove_user(user)\n msg = ChatMessage(timestamp=int(time.time() * 1000), username='system', nickname='system',\n message=message, msgtype='system', channel_name=self.channel_id)\n\n self.channel.add_chat_message(msg)\n\n ## respond to the client our success\n self.set_status(200)\n self.add_to_payload('message',unquote(user.nickname) + ' has left the chat room')\n\n else:\n ## let the client know we failed because they were not found\n self.set_status(403, 'User not found, Authentication failed')\n\n else:\n ## let the client know we failed because they didn't ask nice\n self.set_status(403, 'missing username argument')\n return self.render()", "def api_logout(request):\n if hasattr(request, \"user\") and request.user:\n logout(request) # Sends the user_logged_out signal\n return Response(None, status=status.HTTP_200_OK)\n return Response(None, status=status.HTTP_404_NOT_FOUND)", "async def user_logout_process(self, ctx: commands.Context):\n await ctx.cfg_member.szuruname.set(None)\n await ctx.cfg_member.szurutoken.set(None)\n await ctx.send(\n f\"{ctx.author.mention}: you have been logged out, I no longer have access to your account.\",\n reference=ctx.message,\n )", "def close_menu(game_event):\n wire_menu.close(index_from_userid(game_event.get_int('userid')))", "async def leave_room(self, room_id):\n print(\"PublicChatConsumer\", \"leave_room\")\n if self.scope[\"user\"].is_authenticated:\n try:\n room: PublicChatRoom = await get_room_or_error(room_id)\n except ClientError as e:\n await self.handle_client_error(e)\n else:\n # Remove user from room users\n await disconnect_user(room, self.scope[\"user\"])\n\n # Set room_id to None\n self.room_id = None\n\n # Remove user from the group\n await self.channel_layer.group_discard(\n room.group_name,\n self.channel_name\n )\n\n # Send the total number of connected users to the client\n connected_users_count = await get_connected_users_count(room)\n await self.channel_layer.group_send(\n room.group_name,\n {\n \"type\": \"connected.users.count\",\n \"connected_users_count\": connected_users_count\n }\n )", "def leave_in_play(self, leave_in_play):\n\n self._leave_in_play = leave_in_play", "def logout_user():\n request_json = request.get_json()\n username = request_json[\"username\"]\n try:\n users_service.logout_user(username)\n except:\n return Response(\"User does not exist\", 400)\n return Response(\"Logged out\", status=200)", "def delete_game(sid, msg):\n uuid = msg['uuid']\n game = Game.objects.get(uuid=uuid)\n game.delete()", "def delete_user():\n #TODO user delete\n pass", "def leave_request_decline(self, token, **kwargs):\n cr, uid, context = self._get_cr_uid_context()\n res = self._check_leave_request(\n cr, uid, request, token, 
context=context\n )\n if isinstance(res, http.Response):\n return res\n if res:\n res.signal_workflow('refuse')\n if res.state == 'refuse':\n return request.website.render(\n \"tk_hr_approve_request.leave_request_refused\"\n )", "def log_out():\n\n del session[\"user_id\"]\n # print session[\"user_id\"]\n flash('You were successfully logged out')\n return render_template('homepage.html')\n\n #Additional reference for log in/log out can be found in project tracker project" ]
[ "0.7001719", "0.6419002", "0.63921964", "0.639108", "0.6370939", "0.633421", "0.63169086", "0.6268317", "0.6229235", "0.61546296", "0.6119499", "0.6118067", "0.60932064", "0.6045495", "0.6015096", "0.5974975", "0.59517586", "0.59031034", "0.59014386", "0.5897309", "0.5892574", "0.5889712", "0.58342934", "0.58237493", "0.5820593", "0.57754123", "0.57619804", "0.57610285", "0.5752704", "0.5738764", "0.5729185", "0.5723454", "0.5720712", "0.5720563", "0.568404", "0.5676977", "0.5670921", "0.56698114", "0.5663175", "0.5663175", "0.56114763", "0.5594224", "0.5593882", "0.5574382", "0.5573156", "0.55475837", "0.55457157", "0.5530408", "0.5529969", "0.5524809", "0.5522613", "0.552029", "0.5518", "0.5511451", "0.54864407", "0.54850346", "0.54848707", "0.5463473", "0.5455535", "0.5450122", "0.54478985", "0.54316413", "0.5426222", "0.5411673", "0.5403024", "0.5396729", "0.53610134", "0.53585124", "0.5354115", "0.5353153", "0.53479016", "0.53473926", "0.5346794", "0.5335175", "0.53261", "0.5323556", "0.5315051", "0.53085715", "0.5308561", "0.5300927", "0.5296967", "0.5290282", "0.5280146", "0.52792364", "0.5271277", "0.52646524", "0.5249852", "0.5245618", "0.52439314", "0.5237074", "0.5232415", "0.5231827", "0.5228975", "0.5210056", "0.5203844", "0.5197126", "0.5195755", "0.51949066", "0.5183968", "0.51766104" ]
0.66279423
1
Select Relationships associated with specified fact_id.
def select_by_fact_id(cls, fact_id):
    return db.session.query(cls).filter_by(fact_id=fact_id).all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_relationships(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(relationship_query, (person_id,)) # note a tuple is needed as a parameter value for SQLITE\n\n relation_list = []\n for row in c:\n _relation = Relationship()\n _relation.person_id = row[\"personid\"]\n _relation.person.first_name = row[\"firstname\"]\n _relation.person.last_name = row[\"lastname\"]\n _relation.person.middle_initial = row[\"middleinitial\"]\n _relation.related_person_id = row[\"related_personid\"]\n _relation.relationship_id = row[\"relationshipid\"]\n _relation.relationship_type = row[\"relationshiptype\"]\n _relation.relationship_type_description = row[\"key\"]\n relation_list.append(_relation)\n conn.close()\n return relation_list\n except:\n return []", "def has_relationship(id):\n\n relationship = p.toolkit.get_action(\"package_relationships_list\")(\n data_dict={\"id\": id}\n )\n if relationship:\n rel = bool(relationship)\n return rel", "def get_relationship(self, guid):\n results = None\n atlas_endpoint = self.endpoint_url + f\"/relationship/guid/{guid}\"\n\n getResponse = requests.get(\n atlas_endpoint,\n headers=self.authentication.get_authentication_headers()\n )\n\n results = self._handle_response(getResponse)\n\n return results", "def selected_relationships(self):\n return self._selected_relationships", "def relationships(self):", "def get_relationships_for_destination(self, destination_id):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_for_destination\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'destinationId': str(destination_id)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def select_by_foreign_keys(cls, subject_id=None, object_id=None, relationship_type_id=None):\n filter_clause = sa.and_(\n sa.and_(cls.subject_id == subject_id, cls.object_id == object_id),\n cls.relationship_type_id == relationship_type_id)\n return db.session.query(cls).filter(filter_clause).first()", "def user_relationships(id, related_collection_name):\n response = None\n if request.method == 'POST':\n response = User.create_relationships(id, related_collection_name, eval(request.data))\n elif request.method == 'PATCH':\n response = User.update_relationship(id, related_collection_name, json.loads(request.data))\n elif request.method == 'DELETE':\n response = User.disconnect_relationship(id, related_collection_name, eval(request.data))\n elif request.method == 'GET':\n response = User.get_relationship(request.args, id, related_collection_name)\n return response", "def get_relationship_query(self):\n # Implemented from template for\n # osid.resource.ResourceQuerySession.get_resource_query_template\n return queries.RelationshipQuery(runtime=self._runtime)", "def findAllInfectedRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:COVID_EXPOSURE]->(n2:Person) \"\n \"RETURN ID(n1) , r , r.date , r.name , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def get_relationship(self, relationship_id):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resource\n # NOTE: This implementation currently ignores plenary view\n collection = 
JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find_one(\n dict({'_id': ObjectId(self._get_id(relationship_id, 'relationship').get_identifier())},\n **self._view_filter()))\n return objects.Relationship(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)", "def get_relationships_for_source(self, source_id):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_for_source\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'sourceId': str(source_id)},\n **self._view_filter())).sort('_sort_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def get_depend_package(id):\n\n relationships = []\n try:\n relationships = p.toolkit.get_action(\"package_relationships_list\")(\n data_dict={\"id\": id, \"rel\": \"dependency_of\"}\n )\n except Exception, e:\n return {}\n\n depend = []\n if relationships:\n for rel in relationships:\n try:\n access = p.toolkit.check_access(\n \"package_show\",\n context={\"user\": c.user},\n data_dict={\"id\": rel[\"object\"]},\n )\n dep = p.toolkit.get_action(\"package_show\")(\n data_dict={\"id\": rel[\"object\"]}\n )\n depend.append(dep)\n except:\n pass\n return depend", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def get_doctor_include_related(id):\n doctor = Doctor.query.get(id)\n result = full_doctor_schema.dump(doctor)\n return jsonify(result.data)", "def get_relationships_by_genus_type_for_destination(self, destination_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_destination\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'destinationId': str(destination_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def get_rule(rule_id):\n\n rule = get_db().execute('SELECT i.*, c.name as category_name FROM ruleset i JOIN categories c ON i.category_id = c.id WHERE i.id = ?', (rule_id, )).fetchone()\n\n return rule", "def get_child_ids(id,conn):\n\n child_ids = ('WITH RECURSIVE children AS '\n '(SELECT subject_id '\n 'FROM cvterm_relationship '\n 'WHERE object_id = %s '\n 'UNION '\n 'SELECT cr.subject_id '\n 'FROM cvterm_relationship cr '\n 'INNER JOIN children ch ON ch.subject_id = cr.object_id) '\n 'SELECT * FROM children')\n ids = connect(child_ids,id,conn)\n list_of_ids = []\n for item in ids:\n list_of_ids.append(item[0])\n return(list_of_ids)", "def get_related(this_obj, other_obj, m2m=False):\n # is het niet raar dat je voor twee concrete objecten ophaalt naar welke van het ene type\n # verwezen wordt vanuit het andere type? 
Of is dat om de vorige/volgende te kunnen bepalen?\n # als ik kijk naar het gebruik in GetRelations dan is het tweede argument ook niet een object\n # maar een relatie (uit de fields verzameling)\n if m2m:\n fields = [x for x in other_obj._meta.many_to_many]\n else:\n fields = [x for x in other_obj._meta.get_fields() if x.name != 'project' and\n x.get_internal_type() == 'ForeignKey']\n for fld in fields:\n if fld.related_model == this_obj._meta.model:\n related_name = fld.related_query_name()\n break\n else:\n return None # not found\n try:\n return this_obj.__getattribute__(related_name).all()\n except UnboundLocalError:\n return None\n # zou je deze ook kunnen vervangen door een aanroep van get_relation en dan met de opgehaalde\n # naam de gerelateerde objecten ophalen en meteen de vorige en de volgende bepalen?\n # (heeft uiteraard konsekwenties voor de aanroepende code)\n # oorspronkelijk lijkt dat ook zo geweest te zijn, de functie heette toen get_relation en het\n # gedeelte dat nu nog zo heet was daarin hardgecodeerd\n # deze functie wordt alleen aangeroepen in een paar methoden van de hieronder opgenomen klasse\n # GetRelations, namelijk om de namen van relaties uit andere objecten naar het huidige te kunnen\n # bepalen.\n # Als je get_relation zoals die nu is gebruikt zou je dat onderscheid (van versus naar relaties)\n # met dezelfde functie kunnen afhandelen", "def _filter_related_fk(self, rel):\n field = rel.field\n if isinstance(field, models.ForeignKey):\n if self._join_allowed(rel.parent_model, rel.model, field):\n return rel", "def get_depend_def_by_step_id(self, step_id):\n try:\n result = self._session.query(StepEntity.name).\\\n filter(StepEntity.id == StepDependencyEntity.parent_id).\\\n filter(StepDependencyEntity.child_id == step_id).\\\n all()\n\n result_list = [\n row[0] for row in result\n ]\n\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return result_list", "def get_facts(facts, situation, target):\n return [f[2] for f in facts if (f[0], f[1]) == (situation, target)]", "def test_select_by_concept_type(self, select_relationships):\n select_relationships.return_value = [Mock(subject='hello'), Mock(subject='kitty')]\n mock_concept_type = Mock(name='concept_type')\n\n result = FactQuery._select_by_concept_type(mock_concept_type)\n self.assertEqual(['hello', 'kitty'], result)\n select_relationships.assert_called_once_with('is', object_name=mock_concept_type)", "def get_links_package(id):\n\n relationships = []\n try:\n relationships = p.toolkit.get_action(\"package_relationships_list\")(\n data_dict={\"id\": id, \"rel\": \"linked_from\"}\n )\n except Exception, e:\n return {}\n\n links = []\n if relationships:\n for rel in relationships:\n try:\n access = p.toolkit.check_access(\n \"package_show\",\n context={\"user\": c.user},\n data_dict={\"id\": rel[\"object\"]},\n )\n link = p.toolkit.get_action(\"package_show\")(\n data_dict={\"id\": rel[\"object\"]}\n )\n links.append(link)\n except:\n pass\n return links", "def filter_relationships(self, srcif, routes):\n outroutes = []\n rel = self.relations[srcif]\n for route in routes:\n opp_rel = self.relations[route[PEER]]\n if (rel == CUST or opp_rel == CUST) or (rel == PROV and opp_rel == PROV):\n outroutes.append(route)\n return outroutes", "def findAllAppContactRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:APP_CONTACT]->(n2:Person) \"\n \"RETURN ID(n1) , r , r.date , r.hour, ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def 
findAllGetVaccineRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:GET_VACCINE]->(n2:Vaccine) \"\n \"RETURN ID(n1) , r , r.date , r.country , r.expirationDate , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def test_filter_relationships_by_concept_type__object(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n object=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n object=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n object=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='object')\n\n # Verify results\n self.assertEqual([mock_match_0, mock_match_2], filtered_matches)", "def get_linked_package(id):\n\n relationships = []\n try:\n relationships = p.toolkit.get_action(\"package_relationships_list\")(\n data_dict={\"id\": id, \"rel\": \"links_to\"}\n )\n except Exception, e:\n return {}\n\n linked = []\n if relationships:\n for rel in relationships:\n lnk = p.toolkit.get_action(\"package_show\")(\n data_dict={\"id\": rel[\"object\"]}\n )\n linked.append(lnk)\n\n return linked", "def get_fact(self, category, selected_option):\r\n try:\r\n conn = self.create_connection()\r\n query = \"\"\"WITH sub_category_lookup AS (\r\n\t\t\t\t\t\t\t\tSELECT id \r\n\t\t\t\t\t\t\t\tFROM categories \r\n\t\t\t\t\t\t\t\tWHERE sub_category = '%s' \r\n\t\t\t\t\t\t\t\tAND category = '%s')\r\n \t\tSELECT date_time, \r\n \tdata \r\n \t\tFROM fact\r\n \t\tWHERE category_id = (select category_id FROM sub_category_lookup) \r\n \t\tORDER BY date_time ; \"\"\"%(selected_option, \\\r\n category)\r\n data_frame = pd.read_sql(query, conn)\r\n conn.close()\r\n except (psycopg2.Error, ValueError):\r\n print(\"Error at get_fact, check connection or query\")\r\n return data_frame", "def get_dependency_package(id):\n\n relationships = []\n try:\n relationships = p.toolkit.get_action(\"package_relationships_list\")(\n data_dict={\"id\": id, \"rel\": \"depends_on\"}\n )\n except Exception, e:\n return {}\n\n dependencies = []\n if relationships:\n for rel in relationships:\n dependency = p.toolkit.get_action(\"package_show\")(\n data_dict={\"id\": rel[\"object\"]}\n )\n dependencies.append(dependency)\n return dependencies", "def select_foreign_edges(self, transaction_id, key=None):\n return [self.select(transaction_id, \"foreign_edges\", key)]", "def find_goal(self, concl, goal_id):\n prf = self.prf\n try:\n for n in goal_id:\n for item in prf.items[:n]:\n if item.th is not None and item.th.can_prove(concl):\n return item.id\n prf = prf.items[n].subproof\n except (AttributeError, IndexError):\n raise TacticException()", "def filter_relationships(self, srcif, routes):\n def filt(route):\n dst = route[DEST][:-1]\n dst += '2'\n return self.relations[srcif] == CUST or self.relations[dst] == CUST\n\n outroutes = routes.copy()\n outroutes = list(filter(filt, outroutes))\n return outroutes", "def relations(self):\n\t\treturn [(self.factions[k][0], self._faction_affinity.get(k, 50)) for k in self.factions.keys()]", "def test_relationship_edges(self):\n path = os.path.join(get_file_dir(), 'data', 
'GO_edges_relationship.json')\n with open(path, 'rt') as json_file:\n json_files = []\n for data in json_file:\n json_files.append(json.loads(data))\n for entry in json_files:\n if entry[\"id\"] == \"GO:0000332__GO:0003720__part_of\":\n self.assertEqual(entry[\"from\"], \"GO_term/GO:0000332\")\n self.assertEqual(entry[\"to\"], \"GO_term/GO:0003720\")\n self.assertEqual(entry[\"relationship_type\"], \"part_of\")\n if entry[\"from\"] == \"GO_term/GO:0000335\":\n self.assertEqual(entry[\"id\"], \"GO:0000335__GO:0006313__negatively_regulates\")\n self.assertEqual(entry[\"to\"], \"GO_term/GO:0006313\")\n self.assertEqual(entry[\"relationship_type\"], \"negatively_regulates\")", "def problem_relationships(self, identifier):\n return self._get(\"problems/%d/relationships\" % identifier).json()", "def get_queryset(self):\n\n return Relationship.objects.filter(\n Q(from_person=self.request.user.person) |\n Q(to_person=self.request.user.person))", "def test_getCpfRelations(self):\n pass", "def searchRelations(self):\n subcategory_id = self.concept_list.selectedItems()[0].data(Qt.UserRole)[1].id\n self.setConceptDescription()\n result = self.db.search_relation(subcategory_id)\n self.setResult(result, self.relation_list)", "def findAllLiveRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:LIVE]->(n2:House) \"\n \"RETURN ID(n1) , r , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def get_relationships_by_genus_type_for_source(self, source_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_source\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'sourceId': str(source_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def get_rel_question(self, org_id, rel_id):\n return self.get_rel_thread(org_id, rel_id).find('./RelQuestion')", "def facts_dslquery(dsl_dict, **kwargs):\n return _dslquery('facts', dsl_dict, **kwargs)", "def filter_for_term_relationships(src, relationship_type, object_id, target=True):\n filters = [\n Filter(\"type\", \"=\", \"relationship\"),\n Filter(\"relationship_type\", \"=\", relationship_type),\n ]\n if target:\n filters.append(Filter(\"target_ref\", \"=\", object_id))\n else:\n filters.append(Filter(\"source_ref\", \"=\", object_id))\n\n results = src.query(filters)\n return remove_deprecated(results)", "def test_child_relationships(self, init_db, category_with_favorites):\n\n category = Category.get(id=category_with_favorites.id)\n assert category.get_child_relationships() is not None\n assert len(category.favorites.all()) > 0", "def test_filter_selected_rels_raises_value_err():\n # Act and assert\n with raises(ValueError):\n _, _ = filter_selected_relationships(\n SimpleNodeSchema(),\n selected_relationships={InterestingAssetToSubResourceRel()},\n )", "def get_relationships_for_peers(self, source_id, destination_id):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_for_peers\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'sourceId': str(source_id),\n 
'destinationId': str(destination_id)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def get_query():\n return CiscoVlanIftableRelationshipQuery", "def get_relationships_by_genus_type(self, relationship_genus_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_genus_type\n # NOTE: This implementation currently ignores plenary view\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', DESCENDING)\n return objects.RelationshipList(result, runtime=self._runtime, proxy=self._proxy)", "def get_drupal_relation_ids(db_obj, db_cur, e1_entity_type, e1_entity_id,\n relation_cv, e2_entity_type, e2_entity_id):\n\n # relation details\n relation_ident = relation_cv[0]\n relation_type = relation_ident[1]\n\n # handle key relation-field\n relation_field_join = ''\n relation_field_cond = ''\n relation_value_cond = ''\n if len(relation_ident) > 2:\n relation_field_name = relation_ident[2]\n relation_value_type = relation_cv[1]\n\n # field join\n relation_field_join = (\n 'LEFT JOIN field_data_field_{0} AS k_rf\\n'\n ' ON k_rf.entity_id = e2.entity_id\\n'\n ' AND k_rf.revision_id = e2.revision_id' .\n format(relation_field_name)\n )\n\n # conditions\n relation_field_cond = (\n \"AND k_rf.entity_type = 'relation'\\n\"\n \"AND k_rf.deleted = 0\"\n )\n\n # handle value type\n if relation_value_type.startswith('term: '):\n relation_key_column = 'k_rf_t.name'\n relation_field_join += (\n '\\nLEFT JOIN taxonomy_term_data AS k_rf_t\\n'\n 'ON k_rf_t.tid = k_rf.field_{0}_tid' .\n format(relation_field_name)\n )\n elif relation_value_type == 'ip':\n relation_key_column = (\n 'k_rf.field_{0}_start'.format(relation_field_name)\n )\n else:\n relation_key_column = (\n 'k_rf.field_{0}_value'.format(relation_field_name)\n )\n\n # handle specified field value\n if len(relation_cv) > 2:\n relation_value = relation_cv[2]\n relation_value_cond = (\n 'AND {0} = %s'.format(relation_key_column)\n )\n\n # query string and arguments\n query_str = (\n'''\nSELECT e1.entity_id, e1.revision_id\nFROM field_data_endpoints AS e1\nLEFT JOIN field_data_endpoints AS e2\n ON e2.entity_id = e1.entity_id\n AND e2.revision_id = e1.revision_id\n AND e2.endpoints_r_index > e1.endpoints_r_index\n{0}\nWHERE e1.revision_id IN\n (SELECT MAX(vid)\n FROM relation_revision\n GROUP BY rid)\nAND e1.entity_type = 'relation'\nAND e1.bundle = %s\nAND e1.endpoints_entity_type = %s\nAND e1.endpoints_entity_id = %s\nAND e1.deleted = 0\nAND e2.endpoints_entity_type = %s\nAND e2.endpoints_entity_id = %s\nAND e2.deleted = 0\n{1}\n{2}\n''' .\n format(relation_field_join, relation_field_cond,\n relation_value_cond)\n )\n query_args = [relation_type, e1_entity_type, e1_entity_id,\n e2_entity_type, e2_entity_id]\n if len(relation_ident) > 2 and len(relation_cv) > 2:\n query_args.append(relation_value)\n\n # execute the query\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=True):\n return None\n ret = db_obj.fetchall(db_cur)\n if not ret[0]:\n return None\n if not ret[1]:\n return []\n return ret[1]", "def get_relation_for_chunk_id(self, relation_name, chunk_id):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(sql.SQL(\"SELECT * FROM {} WHERE chunk_id = 
%s\").format(sql.Identifier(relation_name)), [chunk_id])\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)", "def test_select_by_concept_type__no_matches(self, select_relationships):\n select_relationships.return_value = []\n\n result = FactQuery._select_by_concept_type(Mock(name='concept_types'))\n self.assertEqual([], result)", "def test_conceptional_relations(id, conrel, expected_ids):\n synset = germanet_data.get_synset_by_id(id)\n related = synset.relations[conrel]\n np.testing.assert_equal(sorted([syn.id for syn in related]), sorted(expected_ids))", "def get_connections_by_relation(self, qid, relation):\n if self._kg_symbols is None:\n return []\n return self._kg_symbols.get_connections_by_relation(qid, relation)", "def getreferingobjs(idfindex, idfobject):\n idf, edges = eppystuff.an_idfedges(idfindex)\n refobjs = idfobject.getreferingobjs() \n keys = [refobj.key for refobj in refobjs] \n objnames = [refobj.obj[1] for refobj in refobjs] \n idfkeys = idf_helpers.idfobjectkeys(idf)\n keysobjsindexes = [(idfkeys.index(refobj.key.upper()), \n idf.idfobjects[refobj.key.upper()].index(refobj))\n for refobj in refobjs] \n urls = [\"../../%s/%s\" % (idfkey, objkey) \n for idfkey, objkey in keysobjsindexes]\n urllinks = ['<a href=%s>%s</a>' % (url, name) \n for url, name in zip(urls, objnames)]\n lines = [\"%s->%s\" % (refobj.key, urllink) \n for refobj, urllink in zip(refobjs, urllinks)]\n return ', '.join(lines)", "def related(filterset, filter_name):\n if not filterset.relationship:\n return filter_name\n return LOOKUP_SEP.join([filterset.relationship, filter_name])", "def getFact(self, fact):\n for kbfact in self.kb.facts:\n if fact == kbfact:\n return True\n return False", "def get_by_cluster_id(cls, cluster_id):\n query = cls.all()\n if cluster_id is not None:\n query = query.join(models.NetworkGroup)\\\n .join(models.NodeGroup)\\\n .filter(models.NodeGroup.cluster_id == cluster_id)\n return query", "def query_rule_by_id(runtime, idd):\r\n return runtime.policy_parser.query_policy_by_id(idd).rule", "def get_related(self, module):\n\n connection = self._module._connection\n result = connection.get_relationships(self._module._name, self['id'],\n module._name.lower(), '', ['id'])\n\n entries = []\n for elem in result['entry_list']:\n entry = SugarEntry(module)\n entry._fields['id'] = elem['id']\n entries.append(entry)\n\n return entries", "def get_relationships_by_genus_type_for_peers(self, source_id, destination_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_peers\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'sourceId': str(source_id),\n 'destinationId': str(destination_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def find_relationship(person1, person2):\n lines1 = get_ancestor_lines(person1)\n lines2 = get_ancestor_lines(person2)\n mrcas = find_most_recent(set(lines1).intersection(set(lines2)))\n\n relationships = []\n for anc in mrcas:\n relationships.append((lines1[anc], lines2[anc]))\n return relationships", "def _get(self, association_id):\n association = DB_USER_CUSTOMER_RELS_TABLE.get(doc_id=int(association_id))\n if not association:\n 
flask_restful.abort(404, message=f\"Customer/user association '{association_id}' \"\n \"not found!\")\n res = {\n \"id\" : association.doc_id\n }\n res.update(association)\n\n cust_data = [DB_CUSTOMER_TABLE.get(doc_id=association['customer_id'])]\n user_data = [DB_USER_TABLE.get(doc_id=association['user_id'])]\n\n res['_embedded'] = {\n \"user\" : self.embed_user_data_in_result(user_data)[0],\n \"customer\" : self.embed_customer_data_in_result(cust_data)[0]\n }\n link_spec = {\n \"self\" : CustomerUserAssociation.get_self_url(association.doc_id),\n \"contained_in\" : CustomerUserAssociationList.get_self_url()\n }\n\n res['_links'] = self.make_links(link_spec)\n return res", "def get_queryset(self):\n\t\treturn super(CourseDocument, self).get_queryset().select_related(\n\t\t 'belongs_to'\n\t\t)", "def find_related_nodes(reltype, inst=None):\n if inst is None:\n inst = ctx.instance\n ret = []\n for rel in inst.relationships:\n if reltype in rel.type_hierarchy:\n ret.append(rel.target)\n return ret", "def select_favorite_foods(self):\n self.cursor = self.data_base.cursor(MySQLCursorPrepared)\n self.cursor.execute(\"USE Purbeurre\")\n self.cursor.execute(\"\"\"SELECT Favorite.id, Food.name_food\n FROM Food \n JOIN Favorite ON Food.id = Favorite.id_substitute_chooses \n WHERE Food.id = Favorite.id_substitute_chooses\n ORDER BY Favorite.id\"\"\")\n id_name_substitute = self.cursor.fetchall()\n self.cursor.execute(\"\"\"SELECT Food.name_food\n FROM Food\n JOIN Favorite ON Food.id = Favorite.id_food\n WHERE Food.id = Favorite.id_food\n ORDER BY Favorite.id\"\"\")\n name_substituted_food = self.cursor.fetchall()\n substituted_food_substitute = self.new_orm.transform_favorite_foods_to_object\\\n (id_name_substitute, name_substituted_food)\n id_substitute = substituted_food_substitute[0]\n name_substitute = substituted_food_substitute[1]\n name_substituted_food = substituted_food_substitute[2]\n return id_substitute, name_substituted_food, name_substitute", "def query_neo4j_index(gdb, id, category):\n index = gdb.nodes.indexes.get(category)\n nodes = index.get('id', id)\n\n # TODO: When we load the logical defs ontology we will have to deal with\n # multiple root nodes\n return nodes[0]", "def get_depend_by_child_id(self, child_id):\n try:\n result = self._session.query(StepDependencyEntity).\\\n filter(StepDependencyEntity.child_id == child_id).\\\n all()\n result_dict = self.result_dict(result)\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return result_dict", "def test_child_relationships(self, init_db, favorite1):\n\n favorite = Favorite.get(id=favorite1.id)\n\n with raises(NotImplementedError) as error:\n favorite.get_child_relationships()\n \n assert str(error.value) == \"The get_relationships method must be overridden in all child model classes\"", "def relationship(cls):\n return relationship.many_to_one(cls, 'relationship')", "def customer_group_get_related(group_id):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n query = \"\"\"\n SELECT \n `group_id`,\n `group_name`,\n `description`,\n `timestamp`,\n `created_by`,\n `creation_time`,\n `is_deleted`,\n `updated_by`,\n `role_id`,\n `is_default`,\n `is_customer`,\n `company_name`,\n `company_address`,\n `company_telephone`,\n `company_fax`,\n `company_website`,\n `company_sales_contact`,\n `company_purchase_contact`,\n `company_business`,\n `company_business_type`,\n `company_sales_email`,\n `company_purchase_email`,\n `company_reg_number`,\n `company_vat_number` \n FROM `groups` \n 
WHERE `groups`.`company_name` = (\n SELECT `asshole`.`company_name` \n FROM \n (\n SELECT * \n FROM `groups` \n WHERE `group_id` = \"%s\"\n ) AS `asshole`\n )\n \"\"\" %(group_id)\n \n group_details = None\n cursor = db.cursor()\n\n if cursor.execute(query) != 0:\n group_details = cursor.fetchall()\n\n cursor.close()\n db.close()\n\n return group_details", "def fact(self, name):\n facts = self.facts(name=name)\n return next(fact for fact in facts)", "def find_relations(self, rid:int, *rids):\n first = set(self.rid_to_relations.get(rid, ()))\n if rids:\n return first.intersection(*[self.rid_to_relations.get(x, set()) for x in rids])\n return first", "def get_recipe(self, _id):\n raise NotImplementedError()", "def get_edges(node: Node) -> RelationshipMatch:\n global _graph\n\n edges_connected_to_node = _graph.match((node,), r_type='LINKS_TO')\n return edges_connected_to_node", "def findAllVisitRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:VISIT]->(n2:Location) \"\n \"RETURN ID(n1) , r , r.date , r.start_hour , r.end_hour , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def affaires_factures_view(request):\n # Check connected\n if not Utils.check_connected(request):\n raise exc.HTTPForbidden()\n\n affaire_id = request.matchdict[\"id\"]\n\n query = request.dbsession.query(VFactures).filter(\n VFactures.affaire_id == affaire_id\n ).all()\n return Utils.serialize_many(query)", "def food_choice(self, category_id):\n\n self.cursor.execute(\"\"\" SELECT food.id, food.name\n FROM food\n INNER JOIN category_food\n ON food.id = category_food.food_id\n WHERE category_food.category_id = %s && nutriscore > 'b'\n ORDER BY id LIMIT 8 OFFSET 0\"\"\", category_id)\n rows = self.cursor.fetchall()\n print(\"Choisissez votre aliment :\")\n possible_choice = []\n while True:\n try:\n for row in rows:\n possible_choice.append(row[0])\n print(row[0], row[1])\n choice = int(input(\"Entrez votre choix: \\n\"))\n if choice in possible_choice:\n break\n except ValueError:\n continue\n\n return choice", "def get_relationships_by_genus_type_for_destination_on_date(self, destination_id, relationship_genus_type, from_, to):\n raise errors.Unimplemented()", "def add_fact_relationship(self, table_from: str, entry_from: dict, table_to: str, entry_to: dict):\n\n table_lut = {'p': \"10\", # procedure\n 'c': \"19\", # condition\n 'm': \"21\", # measurement\n 'o': \"27\"} # observation\n self.fact_relations.append((table_lut[table_from], entry_from, table_lut[table_to], entry_to))", "def get_all_oeid_from_referential(self, cr, uid, referential_id, context=None):\n\n ir_model_data_obj = self.pool.get('ir.model.data')\n model_data_ids = ir_model_data_obj.search(cr, uid, [('model', '=', self._name), ('referential_id', '=', referential_id)])\n #because OpenERP might keep ir_model_data (is it a bug?) 
for deleted records, we check if record exists:\n claimed_oe_ids = [x['res_id'] for x in ir_model_data_obj.read(cr, uid, model_data_ids, ['res_id'], context=context)]\n return claimed_oe_ids and self.exists(cr, uid, claimed_oe_ids, context=context) or []", "def findAllMakeTestRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:MAKE_TEST]->(n2:Test) \"\n \"RETURN ID(n1) , r , r.date , r.hour , r.result , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def _add_relationships(self, element: Element) -> None:\n elements: Set[str] = {v.id for v in self.element_views}\n\n for relationship in element.get_efferent_relationships():\n if relationship.destination.id in elements:\n self._relationship_views.add(\n RelationshipView(relationship=relationship)\n )\n\n for relationship in element.get_afferent_relationships():\n if relationship.source.id in elements:\n self._relationship_views.add(\n RelationshipView(relationship=relationship)\n )", "def GetConcept(self, concept_id):\n for concept in self.concepts:\n if concept.concept_id == concept_id:\n return concept\n\n return None", "def get_graph_info(self, graph_flow_id, type = None):\n try:\n query_set = models.AUTO_ML_RULE.objects.filter(graph_flow_id=graph_flow_id)\n query_set = serial.serialize(\"json\", query_set)\n query_set = json.loads(query_set)\n ids = []\n for row in query_set :\n ids.append(row)\n return ids\n except Exception as e:\n raise Exception(e)", "def get_food_with_id(cls, food_id):\n obj = cls.objects(food_id=food_id).first()\n return obj", "def select_children(self):\n objs = []\n for obj in pm.selected():\n objs.extend(obj.listRelatives(ad=True, type=[\"transform\", \"joint\"]))\n pm.select(objs, add=True)", "def filter_relationships(self, srcif, routes):\n outroutes = []\n for r in routes:\n src = self.relations[srcif]\n dest = self.relations[r[SRC_IF]]\n # there prob is a single if conditional that can account for all cases but watevs\n if src == PEER and dest == PEER:\n continue\n if src == PEER and dest == PROV:\n continue\n if src == PROV and dest == PEER:\n continue\n outroutes.append(r)\n\n return outroutes", "def fishs_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=FISH_TYPE_URI,\n rdf_type_name=FISH_TYPE_NAME, \n kls=Fish)", "def related_to(self, name=None):\n\t\treturn self.related(name, True)", "def findAll(tx):\n query = (\n \"MATCH (n1)-[r]->(n2) \"\n \"RETURN n1 AS node1 , r AS relationship , n2 AS node2 \"\n )\n\n result = tx.run(query)\n return [(record[\"node1\"], record[\"relationship\"], record[\"node2\"]) for record in result]", "def get_referents(data):\n fakes_group = data.groups.get(\"Fakes\")\n\n return [obj for obj in data.groups[\"Referents\"].objects\n if not obj.hide_render or fakes_group in obj.users_group]", "def getAttributsByIdref(self, id) :\n\t\t# if id in self.lid.keys() :\n\t\t# \treturn self.lid[id]\n\t\t# else :\n\t\treturn self._getIdrefs(self.doc.documentElement, id)", "def createRelationshipsInfect(id, test_date, test_hour, daysBack):\n familyQuery = (\n \"MATCH (pp:Person)-[:LIVE]->(h:House)<-[:LIVE]-(ip:Person) \"\n \"WHERE ID(pp) = $id AND ip <> pp AND NOT (ip)<-[:COVID_EXPOSURE]-(pp)\"\n \"RETURN DISTINCT ID(ip);\"\n )\n\n \"\"\"\n IMPORTANT: ($date) represents the date from which we check the contacts. 
It is the date of positive test - 7 days\n We check all contacts until the date of positive test\n \"\"\"\n appContactQuery = (\n \"MATCH (pp:Person)-[r1:APP_CONTACT]->(ip:Person) \"\n \"WHERE ID(pp) = $id AND (r1.date > date($date) OR (r1.date = date($date) AND r1.hour >= time($hour))) \"\n \"AND (r1.date < date($date) + duration({days:7}) OR (r1.date = date($date)+duration({days:7}) AND \"\n \"r1.hour <= time($hour))) \"\n \"AND NOT \"\n \"(pp)-[:COVID_EXPOSURE{date: r1.date}]->(ip)\"\n \"RETURN DISTINCT ID(ip) , r1.date;\"\n )\n locationContactQuery = (\n \"MATCH (pp:Person)-[r1:VISIT]->(l:Location)<-[r2:VISIT]-(ip:Person) \"\n \"WHERE ID(pp) = $id AND ip <> pp AND (r1.date > date($date) OR (r1.date = date($date) AND r1.start_hour >= time($hour))) \"\n \"AND (r1.date < date($date) + duration({days:7}) OR (r1.date = date($date)+duration({days:7}) AND \"\n \"r1.end_hour <= time($hour))) AND r2.date = r1.date AND \"\n \"((r1.start_hour < r2.start_hour AND r1.end_hour > r2.start_hour) OR \"\n \"(r2.start_hour < r1.start_hour AND r2.end_hour > r1.start_hour)) AND NOT \"\n \"(pp)-[:COVID_EXPOSURE{name: l.name , date: r1.date}]->(ip)\"\n \"RETURN DISTINCT ID(ip) , r1.date , l.name;\"\n )\n\n # date = datetime.date.today() - datetime.timedelta(daysBack)\n \"\"\"\n date is referred to date test - daysback \n \"\"\"\n date = test_date - datetime.timedelta(daysBack)\n infectedIds = []\n with driver.session() as s:\n familyInfected = s.read_transaction(findInfectInFamily, familyQuery, id)\n appInfected = s.read_transaction(findInfect, appContactQuery, id, date, test_hour)\n locationInfected = s.read_transaction(findInfect, locationContactQuery, id, date, test_hour)\n\n for el in familyInfected, appInfected, locationInfected:\n if len(el) > 0:\n # Take just the id\n infectedIds.append(el[0]['ID(ip)'])\n\n infectedIds = []\n for el in familyInfected:\n infectedIds.append(el['ID(ip)'])\n\n for infectedId in infectedIds:\n query = (\n \"MATCH (pp:Person) , (ip:Person) \"\n \"WHERE ID(pp) = $id AND ID(ip) = $ipid \"\n \"CREATE (pp)-[:COVID_EXPOSURE{date:date($date)}]->(ip);\"\n )\n s.write_transaction(createInfectFamily, query, id, infectedId, date.strftime(\"%Y-%m-%d\"))\n\n infectedIds = []\n for el in appInfected:\n details = []\n details.append(el['ID(ip)'])\n details.append(el['r1.date'])\n infectedIds.append(details)\n\n for infectedId, infectedDate in infectedIds:\n query = (\n \"MATCH (pp:Person) , (ip:Person) \"\n \"WHERE ID(pp) = $id AND ID(ip) = $ipid \"\n \"CREATE (pp)-[:COVID_EXPOSURE{date: date($date)}]->(ip);\"\n )\n s.write_transaction(createInfectApp, query, id, infectedId, infectedDate)\n\n infectedIds = []\n\n for el in locationInfected:\n details = []\n details.append(el['ID(ip)'])\n details.append(el['r1.date'])\n details.append(el['l.name'])\n infectedIds.append(details)\n\n for infectedId, infectedDate, infectedPlace in infectedIds:\n query = (\n \"MATCH (pp:Person) , (ip:Person) \"\n \"WHERE ID(pp) = $id AND ID(ip) = $ipid \"\n \"CREATE (pp)-[:COVID_EXPOSURE{date: date($date) , name: $name}]->(ip);\"\n )\n s.write_transaction(createInfectLocation, query, id, infectedId, infectedDate, infectedPlace)", "def test_select_matching_relationships(self, select_by_values):\n # Set up mocks and test data\n select_by_values.return_value = ['one', 'two']\n test_relationship_type_name = 'eats'\n test_subject_name = 'otter'\n test_object_name = 'mussels'\n test_rel_number = 99\n \n # Make call\n matches = FactQuery._select_matching_relationships(test_relationship_type_name,\n 
subject_name=test_subject_name,\n object_name=test_object_name,\n relationship_number=test_rel_number)\n # Verify result\n self.assertEqual(['one', 'two'], matches)\n\n # Verify mocks\n select_by_values.assert_called_once_with(relationship_type_name=test_relationship_type_name,\n subject_name=test_subject_name,\n object_name=test_object_name,\n relationship_number=test_rel_number)", "def get_edge_query(from_id, rel_type, to_id):\n # TODO: what to do with labels here.\n\n return ((\"MERGE (a:user {id: %s}) \"\n \"MERGE (b:user {id: %s}) \"\n \"MERGE a-[:%s]->b \"\n \"RETURN *\") % (from_id, to_id, rel_type))", "def test_get_all_related(self):\n c1 = content.ContentMetadata.objects.using(self.the_channel_id).get(title=\"c1\")\n c2 = content.ContentMetadata.objects.using(self.the_channel_id).get(title=\"c2\")\n # if c1 is related to c2\n expected_output = content.ContentMetadata.objects.using(self.the_channel_id).filter(title__in=[\"c2\"])\n actual_output = api.get_all_related(channel_id=self.the_channel_id, content=c1)\n self.assertEqual(set(expected_output), set(actual_output))\n # then c2 should be related to c1\n expected_output = content.ContentMetadata.objects.using(self.the_channel_id).filter(title__in=[\"c1\"])\n actual_output = api.get_all_related(channel_id=self.the_channel_id, content=c2)\n self.assertEqual(set(expected_output), set(actual_output))" ]
[ "0.58382934", "0.514626", "0.50882876", "0.5030613", "0.5016771", "0.49949938", "0.4898808", "0.48690563", "0.48584178", "0.48120192", "0.47925648", "0.47674325", "0.47505498", "0.473037", "0.473037", "0.473037", "0.47198808", "0.47121876", "0.46776888", "0.46749067", "0.46717918", "0.46698582", "0.46666363", "0.46639878", "0.46553952", "0.4637488", "0.46335638", "0.4614692", "0.46069464", "0.45725232", "0.45718956", "0.45646247", "0.4555684", "0.454892", "0.45423663", "0.45191088", "0.44981882", "0.44872674", "0.44848981", "0.44800392", "0.44776", "0.446162", "0.44559887", "0.44543728", "0.44526845", "0.44430655", "0.44379312", "0.4435704", "0.44204387", "0.44113192", "0.4408393", "0.44010717", "0.4381111", "0.4379483", "0.43544328", "0.43325356", "0.4327235", "0.43249807", "0.43114114", "0.42850843", "0.42817855", "0.42815602", "0.42738768", "0.42716786", "0.4266977", "0.42585248", "0.42561117", "0.42542383", "0.42503914", "0.4243889", "0.4238307", "0.42360806", "0.42355475", "0.4230841", "0.4213431", "0.42102978", "0.4209639", "0.42021486", "0.41949242", "0.4187648", "0.41842034", "0.41823983", "0.4175797", "0.41752973", "0.4173167", "0.41713324", "0.41686165", "0.41685918", "0.41620895", "0.41620594", "0.41552562", "0.41545933", "0.41503957", "0.41502088", "0.41489246", "0.4131675", "0.4130205", "0.41261205", "0.41235915", "0.41223186" ]
0.68101376
0
Select Relationship with specified subject, object and relationship type.
def select_by_foreign_keys(cls, subject_id=None, object_id=None, relationship_type_id=None): filter_clause = sa.and_( sa.and_(cls.subject_id == subject_id, cls.object_id == object_id), cls.relationship_type_id == relationship_type_id) return db.session.query(cls).filter(filter_clause).first()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_by_values(cls, relationship_type_name=None, relationship_number=None,\n subject_name=None, object_name=None):\n query = db.session.query(cls).\\\n join(RelationshipType).\\\n filter(RelationshipType.relationship_type_name==relationship_type_name)\n if relationship_number:\n query = query.filter(Relationship.count==relationship_number)\n if subject_name: \n subject_concept = sa_orm.aliased(Concept)\n query = query.\\\n join(subject_concept, Relationship.subject_id==subject_concept.concept_id).\\\n filter(subject_concept.concept_name==subject_name)\n if object_name:\n object_concept = sa_orm.aliased(Concept)\n query = query.\\\n join(object_concept, Relationship.object_id==object_concept.concept_id).\\\n filter(object_concept.concept_name==object_name)\n return query.all()", "def relation( self, obj, relType ):\n raise NotImplementedError(\"relation\")", "def test_filter_relationships_by_concept_type__object(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n object=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n object=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n object=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='object')\n\n # Verify results\n self.assertEqual([mock_match_0, mock_match_2], filtered_matches)", "def get_association(relation: str, subject_node: Node, object_node: Node, is_negated: bool, pmid: str = '',\n association_config: dict = None):\n if association_config is None:\n association_config = associations\n most_relevant_relation = list(get_biolink_association(subject_node, object_node, association_config).items())[0][0]\n biolink_relation = most_relevant_relation(id=0,\n subject=subject_node['id'],\n relation=relation,\n object=object_node['id'],\n negated=is_negated,\n publications=[pmid])\n return get_relationship_from_biolink(subject_node, biolink_relation, object_node)", "def test_select_by_concept_type(self, select_relationships):\n select_relationships.return_value = [Mock(subject='hello'), Mock(subject='kitty')]\n mock_concept_type = Mock(name='concept_type')\n\n result = FactQuery._select_by_concept_type(mock_concept_type)\n self.assertEqual(['hello', 'kitty'], result)\n select_relationships.assert_called_once_with('is', object_name=mock_concept_type)", "def test_filter_relationships_by_concept_type__subject(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n subject=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n subject=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n subject=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='subject')\n\n # Verify results\n 
self.assertEqual([mock_match_0, mock_match_2], filtered_matches)", "def get_relationship_type(\n r: Union[\"ModelRelationship\", t.RelationshipType, t.RelationshipName, str]\n) -> t.RelationshipType:\n relationship_type = r.type if isinstance(r, ModelRelationship) else r\n return t.RelationshipType(normalize_relationship_type(relationship_type))", "def get_relationships_by_record_type(self, relationship_record_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_record_type\n # STILL NEED TO IMPLEMENT!!!\n return objects.RelationshipList([])", "def get_relationship_from_biolink(biolink_subject: Node,\n biolink_association: Association,\n biolink_object: Node):\n properties = {key: value for key, value in biolink_association.__dict__.items() if key != 'id'}\n return Relationship(biolink_subject,\n get_pythonic_name(biolink_association.__class__.__name__),\n biolink_object,\n **properties)", "def get_biolink_association(subject_node: Node, object_node: Node, association_config: dict = None) -> dict:\n if association_config is None:\n association_config = associations\n subject_query = list(subject_node.labels)[0]\n object_query = list(object_node.labels)[0]\n association = {association: requirements for association, requirements in association_config.items()\n if subject_query in requirements[0]\n and object_query in requirements[1]}\n if len(association) is 0:\n association = {Association: ['*']}\n return association", "def filter_for_term_relationships(src, relationship_type, object_id, target=True):\n filters = [\n Filter(\"type\", \"=\", \"relationship\"),\n Filter(\"relationship_type\", \"=\", relationship_type),\n ]\n if target:\n filters.append(Filter(\"target_ref\", \"=\", object_id))\n else:\n filters.append(Filter(\"source_ref\", \"=\", object_id))\n\n results = src.query(filters)\n return remove_deprecated(results)", "def get_relationships_by_genus_type(self, relationship_genus_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_genus_type\n # NOTE: This implementation currently ignores plenary view\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', DESCENDING)\n return objects.RelationshipList(result, runtime=self._runtime, proxy=self._proxy)", "def by_type(self, type):\n return self.filter(related_type__title=type)", "def get_relationship(self, relationship_id):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resource\n # NOTE: This implementation currently ignores plenary view\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find_one(\n dict({'_id': ObjectId(self._get_id(relationship_id, 'relationship').get_identifier())},\n **self._view_filter()))\n return objects.Relationship(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)", "def get_ids_related_to(cls, object_type, related_type, related_ids=[]):\n\n if isinstance(related_ids, (int, long)):\n related_ids = [related_ids]\n\n if not related_ids:\n return db.session.query(Relationship.source_id).filter(sql.false())\n\n destination_ids = db.session.query(Relationship.destination_id).filter(\n and_(\n Relationship.destination_type == object_type,\n Relationship.source_type == related_type,\n Relationship.source_id.in_(related_ids),\n )\n )\n 
source_ids = db.session.query(Relationship.source_id).filter(\n and_(\n Relationship.source_type == object_type,\n Relationship.destination_type == related_type,\n Relationship.destination_id.in_(related_ids),\n )\n )\n\n queries = [destination_ids, source_ids]\n queries.extend(cls.get_extension_mappings(\n object_type, related_type, related_ids))\n queries.extend(cls.get_special_mappings(\n object_type, related_type, related_ids))\n\n return cls._array_union(queries)", "def test_get_relation_type(self):\n pass", "def relationship(*args, b: bool=True, relationshipData: Union[AnyStr, List[AnyStr], bool]=\"\",\n q=True, query=True, e=True, edit=True, **kwargs)->Union[None, Any]:\n pass", "def get(self, dto):\n assert dto.using in list(self.models.keys())\n Relation = self.models[dto.using]\n return self.session.query(Relation)\\\n .filter(Relation.purpose == dto.purpose)\\\n .filter(Relation.sender == dto.sender)\\\n .filter(Relation.recipient == dto.recipient)\\\n .first()", "def relationship(cls):\n return relationship.many_to_one(cls, 'relationship')", "def get_relation(srt, soort):\n result, multiple = None, None\n if srt != soort or soort in ('funcproc', 'techproc'):\n for relobj in my.rectypes[srt]._meta.get_fields():\n if relobj.related_model and corr_naam(relobj.related_model._meta.model_name) == soort:\n result = relobj.name\n multiple = False if relobj.get_internal_type() == 'ForeignKey' else True\n break\n return result, multiple", "def get_object(self, subject=None, predicate=None):\n\n # Get the result of the search\n results = self.rdf.objects(subject, predicate)\n as_list = list(results)\n\n # Don't raise exceptions, value test!\n if not as_list:\n return None\n\n return as_list[0]", "def get_relationships_by_genus_type_for_source(self, source_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_source\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'sourceId': str(source_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def get_relationships_by_genus_type_for_peers(self, source_id, destination_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_peers\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'sourceId': str(source_id),\n 'destinationId': str(destination_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def subjects(\n self,\n predicate: Optional[\"_PredicateType\"] = None,\n object: Optional[\"_ObjectType\"] = None,\n ) -> Generator[\"_SubjectType\", None, None]:\n for t, c in self.triples((None, predicate, object)):\n yield t[0]", "def subjects(\n self,\n predicate: Optional[\"_PredicateType\"] = None,\n object: Optional[\"_ObjectType\"] = None,\n ) -> Generator[\"_SubjectType\", None, None]:\n for t, c in self.triples((None, predicate, object)):\n yield t[0]", "async def determine_contact(rec, ref: Ref, 
type='parent'):\n if type not in ['parent', 'impacted']:\n raise ValueError(\"Type must be impacted or parent!\")\n\n LOG.debug(f\"Determing {type} for object id: %s -- %s-%s...\", rec.id,\n rec.Name, rec.Type)\n offset_min = rec.last_seen - 2.5\n\n if type == \"parent\":\n accpt_colors = ['Blue', 'Red'\n ] if rec.Color == 'Violet' else [rec.Color]\n\n # query_filter = (\n # ~(Object.type.startswith('Decoy'))\n # & ~(Object.c.type.startswith('Misc'))\n # & ~(Object.c.type.startswith('Projectile'))\n # & ~(Object.c.type.startswith('Weapon'))\n # &\n # ~(Object.c.type.startswith(\"Ground+Light+Human+Air+Parachutist\")))\n query_filter = \" (type not like ('%Decoy%')\"\\\n \" AND type not like ('%Misc%')\"\\\n \" AND type not like ('%Weapon%')\"\\\n \" AND type not like ('%Projectile%')\"\\\n \" AND type not like ('%Ground+Light+Human+Air+Parachutist%'))\"\n\n elif type == 'impacted':\n accpt_colors = ['Red'] if rec.Color == 'Blue' else ['Red']\n # query_filter = (Object.c.type.startswith('Air+'))\n query_filter = \" type like ('%Air+%')\"\n\n else:\n raise NotImplementedError\n\n color_query = f\"\"\" color in ('{\"','\".join(accpt_colors)}')\"\"\"\n id_query = f\" id != {rec.id} \"\n query = f\"\"\" SELECT id FROM object\n WHERE {query_filter} AND {color_query} AND {id_query}\n \"\"\"\n\n nearby_objs = await DB.fetch(query)\n\n closest = []\n for nearby in nearby_objs:\n near = ref.obj_store[nearby[0]]\n if ((near.last_seen <= offset_min\n and not (near.Type.startswith('Ground') and near.alive == 1))\n and (abs(near.alt - rec.alt) < 2000)\n and (abs(near.lat - rec.lat) <= 0.0005)\n and (abs(near.lon - rec.lon) <= 0.0005)):\n continue\n\n prox = compute_dist(rec.cart_coords, near.cart_coords)\n LOG.debug(\"Distance to object %s - %s is %s...\", near.Name, near.Type,\n str(prox))\n if not closest or (prox < closest[1]):\n closest = [near.id, prox, near.Name, near.Pilot, near.Type]\n\n if not closest:\n return None\n\n if closest[1] > 1000:\n LOG.warning(\n f\"Rejecting closest {type} for {rec.id}-{rec.Name}-{rec.Type}: \"\n \"%s %sm...%d checked!\", closest[4],\n str(closest[1]), len(nearby_objs))\n\n return None\n\n return closest", "def fetch_relation(self, address):\n if (self.from_id is not None) and (self.to_id is not None):\n new_neofj = NeoFJ(address=address)\n relations = new_neofj.get_two_node_relations(_id1=self.from_id, _id2=self.to_id, _f_relation=self.rel_type)\n relation = relations[0]\n self.rel_type = relation.type\n self.rel_dict = relation.properties", "def get_relationships_by_parent_genus_type(self, relationship_genus_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type\n # STILL NEED TO IMPLEMENT!!!\n return objects.RelationshipList([])", "def get_relationships_by_genus_type_for_destination(self, destination_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_destination\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'destinationId': str(destination_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def delete_relationship(tx, node_value_1=None, node_value_2=None, node_type_1=None, node_type_2=None, relationship=None):\n if 
node_value_1 is None and node_type_1 is None:\n cql = \"MATCH ()-[u:\" + relationship + \"]-(w:\" + node_type_2 + \"{name:$node_value_2}) \" \\\n \"DELETE u;\"\n try:\n tx.run(cql, node_value_2=node_value_2)\n except Exception as e:\n print(str(e))\n elif node_value_2 is None and node_type_2 is None:\n cql = \"MATCH (s:\" + node_type_1 + \"{name:$node_value_1})-[u:\" + relationship + \"]-() \" \\\n \"DELETE u;\"\n try:\n tx.run(cql, node_value_1=node_value_1)\n except Exception as e:\n print(str(e))\n else:\n cql = \"MATCH (s:\" + node_type_1 + \"{name:$node_value_1})-[u:\" + relationship + \"]-(w:\" + node_type_2 + \"{name:$node_value_2}) \" \\\n \"DELETE u;\"\n try:\n tx.run(cql, node_value_1=node_value_1, node_value_2=node_value_2)\n except Exception as e:\n print(str(e))", "def _add_relationship(self, reltype, target_part):\n rId = self._relationships._next_rId\n rel = _Relationship(rId, reltype, target_part)\n self._relationships._additem(rel)\n return rel", "def relation(self, related=None, group=None):\n\t\tif not related:\n\t\t\treturn None\n\n\t\t# Try to get parent model for multi-table models\n\t\tif hasattr(related, 'parent_model'):\n\t\t\trelated_content_type = ContentType.objects.get_for_model(related.parent_model)\n\t\telse:\n\t\t\trelated_content_type = ContentType.objects.get_for_model(type(related))\n\n\t\targs = {\n\t\t\t'content_type': ContentType.objects.get_for_model(type(self)),\n\t\t\t'object_id': self.pk,\n\t\t\t'related_object_id': related.pk,\n\t\t\t'related_content_type': related_content_type,\n\t\t}\n\n\t\tif group:\n\t\t\targs.update({'group': group})\n\n\t\tfrom .models import Related\n\t\treturn Related.objects.get(**args)", "def get_relationship_query(self):\n # Implemented from template for\n # osid.resource.ResourceQuerySession.get_resource_query_template\n return queries.RelationshipQuery(runtime=self._runtime)", "def fetch_obj(type, id, error=404, new_id=False):\n if id is None:\n abort(error)\n obj_q = Session.query(type)\n obj = obj_q.get(int(id))\n #else:\n # obj = obj_q.filter(type.ID==int(id)).first()\n\n if obj is None:\n abort(error)\n return obj", "def find_related_nodes(reltype, inst=None):\n if inst is None:\n inst = ctx.instance\n ret = []\n for rel in inst.relationships:\n if reltype in rel.type_hierarchy:\n ret.append(rel.target)\n return ret", "def test_find_relation_types(self):\n pass", "def subject_predicates(\n self, object: Optional[\"_ObjectType\"] = None\n ) -> Generator[Tuple[\"_SubjectType\", \"_PredicateType\"], None, None]:\n for t, c in self.triples((None, None, object)):\n yield t[0], t[1]", "def subject_predicates(\n self, object: Optional[\"_ObjectType\"] = None\n ) -> Generator[Tuple[\"_SubjectType\", \"_PredicateType\"], None, None]:\n for t, c in self.triples((None, None, object)):\n yield t[0], t[1]", "def relationship(self):\r\n return relationships.Relationship(self)", "def relationship_types(self):\n return frozenset(self._relationships_by_type.keys())", "def select_object(obj, objects=[]):\n return __SelectObjects(obj, objects)", "def __init__(self, obj, relationship, subj):\n self._obj = obj\n self._subj = subj\n self._relationship = relationship", "def get_object(\n self, object_t, object_id=None, relation=None, parent=None, **params\n ):\n url = self.object_url(object_t, object_id, relation)\n params = params or {}\n if self.access_token is not None:\n params[\"access_token\"] = str(self.access_token)\n response = self.session.get(url, params=params)\n json_data = response.json()\n if \"error\" in json_data:\n 
raise ValueError(\n f\"API request return error for object: {object_t} id: {object_id}\"\n )\n return self._process_json(json_data, parent)", "def add_restriction(self, subject, predicate, object_):\n if type(object_) != rdflib.URIRef:\n object_ = self.check_thing(object_)\n\n if type(predicate) != rdflib.URIRef:\n predicate = self.check_thing(predicate)\n\n if type(subject) != infixowl.Class:\n if type(subject) != rdflib.URIRef:\n subject = self.check_thing(subject)\n subject = infixowl.Class(subject, graph=self.g)\n\n restriction = infixowl.Restriction(predicate, graph=self.g, someValuesFrom=object_)\n subject.subClassOf = [restriction] + [c for c in subject.subClassOf]", "def search_stix21_objects(rel_list, object_name, rel_type='any') -> list:\n searched_rel_list = list()\n for relationship in rel_list:\n if relationship[3] == rel_type or rel_type == 'any':\n if relationship[0] == object_name and relationship[0] == relationship[2]:\n searched_rel_list.append(relationship)\n else:\n for position in range(len(relationship)):\n if relationship[position] == object_name:\n searched_rel_list.append(relationship)\n return searched_rel_list", "def get_relationships_by_genus_type_for_peers_on_date(self, source_id, destination_id, relationship_genus_type, from_, to):\n raise errors.Unimplemented()", "def get_relationships_by_genus_type_for_destination_on_date(self, destination_id, relationship_genus_type, from_, to):\n raise errors.Unimplemented()", "def relationships(self):", "def relationship(self):\n return relationships.Relationship(self)", "def test_select_by_concept_type__no_matches(self, select_relationships):\n select_relationships.return_value = []\n\n result = FactQuery._select_by_concept_type(Mock(name='concept_types'))\n self.assertEqual([], result)", "def predicates(\n self,\n subject: Optional[\"_SubjectType\"] = None,\n object: Optional[\"_ObjectType\"] = None,\n ) -> Generator[\"_PredicateType\", None, None]:\n for t, c in self.triples((subject, None, object)):\n yield t[1]", "def predicates(\n self,\n subject: Optional[\"_SubjectType\"] = None,\n object: Optional[\"_ObjectType\"] = None,\n ) -> Generator[\"_PredicateType\", None, None]:\n for t, c in self.triples((subject, None, object)):\n yield t[1]", "def findByType(self, constraintType = 'internalConstraint'):\r\n for rule in self.rules:\r\n if (isinstance(rule, InternalConstraint) and constraintType == 'internalConstraint') or (isinstance(rule, PhysicalConstraint) and constraintType == 'physicalConstraint'):\r\n return rule", "def get_queryset(self):\n\t\treturn super(CourseDocument, self).get_queryset().select_related(\n\t\t 'belongs_to'\n\t\t)", "def create_statically_for_rule_graph(cls, product_type, subject_type) -> \"Get\":\n return cls(product_type, subject_type, None)", "def get_queryset(self):\n\n return Relationship.objects.filter(\n Q(from_person=self.request.user.person) |\n Q(to_person=self.request.user.person))", "def test_which_reptiles__species_subject(self, select_relationships, concept_is_species, \n filter_by_concept_type):\n # Set up mocks and test data\n parsed_query = Mock(name='parsed_query',\n text='which reptiles eat bugs',\n subject_name='reptiles',\n object_name='bugs',\n relationship_type_name='eat',\n relationship_number=3,\n relationship_negation=False)\n fact_query = FactQuery(parsed_query=parsed_query)\n\n mock_match_1 = Mock(name='match_1',\n subject=Mock(concept_name='subject_1'))\n mock_match_2 = Mock(name='match_2',\n subject=Mock(concept_name='subject_2'))\n 
select_relationships.return_value = [mock_match_1, mock_match_2]\n concept_is_species.side_effect = [False, True]\n filter_by_concept_type.return_value = [mock_match_1, mock_match_2]\n\n # Make call\n results = fact_query._which_animal_query()\n\n # Verify results\n self.assertEqual(set(['subject_1', 'subject_2']), set(results))\n\n # Verify mocks\n select_relationships.assert_called_once_with(\n 'eat', object_name='bugs', relationship_number=3)\n\n call_args_list = concept_is_species.call_args_list\n self.assertEqual(2, len(call_args_list))\n self.assertEqual(call('bugs'), call_args_list[0])\n self.assertEqual(call('reptiles'), call_args_list[1])\n\n filter_by_concept_type.assert_called_once_with(\n [mock_match_1, mock_match_2], 'reptiles', relationship_attr='subject')", "def find_type(source, target):\n x = [r for r in source.synset_relations if r.target == target.id]\n if len(x) != 1:\n raise Exception(\n \"Synsets not linked or linked by more than one property\")\n return x[0].rel_type", "def find_object(self, obj_type, obj_name):\n try:\n # Simply look it up by type and name.\n obj = self.model_map['object'][obj_type][obj_name][1]\n except KeyError:\n # No dice. This object doesn't exist in the model.\n obj = None\n\n return obj", "def get_query():\n return CiscoVlanIftableRelationshipQuery", "def specific(self):\n\n specific_type = ContentType.objects.get_for_id(self.specific_type_id)\n model_class = specific_type.model_class()\n if model_class is None:\n return self\n elif isinstance(self, model_class):\n return self\n else:\n return specific_type.get_object_for_this_type(id=self.id)", "def test_get_relationship_templates(self):\n pass", "def _filter_related_one2one(self, rel):\n field = rel.field\n if isinstance(field, models.OneToOneField):\n if self._join_allowed(rel.parent_model, rel.model, field):\n return rel", "def _get_relationship_data(self):\n relationship_field = request.path.split('/')[-1]\n if current_app.config.get('DASHERIZE_API') == True:\n relationship_field = relationship_field.replace('-', '_')\n\n if relationship_field not in get_relationships(self.schema).values():\n raise RelationNotFound('', \"{} has no attribute {}\".format(self.schema.__name__, relationship_field))\n\n related_type_ = self.schema._declared_fields[relationship_field].type_\n related_id_field = self.schema._declared_fields[relationship_field].id_field\n model_relationship_field = get_model_field(self.schema, relationship_field)\n\n return relationship_field, model_relationship_field, related_type_, related_id_field", "def relationships(self, r_type=None, n_ids=()):\n if r_type is None:\n r_sets = []\n else:\n r_sets = [self._relationships_by_type.get(r_type, frozenset())]\n if not n_ids or (hasattr(n_ids, \"__iter__\") and all(n_id is None for n_id in n_ids)):\n pass\n elif isinstance(n_ids, Sequence):\n for n_index, n_id in enumerate_nodes(n_ids):\n if n_id is not None:\n r_sets.append({r_id for r_id, i in self._relationships_by_node.get(n_id, ())\n if i == n_index})\n elif isinstance(n_ids, Set):\n for n_id in n_ids:\n if n_id is not None:\n r_sets.append({r_id for r_id, i in self._relationships_by_node.get(n_id, ())})\n else:\n raise TypeError(\"Nodes must be supplied as a Sequence or a Set\")\n if r_sets:\n return iter(reduce(and_operator, r_sets))\n else:\n return iter(self._relationships)", "def get_object_or_child_by_type(self, *types):\n\n objects = self.get_objects_or_children_by_type(*types)\n return objects[0] if any(objects) else None", "def get(self, *args, **kwargs):\n 
self.before_get(args, kwargs)\n\n relationship_field, model_relationship_field, related_type_, related_id_field = self._get_relationship_data()\n related_view = self.schema._declared_fields[relationship_field].related_view\n related_view_kwargs = self.schema._declared_fields[relationship_field].related_view_kwargs\n\n obj, data = self._data_layer.get_relationship(model_relationship_field,\n related_type_,\n related_id_field,\n kwargs)\n\n for key, value in copy(related_view_kwargs).items():\n if isinstance(value, str) and value.startswith('<') and value.endswith('>'):\n tmp_obj = obj\n for field in value[1:-1].split('.'):\n tmp_obj = getattr(tmp_obj, field)\n related_view_kwargs[key] = tmp_obj\n\n result = {'links': {'self': request.path,\n 'related': url_for(related_view, **related_view_kwargs)},\n 'data': data}\n\n qs = QSManager(request.args, self.schema)\n if qs.include:\n schema = compute_schema(self.schema, dict(), qs, qs.include)\n\n serialized_obj = schema.dump(obj)\n result['included'] = serialized_obj.data.get('included', dict())\n\n self.after_get(result)\n return result", "def set_relation(\n self, other, reltype=None, set_reverse=True\n ): ## TODO: logic to find and set siblings?\n ##TODO: test coverage\n reltype = reltype.upper()\n reltype_reverse = {\"CHILD\": \"PARENT\", \"PARENT\": \"CHILD\", \"SIBLING\": \"SIBLING\"}[\n reltype\n ]\n if isinstance(other, CalendarObjectResource):\n if other.id:\n uid = other.id\n else:\n uid = other.icalendar_component[\"uid\"]\n else:\n uid = other\n if set_reverse:\n other = self.parent.object_by_uid(uid)\n if set_reverse:\n other.set_relation(other=self, reltype=reltype_reverse, set_reverse=False)\n\n existing_relation = self.icalendar_component.get(\"related-to\", None)\n existing_relations = (\n existing_relation\n if isinstance(existing_relation, list)\n else [existing_relation]\n )\n for rel in existing_relations:\n if rel == uid:\n return\n\n self.icalendar_component.add(\n \"related-to\", uid, parameters={\"RELTYPE\": reltype}, encode=True\n )\n\n self.save()", "def test_change_relation_type(self):\n pass", "def __init__(self, *nodes, **properties):\n num_args = len(nodes)\n if num_args == 0:\n raise TypeError(\"Relationships must specify at least one endpoint\")\n elif num_args == 1:\n # Relationship(a)\n self._type = self.default_type()\n nodes = (nodes[0], nodes[0])\n elif num_args == 2:\n if nodes[1] is None or isinstance(nodes[1], string):\n # Relationship(a, \"TO\")\n self._type = nodes[1]\n nodes = (nodes[0], nodes[0])\n else:\n # Relationship(a, b)\n self._type = self.default_type()\n nodes = (nodes[0], nodes[1])\n elif num_args == 3:\n # Relationship(a, \"TO\", b)\n self._type = nodes[1]\n nodes = (nodes[0], nodes[2])\n else:\n raise TypeError(\"Hyperedges not supported\")\n Entity.__init__(self, nodes[0], self, nodes[1], **properties)", "def get_resource_by_type(self, graph_db, service_type):\n node = neo_resource.get_node_by_property(graph_db, self.label, 'name', service_type)\n return node", "def _get_related_instance(self,\n related_instance_class: Union[Type[NetworkInstance],\n Type[VnfInstance]],\n relationship_related_to_type: str) -> Iterator[\\\n Union[NetworkInstance,\n VnfInstance]]:\n if not relationship_related_to_type in [\"l3-network\", \"generic-vnf\", \"pnf\"]:\n msg = (\n f'Invalid \"relationship_related_to_type\" value. '\n f'Provided \"{relationship_related_to_type}\". 
'\n f'Has to be \"l3-network\" or \"generic-vnf\".'\n )\n raise ParameterError(msg)\n for relationship in self.relationships:\n if relationship.related_to == relationship_related_to_type:\n yield related_instance_class.create_from_api_response(\\\n self.send_message_json(\"GET\",\n (f\"Get {self.instance_id} \"\n f\"{related_instance_class.__class__.__name__}\"),\n f\"{self.base_url}{relationship.related_link}\"),\n self)", "def test_animal_attribute_query__species_subject(self, select_relationships, \n concept_is_species, filter_by_concept_type):\n # Set up mocks and test data\n mock_1 = Mock(name='mock_1')\n mock_2 = Mock(name='mock_2')\n mock_3 = Mock(name='mock_3')\n select_relationships.side_effect = [[], [mock_1, mock_2, mock_3]]\n concept_is_species.return_value = True\n filter_by_concept_type.return_value = [mock_1, mock_2]\n\n parsed_query = Mock(name='parsed_query',\n text='do birds have wings',\n subject_name='birds',\n object_name='wings',\n relationship_type_name='have',\n relationship_number=2)\n fact_query = FactQuery(parsed_query=parsed_query)\n\n # Make call\n result = fact_query._animal_attribute_query()\n \n # Verify results\n self.assertEqual('yes', result)\n\n # Verify mocks\n call_args_list = select_relationships.call_args_list\n self.assertEqual(2, len(call_args_list))\n expected_calls = [\n call('have', subject_name='birds', object_name='wings', relationship_number=2),\n call('have', object_name='wings', relationship_number=2)]\n self.assertEqual(expected_calls, call_args_list)", "def _select_match(matches):\n # TOOD: add user friendly representation to of each relation\n raise NotImplementedError()", "def test_add_relation_type(self):\n pass", "def is_relationship(column):\n return isinstance(column.property, sqlalchemy.orm.relationships.RelationshipProperty)", "def get_related(this_obj, other_obj, m2m=False):\n # is het niet raar dat je voor twee concrete objecten ophaalt naar welke van het ene type\n # verwezen wordt vanuit het andere type? 
Of is dat om de vorige/volgende te kunnen bepalen?\n # als ik kijk naar het gebruik in GetRelations dan is het tweede argument ook niet een object\n # maar een relatie (uit de fields verzameling)\n if m2m:\n fields = [x for x in other_obj._meta.many_to_many]\n else:\n fields = [x for x in other_obj._meta.get_fields() if x.name != 'project' and\n x.get_internal_type() == 'ForeignKey']\n for fld in fields:\n if fld.related_model == this_obj._meta.model:\n related_name = fld.related_query_name()\n break\n else:\n return None # not found\n try:\n return this_obj.__getattribute__(related_name).all()\n except UnboundLocalError:\n return None\n # zou je deze ook kunnen vervangen door een aanroep van get_relation en dan met de opgehaalde\n # naam de gerelateerde objecten ophalen en meteen de vorige en de volgende bepalen?\n # (heeft uiteraard konsekwenties voor de aanroepende code)\n # oorspronkelijk lijkt dat ook zo geweest te zijn, de functie heette toen get_relation en het\n # gedeelte dat nu nog zo heet was daarin hardgecodeerd\n # deze functie wordt alleen aangeroepen in een paar methoden van de hieronder opgenomen klasse\n # GetRelations, namelijk om de namen van relaties uit andere objecten naar het huidige te kunnen\n # bepalen.\n # Als je get_relation zoals die nu is gebruikt zou je dat onderscheid (van versus naar relaties)\n # met dezelfde functie kunnen afhandelen", "def get_model_relationship_id(\n thing: Union[\"ModelRelationship\", t.ModelRelationshipId, UUID, str]\n) -> t.ModelRelationshipId:\n if isinstance(thing, UUID):\n return t.ModelRelationshipId(thing)\n elif isinstance(thing, ModelRelationship):\n return thing.id\n return t.ModelRelationshipId(UUID(thing))", "def rel_from_domain(relation, category):\n # query for instance_of foolowed by sublcass_of and domain category\n query = f\"\"\"\n SELECT DISTINCT ?s ?sLabel ?property ?propertyLabel ?o ?oLabel\n {{\n hint:Query hint:optimizer \"None\"\n VALUES ?property {{wdt:{relation}}}\n ?s wdt:P31* / wdt:P279* wd:{category} . # Find items in the domain\n ?s ?property ?o .\n MINUS {{?s wdt:P31 / wdt:P279* wd:Q2725376 }} # exclude 'demographics'\n SERVICE wikibase:label {{ bd:serviceParam wikibase:language \"[AUTO_LANGUAGE],en\" . 
}}\n }}\n \"\"\"\n return apply_request(query)", "def setType(self, *args):\n return _libsbml.Association_setType(self, *args)", "def _create_new_relation_concept(self, rc_type, data_dict):\n # generate name, create individual with role assignments\n i = self.auto_generated_name_numbers[rc_type]\n self.auto_generated_name_numbers[rc_type] += 1\n relation_name = f\"i{rc_type.name}_{i}\"\n\n kwargs = {}\n for key, value in data_dict.items():\n res = self._handle_key_for_individual(key, value, relation_name, None)\n if res is not None:\n kwargs.update(res)\n\n relation_individual = self._create_individual(rc_type, relation_name, relation_name, label=None, kwargs=kwargs)\n\n return relation_individual", "def selectType(self):\n\n\t\tif len(self.type) == 0:\n\t\t\treturn\n\n\t\ttmplist = []\n\t\tfor atom in self.atomlist:\n\t\t\tfound = False\n\t\t\tfor type in self.type:\n\t\t\t\tif atom.kind == type:\n\t\t\t\t\tfound = True\t\n\n\t\t\tif found and not self.invtype:\n\t\t\t\ttmplist.append(atom)\n\t\t\tif not found and self.invtype:\n\t\t\t\ttmplist.append(atom)\n\n\t\tself.atomlist = tmplist", "def get_object_by_type(self, *types: str) -> Optional[TgnObject]:\n children = self.get_objects_by_type(*types)\n return children[0] if any(children) else None", "def _parse_relation(chunk, type=\"O\"):\n r1 = chunk.get(XML_RELATION)\n r2 = chunk.get(XML_ID, chunk.get(XML_OF))\n r1 = [x != \"-\" and x or None for x in r1.split(\"|\")] or [None]\n r2 = [x != \"-\" and x or None for x in r2.split(\"|\")] or [None]\n r2 = [x is not None and x.split(_UID_SEPARATOR )[-1] or x for x in r2]\n if len(r1) < len(r2): r1 = r1 + r1 * (len(r2)-len(r1)) # [1] [\"SBJ\", \"OBJ\"] => \"SBJ-1;OBJ-1\"\n if len(r2) < len(r1): r2 = r2 + r2 * (len(r1)-len(r2)) # [2,4] [\"OBJ\"] => \"OBJ-2;OBJ-4\"\n return \";\".join([\"-\".join([x for x in (type, r1, r2) if x]) for r1, r2 in zip(r1, r2)])", "def get_record_relationship_id(\n thing: Union[\"RecordRelationship\", t.RecordRelationshipId, UUID, str]\n) -> t.RecordRelationshipId:\n if isinstance(thing, UUID):\n return t.RecordRelationshipId(thing)\n elif isinstance(thing, RecordRelationship):\n return thing.id\n return t.RecordRelationshipId(UUID(thing))", "def object(self):\n if not self.initial.get('content_type'):\n return None\n if not self.initial.get('object_id'):\n return None\n return self.initial.get('content_type').get_object_for_this_type(\n pk=self.initial.get('object_id')\n )", "def _get_permission(self, obj_type, path, username):\n if obj_type == Collection:\n# XXX - in iRODS < 4.2, CollectionUser.name isn't supported.\n# query = self.session.query(Collection, CollectionAccess).filter(\n# CollectionUser.name == username, Collection.name == path)\n# result = [self.perm_str_mapping[row[CollectionAccess.name]] for row in query\n query = self.session.query(User.id).filter(User.name == username)\n for row in query:\n id = row[User.id]\n query = self.session.query(Collection, CollectionAccess).filter(\n CollectionAccess.user_id == id, Collection.name == path)\n result = [self.perm_str_mapping[row[CollectionAccess.name]] for row in query]\n### XXX - ^^^\n return result\n if obj_type == DataObject:\n conditions = [\n Collection.name == dirname(path),\n DataObject.name == basename(path),\n User.name == username\n ]\n query = self.session.query(DataObject.name, DataAccess.name) \\\n .filter(*conditions).all()\n result = [self.perm_str_mapping[row[DataAccess.name]] for row in query]\n return result\n self._fail(\"Unsupported Object type\")\n return None", "def filter(self, 
destination_object=None, source_object=None, **kwargs):\n if destination_object:\n kwargs.update({\n \"destination_id\": destination_object.pk,\n \"destination_type\": get_for_model(destination_object),\n })\n if source_object:\n kwargs.update({\n \"source_id\": source_object.pk,\n \"source_type\": get_for_model(source_object),\n })\n return super(RelatedContentQuerySet, self).filter(**kwargs)", "def __getattribute__(self, name):\n x = object.__getattribute__(self, name)\n if name.startswith(\"_\"):\n return x\n schema_cls = object.__getattribute__(self, Schema.__name__)\n if name in schema_cls.relationships:\n if object.__getattribute__(self, PillowtalkBase.UNMARSHALL): # locking marshalling prevents recursion\n # Decide to use original value or fullfilled value...\n r = schema_cls.relationships[name]\n if type(x) is r.mod2: # if relationship is already fullfilled\n return x\n else:\n new_x = self.fullfill_relationship(name)\n if new_x is not None and new_x != [None] and new_x != []:\n return new_x\n if issubclass(x.__class__, Relationship):\n raise TypeError(\"Relationship \\\"name\\\" was not correctly resolved.\")\n return x", "def select_record_type(self, label):\n self.wait_until_modal_is_open()\n locator = lex_locators[\"object\"][\"record_type_option\"].format(label)\n self._jsclick(locator)\n self.selenium.click_button(\"Next\")", "def test_select_matching_relationships(self, select_by_values):\n # Set up mocks and test data\n select_by_values.return_value = ['one', 'two']\n test_relationship_type_name = 'eats'\n test_subject_name = 'otter'\n test_object_name = 'mussels'\n test_rel_number = 99\n \n # Make call\n matches = FactQuery._select_matching_relationships(test_relationship_type_name,\n subject_name=test_subject_name,\n object_name=test_object_name,\n relationship_number=test_rel_number)\n # Verify result\n self.assertEqual(['one', 'two'], matches)\n\n # Verify mocks\n select_by_values.assert_called_once_with(relationship_type_name=test_relationship_type_name,\n subject_name=test_subject_name,\n object_name=test_object_name,\n relationship_number=test_rel_number)", "def get_relationships_by_genus_type_for_source_on_date(self, source_id, relationship_genus_type, from_, to):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_source_on_date\n relationship_list = []\n for relationship in self.get_relationships_by_genus_type_for_source():\n if overlap(from_, to, relationship.start_date, relationship.end_date):\n relationship_list.append(relationship)\n return objects.RelationshipList(relationship_list, runtime=self._runtime)", "def get_specific_item(model, type, id):\n if(type == \"office\"):\n return model.get_office(id)\n elif(type == \"party\"):\n return model.get_party(id)\n return []", "def get_objective(self, objtype=None, get_value=True):\n if objtype is None:\n # Find active objective\n if self.act_objective is not None:\n obj = self.act_objective\n else:\n raise ValueError('No active objective found.')\n\n else:\n assert objtype in self.objectives.keys(), 'Requested objective does not exist. 
Please choose from {}'.format(\n self.objectives.keys())\n obj = self.objectives[objtype]\n\n if get_value:\n return value(obj)\n else:\n return obj", "def related_type(self):\n return addresser.ObjectType.NONE", "def relationship_type(self):\n return addresser.RelationshipType.ATTRIBUTES", "def setRelationshipTypes(self, value):\n return self._set(relationshipTypes=value)", "def __call__(\n self, relationship: 'Any', registry: 'Any' = None, **field_kwargs: dict\n ) -> NestedFilterableConnectionField:\n model = relationship.mapper.entity\n model_type = registry.get_type_for_model(model)\n\n filters: 'Optional[FilterSet]' = self.model_filters.get(model)\n\n if filters is not None:\n field_kwargs.setdefault(\n self.model_loader_class.filter_arg, filters\n )\n\n return self.field_class(model_type._meta.connection, **field_kwargs)", "def get_edge_query(from_id, rel_type, to_id):\n # TODO: what to do with labels here.\n\n return ((\"MERGE (a:user {id: %s}) \"\n \"MERGE (b:user {id: %s}) \"\n \"MERGE a-[:%s]->b \"\n \"RETURN *\") % (from_id, to_id, rel_type))", "def predicate_objects(\n self, subject: Optional[\"_SubjectType\"] = None\n ) -> Generator[Tuple[\"_PredicateType\", \"_ObjectType\"], None, None]:\n for t, c in self.triples((subject, None, None)):\n yield t[1], t[2]" ]
[ "0.6398576", "0.59592456", "0.58598846", "0.5763038", "0.5670848", "0.56461585", "0.5524951", "0.5501221", "0.5444976", "0.5378021", "0.53130275", "0.5282662", "0.5178828", "0.51488453", "0.5099441", "0.50218856", "0.501885", "0.49724635", "0.48888737", "0.4885239", "0.4870398", "0.47908905", "0.4773507", "0.4773079", "0.4773079", "0.47706643", "0.47632754", "0.475571", "0.4748589", "0.47369", "0.4733508", "0.47015163", "0.469982", "0.46924296", "0.46816584", "0.46537262", "0.46534586", "0.46534586", "0.46482477", "0.46427363", "0.46322697", "0.46274534", "0.4617024", "0.45962933", "0.45679304", "0.4567732", "0.45636082", "0.45555142", "0.45437607", "0.45366707", "0.45320258", "0.45320258", "0.44981506", "0.4489257", "0.44867972", "0.4477616", "0.4450492", "0.44486728", "0.44392726", "0.4435898", "0.4434155", "0.44260097", "0.4414502", "0.4410352", "0.44048232", "0.43990844", "0.4371726", "0.43694797", "0.43604606", "0.4356539", "0.43498892", "0.43284762", "0.43254146", "0.43242013", "0.4317927", "0.43121356", "0.43076244", "0.4299702", "0.4298694", "0.42981917", "0.42968062", "0.42902678", "0.42893368", "0.42815664", "0.4280225", "0.4278723", "0.4272953", "0.42720768", "0.42706934", "0.42669326", "0.4262487", "0.4261246", "0.4259506", "0.42581233", "0.4256303", "0.42562813", "0.42493078", "0.42490372", "0.42443427", "0.42311236" ]
0.72244817
0
Select Relationships with specified relationship_type, count, subject, and object.
def select_by_values(cls, relationship_type_name=None, relationship_number=None, subject_name=None, object_name=None):
    query = db.session.query(cls).\
        join(RelationshipType).\
        filter(RelationshipType.relationship_type_name==relationship_type_name)
    if relationship_number:
        query = query.filter(Relationship.count==relationship_number)
    if subject_name:
        subject_concept = sa_orm.aliased(Concept)
        query = query.\
            join(subject_concept, Relationship.subject_id==subject_concept.concept_id).\
            filter(subject_concept.concept_name==subject_name)
    if object_name:
        object_concept = sa_orm.aliased(Concept)
        query = query.\
            join(object_concept, Relationship.object_id==object_concept.concept_id).\
            filter(object_concept.concept_name==object_name)
    return query.all()
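A minimal, hypothetical usage sketch of the select_by_values classmethod above — it assumes a Flask-SQLAlchemy application in which Relationship, RelationshipType, and Concept models are wired up as the query implies; the relationship names and values below are illustrative only and are not taken from the dataset:

# Hypothetical usage of Relationship.select_by_values; model setup and data are assumptions.
# Fetch every "eats" relationship whose subject is "otter" and whose object is "mussels".
matches = Relationship.select_by_values(
    relationship_type_name='eats',
    subject_name='otter',
    object_name='mussels',
)

# Optionally narrow by the stored relationship count as well.
frequent = Relationship.select_by_values(
    relationship_type_name='eats',
    subject_name='otter',
    relationship_number=3,
)

for rel in matches:
    print(rel.subject_id, rel.object_id, rel.count)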
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_by_foreign_keys(cls, subject_id=None, object_id=None, relationship_type_id=None):\n filter_clause = sa.and_(\n sa.and_(cls.subject_id == subject_id, cls.object_id == object_id),\n cls.relationship_type_id == relationship_type_id)\n return db.session.query(cls).filter(filter_clause).first()", "def test_filter_relationships_by_concept_type__object(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n object=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n object=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n object=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='object')\n\n # Verify results\n self.assertEqual([mock_match_0, mock_match_2], filtered_matches)", "def test_filter_relationships_by_concept_type__subject(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n subject=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n subject=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n subject=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='subject')\n\n # Verify results\n self.assertEqual([mock_match_0, mock_match_2], filtered_matches)", "def get_relationships_by_record_type(self, relationship_record_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_record_type\n # STILL NEED TO IMPLEMENT!!!\n return objects.RelationshipList([])", "def filter_for_term_relationships(src, relationship_type, object_id, target=True):\n filters = [\n Filter(\"type\", \"=\", \"relationship\"),\n Filter(\"relationship_type\", \"=\", relationship_type),\n ]\n if target:\n filters.append(Filter(\"target_ref\", \"=\", object_id))\n else:\n filters.append(Filter(\"source_ref\", \"=\", object_id))\n\n results = src.query(filters)\n return remove_deprecated(results)", "def get_relationships_by_genus_type(self, relationship_genus_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_genus_type\n # NOTE: This implementation currently ignores plenary view\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', DESCENDING)\n return objects.RelationshipList(result, runtime=self._runtime, proxy=self._proxy)", "def test_select_by_concept_type(self, select_relationships):\n select_relationships.return_value = [Mock(subject='hello'), Mock(subject='kitty')]\n mock_concept_type = Mock(name='concept_type')\n\n result = FactQuery._select_by_concept_type(mock_concept_type)\n 
self.assertEqual(['hello', 'kitty'], result)\n select_relationships.assert_called_once_with('is', object_name=mock_concept_type)", "def get_ids_related_to(cls, object_type, related_type, related_ids=[]):\n\n if isinstance(related_ids, (int, long)):\n related_ids = [related_ids]\n\n if not related_ids:\n return db.session.query(Relationship.source_id).filter(sql.false())\n\n destination_ids = db.session.query(Relationship.destination_id).filter(\n and_(\n Relationship.destination_type == object_type,\n Relationship.source_type == related_type,\n Relationship.source_id.in_(related_ids),\n )\n )\n source_ids = db.session.query(Relationship.source_id).filter(\n and_(\n Relationship.source_type == object_type,\n Relationship.destination_type == related_type,\n Relationship.destination_id.in_(related_ids),\n )\n )\n\n queries = [destination_ids, source_ids]\n queries.extend(cls.get_extension_mappings(\n object_type, related_type, related_ids))\n queries.extend(cls.get_special_mappings(\n object_type, related_type, related_ids))\n\n return cls._array_union(queries)", "def relationship_count(self, r_type=None, n_ids=()):\n if r_type is None and not n_ids:\n return len(self._relationships)\n elif not n_ids:\n return len(self._relationships_by_type.get(r_type, ()))\n else:\n return sum(1 for _ in self.relationships(r_type, n_ids))", "def by_type(self, type):\n return self.filter(related_type__title=type)", "def get_relationships_by_parent_genus_type(self, relationship_genus_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type\n # STILL NEED TO IMPLEMENT!!!\n return objects.RelationshipList([])", "def test_select_by_concept_type__no_matches(self, select_relationships):\n select_relationships.return_value = []\n\n result = FactQuery._select_by_concept_type(Mock(name='concept_types'))\n self.assertEqual([], result)", "def _get_objects(self, object_type, **kwargs):\r\n return self.parent_connection._get_objects(object_type,\r\n sys_id=self.sys_id,\r\n **kwargs)", "def get_relationships_by_genus_type_for_destination(self, destination_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_destination\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'destinationId': str(destination_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def _get_objects(self, object_type, **kwargs):\r\n\r\n return self.parent_connection._get_objects(object_type,\r\n sys_id=self.sys_id,\r\n jbod_id=self.jbod_id,\r\n **kwargs)", "def get_queryset(self):\n\n return Relationship.objects.filter(\n Q(from_person=self.request.user.person) |\n Q(to_person=self.request.user.person))", "def relationship_types(self):\n return frozenset(self._relationships_by_type.keys())", "def get_relationships_by_genus_type_for_peers(self, source_id, destination_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_peers\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = 
collection.find(\n dict({'sourceId': str(source_id),\n 'destinationId': str(destination_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def test_select_matching_relationships(self, select_by_values):\n # Set up mocks and test data\n select_by_values.return_value = ['one', 'two']\n test_relationship_type_name = 'eats'\n test_subject_name = 'otter'\n test_object_name = 'mussels'\n test_rel_number = 99\n \n # Make call\n matches = FactQuery._select_matching_relationships(test_relationship_type_name,\n subject_name=test_subject_name,\n object_name=test_object_name,\n relationship_number=test_rel_number)\n # Verify result\n self.assertEqual(['one', 'two'], matches)\n\n # Verify mocks\n select_by_values.assert_called_once_with(relationship_type_name=test_relationship_type_name,\n subject_name=test_subject_name,\n object_name=test_object_name,\n relationship_number=test_rel_number)", "def get_relationships_by_genus_type_for_source(self, source_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_source\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'sourceId': str(source_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def search_stix21_objects(rel_list, object_name, rel_type='any') -> list:\n searched_rel_list = list()\n for relationship in rel_list:\n if relationship[3] == rel_type or rel_type == 'any':\n if relationship[0] == object_name and relationship[0] == relationship[2]:\n searched_rel_list.append(relationship)\n else:\n for position in range(len(relationship)):\n if relationship[position] == object_name:\n searched_rel_list.append(relationship)\n return searched_rel_list", "def _get_objects(self, object_type, **kwargs):\r\n\r\n return self.parent_connection._get_objects(object_type,\r\n sys_id=self.sys_id,\r\n brick_id=self.brick_id,\r\n **kwargs)", "def get_relationship_query(self):\n # Implemented from template for\n # osid.resource.ResourceQuerySession.get_resource_query_template\n return queries.RelationshipQuery(runtime=self._runtime)", "def test_which_reptiles__species_subject(self, select_relationships, concept_is_species, \n filter_by_concept_type):\n # Set up mocks and test data\n parsed_query = Mock(name='parsed_query',\n text='which reptiles eat bugs',\n subject_name='reptiles',\n object_name='bugs',\n relationship_type_name='eat',\n relationship_number=3,\n relationship_negation=False)\n fact_query = FactQuery(parsed_query=parsed_query)\n\n mock_match_1 = Mock(name='match_1',\n subject=Mock(concept_name='subject_1'))\n mock_match_2 = Mock(name='match_2',\n subject=Mock(concept_name='subject_2'))\n select_relationships.return_value = [mock_match_1, mock_match_2]\n concept_is_species.side_effect = [False, True]\n filter_by_concept_type.return_value = [mock_match_1, mock_match_2]\n\n # Make call\n results = fact_query._which_animal_query()\n\n # Verify results\n self.assertEqual(set(['subject_1', 'subject_2']), set(results))\n\n # Verify mocks\n select_relationships.assert_called_once_with(\n 'eat', object_name='bugs', relationship_number=3)\n\n call_args_list = 
concept_is_species.call_args_list\n self.assertEqual(2, len(call_args_list))\n self.assertEqual(call('bugs'), call_args_list[0])\n self.assertEqual(call('reptiles'), call_args_list[1])\n\n filter_by_concept_type.assert_called_once_with(\n [mock_match_1, mock_match_2], 'reptiles', relationship_attr='subject')", "def get_relationships_by_query(self, relationship_query):\n # Implemented from template for\n # osid.resource.ResourceQuerySession.get_resources_by_query\n and_list = list()\n or_list = list()\n for term in relationship_query._query_terms:\n if '$in' in relationship_query._query_terms[term] and '$nin' in relationship_query._query_terms[term]:\n and_list.append(\n {'$or': [{term: {'$in': relationship_query._query_terms[term]['$in']}},\n {term: {'$nin': relationship_query._query_terms[term]['$nin']}}]})\n else:\n and_list.append({term: relationship_query._query_terms[term]})\n for term in relationship_query._keyword_terms:\n or_list.append({term: relationship_query._keyword_terms[term]})\n if or_list:\n and_list.append({'$or': or_list})\n view_filter = self._view_filter()\n if view_filter:\n and_list.append(view_filter)\n if and_list:\n query_terms = {'$and': and_list}\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(query_terms).sort('_id', DESCENDING)\n else:\n result = []\n return objects.RelationshipList(result, runtime=self._runtime, proxy=self._proxy)", "def test_find_relation_types(self):\n pass", "def test_animal_attribute_query__species_subject(self, select_relationships, \n concept_is_species, filter_by_concept_type):\n # Set up mocks and test data\n mock_1 = Mock(name='mock_1')\n mock_2 = Mock(name='mock_2')\n mock_3 = Mock(name='mock_3')\n select_relationships.side_effect = [[], [mock_1, mock_2, mock_3]]\n concept_is_species.return_value = True\n filter_by_concept_type.return_value = [mock_1, mock_2]\n\n parsed_query = Mock(name='parsed_query',\n text='do birds have wings',\n subject_name='birds',\n object_name='wings',\n relationship_type_name='have',\n relationship_number=2)\n fact_query = FactQuery(parsed_query=parsed_query)\n\n # Make call\n result = fact_query._animal_attribute_query()\n \n # Verify results\n self.assertEqual('yes', result)\n\n # Verify mocks\n call_args_list = select_relationships.call_args_list\n self.assertEqual(2, len(call_args_list))\n expected_calls = [\n call('have', subject_name='birds', object_name='wings', relationship_number=2),\n call('have', object_name='wings', relationship_number=2)]\n self.assertEqual(expected_calls, call_args_list)", "def relation( self, obj, relType ):\n raise NotImplementedError(\"relation\")", "def relationships(self, r_type=None, n_ids=()):\n if r_type is None:\n r_sets = []\n else:\n r_sets = [self._relationships_by_type.get(r_type, frozenset())]\n if not n_ids or (hasattr(n_ids, \"__iter__\") and all(n_id is None for n_id in n_ids)):\n pass\n elif isinstance(n_ids, Sequence):\n for n_index, n_id in enumerate_nodes(n_ids):\n if n_id is not None:\n r_sets.append({r_id for r_id, i in self._relationships_by_node.get(n_id, ())\n if i == n_index})\n elif isinstance(n_ids, Set):\n for n_id in n_ids:\n if n_id is not None:\n r_sets.append({r_id for r_id, i in self._relationships_by_node.get(n_id, ())})\n else:\n raise TypeError(\"Nodes must be supplied as a Sequence or a Set\")\n if r_sets:\n return iter(reduce(and_operator, r_sets))\n else:\n return iter(self._relationships)", "def test_get_relationship_templates(self):\n pass", "def 
get_biolink_association(subject_node: Node, object_node: Node, association_config: dict = None) -> dict:\n if association_config is None:\n association_config = associations\n subject_query = list(subject_node.labels)[0]\n object_query = list(object_node.labels)[0]\n association = {association: requirements for association, requirements in association_config.items()\n if subject_query in requirements[0]\n and object_query in requirements[1]}\n if len(association) is 0:\n association = {Association: ['*']}\n return association", "def find_related_nodes(reltype, inst=None):\n if inst is None:\n inst = ctx.instance\n ret = []\n for rel in inst.relationships:\n if reltype in rel.type_hierarchy:\n ret.append(rel.target)\n return ret", "def relationships(self):", "def relationship(*args, b: bool=True, relationshipData: Union[AnyStr, List[AnyStr], bool]=\"\",\n q=True, query=True, e=True, edit=True, **kwargs)->Union[None, Any]:\n pass", "def test_which_animals__species_object(self, select_relationships, concept_is_species, \n filter_by_concept_type):\n # Set up mocks and test data\n parsed_query = Mock(name='parsed_query',\n text='which animals eat reptiles',\n subject_name='animals',\n object_name='reptiles',\n relationship_type_name='eat',\n relationship_number=3,\n relationship_negation=False)\n fact_query = FactQuery(parsed_query=parsed_query)\n\n mock_match_1 = Mock(name='match_1',\n subject=Mock(concept_name='subject_1'))\n mock_match_2 = Mock(name='match_2',\n subject=Mock(concept_name='subject_2'))\n mock_match_3 = Mock(name='match_3',\n subject=Mock(concept_name='subject_3'))\n select_relationships.side_effect = [[mock_match_1, mock_match_2],\n [mock_match_1, mock_match_3]]\n concept_is_species.side_effect = [True, False]\n filter_by_concept_type.side_effect = [\n [mock_match_1, mock_match_3],\n [mock_match_1, mock_match_2, mock_match_1, mock_match_3]]\n\n # Make call\n results = fact_query._which_animal_query()\n\n # Verify results\n self.assertEqual(set(['subject_1', 'subject_2', 'subject_3']), set(results))\n\n # Verify mocks\n call_args_list = select_relationships.call_args_list\n self.assertEqual(2, len(call_args_list))\n expected_calls = [\n call('eat', object_name='reptiles', relationship_number=3),\n call('eat', relationship_number=3)]\n self.assertEqual(expected_calls, call_args_list)\n\n call_args_list = concept_is_species.call_args_list\n self.assertEqual(2, len(call_args_list))\n self.assertEqual(call('reptiles'), call_args_list[0])\n self.assertEqual(call('animals'), call_args_list[1])\n\n call_args_list = filter_by_concept_type.call_args_list\n self.assertEqual(2, len(call_args_list))\n expected_calls = [\n call([mock_match_1, mock_match_3], \n 'reptiles', \n relationship_attr='object'),\n call([mock_match_1, mock_match_2, mock_match_1, mock_match_3],\n 'animals',\n relationship_attr='subject')]\n self.assertEqual(expected_calls, call_args_list)", "def prefetch_relations(relations, fields):\n\n\t\t# Create dict with content_type_id keys containing dict of pk's of that content type's objects\n\t\tcontent_objects = {}\n\t\tfor relation in relations:\n\t\t\tcontent_objects.setdefault(getattr(relation, fields['content_type_id']), set()).add(getattr(relation, fields['object_id']))\n\n\t\t# Grab the distinct content types\n\t\tcontent_types = ContentType.objects.in_bulk(content_objects.keys())\n\n\t\t# Do queries for each content type and store results\n\t\trelation_cache = {}\n\t\tfor content_type, fk_list in content_objects.items():\n\t\t\tct_model = 
content_types[content_type].model_class()\n\t\t\trelation_cache[content_type] = ct_model.objects.public().in_bulk(list(fk_list))\n\n\t\t# Cache each result on django's internal cache for the Relation object\n\t\tfor relation in relations:\n\t\t\ttry:\n\t\t\t\tsetattr(relation, '_content_object_cache', relation_cache[getattr(relation, fields['content_type_id'])][getattr(relation, fields['object_id'])])\n\t\t\texcept KeyError:\n\t\t\t\tpass", "def get_related_objects(self, obj_type):\n suffix = self._get_api_suffix(obj_type)\n if obj_type == self.__class__ and suffix == 'adversaries':\n return []\n endpoint = self._get_api_endpoint() + '/' + suffix\n results = self.tq.get(endpoint)\n if 'data' not in results:\n return []\n\n tr = []\n for obj in results['data']:\n inst = obj_type(self.tq)\n inst.fill_from_api_response(obj)\n tr.append(inst)\n return tr", "def get_queryset(self):\n\t\treturn super(CourseDocument, self).get_queryset().select_related(\n\t\t 'belongs_to'\n\t\t)", "def get_relationships_by_ids(self, relationship_ids):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_ids\n # NOTE: This implementation currently ignores plenary view\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n object_id_list = []\n for i in relationship_ids:\n object_id_list.append(ObjectId(self._get_id(i, 'relationship').get_identifier()))\n result = collection.find(\n dict({'_id': {'$in': object_id_list}},\n **self._view_filter()))\n result = list(result)\n sorted_result = []\n for object_id in object_id_list:\n for object_map in result:\n if object_map['_id'] == object_id:\n sorted_result.append(object_map)\n break\n return objects.RelationshipList(sorted_result, runtime=self._runtime, proxy=self._proxy)", "def predicates(\n self,\n subject: Optional[\"_SubjectType\"] = None,\n object: Optional[\"_ObjectType\"] = None,\n ) -> Generator[\"_PredicateType\", None, None]:\n for t, c in self.triples((subject, None, object)):\n yield t[1]", "def predicates(\n self,\n subject: Optional[\"_SubjectType\"] = None,\n object: Optional[\"_ObjectType\"] = None,\n ) -> Generator[\"_PredicateType\", None, None]:\n for t, c in self.triples((subject, None, object)):\n yield t[1]", "def test_animal_attribute_query__species_object(self, select_relationships, \n concept_is_species, filter_by_concept_type):\n # Set up mocks and test data\n mock_1 = Mock(name='mock_1')\n mock_2 = Mock(name='mock_2')\n mock_3 = Mock(name='mock_3')\n select_relationships.side_effect = [[], [mock_1, mock_2, mock_3]]\n concept_is_species.side_effect = [False, True]\n filter_by_concept_type.return_value = [mock_1]\n\n parsed_query = Mock(name='parsed_query',\n text='do herons eat mammals',\n subject_name='herons',\n object_name='mammals',\n relationship_type_name='eat',\n relationship_number=2)\n fact_query = FactQuery(parsed_query=parsed_query)\n\n # Make call\n result = fact_query._animal_attribute_query()\n \n # Verify results\n self.assertEqual('yes', result)\n\n # Verify mocks\n call_args_list = select_relationships.call_args_list\n self.assertEqual(2, len(call_args_list))\n expected_calls = [\n call('eat', subject_name='herons', object_name='mammals', relationship_number=2),\n call('eat', subject_name='herons', relationship_number=2)]\n self.assertEqual(expected_calls, call_args_list)", "def collect_predicates(subject, row, structure_row, files, stc, prefixes):\n related_predicates = set()\n for related_row in stc.iterrows():\n if (\n 
related_row[1][\"File\"] == row.File\n ) and (\n related_row[1][\"Sheet\"] == row.Sheet\n ) and (\n related_row[1][\"Indexed_Entity\"] == row.Column_Header\n ):\n if related_row[1][\"Type\"] == \"foreign key\":\n for foreign_pred in foreign(\n structure_row,\n related_row[1],\n files,\n stc,\n prefixes\n ):\n related_predicates.add(foreign_pred)\n elif (\n row[\"Definition or Relationship\"] in [\n \"rdfs:label\",\n \"schema:text\"\n ]\n ):\n related_predicates = related_predicates | label(\n row,\n structure_row,\n prefixes\n )\n tp = type_pred(row, prefixes)\n if tp:\n related_predicates.add(tp)\n return(related_predicates)", "def predicate_objects(\n self, subject: Optional[\"_SubjectType\"] = None\n ) -> Generator[Tuple[\"_PredicateType\", \"_ObjectType\"], None, None]:\n for t, c in self.triples((subject, None, None)):\n yield t[1], t[2]", "def predicate_objects(\n self, subject: Optional[\"_SubjectType\"] = None\n ) -> Generator[Tuple[\"_PredicateType\", \"_ObjectType\"], None, None]:\n for t, c in self.triples((subject, None, None)):\n yield t[1], t[2]", "def subject_predicates(\n self, object: Optional[\"_ObjectType\"] = None\n ) -> Generator[Tuple[\"_SubjectType\", \"_PredicateType\"], None, None]:\n for t, c in self.triples((None, None, object)):\n yield t[0], t[1]", "def subject_predicates(\n self, object: Optional[\"_ObjectType\"] = None\n ) -> Generator[Tuple[\"_SubjectType\", \"_PredicateType\"], None, None]:\n for t, c in self.triples((None, None, object)):\n yield t[0], t[1]", "def get_association(relation: str, subject_node: Node, object_node: Node, is_negated: bool, pmid: str = '',\n association_config: dict = None):\n if association_config is None:\n association_config = associations\n most_relevant_relation = list(get_biolink_association(subject_node, object_node, association_config).items())[0][0]\n biolink_relation = most_relevant_relation(id=0,\n subject=subject_node['id'],\n relation=relation,\n object=object_node['id'],\n negated=is_negated,\n publications=[pmid])\n return get_relationship_from_biolink(subject_node, biolink_relation, object_node)", "def get_relationships_by_genus_type_for_destination_on_date(self, destination_id, relationship_genus_type, from_, to):\n raise errors.Unimplemented()", "def delete_relationship(tx, node_value_1=None, node_value_2=None, node_type_1=None, node_type_2=None, relationship=None):\n if node_value_1 is None and node_type_1 is None:\n cql = \"MATCH ()-[u:\" + relationship + \"]-(w:\" + node_type_2 + \"{name:$node_value_2}) \" \\\n \"DELETE u;\"\n try:\n tx.run(cql, node_value_2=node_value_2)\n except Exception as e:\n print(str(e))\n elif node_value_2 is None and node_type_2 is None:\n cql = \"MATCH (s:\" + node_type_1 + \"{name:$node_value_1})-[u:\" + relationship + \"]-() \" \\\n \"DELETE u;\"\n try:\n tx.run(cql, node_value_1=node_value_1)\n except Exception as e:\n print(str(e))\n else:\n cql = \"MATCH (s:\" + node_type_1 + \"{name:$node_value_1})-[u:\" + relationship + \"]-(w:\" + node_type_2 + \"{name:$node_value_2}) \" \\\n \"DELETE u;\"\n try:\n tx.run(cql, node_value_1=node_value_1, node_value_2=node_value_2)\n except Exception as e:\n print(str(e))", "def findAllAppContactRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:APP_CONTACT]->(n2:Person) \"\n \"RETURN ID(n1) , r , r.date , r.hour, ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def _queryset(self):\n return self.type.objects.filter(id__in=self.ids)", "def searchRelations(self):\n subcategory_id = 
self.concept_list.selectedItems()[0].data(Qt.UserRole)[1].id\n self.setConceptDescription()\n result = self.db.search_relation(subcategory_id)\n self.setResult(result, self.relation_list)", "def get_relationship_from_biolink(biolink_subject: Node,\n biolink_association: Association,\n biolink_object: Node):\n properties = {key: value for key, value in biolink_association.__dict__.items() if key != 'id'}\n return Relationship(biolink_subject,\n get_pythonic_name(biolink_association.__class__.__name__),\n biolink_object,\n **properties)", "def subjects(\n self,\n predicate: Optional[\"_PredicateType\"] = None,\n object: Optional[\"_ObjectType\"] = None,\n ) -> Generator[\"_SubjectType\", None, None]:\n for t, c in self.triples((None, predicate, object)):\n yield t[0]", "def subjects(\n self,\n predicate: Optional[\"_PredicateType\"] = None,\n object: Optional[\"_ObjectType\"] = None,\n ) -> Generator[\"_SubjectType\", None, None]:\n for t, c in self.triples((None, predicate, object)):\n yield t[0]", "def get_relationships_by_genus_type_for_peers_on_date(self, source_id, destination_id, relationship_genus_type, from_, to):\n raise errors.Unimplemented()", "def get_related(self, obj, queryset_or_model, num=None):\n queryset, model = get_queryset_and_model(queryset_or_model)\n model_table = qn(model._meta.db_table)\n content_type = ContentType.objects.get_for_model(obj)\n related_content_type = ContentType.objects.get_for_model(model)\n query = \"\"\"\n SELECT %(model_pk)s, COUNT(related_tagged_item.object_id) AS %(count)s\n FROM %(model)s, %(tagged_item)s, %(tag)s, %(tagged_item)s related_tagged_item\n WHERE %(tagged_item)s.object_id = %%s\n AND %(tagged_item)s.content_type_id = %(content_type_id)s\n AND %(tag)s.id = %(tagged_item)s.tag_id\n AND related_tagged_item.content_type_id = %(related_content_type_id)s\n AND related_tagged_item.tag_id = %(tagged_item)s.tag_id\n AND %(model_pk)s = related_tagged_item.object_id\"\"\"\n if content_type.pk == related_content_type.pk:\n # Exclude the given instance itself if determining related\n # instances for the same model.\n query += \"\"\"\n AND related_tagged_item.object_id != %(tagged_item)s.object_id\"\"\"\n query += \"\"\"\n GROUP BY %(model_pk)s\n ORDER BY %(count)s DESC\n %(limit_offset)s\"\"\"\n query = query % {\n 'model_pk': '%s.%s' % (model_table, qn(model._meta.pk.column)),\n 'count': qn('count'),\n 'model': model_table,\n 'tagged_item': qn(self.model._meta.db_table),\n 'tag': qn(self.model._meta.get_field('tag').rel.to._meta.db_table),\n 'content_type_id': content_type.pk,\n 'related_content_type_id': related_content_type.pk,\n 'limit_offset': num is not None and connection.ops.limit_offset_sql(num) or '',\n }\n\n cursor = connection.cursor()\n cursor.execute(query, [obj.pk])\n object_ids = [row[0] for row in cursor.fetchall()]\n if len(object_ids) > 0:\n # Use in_bulk here instead of an id__in lookup, because id__in would\n # clobber the ordering.\n object_dict = queryset.in_bulk(object_ids)\n return [object_dict[object_id] for object_id in object_ids \\\n if object_id in object_dict]\n else:\n return []", "def get_related(self, obj, queryset_or_model, num=None):\r\n queryset, model = get_queryset_and_model(queryset_or_model)\r\n model_table = qn(model._meta.db_table)\r\n content_type = ContentType.objects.get_for_model(obj)\r\n related_content_type = ContentType.objects.get_for_model(model)\r\n query = \"\"\"\r\n SELECT %(model_pk)s, COUNT(related_tagged_item.object_id) AS %(count)s\r\n FROM %(model)s, %(tagged_item)s, %(tag)s, 
%(tagged_item)s related_tagged_item\r\n WHERE %(tagged_item)s.object_id = %%s\r\n AND %(tagged_item)s.content_type_id = %(content_type_id)s\r\n AND %(tag)s.id = %(tagged_item)s.tag_id\r\n AND related_tagged_item.content_type_id = %(related_content_type_id)s\r\n AND related_tagged_item.tag_id = %(tagged_item)s.tag_id\r\n AND %(model_pk)s = related_tagged_item.object_id\"\"\"\r\n if content_type.pk == related_content_type.pk:\r\n # Exclude the given instance itself if determining related\r\n # instances for the same model.\r\n query += \"\"\"\r\n AND related_tagged_item.object_id != %(tagged_item)s.object_id\"\"\"\r\n query += \"\"\"\r\n GROUP BY %(model_pk)s\r\n ORDER BY %(count)s DESC\r\n %(limit_offset)s\"\"\"\r\n query = query % {\r\n 'model_pk': '%s.%s' % (model_table, qn(model._meta.pk.column)),\r\n 'count': qn('count'),\r\n 'model': model_table,\r\n 'tagged_item': qn(self.model._meta.db_table),\r\n 'tag': qn(self.model._meta.get_field('tag').rel.to._meta.db_table),\r\n 'content_type_id': content_type.pk,\r\n 'related_content_type_id': related_content_type.pk,\r\n 'limit_offset': num is not None and connection.ops.limit_offset_sql(num) or '',\r\n }\r\n\r\n cursor = connection.cursor()\r\n cursor.execute(query, [obj.pk])\r\n object_ids = [row[0] for row in cursor.fetchall()]\r\n if len(object_ids) > 0:\r\n # Use in_bulk here instead of an id__in lookup, because id__in would\r\n # clobber the ordering.\r\n object_dict = queryset.in_bulk(object_ids)\r\n return [object_dict[object_id] for object_id in object_ids \\\r\n if object_id in object_dict]\r\n else:\r\n return []", "def select_children(self):\n objs = []\n for obj in pm.selected():\n objs.extend(obj.listRelatives(ad=True, type=[\"transform\", \"joint\"]))\n pm.select(objs, add=True)", "def test_get_relation_type(self):\n pass", "def get_relationship_type(\n r: Union[\"ModelRelationship\", t.RelationshipType, t.RelationshipName, str]\n) -> t.RelationshipType:\n relationship_type = r.type if isinstance(r, ModelRelationship) else r\n return t.RelationshipType(normalize_relationship_type(relationship_type))", "def all_for_object(self, content_object, **kwargs):\r\n return self.filter(**self._generate_object_kwarg_dict(content_object, **kwargs))", "def select_related(self, *fields):\n self._not_support_combined_queries(\"select_related\")\n if self._fields is not None:\n raise TypeError(\n \"Cannot call select_related() after .values() or .values_list()\"\n )\n\n obj = self._chain()\n if fields == (None,):\n obj.query.select_related = False\n elif fields:\n obj.query.add_select_related(fields)\n else:\n obj.query.select_related = True\n return obj", "def get_relationships_by_genus_type_for_source_on_date(self, source_id, relationship_genus_type, from_, to):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_source_on_date\n relationship_list = []\n for relationship in self.get_relationships_by_genus_type_for_source():\n if overlap(from_, to, relationship.start_date, relationship.end_date):\n relationship_list.append(relationship)\n return objects.RelationshipList(relationship_list, runtime=self._runtime)", "def _get_derived_feature_types(self, limit):\n\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n raw = '/'.join((self.rawdir, 'feature_relationship'))\n logger.info(\"determining some feature types based on relationships\")\n with open(raw, 'r') as f:\n f.readline() # read the header row; skip\n filereader = csv.reader(f, 
delimiter='\\t', quotechar='\\\"')\n for line in filereader:\n (feature_relationship_id, subject_id, object_id, type_id, rank,\n value) = line\n\n if int(type_id) in [133526, 129784]:\n # derived_tp_assoc_alleles\n self.feature_types[subject_id] = \\\n Genotype.genoparts['transgenic_insertion']\n sid = self.idhash['allele'].get(subject_id)\n model.addType(sid, self.feature_types[subject_id])\n elif int(type_id) in [133533, 129791]:\n # only take the derived_sf_assoc_alleles\n # my subject is a reagent_targeted_gene\n # my object is the dsRNA\n self.feature_types[subject_id] = \\\n Genotype.genoparts['reagent_targeted_gene']\n sid = self.idhash['allele'].get(subject_id)\n model.addType(sid, self.feature_types[subject_id])\n\n else:\n continue\n\n return", "def get(self, *args, **kwargs):\n self.before_get(args, kwargs)\n\n relationship_field, model_relationship_field, related_type_, related_id_field = self._get_relationship_data()\n related_view = self.schema._declared_fields[relationship_field].related_view\n related_view_kwargs = self.schema._declared_fields[relationship_field].related_view_kwargs\n\n obj, data = self._data_layer.get_relationship(model_relationship_field,\n related_type_,\n related_id_field,\n kwargs)\n\n for key, value in copy(related_view_kwargs).items():\n if isinstance(value, str) and value.startswith('<') and value.endswith('>'):\n tmp_obj = obj\n for field in value[1:-1].split('.'):\n tmp_obj = getattr(tmp_obj, field)\n related_view_kwargs[key] = tmp_obj\n\n result = {'links': {'self': request.path,\n 'related': url_for(related_view, **related_view_kwargs)},\n 'data': data}\n\n qs = QSManager(request.args, self.schema)\n if qs.include:\n schema = compute_schema(self.schema, dict(), qs, qs.include)\n\n serialized_obj = schema.dump(obj)\n result['included'] = serialized_obj.data.get('included', dict())\n\n self.after_get(result)\n return result", "def filter_by_type_and_id(src, object_type, object_id, source_name):\n filters = [\n Filter(\"type\", \"=\", object_type),\n Filter(\"id\", \"=\", object_id),\n Filter(\"external_references.source_name\", \"=\", source_name),\n ]\n results = src.query(filters)\n return remove_deprecated(results)", "def filter(self, destination_object=None, source_object=None, **kwargs):\n if destination_object:\n kwargs.update({\n \"destination_id\": destination_object.pk,\n \"destination_type\": get_for_model(destination_object),\n })\n if source_object:\n kwargs.update({\n \"source_id\": source_object.pk,\n \"source_type\": get_for_model(source_object),\n })\n return super(RelatedContentQuerySet, self).filter(**kwargs)", "def read_relationships(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(relationship_query, (person_id,)) # note a tuple is needed as a parameter value for SQLITE\n\n relation_list = []\n for row in c:\n _relation = Relationship()\n _relation.person_id = row[\"personid\"]\n _relation.person.first_name = row[\"firstname\"]\n _relation.person.last_name = row[\"lastname\"]\n _relation.person.middle_initial = row[\"middleinitial\"]\n _relation.related_person_id = row[\"related_personid\"]\n _relation.relationship_id = row[\"relationshipid\"]\n _relation.relationship_type = row[\"relationshiptype\"]\n _relation.relationship_type_description = row[\"key\"]\n relation_list.append(_relation)\n conn.close()\n return relation_list\n except:\n return []", "def get_query():\n return 
CiscoVlanIftableRelationshipQuery", "def filter(self, **kwargs):\n related_names = []\n for argname, _ in kwargs.iteritems():\n related_name = argname.split('__')\n if len(related_name) > 1:\n related_names.append(\"__\".join(related_name[:-1]))\n if len(related_names) > 0:\n return super(\n JeevesQuerySet, self).filter(\n **kwargs).select_related(*related_names)\n else:\n return super(JeevesQuerySet, self).filter(**kwargs)", "def matching_objects(self, filter_deleted):\n from rome.core.orm.utils import get_literal_query\n from rome.lang.sql_parser import QueryParser\n from rome.core.rows.rows import construct_rows\n\n read_deleted = self.read_deleted\n if filter_deleted:\n read_deleted = \"no\"\n\n if self._autoflush:\n if self.session is not None:\n self.session.commit()\n\n if not self.query_tree:\n sql_query = get_literal_query(self.sa_query)\n parser = QueryParser()\n query_tree = parser.parse(sql_query)\n else:\n query_tree = self.query_tree\n\n if not self.entity_class_registry:\n self.entity_class_registry = self._extract_entity_class_registry()\n entity_class_registry = self.entity_class_registry\n\n # Collecting variables of sub queries\n subqueries_variables = {}\n for (variable_name, sub_query_tree) in query_tree.variables.iteritems():\n sub_query = Query()\n sub_query.set_query_tree(sub_query_tree)\n sub_query.set_entity_class_registry(entity_class_registry)\n result = sub_query.all()\n subqueries_variables[variable_name] = result\n\n rows = construct_rows(query_tree,\n entity_class_registry,\n read_deleted=read_deleted,\n subqueries_variables= subqueries_variables)\n\n def row_function(row, column_descriptions, decoder):\n from rome.core.session.utils import ObjectAttributeRefresher\n final_row = []\n one_is_an_object = False\n object_attribute_refresher = ObjectAttributeRefresher()\n for column_description in column_descriptions:\n if type(column_description[\"type\"]) in [Integer, String]:\n row_key = column_description[\"entity\"].__table__.name.capitalize(\n )\n property_name = column_description[\"name\"]\n value = None\n if row_key in row and property_name in row[row_key]:\n value = row[row_key].get(property_name, None)\n else:\n # It seems that we are parsing the result of a function call\n column_description_expr = column_description.get(\"expr\",\n None)\n if column_description_expr is not None:\n property_name = str(column_description_expr)\n value = row.get(property_name, None)\n if value is not None:\n final_row += [value]\n else:\n logging.error(\n \"Could not understand how to get the value of '%s' with this: '%s'\"\n % (column_description.get(\"expr\", \"??\"), row))\n elif type(column_description[\"type\"]) == DeclarativeMeta:\n one_is_an_object = True\n row_key = column_description[\"entity\"].__table__.name\n new_object = column_description[\"entity\"]()\n attribute_names = map(lambda x: x.key, list(\n column_description[\"entity\"].__table__.columns))\n for attribute_name in attribute_names:\n value = decoder.decode(row[row_key].get(attribute_name,\n None))\n setattr(new_object, attribute_name, value)\n\n if \"___version_number\" in row[row_key]:\n setattr(new_object, \"___version_number\", row[row_key][\"___version_number\"])\n\n load_options = None\n if hasattr(self.sa_query, \"_with_options\"):\n load_options = self.sa_query._with_options\n object_attribute_refresher.refresh(new_object, load_options=load_options)\n final_row += [new_object]\n else:\n logging.error(\"Unsupported type: '%s'\" %\n (column_description[\"type\"]))\n if not one_is_an_object:\n 
return [final_row]\n else:\n return final_row\n\n def row_function_subquery(row, attributes, decoder):\n result = []\n for attribute in attributes:\n tablename = attribute.split(\".\")[0]\n attribute_name = attribute.split(\".\")[1]\n result += [row[tablename][attribute_name]]\n return result\n\n decoder = Decoder()\n\n if len(self.sa_query.column_descriptions) > 0:\n final_rows = map(lambda r: row_function(\n r, self.sa_query.column_descriptions, decoder), rows)\n else:\n final_rows = map(lambda r: row_function_subquery(\n r, self.query_tree.attributes, decoder), rows)\n\n if len(self.sa_query.column_descriptions) <= 1:\n # Flatten the list\n final_rows = [item for sublist in final_rows for item in sublist]\n\n # Add watcher on objects\n if self.session is not None:\n for obj in final_rows:\n if hasattr(obj, \"id\"):\n self.session.watch(obj)\n\n return final_rows", "async def determine_contact(rec, ref: Ref, type='parent'):\n if type not in ['parent', 'impacted']:\n raise ValueError(\"Type must be impacted or parent!\")\n\n LOG.debug(f\"Determing {type} for object id: %s -- %s-%s...\", rec.id,\n rec.Name, rec.Type)\n offset_min = rec.last_seen - 2.5\n\n if type == \"parent\":\n accpt_colors = ['Blue', 'Red'\n ] if rec.Color == 'Violet' else [rec.Color]\n\n # query_filter = (\n # ~(Object.type.startswith('Decoy'))\n # & ~(Object.c.type.startswith('Misc'))\n # & ~(Object.c.type.startswith('Projectile'))\n # & ~(Object.c.type.startswith('Weapon'))\n # &\n # ~(Object.c.type.startswith(\"Ground+Light+Human+Air+Parachutist\")))\n query_filter = \" (type not like ('%Decoy%')\"\\\n \" AND type not like ('%Misc%')\"\\\n \" AND type not like ('%Weapon%')\"\\\n \" AND type not like ('%Projectile%')\"\\\n \" AND type not like ('%Ground+Light+Human+Air+Parachutist%'))\"\n\n elif type == 'impacted':\n accpt_colors = ['Red'] if rec.Color == 'Blue' else ['Red']\n # query_filter = (Object.c.type.startswith('Air+'))\n query_filter = \" type like ('%Air+%')\"\n\n else:\n raise NotImplementedError\n\n color_query = f\"\"\" color in ('{\"','\".join(accpt_colors)}')\"\"\"\n id_query = f\" id != {rec.id} \"\n query = f\"\"\" SELECT id FROM object\n WHERE {query_filter} AND {color_query} AND {id_query}\n \"\"\"\n\n nearby_objs = await DB.fetch(query)\n\n closest = []\n for nearby in nearby_objs:\n near = ref.obj_store[nearby[0]]\n if ((near.last_seen <= offset_min\n and not (near.Type.startswith('Ground') and near.alive == 1))\n and (abs(near.alt - rec.alt) < 2000)\n and (abs(near.lat - rec.lat) <= 0.0005)\n and (abs(near.lon - rec.lon) <= 0.0005)):\n continue\n\n prox = compute_dist(rec.cart_coords, near.cart_coords)\n LOG.debug(\"Distance to object %s - %s is %s...\", near.Name, near.Type,\n str(prox))\n if not closest or (prox < closest[1]):\n closest = [near.id, prox, near.Name, near.Pilot, near.Type]\n\n if not closest:\n return None\n\n if closest[1] > 1000:\n LOG.warning(\n f\"Rejecting closest {type} for {rec.id}-{rec.Name}-{rec.Type}: \"\n \"%s %sm...%d checked!\", closest[4],\n str(closest[1]), len(nearby_objs))\n\n return None\n\n return closest", "def _filter_by_audit(cls, predicate):\n return Relationship.query.filter(\n Relationship.source_type == cls.__name__,\n Relationship.source_id == cls.id,\n Relationship.destination_type == Audit.__name__,\n ).join(Audit, Relationship.destination_id == Audit.id).filter(\n predicate(Audit.slug)\n ).exists() | Relationship.query.filter(\n Relationship.destination_type == cls.__name__,\n Relationship.destination_id == cls.id,\n Relationship.source_type == 
Audit.__name__,\n ).join(Audit, Relationship.source_id == Audit.id).filter(\n predicate(Audit.slug)\n ).exists()", "def related_objects(self, related_model, related_fields, objs):\n predicate = reduce(operator.or_, (\n query_utils.Q(**{'%s__in' % related_field.name: objs})\n for related_field in related_fields\n ))\n return related_model._default_manager.using(self.using).filter(\n predicate\n )", "def objects(\n self,\n subject: Optional[\"_SubjectType\"] = None,\n predicate: Optional[\"_PredicateType\"] = None,\n ) -> Generator[\"_ObjectType\", None, None]:\n for t, c in self.triples((subject, predicate, None)):\n yield t[2]", "def objects(\n self,\n subject: Optional[\"_SubjectType\"] = None,\n predicate: Optional[\"_PredicateType\"] = None,\n ) -> Generator[\"_ObjectType\", None, None]:\n for t, c in self.triples((subject, predicate, None)):\n yield t[2]", "def get(self, dto):\n assert dto.using in list(self.models.keys())\n Relation = self.models[dto.using]\n return self.session.query(Relation)\\\n .filter(Relation.purpose == dto.purpose)\\\n .filter(Relation.sender == dto.sender)\\\n .filter(Relation.recipient == dto.recipient)\\\n .first()", "def test_animal_values_query__species_subject(self, select_relationships, concept_is_species,\n filter_by_concept_type):\n # Set up mocks and test data\n concept_is_species.return_value = True\n mock_1 = Mock(name='mock_1', object=Mock(concept_name='mosquitoes'))\n mock_2 = Mock(name='mock_2', object=Mock(concept_name='flies'))\n mock_3 = Mock(name='mock_2', object=Mock(concept_name='salmon'))\n select_relationships.return_value = [mock_1, mock_2, mock_3]\n filter_by_concept_type.return_value = [mock_1, mock_2]\n fact_query = FactQuery()\n\n # Make call\n result = fact_query._animal_values_query(\n relationship_type_name='eat', subject_name='reptiles')\n\n # Verify result\n self.assertEqual(['flies', 'mosquitoes'], result)\n\n # Verify mocks\n concept_is_species.assert_called_once_with('reptiles')\n select_relationships.assert_called_once_with('eat')\n filter_by_concept_type.assert_called_once_with(\n [mock_1, mock_2, mock_3], 'reptiles', relationship_attr='subject')", "def findAllMakeTestRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:MAKE_TEST]->(n2:Test) \"\n \"RETURN ID(n1) , r , r.date , r.hour , r.result , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def get_relatives(\n self, reltypes=None, relfilter=None, fetch_objects=True, ignore_missing=True\n ):\n ret = defaultdict(set)\n relations = self.icalendar_component.get(\"RELATED-TO\", [])\n if not isinstance(relations, list):\n relations = [relations]\n for rel in relations:\n if relfilter and not relfilter(rel):\n continue\n reltype = rel.params.get(\"RELTYPE\", \"PARENT\")\n if reltypes and not reltype in reltypes:\n continue\n ret[reltype].add(str(rel))\n\n if fetch_objects:\n for reltype in ret:\n uids = ret[reltype]\n ret[reltype] = []\n for obj in uids:\n try:\n ret[reltype].append(self.parent.object_by_uid(obj))\n except error.NotFoundError:\n if not ignore_missing:\n raise\n return ret", "async def get_metadata_for_object_type(\n dbcon: DBConnection, object_type: str) -> Iterable[object_models.ObjectMetadata]:\n q = '''select metadata.object_type, metadata.object_id, metadata.key, metadata.value\n from object_metadata as metadata\n where metadata.object_type=%s'''\n return [object_models.ObjectMetadata(*row) for row in await dbcon.fetch_all(q, (object_type,))]", "def get_assocs(**kwargs):\n if kwargs[\"type\"] == \"first\":\n assoc = 
Association.query.filter(Association.level >= kwargs[\"level\"],\n Association.users_id == kwargs[\"users_id\"],\n Association.skill_id == kwargs[\"skill_id\"]).first()\n else:\n assoc = Association.query.filter_by(users_id=kwargs[\"users_id\"]).all(\n )\n\n return assoc", "def learn_distributions(self, relation_id: int):\n adjacency_matrix = load_single_adjacency_matrix(self.input_dir, relation_id)\n result = M1Result(relation_id=relation_id)\n\n # number of edges for this relation\n # used for fact count as well as relation distribution\n result.num_edges = adjacency_matrix.nnz\n\n domain_distribution = {}\n range_distribution = {}\n\n subject_ids_row = adjacency_matrix.row\n object_ids_row = adjacency_matrix.col\n\n # iterate over all non-zero fields of the adjacency matrix\n # iterate over all edges that exist for the current relation\n for index in range(result.num_edges):\n # get subject and object id from the adjacency matrix\n subject_id = subject_ids_row[index]\n object_id = object_ids_row[index]\n # create multi types from the two sets of entity types for each the subject and the object\n\n multi_type, = self.dense_entity_types[subject_id].nonzero()\n subject_multi_type = self.multitype_index[frozenset(multi_type)]\n\n multi_type, = self.dense_entity_types[object_id].nonzero()\n object_multi_type = self.multitype_index[frozenset(multi_type)]\n\n # if the subject's multi type is not known add it to the relation domain distribution and create an\n # empty relation range distribution for that multi type\n if subject_multi_type not in domain_distribution:\n domain_distribution[subject_multi_type] = 0\n range_distribution[subject_multi_type] = {}\n\n # if the object's multi type is not known add it to the relation range distribution of the subject's\n # multi type\n if object_multi_type not in range_distribution[subject_multi_type]:\n range_distribution[subject_multi_type][object_multi_type] = 0\n\n # increment the number of occurrences of the subject's multi type in the relation domain distribution\n domain_distribution[subject_multi_type] += 1\n # increment the number of occurrences of the object's multi type in the relation range distribution\n # of the subject's multi type\n range_distribution[subject_multi_type][object_multi_type] += 1\n\n result.domain_distribution = domain_distribution\n result.range_distribution = range_distribution\n\n self.result_queue.put(result)", "def get_documents(doc_type):\n doc_type = 1 if doc_type == 'registration' else 2\n return Documents.query.filter_by(type=doc_type).all()", "def _add_relationships(\n self,\n obj: BaseContent,\n relationships: List[graph.Relationship],\n nodes_to: List[graph.Node],\n ) -> None:\n for node_to, rel in zip(nodes_to, relationships):\n if not rel.start_node or not rel.end_node:\n raise ValueError(\"Relationships must have start and end nodes\")\n obj.add_relationship(\n RelationshipType(rel.type),\n RelationshipData(\n relationship_type=rel.type,\n source_id=rel.start_node.element_id,\n target_id=rel.end_node.element_id,\n content_item_to=self._id_to_obj[node_to.element_id],\n is_direct=True,\n **rel,\n ),\n )", "def subject_objects(\n self, predicate: Optional[\"_PredicateType\"] = None\n ) -> Generator[Tuple[\"_SubjectType\", \"_ObjectType\"], None, None]:\n for t, c in self.triples((None, predicate, None)):\n yield t[0], t[2]", "def subject_objects(\n self, predicate: Optional[\"_PredicateType\"] = None\n ) -> Generator[Tuple[\"_SubjectType\", \"_ObjectType\"], None, None]:\n for t, c in self.triples((None, 
predicate, None)):\n yield t[0], t[2]", "def test_filter_selected_rels_raises_value_err():\n # Act and assert\n with raises(ValueError):\n _, _ = filter_selected_relationships(\n SimpleNodeSchema(),\n selected_relationships={InterestingAssetToSubResourceRel()},\n )", "def rel_from_domain(relation, category):\n # query for instance_of foolowed by sublcass_of and domain category\n query = f\"\"\"\n SELECT DISTINCT ?s ?sLabel ?property ?propertyLabel ?o ?oLabel\n {{\n hint:Query hint:optimizer \"None\"\n VALUES ?property {{wdt:{relation}}}\n ?s wdt:P31* / wdt:P279* wd:{category} . # Find items in the domain\n ?s ?property ?o .\n MINUS {{?s wdt:P31 / wdt:P279* wd:Q2725376 }} # exclude 'demographics'\n SERVICE wikibase:label {{ bd:serviceParam wikibase:language \"[AUTO_LANGUAGE],en\" . }}\n }}\n \"\"\"\n return apply_request(query)", "def _get_relationship_data(self):\n relationship_field = request.path.split('/')[-1]\n if current_app.config.get('DASHERIZE_API') == True:\n relationship_field = relationship_field.replace('-', '_')\n\n if relationship_field not in get_relationships(self.schema).values():\n raise RelationNotFound('', \"{} has no attribute {}\".format(self.schema.__name__, relationship_field))\n\n related_type_ = self.schema._declared_fields[relationship_field].type_\n related_id_field = self.schema._declared_fields[relationship_field].id_field\n model_relationship_field = get_model_field(self.schema, relationship_field)\n\n return relationship_field, model_relationship_field, related_type_, related_id_field", "def query(self, type=None, attribute_names=None, offset=None, count=None):\n\n # There is a bug in proton (PROTON-1846) wherein we cannot ask for\n # too many rows. So, as a safety we are going to ask only for\n # MAX_ALLOWED_COUNT_PER_REQUEST. Since this is used by both qdstat\n # and qdmanage, we have determined that the optimal value for\n # MAX_ALLOWED_COUNT_PER_REQUEST is 500\n MAX_ALLOWED_COUNT_PER_REQUEST = 500\n\n response_results = []\n response_attr_names = []\n if offset is None:\n offset = 0\n\n if count is None or count == 0:\n # count has not been specified. 
For each request the\n # maximum number of rows we can get without proton\n # failing is MAX_ALLOWED_COUNT_PER_REQUEST\n request_count = MAX_ALLOWED_COUNT_PER_REQUEST\n else:\n request_count = min(MAX_ALLOWED_COUNT_PER_REQUEST, count)\n\n while True:\n request = self.node_request(\n {'attributeNames': attribute_names or []},\n operation='QUERY', entityType=type, offset=offset,\n count=request_count)\n\n response = self.call(request)\n\n if not response_attr_names:\n response_attr_names += response.body['attributeNames']\n\n response_results += response.body['results']\n\n if len(response.body['results']) < request_count:\n break\n\n if count:\n len_response_results = len(response_results)\n if count == len_response_results:\n break\n\n if count - len_response_results < request_count:\n request_count = count - len_response_results\n\n offset += request_count\n\n query_reponse = Node.QueryResponse(self,\n response_attr_names,\n response_results)\n return query_reponse", "def get_relationship(self, relationship_id):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resource\n # NOTE: This implementation currently ignores plenary view\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find_one(\n dict({'_id': ObjectId(self._get_id(relationship_id, 'relationship').get_identifier())},\n **self._view_filter()))\n return objects.Relationship(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)", "def find_objects_by_type():\n try:\n keyword = request.form[\"keyword\"]\n object_type = request.form[\"object_type\"]\n\n # Get entities based on the selection\n entities = g.user.get_api().get_by_object_types(keyword, object_type)\n\n # Parse response object into table data\n data = raw_entities_to_table_data(entities)\n\n # If no entities were found reutrn with failure state and message\n result = get_result_template()\n if len(data[\"data\"]) == 0:\n result[\"status\"] = \"FAIL\"\n result[\"message\"] = 'No entities of type \"{TYPE}\" were found.'.format(\n TYPE=object_type\n )\n else:\n result[\"status\"] = \"SUCCESS\"\n result[\"data\"] = {\"table_field\": data}\n return jsonify(result_decorator(result))\n\n except Exception as e:\n result = get_result_template()\n result[\"status\"] = \"FAIL\"\n result[\"message\"] = str(e)\n return jsonify(result_decorator(result))", "def get_relations_for_event(\n self,\n event_id,\n relation_type=None,\n event_type=None,\n aggregation_key=None,\n limit=5,\n direction=\"b\",\n from_token=None,\n to_token=None,\n ):\n\n where_clause = [\"relates_to_id = ?\"]\n where_args = [event_id]\n\n if relation_type is not None:\n where_clause.append(\"relation_type = ?\")\n where_args.append(relation_type)\n\n if event_type is not None:\n where_clause.append(\"type = ?\")\n where_args.append(event_type)\n\n if aggregation_key:\n where_clause.append(\"aggregation_key = ?\")\n where_args.append(aggregation_key)\n\n pagination_clause = generate_pagination_where_clause(\n direction=direction,\n column_names=(\"topological_ordering\", \"stream_ordering\"),\n from_token=attr.astuple(from_token) if from_token else None,\n to_token=attr.astuple(to_token) if to_token else None,\n engine=self.database_engine,\n )\n\n if pagination_clause:\n where_clause.append(pagination_clause)\n\n if direction == \"b\":\n order = \"DESC\"\n else:\n order = \"ASC\"\n\n sql = \"\"\"\n SELECT event_id, topological_ordering, stream_ordering\n FROM event_relations\n INNER JOIN events USING 
(event_id)\n WHERE %s\n ORDER BY topological_ordering %s, stream_ordering %s\n LIMIT ?\n \"\"\" % (\n \" AND \".join(where_clause),\n order,\n order,\n )\n\n def _get_recent_references_for_event_txn(txn):\n txn.execute(sql, where_args + [limit + 1])\n\n last_topo_id = None\n last_stream_id = None\n events = []\n for row in txn:\n events.append({\"event_id\": row[0]})\n last_topo_id = row[1]\n last_stream_id = row[2]\n\n next_batch = None\n if len(events) > limit and last_topo_id and last_stream_id:\n next_batch = RelationPaginationToken(last_topo_id, last_stream_id)\n\n return PaginationChunk(\n chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token\n )\n\n return self.db.runInteraction(\n \"get_recent_references_for_event\", _get_recent_references_for_event_txn\n )", "def selected_relationships(self):\n return self._selected_relationships", "def get_edge_query(from_id, rel_type, to_id):\n # TODO: what to do with labels here.\n\n return ((\"MERGE (a:user {id: %s}) \"\n \"MERGE (b:user {id: %s}) \"\n \"MERGE a-[:%s]->b \"\n \"RETURN *\") % (from_id, to_id, rel_type))", "def get_queryset(self):\r\n queryset: QuerySet = super().get_queryset().prefetch_related('film_work_genre', 'genres', 'film_work_person',\r\n 'persons', ) \\\r\n .annotate(\r\n actors=ArrayAgg('persons__person__full_name', filter=Q(persons__role__exact='actor'), distinct=True),\r\n directors=ArrayAgg('persons__person__full_name', filter=Q(persons__role__exact='director'), distinct=True),\r\n writers=ArrayAgg('persons__person__full_name', filter=Q(persons__role__exact='writer'), distinct=True),\r\n genres=ArrayAgg('film_genres__genre__name', distinct=True)\r\n )\r\n\r\n return queryset.values()", "def import_relations(self, content_type, asset_id=None):\n self.cache_content_types()\n additional_params = {}\n if asset_id is not None:\n additional_params['object_id'] = asset_id\n for ci in cdb.CI.objects.filter(\n content_type=content_type,\n **additional_params\n ).order_by('id'):\n obj = ci.content_object\n if content_type == self.network_content_type:\n self.import_network_relations(\n network=ci,\n )\n elif content_type == self.device_content_type:\n self.import_device_relations(\n obj=obj,\n ci=ci,\n )\n elif content_type == self.venture_content_type:\n self.import_venture_relations(\n obj=obj,\n ci=ci,\n )\n elif content_type == self.venture_role_content_type:\n self.import_role_relations(\n obj=obj,\n ci=ci,\n )\n elif content_type == self.business_line_content_type:\n # top level Ci without parent relations.\n pass\n elif content_type == self.datacenter_content_type:\n # top level Ci without parent relations.\n pass\n elif content_type == self.service_content_type:\n self.import_service_relations(\n obj=obj,\n ci=ci,\n )\n else:\n raise UnknownCTException(content_type)" ]
[ "0.6642475", "0.6234097", "0.61171645", "0.5905707", "0.573527", "0.5697212", "0.558768", "0.55415577", "0.5488156", "0.5187914", "0.5047478", "0.50362355", "0.5031117", "0.5009696", "0.5007061", "0.4995968", "0.4954581", "0.49293298", "0.49109417", "0.48943478", "0.4888402", "0.48587754", "0.484546", "0.48172846", "0.48087224", "0.48063368", "0.47886306", "0.47740555", "0.47653723", "0.47555318", "0.47451228", "0.474413", "0.4730197", "0.47286162", "0.4727538", "0.47250444", "0.47232872", "0.46981233", "0.4677622", "0.4666488", "0.4666488", "0.4650107", "0.4626659", "0.45879287", "0.45879287", "0.45580885", "0.45580885", "0.4532119", "0.45245197", "0.45176393", "0.45069325", "0.45068946", "0.44731733", "0.4454546", "0.44430885", "0.44430885", "0.4439302", "0.44373798", "0.4433516", "0.44263574", "0.4422124", "0.44203776", "0.4408724", "0.44053453", "0.44049188", "0.44048777", "0.43938038", "0.4392588", "0.43904558", "0.4382987", "0.43710765", "0.4360833", "0.4358236", "0.43471637", "0.4341916", "0.4322157", "0.43181986", "0.43181986", "0.43167678", "0.43079013", "0.430484", "0.42831534", "0.4276501", "0.4263045", "0.42574635", "0.42557067", "0.42452756", "0.4241308", "0.4241308", "0.42366338", "0.42282972", "0.4226673", "0.42029142", "0.42015618", "0.42008314", "0.42002645", "0.41988537", "0.41880298", "0.41852045", "0.41799968" ]
0.7105433
0
Validate requests decorator with Cerberus
def validate_request_cerberus(schema):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            body_json = request.get_json()
            current_app.logger.info(body_json)
            v = Validator(schema, require_all=True)
            v.allow_unknown = True  # TODO: allow request params other than the ones defined on the schema level
            if not v.validate(body_json):
                valid_params_list = ', '.join(schema.keys())
                return response_fail(f"You must call with all request params: {valid_params_list}")
            return func(*args, **kwargs)
        return wrapper
    return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def valid_request(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except BadRequest as e:\n raise InvalidRequest(description='request parameters, queries or body format are invalid.',\n code=e.code, message=e.data.get('message'))\n\n return wrapper", "def validate_twilio_request(func):\n @wraps(func)\n def decorated_function(request, *args, **kwargs):\n # Create an instance of the RequestValidator class\n validator = RequestValidator(os.environ.get('TWILIO_AUTH_TOKEN'))\n\n # Validate the request using its URL, POST data,\n # and X-TWILIO-SIGNATURE header\n request_valid = validator.validate(\n request.build_absolute_uri(),\n request.POST,\n request.META.get('HTTP_X_TWILIO_SIGNATURE', ''))\n\n # Continue processing the request if it's valid, return a 403 error if\n # it's not\n if request_valid:\n return func(request, *args, **kwargs)\n else:\n return HttpResponseForbidden()\n return decorated_function", "def check_request(request_schema):\n def decorator(f):\n @functools.wraps(f)\n def wrapper(self, addr, request):\n data, err = request_schema.load(request)\n if err:\n return Header.ERROR, Error.WRONG_REQUEST\n else:\n return f(self, addr, data)\n\n return wrapper\n return decorator", "def validate_request(f):\n\n @wraps(f)\n def wrap(self, **kwargs):\n\n data = {}\n is_error, errmsg, req = DomainConstraintView._get_req_data(kwargs)\n if is_error:\n return errmsg\n\n try:\n for key in req:\n if key == 'convalidated':\n data[key] = True if (req[key] == 'true' or req[key] is\n True) else False\n else:\n data[key] = req[key]\n\n except Exception as e:\n return internal_server_error(errormsg=str(e))\n\n self.request = data\n return f(self, **kwargs)\n\n return wrap", "def check_request(views_func):\n @wraps(views_func)\n def wrapper(*args, **kwargs):\n try:\n return views_func(*args, **kwargs)\n except (KeyError, ValueError) as ex:\n return HttpResponseBadRequest(str(ex))\n return wrapper", "def check_csrf_token(func):\n def new_fn(self, req):\n if 'csrf_token' not in req.params:\n return exc.HTTPForbidden(\"You must provide a CSRF token\")\n\n csrf_token = req.params['csrf_token']\n if not security.valid_csrf_token(csrf_secret, csrf_token):\n return exc.HTTPForbidden(\"Invalid CSRF token\")\n\n return func(self, req)\n\n new_fn.exposed = True\n return new_fn", "def validate_json(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n try:\n request.json\n except:\n return bad_request()\n\n return func(*args, **kwargs)\n\n return wrapper", "def request_is_valid(request):\n return 'method' in request", "def validator(data):\n\n request_validator = cerberus.Validator(SCHEMA)\n if request_validator.validate(data):\n return True\n else:\n return request_validator.errors", "def validate_schema(schema):\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n try:\n validate(request.json, schema)\n except:\n return bad_request()\n\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator", "def validate(**vkargs):\r\n depr('Use route wildcard filters instead.')\r\n def decorator(func):\r\n @functools.wraps(func)\r\n def wrapper(*args, **kargs):\r\n for key, value in vkargs.iteritems():\r\n if key not in kargs:\r\n abort(403, 'Missing parameter: %s' % key)\r\n try:\r\n kargs[key] = value(kargs[key])\r\n except ValueError:\r\n abort(403, 'Wrong parameter format for: %s' % key)\r\n return func(*args, **kargs)\r\n return wrapper\r\n return decorator", "def checkIsValid(f):\n\n @wraps(f)\n def wrapper(self, *args, 
**kwargs):\n if self.validator.isValid:\n return f(self, *args, **kwargs)\n else:\n error = self.validator._exceptionClass('Called: {} method before data validated'.format(f.__name__))\n self.validator._errors[f.__name__] = error\n if self.validator._errorHandler is not None:\n self.validator._errorHandler(error, self.getValidationContext())\n return\n\n return wrapper", "def validate(schema):\n def decorator(func):\n def wrapper(self, req, resp, *args, **kwargs):\n try:\n raw_json = req.stream.read()\n obj = json.loads(raw_json.decode('utf-8'))\n obj['req_id'] = req.context.get('request_id')\n except Exception:\n raise falcon.HTTPBadRequest(\n title='Invalid data',\n description='Could not properly parse the provided data as JSON',\n code='001'\n )\n\n try:\n jsonschema.validate(obj, schema)\n except jsonschema.ValidationError as e:\n raise falcon.HTTPBadRequest(\n title='Failed data validation',\n description=e.message,\n code='002'\n )\n\n return func(self, req, resp, *args, parsed=obj, **kwargs)\n return wrapper\n return decorator", "def post_required(func):\n def post_wrapper(request,*args,**kwds):\n res = http.ResponseBuilder()\n if request.method != 'POST':\n return res.error(\"post is required\").build_json()\n return func(request,*args,**kwds)\n return post_wrapper", "def schema_validation(schema):\n def decorator(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n data = {}\n if request.method in ['POST', 'PATCH', 'PUT']:\n data = request.get_json(force=True)\n elif request.method in ['GET', 'DELETE']:\n data = request.args.to_dict()\n\n v = Validator(schema)\n v.allow_unknown = True\n if v.validate(data):\n return function(*args, **kwargs)\n else:\n return jsonify({'errors': v.errors}), 400\n\n return wrapper\n return decorator", "def do_validate(self, request, _object):\n\n pass", "def check(self):\n invalid = []\n\n if not self.route:\n invalid.append(('route', 'missing'))\n elif not self.route[1] in ['GET', 'POST', 'PUT']:\n invalid.append(('route', 'invalid method: %s' % self.route[1]))\n\n has_2xx = False\n for rcode in self.return_codes:\n code = rcode[0]\n if code >= 200 and code < 300:\n has_2xx = True\n break\n if not has_2xx:\n invalid.append(('return_codes', 'Missing succes return code doc'))\n\n if self.client_auth is None:\n invalid.append(\n ('client_auth', 'Please provide client auth requirement'))\n\n if self.user_auth is None:\n invalid.append(\n ('user_auth', 'Please provide user auth requirement'))\n\n if invalid:\n msgs = []\n for error in invalid:\n msgs.append(\"%s: %s\" % error)\n raise ValueError(\n \"APIFunc for %s is invalid: %s\"\n % (self.viewfunc.__name__,\n ', '.join(msgs)))", "def user_required(f):\n def decorator(*args, **kwargs):\n if \"user\" not in g:\n abort(401)\n return f(*args, **kwargs)\n return decorator", "def check_honeypot(func=None, field_name=None):\n def inner(request, *args, **kwargs):\n response = verify_honeypot_value(request, field_name)\n if response:\n return response\n else:\n return func(request, *args, **kwargs)\n inner = wraps(func)(inner)\n\n if func is None:\n def decorator(func):\n return inner\n return decorator\n return inner", "def require_arguments(required):\n\n def decorator(func):\n def wrapper(request):\n request_params = get_dict_from_request(request)\n for param in required:\n if param not in request_params:\n return APIMissingArgumentResponse(error_msg=param)\n return func(request)\n\n return wrapper\n\n return decorator", "def token_required(func):\n def func_wrapper(self, *args, **kwargs):\n 
auth_token = self.request.headers.get('X-Auth-Token',\n self.request.get('token', ''))\n namespace = self.request.route_kwargs.get('namespace', '')\n try:\n token = base64.urlsafe_b64decode(str(auth_token))\n except TypeError:\n self.abort(412, 'Please update your token')\n try:\n token = auth_models.AuthToken.query(\n auth_models.AuthToken.token == token\n ).get()\n except datastore_errors.BadValueError:\n self.abort(401, 'Incorrect token')\n try:\n payload = jwt.decode(token.token, config.JWT_SECRET,\n algorithms=config.JWT_HASH_ALGORITHM)\n except (jwt.DecodeError, AttributeError):\n return self.abort(401)\n if payload['namespace'] != namespace:\n return self.abort(412, 'Token payload is incorrect.')\n return func(self, *args, **kwargs)\n return func_wrapper", "def validate_user_data(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n user_data = request.get_json()\n if not user_data:\n return {\"message\": \"bad request\"}, 400\n elif \"first_name\" not in user_data or \"last_name\" not in user_data \\\n or \"email\" not in user_data or \"password\" not in user_data:\n return {\"message\": \"bad request, give the required data\"}, 400\n elif user_data[\"first_name\"] == \"\" or user_data[\"last_name\"] == \"\" \\\n or user_data[\"email\"] == \"\" or user_data[\"password\"] == \"\":\n return {\"message\": \"bad request , enter all the required data\"}, 400\n elif user_data[\"first_name\"] == \" \" or user_data[\"last_name\"] == \" \" \\\n or user_data[\"email\"] == \" \" or user_data[\"password\"] == \" \":\n return {\"message\": \"bad request , enter all the required data\"}, 400\n elif \"@\" not in user_data[\"email\"] or \".\" not in user_data[\"email\"]:\n return {\"message\": \"invalid email provided\"}, 400\n return func(*args, **kwargs)\n return wrapper", "def __validate():\n # TODO: implement", "def requires_post(func):\n def decorator(request, *args, **kwargs):\n if DEBUG or request.method == 'POST':\n return func(request, *args, **kwargs)\n return HttpResponseNotAllowed(['POST'])\n return decorator", "def check_input(inputs: Optional[InputType] = None, **kwargs) -> None:\n\n if inputs is None:\n # empty inputs is considered as valid\n return\n\n if hasattr(inputs, '__call__'):\n # it is a function\n inputs = inputs()\n\n kwargs['data'] = inputs\n kwargs['exec_endpoint'] = '/'\n\n if inspect.isasyncgenfunction(inputs) or inspect.isasyncgen(inputs):\n raise ValidationError(\n 'checking the validity of an async generator is not implemented yet'\n )\n\n try:\n from ..request import request_generator\n\n r = next(request_generator(**kwargs))\n if not isinstance(r, Request):\n raise TypeError(f'{typename(r)} is not a valid Request')\n except Exception as ex:\n default_logger.error(f'inputs is not valid!')\n raise BadClientInput from ex", "def validate_request(response):\n openapi_spec = get_openapi_spec()\n\n request = TornadoOpenAPIRequest(response.request, openapi_spec)\n if V30RequestValidator:\n result = V30RequestValidator(openapi_spec).validate(request)\n else:\n result = openapi_request_validator.validate(openapi_spec, request)\n result.raise_for_errors()\n\n response = TornadoOpenAPIResponse(response)\n if V30ResponseValidator:\n result2 = V30ResponseValidator(openapi_spec).validate(request, response)\n else:\n result2 = openapi_response_validator.validate(openapi_spec, request, response)\n result2.raise_for_errors()", "def validate():", "def request_fields(*req_args):\n\tdef decorator(f):\n\t\t@wraps(f)\n\t\tdef decorated(*args, **kwargs):\n\t\t\tif not g.req: return 
json_response(dict(description='JSON object must be passed as HTTP body with this request'), 422)\n\t\t\tmissing = []\n\t\t\tfor arg in req_args:\n\t\t\t\tif not g.req.has_key(arg): missing.append(arg)\n\t\t\tif missing: return json_response(dict(description='Mandatory request fields missing', missing_fields=missing), 422)\n\t\t\treturn f(*args, **kwargs)\n\t\treturn decorated\n\treturn decorator", "def http_var_required(parameter_name):\n def wrap(func):\n def decorator(request, *args, **kwargs):\n if not (parameter_name in request.POST or parameter_name in request.GET):\n return HttpResponseBadRequest('Please define GET or POST parameter '+parameter_name)\n return func(request, *args, **kwargs)\n return decorator\n return wrap", "def valid(self, *args, **kwargs) -> Any:\n pass", "def validate(cls, **kwargs: Any) -> None: # pragma no cover", "def get_request(func):\r\n func.request = True\r\n return func", "def _validate_request(route_mapper, request, schema_data, resolver):\n try:\n validate_incoming_request(\n route_mapper,\n request,\n schema_data,\n resolver\n )\n except jsonschema.exceptions.ValidationError as exc:\n # This will alter our stack trace slightly, but Pyramid knows how\n # to render it. And the real value is in the message anyway.\n raise HTTPClientError(str(exc))", "def validate_with(schema):\n def decorator(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n if not (request.json or request.data or request.form):\n flask_restful.abort(400, message='Validation error.',\n errors=['No data provided.'])\n try:\n data = schema(prepare_request_data(request))\n except voluptuous.MultipleInvalid as err:\n flask_restful.abort(400,\n message='Validation error.',\n errors=[str(e) for e in err.errors])\n setattr(request, 'validated_body', data)\n return f(*args, **kwargs)\n return wrapper\n return decorator", "def verify_request(self, request, client_address):\n\t\treturn True", "def request_validation_error(error):\n return bad_request(error)", "def request_validation_error(error):\n return bad_request(error)", "def request_validation_error(error):\n return bad_request(error)", "def validate(self, apiobj, method, api, param, safe):", "def api_login_required(func):\n\n @wraps(func)\n def decorated_view(*args, **kwargs):\n \"\"\"decorator\"\"\"\n if request.method in EXEMPT_METHODS: # pragma: no cover\n return func(*args, **kwargs)\n # 'func' is a Flask.view.MethodView so we have access to some special\n # params\n cls = func.view_class\n login_required = getattr(cls, \"login_required\", True)\n if (\n bui.auth != \"none\"\n and login_required\n and not bui.config.get(\"LOGIN_DISABLED\", False)\n ):\n if not current_user.is_authenticated:\n if request.headers.get(\"X-From-UI\", False):\n abort(403)\n return Response(\n \"Could not verify your access level for that URL.\\n\"\n \"You have to login with proper credentials\",\n 401,\n {\"WWW-Authenticate\": 'Basic realm=\"Login Required\"'},\n )\n return func(*args, **kwargs)\n\n return decorated_view", "def decorator(func):\n def wrapper(resource, bundle=None, **kwargs):\n \"\"\" wraps the decorated method and verifies a list of required\n fields when a new object is being created.\n\n \"\"\"\n if not isinstance(bundle, Bundle):\n request = bundle\n data = resource.deserialize(\n request, request.body,\n format=request.META.get('CONTENT_TYPE', 'application/json')\n )\n bundle = resource.build_bundle(request=request, data=data)\n else:\n request = None\n\n for required_field in required_fields:\n if required_field not in 
bundle.data:\n response = HttpBadRequest(\n json.dumps(\"missing %s field\" % required_field),\n content_type=bundle.request.META['CONTENT_TYPE'])\n raise ImmediateHttpResponse(response=response)\n return func(resource, bundle=bundle, **kwargs)\n return wrapper", "def accepterror(func):\n\n @wraps(func)\n def return_error(*args, **kwargs):\n if request.headers.get(\"Accept\", '*/*') not in ['*/*', 'application/json', 'application/xml']:\n return not_accept()\n\n return func()\n\n return return_error", "def validate(self):\n ...", "def definition_validator(request):\n return validator(request, DefinitionValidator())", "def validate_view(params=None, match=None, headers=pharaoh.cors.gen_headers,\n json=json,\n json_errors=True,\n invalid_params_exc=BadMatch,\n invalid_match_exc=BadParams\n ):\n \n if params is None and match is None: # Validate the usage of the validator!\n raise ValueError(\"`validate_model` expected a `params` schema or a \"\n \"`match` schema.\")\n\n # Check to see if Validator works as well.\n if params and issubclass(params, (formencode.Schema,\n formencode.FancyValidator)):\n params = params()\n elif params is not None:\n raise ValueError(\"`params` expected a `formencode.Schema` type.\")\n\n if match and issubclass(match, (formencode.Schema,\n formencode.FancyValidator)):\n match = match()\n elif match is not None:\n raise ValueError(\"`match` expected a `formencode.Schema` type.\")\n\n def _decorator(view_callable):\n def _inner(context, request):\n def validate_params(this):\n try:\n data = request.json_body\n except ValueError:\n data = request.params\n\n try:\n data = params.to_python(data)\n except formencode.Invalid as e:\n logging.error(\"`validate_model` failed on request.params \"\n \"%s. Error: %s\" % (data, e.msg))\n\n if json_errors is True:\n body = json.dumps({'msg': e.unpack_errors()})\n else:\n body = \"\" #e.unpack_errors()\n\n raise invalid_params_exc(headers=headers(request),\n body=body)\n else:\n return data\n\n def validate_match(this):\n try:\n data = match.to_python(request.matchdict)\n except formencode.Invalid as e:\n logging.error(\"`validate_model` failed on request.matchdict\"\n \" %s.\" % request.matchdict)\n\n if json_errors is True:\n body = json.dumps({'msg': e.unpack_errors()})\n else:\n body = \"\" #e.unpack_errors()\n\n raise invalid_match_exc(headers=headers(request),\n body=body)\n else:\n return data\n\n if params:\n request.set_property(validate_params, 'validated_params',\n reify=True)\n if match:\n request.set_property(validate_match, 'validated_matchdict',\n reify=True)\n return view_callable(context, request)\n return _inner\n return _decorator", "def requires_auth(self, f):\n @wraps(f)\n def decorated(*args, **kwargs):\n\n self._require_auth_validation(*args, **kwargs)\n\n return f(*args, **kwargs)\n\n return decorated", "def validate_response(self, response):\n pass", "def check_headers(f):\n def wrapped_f(*args, **kwargs):\n if request.method in ('POST', 'PATCH'):\n if request.headers['Content-Type'] != 'application/vnd.api+json':\n error = json.dumps(jsonapi_errors([{'source': '',\n 'detail': \"Content-Type header must be application/vnd.api+json\",\n 'title': 'InvalidRequestHeader',\n 'status': 415}]))\n return make_response(error, 415, {'Content-Type': 'application/vnd.api+json'})\n if request.headers.get('Accept') and request.headers['Accept'] != 'application/vnd.api+json':\n error = json.dumps(jsonapi_errors([{'source': '',\n 'detail': \"Accept header must be application/vnd.api+json\",\n 'title': 
'InvalidRequestHeader',\n 'status': 406}]))\n return make_response(error, 406, {'Content-Type': 'application/vnd.api+json'})\n return f(*args, **kwargs)\n return wrapped_f", "def __validate__(self):", "def validation(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def status_before_must_be(*valid_start_statuses):\r\n def decorator_func(func):\r\n \"\"\"\r\n Decorator function that gets returned\r\n \"\"\"\r\n @functools.wraps(func)\r\n def with_status_check(obj, *args, **kwargs):\r\n if obj.status not in valid_start_statuses:\r\n exception_msg = (\r\n u\"Error calling {} {}: status is '{}', must be one of: {}\"\r\n ).format(func, obj, obj.status, valid_start_statuses)\r\n raise VerificationException(exception_msg)\r\n return func(obj, *args, **kwargs)\r\n\r\n return with_status_check\r\n\r\n return decorator_func", "def decorator_func(func):\r\n @functools.wraps(func)\r\n def with_status_check(obj, *args, **kwargs):\r\n if obj.status not in valid_start_statuses:\r\n exception_msg = (\r\n u\"Error calling {} {}: status is '{}', must be one of: {}\"\r\n ).format(func, obj, obj.status, valid_start_statuses)\r\n raise VerificationException(exception_msg)\r\n return func(obj, *args, **kwargs)\r\n\r\n return with_status_check", "def requiresAuth(f):\n def decorated(*args, **kwargs):\n\n try:\n token = jwt_token_from_header()\n except AuthorizationError:\n abort(400, 'no autorization')\n\n try:\n token_decoded = jwt.decode(token, secret)\n args+=(token_decoded,)\n except jwt.ExpiredSignature:\n abort(401, 'token is expired')\n except jwt.DecodeError:\n abort(401, 'Error decoding signature')\n \n return f(*args, **kwargs)\n \n return decorated", "def autz_required(permission, context=None):\n def decorator(func):\n\n @wraps(func)\n async def wrapper(*args):\n request = (args[-1].request\n if isinstance(args[-1], web.View)\n else args[-1])\n\n if await autz.permit(request, permission, context):\n return await func(*args)\n\n raise web.HTTPForbidden()\n\n return wrapper\n\n return decorator", "def swagger_validate(f):\n @wraps(f)\n def swagger_validated_function(*args, **kwargs):\n converted_uri = request.path\n # convert /pet/mypetsid to /pet/{petId}\n for key, value in request.view_args.items():\n target = '{{{0}}}'.format(key)\n converted_uri = converted_uri.replace(str(value), target)\n # Grab the swagger spec for this specific uri and request type\n request_spec = spec.get_op_for_request(\n request.method.lower(), converted_uri)\n # cycle through the params and check any params that are set or required\n # by the schema\n for param in request_spec.params.values():\n param_spec = get_param_type_spec(param)\n # TODO - grab out other request types that we care about\n param_value = None\n if param.location == 'formData':\n param_value = request.form.get(param.name)\n elif param.location == 'path':\n param_value = request.view_args.get(param.name)\n if param_value or param.required:\n try:\n validate_schema_object(spec, param_spec, param_value)\n except Exception as e:\n abort(400, str(e))\n return f(*args, **kwargs)\n return swagger_validated_function", "def test_validators():", "def __call__( request, c ):", "def __call__( request, c ):", "def check_chief(function_to_decorate):\r\n @wraps(function_to_decorate)\r\n def decorated_function(*args, **kwargs):\r\n \tif g.my['rank'] > 15:\r\n \t\tabort(401)\r\n \treturn function_to_decorate(*args, **kwargs)\r\n return 
decorated_function", "def parse_request(*args, **kwargs):\n parser = reqparse.RequestParser(bundle_errors=True)\n for arg in args:\n parser.add_argument(arg)\n\n def decorator(f):\n @functools.wraps(f)\n def inner(*fargs, **fkwargs):\n fkwargs.update(parser.parse_args())\n return f(*fargs, **fkwargs)\n\n return inner\n\n return decorator", "def valid(schema=None):\n def dec(fun):\n @wraps(fun)\n def d_func(self, ctx, data, *a, **kw):\n try:\n validate(data['params'], schema)\n except ValidationError as err:\n raise InvalidParams(err)\n except SchemaError as err:\n raise InternalError(err)\n return fun(self, ctx, data['params'], *a, **kw)\n return d_func\n return dec", "def requires_get(func):\n def decorator(request, *args, **kwargs):\n if DEBUG or request.method == 'GET':\n return func(request, *args, **kwargs)\n return HttpResponseNotAllowed(['GET'])\n return decorator", "def test_validate_authorization_request_required_parameters(self):\n\n request = self.make_request()\n scopes, credentials = self.auth.validate_authorization_request(request)\n\n self.assertListEqual(scopes, request.scope.split())\n assert credentials['client_id'] == request.client_id\n assert credentials['redirect_uri'] == request.redirect_uri\n assert credentials['response_type'] == request.response_type\n assert credentials['state'] == request.state\n\n self.validator.validate_client_id\\\n .assert_called_once_with(request.client_id, request)\n self.validator.validate_redirect_uri\\\n .assert_called_once_with(request.client_id, request.redirect_uri, request)", "def validate_json(func):\n\n # get the function name because we're going to\n # see if we need SCHEMA_POST, SCHEMA_GET, etc.\n function_name = func.__name__.upper()\n\n def wrapped_func(self, *args, **kwargs):\n respective_schema = getattr(self, 'SCHEMA_' + function_name)\n json_request = get_valid_json_or_abort(respective_schema)\n return func(self, json_request, *args, **kwargs)\n\n return wrapped_func", "def should_validate(self):\n \n return self.request.method in self.validate_methods", "def token_required(f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"validate token provided\"\"\"\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return make_response(jsonify({\"message\" : \"Please sign-up and login\"}), 401)\n\n try:\n data = jwt.decode(token, Config.SECRET)\n except:\n return make_response(jsonify({\n \"message\" : \"kindly provide a valid token in the header\"}), 401)\n return f(*args, **kwargs)\n\n return decorated", "def on_request_validation_error(err):\n print(err)\n return jsonify(message='Bad request'), 400", "def check(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n verify_jwt_in_request()\n token = get_jwt_identity()\n if argument.lower() == 'machine':\n if token['is_user_token'] is False:\n from api.services.data_source_token import \\\n DataSourceTokenService\n _token_usage_counter_add(token['data_source_token']['id'])\n if DataSourceTokenService.check_if_token_is_active(\n DataSourceTokenService,\n token['data_source_token']['id']) is False:\n return ErrorObject.create_response(\n ErrorObject, HTTPStatus.FORBIDDEN,\n 'Token has been revoked')\n else:\n return fn(*args, **kwargs)\n else:\n return ErrorObject.create_response(\n ErrorObject, HTTPStatus.FORBIDDEN,\n 'Unable to access this resource with provided token')\n elif argument.lower() == 'user':\n if token['is_user_token'] is False:\n _token_usage_counter_add(token['data_source_token']['id'])\n 
return ErrorObject.create_response(\n ErrorObject, HTTPStatus.FORBIDDEN,\n 'Unable to access this resource with provided token')\n else:\n return fn(*args, **kwargs)\n else:\n raise ValueError('Unsupported argument provided')\n\n return wrapper", "def validate(self):", "def validate(self):", "def requires_dataset():\n def decorator(view_func):\n \"\"\" for registred and logged user. NO redirect to login\"\"\"\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n if request.method == 'POST':\n if not request.POST.get('dataset_revision_id', request.POST.get('datastream-dataset_revision_id', None)):\n raise DatasetRequiredException()\n\n return view_func(request, *args, **kwargs)\n\n return _wrapped_view\n return decorator", "def api_key_required(func):\n\t@wraps(func)\n\tdef decorated_view(*args, **kwargs):\n\t\treturn func(*args,**kwargs)\n\treturn decorated_view", "def slack_read_required(function):\n\n def wrapped(request, *args, **kwargs):\n if not instanceof (request, SlackRequest):\n raise RuntimeError(\"wrapped object must be instance of SlackRequest\")\n \n if not request.slack_access(READ): return _Http403()\n return function(request, *args, **kwargs)\n\n if (function.__doc__ != None):\n wrapped.__doc__=function.__doc__+\"\\n\\n[decorated by @slack_read_required]\\n\"\n wrapped.__name__=function.__name__\n return wrapped", "def token_required(func):\n @wraps(func)\n def decorator(*args,**kwargs):\n token = request.headers.get('x-access-token') or request.headers.get('X-Access-Token')\n\n if not token:\n abort(400,description=\"Token Missing\")\n \n try:\n data = jwt.decode(token,current_app.config['SECRET_KEY'],algorithms=[\"HS256\"])\n curr_user = Users.query.filter_by(public_key=data[\"public_key\"]).first()\n token = BlacklistToken.query.filter_by(token=token).first()\n if token:\n abort(401,description=\"Invalid Token\")\n except:\n abort(401,\"Invalid token\")\n return func(curr_user,*args,**kwargs)\n return decorator", "def billing_agent_required(func):\n\n def wrapper(request, *args, **kwargs):\n\n if not base_check(request):\n return redirect('{0}?next={1}'.format(reverse('core_login'), request.path))\n\n agent_for = models.BillingAgent.objects.filter(users__id__exact=request.user.pk)\n\n if not agent_for and not request.user.is_staff:\n raise PermissionDenied\n\n return func(request, *args, **kwargs)\n\n return wrapper", "def client_required(f):\n @wraps(f)\n def client_decorator(*args, **kwargs):\n if session.get('logged_in') and session.get('type') == 'Client':\n return f(*args, **kwargs)\n else:\n abort(401)\n return client_decorator", "def check_credentials(method):\n\n @functools.wraps(method)\n def wrapper(*args, **kwargs):\n \"\"\"Check if the request is valid.\"\"\"\n response = {\n \"meta\": {\n \"status\": kwargs.pop(\"status\", True),\n \"verbose\": kwargs.pop(\"verbose\", \"OK\")\n },\n \"content\": None\n }\n if not response[\"meta\"][\"status\"]:\n cherrypy.response.headers['Content-Type'] = 'application/json'\n cherrypy.response.status = 400\n return json.dumps(response)\n return method(*args, **kwargs)\n\n return wrapper", "def auth_required(f):\n\n @wraps(f)\n def _verify(*args, **kwargs):\n\n token = request.headers.get(\"Token\", \"\")\n api_key = request.headers.get(\"key\", \"\")\n\n invalid_msg = {\"error\": \"Ошибка доступа\", \"autheticated\": False}\n expired_msg = {\"error\": \"Истёкшая сессия\", \"autheticated\": False}\n\n session = Session()\n if token:\n try:\n data = jwt.decode(token, 
current_app.config[\"SECRET_KEY\"])\n user = session.query(User).filter_by(email=data[\"sub\"][\"email\"]).first()\n session.close()\n if not user:\n return jsonify({\"error\": \"User not found\"}), 404\n if user.banned:\n return jsonify({\"error\": \"Access denied\"}), 403\n return f(user, *args, **kwargs)\n except jwt.ExpiredSignatureError:\n return jsonify(expired_msg), 403\n except jwt.InvalidTokenError:\n return jsonify(invalid_msg), 403\n except Exception:\n traceback.print_exc()\n return jsonify({\"error\": \"Server error\"}), 500\n elif api_key:\n try:\n user = session.query(User).filter_by(api_key=api_key).first()\n session.close()\n if not user:\n return jsonify({\"error\": \"Wrong API key\"}), 400\n if user.banned:\n return jsonify({\"error\": \"Access denied\"}), 403\n return f(user, *args, **kwargs)\n except Exception:\n traceback.print_exc()\n return jsonify({\"error\": \"Server error\"}), 500\n\n return jsonify(invalid_msg), 403\n\n return _verify", "def check_acl(func):\n\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if request.method in EXEMPT_METHODS: # pragma: no cover\n return func(*args, **kwargs)\n # 'func' is a Flask.view.MethodView so we have access to some special\n # params\n cls = func.view_class\n login_required = getattr(cls, \"login_required\", True)\n if (\n bui.auth != \"none\"\n and login_required\n and not bui.config.get(\"LOGIN_DISABLED\", False)\n ):\n if current_user.is_anonymous:\n abort(403)\n return func(*args, **kwargs)\n\n return decorated_view", "def require_authentication(f):\n def wrapper(*args, **kwargs):\n logger.info('Validating jwt')\n if request.method == 'POST':\n jwt_bearer = request.get_json()['jwt-bearer']\n logger.info(jwt_bearer)\n else:\n jwt_bearer = request.args['jwt-bearer']\n logger.info(jwt_bearer)\n if jwt_bearer:\n validate = requests.get(SERVICES['AUTHENTICATION']['VALIDATE'], params={'jwt': jwt_bearer}, headers={'Authorization':'Bearer ' + JWT}).json()\n if validate['ack'] == 'true':\n kwargs['service_name'] = validate['audience']\n return f(*args, **kwargs)\n return {'ack': 'false',\n 'msg': 'Authentication Requited.'}, 403\n return wrapper", "def not_valid_before(self):", "def before_request():\n pass", "def jwt_or_local_only(fn):\r\n @wraps(fn)\r\n def wrapper(*args, **kwargs):\r\n try:\r\n verify_jwt_in_request()\r\n except (CSRFError, FreshTokenRequired, InvalidHeaderError, NoAuthorizationError,\r\n UserLoadError) as ex:\r\n if request.remote_addr != '127.0.0.1':\r\n raise ex\r\n return fn(*args, **kwargs)\r\n return wrapper", "def decorated(*args, **kwargs):\n\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return make_response(jsonify({\n \"message\" : \"Please sign-up and login\"}), 401)\n\n try:\n data = jwt.decode(token, Config.SECRET)\n driver = data['is_driver']\n except:\n return make_response(jsonify({\n \"message\" : \"kindly provide a valid token in the header\"}), 401)\n\n if not driver:\n return make_response(jsonify({\n \"message\" : \"you are not authorized to perform this function as a non-driver user\"}), 401)\n\n return f(*args, **kwargs)", "def trusted_host_required(view_func):\n def decorator(request, *args, **kwargs):\n if not hasattr(settings, 'TRUSTED_HOSTS') or not settings.TRUSTED_HOSTS:\n settings.TRUSTED_HOSTS = []\n ip = get_client_ip(request)\n allowed = IpRangeList(*settings.TRUSTED_HOSTS)\n if ip in allowed:\n return view_func(request, *args, **kwargs)\n response = HttpResponseForbidden(\"Access 
denied\")\n return response\n return decorator", "def __call__(self, request):", "def validate_chain():", "def is_valid_request(self, request):\r\n auth_params = request.META.get(\"HTTP_AUTHORIZATION\", [])\r\n return self.is_in(auth_params) or self.is_in(request.REQUEST)", "def handle_request(fun):\n\n def wrapper(self, *args, **kwargs):\n \"\"\"\n We raise an exception when\n the code on the client side fails\n Server side errors are taken care of\n through response codes\n \"\"\"\n try:\n return fun(self, *args, **kwargs)\n except Exception as req_exception:\n self.logger.exception(\"internal error\")\n raise ClientSideError(str(req_exception))\n\n return wrapper", "def traditional_check(req):\n\n import functions\n\n result = functions.check_time(req.request_time, req.user_name)\n print 'check_time %s' % result\n result = functions.check_name(req.user_name)\n print 'check_name %s' % result\n result = functions.check_hash(req.request_token)\n print 'check_hash %s' % result", "def validate_user_request(request, res, is_you=True):\n if not request.user.is_authenticated():\n return forbidden_json({'error': AUTH_ERROR_TXT})\n try:\n req = Request.objects.get(resource=res)\n except Request.DoesNotExist:\n return bad_request_json({'error': \"Request doesn't exist\"})\n if is_you and request.user != req.user:\n return bad_request_json({'error': AUTHOR_ERROR})\n return req", "def on_validate(\n self,\n ) -> AsyncIteratorOrIterator[None]: # pragma: no cover # pyright: ignore\n yield None", "def _validate(self):\n pass", "def before_request(self, func: typing.Callable):\n return self.add_hook(type_=\"pre\", hook=func)", "def auth_required(func):\n @wraps(func)\n def wrapper(request):\n if not request.user:\n return web.json_response({'status': 'error', 'message': 'auth required'}, status=401)\n return func(request)\n return wrapper", "def api_base_checks(request, requester, services, cnxn,\n auth_client_ids, auth_emails):\n valid_user = False\n auth_err = ''\n client_id = None\n\n try:\n client_id = oauth.get_client_id(framework_constants.OAUTH_SCOPE)\n logging.info('Oauth client ID %s', client_id)\n except oauth.Error as ex:\n auth_err = 'oauth.Error: %s' % ex\n\n if not requester:\n try:\n requester = oauth.get_current_user(framework_constants.OAUTH_SCOPE)\n logging.info('Oauth requester %s', requester.email())\n except oauth.Error as ex:\n logging.info('Got oauth error: %r', ex)\n auth_err = 'oauth.Error: %s' % ex\n\n if client_id and requester:\n if client_id in auth_client_ids:\n # A whitelisted client app can make requests for any user or anon.\n logging.info('Client ID %r is whitelisted', client_id)\n valid_user = True\n elif requester.email() in auth_emails:\n # A whitelisted user account can make requests via any client app.\n logging.info('Client email %r is whitelisted', requester.email())\n valid_user = True\n else:\n auth_err = ('Neither client ID %r nor email %r is whitelisted' %\n (client_id, requester.email()))\n\n if not valid_user:\n raise endpoints.UnauthorizedException('Auth error: %s' % auth_err)\n else:\n logging.info('API request from user %s:%s', client_id, requester.email())\n\n project_name = None\n if hasattr(request, 'projectId'):\n project_name = request.projectId\n issue_local_id = None\n if hasattr(request, 'issueId'):\n issue_local_id = request.issueId\n # This could raise exceptions.NoSuchUserException\n requester_id = services.user.LookupUserID(cnxn, requester.email())\n auth = authdata.AuthData.FromUserID(cnxn, requester_id, services)\n if 
permissions.IsBanned(auth.user_pb, auth.user_view):\n raise permissions.BannedUserException(\n 'The user %s has been banned from using Monorail' %\n requester.email())\n if project_name:\n project = services.project.GetProjectByName(\n cnxn, project_name)\n if not project:\n raise exceptions.NoSuchProjectException(\n 'Project %s does not exist' % project_name)\n if project.state != project_pb2.ProjectState.LIVE:\n raise permissions.PermissionException(\n 'API may not access project %s because it is not live'\n % project_name)\n if not permissions.UserCanViewProject(\n auth.user_pb, auth.effective_ids, project):\n raise permissions.PermissionException(\n 'The user %s has no permission for project %s' %\n (requester.email(), project_name))\n if issue_local_id:\n # This may raise a NoSuchIssueException.\n issue = services.issue.GetIssueByLocalID(\n cnxn, project.project_id, issue_local_id)\n perms = permissions.GetPermissions(\n auth.user_pb, auth.effective_ids, project)\n config = services.config.GetProjectConfig(cnxn, project.project_id)\n granted_perms = tracker_bizobj.GetGrantedPerms(\n issue, auth.effective_ids, config)\n if not permissions.CanViewIssue(\n auth.effective_ids, perms, project, issue,\n granted_perms=granted_perms):\n raise permissions.PermissionException(\n 'User is not allowed to view this issue %s:%d' %\n (project_name, issue_local_id))\n\n return client_id, requester.email()", "def view_validation(required_params=None, update_t=True):\n def outer_wrapper(f):\n async def inner_wrapper(request):\n # run validation\n is_valid, response_kwargs, device, params = await validate(\n request, request['db_session'], required_params)\n # update timestamp if valid\n if is_valid and update_t:\n device.timestamp = params[\"timestamp\"]\n # pass everything to request\n request['is_valid'] = is_valid\n request['response_kwargs'] = response_kwargs\n request['device'] = device\n request['params'] = params\n # run view\n return await f(request)\n return inner_wrapper\n return outer_wrapper", "def validate_http_request(request):\n request_str = request.decode('utf-8')\n print(request_str)\n split_request = request_str.split(' ')\n if (split_request[0] == 'GET') and split_request[2].startswith('HTTP/1.1'):\n request_url = split_request[1].replace(\"/\", \"\\\\\")\n x = (True, request_url)\n return x\n y = (False, None)\n return y", "def test_process_invalid1(self):\n self.skill.logic = {}\n self.skill.valid.app_id = '12345'\n @self.skill.launch\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n pass\n self.skill.logic['LaunchRequest']()\n self.assertFalse(self.skill.process(data.SAMPLE_LAUNCH_REQUEST))", "def test_decorator_middleware(self):\n request = self.factory.get(reverse('contact:home'))\n\n # middleware don't store request to decorated function\n decorated_func = not_record_request(home_page)\n request.user = self.user\n self.middleware.process_view(request, decorated_func)\n rs = RequestStore.objects.all()\n self.assertQuerysetEqual(rs, [])\n\n # middleware store request to undecorated function\n request.user = self.user\n self.middleware.process_view(request, home_page)\n rs = self.request_store.objects.all()\n self.assertEquals(len(rs), 1)\n only_one_rs = rs[0]\n self.assertEqual(only_one_rs.path, reverse('contact:home'))\n\n # middleware store request to undecorated function if user is anonymous\n request.user = AnonymousUser()\n self.middleware.process_view(request, home_page)\n rs = self.request_store.objects.all()\n self.assertEquals(len(rs), 2)\n only_one_rs = rs[1]\n 
self.assertEqual(only_one_rs.path, reverse('contact:home'))" ]
[ "0.68247503", "0.6648503", "0.6610281", "0.65511626", "0.64839965", "0.5961608", "0.59245205", "0.5921186", "0.589062", "0.5860312", "0.5833157", "0.5833084", "0.5802698", "0.57723325", "0.5756725", "0.5740091", "0.57035977", "0.5687878", "0.5684185", "0.5676626", "0.5673376", "0.5663634", "0.56463146", "0.5627601", "0.56071466", "0.5602063", "0.5601931", "0.55951035", "0.5592164", "0.55883795", "0.557006", "0.55672365", "0.55596226", "0.5555948", "0.5511134", "0.54886425", "0.54886425", "0.54886425", "0.54883075", "0.5482508", "0.5457512", "0.54439986", "0.5442729", "0.54353315", "0.54274845", "0.5427083", "0.54241633", "0.54225594", "0.5417046", "0.54115164", "0.54047924", "0.5400457", "0.5392578", "0.5389999", "0.5388436", "0.5385451", "0.5375312", "0.5375312", "0.5368554", "0.53659594", "0.5364774", "0.53635705", "0.53516954", "0.5349886", "0.5342275", "0.5336381", "0.5327775", "0.53215295", "0.53150856", "0.53150856", "0.53138137", "0.531331", "0.53084135", "0.530248", "0.52911013", "0.5284046", "0.5277559", "0.5270353", "0.52623415", "0.5257667", "0.5256448", "0.5253927", "0.525351", "0.5250627", "0.5248964", "0.5248723", "0.5246044", "0.52449876", "0.52405417", "0.52384", "0.52358246", "0.523567", "0.5231477", "0.5229506", "0.5228289", "0.5227004", "0.522692", "0.5225679", "0.52254033", "0.52128685" ]
0.70384467
0
Construct the core application.
def create_app():
    app = Flask(__name__, instance_relative_config=False)
    app.register_blueprint(auth_bp, url_prefix='/auth')
    app.register_blueprint(errors_bp, url_prefix='/error')
    app.config.from_object('config.Config')
    db.init_app(app)
    store.bind(db)
    login_manager.init_app(app)
    Session(app)
    captcha = FlaskSessionCaptcha(app)
    captcha.init_app(app)

    with app.app_context():
        from . import routes  # Import routes
        db.create_all()  # Create sql tables for our data models

        return app
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_core_app():\n app = web.Application(middlewares=[middleware.error_middleware])\n management_routes.setup(app, is_core=True)\n return app", "def main():\n LOGGER.info('Loading Application')\n main_app = Application()\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--console\", help=\"Command Line Mode\", action=\"store_true\")\n args = parser.parse_args()\n if args.console:\n LOGGER.info('Command Line Mode')\n main_app.run()\n else:\n main_app.controller.gui_enabled = True\n try:\n import gui\n except ModuleNotFoundError:\n from herdcl import gui\n app = gui.MainUI()\n LOGGER.info('Opening GUI')\n app.mainloop()", "def setup_application(self):\n pass", "def create_and_run():\n\n app = App()\n app.run()", "def create_app(self):\r\n self.app = Flask(__name__, instance_relative_config=True)\r\n\r\n # Init the secret key of the app -it is a must for flask to run\r\n self.app.config.from_mapping(\r\n SECRET_KEY='!ZNeverSayNever116Z!',\r\n MONGODB_SETTINGS= {'host': 'mongodb://localhost/opc_integrity'}\r\n )\r\n initialize_db(self.app)\r\n\r\n\r\n # Init the app with core routes\r\n routes.init_app(self.app)", "def create():\n\n return App()", "def main():\n app = App()\n app.run()", "def create_app(self):\n raise NotImplementedError", "def app():\n return create_app()", "def setup_app():\n\n # 1 Create Flask application\n app = Flask(\n import_name=__name__,\n template_folder=\"templates\",\n static_folder=\"static\"\n )\n\n # 2 Update the apps configuration\n app = config_selector(app)\n register_error_handlers(app)\n\n cache.init_app(app)\n\n # 3 Set up logger\n setup_logger(app.config)\n LOGGER.info(\"Set up app & logger.\")\n\n # 4 Init clients\n init_clients(app.config)\n\n # 5 Init Daemon\n start_daemon(app.config)\n\n # 6 Register blueprints\n register_blueprints(app)\n Bootstrap(app)\n\n return app", "def main():\n print(\"def main\")\n return APP.run()", "def app():\n app = create_app()\n return app", "def initialize(self, application):", "def prepare_app():\n application = service.Application('buildbot-worker')\n master = (GATEWAY\n if conf.Washer.FORCE_GATEWAY\n else conf.Buildbot.BUILDMASTER)\n worker = Worker(master,\n conf.Buildbot.BUILDMASTER_PORT,\n conf.Buildbot.WORKERNAME,\n conf.Buildbot.WORKERPASS,\n conf.Buildbot.BASEDIR,\n conf.Buildbot.KEEPALIVE,\n umask=None,\n maxdelay=conf.Buildbot.MAXDELAY,\n numcpus=None,\n allow_shutdown=None,\n maxRetries=None)\n worker.setServiceParent(application)\n\n class InlineApplication(UnixApplicationRunner):\n def createOrGetApplication(self):\n nonlocal application\n return application\n\n options = ServerOptions()\n options[\"nodaemon\"] = not conf.Washer.DAEMON\n options[\"logfile\"] = conf.Washer.LOG_FILE\n\n commands.register()\n\n return InlineApplication(options)", "def main():\n Log.info('Installing...')\n app = Application()\n app.run()\n Log.info(\"Done successfully.\")", "def startapp():", "def app():\n return aplicattion", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n\n init_includes(config)\n init_routing(config)\n init_db(config)\n return config.make_wsgi_app()", "def Application(self, app, **kargs):\n app_builddir = \"%s/%s/%s\" % (self.builddir, self.name, app)\n\n # Export env and args to the app SConstruct/home/benno/local//lib/scons-0.96/SCons/Node/FS.py\n env = self\n Export(\"env\")\n args = kargs\n Export(\"args\")\n\n # Call the apps SConstruct file to build it\n self.apps[app] = self.SConscript(app, build_dir=app_builddir, duplicate=0)\n return 
self.apps[app]", "def app_factory(global_conf, load_app_kwds={}, **kwargs):\n # Create the Galaxy application unless passed in\n kwargs = load_app_properties(\n kwds=kwargs,\n **load_app_kwds\n )\n if 'app' in kwargs:\n app = kwargs.pop('app')\n else:\n from galaxy.webapps.coralsnp_reports.app import UniverseApplication\n app = UniverseApplication(global_conf=global_conf, **kwargs)\n atexit.register(app.shutdown)\n # Create the universe WSGI application\n webapp = CoralSNPReportsWebApplication(app, session_cookie='galaxycoralsnpreportssession', name=\"coralsnp_reports\")\n add_ui_controllers(webapp, app)\n # These two routes handle our simple needs at the moment\n webapp.add_route('/{controller}/{action}', controller=\"root\", action='index')\n webapp.add_route('/{action}', controller='root', action='index')\n webapp.finalize_config()\n # Wrap the webapp in some useful middleware\n if kwargs.get('middleware', True):\n webapp = wrap_in_middleware(webapp, global_conf, app.application_stack, **kwargs)\n if asbool(kwargs.get('static_enabled', True)):\n webapp = wrap_if_allowed(webapp, app.application_stack, wrap_in_static,\n args=(global_conf,),\n kwargs=kwargs)\n # Close any pooled database connections before forking\n try:\n galaxy.model.corals.mapping.metadata.bind.dispose()\n except Exception:\n log.exception(\"Unable to dispose of pooled coralsnp_reports model database connections.\")\n # Return\n return webapp", "def app(self):\n\n ## set flask specific things that are non-optional\n error = lambda k: 'Fatal: You need to specify a \"flask\" section ' + \\\n 'with an entry like \"'+k+'=...\" in your .ini file'\n try: app_name = self['flask.app']\n except KeyError: raise SystemExit(error('app'))\n try: secret_key = self['flask.secret_key']\n except KeyError: raise SystemExit(error('secret_key'))\n app = Flask(app_name)\n app.secret_key = secret_key\n\n ## set flask specific things that are optional\n if 'flask.template_path' in self:\n app.jinja_loader = FileSystemLoader(self['template_path'])\n if 'flask.before_request' in self:\n before_request = self['flask.before_request']\n before_request = namedAny(before_request)\n app.before_request(before_request)\n if 'flask.after_request' in self:\n after_request = self['flask.after_request']\n after_request = namedAny(after_request)\n app.after_request(after_request)\n\n ## setup views\n try: view_holder = self['corkscrew.views']\n except KeyError:\n error = 'Fatal: could not \"view=<dotpath>\" entry in your .ini file'\n raise SystemExit(error)\n else:\n view_list = namedAny(view_holder)\n [ v(app=app, settings=self) for v in view_list]\n\n return app", "def init_app():\r\n LOG.info('Initialising web server.')\r\n app = web.Application(middlewares=[api_key()])\r\n app.router.add_routes(routes)\r\n set_cors(app)\r\n app.on_startup.append(init_db)\r\n app.on_cleanup.append(close_db)\r\n return app", "def application():\n\n configure_app(app)\n yield app", "def _initalizeApplication(self, dev: bool) -> None:\n\n # Configure our PyQt5 environment\n if has_dark:\n self.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n self.setApplicationDisplayName('Painter')\n self.setApplicationName('Painter')\n self.setApplicationVersion(__version__)\n runtime.app = self\n\n # Mount our application virtual file system\n if dev:\n vfs.vfs_mount_subdirectories('.', 'assets')\n vfs.vfs_mount_subdirectories('.', 'items')\n else:\n self._mountMultifile('assets.mf')\n self._mountMultifile('items.mf')\n\n # Initialize Panda3D environment variables\n 
p3d.load_prc_file('config/painter.prc')\n\n # Initialize our editor window\n self._window = editor.QEditorWindow()\n self._window.show()", "def main():\r\n app = application.Application()\r\n app.initializeLayer(menu.Menu())\r\n app.run()", "def _init_app() -> None:\n # set title\n st.title(\"Detecting Pet Faces 👁 🐶 🐱\",)\n st.markdown(\n \"This application detects the faces of some common Pet Breeds using a **RetinaNet**.\"\n )\n st.write(\"## How does it work?\")\n st.write(\n \"Upload an image of a pet (cat or dog) and the app will draw the dounding box where it detects the objects:\"\n )\n # Display demo Image\n st.image(\n Image.open(\"images/res_3.png\"), caption=\"Example\", use_column_width=True,\n )\n st.write(\"## Upload your own image\")\n st.write(\n \"**Note:** The model has been trained on pets breeds given in the `The Oxford-IIIT Pet Dataset`\"\n \" and therefore will only with those kind of images.\"\n )\n st.markdown(\"**To be more precise the model has been trained on these breeds:**\")\n # Show Train data Statistics\n st.image(Image.open(\"images/breed_count.jpg\"), use_column_width=True,)", "def main(config=config):\n root = Tk()\n root.columnconfigure(0, weight=1)\n root.wm_title(\"ayy lmap\")\n app = Application(master=root, config=config)\n return app", "def webinit():\n\troot = Root()\n\troot.player = Player()\n\troot.songs = Songs()\n\troot.albums = Albums()\n\troot.artists = Artists()\n\t\n\tapp = cherrypy.tree.mount(root, '/', 'data/cherrypy.config')\n\treturn app", "def create_app():\n\n # Create app\n app = Flask(__name__)\n app.config.from_object(\"nextbus.config.Config\")\n\n app.logger = logger.app_logger\n # Load logging configuration and log initial configuration\n logger.load_config(app)\n\n # Initialise SQLAlchemy and Migrate in app\n db.init_app(app)\n migrate.init_app(app, db)\n\n # Adding app, db and model objects to flask shell\n from nextbus import models\n app.shell_context_processor(\n lambda: {\"app\": app, \"db\": db, \"models\": models}\n )\n\n from nextbus.converters import add_converters\n add_converters(app)\n\n from nextbus.views import page\n from nextbus.resources import api\n app.register_blueprint(page)\n app.register_blueprint(api)\n\n return app", "def create_app():\n app = Flask(__name__)\n\n @app.route('/')\n def root():\n \"\"\"Base view.\"\"\"\n return 'TODO - part 2 and beyond!'\n\n return app", "def get_app(self):\n return Application()", "def _make_app():\n app = web.Application(middlewares=[middleware.error_middleware])\n admin_routes.setup(app)\n return app", "def make_app(*args, **kwargs):\n app = Flask(*args, **kwargs)\n Roots(app)\n return app", "def _configure(self):\n Application._configure(self)\n\n return", "def makeApp(ConfigClass):\n # Create config instance (raise RuntimeError if config invalid)\n global config\n config = ConfigClass()\n \n # Set number of threads\n reactor.suggestThreadPoolSize(config.threads)\n \n # The root of the HTTP hierarchy\n default = WikiRoot()\n\n # Here is where img and css and some special files come from\n default.putChild('wiki', static.File(config.docs))\n\n # Generate the Site factory\n # TODO: Maybe we can use WikiRoot instead of this\n # ----------------------------------------------\n root = vhost.NameVirtualHost()\n root.default = default\n # ----------------------------------------------\n site = MoinSite(root, logPath=config.logPath, timeout=2*60) # 2 minutes timeout\n\n # Make application\n application = service.Application(\"web\", uid=config.uid, gid=config.gid)\n sc = 
service.IServiceCollection(application)\n\n # Listen to all interfaces in config.interfaces\n for entry in config.interfaces:\n # Add a TCPServer for each interface.\n\n # This is an hidden experimantal feature: each entry in\n # interface may contain a port, using 'ip:port'.\n # Note: the format is subject to change!\n try:\n interface, port = entry.split(':', 1)\n except ValueError:\n interface, port = entry, config.port\n \n # Might raise ValueError if not integer.\n # TODO: check if we can use string port, like 'http'\n port = int(port) \n\n if port == 443 and ssl and ssl.supported and config.sslcert:\n sslContext = ssl.DefaultOpenSSLContextFactory(*config.sslcert)\n s = internet.SSLServer(port, site, sslContext, interface=interface)\n else:\n s = internet.TCPServer(port, site, interface=interface)\n s.setServiceParent(sc)\n\n return application", "def __init__(self, app):\n pass", "def createApp(self):\n app = self.app\n window = self.window\n window.show()\n app.exec()", "def main(config: str):\n application = Application(config_path=config)\n application.run()", "def start(self):\n\n self.app = Application()\n self.app._loop = self.loop\n self.add_routes()\n self.app.run(port=int(self.port),\n worker_num=None,\n reload=False,\n debug=False)\n # GZip support\n # Compress(self.app)\n # self.app.config['COMPRESS_MIMETYPES'] = {'text/html',\n # 'application/json'}\n # self.app.config['COMPRESS_LEVEL'] = 4\n # self.app.config['COMPRESS_MIN_SIZE'] = 300\n # Session support\n # self.session_interface = InMemorySessionInterface()\n # self.app.response_middleware.appendleft(self.save_session)\n # self.app.request_middleware.append(self.add_session_to_request)\n\n # self.add_routes()\n # return await self.app.create_server(loop=self.loop,\n # host='0.0.0.0',\n # port=self.port,\n # debug=False)", "def setup_app(app_name, app_directory, no_logs):\n\n test_mode = os.getenv(\"TEST_MODE\")\n if test_mode:\n print_warn(\"Running in TEST mode\")\n\n app_directory = abspath(app_directory)\n controller.load_app_modules(app_directory)\n\n os.chdir(app_directory)\n run_boot(app_directory)\n set_engine_config(test_mode, no_logs)\n load_tools(app_directory)\n setup_features()\n\n data_provider.set_base_dir(app_directory)\n cherrypy.tree.mount(controller.get_app_root(), config=web_app_config)", "def _binary_app(self):\n self.make_binary()", "def main() -> None:\n config = get_config()\n app = Application()\n web_config = config[\"web\"]\n webapp = WebApp(config)\n webapp.attach_to(app)\n\n run_config = keep(web_config, {\"host\", \"port\"})\n run_app(app, **run_config)", "def make_app(global_conf, **app_conf):\n app = RestishApp(root.Root())\n app = repoze.who.config.make_middleware_with_config(app, global_conf, app_conf['repoze.who.ini'])\n app = setup_environ(app, global_conf, app_conf)\n # General \"middleware\".\n app = flash.flash_middleware_factory(app)\n app = cookies.cookies_middleware_factory(app)\n return app", "def main(args=None):\n app()\n return 0", "def init_app(args, setup_logging=True):\n if setup_logging:\n pyramid.paster.setup_logging(args.config)\n settings = pyramid.paster.get_appsettings(args.config)\n if 'environment' not in settings:\n raise KeyError('Missing key \"environment\" in config. 
Specify '\n 'environment in INI file \"{}\".'.format(args.config))\n if not args.etc_dir:\n args.etc_dir = os.path.join(args.root_dir, 'etc')\n rc = Rc(\n environment=settings['environment'],\n root_dir=args.root_dir,\n etc_dir=args.etc_dir\n )\n rc.load()\n settings.update(rc.data)\n settings['rc'] = rc\n result = {'settings': settings}\n pym.init_auth(rc)\n\n #pym.models.init_unscoped(settings, 'db.pym.sa.')\n pym.models.init(settings, 'db.pym.sa.')\n\n return result", "def init_application(app, config):\n app.config.from_object(config)\n\n api = Api(app)\n api.add_resource(Build, config.WSPATH)\n api.add_resource(Request, config.WSPATH + '/<request_id>')\n api.add_resource(Result, config.WSPATH + '/<request_id>/result')\n api.add_resource(Image, config.WSPATH + '/<request_id>/result/image')\n api.add_resource(Output, config.WSPATH + '/<request_id>/result/output/<int:output_id>')\n api.add_resource(Log, config.WSPATH + '/<request_id>/result/log')\n\n AgroLogHandler(app).init()\n app.logger.info(\"Flask Application initialized\")", "def create_app():\n app = Flask(\n __name__,\n instance_relative_config=False,\n )\n app.config.from_object('config.Config')\n\n with app.app_context():\n # CORS\n CORS(app)\n\n # JWT & BCRYPT\n from .utils.auth import init_auth\n init_auth(app)\n\n # DB\n from .utils.db import db\n db.init_app(app)\n\n # Mail\n from .utils.mail.service import mail\n mail.init_app(app)\n app.extensions['mail'].debug = 0 # No logging\n\n # Jobs\n from .utils.scheduler import start_jobs\n start_jobs(app)\n\n # Import routes\n from .routes import (\n admin, users, files,\n suprema,\n b_locals, b_federals)\n\n app.register_blueprint(admin.bp)\n app.register_blueprint(users.bp)\n app.register_blueprint(files.bp)\n app.register_blueprint(suprema.bp)\n app.register_blueprint(b_locals.bp)\n app.register_blueprint(b_federals.bp)\n\n return app", "def main():\n\n root = tk.Tk()\n root.title(\"Exploring US Bikeshare Data\")\n app = Application(master=root)\n print(\"Application loaded! Please use the GUI window to continue...\")\n app.mainloop()", "def create_app(self):\n app = Flask(__name__)\n\n app.config[\"auth_func\"] = self.auth_func\n app.config[\"hydrator_func\"] = self.hydrator_func\n app.config[\"request_hydrator_func\"] = self.request_hydrator_func\n app.config[\"database_uri\"] = self.database_uri\n app.config[\"hmac_secret\"] = self.hmac_secret\n\n cors = CORS()\n cors.init_app(app, resources={r\"/*\": {\"origins\": self.cors_origins, \"supports_credentials\": True}})\n\n app.register_blueprint(api_v0.bp)\n\n @app.route(\"/\")\n def health_check():\n \"\"\"Can be called by e.g. 
Kubernetes to verify that the API is up\n\n Returns:\n str: the static string \"Comet-API\", could be anything\n \"\"\"\n return \"Comet-API\"\n\n return app", "def main():\n CLI_APP.run()", "def initialize_app(app):\n # configure_app(app)\n # log.info(\"> Starting development server at http://%s/api/ <<<<<\" %\n # app.config[\"SERVER_NAME\"])\n\n blueprint_api = Blueprint('api', __name__, url_prefix='/api')\n api.init_app(blueprint_api)\n app.register_blueprint(blueprint_api)\n\n api.add_namespace(task_namespace)\n api.add_namespace(chain_namespace)\n\n Bootstrap(app)\n nav.init_app(app)\n app.register_blueprint(frontend_blueprint)\n app.register_blueprint(processors_blueprint)\n app.register_blueprint(chains_blueprint)\n app.register_blueprint(tasks_blueprint)\n app.register_blueprint(compare_blueprint)\n\n db.init_app(app)\n db.create_all(app=app)\n\n if not os.path.exists(app.config[\"OCRD_BUTLER_RESULTS\"]):\n os.makedirs(app.config[\"OCRD_BUTLER_RESULTS\"])", "def main(_, **settings):\n config = Configurator(settings=settings)\n register_includes(config)\n register_json_renderer(config)\n register_routes(config)\n\n config.scan()\n return config.make_wsgi_app()", "def create_app():\n app = Flask(__name__)\n\n # Used by Flask to secure data\n app.config['SECRET_KEY'] = 'super-secret-secure-key'\n # Path to save the Database\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'\n\n # Initialize the Database\n db.init_app(app)\n\n # Set up login manager\n from source.models import manage_login\n manage_login(app)\n\n # Blueprint for auth routes\n from source.auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint)\n\n # Blueprint for non-auth routes\n from source.main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n\n return app", "def main(global_config, **settings):\n\n config = Configurator(\n settings=settings,\n root_factory=\"pyramid_upwork_example.models.RootFactory\")\n\n # ACL authorization callback for pyramid-upwork\n config.registry.get_acl_group = get_acl_group\n\n # External includes\n config.include('pyramid_upwork')\n\n # Views and routing\n config.add_view('pyramid_upwork_example.views.MainPage',\n renderer='templates/main.jinja2',\n permission='view')\n\n return config.make_wsgi_app()", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n db.init_app(app)\n ma.init_app(app)\n migrate = Migrate(app, db)\n\n with app.app_context():\n from . 
import routes\n\n # Create tables for our models\n db.create_all()\n app.logger.info(\"application started\")\n\n return app", "def create_app(argv):\n # pylint: disable=global-statement\n global __QAPPLICATION_SINGLETON\n if __QAPPLICATION_SINGLETON is None:\n __QAPPLICATION_SINGLETON = CueGuiApplication(argv)\n return __QAPPLICATION_SINGLETON", "def main():\n options = lib.main.parse_args()\n\n #Initialize all the loggings with the options specified.\n lib.main.logs_initialization(options)\n logging.debug(\"Logs are now enabled and working\")\n\n #Update the main config file with the app information.\n logging.debug(\"Updating parameters on config files\")\n lib.config.update_params()\n\n # Finally, when all the initialization schedule is completed, Flask\n # will start.\n logging.debug(\"Calling Flask initializator function\")\n api.start(options[\"debug\"])", "def main(global_config, **settings):\n config = Configurator(settings=settings, root_factory=root_factory)\n config.include('substanced')\n config.include('.resources')\n config.scan()\n return config.make_wsgi_app()", "async def init_web(manager: ConfigServiceManager):\n base = manager.env.get('WEB_BASE_HREF', '/')\n\n app = web.Application()\n app['base_href'] = base\n app['manager'] = manager\n app['static_root_url'] = base + 'assets'\n app.add_routes(get_standard_routes(app))\n _setup_cors(app)\n app.add_routes(get_custom_routes(app))\n _setup_jinja(manager, app)\n\n if base != '/':\n root_app = web.Application()\n root_app.add_subapp(base, app)\n return root_app\n return app", "def make_app(global_conf, full_stack=True, **app_conf):\n app = make_base_app(global_conf, full_stack=True, **app_conf)\n \n # Wrap your base TurboGears 2 application with custom middleware here\n from depot.manager import DepotManager\n app = DepotManager.make_middleware(app)\n\n return app", "def main():\r\n LOG.info('Starting server build.')\r\n web.run_app(init_app(),\r\n host=os.environ.get('APP_HOST', CONFIG.registry.get('app_host', '0.0.0.0')),\r\n port=int(os.environ.get('APP_PORT', CONFIG.registry.get('app_port', 8080))),\r\n shutdown_timeout=0,\r\n ssl_context=application_security())", "def make_app(global_conf, full_stack=True, **app_conf):\n app = make_base_app(global_conf, full_stack=True, **app_conf)\n \n # Wrap your base TurboGears 2 application with custom middleware here\n \n # Initialize repoze.what plugins.\n groups_path = os.path.join(global_conf.get(\"appsupport_dir\"), \"groups.ini\")\n groups = {\n \"ini_groups\": INIGroupAdapter(app_conf.get(\"what.groups_file\", groups_path)),\n \"dscl_groups\": MacOSXGroupAdapter()\n }\n permissions_path = os.path.join(global_conf.get(\"appsupport_dir\"), \"permissions.ini\")\n permissions = {\n \"ini_permissions\": INIPermissionsAdapter(app_conf.get(\"what.permissions_file\", permissions_path))\n }\n \n # Initialize repoze.who plugins.\n friendlyform = FriendlyFormPlugin(\n \"/login\",\n \"/login_handler\",\n None,\n \"/logout_handler\",\n None,\n \"auth_tkt\",\n login_counter_name=None\n )\n friendlyform.classifications = {\n IIdentifier: ['browser'],\n IChallenger: ['browser']\n }\n auth_tkt = AuthTktPlugin(secret=app_conf[\"beaker.session.secret\"])\n macosx_authenticator = MacOSXAuthenticator()\n macosx_metadataprovider = MacOSXMetadataProvider()\n file_authenticator = FileAuthenticator()\n file_metadataprovider = FileMetadataProvider()\n \n # Configuration for repoze.who.\n identifiers = [\n ('friendlyform', friendlyform),\n ('auth_tkt', auth_tkt)\n ]\n authenticators = [\n 
('macosx_authenticator', macosx_authenticator),\n ('file_authenticator', file_authenticator)\n ]\n challengers = [\n ('friendlyform', friendlyform)\n ]\n mdproviders = [\n ('macosx_metadataprovider', macosx_metadataprovider),\n ('file_metadataprovider', file_metadataprovider)\n ]\n \n # Setup authentication and authorization through repoze.what.\n app = setup_auth(\n app,\n groups,\n permissions,\n identifiers=identifiers,\n authenticators=authenticators,\n challengers=challengers,\n mdproviders=mdproviders,\n #log_stream=sys.stdout,\n #log_level=logging.DEBUG\n )\n \n return app", "def main(global_config, **settings):\n LOGGER.info('= main :: settings = %s', settings)\n\n config = Configurator(settings=settings)\n\n # Home\n config.add_route('home', '/')\n\n # Lastly, we scan the config and make the app\n # config.scan()\n\n return config.make_wsgi_app()", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n\n # Initialize Plugins\n bootstrap = Bootstrap(app) # noqa: F841\n\n with app.app_context():\n # Include our Routes\n from . import routes # noqa: F401\n\n # # Register Blueprints\n # app.register_blueprint(auth.auth_bp)\n # app.register_blueprint(admin.admin_bp)\n\n return app", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n # app.config.from_object('config.Config')\n file_path = os.path.abspath(os.getcwd())+\"/mpulse.db\"\n app.config.from_mapping(\n SECRET_KEY='dev',\n SQLALCHEMY_DATABASE_URI = 'sqlite:///'+file_path,\n SCHEMA=os.path.join(os.path.dirname(__file__), 'schema.sql'),\n SQLALCHEMY_TRACK_MODIFICATIONS = False,\n JSON_SORT_KEYS=False\n )\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n \n # init database\n db.init_app(app)\n \n with app.app_context():\n \n # Create tables if they don't exist\n db.create_all() \n \n # Include our api Routes for members\n from . 
import members\n # Register Blueprints\n app.register_blueprint(members.bp)\n\n return app", "def make_app(conf=None):\n if not conf:\n conf = 'development'\n app = create_app(cm.get(conf))\n return app", "def create_app(self):\n\n app = create_app()\n app.config.from_object('project.config.TestingConfig')\n return app", "def main():\n ensure_not_root()\n config.setup()\n model.init_db()\n manager.run()", "def create_app(env=\"production\"):\n app = Flask(__name__, static_url_path=\"/\")\n config_app(app, env=env)\n\n with app.app_context():\n Moment(app)\n init_db(app)\n enable_parser(app)\n register_route(app)\n register_blue(app)\n init_logger(app)\n init_scheduler(app)\n return app", "def prepare_app(self):\n self.app = Flask(self.APP_NAME)\n self.app.config.from_object('mmapi.config.Config')\n CORS(self.app, origins=self.app.config['CORS_ACCEPTED_ORIGINS'])\n\n # Map urls with and without a trailing slash to the same endpoint.\n self.app.url_map.strict_slashes = False", "def app() -> Generator:\n app = create_app({\"TESTING\": True})\n\n yield app", "def init(self):\n\n # create directories\n self.createDirectory(\"src\")\n list_directories_name = {\n \"Models\": True,\n \"DTOs\": True,\n \"Repositories\": True,\n \"Services\": True,\n \"Mappeurs\": True,\n \"Ressources\": True,\n \"Logs\": False,\n \"Parsers\": False,\n \"docker\": False,\n \"Enums\": False,\n \"Config\": False,\n }\n\n for directory_name in list_directories_name:\n self.createDirectory(\"src/\" + directory_name)\n\n self.createDirectory(\"src/Logs\")\n open(\"src/Logs/debug.log\", \"w\").close()\n open(\"src/Logs/info.log\", \"w\").close()\n open(\"src/Logs/error.log\", \"w\").close()\n\n # test directories\n self.createDirectory(\"Tests\")\n for directory_name in list_directories_name.keys():\n if list_directories_name[directory_name]:\n self.createDirectory(\"Tests/\" + directory_name)\n\n # helpers Test\n path = self.getPathFileInStatic(\"helpersTest.py\")\n shutil.copy(path, \"Tests/helpersTest.py\")\n\n # Security config\n path = self.getPathFileInStatic(\"security.py\")\n shutil.copy(path, \"src/Config/SecurityConfig.py\")\n\n # Logger\n path = self.getPathFileInStatic(\"logger.py\")\n shutil.copy(path, \"src/Config/Logger.py\")\n\n self.createDirectory(\"Tests/Mocks\")\n\n self.writeAppFile()\n\n path = self.getPathFileInStatic(\"config.py\")\n shutil.copy(path, \"src/Config/ApplicationConfig.py\")\n self.info(\"[x] create config.py\")\n # shutil.copy(getPathFileInStatic(\"__init__.py\"), \"src/__init__.py\")\n # info(\"[x] create __init__.py\")\n path = self.getPathFileInStatic(\"server.py\")\n shutil.copy(path, \"server.py\")\n self.info(\"[x] create server.py\")\n path = self.getPathFileInStatic(\"docker-compose.test.yml\")\n shutil.copy(path, \"src/docker/docker-compose.test.yml\")\n self.info(\"[x] create docker-compose.test.yml\")", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n db.init_app(app)\n flask_bcrypt.init_app(app)\n jwt.init_app(app)\n\n with app.app_context():\n # Import Blueprints\n from .routes.users_route import users_bp\n from .routes.messages_route import messages_bp\n\n # REGISTER ROUTES\n app.register_blueprint(users_bp, url_prefix=\"/users\")\n app.register_blueprint(messages_bp, url_prefix=\"/messages\")\n\n\n return app", "def make_app() -> Flask:\n logger.info('creating flask application')\n app = Flask(\n 'pasta',\n static_url_path='/static',\n static_folder='./static',\n template_folder='./views')\n 
config.flask.SECRET_KEY = os.urandom(32)\n config.flask.SERVER_NAME = None\n app.config.from_mapping(config.flask)\n return app", "async def init_app():\n app = web.Application()\n\n # And... here our routes\n app.router.add_route(\n \"POST\", f\"/{ASTERISK_CALL_APP_ROUTE_ASTERISK_INIT}\", asterisk_init\n )\n app.router.add_route(\"POST\", f\"/{ASTERISK_CALL_APP_ROUTE_PLAY}\", asterisk_play)\n return app", "def setup(self, app_args):\n raise NotImplementedError", "def startapp(self):\n\n path = os.path.join(self.path, self.app_name)\n if os.path.exists(path):\n raise exceptions.AppDirectoryAlreadyExistsError(self.app_name)\n else:\n os.makedirs(path)\n\n context = {\n 'app_name': self.app_name,\n }\n\n self._clone_defaults(\n os.path.join(self.root, 'defaults', 'app_{}'.format(self.runtime)),\n path,\n context\n )", "def create_app():\n app = Flask(__name__)\n\n app.config.from_pyfile('../settings.py')\n\n app.register_blueprint(layout_bp, url_prefix='/layouts')\n app.register_blueprint(sheet_bp, url_prefix='/sheets')\n app.register_blueprint(user_bp, url_prefix='/users')\n\n db.init_app(app)\n ma.init_app(app)\n migrate.init_app(app)\n login_manager.init_app(app)\n\n return app", "def setUp_base(self):\n self._create_main_project_and_root()", "def create_app():\n logging.basicConfig(\n level=REANA_LOG_LEVEL,\n format=REANA_LOG_FORMAT\n )\n app = Flask(__name__)\n app.config.from_object('reana_server.config')\n app.secret_key = \"hyper secret key\"\n\n # Register API routes\n from .rest import ping, secrets, users, workflows # noqa\n app.register_blueprint(ping.blueprint, url_prefix='/api')\n app.register_blueprint(workflows.blueprint, url_prefix='/api')\n app.register_blueprint(users.blueprint, url_prefix='/api')\n app.register_blueprint(secrets.blueprint, url_prefix='/api')\n\n app.session = Session\n CORS(app)\n return app", "def main():\n\n if os.getuid() != 0:\n sys.stderr.write('{} must run as root\\n'.format(sys.argv[0]))\n sys.exit(1)\n\n Path(defaults.BASE_CONFIG_FILES_DIR).mkdir(exist_ok=True)\n\n tornado.options.parse_command_line()\n IOLoop().start(Application(), PORT)", "def make_velruse_app(global_conf, **settings):\n return make_app(**settings)", "def __init__(self, root):\n self.root = root\n self.app = Home(root, self)", "def _init_app(self):\n\n self._app = FastAPI(**self._app_kws)\n\n for rt, kwargs in self._app_routers:\n self._app.include_router(rt, **kwargs)\n\n self._app.dependency_overrides[get_dataset] = lambda: self._obj\n self._app.dependency_overrides[get_cache] = lambda: self.cache\n\n return self._app", "def _init_app():\n\n this_dir = os.path.dirname(os.path.abspath(__file__))\n css_file = os.path.join(this_dir, \"stylesheet.css\")\n app = dash.Dash(\n __name__,\n external_stylesheets=[css_file],\n suppress_callback_exceptions=True,\n )\n return app", "def main(global_config, **settings):\n zodb_uri = settings.get('zodb_uri')\n zcml_file = settings.get('configure_zcml', 'configure.zcml')\n if zodb_uri is None:\n raise ValueError(\"No 'zodb_uri' in application configuration.\")\n\n finder = PersistentApplicationFinder(zodb_uri, appmaker)\n def get_root(request):\n return finder(request.environ)\n config = Configurator(root_factory=get_root,\n autocommit=True,\n settings=settings,\n )\n config.include(pyramid_chameleon)\n config.include(pyramid_zcml)\n config.load_zcml(zcml_file)\n return config.make_wsgi_app()", "def create_app(self):\n self.setUpPyfakefs()\n self.fake_os = fake_filesystem.FakeOsModule(self.fs)\n\n populate_fakefs(self)\n\n app = 
MDFakeFSTestSite(\n \"MDWeb\",\n app_options={}\n )\n\n # Add the partials directory so we have access in the FakeFS\n self.fs.add_real_directory(app.config['PARTIALS_TEMPLATE_PATH'])\n \n app.start()\n\n return app", "def create_app(self):\n app.config.from_object('config.TestingConfig')\n return app", "def create_app():\n app = Flask(__name__)\n app.config.from_object('app.configs.config')\n app.config.from_object('app.configs.settings')\n return app", "def create_app():\n app = Flask(__name__)\n\n\n @app.route('/')\n def barebones():\n return 'the barebones'\n\n return app", "def make_app():\n return tornado.web.Application([\n (r'/', MainHandler),\n (r'/async', AsyncHandler),\n (r'/gen', GenHandler),\n ])", "def make_app():\n return tornado.web.Application([\n tornado.web.URLSpec(r\"/ws/\", WebSocket, name=\"websocket\"),\n tornado.web.URLSpec(r\"/\", StartPage, name='index'),\n (r\"/static/\", tornado.web.StaticFileHandler,\n dict(path=SETTINGS['static_path'])),\n ], **SETTINGS)", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include('clldmpg')\n config.registry.registerUtility(link_attrs, interfaces.ILinkAttrs)\n home_comp = config.registry.settings['home_comp']\n home_comp.append('software')\n home_comp.append('contribute')\n config.add_settings(home_comp=home_comp)\n config.add_route('software', '/software')\n config.add_route('contribute', '/contribute')\n return config.make_wsgi_app()", "def init_app():\n app = Flask(__name__)\n\n with app.app_context():\n # Import parts of our core Flask app\n from . import routes\n\n from .plotlydash.index import init_dashboard\n app = init_dashboard(app)\n\n return app", "def cmd_init(self, appname):\n if '/' in appname:\n print 'Slashes are not allowed in project names.'\n return errno.EINVAL\n if os.path.exists(appname):\n print 'Cannot initialize project \"%s\" over existing %s.' % \\\n (appname, 'directory' if os.path.isdir(appname) else 'file')\n return errno.EEXIST\n templates_dir = os.path.join(data_dir, 'templates', 'project')\n if 'virtualenv' in sys.modules:\n print 'Creating virtualenv...'\n virtualenv.create_environment(appname)\n os.mkdir(os.path.join(appname, 'src'))\n appdir = os.path.join(appname, 'src', appname)\n else:\n appdir = appname\n print 'Creating templeton app...'\n shutil.copytree(templates_dir, appdir)\n # special templatization for index.html\n html_dir = os.path.join(appdir, 'html')\n tmpl = tempita.Template.from_filename(os.path.join(html_dir,\n 'index.html.tmpl'))\n f = file(os.path.join(html_dir, 'index.html'), 'w')\n f.write(tmpl.substitute(appname=appname))\n f.close()\n os.unlink(os.path.join(html_dir, 'index.html.tmpl'))\n return 0", "def create_app():\n\n app = FastAPI()\n add_root_route(app)\n\n return app", "def test_build(self):\n self.app.build()", "def create_app():\n from server.web import create_app\n # If we do a static javascript app via flask, add it here\n # from server.web import create_app as create_web_app\n return create_app()", "async def init(self):\n self.base_tamplates = {}\n self.preparing_task = None\n self.app = aioweb.Application()\n self.runner = aioweb.AppRunner(self.app)", "def _init_fast_api_app(self):\n app = FastAPI(\n title=\"Opal Server\",\n description=\"OPAL is an administration layer for Open Policy Agent (OPA), detecting changes\" +\n \" to both policy and data and pushing live updates to your agents. The opal server creates\" +\n \" a pub/sub channel clients can subscribe to (i.e: acts as coordinator). 
The server also\" +\n \" tracks a git repository (via webhook) for updates to policy (or static data) and accepts\" +\n \" continuous data update notifications via REST api, which are then pushed to clients.\",\n version=\"0.1.0\",\n )\n configure_middleware(app)\n self._configure_api_routes(app)\n self._configure_lifecycle_callbacks(app)\n return app" ]
[ "0.7207223", "0.70791864", "0.6998072", "0.6950758", "0.6818118", "0.68160903", "0.67678857", "0.6719606", "0.66488445", "0.6626063", "0.65972114", "0.65353584", "0.65281093", "0.6521015", "0.64897716", "0.6488322", "0.6431467", "0.6392214", "0.6355824", "0.6346588", "0.6336478", "0.62719077", "0.6268405", "0.626456", "0.6262241", "0.62381554", "0.62315774", "0.622121", "0.6221168", "0.6212153", "0.61958426", "0.61925346", "0.61665386", "0.61614555", "0.6149484", "0.6142444", "0.61280966", "0.6127313", "0.6119369", "0.6118875", "0.61110556", "0.61042076", "0.6103736", "0.6096659", "0.60751534", "0.6074574", "0.6074271", "0.6069434", "0.60682034", "0.60651934", "0.6060527", "0.6059607", "0.6057732", "0.6054259", "0.6037253", "0.60363156", "0.6029382", "0.6028303", "0.60265875", "0.6026311", "0.6016289", "0.601379", "0.60106426", "0.6005141", "0.6000509", "0.59925634", "0.59880066", "0.5982526", "0.5975527", "0.59745616", "0.5972939", "0.59610236", "0.59607977", "0.5960262", "0.5958429", "0.5953046", "0.59510165", "0.5950355", "0.5948443", "0.5939455", "0.59379643", "0.59360975", "0.5932274", "0.5931751", "0.59288645", "0.59229934", "0.5914404", "0.5914313", "0.5905665", "0.59046054", "0.5902927", "0.5898563", "0.5890168", "0.58897567", "0.5880977", "0.588035", "0.58765966", "0.5867689", "0.5865035", "0.58581537" ]
0.59344965
82
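
Each record in this listing pairs a natural-language query (a docstring, like the one beginning "Plots the graph." just below) with a code document and a list of negative code snippets, and its metadata declares a triplet objective over exactly those three fields. The following is a minimal sketch, not part of the dataset dump itself, of how one such record could be expanded into (query, positive, negative) training triplets; the dict keys mirror only the field names visible in the triplet objective ("query", "document", "negatives"), and the build_triplets helper name and demo values are illustrative assumptions.

from typing import List, Tuple


def build_triplets(record: dict) -> List[Tuple[str, str, str]]:
    """Expand one record into (query, positive, negative) training triplets."""
    query = record["query"]          # natural-language docstring query
    positive = record["document"]    # the matching code snippet
    negatives = record["negatives"]  # list of distractor code snippets
    return [(query, positive, neg) for neg in negatives]


if __name__ == "__main__":
    # Hypothetical record shaped like the rows shown in this listing.
    demo = {
        "query": "Plots the graph.",
        "document": "def plot_graph(self) -> None:",
        "negatives": ["def draw_nodes(self):", "def show_plot(self):"],
    }
    for q, pos, neg in build_triplets(demo):
        print(q, "|", pos, "|", neg)
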
Plots the graph. If the nodes have a position, the nodes will be placed there. Otherwise, they will be placed in a random but elegant manner.
def plot_graph(self) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()", "def plot_nodes(self, node_list):\n points = Marker()\n #visualizations points and lines..\n points.header.frame_id = \"map\"\n points.header.stamp = rospy.get_rostime()\n points.ns = \"markers\"\n points.id = 0\n points.type = points.POINTS\n points.action = points.ADD\n points.pose.orientation.w = 1.0\n points.scale.x = 2*self.rviz_tuning_plt\n points.scale.y = 2*self.rviz_tuning_plt\n points.color.r = 0.0\n points.color.g = 1.0\n points.color.b = 0.0\n points.color.a = 1.0\n points.lifetime = rospy.Duration()\n\n for node in node_list:\n p1 = Point()\n p1.x = node.x\n p1.y = node.y\n p1.z = 0.01\n points.points.append(p1)\n \n self.pub_nodes.publish(points)", "def draw_nodes(self):\n pass", "def plot_graph(self):\n g = self.get_graph()\n plt.title(\"Our graph:\" + g.__str__())\n plt.xlabel(\"X\")\n plt.ylabel(\"-<\") # I should flip 'Y' letter so I decided to write it by a tricky way. 
:)\n for src, node in g.get_all_v().items():\n # Print the node point\n if node.location is None:\n pos = self.get_random_location() # get a elegant location\n node.location = GeoLocation(pos)\n plt.plot(node.location.x, node.location.y, marker='o', markerfacecolor='red', markersize=3, color='yellow')\n plt.text(node.location.x, node.location.y, str(node.key))\n # Print the edge line\n for dest in g.all_out_edges_of_node(src).keys():\n x1 = g.get_all_v()[src].location.x\n y1 = g.get_all_v()[src].location.y\n if g.get_all_v()[dest].location is None:\n pos = self.get_random_location()\n g.get_all_v()[dest].location = GeoLocation(pos)\n g.get_all_v()[dest].location = GeoLocation(pos)\n x2 = g.get_all_v()[dest].location.x\n y2 = g.get_all_v()[dest].location.y\n plt.arrow(x1, y1, x2 - x1, y2 - y1, width=0.00001, linewidth=0.05)\n plt.show()", "def plot_nodes(self,x_shift,y_shift):\n\n if not self.nodes: return # Bounce if option not selected\n\n self.ax.scatter(self.node_crds[:,0]+x_shift*self.pbc[0],self.node_crds[:,1]+y_shift*self.pbc[1],\n marker=\"o\",s=self.ms,c=self.mc,zorder=1)\n\n # for i,c in enumerate(self.node_crds):\n # self.ax.text(c[0],c[1],i,size=8)", "def plot(self):\n layout = self.graph.layout(\"kk\")\n bbox = igraph.BoundingBox(600, 600)\n figure = igraph.Plot(bbox=bbox, background=\"white\")\n bbox = bbox.contract(100)\n figure.add(self.graph, layout = layout, bbox=bbox)\n figure.show()", "def plot_graph(self):\n plt.axis(\"off\")\n pos = nx.kamada_kawai_layout(self.graph)\n return nx.draw_networkx(self.graph, pos=pos, node_size=400)", "def draw(self):\n\t\tnx_graph = self.parse_graph()\n\t\tpos = nx.spring_layout(nx_graph, k=0.15, iterations=20) # to spread out the nodes\n\n\t\tnx.draw(nx_graph, pos, edge_color=\"black\", width=1, linewidths=1, node_size=500, node_color=\"pink\", alpha=0.9, with_labels=True)\n\n\t\tedge_labels = {(edge[0], edge[1]):edge[2] for edge in self.edges}\n\t\tnx.draw_networkx_edge_labels(nx_graph, pos, edge_labels=edge_labels, font_color='red')\n\n\t\tplt.show()", "def draw_graph(graph, node_positions):\n nx.draw_networkx_nodes(graph, node_positions, node_color=set_colors(graph),\n node_size=50)\n nx.draw_networkx_edges(graph, node_positions, width=0.3, alpha=0.5)", "def plot_graph(self, input_graph, NX_GRAPHS):\n self.dgl_graph = input_graph\n self.NX_GRAPHS = NX_GRAPHS\n \n self.get_nodes()\n color_monomer = self.get_colors()\n \n print(dict(zip(range(len(self.nodes_list)), self.nodes_list)))\n print('Key Monomer is', self.nodes_list[np.argmax(self.node_weights)])\n \n fig, ax = plt.subplots()\n nx.draw_networkx(\n dgl.to_networkx(self.dgl_graph),\n arrows=False,\n node_size = 300*10**self.node_weights,\n node_color = [color_monomer[node] for node in self.nodes_list],\n font_size = 18,\n font_color = 'w',\n font_weight = 'bold',)\n\n plt.axis('off')\n ax.set_xlim([1.2*x for x in ax.get_xlim()])\n ax.set_ylim([1.2*y for y in ax.get_ylim()])\n plt.show()", "def paint(self):\n x = []\n y = []\n plt.figure(figsize=(10, 5), facecolor=\"silver\")\n ax = plt.axes()\n for node in self.graph.nodes.values():\n x.append(node.get_pos()[0])\n y.append(node.get_pos()[1])\n ax.scatter(x, y, color=\"black\", s=50)\n xl = ax.get_xlim()[1] - ax.get_xlim()[0]\n yl = ax.get_ylim()[1] - ax.get_ylim()[0]\n for nd in self.graph.nodes.values():\n for ed in self.graph.all_out_edges_of_node(Node.get_key(nd)).keys():\n desti: Node = self.graph.get_node(ed)\n destx = desti.get_pos()[0] - nd.get_pos()[0]\n desty = desti.get_pos()[1] - nd.get_pos()[1]\n 
ax.arrow(nd.get_pos()[0], nd.get_pos()[1], destx, desty, head_width=xl * 0.007,\n length_includes_head=True,\n head_length=yl * 0.02, width=xl * 0.0001 * yl, color='grey')\n plt.title(\"Your graph!\")\n plt.show()", "def draw_graph(self):\r\n G=nx.Graph()\r\n \r\n list_location1 = []\r\n list_location2 = []\r\n list_location3 = []\r\n list_location4 = []\r\n \r\n for citizen in self.citizens:\r\n G.add_node(citizen.id)\r\n if citizen.location == 1:\r\n list_location1.append(citizen.id)\r\n elif citizen.location == 2:\r\n list_location2.append(citizen.id)\r\n elif citizen.location == 3:\r\n list_location3.append(citizen.id)\r\n else: \r\n list_location4.append(citizen.id)\r\n\r\n for citizen in self.citizens:\r\n for friend in citizen.friends:\r\n G.add_edge(citizen.id,friend.id)\r\n\r\n pos = nx.random_layout(G)\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location1, node_color='r')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location2, node_color='g')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location3, node_color='b')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location4, node_color='y')\r\n nx.draw_networkx_edges(G,pos, width=1)\r\n\r\n plt.show()", "def plot_graph(self, graph, subplot=False, axes=None):\n if subplot:\n plt.sca(axes[1, 1])\n axes[1, 1].axis('off')\n else:\n plt.figure(figsize=(5, 5))\n if len(graph.nodes) == 4:\n pos = {(0, 0): [0, 1], (0, 1): [1, 1], (1, 0): [0, 0], (1, 1): [1, 0]}\n else:\n pos = nx.circular_layout(graph)\n nx.draw_networkx_nodes(\n graph, pos, node_size=1800, node_color='w', edgecolors='k')\n nx.draw_networkx_edges(\n graph,\n pos,\n node_size=1800,\n edge_color='k',\n arrowstyle='->',\n arrowsize=10,\n width=3)\n nx.draw_networkx_labels(self.G, pos, {x: x for x in self.V}, font_size=14)", "def plot_network(genome):\n g = genome.n\n # width = g.graph[\"size\"]\n # height = g.graph[\"size\"]\n\n # fig = plt.figure(figsize=(width,height))\n fig = plt.figure()\n fig.patch.set_facecolor('white')\n ax = fig.add_subplot(111, aspect='equal')\n # ax.set_axis_off()\n\n # collision_coords = find_collisions(genome)\n # das_coords = find_das_extended(genome)\n # slp_coords = find_slp(genome)\n slp_nodes = find_attacker_path(genome.n)\n\n # Plot the parent-child tree\n for n in g.nodes_iter():\n if g.node[n][\"parent\"] is not None:\n _line(g.node[n][\"coord\"], g.node[g.node[n][\"parent\"]][\"coord\"], zorder=0, color='k')\n\n for n in g.nodes_iter():\n coord = g.node[n][\"coord\"]\n shape = _circles\n colour = 'b'\n s = 0.4\n if n in slp_nodes:\n shape = _hexagons\n colour = 'y'\n s = 0.45\n if n == g.graph[\"source\"]:\n shape = _squares\n colour = 'g'\n if n == g.graph[\"sink\"]:\n shape = _octogons\n colour = 'k'\n s = 0.45\n shape(coord[0], coord[1], s, fc=\"white\", ec=colour)\n if(len(str(g.node[n][\"slot\"])) == 1):\n ax.text(coord[0]-0.15, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 2):\n ax.text(coord[0]-0.25, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 3):\n ax.text(coord[0]-0.4, coord[1]+0.15, str(g.node[n][\"slot\"]))\n else:\n ax.text(coord[0]-0.5, coord[1]+0.15, str(g.node[n][\"slot\"]))\n\n\n plt.gca().invert_yaxis()\n fig.show()", "def plotGraph(self, title = \"Multi Layer Perceptron (MLP)\"):\n graph, pos, colorMap = self.getGraph()\n\n fig = plt.figure()\n fig.canvas.set_window_title(\"Neural Network\")\n plt.plot()\n nx.draw_networkx_nodes(graph,pos, node_color = colorMap)\n 
nx.draw_networkx_edges(graph,pos)\n plt.axis('off')\n plt.title(title)\n #plt.savefig(\"autoencoder.svg\", transparent = True)\n plt.show()", "def plot_graph():\n name = request.args.get('instance')\n name = str(name)\n distance = request.args.get('distance')\n path = request.args.get('path')\n if name == 'Custom':\n coords = request.args.get('coords')\n coords = str(coords)\n nodes = custom_nodes(coords)\n else:\n nodes = create_nodes(name)\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n\n axis.set_title(name + \" - Distance: \"+ str(distance))\n path = str(path).split(',')\n path = [int(i) for i in path]\n for i in range(len(path) - 1):\n\n start_node = nodes[path[i]]\n x1, y1 = start_node.x, start_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[i]))\n axis.text(x1,y1, str(path[i]))\n end_node = nodes[path[i+1]]\n x2, y2 = end_node.x, end_node.y\n axis.plot([x1,x2], [y1, y2])\n\n last_node = nodes[path[len(path)-1]]\n x1, y1 = last_node.x, last_node.y\n axis.text(x1,y1, str(path[len(path)-1]))\n\n begin_node = nodes[path[0]]\n x2, y2 = begin_node.x, begin_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[len(path)-1]))\n axis.plot([x1,x2], [y1, y2])\n\n output = io.BytesIO()\n FigureCanvas(fig).print_png(output)\n return Response(output.getvalue(), mimetype=\"image/png\")", "def plot_nodes(self,ax=None,mask=None,values=None,sizes=20,labeler=None,clip=None,\n **kwargs):\n ax=ax or plt.gca()\n \n if mask is None:\n mask=~self.nodes['deleted']\n\n if clip is not None: # convert clip to mask\n mask=mask & self.node_clip_mask(clip)\n\n if values is not None:\n values=values[mask]\n kwargs['c']=values\n\n if labeler is not None:\n if labeler=='id':\n labeler=lambda n,rec: str(n)\n \n # weirdness to account for mask being indices vs. bitmask\n for n in np.arange(self.Nnodes())[mask]: # np.nonzero(mask)[0]:\n ax.text(self.nodes['x'][n,0],\n self.nodes['x'][n,1],\n labeler(n,self.nodes[n]))\n\n coll=ax.scatter(self.nodes['x'][mask][:,0],\n self.nodes['x'][mask][:,1],\n sizes,\n **kwargs)\n request_square(ax)\n return coll", "def draw_points():\n\n for node in self._nodes:\n\n x = node_properties[\"node_x\"][node]\n y = node_properties[\"node_y\"][node]\n ax.scatter(\n x,\n y,\n zorder=10,\n edgecolors=\"k\",\n linewidths=0.5,\n **self.get_node_data(node),\n )\n\n for label in self._nodes:\n\n x = node_properties[\"label_x\"][label]\n y = node_properties[\"label_y\"][label]\n rotation = node_properties[\"rotation\"][label]\n ha = node_properties[\"ha\"][label]\n\n attr = {**dict(backgroundcolor=\"white\"), **text_attr}\n ax.text(\n x,\n y,\n textwrap.shorten(text=label, width=TEXTLEN),\n rotation=rotation,\n ha=ha,\n va=\"center\",\n rotation_mode=\"anchor\",\n bbox=dict(\n facecolor=\"w\",\n alpha=1.0,\n edgecolor=\"gray\",\n boxstyle=\"round,pad=0.5\",\n ),\n zorder=11,\n **attr,\n )", "def create_graph(self):\n robot_pix = int(math.ceil(self.robot.size / self.resolution))\n ii = 0\n jj = 0\n for i in range(0, self.height, robot_pix):\n jj = 0\n for j in range(0, self.width, robot_pix):\n block = self.occ_grid[i:i+robot_pix, j:j+robot_pix].flatten()\n avg = np.mean(block)\n robot_block = self.tesselation_image[i:i+robot_pix, j:j+robot_pix].flatten()\n n_occur = np.bincount(robot_block)\n block_id = np.argmax(n_occur)\n \n p = Pose()\n p.position.x = self.resolution * j + self.resolution / 2.0 + self.origin.position.x\n p.position.y = self.height * self.resolution - (self.resolution * i + self.resolution / 2.0) + self.origin.position.y\n node = Node(ii, jj, p)\n idx = 
np.where(block > 20)\n if block_id == self.robot.robot_id:\n if 0 <= avg <= 20:\n print(\"Node in path\", node)\n node.valid = True\n else:\n node.valid = False\n elif block_id == 0:\n node.valid = False\n else:\n node.belongs = False\n self.nodes[ii,jj] = node\n jj += 1\n ii += 1\n\n\n height, width = self.nodes.shape\n print(\"Node shape: \", self.nodes.shape)\n for i in range(height):\n for j in range(width):\n min_i = max(0, i-1)\n max_i = min(height - 1, i+1) + 1\n min_j = max(0, j-1)\n max_j = min(width - 1, j+1) + 1\n\n node = self.nodes[i,j]\n neighbors = self.nodes[min_i:max_i, min_j:max_j].flatten()\n for n in neighbors:\n if not n or not node:\n print(\"None %d-%d\"%(i,j))\n continue\n if n != node:\n if n.valid:\n print(\"Neighbor appended\")\n self.nodes[i,j].neighbors.append(n)\n else:\n self.nodes[i,j].obstacle_neighbors.append(n)\n print(\"Graph is created!\")", "def plot_nodes(self, filename, **kwargs):\n\n g = graph.create_nx_graph(self.es, filename=filename, **kwargs)\n\n return g", "def fullgraphplot(time_lower,time_upper):\n\n edges_list,node_list,title_list = graphformation(time_lower,time_upper)\n node_size = []\n for i in range(len(node_list)):\n node_size.append(5)\n g = Network(\n height=\"750px\",\n width=\"100%\",\n bgcolor=\"#222222\",\n font_color=\"white\")\n g.add_nodes(node_list,label=node_list,title=title_list, size= node_size)\n g.add_edges(edges_list)\n g.show(\"nx.html\")\n return", "def _draw_nodes(ax: mpl.axes.Subplot, graph: nx.classes.Graph,\n pos: dict, draw_labels: False) -> dict:\n degree = np.array([deg for node, deg in graph.degree], dtype=float)\n degree /= degree.sum()\n\n flare_kwargs = {'alpha' : 0.2,\n 'edgecolor': (0, 0, 0, 1),\n 'facecolor': None}\n\n node_kwargs = {'alpha' : 0.8,\n 'edgecolor': (0, 0, 0, 1),\n 'facecolor': None}\n\n nodes = {}\n node_params = zip(pos.items())\n\n for i, (label, xy) in enumerate(pos.items()):\n size = graph.nodes[label]['size']\n fsize = graph.nodes[label]['fsize']\n flare_kwargs['facecolor'] = 'C{}'.format(i)\n flare = patches.Circle(xy, fsize, **flare_kwargs)\n\n node_kwargs['facecolor'] = 'C{}'.format(i)\n node = patches.Circle(xy, size, **node_kwargs)\n\n ax.add_patch(flare)\n ax.add_patch(node)\n if draw_labels:\n font_style = {'size':15, 'weight':'bold'}\n text_kwargs = {'color': (0, 0, 0, .8),\n 'verticalalignment': 'center',\n 'horizontalalignment': 'center',\n 'fontdict': font_style}\n ax.text(*xy, i+1, **text_kwargs)\n\n nodes[label] = node\n return nodes", "def display_graph(variables, relations):\n graph = as_networkx_graph(variables, relations)\n\n # Do not crash if matplotlib is not installed\n try:\n import matplotlib.pyplot as plt\n\n nx.draw_networkx(graph, with_labels=True)\n # nx.draw_random(graph)\n # nx.draw_circular(graph)\n # nx.draw_spectral(graph)\n plt.show()\n except ImportError:\n print(\"ERROR: cannot display graph, matplotlib is not installed\")", "def plot_graph(G):\r\n pos = nx.random_layout(G)\r\n nx.draw(G, pos)\r\n edge_labels = dict([((u, v, ), d['label']) for u, v, d in\r\n G.edges(data=True)])\r\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\r\n nx.draw_networkx_labels(G, pos, labels={i:i for i in G.nodes()},\r\n font_size=16)\r\n plt.show()", "def drawGraph(G, novel_title):\n # Drawing with network x\n page_rank = nx.pagerank(G)\n \n pos = nx.nx_pydot.graphviz_layout(G)\n plt.figure(figsize=(15,10))\n\n font = {'fontsize' : 14}\n plt.title('Character Network for: ' + novel_title, font)\n \n label_pos = {}\n for i in pos:\n label_pos[i] = (pos[i][0] 
, pos[i][1] - (math.exp(page_rank[i]) * 12))\n \n labels = nx.draw_networkx_labels(G, label_pos, font_weight = 'bold', font_size = 9)\n nodes = nx.draw_networkx_nodes(G, pos, \n node_size = [2000 * page_rank[i] for i in list(nx.nodes(G))],\n node_color = range(len(nx.pagerank(G))),\n cmap = plt.cm.Spectral)\n \n nodes.set_edgecolor('black')\n \n nx.draw_networkx_edges(G, pos, edge_color = 'grey', alpha = .70)\n plt.axis('off')\n plt.savefig('test.png')\n plt.show()", "def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()", "def showGraph(self):\r\n self.graph_button['state'] = 'disabled'\r\n # Draw connection Graph\r\n self.axGraph.set_visible(True)\r\n nx.draw(self.G, ax=self.axGraph, with_labels=True)\r\n self.canvasPlot.draw()\r\n self.canvasPlot.flush_events()", "def visualize(G, color=None, figsize=(5, 5)):\n plt.figure(figsize=figsize)\n plt.xticks([])\n plt.yticks([])\n nx.draw_networkx(G,\n pos=nx.spring_layout(G, seed=42),\n with_labels=True,\n node_color=color,\n cmap=\"Set2\")\n plt.show();", "def plot_edges(self, node_list):\n tree = MarkerArray()\n id = 1\n for node in self.node_list:\n if node.parent:\n # edge between nodes\n path = Marker()\n path.header.frame_id = \"map\"\n path.header.stamp = rospy.get_rostime()\n path.ns = \"markers\"\n path.id = id\n id += 1\n path.type = path.LINE_STRIP\n path.action = path.ADD\n path.scale.x = self.rviz_tuning_plt\n path.color.a = 1.0\n\n path.color.r = 1.0\n path.color.g = 0.7\n path.color.b = 0.0\n\n path.lifetime = rospy.Duration()\n path.pose.orientation.w = 1.0\n\n p1 = Point()\n p1.x = node.parent.x\n p1.y = node.parent.y\n p1.z = 0.02\n path.points.append(p1)\n\n p2 = Point()\n p2.x = node.x\n p2.y = node.y\n p2.z = 0.02\n path.points.append(p2)\n \n tree.markers.append(path)\n\n self.pub_edges.publish(tree)", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def plot(df, ax, myself, names):\n\n df = df.sort_values(by=\"time\", ascending=True)\n offset = df.iloc[0][\"time\"]\n\n nodes = {}\n for name in names:\n nodes[name] = {\n \"master\": [],\n \"observer\": []\n }\n\n for (_id, row) in df[df[\"type\"] != \"R\"].iterrows():\n if row[\"type\"] == \"M\":\n time = row[\"time\"]\n target = row[\"args\"]\n for (name, blocks) in nodes.items():\n if name == target:\n close_block(blocks[\"observer\"], time)\n open_block(blocks[\"master\"], time)\n else:\n open_block(blocks[\"observer\"], time)\n elif row[\"type\"] == \"T\":\n time = row[\"time\"]\n target = row[\"args\"]\n blocks = nodes[target]\n close_block(blocks[\"master\"], time)\n open_block(blocks[\"observer\"], time)\n elif row[\"type\"] == \"F\":\n time = row[\"time\"]\n for blocks in nodes.values():\n close_block(blocks[\"master\"], time)\n close_block(blocks[\"observer\"], time)\n\n for (index, blocks) in enumerate(nodes.values()):\n plot_blocks(ax, index, blocks[\"master\"], offset, \"tab:blue\")\n plot_blocks(ax, index, blocks[\"observer\"], offset, \"tab:orange\")\n\n x_ticks = range(0, 10)\n y_ticks = [10, 20, 30, 40, 50]\n\n ax.title.set_text(\"View of node: {0}\".format(myself))\n ax.set_xlabel(\"seconds since start\")\n ax.set_xticks(x_ticks)\n ax.set_yticks(y_ticks)\n ax.set_yticklabels(names)\n ax.grid(True)\n\n # Add annotations:\n\n index = list(nodes.keys()).index(myself)\n for (_id, row) in 
df[df[\"type\"] == \"R\"].iterrows():\n x = (row[\"time\"] - offset).total_seconds()\n y = y_ticks[index]\n ax.annotate(\n \"Round {0}\".format(row[\"args\"]),\n xycoords=\"data\",\n xy=(x, y),\n xytext=(x, y + 5),\n arrowprops=dict(\n facecolor=\"black\",\n shrink=0.05\n )\n )", "def set_node_positions(self):", "def visualize_dependency_graph(self):\n if self.graph is None:\n self.logger.error(\"Graph value none cannot be plotted\")\n return\n\n nx.draw(self.graph, cmap=plt.get_cmap('jet'), with_labels=True)\n plt.show()", "def plot_graphy_resilience_random():\n \n global counter\n counter += 1\n random_graph = make_random_undirected_graph(1239, 0.004)\n attack_order = random_order(random_graph)\n random_resilience = compute_resilience(random_graph, attack_order)\n plt.plot(range(len(random_resilience)), random_resilience, '-b', label= 'random, p =0.004')\n \n synthetic_undirected_graph = make_synthetic_undirected_graph(1239, 5)\n attack_order = random_order(synthetic_undirected_graph)\n synthetic_resilience = compute_resilience(synthetic_undirected_graph, attack_order)\n plt.plot(range(len(synthetic_resilience)), synthetic_resilience, '-r', label = 'UPA, m = 5')\n \n network_graph = load_graph(NETWORK_URL)\n attack_order = random_order(network_graph)\n network_resilience = compute_resilience(network_graph, attack_order)\n plt.plot(range(len(network_resilience)), network_resilience, '-g', label = 'Network')\n \n plt.legend(loc='upper right')\n \n plt.title(\" plot of graph resilience\")\n plt.xlabel(\"number of nodes removed\")\n plt.ylabel(\"the size of the largest connect component \")\n plt.savefig(\"graph_resilience_\"+str(counter)+\".png\", dpi = 72)\n plt.gcf().clear() # hose-keeping", "def plot(nodes=None, fig: Optional[plt.Figure] = None, ax=None, view: str = 'L', edge_weights=None, frames=None, edges=None, template=None, network=None,\n edge_color='k', node_size=1, node_color='salmon', node_type='circles', hemisphere='both', highlight_nodes=None, highlight_edges=None, **kwargs):\n # Load default settings, then update with kwargs\n profile = _load_profile(**kwargs)\n if network is not None:\n if nodes is not None or edges is not None:\n raise ValueError('Network keyword arugment is specified along with edges or nodes.')\n elif isinstance(network, nx.Graph):\n nodes, edges, = _from_networkx_input(network, **profile)\n else:\n raise ValueError('Unnown netowrk input')\n\n # Check and load the input of nodes and edges\n nodes, nodeimg, node_colorby, profile['node_columnnames'] = _process_node_input(\n nodes, profile['nodes_df'], node_color, profile['node_columnnames'], template, profile['template_voxelsize'])\n edges, edge_weights = _process_edge_input(edges, edge_weights, **profile)\n # Set up legend row\n # TODO compact code into subfunction\n legends = None\n legendrows = 0\n if isinstance(profile['showlegend'], list):\n legends = profile['showlegend']\n legendrows = len(legends)\n elif profile['showlegend'] is True:\n # Only plot size legend is sphere/circle and string or list input\n # TODO setup_legend is a little clunky and could be fixed\n if node_type != 'parcel' and not isinstance(node_size, (float, int)):\n node_sizelegend = profile['node_sizelegend']\n legends = _setup_legend(\n node_size, node_sizelegend, 'node_size', legends)\n # Only plot color legend if colorby\n if node_colorby is not None:\n node_colorlegend = profile['node_colorlegend']\n legends = _setup_legend(\n node_colorby, node_colorlegend, 'node_color', legends)\n if legends is not None:\n legendrows = 
len(legends)\n\n # Figure setup\n # Get preset views\n if isinstance(view, str):\n if view.startswith('preset'):\n view, hemisphere = _get_presetviews(view)\n # Get number of non-legend rowsnon\n nrows, view, frames = _nrows_in_fig(view, frames)\n\n # if neither title nor subtitles are set, only view name(s) is/are shown\n if profile['subtitles'] == 'auto' and profile['title'] == 'auto':\n profile['subtitles'] = 'auto'\n profile['title'] = None\n # if title is set to None, nothing is shown (view name(s) is/are removed)\n elif profile['title'] is None and profile['subtitles'] == 'auto':\n profile['subtitles'] = None\n\n if type(profile['subtitles']) is list:\n if len(profile['subtitles']) != frames*nrows:\n raise ValueError('Length subtitles must be equal to number of sub-plots')\n\n # Init figure, if not given as input\n if ax is None:\n fig, gridspec = _init_figure(frames, nrows, legendrows)\n else:\n expected_ax_len = (nrows * frames)\n ax, gridspec = _check_axinput(ax, expected_ax_len)\n\n # Set node_color to colorby argument\n if node_colorby is not None:\n node_color = _get_colorby_colors(nodes, node_colorby, **profile)\n if isinstance(edge_color, str) and edges is not None:\n if edge_color in edges:\n edge_color = _get_colorby_colors(edges, edge_color, 'edge', **profile)\n if highlight_nodes is not None and highlight_edges is not None:\n raise ValueError('Cannot highlight based on edges and nodes at the same time.')\n if highlight_nodes is not None:\n node_color, highlight_nodes, profile['node_alpha'] = _highlight_nodes(\n nodes, node_color, highlight_nodes, **profile)\n\n if highlight_edges is not None:\n edges, highlight_edges = _process_highlightedge_input(edges, highlight_edges, **profile)\n edge_color, highlight_edges, profile['edge_alpha'] = _highlight_edges(edges, edge_color, highlight_edges, **profile)\n # Get the nodes that are touched by highlighted edges\n nodes_to_highlight = edges[highlight_edges == 1]\n nodes_to_highlight = np.unique(nodes_to_highlight[profile['edge_columnnames']].values)\n node_color, highlight_nodes, profile['node_alpha'] = _highlight_nodes(\n nodes, node_color, nodes_to_highlight, **profile)\n\n # Rename ax as ax_in and prespecfiy ax_out before forloop\n ax_in = ax\n ax_out = []\n scaled_nodes = False\n # TODO remove double forloop and make single forloop by running over nrows and frames\n # TODO add test for single image across frames and copy axis for speed.\n for ri in range(nrows):\n # Get the azim, elev and arrowaxis for each row\n azim, elev, arrowaxis_row, viewtype = _get_view(\n view[ri], frames, arrowaxis=profile['arrowaxis'])\n for fi in range(frames):\n axind = (ri * nrows) + fi\n # get_frame_input allows input arguments to be string or list of different arguments for different plots\n hemi_frame = get_frame_input(hemisphere, axind, ri, fi, nrows, frames)\n subtitle_frame = get_frame_input(profile['subtitles'], axind, ri, fi, nrows, frames)\n template_style_frame = get_frame_input(profile['template_style'], axind, ri, fi, nrows, frames)\n # Set up subplot\n if ax_in is None:\n # Dont use 3d projection for connectivity matrices\n if viewtype[fi] == 'c':\n ax = fig.add_subplot(gridspec[ri, fi])\n else:\n ax = fig.add_subplot(gridspec[ri, fi], projection='3d')\n elif isinstance(ax_in, list):\n # here ax can only be a 1d list, not 2d list.\n ax = ax_in[axind]\n else:\n ax = ax_in\n affine = None\n if template is not None and viewtype[fi]=='b':\n affine = _plot_template(ax, template_style_frame, template,\n hemisphere=hemi_frame,\n 
azim=azim[fi], elev=elev[fi],\n **profile)\n\n # Template voxels will have origin at 0,0,0\n # It is easier to scale the nodes from the image affine\n # Then to rescale the ax.voxels function\n # So if affine is not None, nodes get scaled in relation to origin and voxelsize,\n # If node coords are derived from nodeimg, this has already been taken care of.\n if nodes is not None and nodeimg is None and viewtype[fi]=='b' and scaled_nodes == False:\n nodes = _scale_nodes(nodes, profile['node_columnnames'], affine)\n scaled_nodes = True\n # nodes and subplot may change for each frame/subplot\n # e.g. if hemisphere is specified\n nodes_frame = None\n if nodes is not None and viewtype[fi]=='b':\n nodes_frame = nodes.copy()\n nodes_frame = _select_single_hemisphere_nodes(\n nodes_frame, profile['node_columnnames'][0], affine, hemi_frame)\n\n if node_type == 'spheres':\n _plot_spheres(ax, nodes_frame, node_color=node_color,\n node_size=node_size, **profile)\n elif node_type == 'circles':\n _plot_nodes(ax, nodes_frame, node_color=node_color,\n node_size=node_size, **profile)\n elif node_type == 'parcels':\n _plot_parcels(ax, nodeimg, cmap=node_color,\n hemisphere=hemi_frame, **profile)\n if edges is not None and viewtype[fi]=='b':\n edges_frame = edges.copy()\n _plot_edges(ax, nodes_frame, edges_frame, edgewidth=edge_weights,\n edge_color=edge_color, highlight_nodes=highlight_nodes, **profile)\n if arrowaxis_row is not None and viewtype[fi]=='b':\n _add_axis_arrows(ax, dims=arrowaxis_row,\n origin=profile['arroworigin'],\n azim=azim[fi], elev=elev[fi], **profile)\n if viewtype[fi] == 's' and nodes is not None and edges is not None:\n _plot_springlayout(ax, nodes=nodes, edges=edges, node_color=node_color, node_size=node_size,\n edge_color=edge_color, edge_weights=edge_weights, highlight_nodes=highlight_nodes, **profile)\n if viewtype[fi] == 'c' and edges is not None:\n _plot_connectivitymatrix(ax, edges=edges, nodes=nodes, node_color=node_color, node_colorby=node_colorby, **profile)\n # Set view angle for 3d projections\n if viewtype[fi] != 'c':\n ax.view_init(azim=azim[fi], elev=elev[fi])\n\n _add_subplot_title(ax, azim[fi], elev[fi], subtitle_frame, hemi_frame, viewtype[fi], **profile)\n _add_title(fig, **profile)\n\n if viewtype[fi] != 'c':\n # Fix the aspect ratio\n ax.set_box_aspect([1, 1, 1])\n _set_axes_equal(ax)\n ax.axis('off')\n # Append ax to ax_out to store it.\n ax_out.append(ax)\n\n # Add legends to plot\n if legends is not None and profile['gif'] is False:\n for li, legend in enumerate(legends):\n # setup legend subplot. 
Goes in centre or centre2 subplots\n spind = gridspec.ncols\n legend_span = profile['legend_span']\n if legend_span is not None:\n if legend_span is int:\n legend_subplotp_colind = legend_span\n else:\n legend_subplotp_colind= slice(legend_span[0], legend_span[1])\n elif np.remainder(spind, 2) == 0:\n # if number of columns is even, center it over the middle two columns\n # by using slice() on the GridSpec.\n legend_subplotp_colind = slice(int((spind / 2) - 1), int(spind / 2) + 1)\n else:\n legend_subplotp_colind = int(np.round(spind / 2) - 1)\n ax = fig.add_subplot(gridspec[nrows + li, legend_subplotp_colind])\n if legend == 'node_size':\n ax = _add_node_size_legend(ax, nodes, node_size, **profile)\n if legend == 'node_color':\n ax = _add_node_color_legend(\n ax, nodes, node_colorby, node_color, **profile)\n ax.axis('off')\n #ax = _add_size_legend(ax, nodes, node_size, node_scale)\n ax_out.append(ax)\n\n # Title on top of the figure\n if profile['title'] is not None:\n _add_title(fig, **profile)\n\n fig.tight_layout()\n\n # If gif is requested, create the gif.\n if profile['gif'] is True:\n _plot_gif(fig, ax_out, profile['gif_duration'], profile['savename'], profile['gif_loop'])\n # Save figure if set\n elif profile['savename'] is not None:\n if profile['savename'].endswith('.png'):\n fig.savefig(profile['savename'], dpi=profile['fig_dpi'])\n elif profile['savename'].endswith('.svg'):\n fig.savefig(profile['savename'], dpi=profile['fig_dpi'])\n else:\n fig.savefig(profile['savename'] + '.png', dpi=profile['fig_dpi'])\n fig.savefig(profile['savename'] + '.svg', dpi=profile['fig_dpi'])\n\n return (fig, ax_out)", "def _plot_robot(self):\n try:\n x = 200\n y = 200\n self.ax1.plot(x, y, marker='o', markersize=10, linestyle='None')\n except Exception as err:\n rospy.loginfo(err)", "def draw_graph(self, node_size=2000, node_color='yellow', edge_color='red'):\n G, node_label_dict = self.make_graph()\n edge_label_dict = {(c.source_name, c.target_name):(c.params.kernel_size) for c in self.layers}\n plt.figure(figsize=(12,12))\n pos = nx.nx_pydot.graphviz_layout(G, prog='dot')\n nx.draw(G, pos, node_size=node_size, node_color=node_color, edge_color=edge_color,alpha=0.4)\n nx.draw_networkx_labels(G, pos, node_label_dict, font_size=10,font_weight=640, alpha=0.7, font_color='black')\n nx.draw_networkx_edge_labels(G, pos, edge_label_dict, font_size=20, font_weight=640,alpha=0.7, font_color='red')\n plt.show()", "def plot(self, **kwds):\n c0 = 'blue' # self.latex_options()[\"color_increasing\"]\n c1 = 'red' # self.latex_options()[\"color_decreasing\"]\n G = self.poset().hasse_diagram()\n G.set_pos(self._find_node_positions())\n for a, b, c in G.edges():\n if a < b:\n G.set_edge_label(a, b, 0)\n else:\n G.set_edge_label(a, b, 1)\n return G.plot(color_by_label={0: c0, 1: c1}, **kwds)", "def draw_graph_default(graph):\r\n\r\n nx.draw_networkx(graph, with_labels=True)\r\n plt.show()", "def print(self):\n # it would be nice just to add one point instead of printing all again from scratch\n stones_player_0 = [(i, j) for i in range(self.size) for j in range(self.size) if self.board[i, j] == -1]\n stones_player_1 = [(i, j) for i in range(self.size) for j in range(self.size) if self.board[i, j] == 1]\n plt.plot([0, self.size-1, 0, self.size-1], [0, 0, self.size-1, self.size-1], marker='x', ls='')\n plt.plot(*zip(*stones_player_0), marker='o', color='r', ls='')\n plt.plot(*zip(*stones_player_1), marker='o', color='b', ls='')\n\n plt.draw()\n plt.show(block=False)", "def visualise_graph_on_circle(self, 
save_to_file, file_name) -> None:\n nodes_number = len(self.adjacency_matrix)\n phi = 2 * math.pi / nodes_number\n # estimate graph radius\n graph_radius = nodes_number * 1.5\n\n nodes = []\n\n for node in range(nodes_number):\n nodes.insert(node, (math.cos(phi * node) * graph_radius, math.sin(phi * node) * graph_radius))\n\n plt.close()\n figure, axes = plt.subplots()\n axes.set_aspect(1)\n figure.set_size_inches(8, 8)\n\n for i in range(len(self.adjacency_matrix)):\n for j in range(len(self.adjacency_matrix[0])):\n if self.adjacency_matrix[i][j] == 1:\n (x, y) = nodes[i]\n (x2, y2) = nodes[j]\n plt.plot([x / 15 + 0.5, x2 / 15 + 0.5], [y / 15 + 0.5, y2 / 15 + 0.5], 'r-', linewidth=2, zorder=1)\n\n i = 0\n for node in nodes:\n (x, y) = node\n i += 1\n circle_border = plt.Circle((x / 15 + 0.5, y / 15 + 0.5), radius=0.07 * nodes_number / 10, color='black',\n zorder=2)\n circle = plt.Circle((x / 15 + 0.5, y / 15 + 0.5), radius=0.06 * nodes_number / 10, color='green', zorder=3)\n axes.add_patch(circle_border)\n axes.add_patch(circle)\n if nodes_number <= 20:\n font_size = 16\n else:\n font_size = 20\n axes.annotate(i, xy=(x / 15 + 0.5, y / 15 + 0.5), fontsize=font_size, color='white',\n verticalalignment='center', horizontalalignment='center')\n\n plt.axis(\"off\")\n axes.set_aspect('equal')\n\n if save_to_file:\n plt.rcParams['savefig.format'] = 'png'\n plt.savefig(\"data/\" + file_name)\n else:\n plt.show()", "def plot_nodes(ax, Omega, radius):\n\n # calculate the slope of the line from Omega\n\n x_a = radius * np.sin(Omega)\n y_a = radius * np.cos(Omega)\n\n x_d = radius * np.sin(Omega + np.pi)\n y_d = radius * np.cos(Omega + np.pi)\n\n # plot in two segments\n ax.plot([0, x_a], [0, y_a], color=\"r\")\n ax.plot([0, x_d], [0, y_d], color=\"b\")", "def add_nodes(self):\n for node_id in self.nodes:\n x = self.nodes[node_id][0]\n y = self.nodes[node_id][1]\n if node_id == 0:\n self.G.add_node(\"Source\", x=x, y=y, demand=0)\n self.G.add_node(\"Sink\", x=x, y=y, demand=0)\n else:\n self.G.add_node(node_id, x=x, y=y, demand=0)", "def plot_bare_graph(self, show_plot=True, clf: bool = True):\n\n if clf:\n for i in plt.get_fignums():\n if plt.figure(i).get_label()[0:5] == \"(NXG)\":\n plt.close(plt.figure(i).get_label())\n # Close plot with the same name as the one we're creating (if applies)\n for i in plt.get_fignums():\n if plt.figure(i).get_label() == f\"(NXG) GEU {self.catalog}\":\n plt.close(f\"(NXG) GEU {self.catalog}\")\n # Create plot\n plt.figure(f\"(NXG) GEU {self.catalog}\")\n\n # Set node colors by domain\n\n domain_palette = ['#74299E',\n '#235785',\n '#7C1F48',\n '#B48121',\n '#5D6814',\n '#0F5A0F',\n '#818E19',\n '#1818A8',\n '#0300A7']\n colors = {'TRANSPORTE - TX': domain_palette[0],\n 'TRANSPORTE - DX': domain_palette[1],\n 'TX - RADIOENLACES Y SATELITAL': domain_palette[2],\n 'ACCESO - FIJA': domain_palette[3],\n 'ACCESO - MOVIL': domain_palette[4],\n 'CORE VOZ': domain_palette[5],\n 'ENTORNO': domain_palette[6],\n 'CMTS': domain_palette[7],\n 'Other': domain_palette[8]}\n\n # If GEU has many domains, paint each node with its corresponding color\n if self.has_multiple_domains:\n color_map = []\n for node in self.graph.nodes:\n for mat in self.materials:\n # If it finds a match, use object Material to get node's domain\n if mat.catalog == node:\n domain = mat.domain\n color_map.append(colors[domain])\n color_map_in_use = color_map\n # If that's not the case, the only color is the corresponding one\n else:\n try:\n color_map_in_use = colors[self.domain]\n except:\n color_map_in_use 
= domain_palette[7]\n\n # Plot graph\n nx.draw(self.graph, with_labels=True, node_color=color_map_in_use)\n if show_plot:\n plt.show()\n else:\n return plt", "def draw_graph(G, pos=None):\n if not pos:\n pos = nx.spring_layout(G)\n\n fig_size = plt.rcParams[\"figure.figsize\"]\n fig_size[0] = 10\n fig_size[1] = 8\n\n plt.figure()\n\n nx.draw_networkx_nodes(G, pos)\n nx.draw_networkx_edges(G, pos)\n nx.draw_networkx_labels(G, pos)\n\n for node in G.nodes():\n x, y = pos[node]\n plt.text(x, y + 0.1, \"${}$\".format(latex(G.formula_conj(node))), fontsize=16, horizontalalignment='center')\n \n plt.axis(\"off\")", "def visualise(self) -> None:\n nx_graph = nx.DiGraph()\n\n for v in self._vertices:\n if not v.predicate:\n name = v.name.split(\"/\")[-1]\n nx_graph.add_node(name, name=name, pred=v.predicate)\n\n for v in self._vertices:\n if not v.predicate:\n v_name = v.name.split(\"/\")[-1]\n # Neighbors are predicates\n for pred in self.get_neighbors(v):\n pred_name = pred.name.split(\"/\")[-1]\n for obj in self.get_neighbors(pred):\n obj_name = obj.name.split(\"/\")[-1]\n nx_graph.add_edge(v_name, obj_name, name=pred_name)\n\n plt.figure(figsize=(10, 10))\n _pos = nx.circular_layout(nx_graph)\n nx.draw_networkx_nodes(nx_graph, pos=_pos)\n nx.draw_networkx_edges(nx_graph, pos=_pos)\n nx.draw_networkx_labels(nx_graph, pos=_pos)\n names = nx.get_edge_attributes(nx_graph, \"name\")\n nx.draw_networkx_edge_labels(nx_graph, pos=_pos, edge_labels=names)", "def _PlotGraph(self, event):\n self._rcvLock.acquire()\n for j in event.data[0].keys():\n data = event.data[0][j]\n #print data\n line = []\n for k in data.keys():\n if k in COLORS.keys():\n c = COLORS[k]\n else:\n c = 'black'\n line.append(plot.PolyLine(data[k], colour=c, width=1,\n legend=\"Node %d\"%(k,)))\n # To draw markers: default colour = black, size = 2\n # shapes = 'circle', 'cross', 'square', 'dot', 'plus'\n #marker = plot.PolyMarker(event.data[1], marker='triangle')\n\n # set up text, axis and draw\n if j == ERRORPLOT:\n t = \"Synchronization Error\"\n xa = \"Time [s]\"\n ya = \"Error [ms]\"\n elif j == TEMPPLOT:\n t = \"Temperature Index\"\n xa = \"Time [s]\"\n ya = \"Index\"\n elif j == SKEWPLOT:\n t = \"Frequency Error\"\n xa = \"Time [s]\"\n ya = \"Frequency Error [ppm]\"\n gc = plot.PlotGraphics(line, t, xa, ya)\n # Draw graphs for each plot\n self.plotter[j].Draw(gc, xAxis=(self._x_lower,\n self._x_upper), yAxis=(float(self._y_lower[j]),\n float(self._y_upper[j])))\n self._rcvLock.release()", "def plot(self):\n pass", "def show_graphs(self):\n self.frequency_plot_graph.show()\n self.resistance_graph.show()\n self.temperature_plot_graph.show()\n self.pressure_plot_graph.show()\n self.humidity_plot_graph.show()\n self.overview_graph.show()\n self.overview_graph.setXRange(-1000, 5000)", "def show_graphs ():\n plt.ylim = (0, 300)\n plt.xlim = (0, 300)\n #Set up lidar plot to figure 1\n lidar_plot = plt.figure (1)\n #Assign title\n plt.title ('Lidar data')\n #Assign data\n plt.imshow (lidar_clean)\n #Set up radar plot to figure 2\n radar_plot = plt.figure (2)\n #Assign title\n plt.title ('Radar data')\n #Assign data\n plt.imshow (radar_clean)\n #Show plots\n plt.show ()", "def plot_blocked_nodes(self, node_list, visited_set):\n points = Marker()\n #visualizations points and lines..\n points.header.frame_id = \"map\"\n points.header.stamp = rospy.get_rostime()\n points.ns = \"markers\"\n points.id = 0\n points.type = points.POINTS\n points.action = points.ADD\n points.pose.orientation.w = 1.0\n points.scale.x = 2*self.rviz_tuning_plt\n 
points.scale.y = 2*self.rviz_tuning_plt\n points.color.r = 1.0\n points.color.g = 0.0\n points.color.b = 0.0\n points.color.a = 1.0\n points.lifetime = rospy.Duration()\n\n # Nodes blocked by obstacles\n for node in node_list:\n if node.cost == float('Inf'):\n p1 = Point()\n p1.x = node.x\n p1.y = node.y\n p1.z = 0.011\n points.points.append(p1)\n\n # Nodes blocked by planner\n for node in visited_set:\n p1 = Point()\n p1.x = node.x\n p1.y = node.y\n p1.z = 0.03\n points.points.append(p1)\n \n self.pub_blocked_nodes.publish(points)", "def plot_network(graph, chars = None, show_all = False, set_width = None, output='plot'):\n if chars is not None:\n graph = graph.subgraph(chars)\n\n scaled = scale_edge_weights(graph)\n pos = nx.spring_layout(graph, k =.75 , seed = 1)\n\n #Add edges\n edge_traces, edge_text_trace = make_edges(scaled, pos, graph, show_all, set_width)\n\n #Add nodes\n node_xs = [pos[node][0] for node in scaled.nodes()]\n node_ys = [pos[node][1] for node in scaled.nodes()]\n node_text = ['<b>'+node.capitalize() for node in scaled.nodes()]\n node_hovertext = []\n for node in graph.nodes():\n node_hovertext.append(node.capitalize() + ': '+ str(graph.nodes()[node]['size']) + ' appearances')\n node_trace = go.Scatter(x = node_xs,\n y = node_ys,\n text = node_text,\n textposition = \"bottom center\",\n textfont_size = 14,\n mode = 'markers+text',\n hovertext = node_hovertext,\n hoverinfo = 'text',\n marker = dict(color = 'black',#'#6959CD',\n size = 15,\n line = None))\n layout = go.Layout(paper_bgcolor='rgba(0,0,0,0)',plot_bgcolor='rgba(0,0,0,0)')\n fig = go.Figure(layout = layout)\n\n for trace in edge_traces:\n fig.add_trace(trace)\n fig.add_trace(node_trace)\n fig.add_trace(edge_text_trace)\n\n fig.update_layout(showlegend = False, width = 1000, height = 1200)\n fig.update_xaxes(showticklabels = False)\n fig.update_yaxes(showticklabels = False)\n\n if output == 'plot':\n fig.show()\n elif output == 'return':\n return fig\n elif output == 'save':\n fig.write_image('graph.png')\n else:\n fig.show()", "def show_graph(g):\r\n net.draw(g,with_labels= True,font_size=16)\r\n plt.show()", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def plot(self, title='', file_name='schelling.png'):\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots()\n #If you want to run the simulation with more than 7 colors, you should set agent_colors accordingly\n colors = ['b','r','g','c','m','y','k']\n for person in self.people:\n ax.scatter(\n person.home.x+0.5,\n person.home.y+0.5,\n s = 50.,\n color=colors[person.group]\n )\n ax.set_title(title, fontsize=10, fontweight='bold')\n ax.set_xlim([0, self.nx])\n ax.set_ylim([0, self.ny])\n ax.set_xticks([])\n ax.set_yticks([])\n plt.savefig(file_name)", "def plot_graph(station_graph):\n G = nx.DiGraph()\n edge_labels = {graph[0]: graph[1] for graph in station_graph}\n node_labels = {graph[0]: graph[0][1] for graph in station_graph}\n for graph in station_graph:\n G.add_edge(graph[0][0], graph[0][1])\n red_edges = [station_graph[0][0]]\n blue_edges = [edge for edge in G.edges() if edge not in red_edges]\n pos = nx.spring_layout(G)\n nx.draw_networkx_nodes(G, pos, node_color='green', node_size=200)\n nx.draw_networkx_labels(G, pos, node_labels=node_labels)\n nx.draw_networkx_edges(G, pos, edgelist=red_edges, edge_color='r', arrows=True)\n nx.draw_networkx_edges(G, pos, edgelist=blue_edges, edge_color='b', arrows=True, arrowsize=10)\n nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)\n plt.show()", "def 
show_custom_graph(self):\n pass", "def plot_mpr_topology_per_node(options, tags=None, cursor=None):\n interval = 600\n options['cur_src'] = 'topo'\n options['prefix'] = \"mpr\"\n ################################################################################\n locs = options['locs']\n colors = options['color2'](pylab.linspace(0, 1, 101))\n colors_mprs = ['green', 'red', 'cyan', 'magenta', 'yellow', 'grey']\n #colors_mprs = ['#222222', '#444444', '#666666', '#888888', '#aaaaaa', '#cccccc']\n ################################################################################\n circ_max = 5\n line_max = 10\n floor_factor = 2\n floor_skew = -0.25\n line_min = 1\n wait_time = 30\n measure_time = wait_time + 20\n draw_non_mpr_neighbors = False\n\n hosts = get_hosts(options)\n\n min_x = min_y = min_z = numpy.infty\n max_x = max_y = max_z = 0\n\n for q, (tag_key, tag_id, nhdp_hi, nhdp_ht, mpr_minpdr) in enumerate(tags):\n # first draw the edges...\n for host in hosts:\n logging.info(\"Plotting tag_id: %s host: %s\" % (tag_id, host))\n tx_if, = cursor.execute('''\n SELECT tx_if\n FROM he\n WHERE host=?\n ''',(host,)).fetchone()\n fig2d = MyFig(options, rect=[0.1, 0.1, 0.8, 0.7], xlabel='x Coordinate', ylabel='y Coordinate')\n fig3d = MyFig(options, rect=[0.1, 0.1, 0.8, 0.7], xlabel='x Coordinate', ylabel='y Coordinate', ThreeD=True)\n try:\n host_xpos, host_ypos, host_zpos = locs[host]\n except KeyError:\n logging.warning('no position found for node %s', host)\n continue\n ################################################################################\n # src is the receiving router, i.e. in our case the MPR\n # host is the sending router, i.e. the MPR selector\n # We only want to draw an edge if it connects a host with its MPR.\n #\n ################################################################################\n\n neighbors = cursor.execute('''\n SELECT DISTINCT(host)\n FROM rh\n WHERE prev = ?\n ''', (tx_if,)).fetchall()\n min_time, = cursor.execute('''\n SELECT min(time)\n FROM nhdp_he\n WHERE tag = ?\n ''',(tag_key,)).fetchone()\n mprs = cursor.execute('''\n SELECT pdr.host, AVG(pdr)\n FROM eval_helloPDR AS pdr JOIN nhdp_mpr_selectors AS mpr\n ON pdr.host = mpr.host AND pdr.tx_if = mpr.mprselector\n WHERE pdr.tx_if = ? AND pdr.tag_key = ? AND mpr.time BETWEEN ? 
AND ?\n GROUP BY pdr.host\n ''', (tx_if, tag_key, min_time + wait_time, min_time + measure_time)).fetchall()\n logging.info(\"Host is %s...\" % host)\n mpr_list = []\n for mpr, pdr in mprs:\n mpr_list.append(mpr)\n try:\n src_xpos, src_ypos, src_zpos = locs[mpr]\n except KeyError:\n logging.warning('no position found for node %s', mpr)\n continue\n\n fig2d.ax.plot(\n [host_xpos+host_zpos*floor_skew*floor_factor, src_xpos+src_zpos*floor_skew*floor_factor],\n [host_ypos+host_zpos*floor_factor, src_ypos+src_zpos*floor_factor],\n linestyle='-', color=colors[pdr*100], linewidth=max(line_max*pdr, line_min), alpha=0.3, label=tag_id)\n\n fig3d.ax.plot(\n [host_xpos, src_xpos],\n [host_ypos, src_ypos],\n [host_zpos, src_zpos],\n linestyle='-', color=colors[pdr*100], linewidth=max(line_max*pdr, line_min), alpha=0.3, label=tag_id)\n if draw_non_mpr_neighbors == True:\n for _host, in neighbors:\n try:\n src_xpos, src_ypos, src_zpos = locs[_host]\n except KeyError:\n logging.warning('no position found for node %s', _host)\n continue\n\n fig2d.ax.plot(\n [host_xpos+host_zpos*floor_skew*floor_factor, src_xpos+src_zpos*floor_skew*floor_factor],\n [host_ypos+host_zpos*floor_factor, src_ypos+src_zpos*floor_factor],\n linestyle='-', color='black', linewidth=line_min, alpha=0.3)\n\n fig3d.ax.plot(\n [host_xpos, src_xpos],\n [host_ypos, src_ypos],\n [host_zpos, src_zpos],\n linestyle='-', color='black', linewidth=line_min, alpha=0.3)\n # draw nodes\n color_idx = 0;\n skip_list = []\n n2_list = []\n for _host in hosts:\n if _host in skip_list:\n continue\n try:\n xpos, ypos, zpos = locs[_host]\n except KeyError:\n logging.warning('no position found for node %s', _host)\n continue\n\n max_x = max(xpos, max_x)\n max_y = max(ypos, max_y)\n min_x = min(xpos, min_x)\n min_y = min(ypos, min_y)\n max_z = max(zpos, max_z)\n min_z = max(zpos, min_z)\n if _host == host:\n fig3d.ax.plot([xpos], [ypos], [zpos], 'o', color='blue', ms=circ_max*2)\n fig2d.ax.plot(xpos+zpos*floor_skew*floor_factor,ypos+zpos*floor_factor,'o', color='blue', ms=circ_max*2)\n elif _host in mpr_list:\n fig3d.ax.plot([xpos], [ypos], [zpos], 'o', color=colors_mprs[color_idx], ms=circ_max)\n fig2d.ax.plot(xpos+zpos*floor_skew*floor_factor,ypos+zpos*floor_factor,'o', color=colors_mprs[color_idx], ms=circ_max)\n mpr_tx_if, = cursor.execute('''\n SELECT tx_if\n FROM he\n WHERE host=? AND time BETWEEN ? AND ?\n ''',(_host, min_time + wait_time, min_time + measure_time)).fetchone()\n n2_ifs = list(pylab.flatten(cursor.execute('''\n SELECT DISTINCT(n2)\n FROM nhdp_mpr_n2\n WHERE host = ? AND time BETWEEN ? AND ?\n ''', (_host, min_time + wait_time, min_time + measure_time)).fetchall()))\n\n n2s = cursor.execute('''\n SELECT DISTINCT(host)\n FROM he\n WHERE tx_if IN (%s)\n ''' % ','.join('?'*len(n2_ifs)), n2_ifs).fetchall()\n #n2 = cursor.execute('''\n #SELECT DISTINCT(host)\n #FROM rh\n #WHERE prev = ? AND time BETWEEN ? 
AND ?\n #''', (mpr_tx_if, min_time + wait_time, min_time + measure_time)).fetchall()\n mpr_xpos, mpr_ypos, mpr_zpos = locs[_host]\n for __host, in n2s:\n __host = unicodedata.normalize('NFKD', __host).encode('ascii','ignore')\n if __host in n2_list:\n continue\n if __host in mpr_list or __host == host:\n continue\n n2_list.append(__host)\n skip_list.append(__host)\n try:\n _xpos, _ypos, _zpos = locs[__host]\n except KeyError:\n logging.warning('no position found for node %s', __host)\n continue\n max_x = max(_xpos, max_x)\n max_y = max(_ypos, max_y)\n min_x = min(_xpos, min_x)\n min_y = min(_ypos, min_y)\n max_z = max(_zpos, max_z)\n min_z = max(_zpos, min_z)\n fig3d.ax.plot([_xpos], [_ypos], [_zpos], 'o', color=colors_mprs[color_idx], ms=circ_max)\n fig2d.ax.plot(_xpos+_zpos*floor_skew*floor_factor,_ypos+_zpos*floor_factor,'o', color=colors_mprs[color_idx], ms=circ_max)\n\n n2_xpos, n2_ypos, n2_zpos = locs[__host]\n\n fig2d.ax.plot(\n [n2_xpos+n2_zpos*floor_skew*floor_factor, mpr_xpos+mpr_zpos*floor_skew*floor_factor],\n [n2_ypos+n2_zpos*floor_factor, mpr_ypos+mpr_zpos*floor_factor],\n linestyle='-', color=colors_mprs[color_idx], linewidth=line_min, alpha=0.3)\n\n fig3d.ax.plot(\n [n2_xpos, mpr_xpos],\n [n2_ypos, mpr_ypos],\n [n2_zpos, mpr_zpos],\n linestyle='-', color=colors_mprs[color_idx], linewidth=line_min, alpha=0.3)\n\n color_idx = color_idx + 1;\n if color_idx > 5:\n color_idx = 0\n else:\n fig3d.ax.plot([xpos], [ypos], [zpos], 'o', color='black', ms=circ_max)\n fig2d.ax.plot(xpos+zpos*floor_skew*floor_factor,ypos+zpos*floor_factor,'o', color='black', ms=circ_max)\n\n\n fig2d.colorbar = fig2d.fig.add_axes([0.1, 0.875, 0.8, 0.025])\n fig3d.colorbar = fig3d.fig.add_axes([0.1, 0.875, 0.8, 0.025])\n drawBuildingContours(fig3d.ax, options)\n alinspace = numpy.linspace(0, 1, 100)\n alinspace = numpy.vstack((alinspace, alinspace))\n for tax in [fig2d.colorbar, fig3d.colorbar]:\n tax.imshow(alinspace, aspect='auto', cmap=options['color2'])\n tax.set_xticks(pylab.linspace(0, 100, 5))\n tax.set_xticklabels(['$%.2f$' % l for l in pylab.linspace(0, 1, 5)], fontsize=0.8*options['fontsize'])\n tax.set_yticks([])\n tax.set_title('$PDR$', size=options['fontsize'])\n fig2d.ax.axis((min_x-10, max_x+10, min_y-10, max_y+10+max_z*floor_factor+10))\n logging.info(\"saving %s\" %(host))\n fig2d.save('2d_mpr_topology_%s' % (host))\n fig3d.save('3d_mpr_topology_%s' % (host))", "def _plot_map(self):\n\n # Plot points if they exist\n\n if len(self._laserX) > 0:\n self._plot_laser()\n\n if len(self._goalX) > 0:\n self._plot_goal()\n\n if len(self._summitX) > 0:\n self._plot_summit()\n\n self._plot_objects()\n\n # Update Plot\n self._fig.canvas.draw_idle()\n\n plt.pause(0.01)", "def draw(\n self,\n ax=None,\n figsize: Optional[tuple[Union[int, float]]] = None,\n node_color: str = \"#1f78b4\",\n node_size: int = 300,\n edge_color: str = \"k\",\n curvature: float = 0.2,\n font_size: int = 12,\n font_color: str = \"k\",\n ):\n import matplotlib.pyplot as plt # pylint: disable=import-outside-toplevel\n\n # Check if lattice is 1D or 2D... or notnetketwarnings.py\n if self._ndim == 1:\n positions = _np.pad(self.positions, (0, 1), \"constant\")\n elif self._ndim == 2:\n positions = self.positions\n else:\n raise ValueError(\n \"Make sure that the graph is 1D or 2D in order to be drawn. 
\"\n f\" Now it is {self._ndim}D\"\n )\n if ax is None:\n _, ax = plt.subplots(figsize=figsize)\n\n for edge in self.edges():\n x1, y1 = positions[edge[0]]\n x2, y2 = positions[edge[1]]\n annotation = ax.annotate(\n \"\",\n xy=(x1, y1),\n xycoords=\"data\",\n xytext=(x2, y2),\n textcoords=\"data\",\n arrowprops=dict(\n arrowstyle=\"-\",\n color=edge_color,\n shrinkA=0,\n shrinkB=0,\n patchA=None,\n patchB=None,\n connectionstyle=f\"arc3,rad={curvature}\",\n ),\n )\n ax.scatter(\n *positions.T,\n s=node_size,\n c=node_color,\n marker=\"o\",\n zorder=annotation.get_zorder() + 1,\n )\n for node in self.nodes():\n x1, y1 = positions[node]\n ax.text(\n x1,\n y1,\n str(node),\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n fontsize=font_size,\n color=font_color,\n zorder=annotation.get_zorder() + 1,\n )\n ax.axis(\"equal\")\n return ax", "def save_network(network, pos, color_nodes, weights, i, temporary_folder):\n\n # Plot a save the network without display it\n pos = nx.spring_layout(network, seed=pos, weight=7)\n plt.figure(1)\n plt.title('Random walk on a network')\n nx.draw(network, pos=pos, with_labels=True, node_color=color_nodes, width=weights)\n plt.savefig(temporary_folder + 'plotgraph' + str(i) + '.png', dpi=300, bbox_inches='tight')\n plt.close(1)", "def plot_graph(graph, labels=None):\n if labels is not None:\n unique_labels = set([v for _, v in labels.items()])\n colors = np.arange(0, 1, 1. / len(unique_labels))\n colors_list = [colors[labels[node]] for node in graph.nodes]\n else:\n colors_list = None\n pos = networkx.spring_layout(graph)\n networkx.draw_networkx_nodes(graph, pos, cmap=plt.get_cmap('jet'), node_color=colors_list,\n node_size=500)\n networkx.draw_networkx_labels(graph, pos)\n networkx.draw_networkx_edges(graph, pos, edgelist=graph.edges, edge_color='r', arrows=True)\n plt.show()", "def plot():\n pass", "def display(self):\n scatter_plot(self.points, self.hull_points, self.color, self.title)", "def plotTree(self):\n t = self.make(self.tree)\n t.draw()", "def display_game(self):\n display = plt.figure()\n\n # Plots dots.\n for dot in self.dots:\n plt.scatter(dot.x + .5, dot.y + .5, color=dot.color, s=1000)\n\n # Makes a uniform grid,\n axes = display.gca()\n axes.set_aspect('equal', adjustable='box')\n axes.set_xticks(np.arange(0, self.dimension + 1, 1))\n axes.set_yticks(np.arange(0, self.dimension + 1, 1))\n plt.grid(True, color=\"black\", linestyle=\"-\")\n axes.set_xticklabels([])\n axes.set_yticklabels([])\n for tic in axes.xaxis.get_major_ticks():\n tic.tick1On = tic.tick2On = False\n for tic in axes.yaxis.get_major_ticks():\n tic.tick1On = tic.tick2On = False\n plt.show()", "def visualize(title, particles):\n\n plt.figure(figsize=(10,10))\n plt.title(\"Best configuration for \" + str(len(particles)) + \" particles\", size=25)\n plt.xlabel(\"xcoordinate\", size=18)\n plt.ylabel(\"ycoordinate\", size=18)\n\n plt.xticks(size=13)\n plt.yticks(size=13)\n\n circle = plt.Circle((0, 0), 1)\n circle.set_edgecolor(\"red\")\n circle.set_facecolor(\"none\")\n fig = plt.gcf()\n ax = fig.gca()\n\n ax.add_artist(circle)\n plt.xlim(-1.1,1.1)\n plt.ylim(-1.1,1.1)\n\n # draw all the particles\n for particle in particles:\n plt.scatter(particle.x, particle.y)\n\n fig.savefig(title)", "def draw_plot(points, clusters):\n plot.figure().clear() # clean the canvas, ready for new draw\n plot.title(\"k-means Backbone Network\")\n plot.axis([-1, 26, -1, 26])\n plot.xticks(range(-1, 26, 1))\n plot.yticks(range(-1, 26, 1))\n\n for pt in points:\n # draws the point id\n 
plot.text(pt.x+0.1, pt.y+0.1, str(pt.id))\n\n for clst in clusters:\n color = tuple(numpy.random.rand(1, 3)[0])\n cir = plot.Circle((clst.centroid.x, clst.centroid.y), radius=clst.radius, alpha=0.3, fc=color)\n plot.gca().add_patch(cir)\n plot.plot([clst.centroid.x], [clst.centroid.y], '^')\n plot.plot([pt.x for pt in clst.points], [pt.y for pt in clst.points], 'o')\n\n _draw_connections(plot, clusters, SP_TABLE)\n plot.grid(True)\n #plot.show()\n plot.savefig(\"./\" + str(len(clusters)) + \" - \" + str(datetime.datetime.now()) + \".png\", format=\"png\")", "def print_graph(self, filename='', save=False):\n nx.draw_circular(self.graph, node_color='pink', node_size=1000, with_labels=True)\n if save:\n plt.savefig(filename)\n print(f'Saved graph as {filename!r}')\n else:\n plt.show()", "def plotExternalNodes( self ):\n\n max_x = max(self.mNodeWidthsEnd)\n for node_id in self.mTree.get_terminals():\n\n node = self.mTree.node( node_id )\n\n x = self.mNodeWidthsEnd[node_id]\n y = self.mNodeHeights[node_id]\n \n if self.mLeftJustifiedExternalNodes:\n x_label = max_x\n else:\n x_label = x\n \n e = self.mDecoratorExternalNodes.getElements( node_id,\n self.getHeaderWidth() + x,\n self.getHeaderHeight() + y,\n self.getHeaderWidth() + x_label,\n self.getHeaderHeight() + y )\n \n self.addElements(e)", "def draw_edges(self):\n nx.draw_networkx_edges(self.G, pos=self.positions)", "def plot_graph(graph, node_values, minscale, maxscale, edgelist, node_groups, coloring_group_index=0, figsize=(13,13)):\n \n for i, (node_group, node_options) in enumerate(node_groups):\n \n am = nx.draw_networkx_nodes(\n graph,\n pos=nx.get_node_attributes(graph, 'pos'),\n node_color=[node_values[n] for n in node_group],\n with_labels=False,\n nodelist=node_group,\n vmin=minscale,\n vmax=maxscale,\n **node_options)\n \n if i == coloring_group_index:\n ami = am\n \n nx.draw_networkx_edges(\n graph,\n edgelist=edgelist,\n pos=nx.get_node_attributes(graph, 'pos'),\n arrows=True,\n edge_color=\"grey\")\n \n return ami", "def draw(self):\n nx.draw_networkx(self.rc)", "def FillRandomizeGraphGNP(self, canvas, n_nodes, prob, inCircle=False, directedGraph=False):\n if inCircle:\n for i in range(n_nodes):\n xnext = canvas.winfo_width()/2.0 - 255 * math.cos(i * 2*math.pi / (n_nodes))\n ynext = canvas.winfo_height()/2.0 - 255 * math.sin(i * 2*math.pi / (n_nodes))\n self.AddNode(Node(i+1, xnext, ynext))\n else:\n for i in range(n_nodes):\n xx = random.randint(30, canvas.winfo_width() - 30)\n yy = random.randint(30, canvas.winfo_height() - 30)\n self.AddNode(Node(i+1, xx, yy))\n\n for node in self.nodes:\n for i in range(n_nodes):\n rand_prob = random.uniform(0, 1)\n if rand_prob <= prob:\n if not directedGraph:\n self.Connect(node.index, i+1)\n else:\n self.Connect(node.index, i+1, arrow=True)", "def plot_dag(\n self,\n filename,\n traverser,\n node_size=500,\n label_font_size=12,\n text_angle=0,\n image_width=16,\n image_height=12,\n ):\n # map nodes to a color for their operation type\n # https://stackoverflow.com/questions/27030473/how-to-set-colors-for-nodes-in-networkx-python\n color_map = []\n colors = [\"#fbb4ae\", \"#b3cde3\", \"#ccebc5\", \"#decbe4\", \"#fed9a6\"]\n for node in self.G2:\n if self.node_map[node] == OperationType.reader.value:\n color_map.append(colors[0])\n elif self.node_map[node] == OperationType.pipeline.value:\n color_map.append(colors[1])\n elif self.node_map[node] == OperationType.model.value:\n color_map.append(colors[2])\n elif self.node_map[node] == OperationType.writer.value:\n 
color_map.append(colors[3])\n else:\n color_map.append(colors[4])\n\n fig = plt.figure(figsize=(image_width, image_height))\n ax = plt.subplot(111)\n ax.set_title(filename, fontsize=10)\n\n try:\n import pydot\n from networkx.drawing.nx_pydot import graphviz_layout\n except ImportError: # pragma: no cover\n raise ImportError(\n \"This example needs Graphviz and pydot.\"\n \"Please refer to the Plotting requirements in the README\"\n )\n\n # pos = nx.spring_layout(G)\n # pos = nx.circular_layout(G)\n # pos = nx.kamada_kawai_layout(G)\n # pos = nx.shell_layout(G)\n # pos = nx.spectral_layout(G)\n pos = graphviz_layout(self.G2, prog=\"dot\") # , prog='twopi', args='')\n\n nx.draw(\n self.G2,\n pos,\n node_size=node_size,\n node_color=color_map,\n edge_color=\"#939393\",\n font_size=8,\n font_weight=\"bold\",\n )\n # nx.draw_networkx_nodes(G, pos, node_color='b', node_size=500, alpha=0.8)\n\n if len(self.conditional_nodes) > 0:\n cnodes = nx.draw_networkx_nodes(\n self.G2,\n pos,\n node_color=\"#e6b655\",\n node_size=1.5 * node_size,\n alpha=0.8,\n node_shape=\"D\",\n nodelist=list(self.conditional_nodes),\n )\n cnodes.set_edgecolor(\"red\")\n\n # nx.draw_networkx_labels(self.G2,pos, font_size=9)\n\n text = nx.draw_networkx_labels(\n self.G2, pos, font_size=label_font_size\n )\n\n if traverser:\n # map node name to sequence number\n sequence = traverser.traversal_list()\n idx = list(range(1, len(sequence) + 1))\n d = dict(zip(sequence, idx))\n\n # let's plot the sequence numner above the node. How far above it?\n ys = [t._y for _, t in text.items()]\n ysrange = max(ys) - min(ys)\n offset = 0.02 * abs(ysrange)\n\n for _, t in text.items():\n t.set_rotation(text_angle)\n\n if traverser:\n plt.text(t._x, t._y + offset, d[t._text], fontsize=24, color=\"red\")\n\n plt.axis(\"off\")\n plt.tight_layout()\n plt.savefig(filename, format=\"PNG\")\n logging.info(\"Graph written to %s\" % filename)", "def plot(self, axes):\n if self.is_leaf:\n axes.plot([p.x for p in self.points], [p.y for p in self.points], 'bo')\n else:\n axes.plot([self.centre.x - self.size / 2, self.centre.x + self.size / 2],\n [self.centre.y, self.centre.y], '-', color='gray')\n axes.plot([self.centre.x, self.centre.x],\n [self.centre.y - self.size / 2, self.centre.y + self.size / 2],\n '-', color='gray')\n for child in self.children:\n child.plot(axes)\n axes.set_aspect(1)", "def plot_nodes(snode, rnode, rseg):\n\n # Prepare plot\n _, ax = plt.subplots()\n plt.title('Best servers by minimal road distance')\n\n # Plot road segments\n line = [[(item['x1'], item['y1']), (item['x2'], item['y2'])]\n for item in rseg]\n c = [cm.get_cmap(COLORMAP)(item['color_num'] / (NUM_COLORS - 1))\n for item in rseg]\n lc = coll.LineCollection(line, colors=c, linewidth=ROAD_WIDTH)\n ax.add_collection(lc)\n\n # Plot road nodes\n x = [item['x'] for item in rnode]\n y = [item['y'] for item in rnode]\n c = [item['color_num'] for item in rnode]\n ax.scatter(x, y, c=c, cmap=COLORMAP, edgecolors='face',\n vmin=0, vmax=(NUM_COLORS - 1), s=ROAD_NODE_SIZE)\n\n # Plot server nodes\n x = [item['x'] for item in snode]\n y = [item['y'] for item in snode]\n c = [item['color_num'] for item in snode]\n ax.scatter(x, y, c=c, cmap=COLORMAP, edgecolors='face',\n vmin=0, vmax=(NUM_COLORS - 1), s=SERVER_SYM_SIZE)\n\n # Display plot and save\n plt.savefig(os.path.join('Output Data', 'Best server.pdf'))\n plt.show()", "def initializePlot( self ):\n\n self.mNTaxa = len(self.mTree.get_taxa())\n self.mNNodes = max( self.mTree.chain.keys() ) + 1\n\n self.calculateCoordinates()\n 
\n self.calculateCanvasSize( )", "def plot_system_topology(graph):\n\n plt.figure(figsize=(10,8))\n plt.title('System Topology')\n nx.draw(graph,\n pos=graphviz_layout(graph),\n node_size = [16 * graph.degree(n) for n in graph],\n with_labels = True,\n node_color = 'grey',\n font_size = 10,\n alpha = 0.5\n )", "def draw_graph(graph, start, goal, path=[], save_file=None):\n explored = graph.get_explored_nodes()\n node_pos = {n: graph.nodes[n]['pos'] for n in graph.nodes.keys()}\n edge_labels = {}\n for edge in graph.edges():\n edge_labels[edge] = graph[edge[0]][edge[1]]['weight']\n\n labels = {}\n for node in graph:\n labels[node] = node\n\n nx.draw_networkx_nodes(graph, node_pos, node_color='gray') #, nodelist=romania.nodes, node_color='w', node_size=500)\n nx.draw_networkx_edges(graph, node_pos, style='dashed')\n if len(explored) > 0:\n print(\"Explored = \"+str(explored))\n nx.draw_networkx_nodes(graph, node_pos, nodelist=explored, node_color='r')\n\n if len(path) > 0:\n nx.draw_networkx_nodes(graph, node_pos, nodelist= path, node_color='y')\n edgelist = []\n for i in range(1,len(path)):\n edgelist.append((path[i - 1], path[i]))\n nx.draw_networkx_edges(graph, node_pos, edgelist, edge_color='b', width=3)\n nx.draw_networkx_nodes(graph, node_pos, nodelist=[start, goal], node_color='g')\n\n\n\n nx.draw_networkx_labels(graph, node_pos, labels)\n nx.draw_networkx_edge_labels(graph, node_pos, edge_labels, font_size=8)\n\n plt.axis('off')\n plt.show() # display\n if save_file is not None:\n plt.savefig(save_file) # save as png", "def plot_nodes(df, nodes=None, states=None, ncol=3, sharey=False, figsize=None, cmap=None):\n if nodes is None:\n nodes = df.node.unique().tolist()\n if states is None:\n states = df.state.unique().tolist()\n if cmap is None:\n cmap = ListedColormap([\"#56B4E9\", \"#009E73\", \"#F0E442\", \"#0072B2\", \"#D55E00\", \"#CC79A7\", \"#999999\", \"#E69F00\"])\n nrow = math.ceil(len(nodes) / ncol)\n if figsize is None:\n figsize = (20, nrow * 5)\n\n if not nodes:\n raise ValueError(\"nodes cannot be an empty list\")\n if not states:\n raise ValueError(\"states cannot be an empty list\")\n\n # pre filter by states\n df = df[df.state.isin(states)]\n\n fig, axes = plt.subplots(nrow, ncol, squeeze=False, constrained_layout=True, sharey=sharey, figsize=figsize)\n\n count = 0\n ax = None\n for i in range(nrow):\n for j in range(ncol):\n if count < len(nodes):\n node = nodes[count]\n count += 1\n grouped = df[df.node == node].groupby([\"time\", \"state\"]).sum()\n indexed = grouped.reset_index().pivot(index=\"time\", columns=\"state\", values=\"total\")\n\n ax = axes[i, j]\n indexed.plot(ax=ax, legend=False, title=node, cmap=cmap)\n ax.set_ylabel(\"Number of People\")\n ax.set_xlabel(\"Time\")\n\n assert ax is not None, \"ax was never assigned\"\n handles, labels = ax.get_legend_handles_labels()\n fig.legend(handles, labels, loc=\"upper right\")\n\n return fig", "def plot_nhdp_hello_topology(options, tags=None, cursor=None):\n options['cur_src'] = 'topo'\n options['prefix'] = \"nhdp\"\n ################################################################################\n locs = options['locs']\n colors = options['color2'](pylab.linspace(0, 1, 101))\n ################################################################################\n circ_max = 5\n line_max = 10\n floor_factor = 2\n floor_skew = -0.25\n line_min = 1\n\n hosts = get_hosts(options)\n\n for q, (tag_key, tag_id, nhdp_hi, nhdp_ht, mpr_minpdr) in enumerate(tags):\n logging.info('tag_id=\\\"%s\\\" (%d/%d)', tag_id, q+1, 
len(tags))\n fig2d = MyFig(options, rect=[0.1, 0.1, 0.8, 0.7], xlabel='x Coordinate', ylabel='y Coordinate')\n fig3d = MyFig(options, rect=[0.1, 0.1, 0.8, 0.7], xlabel='x Coordinate', ylabel='y Coordinate', ThreeD=True)\n if not q:\n fig3d_onlynodes = MyFig(options, xlabel='x Coordinate [m]', ylabel='y Coordinate [$m$]', ThreeD=True)\n\n min_x = min_y = min_z = numpy.infty\n max_x = max_y = max_z = 0\n\n # draw the nodes\n for host in hosts:\n try:\n xpos, ypos, zpos = locs[host]\n except KeyError:\n logging.warning('no position found for node %s', host)\n continue\n\n max_x = max(xpos, max_x)\n max_y = max(ypos, max_y)\n min_x = min(xpos, min_x)\n min_y = min(ypos, min_y)\n max_z = max(zpos, max_z)\n min_z = max(zpos, min_z)\n\n fig2d.ax.plot(\n xpos+zpos*floor_skew*floor_factor,\n ypos+zpos*floor_factor,\n 'o', color='black', ms=circ_max)\n fig3d.ax.plot([xpos], [ypos], [zpos], 'o', color='black', ms=circ_max)\n if not q:\n color = 'black'\n if host.startswith('a6'):\n color = 'red'\n elif host.startswith('a3'):\n color = 'blue'\n elif host.startswith('a7'):\n color = 'orange'\n fig3d_onlynodes.ax.plot([xpos], [ypos], [zpos], 'o', color=color, ms=circ_max)\n fig2d.colorbar = fig2d.fig.add_axes([0.1, 0.875, 0.8, 0.025])\n fig3d.colorbar = fig3d.fig.add_axes([0.1, 0.875, 0.8, 0.025])\n drawBuildingContours(fig3d.ax, options)\n drawBuildingContours(fig3d_onlynodes.ax, options)\n\n alinspace = numpy.linspace(0, 1, 100)\n alinspace = numpy.vstack((alinspace, alinspace))\n for tax in [fig2d.colorbar, fig3d.colorbar]:\n tax.imshow(alinspace, aspect='auto', cmap=options['color2'])\n tax.set_xticks(pylab.linspace(0, 100, 5))\n tax.set_xticklabels(['$%.2f$' % l for l in pylab.linspace(0, 1, 5)], fontsize=0.8*options['fontsize'])\n tax.set_yticks([])\n tax.set_title('$PDR$', size=options['fontsize'])\n fig2d.ax.axis((min_x-10, max_x+10, min_y-10, max_y+10+max_z*floor_factor+10))\n fig2d.save('2d_%s' % (tag_id))\n fig3d.save('3d_%s' % (tag_id))\n if not q:\n fig3d_onlynodes.save('3d')", "def plot_nodes(recs, pat, with_fit=False, figsize=None):\n\n ls = [':', '--', '-.']\n colors = plt.cm.rainbow(np.linspace(0, 1, len(recs)))\n\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n\n for j, r in enumerate(recs):\n c = colors[j]\n for i in range(3):\n label = 'run ' + str(r.runs[0]) + ' n' + str(i+1)\n ax.plot(r.t, r.n[i]['val'], c=c, ls=ls[i], lw=0.5, label=label)\n if with_fit:\n ax.plot(r.t, r.n[i]['fit'], c=c, lw=0.5)\n\n max_x = max([r.t[-1] for r in recs])\n ax.set_xlim([0., max_x * 1.3]) # to accommodate the legend\n ax.set_xlabel(Records.time_label)\n plt.legend()\n plt.grid(True)\n fig.suptitle('number of nodes: runs ' + pat)\n\n plt.show()", "def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()", "def showGraph(self, file_name = \"\"):\n \n # prepare edges and weights for visualization\n edges = self.graph.edges()\n weights = [self.graph_data[u]['pheromones'][v] for u,v in edges]\n weights_sum = sum(weights)\n weights = [ (w/weights_sum)*50 for w in weights]\n \n # prepare different shades of red to be used to optionally differentiate\n # between edges with different costs\n # to show more informatiion on the same graph\n colors = []\n max_cost = max([self.graph_data[u]['costs'][v] for u,v in edges])\n for u,v in edges:\n if self.graph_data[u]['costs'][v] <= max_cost/32:\n colors.append('#ff7f7f')\n continue\n if 
self.graph_data[u]['costs'][v] <= max_cost/16:\n colors.append('#ff6666')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/8:\n colors.append('#ff4c4c')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/4:\n colors.append('#ff3232')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/2:\n colors.append('#ff1919')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost:\n colors.append('#ff0000')\n continue\n \n # print the graph \n pos=nx.circular_layout(self.graph)\n nx.draw( self.graph,pos=pos,node_size=200,node_color='#A8A8A8', with_labels=True,edges=edges, edge_color=colors,edge_cmap=plt.cm.Blues, width=weights)\n if file_name != \"\":\n path = \"img/\"+file_name\n plt.savefig(path, format=\"PNG\")\n plt.show()", "def show(self, circular=False, directed=True):\n self.plot( circular=circular, directed=directed ).show()", "def Initialite_Random_Graph(rect_x=800,rect_y=800,nodes_amount=420,link_dist=75):\n graph = [[None]*rect_x]*rect_y\n nodes = []\n links = []\n \n for _ in range(nodes_amount):\n fine = False\n while not fine:\n x = random.randrange(rect_x)\n y = random.randrange(rect_y)\n if graph[y][x] is None:\n fine = True\n near_nodes=[] \n for N in nodes:\n xo, yo = N.coordinates()\n xd = abs(x-xo)\n yd = abs(y-yo)\n if link_dist**2>=xd**2+yd**2:\n near_nodes.append(N)\n graph[y][x] = node(x,y)\n nodes.append(graph[y][x])\n for N in near_nodes:\n xo, yo = N.coordinates()\n xd = abs(x-xo)\n yd = abs(y-yo)\n li = link(N,sqrt(xd**2+yd**2),graph[y][x])\n N.add_link(li)\n ln = link(graph[y][x],sqrt(xd**2+yd**2),N)\n graph[y][x].add_link(ln)\n links.append(li)\n links.append(ln)\n return nodes, links", "def draw_graph(self):\n\t\tif None in self.graph:\n\t\t\tdel self.graph[None]\n\n\t\tfor vs in self.graph.itervalues():\n\t\t\tto_delete = []\n\t\t\tfor i in xrange(len(vs)):\n\t\t\t\tif vs[i] is None:\n\t\t\t\t\tto_delete.append(i)\n\n\t\t\tfor i in reversed(to_delete):\n\t\t\t\tdel vs[i]\n\n\t\tself.G=nx.Graph(self.graph)\n\n\t\tfor k,v in self.labels.iteritems():\n\t\t\tif v[:6] == 'Module':\n\t\t\t\troot = k\n\t\t\t\tbreak\n\n\t\treturn self.__dfs_plot(root)", "def draw_graph(graph, node_positions=None, start=None, goal=None,\r\n path=None):\r\n explored = [key for key in graph.explored_nodes if graph.explored_nodes[key] > 0]\r\n\r\n labels = {}\r\n for node in graph:\r\n labels[node] = node\r\n\r\n if node_positions is None:\r\n node_positions = networkx.spring_layout(graph)\r\n\r\n networkx.draw_networkx_nodes(graph, node_positions)\r\n networkx.draw_networkx_edges(graph, node_positions, style='dashed')\r\n networkx.draw_networkx_labels(graph, node_positions, labels)\r\n\r\n networkx.draw_networkx_nodes(graph, node_positions, nodelist=explored,\r\n node_color='g')\r\n edge_labels = networkx.get_edge_attributes(graph, 'weight')\r\n networkx.draw_networkx_edge_labels(graph, node_positions, edge_labels=edge_labels)\r\n \r\n if path is not None:\r\n edges = [(path[i], path[i + 1]) for i in range(0, len(path) - 1)]\r\n networkx.draw_networkx_edges(graph, node_positions, edgelist=edges,\r\n edge_color='b')\r\n\r\n if start:\r\n networkx.draw_networkx_nodes(graph, node_positions,\r\n nodelist=[start], node_color='b')\r\n\r\n if goal:\r\n networkx.draw_networkx_nodes(graph, node_positions,\r\n nodelist=[goal], node_color='y')\r\n\r\n plt.plot()\r\n plt.show()", "def plot(self):\n\t\tself.plotOfIP().plot()", "def plot_and_spearman_task4(infection_times_median, clustering_coefficient_net, degree_net, strength_net,\n betweenness_centrality_net, n_nodes):\n # 
ordered list of values, the index represent the node\n infection_times_median_list = []\n clustering_coefficient_net_list = []\n degree_net_list = []\n strength_net_list = []\n betweenness_centrality_net_list = []\n\n for i in range(n_nodes):\n infection_times_median_list.append(infection_times_median[str(i)])\n clustering_coefficient_net_list.append(clustering_coefficient_net[str(i)])\n degree_net_list.append(degree_net[str(i)])\n strength_net_list.append(strength_net[str(i)])\n betweenness_centrality_net_list.append(betweenness_centrality_net[str(i)])\n\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(clustering_coefficient_net_list, infection_times_median_list, alpha=0.5)\n plt.suptitle(r'Median infection times as a function of the unweighted clustering coefficient')\n ax.set_xlabel(r'clustering coefficient $c$')\n ax.set_ylabel(r'median infection time')\n fig.set_figwidth(6.7)\n fig.savefig(\"./plots/t4_clustering_coefficient.pdf\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(degree_net_list, infection_times_median_list, alpha=0.5)\n plt.suptitle(r'Median infection times as a function of the degree')\n ax.set_xlabel(r'degree $k$')\n ax.set_ylabel(r'median infection time')\n fig.set_figwidth(6.7)\n fig.savefig(\"./plots/t4_degree_net.pdf\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(strength_net_list, infection_times_median_list, alpha=0.5)\n plt.suptitle(r'Median infection times as a function of the strength')\n ax.set_xlabel(r'strength $s$')\n ax.set_ylabel(r'median infection time')\n fig.set_figwidth(6.7)\n fig.savefig(\"./plots/t4_strength_net.pdf\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(betweenness_centrality_net_list, infection_times_median_list, alpha=0.5)\n plt.suptitle(r'Median infection times as a function of the unweighted betweenness centrality')\n ax.set_xlabel(r'betweenness centrality')\n ax.set_ylabel(r'median infection time')\n fig.set_figwidth(6.7)\n fig.savefig(\"./plots/t4_betweenness_centrality_net.pdf\")\n\n # Spearman rank-correlation coefficient\n print(\"Spearman rank-correlation coefficient between median infection time and: \")\n print(\"- clustering coefficient: \" + str(\n spearmanr(infection_times_median_list, clustering_coefficient_net_list).correlation))\n print(\"- degree: \" + str(spearmanr(infection_times_median_list, degree_net_list).correlation))\n print(\"- strength: \" + str(spearmanr(infection_times_median_list, strength_net_list).correlation))\n print(\"- betweenness centrality: \" + str(\n spearmanr(infection_times_median_list, betweenness_centrality_net_list).correlation))", "def nodeRender(nodes, ax=None):\n\tif not ax:\n\t\tfig = mplt.figure()\n\t\tax = fig.add_subplot(111, projection='3d')\n\tx = nodes[:, 2]\n\ty = nodes[:, 1]\n\tz = nodes[:, 0]\n\tax.scatter(x, y, -z)\n\treturn(ax)", "def draw_network(G, ds, n = 5, label = False):\n\n top_n = top_n_users(ds,5)\n top_n = [int(i[0]) for i in top_n]\n H = G.subgraph(top_n)\n for m in top_n:\n child = ds[m]\n for item in child:\n H.add_edge(m,item)\n\n print \"Drawing figure...\"\n\n fig = plt.figure()\n nx.draw(H,pos=nx.spring_layout(H), node_size = 1, alpha = 0.25,\n width = 0.25, with_labels = label)\n fig.suptitle('Top 5 nodes by 1st degree connection', fontsize=20)\n# plt.savefig(\"images/TopN.png\", format=\"PNG\")\n plt.show()", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def plot(self, x, y, weights=None, title='Pacman Plot'):\n if np.array(x).size == 0:\n return\n \n if isinstance(x[0], 
np.ndarray):\n # Scrape the first element of each data point\n x = [data[0] for data in x]\n \n xmin = int(math.floor(min(x)))\n ymin = int(math.floor(min(y)))\n xmax = int(math.ceil(max(x)))\n ymax = int(math.ceil(max(y)))\n width = xmax-xmin+3\n height = ymax-ymin+3\n self.initPlot(xmin, ymin, width, height)\n \n gameState = self.blankGameState.deepCopy()\n \n gameState.agentStates = []\n \n # Add ghost at each point\n for (px,py) in zip(x,y):\n point = (px+self.xShift, py+self.yShift)\n gameState.agentStates.append( AgentState( Configuration( point, Directions.STOP), False) )\n\n# self.initialize(gameState)\n graphicsUtils.clear_screen()\n self.infoPane = InfoPane(gameState.layout, self.gridSize)\n self.drawStaticObjects(gameState)\n self.drawAgentObjects(gameState)\n\n graphicsUtils.changeText(self.infoPane.scoreText, title)\n graphicsUtils.refresh()\n graphicsUtils.sleep(1)", "def plot_distribution(self,show=True):\n\t\tk_vals,prob_vals = self.tuple_of_probabilities\n\t\t\n\t\tplt.figure(\"Probability distribution of Random Walk, theoretical\")\n\t\tplt.scatter(k_vals,prob_vals,s=4)\n\t\tplt.xlim((-self.n-1,self.n+1))\n\t\t\n\t\tplt.xlabel(\"x\\u2099 - Position after n jumps\")\n\t\tplt.ylabel(\"Probability\")\n\t\tplt.suptitle(\"Random Walk: p={}, n={}, \\u0394x={}\".format(self.p,self.n,self.delta_x))\n\t\tif show == True:\n\t\t\tplt.show()", "def draw_tree(self, agent, color='b'):\n for edge in self.all_edges[agent]:\n parent, child = edge\n for cords in self.xy_cords:\n plt.plot([parent.state[cords[0]], child.state[cords[0]]],\n [parent.state[cords[1]], child.state[cords[1]]], c=color)\n plt.xlim(self.Xi[0])\n plt.ylim(self.Xi[1])\n plt.show()", "def plot_order_star(self,N=200,bounds=[-5,5,-5,5],plotroots=False,\n color=('w','b'),filled=True,fignum=None):\n import nodepy.stability_function as stability_function\n import matplotlib.pyplot as plt\n\n p,q=self.__num__().stability_function(mode='float')\n\n fig = stability_function.plot_order_star(p,q,N,bounds,plotroots,color,fignum)\n plt.title('Order star for '+self.name)\n return fig", "def plot_states_graph(G, color_labels):\r\n pos = nx.spring_layout(G, k=0.1)\r\n plt.rcParams.update({'figure.figsize': (7, 7)})\r\n nx.draw_networkx(\r\n G, \r\n pos=pos, \r\n node_size=20, \r\n node_color=color_labels ,\r\n arrowsize=0.001,\r\n edge_color=\"#C0C0C0\", \r\n alpha=0.3, \r\n with_labels=False)\r\n plt.gca().set_facecolor(\"white\")", "def start_new_graph(self):\n self.nodes = {}\n self.reset_graph()" ]
[ "0.7539952", "0.69758826", "0.6917775", "0.6841799", "0.6772024", "0.66144824", "0.66071594", "0.65934306", "0.65229076", "0.65064275", "0.6494702", "0.6480308", "0.6477845", "0.6361443", "0.63394064", "0.6281064", "0.62662697", "0.6260735", "0.6240371", "0.62216246", "0.6214042", "0.6182424", "0.6180613", "0.6179722", "0.6165587", "0.61483055", "0.61130697", "0.6102516", "0.6086002", "0.6077428", "0.6065357", "0.6049809", "0.6035397", "0.60286176", "0.6027026", "0.600624", "0.60039306", "0.5999154", "0.5992661", "0.59789574", "0.59684956", "0.5967369", "0.59450233", "0.59437704", "0.59276086", "0.59203863", "0.5919267", "0.58988297", "0.589461", "0.58938634", "0.5893022", "0.58925635", "0.58833784", "0.58806634", "0.58791894", "0.5860784", "0.5857873", "0.58501107", "0.5846295", "0.58309686", "0.57941705", "0.5784715", "0.57834375", "0.578179", "0.5778932", "0.57753676", "0.57741135", "0.57677394", "0.57631713", "0.5761635", "0.5759966", "0.5754493", "0.57517076", "0.57469815", "0.5745332", "0.5727777", "0.57249135", "0.5715568", "0.57039475", "0.568745", "0.5685441", "0.5670686", "0.5669093", "0.56656384", "0.56599575", "0.56579536", "0.5653511", "0.56504726", "0.565031", "0.56487566", "0.5647462", "0.56391287", "0.56351936", "0.56335866", "0.5630293", "0.5629974", "0.5628085", "0.5621985", "0.56107277", "0.56087965" ]
0.7134664
1
Poll for workers to wake up; wait up to 10 seconds
def wait_for_workers(self, workers_db_key):
        timeout = time.time() + 10
        while True:
            n_workers = self.tempstore.conn.scard(workers_db_key)
            self.logger.info('Got redis scard resp: %s', n_workers)
            if n_workers > 0:
                break
            if time.time() > timeout:
                raise Exception('Workers did not come up - please check syslog')
            time.sleep(1)
        self.logger.info('Workers successfully started')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitor(self):\r\n while True:\r\n for worker, start_time in self.workers.items():\r\n if (not worker.isAlive() or\r\n self.timeout\r\n and datetime.now() - start_time > self.timeout): \r\n\r\n self.work_count.get_nowait()\r\n self.jobs.task_done()\r\n del self.workers[worker]\r\n\r\n time.sleep(1)", "def poll_thread():\n while not stop_flag.wait(0.100): # poll every 100ms\n check_jobs()", "def wake_till(seconds):\n while True:\n if int(time.time()) < seconds:\n time.sleep(5)\n else:\n return", "def _poll(self):\n return self.zmq_core.poll(10)", "def wait_forever(self):\r\n while True:\r\n time.sleep(0.5)", "def wait(self):\n while self._worker is None:\n # wait() before self._run()\n time.sleep(0.1)\n self._worker.join()\n return self.poll()", "def wait(self):\n AbstractDaemon.wait(self, SCHEDULER_PERIOD)", "def wait_for_workers(self):\r\n stop = False\r\n workers = self.aggregator.get_participants()\r\n\r\n while not stop: \r\n try:\r\n with self.aggregator:\r\n resp = self.aggregator.receive(1)\r\n participant = resp.notification['participant']\r\n workers.append(participant)\r\n print('Task %s: participant %s has joined' % (self.task_name, participant))\r\n except Exception as err:\r\n print(\"Task %s: joined %d participants out of %d\" % (self.task_name, len(workers), self.Nworkers))\r\n #print(err)\r\n #print('Check here: error')\r\n #import code\r\n #code.interact(local=locals())\r\n pass\r\n\r\n if len(workers) == self.Nworkers:\r\n stop = True\r\n\r\n workers = self.aggregator.get_participants()\r\n return list(workers.keys())", "def wait(self, poll_interval=1):\n while True:\n active = [t for t in threading.enumerate() if t.name != 'MainThread']\n if not len(active):\n break\n self.log.debug('Waiting for {:d} threads'.format(len(active)))\n time.sleep(poll_interval)", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wake_all_threads(self):\n self.advance_time(increment_by=0.0)", "def monitor(self):\n while True:\n complete = True\n for thread in self._running:\n if not thread.complete:\n complete = False\n\n if thread.complete:\n thread.join()\n elif thread.failed:\n pass\n\n if complete:\n break\n time.sleep(Threadable.THREAD_SLEEP)", "def busy_wait(self, seconds):\n end_time = time.perf_counter() + seconds\n while(time.perf_counter() < end_time):\n pass", "def poll(self):\n while self.running and reactor._started and not reactor._stopped:\n self.check_response_queue()\n sleep(0.5)", "def poll(until):\n\n start = time.time()\n\n while not until():\n time.sleep(0.5)\n cur = time.time()\n elapsed = int(round(cur - start))\n if int(elapsed) % 60 == 0:\n print('Waiting ({:0.2f} minutes)'.format(elapsed / 60.0))", "def wait_all():\n global alive\n\n try:\n while alive > 0:\n gevent.sleep(1)\n finally: \n signal.setitimer(signal.ITIMER_REAL, 0)", "def wake(self):\n self.wait = self.min_wait", "def wait(self):\r\n self.scheduler.wait()", "def block_waiting( self ):\n while self.num_waiting > 0:\n time.sleep( 1 )", "def wait(self, timeout: float):\n timeout = timeout or float(\"inf\")\n started = datetime.utcnow()\n n_tasks = self.n_tasks()\n while self.n_tasks() > 0:\n self.log.tick(\n \"%s tasks remaining, sleeping for %s s\", n_tasks, POLL_INTERVAL\n )\n time.sleep(POLL_INTERVAL)\n elapsed = datetime.utcnow() - started\n if 
elapsed.total_seconds() > timeout:\n raise QueueTimeoutError(\"Joining queue timed out\")\n n_tasks = self.n_tasks()\n self.log.debug(\"Waited successfully\")", "def wait (self, seconds=0.0):\r\n\t\tstart_time = time.time()\r\n\t\twhile time.time() < start_time + seconds:\r\n\t\t\tself.receive()", "def wait(interval):\n time.sleep(interval/1000.0)", "def wait(self, seconds):\n logging.info(\"sleeping\")\n self.new_message_event.wait(seconds)\n logging.info(\"waking\")", "def wait_for(func):\n \n while not func() and not rospy.is_shutdown():\n time.sleep(0.01)", "def wakeup(self):\n self.waker.notify()", "def wait_for_work(self, early_stop=lambda: False):\n self.work_notifier.acquire()\n\n try:\n while len(self.getDelayedCalls()) == 0 and not early_stop():\n self.work_notifier.wait()\n finally:\n self.work_notifier.release()", "def test_timeout(self):\n s1, s2 = self.create_bound_pair(zmqpy.PAIR, zmqpy.PAIR)\n poller = self.Poller()\n poller.register(s1, zmqpy.POLLIN)\n tic = time.time()\n evt = poller.poll(timeout=.005)\n toc = time.time()\n self.assertTrue(toc-tic < 0.1)\n tic = time.time()\n evt = poller.poll(timeout=5)\n toc = time.time()\n self.assertTrue(toc-tic < 0.1)\n self.assertTrue(toc-tic > .001)\n tic = time.time()\n evt = poller.poll(timeout=500)\n toc = time.time()\n self.assertTrue(toc-tic < 1)\n self.assertTrue(toc-tic > 0.1)", "def work(self, poll_timeout=60):\n \n continue_working = True\n worker_connections = []\n\n def continue_while_connections_alive(any_activity):\n return self.after_poll(any_activity)\n\n while continue_working and self.cont==True:\n worker_connections = self.establish_worker_connections()\n continue_working = self.poll_connections_until_stopped(worker_connections, continue_while_connections_alive, timeout=poll_timeout)\n\n for current_connection in worker_connections:\n current_connection.close()", "def wait(self):\r\n self.jobs.join()", "def wait(self, ms=None):\r\n util.raiseNotDefined()", "def _refresh_worker(self):\n\n while True:\n REFRESH_CONDITION.acquire()\n was_notified = REFRESH_CONDITION.wait(float(self._expires_in - 15))\n if was_notified or not self._refresh_thread_running:\n # Time to leave\n # logger.info('Spotify token-refresh thread exiting')\n break\n # logger.info('Spotify token being refreshed by token-refresh thread')\n self.refresh_token(False)\n REFRESH_CONDITION.release()", "def run_checks():\n while True:\n if datetime.now() > core.misc_data.check_date+timedelta(minutes=45):\n for stuff in stuff_to_do:\n threading.Thread(target=stuff).start()\n core.misc_data.check_date = datetime.now() + config.utils.tasks.repeat_every\n time.sleep(5*60*60)", "def _stopAllWorkers():\n if _workerConfig.get('waitThread'):\n return\n delay = 0\n try:\n delay = float(_workerConfig['idle-time']['all'])\n except Exception:\n delay = 300\n delay -= time.time() - _workerConfig['lastChange']\n if delay > 0:\n _workerConfig['waitThread'] = threading.Timer(delay, _delayStop)\n _workerConfig['waitThread'].daemon = True\n _workerConfig['waitThread'].start()\n return\n for worker in list(_workerConfig['started']):\n _stopWorker(worker)", "def Wait(self):\n sleep_time = min(self._time_remaining.values())\n time.sleep(sleep_time)\n\n tasks = set()\n for task in self._time_remaining:\n self._time_remaining[task] -= sleep_time\n if self._time_remaining[task] == 0:\n self._time_remaining[task] = self.task_intervals[task]\n tasks.add(task)\n return tasks", "def worker_loop(worker_id, gpu=False):\n global __worker_timer_start\n global __worker_timer_limit\n 
global __worker_dataset\n __worker_dataset = ''\n __worker_timer_start = 0\n __worker_timer_limit = 0\n f_stop = threading.Event()\n # start calling f now and every 60 sec thereafter\n __timer_control(f_stop)\n while True:\n try:\n # poll queue\n msg_search = brpop_key_store('controller:search_queue')\n heart_beep('worker', msg_search, worker_id, gpu)\n __worker_timer_start = time.time()\n __worker_timer_limit = 0\n __worker_dataset = ''\n if msg_search is not None:\n __worker_dataset = msg_search['dataset_id']\n __worker_timer_limit = msg_search['time_limit']\n log.info('received %s' % msg_search)\n msg_search = {**msg_search, **{'start_time': datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'host_name': socket.gethostname()}}\n job_search(msg_search)\n except KeyboardInterrupt:\n log.info('Keyboard interrupt: exiting')\n # stop the timer thread\n f_stop.set()\n exit()\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n log.error('%s in %s line:%s error: %s' % (exc_type.__name__, fname, str(exc_tb.tb_lineno), str(e)))\n with open(get_data_folder() + '/errors.txt', 'a') as f:\n f.write(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + str(msg_search) + '\\n')\n f.write('%s in %s line:%s error: %s' % (exc_type.__name__, fname, str(exc_tb.tb_lineno), str(e)) + '\\n')\n f.write('-'*80 + '\\n')", "def wait(self):\n for _ in range(15):\n time.sleep(10)\n if self.ready:\n break\n else:\n raise RuntimeError('timeout, lease failed to start')", "def wait_until_empty(self):\n while not self.is_empty():\n self.sleep(10)", "def block_while_running():\n runs = is_running()\n while runs:\n runs = is_running()\n time.sleep(10)", "def test_stop_when_result_queue_is_full(self):\n SLEEP_DELTA = 0.01\n TIMEOUT = 20\n QUEUE_SIZE = 2\n\n pool = ThreadPool(10, results_queue_size=QUEUE_SIZE)\n pool.start(WorkerIdGeneratingWorker)\n\n for _ in range(100):\n pool.ventilate()\n\n cumulative_wait = 0\n while pool.results_qsize() != QUEUE_SIZE:\n time.sleep(SLEEP_DELTA)\n cumulative_wait += SLEEP_DELTA\n # Make sure we wait no longer than the timeout. Otherwise, something is very wrong\n self.assertLess(cumulative_wait, TIMEOUT, msg='Timeout while waiting for the results queue to fill')\n\n # No need to read from the queue. 
We are testing ability to exit when workers might be blocked on the\n # results queue\n\n pool.stop()\n pool.join()", "def _delayStop():\n if isinstance(_workerConfig, dict):\n _workerConfig.pop('waitThread', None)\n _manageWorkers()", "def wait(self):\n self.drain_call_queue()", "def wait(self, period):\n try:\n self.logger.info(\"DataRecorder wait '%s' minutes\"%period) \n#-------------------on attend que les dservers demarres\n end_time = datetime.datetime.now()+datetime.timedelta(minutes=period)\n while end_time >= datetime.datetime.now() and self._isrunning():\n time.sleep(0.5)\n \n \n except Exception, details :\n self.logger.error(\"Wait time error : '%s'\"%str(details)) \n pass", "def wake_up(self):\n pass", "def _monitor_loop(self):\n while self._continue_running():\n for wl in self._workloads:\n if not wl.running():\n self.log.info('%-20s FAILED', wl.name())\n self._restart_workload(wl)\n else:\n self.log.info('%-20s OK', wl.name())\n\n time.sleep(self._monitor_delay)", "def fake_poll_until(retriever, condition=lambda value: value,\n sleep_time=1, time_out=0):\n from trove.common import exception\n slept_time = 0\n while True:\n resource = retriever()\n if condition(resource):\n return resource\n fake_sleep(sleep_time)\n slept_time += sleep_time\n if time_out and slept_time >= time_out:\n raise exception.PollTimeOut()", "def wait_until_all_activity_stops():\n if main_greenlet is None:\n return\n while other_threads_are_active():\n fake_sleep(1)", "def monitor(self):\n while not self.terminated:\n try:\n if (time.time() - self.updated_time) < 5:\n messages = self.messages.copy()\n # procs = np.min([ len(messages), 9 ]) + 1\n # pool = ThreadPool(procs)\n # pool.map(self.process, messages)\n # pool.close()\n # pool.join()\n for message in messages:\n self.process(message)\n elif self.ws:\n self.updated_time += 10\n self.ws.close()\n except Exception as e:\n self.on_error(None, \"Monitoring Error: {}\".format(e))\n continue\n finally:\n time.sleep(0.1)", "async def run(self):\n while True:\n await asyncio.sleep(0)\n # See if any sockets have anything\n try:\n socks, events = self.poller.poll(1000)\n for sock, event in zip(socks,events):\n if sock in self.subscriptions:\n states = sock.recv_json()\n await self.main_server.sync_states(states)\n\n # Nothing to report - Poller did not find any sockets with updates\n except ValueError:\n pass\n # Exiting\n except KeyboardInterrupt:\n break", "def check_queue():\n while True:\n logging.info( 'Awaiting task ' )\n yield from asyncio.sleep( 5 )\n loop.create_task( (start_background_tasks()) )", "def _wait_queue(self):\n while True:\n time.sleep(0.1)\n if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():\n return", "def _wait_and_check(self, sleep=5, exclude_services=[]):\n u.log.debug('Extended wait and check ...')\n time.sleep(sleep)\n self.d.sentry.wait(timeout=900)\n time.sleep(sleep)\n self._auto_wait_for_status(exclude_services=exclude_services)\n time.sleep(sleep)\n self.d.sentry.wait()\n u.log.debug('Wait and check completed.')", "def wait(self) -> None:\n now = time.time()\n if now < self.lockTime:\n diff = self.lockTime - now\n logger.debug(\"Waiting %ss to avoid ratelimit\", diff)\n time.sleep(diff)", "def wait_until_completed(self, max_poll_wait_secs=30):\n self.refresh()\n poll_wait = 0.2\n while not self.has_completed():\n logger.debug(\n f\"Waiting for job {self.id},\"\n + f\"it is in status '{self.details.status}'\"\n )\n print(\".\", end=\"\", flush=True)\n time.sleep(poll_wait)\n self.refresh()\n poll_wait = (\n 
max_poll_wait_secs\n if poll_wait >= max_poll_wait_secs\n else poll_wait * 1.5\n )", "def poll_gevent_sleep(max_seconds, condition=lambda: True, sleep_time=0.2):\n if max_seconds < 0:\n raise ValueError('max_seconds must be positive number')\n\n if sleep_time < 0.2:\n raise ValueError('sleep_time must be > 0.2')\n\n time_start = time.time()\n while True:\n if condition():\n return True\n gevent.sleep(sleep_time)\n if time.time() > time_start + max_seconds:\n return False", "def __work__(self):\n while not self.is_done:\n self.refreshSignal.emit()\n time.sleep(0.05)", "def poll(self):\n Monitor.poll(self)\n return deferToThread(self._poll)", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def wait(self, options):\n self.socketIO.wait(seconds=options)", "def busyWait(self):\n time.sleep(0.0)", "def wait_timeout(proc, seconds):\n start = time.time()\n end = start + seconds\n interval = 0.01\n\n while True:\n result = proc.poll()\n #print \"waiting\"\n if result is not None:\n return result\n if time.time() >= end:\n\n os.killpg(proc.pid, signal.SIGTERM)\n raise RuntimeError(\"Process timed out\")\n time.sleep(interval)", "def startWait(self, checkCallback = None, period = 0.5 ):\r\n self.scheduler.startWait(checkCallback,period)", "def event_loop(self):\n while self.ack is False:\n gevent.sleep(self.loop_interval)\n output_service = self.get_directory_service_proxy().get_service(\"mock-output-service\")\n output_service.put(\"test-worker-work-result\")\n self.ack = True", "def wait_inner():\n if (\n kernel32.WaitForMultipleObjects(\n 2,\n ctypes.pointer((HANDLE * 2)(cancel_event, timer)),\n False,\n INFINITE,\n )\n == WAIT_FAILED\n ):\n time_sleep(sleep_for)", "def __wait(min_sec, max_sec):\n time.sleep(randint(min_sec, max_sec))", "def startLoop(self):\n while not self.completed:\n self.fillJobQueue()\n self.cleanJobQueue()\n # TODO May want to revisit this:\n # http://stackoverflow.com/questions/29082268/python-time-sleep-vs-event-wait\n # probably when we move to Python 3.\n time.sleep(self.sleepTime)", "def _busy_wait_ms(self, ms):\n start = time.time()\n delta = ms/1000.0\n while (time.time() - start) <= delta:\n pass", "def setup_poll(self):\n while True:\n try:\n self.do_polling()\n time.sleep(0.01)\n except KeyboardInterrupt:\n print(self.get_stream())\n exit()", "def _sleep(self):\n while 1:\n diff = (time.time()-self.lastcall) - self.mindelay\n if diff >= 0: return\n time.sleep(max(-diff/2.0, 0.01))", "def run(self):\n if self.pollable:\n self.poll()\n if not self.EventsFactory.is_alive():\n self.EventsFactory.start()\n while True:\n if not self.EventsFactory.is_alive():\n self.logger.error(f'{self} events factory has died..')\n raise SubThreadException(self.EventsFactory)\n update_start_time = time.time()\n self.handle_events()\n wait_for(lambda: time.time() - update_start_time > self.handle_events_every and not self._busy_mutext.locked(),\n logger=self.logger, message='Waiting for work timeout to finish.')", "def 
wait_and_process(waiting_function):\n i = 0\n while not waiting_function(timeout=0.04):\n process_app_events()\n i += 1\n if i > 10000:\n assert False\n process_app_events()", "def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()", "def wait_until_idle(self):\n while True:\n time.sleep(self.__interface.WT_STATE_LOOKUP)\n\n if not self.is_busy:\n break", "async def wait(self, collection, interval=60, timeout=600):\n end = time.time() + timeout\n\n not_responded = self.not_responding_instances(collection)\n\n def get_container(inst):\n try:\n inst.state.docker.get_containers()\n inst.state.docker.responded = True\n except DOCKER_RETRY_EXC:\n logger.debug(\"Docker not ready yet on %s\",\n str(inst.instance.id))\n except Exception as exc:\n logger.debug(\"Got exception on %s: %r\",\n str(inst.instance.id), exc)\n\n # Attempt to fetch until they've all responded\n while not_responded and time.time() < end:\n await gen.multi([collection.execute(get_container, x)\n for x in not_responded])\n\n # Update the not_responded\n not_responded = self.not_responding_instances(collection)\n\n if not_responded:\n await collection.wait(interval)\n\n # Prune the non-responding\n logger.debug(\"Pruning %d non-responding instances.\",\n len(not_responded))\n await collection.remove_instances(not_responded)", "def wait_for(self, condition, *args):\n start_time = int(time.time())\n while True:\n try:\n condition(*args)\n except Exception:\n pass\n else:\n return\n if int(time.time()) - start_time >= self.build_timeout:\n condition(*args)\n return\n time.sleep(self.build_interval)", "def wait(self):\n time.sleep(0.010)", "async def job_wait(self, uid):\n self._require_running()\n await self._get_job(uid).wait()", "def wait_until(func, wait_for=None, sleep_for=0.5):\n res = func()\n\n if res:\n return res\n\n if wait_for:\n deadline = time.time() + wait_for\n while not res and time.time() <= deadline:\n gevent.sleep(sleep_for)\n res = func()\n\n else:\n while not res:\n gevent.sleep(sleep_for)\n res = func()\n\n return res", "def monitor_and_terminate(self):\n import time\n import datetime\n\n keep_running = True\n\n while keep_running:\n\n print()\n print(datetime.datetime.now().replace(microsecond=0))\n print(self.get_monitor_string())\n\n time.sleep(30)\n\n _, status = self.reporter.get_job_status(self.info)\n if status[\"active\"]+status[\"running\"] == 0:\n keep_running = False\n\n print(\"All tasks done.\")", "async def test_wait_for_activity(aiopg_connector):\n pg_app = app.App(connector=aiopg_connector)\n worker = worker_module.Worker(app=pg_app, timeout=2)\n worker.notify_event = asyncio.Event()\n task = asyncio.ensure_future(worker.single_worker(worker_id=0))\n await asyncio.sleep(0.2) # should be enough so that we're waiting\n\n worker.stop_requested = True\n worker.notify_event.set()\n\n try:\n await asyncio.wait_for(task, timeout=0.2)\n except asyncio.TimeoutError:\n pytest.fail(\"Failed to stop worker within .2s\")", "def should_poll(self):\r\n return False", "def wait_for_all_cache_tasks(self):\n if self.is_server_process:\n self.update_queue.join()\n self.refresh_queue.join()", "def pulse(seconds):\n index = 0\n while index < len(fake_threads):\n t = fake_threads[index]\n t['sleep'] -= seconds\n if t['sleep'] <= 0:\n t['sleep'] = 0\n t['next_sleep_time'] = None\n t['greenlet'].run()\n sleep_time = t['next_sleep_time']\n if sleep_time is None or isinstance(sleep_time, tuple):\n del fake_threads[index]\n index -= 1\n else:\n t['sleep'] 
= sleep_time\n index += 1", "def finishWait(self):\r\n self.scheduler.finishWait()" ]
[ "0.7244954", "0.68966836", "0.6490199", "0.64271986", "0.63700104", "0.63443273", "0.6310271", "0.6221005", "0.62040955", "0.6180912", "0.61787575", "0.61787575", "0.61787575", "0.61610657", "0.6151061", "0.6140504", "0.6097463", "0.60734135", "0.6073053", "0.6060509", "0.60543376", "0.59932214", "0.59929806", "0.5957649", "0.5954562", "0.59499055", "0.58953786", "0.5880213", "0.58589166", "0.58498704", "0.58316135", "0.58306617", "0.5802845", "0.5787302", "0.5784273", "0.5774853", "0.5760393", "0.575668", "0.5750019", "0.57458335", "0.57452345", "0.5736778", "0.57335705", "0.5716725", "0.5704242", "0.5700569", "0.56745857", "0.5672466", "0.5659793", "0.5659122", "0.5648312", "0.5639201", "0.56265664", "0.5611218", "0.5607883", "0.5604684", "0.559016", "0.55775", "0.55685914", "0.5553958", "0.5553958", "0.5553958", "0.5553958", "0.5553958", "0.5553958", "0.5553958", "0.5553958", "0.5553958", "0.5553958", "0.5553958", "0.5553958", "0.5553958", "0.5553958", "0.5553958", "0.5553096", "0.55516267", "0.5550352", "0.5547572", "0.55406374", "0.5537467", "0.5507522", "0.54761404", "0.5473713", "0.54730064", "0.54656035", "0.5461941", "0.5457289", "0.545196", "0.5448568", "0.54356647", "0.543029", "0.5427829", "0.54230046", "0.54195577", "0.54150206", "0.5413113", "0.54110694", "0.54107076", "0.5410666", "0.540622" ]
0.66818553
2
Formats comparison as a string
def format_comparison(objs): def formatter(comp): if not isinstance(comp, tuple): return str(comp) output = [] return "\n".join([comp.type] + [" "+errmessage for errmessage in output]) results = map(formatter,objs) return "\n".join(results) #obj1,obj2 = comp ### Sections #for i,s1,s2 in diffs: # if s1 and s2: # output.append(f"Section {i} does not match:") # result = compare_sections(s1,s2) # output.extend(almethods.linepadder(result)) # else: # if s1: # output.append(f"Door 2 missing Section {i}") # else: # output.append(f"Door 1 missing Section {i}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comparison(self) -> str:\n return self._values.get('comparison')", "def generate_comparison_output_string(comparisons: List[Dict[str, Any]]) -> str:\n result_dict = generate_comparison_dict(comparisons)\n result_string = json.dumps(result_dict, sort_keys=True, indent=4)\n return result_string", "def format_condition(self, key, val1, val2):\n if val1 is not None and val2 is not None:\n condition = '{:.2f} < {:s} < {:.2f}'.format(val1, key, val2)\n elif val2 is None:\n condition = '{:s} == {:s}'.format(key, str(val1))\n return condition", "def comparison(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"comparison\")", "def for_comparison(self):\n\t\tif len(self.values) < 5:\n\t\t\treturn unicode(self)\n\t\telse:\n\t\t\treturn u'-'.join(self._string_values(increment=1))", "def __sub_comparison_ops(file_contents: str) -> str:\n\n return re.sub(r'(?:IS\\s+)?EQUALS?(?:\\s+TO)?', '=', file_contents)", "def _strHard(self):\n if self.checkGreaterThanThreshold:\n operator += \">\"\n else:\n operator += \"<\"\n return \"(Hard) %s %f Enabled: %s\" %(operator, self.threshold, str(self.enabled))", "def get_comparison(self, start, end):\n\n return 'https://{}/{}/{}/compare/{}...{}'.format(HOST_GITHUB, \\\n self.repo, self.product, start, end) + '\\n'", "def get_name(self):\n return str(self.comparison_type)", "def _repr_(self):\n s = 'An inequality '\n have_A = not self.A().is_zero()\n if have_A:\n s += repr(self.A()) + ' x '\n if self.b()>=0:\n if have_A:\n s += '+'\n else:\n s += '-'\n if have_A:\n s += ' '\n s += repr(abs(self.b())) + ' >= 0'\n return s", "def _repr_(self):\n s = 'An inequality '\n have_A = not self.A().is_zero()\n if have_A:\n s += repr(self.A()) + ' x '\n if self.b()>=0:\n if have_A:\n s += '+'\n else:\n s += '-'\n if have_A:\n s += ' '\n s += repr(abs(self.b())) + ' >= 0'\n return s", "def __str__(self):\n return \"{} != {} ({})\".format(self.var1.name,\n self.var2.name,\n self.satisfied())", "def __str__(self):\n r = []\n for item in sorted(self._data.keys()):\n correct, incorrect = self._data[item][True], self._data[item][False]\n acc = correct / (correct + incorrect)\n s = f\"{item:4} | Accuracy: {acc:.2f}% (diff {'+' if acc-item >=0 else ''}{acc-item:.2f}%) | correct: {correct:2}, incorrect: {incorrect:2}\" \n r.append(s)\n\n return \"\\n\".join(r)", "def _to_string(self):\n self.results.print_results()\n self.results.print_comparison()", "def __str__(self):\n\n return 'IF {0} THEN {1}'.format(', '.join([str(fv) for fv in self.fvals]),\n str(self.label))", "def print_test_comparison(test_name, expected, result):\n line = \"\\n\"\n line += \"-\" * 60 + \"\\n\"\n line += \"{}\\n\".format(test_name)\n line += \"-\" * 60 + \"\\n\"\n line += \"-\" * 26 + \"EXPECTED\" + \"-\" * 26 + \"\\n\"\n line += \"{}\\n\".format(expected)\n line += \"-\" * 28 + \"END\" + \"-\" * 29 + \"\\n\"\n line += \"-\" * 27 + \"RESULT\" + \"-\" * 27 + \"\\n\"\n line += \"{}\\n\".format(result)\n line += \"-\" * 28 + \"END\" + \"-\" * 29 + \"\\n\"\n line += \"\\n\"\n return line", "def __str__(self):\n\n return '{0} {1} {2}'.format(self.feat, '==' if self.pos else '!=',\n self.val)", "def comparison(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"comparison\")", "def comparison(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"comparison\")", "def _equals(cols, new_table, new_cols):\n return ', '.join('{}={}.{}'.format(cols[i], new_table, new_cols[i]) for i in range(len(cols)))", "def __repr__(self) -> str:\n result = \"Equal\" if self.got is None 
else \"Unequal\"\n return f\"<TestResultFile {self.test_id},{self.regression_test_id},{self.regression_test_output_id}: {result}>\"", "def print_comparison(node, comparisons, search_string):\n\n # dostane sa na dummy kluc\n if node is None:\n print(\"\\nPorovnanie\", str(comparisons + 1) + \":\",\n colored(\"\\n -- DUMMY kluc: \" + search_string + \" --\", \"green\", attrs=['bold']),\n \"\\n Hladany retazec:\", colored(search_string, \"green\", attrs=['bold']),\n \"\\n Zhoda:\", colored(True, \"green\", attrs=['bold']),\n \"\\n\\n*******************\")\n\n # medzivysledok\n else:\n color = \"green\" if node.value == search_string else \"red\"\n print(\"\\nPorovnanie\", str(comparisons) + \":\",\n \"\\n Aktualny kluc:\", colored(node.value, color, attrs=['bold']),\n \"\\n Hladany retazec:\", colored(search_string, color, attrs=['bold']),\n \"\\n Zhoda:\", colored(node.value == search_string, color, attrs=['bold']),\n \"\\n\\n*******************\")", "def format_errordict(self, errordict):\n errormsg = f'Comparison between {self.ad1.filename} and {self.ad2.filename}'\n for k, v in errordict.items():\n errormsg += f'\\nComparison failure in {k}'\n errormsg += '\\n' + ('-' * (22 + len(k))) + '\\n'\n errormsg += '\\n '.join(v)\n return errormsg", "def __str__(self):\n\t\treturn \"{min} ~ {max}\".format(min=str(self.min), max=str(self.max))", "def __str__(self):\n return '\\n'+'\\n'.join([\"%-15s: %s\" % (qq(w), str(v)) for w, v in sorted(self.value.items())]) + '\\0'", "def compare(self, other: Optional['PDFState']) -> str:\n ret_value = ''\n if (\n other is None or self.font_family != other.font_family or\n self.font_mode != other.font_mode or self.size != other.size\n ):\n ret_value += ' /{} {} Tf'.format(self.font.ref, round(self.size, 3))\n if other is None or self.color != other.color:\n ret_value += ' ' + str(self.color)\n if other is None or self.rise != other.rise:\n ret_value += ' {} Ts'.format(round(self.rise, 3))\n\n return ret_value", "def print_comparison(name, dates, times, orig_data, comp_data):\n\n # Output comparison of data\n print(' ORIGINAL COMPUTED')\n print(f' DATE TIME {name.upper():>9} {name.upper():>9} DIFFERENCE')\n print('------- ------ --------- --------- ----------')\n zip_data = zip(dates, times, orig_data, comp_data)\n for date, time, orig, comp in zip_data:\n diff = orig - comp\n print(f'{date} {time:>6} {orig:9.6f} {comp:9.6f} {diff:10.6f}')", "def get_compare_value_texts(self):\n return self.compare_value_texts", "def __cmp__(self, other):\n return cmp(repr(self), repr(other))", "def format(self) -> str:", "def compare(obj_a, obj_b):\n\n return tuple_to_string(obj_a) == tuple_to_string(obj_b)", "def __str__(self):\n return f\"{str(self.team1)} vs {str(self.team2)} on {str(self.date)}\"", "def __str__(self):\n\n return '{0} == {1}'.format(self.feat, self.val)", "def compare(self, other_trigger):\n response = []\n if self.name != other_trigger.name:\n response.append('DIFF: Trigger names: %s' % self.name)\n response.append('and %s' % other_trigger.name)\n # Compare types\n if self.scope != other_trigger.scope:\n response.append('DIFF: Trigger %s scope' % self.name)\n response.append('%s is different to ' % self.scope)\n response.append('trigger %s ' % other_trigger.name)\n response.append('scope %s' % other_trigger.scope)\n # Compare triggering events\n if self.events != other_trigger.events:\n response.append('DIFF: Trigger %s' % self.name)\n response.append(' events %s is ' % self.events)\n response.append('different to trigger %s' % other_trigger.name)\n 
response.append('events %s' % other_trigger.events)\n # Compare SQL statements\n if self.sql != other_trigger.sql:\n response.append('DIFF: Trigger %s ' % self.name)\n response.append('SQL %s ' % self.sql)\n response.append('is different to trigger %s ' % other_trigger.name)\n response.append('SQL %s' % other_trigger.sql)\n return response", "def print_comparison(name, dates, times, original_data, computed_data):\n \n # Output comparison of data\n print(' ORIGINAL COMPUTED')\n print(f' DATE TIME {name.upper():>9} {name.upper():>9} DIFFERENCE')\n print('------- ------ --------- --------- ----------')\n zip_data = zip(dates, times, original_data, computed_data)\n for date, time, orig, comp in zip_data:\n diff = orig - comp\n print(f'{date} {time:>6} {orig:9.6f} {comp:9.6f} {diff:10.6f}')", "def __str__(self) -> str:\n if self.write_back is black.WriteBack.CHECK:\n reformatted = \"would be reformatted\"\n unchanged = \"would be left unchanged\"\n failed = \"would fail to reformat\"\n cleared = \"would be cleared\"\n else:\n reformatted = \"reformatted\"\n unchanged = \"left unchanged\"\n failed = \"failed to reformat\"\n cleared = \"cleared\"\n report = []\n if self.change_count:\n s = \"s\" if self.change_count > 1 else \"\"\n report.append(\n click.style(\n f\"{self.change_count} cell{s} {reformatted}\", bold=True\n )\n )\n if self.same_count:\n s = \"s\" if self.same_count > 1 else \"\"\n report.append(f\"{self.same_count} cell{s} {unchanged}\")\n if self.failure_count:\n s = \"s\" if self.failure_count > 1 else \"\"\n report.append(\n click.style(f\"{self.failure_count} cell{s} {failed}\", fg=\"red\")\n )\n if self.output_change_count:\n s = \"s\" if self.change_count > 1 else \"\"\n report.append(\n click.style(\n f\"{self.output_change_count} output{s} {cleared}\",\n bold=True,\n )\n )\n if self.output_same_count:\n s = \"s\" if self.same_count > 1 else \"\"\n report.append(f\"{self.output_same_count} output{s} {unchanged}\")\n return \", \".join(report) + \".\"", "def pytest_assertrepr_compare(op: str, left: Any, right: Any) -> List[str]: # noqa: U100\n output = [\"Compare Result:\"]\n\n for line in list(dictdiffer.diff(left, right)):\n output.extend(pp.pformat(line).split(\"\\n\"))\n\n return output", "def __str__(self):\n return \"{ %s }1\" % str(self.__rule)", "def construct_string(first, data):\n print_string = \"Compare A: \"\n if not first:\n print_string = \"Against B: \"\n print_string += f\"{data['name']}, a {data['description']}, from {data['country']}\"\n print(print_string)", "def __str__(self):\n return \"{ %s }\" % str(self.__rule)", "def __str__(self):\n return self.team_a + \" vs \" + self.team_b", "def compare(self, other_sequence):\n response = []\n if self.name != other_sequence.name:\n response.append('DIFF: Sequence names: %s' % self.name)\n response.append('and %s' % other_sequence.name)\n if self.increment_by != other_sequence.increment_by:\n response.append('DIFF: Increment interval')\n response.append('is %d,' % self.increment_by)\n response.append('for %s' % other_sequence.name)\n response.append('it is %d' % other_sequence.increment_by)\n if self.min_value != other_sequence.min_value:\n response.append('DIFF: Min value is %d' % self.min_value)\n response.append(' for %s' % other_sequence.name)\n response.append('it is %d' % other_sequence.min_value)\n if self.max_value != other_sequence.max_value:\n response.append('DIFF: Max value is %d' % self.max_value)\n response.append(', for %s ' % other_sequence.name)\n response.append('it is %d' % 
other_sequence.max_value)\n # The only attribute we don't check is currval, becuase it will be \n # different in 999 cases out of a 1000\n return response", "def comparison(self):\n return self._comparison", "def __str__(self):\n result = \"\"\n if self.operator is not None:\n result = \"Operator: \" + self.operator + \"\\n\"\n result += self.dictkey() + \"\\n\"\n return result", "def _join_equality(row_dict):\n equalities = []\n for key, val in row_dict.items():\n temp = '{}='.format(key)\n if isinstance(val, (int, float)):\n temp += '{}'.format(val)\n elif isinstance(val, str):\n temp += '\\'{}\\''.format(val)\n else:\n raise TypeError('Value %s, type %s not recognised as a number or string' % (val, type(val)))\n equalities.append(temp)\n return ', '.join(equalities)", "def __str__(self):\n return \"Converter(rings=%s, branches=%s)\" % (\n [\"False\", \"True\"][self.rings],\n [\"False\", \"True\"][self.branches])", "def __str__(self):\n out = \"!!!!!!! REPORTED STATISTICS !!!!!!!\\n\"\n for k in self.order:\n if k in self.keys():\n if k in self.explainer.keys():\n out += self.explainer[k].replace('XXXX', str(\n self[k])) + \"\\n\"\n else:\n out += self[k] + \"\\n\"\n for k in self.keys():\n if k not in self.order:\n out += str(self[k])\n return out", "def __str__(self):\n return \"(%s)\" % ' '.join(map(str, self.__subrules))", "def text(self) -> str:\n text = []\n\n if self.min is not None:\n text.append(str(self.min))\n if self.include_min:\n text.append(\"<=\")\n else:\n text.append(\"<\")\n\n if self.min is not None or self.max is not None:\n text.append(\"value\")\n\n if self.max is not None:\n if self.include_max:\n text.append(\"<=\")\n else:\n text.append(\"<\")\n text.append(str(self.max))\n\n if self.step is not None:\n if self.min is not None or self.max is not None:\n text.append(\"and\")\n text.extend([\"value %\", str(self.step), \"==\"])\n if self.min is None:\n text.append(\"0\")\n else:\n text.append(str(self.min % self.step))\n\n return \" \".join(text)", "def format_condition(condition: str) -> str:\r\n for key, value in CONDITIONS_MAP.items():\r\n if condition in value:\r\n return key\r\n return condition", "def logic_program_form(self):\r\n s = ''\r\n for x in self.new_sorts:\r\n s = s + 'dom(' + x + ').\\nis_a(' + x + ', nodes).\\n' \r\n for y in self.supersorts:\r\n s = s + 'link(' + x + ', ' + y + ').\\n'\r\n return s", "def _get_problem_report_results_str(self):\n return 'curr_rew: %0.3f, best_rew: %0.3f'%(self.curr_reward, self.curr_best_reward)", "def __str_metric_expression(self,metricExpression):\n if metricExpression['type'] == \"LEAF_METRIC_EXPRESSION\":\n return metricExpression['metricDefinition']['logicalMetricName'].lower()\n else: #metricExpression['type'] == \"BOOLEAN_METRIC_EXPRESSION\"\n return __str_metric_expression(metricExpression['expression1']) + \" \" + metricExpression['operator']['type'] + \" \" + \\\n __str_metric_expression(metricExpression['expression2'])", "def __str__(self):\n\n sign = ''\n if self.get_b() > 0:\n sign = '+'\n\n elif self.get_a() == self.get_b() or self.get_b() == 0:\n return f'z = {self.get_a()}i'\n\n elif self.get_a() == 0:\n return f'z = {self.get_b()}i'\n\n return f'z = {self.get_a()}{sign}{self.get_b()}i'", "def __str__(self):\n sorted_table = InferenceUtils.get_n_best(self._table, max(len(self._table), 1))\n\n result = []\n for key, value in sorted_table.items():\n result.append('P(%s):=%f\\n' % (str(key), value))\n\n return ''.join(result)[:-1] if len(result) > 0 else ''", "def compare(self, other: 'SInt') -> 
str:\r\n if self.signe == other.signe :\r\n return super().compare(other)\r\n else:\r\n return other.signe + self.signe", "def compare_strings(s1, s2):\n print(s1)\n for idx, c in enumerate(s2):\n if s1[idx] == c:\n cprint(c, fg='c', end='')\n else:\n cprint(c, bg='r', style='b', end='')\n print()", "def __str__(self):\n self.vals.sort()\n result = ''\n for e in self.vals:\n result = result + str(e) + ','\n return '{' + result[:-1] + '}'", "def changes_as_string(changed_attrs, transitions):\n changes = []\n if not changed_attrs:\n return ''\n for attr, change in changed_attrs.items():\n for transition_description, possible_transitions in transitions[attr].items():\n for transition in possible_transitions:\n if transition == change:\n changes.append(transition_description)\n if len(changes) > 2:\n return '{0} and {1}'.format(', '.join(changes[:-1]), changes[-1])\n else:\n return ' and '.join(changes)", "def replace_equals_with_dash2():", "def __str__(self):\n return \"(%s)\" % ' | '.join(map(str, self.__subrules))", "def __str__(self):\r\n return '[t1=%.1fs, t2=%.1fs, tmax=%.1fs, Lmax=%.1fs]' % (self.t1, self.t2, self.tmax, self.Lmax)", "def compare(self, other_code_object):\n response = []\n if self.name != other_code_object.name:\n response.append('DIFF: Code object names: %s' % self.name)\n response.append('and %s' % other_code_object.name)\n if self.object_type != other_code_object.object_type:\n response.append('DIFF: Code object types: %s' % self.object_type)\n response.append('and %s' % other_code_object.object_type)\n return response", "def __str__(self):\r\n # for values that should be recorded exactly e.g. iteration number\r\n if self.count == 0:\r\n return str(self.val)\r\n # for stats\r\n return '%.5f (%.5f)' % (self.val, self.avg)", "def __str__(self):\n self.vals.sort()\n return '{' + ','.join([str(e) for e in self.vals]) + '}'", "def test_operator_rendering(self):\r\n self.assertEqual(\"=\", unicode(EqualsOperator()))\r\n self.assertEqual(\"IN\", unicode(InOperator()))\r\n self.assertEqual(\">\", unicode(GreaterThanOperator()))\r\n self.assertEqual(\">=\", unicode(GreaterThanOrEqualOperator()))\r\n self.assertEqual(\"<\", unicode(LessThanOperator()))\r\n self.assertEqual(\"<=\", unicode(LessThanOrEqualOperator()))", "def __str__(self) -> str:\n return F\"<{self.priority}, {self.value}>\"", "def __str__(self):\n # for values that should be recorded exactly e.g. 
iteration number\n if self.count == 0:\n return str(self.val)\n # for stats\n return '%.4f (%.4f)' % (self.val, self.avg)", "def test_operator_rendering(self):\n self.assertEqual(\"=\", six.text_type(EqualsOperator()))\n self.assertEqual(\"IN\", six.text_type(InOperator()))\n self.assertEqual(\">\", six.text_type(GreaterThanOperator()))\n self.assertEqual(\">=\", six.text_type(GreaterThanOrEqualOperator()))\n self.assertEqual(\"<\", six.text_type(LessThanOperator()))\n self.assertEqual(\"<=\", six.text_type(LessThanOrEqualOperator()))", "def __lt__(self, other):\n if (self.name < other.name):\n return \"Less Than\"\n else:\n return \"Not less than\"", "def __str__(self):\r\n self.vals.sort()\r\n return '{' + ','.join([str(e) for e in self.vals]) + '}'", "def __str__(self):\n return \"{0} {1}\".format(self.operation, \", \".join([v for v in [self.operand0, self.operand1, self.operand2] if v is not None]))", "def __str__(self):\n\n ret = ''\n for rule in self.rules:\n ret += str(rule) + '\\n'\n ret += 'IF TRUE THEN {0}'.format(self.default)\n\n return ret", "def diff_report(self) -> str:\n graph_a = self.graph_a\n graph_b = self.graph_b\n\n graph_a_str = str(graph_a)\n graph_b_str = str(graph_b)\n\n if graph_a_str == graph_b_str:\n return \"\"\n\n graph_diff = difflib.ndiff(\n graph_a_str.splitlines(True), graph_b_str.splitlines(True)\n )\n graph_diff_report = [\"Graph diff:\", self._indent(\"\".join(graph_diff))]\n\n for node_a, node_b in itertools.zip_longest(graph_a.nodes(), graph_b.nodes()):\n if str(node_a) != str(node_b):\n graph_diff_report.append(\"First diverging operator:\")\n node_diff = difflib.ndiff(\n str(node_a).splitlines(True), str(node_b).splitlines(True)\n )\n source_printout = [\"node diff:\", self._indent(\"\".join(node_diff))]\n\n stack_a = node_a.sourceRange() if node_a else None\n if stack_a:\n source_printout.extend(\n [\"Former source location:\", self._indent(str(stack_a))]\n )\n stack_b = node_b.sourceRange() if node_b else None\n if stack_b:\n source_printout.extend(\n [\"Latter source location:\", self._indent(str(stack_b))]\n )\n\n graph_diff_report.extend(source_printout)\n\n break\n\n return \"\\n\".join(graph_diff_report)", "def formatted(self) -> str:\r\n ...", "def compare(self, other_table):\n response = []\n if self.name != other_table.name:\n response.append('DIFF: Table names: %s and %s' % (self.name, other_table.name))\n if self.tablespace_name != other_table.tablespace_name:\n response.append('DIFF: Tablespace names: %s and %s' % (self.tablespace_name, other_table.tablespace_name))\n # Compare columns\n for column_name in list(self.columns.keys()):\n if column_name in list(other_table.columns.keys()):\n if self.columns[column_name] != other_table.columns[column_name]:\n response.append('DIFF: Definition of %s is different' % column_name)\n else:\n response.append('DIFF: Column %s not in %s' % (column_name, other_table.name))\n for column_name in list(other_table.columns.keys()):\n if column_name not in list(self.columns.keys()):\n response.append('DIFF: Column %s not in %s' % (column_name, self.name))\n return \"\\n\".join(response)", "def PrintDiffs(message, lhs, rhs):\n dif = set(lhs).difference(rhs)\n if dif:\n print message, ', '.join(dif)", "def replace_equals_with_dash():", "def compare_values(source_player, target_player, getter):\n source_value = getter(source_player)\n target_value = getter(target_player)\n\n def _compare(source_value, target_value):\n if source_value > target_value:\n return '▲'\n elif source_value < target_value:\n return 
'▼'\n else:\n return '='\n\n return _compare(source_value, target_value) + ' ' + _compare(target_value, source_value)", "def getStringForAndDifferential(self, a, b, c):\n command = \"(({0} & {2}) | ({1} & {2}) | (~{2}))\".format(a,b,c)\n return command", "def __str__(self):\n return \"[ %s ]\" % str(self.__rule)", "def __str__(self):\n ret = \"{address}\\t{name}\\t{otherinfo}\".format(\n address = self.email_address,\n name = self.name,\n otherinfo = self.otherinfo\n )\n if self.extrainfo:\n ret = ret + \"\\t\" + self.extrainfo\n if self.misc:\n ret = ret + self.format_misc()\n return ret", "def __str__(self):\n if self.flaky:\n fmt = 'flaky | '\n else:\n fmt = ''\n fmt += '{2}: {0}'\n if self.variant:\n fmt += ' {1}'\n return fmt.format(*self)", "def __str__(self):\n # for values that should be recorded exactly e.g. iteration number\n if self.count == 0:\n return str(self.val)\n # for stats\n return '%.4f (%.4f)' % (self.val, self.avg)", "def __str__(self) -> str:\n return '[Q]: {} || [A]: {} || [{}]'.format(self.tell, self.answer, self.created.isoformat())", "def to_formatted(self) -> str:\n return str(self.google_confidence) + \\\n \"\\t\" + str(self.normalized_sentence_score) + \\\n \"\\t\" + str(self.gaps_transcript) + \\\n \"\\t\" + str(self.gaps_google)", "def MergeLogic(self) -> str:", "def comparison_outfile(self):\n\n return f\"{self.name}.compare.out\"", "def __str__(self):\n L = []\n for s,e in self.normalized():\n if s == e:\n L.append(str(s))\n else:\n L.append(str(s) + \"-\" + str(e))\n return \",\".join(L)", "def __str__ (self):\n return f'\"{self.value[0]}|{self.value[1]}\"'", "def str_attr(self):\n return str(self.operator)", "def __str__(self):\n return '%s | %s' % (self.name_one, self.name_two)", "def _format_tracking(local_branch, remote_branch,\n left, right):\n if (left,right) == (0,0):\n return \"Your tracking branch and remote branches are up to date.\"\n elif left == 0:\n return (\"The remote branch %s is %d revisions ahead of tracking branch %s.\" %\n (remote_branch, right, local_branch))\n elif right == 0:\n return (\"Your tracking branch %s is %s revisions ahead of remote branch %s.\" %\n (local_branch, left, remote_branch))\n else:\n return ((\"Your local branch %s and remote branch %s have diverged by \" +\n \"%d and %d revisions.\") %\n (local_branch, remote_branch, left, right))", "def __repr__(self):\n s = self.name\n if self.param != \"None\":\n s += ' with parameter '+self.param\n s += '; '+self.applyTo\n if self.applyTo != \"global\":\n s += ': '+self.conditions\n return s", "def _create_formatted_string(self):\n string = NALSyntax.StatementSyntax.Start.value + \\\n self.get_subject_term().get_formatted_string()\n\n string += \" \" + self.get_copula_string() + \" \"\n\n string += self.get_predicate_term().get_formatted_string() + \\\n NALSyntax.StatementSyntax.End.value\n\n return string", "def __str__(self):\n msg = [\n f'{self.model=}',\n f'{self.field=}',\n f'{self.fxx=}',\n f'{self.date=}',\n f'{self.priority=}',\n ]\n return '\\n'.join(msg)", "def __str__(self):\n return super().formatter(\"{old}, {prefix}/{new}\")", "def __str__(self):\n return '%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s ' % (self.Month,\n self.Production,self.Hygienist_Production,\n self.Collections_Percentage,self.Overthe_Counter,\n self.AR_31_60,self.AR_61_90,self.AR_Over_90,\n self.AR_Ins_31_60,self.AR_Ins_61_90,self.AR_Ins_Over_90,\n self.New_Patients,self.Total_Patients_Seen,\n self.Broken_Apointments,self.Broken_Appt_Pct,\n self.Hygiene_Pct)", "def __str__(self):\n 
columns = list(self.metrics.keys())\n columns.sort()\n out = '%s\\n' % ','.join(columns)\n values = [str(self.metrics[c]) for c in columns]\n out += '%s\\n' % ','.join(values)\n return out", "def __str__(self):\n status = (\"\\na: %.2f \\n\" % self.a +\n \"e: %.2f \\n\" % self.e +\n \"inc: %.2f deg \\n\" % (self.inc * 180/math.pi) +\n \"om: %.2f deg \\n\" % (self.om * 180/math.pi) +\n \"Om: %.2f deg \\n\" % (self.Om * 180/math.pi) +\n \"H: %.2f \\n\" % self.H\n )\n return status" ]
[ "0.7065585", "0.6774839", "0.66851884", "0.66676676", "0.6445696", "0.64411914", "0.64020616", "0.63991475", "0.6312643", "0.6112813", "0.6112813", "0.610843", "0.6023886", "0.6010248", "0.59334326", "0.5931873", "0.59163105", "0.5899037", "0.5899037", "0.5896669", "0.5889875", "0.58821243", "0.58778167", "0.5857649", "0.58547544", "0.58478045", "0.5822179", "0.58094436", "0.58093524", "0.5797635", "0.57788575", "0.576928", "0.57676464", "0.5763277", "0.5755225", "0.5743587", "0.5736362", "0.57055694", "0.56802535", "0.5668477", "0.56633884", "0.5644682", "0.5637629", "0.5629481", "0.5627716", "0.56240064", "0.5621177", "0.5608891", "0.56061965", "0.55935663", "0.5591559", "0.5586865", "0.5586061", "0.5585416", "0.5573564", "0.5572957", "0.55725974", "0.55707335", "0.5565556", "0.5564286", "0.55627567", "0.55547124", "0.55349845", "0.5524855", "0.5524303", "0.5517846", "0.5515155", "0.55032873", "0.5494171", "0.54915327", "0.54836255", "0.5478335", "0.54752105", "0.54719", "0.54647917", "0.5459384", "0.54579747", "0.54579705", "0.54535896", "0.5447118", "0.5445929", "0.5435901", "0.5429652", "0.5429495", "0.5426508", "0.54208195", "0.54179984", "0.5404972", "0.54038215", "0.5402834", "0.53831965", "0.5381411", "0.53810465", "0.5380788", "0.5376179", "0.5368158", "0.5366869", "0.5364913", "0.53621614", "0.535731" ]
0.69769394
1
Catches a difference when one or both of the objects are None (since it is handled the same across methods)
def none_comparison(func): @functools.wraps(func) def inner(obj1,obj2): if obj1 is not None and obj2 is not None: return func(obj1, obj2) if obj1 is None and obj2 is None: return [] if obj1 is not None and obj2 is None: return Difference(f"Second {obj1.__class__.__name__} is None",(obj1,None)) return Difference(f"First {obj2.__class__.__name__} is None",(None,obj2)) return inner
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_none(self) -> PossibleResult[T]:\n if self.constructor == type(None):\n if not self.obj is None:\n raise DeserializeError(\n type(None), self.obj, self.new_depth, self.key\n )\n return self.obj # type: ignore\n return NO_RESULT", "def compare_with_none():\n value = {};\n if value is not None:\n print(\"value is not none\")\n else:\n print(\"value is none\")", "def assert_is_not_none(self, obj):\n if obj is None:\n raise AssertionError('unexpectedly None')", "def is_none(obj):\n return obj is None", "def __eq__(self, other: Any) -> bool:\n return isinstance(other, Nothing)", "def _merge_sanity_check(self, other):\n if self._fields is not None and (\n set(self.query.values_select) != set(other.query.values_select)\n or set(self.query.extra_select) != set(other.query.extra_select)\n or set(self.query.annotation_select) != set(other.query.annotation_select)\n ):\n raise TypeError(\n \"Merging '%s' classes must involve the same values in each case.\"\n % self.__class__.__name__\n )", "def test_none(self):\n esnA = ESN(N_in,N_out,random_state=None)\n esnB = ESN(N_in,N_out,random_state=None)\n self._compare(esnA,esnB,should_be=\"different\")", "def test_none(self):\n base1 = Base(None)\n base2 = Base(None)\n base3 = Base(None)\n self.assertEqual(base1.id, base3.id - 2)", "def test_do_check_event_type(self):\n self.assertEqual(self.a.get_type(), None)\n self.assertEqual(self.b.get_type(), None)\n self.assertTrue(self.a.do_check_event_type(self.a))\n\n self.a = +self.a\n self.assertFalse(self.a.do_check_event_type(self.b))", "def assert_type_or_none(obj, classes):\n if obj is not None:\n assert_type(obj, classes)", "def is_not_none(e):\n return e is not None", "def assert_is_none(self, obj):\n if obj is not None:\n raise AssertionError('%s is not None' % (str(obj),))", "def get_none1(self):\n pass", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def none(self):", "def testNoneAssignment(self):\n class MyMessage(messages.Message):\n\n my_field = messages.StringField(1)\n\n m1 = MyMessage()\n m2 = MyMessage()\n m2.my_field = None\n self.assertEquals(m1, m2)", "def assertIsNotNone(self, obj, msg=None):\r\n if obj is None:\r\n standardMsg = 'unexpectedly None'\r\n self.fail(self._formatMessage(msg, standardMsg))", "def interferes(self, other):\n return True", "def return_none() -> None:\n pass", "def test_no_rhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = object()\n rhs = None\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(lhs, result)", "def _check_union(self) -> PossibleResult[T]:\n if _is_union(self.constructor):\n args = get_args(self.constructor)\n is_optional = len(args) == 2 and type(None) in args\n is_optional_property = len(args) == 2 and Undefined in args\n if is_optional and self.obj is None:\n return None # type: ignore\n if is_optional_property and self.obj is UNDEFINED:\n return UNDEFINED # type: ignore\n for argument in args:\n convert_primitives = self.convert_primitives and (\n (is_optional and argument != type(None))\n or (is_optional_property and argument != Undefined)\n )\n try:\n return Deserialize(\n obj=self.obj,\n constructor=argument,\n depth=self.new_depth,\n convert_primitives=convert_primitives,\n ).run()\n except DeserializeError:\n pass\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n return NO_RESULT", "def __ne__(self, other):\r\n\t\treturn 
(self.type != other.type or self.value != other.value)", "def test_product_nullables(self):\n self.assertIsNone(self.product3.main_image)\n self.assertIsNone(self.product3.protein)\n self.assertIsNone(self.product3.fat)\n self.assertIsNone(self.product3.carbs)\n self.assertIsNone(self.product3.calories)", "def nulltest():", "def test_equal_on_not_equal_value(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_b, enums.OpaqueDataType.NONE)\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def __neq__(self, other): \n return not self == other", "def assertIsNone(self, obj, msg=None):\r\n if obj is not None:\r\n standardMsg = '%s is not None' % (safe_repr(obj),)\r\n self.fail(self._formatMessage(msg, standardMsg))", "def is_equal(o1: object, o2: object) -> bool:\n if o1 is None and o2 is None:\n return True\n if o1 is None:\n return False\n return o1 == o2", "def test_none_input(self):\n eq_(None, output())", "def handle_other(self):\n pass", "def test_not_equal_on_not_equal_value(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_b, enums.OpaqueDataType.NONE)\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def test_not_equal_on_type_mismatch(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = \"invalid\"\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def raise_or_return(obj, exception, msg):\n if obj is None:\n raise exception(msg)\n return obj", "def _check_other(self, other):\n # pylint: disable=protected-access\n if not isinstance(other, Spectrum):\n raise RuntimeError(\n \"Tried to binop Spectrum and %s\" % (type(other))\n )\n\n if (self._pot is None) != (other._pot is None):\n raise RuntimeError(\n \"Tried to binop Spectra with nonconformant POT\"\n )\n\n if (self._lt is None) != (other._lt is None):\n raise RuntimeError(\n \"Tried to binop Spectra with nonconformant Livetimes\"\n )", "def __ne__(self: _TT, other: object) -> bool:\n return self.ne(other) # type: ignore", "def test_differents(self):\r\n p1 = Place()\r\n p2 = Place()\r\n self.assertNotEqual(p1.id, p2.id)\r\n self.assertNotEqual(p1.updated_at, p2.updated_at)\r\n self.assertNotEqual(p1.created_at, p2.created_at)", "def test_not_equal_on_equal(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def consistent(self):\n if self.var1.get_value() is None or self.var2.get_value() is None:\n return True\n\n return self.var1.value != self.var2.value", "def isnone(cls, lhs, rhs):\n if rhs:\n return lhs is None\n else:\n return lhs is not None", "def test_check_null_weight_with_none() -> None:\n sw_out, X_out, y_out = check_null_weight(None, X_toy, y_toy)\n assert sw_out is None\n np.testing.assert_almost_equal(X_out, X_toy)\n np.testing.assert_almost_equal(y_out, y_toy)", "def __ne__(self, obj):\r\n return assert_(self.obj != obj, '%r == %r' % (self.obj, obj))", "def _validate_type_not_null(self, name, obj, *args):\n for arg in args:\n if isinstance(obj, arg):\n return\n raise TypeError(self.__class__.__name__ + '.' + name + ' is of type ' + type(obj).__name__ +\n '. 
Must be one of the following types: ' + str(args))", "def _nonetypeclass(*args, **kwargs):\n return None", "def __ne__(self, other):\n if not isinstance(other, OneOfFluidResultControlsFieldCalculations):\n return True\n\n return self.to_dict() != other.to_dict()", "def is_null(self) -> bool:\n return self.allele1 == -1 and self.allele2 == -1", "def exceptNull(func): # -> (*args: Unknown, **kwargs: Unknown) -> Unknown:\n ...", "def test_equal_on_type_mismatch(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = \"invalid\"\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def test_patch_none():", "def visit_none_type(self, left: NoneType) -> T:", "def test_no_lhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = None\n rhs = object()\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(rhs, result)", "def test_not_equal_on_equal_and_empty(self):\n a = Digest()\n b = Digest()\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def test_none(self):\n self.assertEqual(self.obj.to_json_string(None), '[]')", "def __ne__(self, Other):\n return self.date != Other.date or self.time_of_day != Other.time_of_day", "def compare_exceptions(exc1, exc2):\n return exc1 == exc2 or exc1.__class__ == exc2.__class__ and exc1.args == exc2.args", "def test_allow_multiples(self):\r\n o1 = self.b1.get(self.key)\r\n o2 = self.b2.get(self.key)\r\n\r\n o1.set_data(\"object-1\")\r\n o1.store()\r\n o2.set_data(\"object-2\")\r\n o2.store()\r\n\r\n conflicted = self.b1.get(self.key)\r\n siblings = filter(bool, (s.get_data() for s in conflicted.get_siblings()))\r\n self.assertEqual(len(siblings), 2)", "def assert_is_not(self, first, second, msg=None):\r\n assert first is not second", "def test_missing_null_value():\n # When add a Missing Block\n t = Missing(\"foo\", null_value=True)\n\n # Then I see the appropriate JSON\n results = {\n \"missing\": {\n \"field\": \"foo\",\n \"null_value\": True\n }\n }\n\n homogeneous(t, results)", "def test_is_compatible_with_nulls(self):\n\n i = Interface('/foo[0:3]')\n i['/foo[0:2]', 'interface', 'io'] = [0, 'out']\n i['/foo[2]', 'interface'] = 0\n j = Interface('/foo[0:3]')\n j['/foo[0:2]', 'interface', 'io'] = [1, 'in']\n j['/foo[2]', 'interface'] = 1\n assert i.is_compatible(0, j, 1)", "def test_multiple_with_none(self):\n with self.assertRaises(Exception) as context:\n Multiple.check_number(None)\n\n self.assertTrue('Invalid parameter value: ' in str(context.exception))", "def test_field_none_nullable(self):\n node_dict = {\n 'host_name': 'abc'\n }\n try:\n Node(**node_dict)\n except Exception as e:\n self.assertEqual(type(e), ValueError)", "def compare_fields(field1, field2):\r\n if field1 is None and field2 is None:\r\n return True\r\n\r\n if (field1 is None and field2 is not None) or\\\r\n (field2 is None and field1 is not None):\r\n return False\r\n\r\n if field1 == field2:\r\n return True\r\n\r\n return False", "def fix(self) -> Union[tuple, None]:\n raise NotImplementedError", "def __or__(self, other):\n if isinstance(other, Segment):\n return self.crossSegment(other)\n elif isinstance(other, Line):\n return self.crossLine(other)\n elif isinstance(other, HalfLine):\n return other.crossSegment(self)\n elif isinstance(other, Form):\n return other.crossSegment(self)\n else:\n raise TypeError(\"The collisions {}|{} are not dealt with.\".format(type(self), type(other)))", "def test_not_equal_on_not_equal_data_type(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = 
objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b.opaque_type = \"invalid\"\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def test_rightHandArgumentImplementsUnequality(self):\n self.assertFalse(Record(1, 2) != EqualToEverything())\n self.assertTrue(Record(1, 2) != EqualToNothing())", "def test_none(self):\n self.assertEqual(b\"\", self.successResultOf(to_xml(None)))", "def _check_undefined(self) -> PossibleResult[T]:\n if self.constructor == Undefined:\n if not self.obj is UNDEFINED:\n raise DeserializeError(\n Undefined, self.obj, self.new_depth, self.key\n )\n return self.obj # type: ignore\n return NO_RESULT", "def test_notification_ne(self) -> None:\n self.assertTrue(self.notification1 != self.notification2)\n\n # pylint: disable=unnecessary-dunder-call\n self.assertTrue(self.notification1.__ne__(self.notification2))", "def arguments_not_none(func):\n def wrapper(*args, **kwargs):\n for arg in args:\n if arg is None:\n raise NullArgument()\n for arg, val in kwargs.iteritems():\n if val is None:\n raise NullArgument()\n try:\n return func(*args, **kwargs)\n except TypeError as ex:\n if 'takes exactly' in ex.args[0]:\n raise NullArgument('Wrong number of arguments provided: ' + str(ex.args[0]))\n else:\n raise\n\n return wrapper", "def _deserialize_null(self, *args):\n return None", "def test_equality_check_against_other_object_doesnt_raise_exception(self):\n test_object = Vec3(1, 2, 3)\n self.assertFalse(test_object == Quat(1, 2, 3, 4))\n self.assertFalse(Quat(1, 2, 3, 4) == test_object)\n self.assertTrue(test_object != Quat(1, 2, 3, 4))\n self.assertTrue(Quat(1, 2, 3, 4) != test_object)", "def __ne__(self, other):\r\n return self._real != other.real or self._imag != other.imag", "def __ne__(self, other):\r\n if isinstance(other, vec4):\r\n return self.x!=other.x or self.y!=other.y or self.z!=other.z\r\n else:\r\n return 1", "def test_log_ne(self) -> None:\n self.assertTrue(self.log1 != self.log2)\n\n # pylint: disable=unnecessary-dunder-call\n self.assertTrue(self.log1.__ne__(self.log2))", "def test_equal_on_not_equal_data_type(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b.opaque_type = \"invalid\"\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def __ne__(self, other):\n pass", "def not_none(value):\n return not value is None", "def _default_checker(x, y):\r\n if x[0] != y[0]:\r\n raise Exception(\"Output mismatch.\",\r\n {'performlinker': x[0], 'clinker': y[0]})", "def compare_fields(field1, field2):\n if field1 is None and field2 is None:\n return True\n\n if (field1 is None and field2 is not None) or\\\n (field2 is None and field1 is not None):\n return False\n\n if field1 == field2:\n return True\n\n return False", "def __ne__(self, other):\n return not isinstance(other, self.__class__)", "def test_get_other_typeerror(self):\n v = versions.Version(name='foo', version='1.2.3')\n self.assertRaises(TypeError, v._get_other, 3.4)", "def is_null(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_null)", "def assert_is_not_none(self, expr, msg=None):\r\n assert expr is not None", "def test_untyped(self):\n conn = self.connect()\n c = conn.cursor()\n c.execute(\"select null,''\")\n self.assertEqual((None, \"\"), c.fetchone())\n c.execute(\"select '',null\")\n self.assertEqual((\"\", None), c.fetchone())", "def test_retNone1(self):\r\n class retNone(gof.op.Op):\r\n def make_node(self):\r\n inputs = [theano.tensor.vector()]\r\n outputs = 
[theano.tensor.vector()]\r\n return gof.Apply(self, inputs, outputs)\r\n\r\n def grad(self, inp, grads):\r\n x, = inp\r\n gz, = grads\r\n pass\r\n a = retNone().make_node()\r\n self.assertRaises(TypeError, grad_sources_inputs, [(a.out, one)], None)", "def _check_notnull(self):\n candnull = self.df_test_resampled[self.candidate_col_name].isnull().all()\n refnull = self.df_test_resampled[self.reference_col_name].isnull().all()\n if candnull or refnull:\n return 1, 'No data for selected time frame'\n else:\n return 0, 'No error occurred'", "def _isnull_old(obj):\n if is_scalar(obj):\n return lib.checknull_old(obj)\n # hack (for now) because MI registers as ndarray\n elif isinstance(obj, ABCMultiIndex):\n raise NotImplementedError(\"isnull is not defined for MultiIndex\")\n elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):\n return _isnull_ndarraylike_old(obj)\n elif isinstance(obj, ABCGeneric):\n return obj._constructor(obj._data.isnull(func=_isnull_old))\n elif isinstance(obj, list) or hasattr(obj, '__array__'):\n return _isnull_ndarraylike_old(np.asarray(obj))\n else:\n return obj is None", "def assert_not_equal(self, first, second, msg=\"\"):\r\n assert first != second", "def __ne__(self, other):\n if type(self) != type(other):\n return True\n s_vars = vars(self)\n o_vars = vars(other)\n for v in vars(self):\n if s_vars[v] != o_vars[v]:\n return True\n return False", "def _null_min(a, b):\n if a is None:\n return b\n if b is None:\n return a\n return min(a, b)", "def test_equal_on_equal_and_empty(self):\n a = Digest()\n b = Digest()\n\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def test_get_other_typeerror_2(self):\n v = versions.Version(name='foo', version='1.2.3')\n self.assertRaises(TypeError, v._get_other, '1')", "def not_none(item, alt=None):\r\n\r\n return item if item is not None else alt", "def test_is_compatible_with_nulls_types(self):\n\n i = Interface('/foo[0:3]')\n i['/foo[0:2]'] = [0, 'out', 'gpot']\n i['/foo[2]', 'interface'] = 0\n j = Interface('/foo[0:3]')\n j['/foo[0:2]'] = [1, 'in', 'gpot']\n j['/foo[2]', 'interface'] = 1\n assert i.is_compatible(0, j, 1)", "def test_door_no_data(self):\n door = Door({})\n\n assert door.warning is None\n assert door.closed is None\n assert door.locked is None", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def _is_no_result(obj: Any) -> bool:\n return obj is NO_RESULT", "def NOR(*objects):\n return NegationEvent(ObjectOrSeen(*objects))", "def assert_is_none(self, expr, msg=None):\r\n assert expr is None" ]
[ "0.6248695", "0.5997152", "0.59885675", "0.59798694", "0.5903614", "0.5886935", "0.58787954", "0.5814186", "0.58113503", "0.57685584", "0.5712491", "0.56863844", "0.56762654", "0.56702787", "0.56702787", "0.5659557", "0.563092", "0.5625755", "0.5596324", "0.55960506", "0.5590203", "0.5568005", "0.55187255", "0.55032694", "0.54724413", "0.54694337", "0.5465812", "0.5464359", "0.54436016", "0.54423535", "0.543887", "0.5434026", "0.54011047", "0.5392194", "0.5378286", "0.5364224", "0.5359366", "0.5356787", "0.535633", "0.53536946", "0.5352484", "0.5337288", "0.53279185", "0.5327219", "0.5326746", "0.53107744", "0.5291324", "0.5291024", "0.5276469", "0.5261242", "0.5251885", "0.5238783", "0.52380604", "0.5227368", "0.5224004", "0.5223755", "0.52189565", "0.5214178", "0.5213324", "0.51998216", "0.5197122", "0.51922405", "0.51875657", "0.51840603", "0.51737654", "0.51727504", "0.5169323", "0.5167378", "0.5162962", "0.5162151", "0.5158468", "0.51481485", "0.51443", "0.51435167", "0.513837", "0.5130749", "0.51271033", "0.5124029", "0.51217955", "0.5120909", "0.5119071", "0.51186275", "0.51154435", "0.51145184", "0.5097966", "0.50958705", "0.509418", "0.5082005", "0.50794804", "0.50752854", "0.5073434", "0.50729835", "0.5071553", "0.50711644", "0.50686073", "0.5058053", "0.50548226", "0.50538194", "0.5053622", "0.5041586" ]
0.7289079
0
Compares attributes between 2 objects via getattr, returning the attribute values as a tuple if they do not match
def attr_comparison(obj1,obj2,attrs): return [Difference(f"{obj1.__class__.__name__}.{attr}",(result1,result2)) for attr in attrs if (result1 := getattr(obj1,attr)) != (result2 := getattr(obj2,attr))]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def attrs_to_tuple(obj):\n return tuple(getattr(obj, a) for a in attrs)", "def compare(current_formation):\n\n attribute_tuple = ()\n for attr in attributes:\n\n if attr in current_formation:\n attribute_tuple += (current_formation[attr],)\n elif attr in ['position_all']:\n position_list = list(current_formation['positions'].keys())\n attribute_tuple += (position_list,)\n else:\n print \"Invalid Attribute: %s\" % attr\n\n return attribute_tuple", "def _compare_attributes(self, first: Node, second: Node) -> bool:\n # If opsets of nodes are different, then nodes have different attributes.\n fst_opset = first.get_opset()\n snd_opset = second.get_opset()\n if fst_opset != snd_opset:\n return False\n\n if fst_opset not in ['opset1', 'opset4']:\n fst_name = first.soft_get('name', first.id)\n snd_name = second.soft_get('name', second.id)\n raise Error('Unsupported opset {} for nodes with names {} and {}'.format(fst_opset, fst_name, snd_name))\n\n if fst_opset == 'opset1':\n return self._compare_attributes_of_interpolate1(first, second)\n else:\n return self._compare_attributes_of_interpolate4(first, second)", "def compare(a, b, attrs, f):\n for attr in attrs:\n if not f(getattr(a, attr), getattr(b, attr)):\n return False\n return True", "def _attributes(self, ext1, ext2):\n errorlist = []\n for attr in ['data', 'mask', 'variance', 'OBJMASK', 'OBJCAT']:\n attr1 = getattr(ext1, attr, None)\n attr2 = getattr(ext2, attr, None)\n if (attr1 is None) ^ (attr2 is None):\n errorlist.append(f'Attribute error for {attr}: '\n f'{attr1 is not None} v {attr2 is not None}')\n elif attr1 is not None:\n if isinstance(attr1, Table):\n if len(attr1) != len(attr2):\n errorlist.append(f'attr lengths differ: '\n f'{len(attr1)} v {len(attr2)}')\n else: # everything else is pixel-like\n if attr1.dtype.name != attr2.dtype.name:\n errorlist.append(f'Datatype mismatch for {attr}: '\n f'{attr1.dtype} v {attr2.dtype}')\n if attr1.shape != attr2.shape:\n errorlist.append(f'Shape mismatch for {attr}: '\n f'{attr1.shape} v {attr2.shape}')\n if 'int' in attr1.dtype.name:\n try:\n assert_most_equal(attr1, attr2, max_miss=self.max_miss)\n except AssertionError as e:\n errorlist.append(f'Inequality for {attr}: '+str(e))\n else:\n try:\n assert_most_close(attr1, attr2, max_miss=self.max_miss,\n rtol=self.rtol, atol=self.atol)\n except AssertionError as e:\n errorlist.append(f'Mismatch for {attr}: '+str(e))\n return errorlist", "def sub_comparison(obj1,obj2,translate):\n return [Difference(f\"{obj1.__class__.__name__} > {meth.__name__}\",result) for (meth,attr) in translate if (result := meth(getattr(obj1,attr),getattr(obj2,attr))) is not None]", "def getter_attributes_test(name, from_xml, from_dict, result):\n assert getattr(from_xml, name) == result\n assert getattr(from_dict, name) == result", "def attrs_eq(received, **expected):\n for k, v in expected.iteritems():\n eq_(v, getattr(received, k))", "def assert_attributes_equal(self, video, attrs):\r\n for key, value in attrs.items():\r\n self.assertEquals(getattr(video, key), value)", "def cmpAttributeValues(self, dcObj, ignoreOrder=True, **kwargs):\n rL = []\n floatRelTolerance = kwargs.get(\"floatRelTolerance\", 1.0e-05)\n floatAbsTolerance = kwargs.get(\"floatAbsTolerance\", 1.0e-04)\n try:\n sa = set(self.getAttributeList())\n sb = set(dcObj.getAttributeList())\n atNameComList = list(sa & sb)\n #\n lenEq = self.getRowCount() == dcObj.getRowCount()\n if not lenEq:\n return [(atName, False) for atName in atNameComList]\n #\n for atName in atNameComList:\n dataType, _ = 
self.__getAttributeInfo(atName)\n if dataType in [\"string\", \"integer\"]:\n if ignoreOrder:\n same = sorted(self.getAttributeValueList(atName)) == sorted(dcObj.getAttributeValueList(atName))\n else:\n same = self.getAttributeValueList(atName) == dcObj.getAttributeValueList(atName)\n elif dataType in [\"float\"]:\n aVL = self.getAttributeValueList(atName)\n bVL = dcObj.getAttributeValueList(atName)\n if ignoreOrder:\n for aV, bV in zip(sorted(aVL), sorted(bVL)):\n same = self.__isClose(aV, bV, relTol=floatRelTolerance, absTol=floatAbsTolerance)\n if not same:\n break\n else:\n for aV, bV in zip(aVL, bVL):\n same = self.__isClose(aV, bV, relTol=floatRelTolerance, absTol=floatAbsTolerance)\n if not same:\n logger.info(\"%s %s (rel=%r) (abs=%r) %r (%r)\", self.getName(), atName, aV * floatRelTolerance, floatAbsTolerance, aV, abs(aV - bV))\n break\n rL.append((atName, same))\n #\n return rL\n except Exception as e:\n if self._raiseExceptions:\n raise e\n return rL", "def _PairUpAttributes(attributes):\n names = sorted(set(attr.id for attr in attributes))\n getters = {}\n setters = {}\n for attr in attributes:\n if attr.is_fc_getter:\n getters[attr.id] = attr\n elif attr.is_fc_setter and 'Replaceable' not in attr.ext_attrs:\n setters[attr.id] = attr\n return [(getters.get(id), setters.get(id)) for id in names]", "def with_cmp(attrs):\n def attrs_to_tuple(obj):\n \"\"\"\n Create a tuple of all values of *obj*'s *attrs*.\n \"\"\"\n return tuple(getattr(obj, a) for a in attrs)\n\n def eq(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) == attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def ne(self, other):\n result = eq(self, other)\n if result is NotImplemented:\n return NotImplemented\n else:\n return not result\n\n def lt(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) < attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def le(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) <= attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def gt(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) > attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def ge(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) >= attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def hash_(self):\n return hash(attrs_to_tuple(self))\n\n def wrap(cl):\n cl.__eq__ = eq\n cl.__ne__ = ne\n cl.__lt__ = lt\n cl.__le__ = le\n cl.__gt__ = gt\n cl.__ge__ = ge\n cl.__hash__ = hash_\n\n return cl\n return wrap", "def attr_is_equal(first_obj, second_obj, attr):\n import numpy as np\n\n # Avoid comparing None's.\n return attr_has_same_shape(first_obj, second_obj, attr) and np.array_equal(\n getattr(first_obj, attr), getattr(second_obj, attr)\n )", "def compare_values(self, other, value):\n if value in self.__dir__() and value in other.__dir__():\n return float(self.__getattribute__(value)) > float(other.__getattribute__(value))\n else:\n return \"Can't compare values\"", "def check_values(layer1, layer2, attr):\n attr1 = getattr(layer1, attr, None)\n attr2 = getattr(layer2, attr, None)\n if not attr1:\n return not attr2\n return numpy.allclose(attr1.eval(), attr2.eval())", "def test_mix_positional_with_attribute_access():\n data = \"{0.__class__.__name__}: {0}\".format(42)\n data2 = \"{0[0]}: {0}\".format([1])\n return (data, data2)", "def parse_attributes(self, ds_to_check=None):\n\n if ds_to_check is None:\n ds_to_check = 
self.ds\n\n print(\"Parsing attributes.\")\n for i in ds_to_check.attrs.keys():\n if i in self._attrs.keys():\n print(\"{} is both a property of the object and an attribute of the dataset\".format(i))\n if ds_to_check.attrs[i] == self._attrs[i]:\n print(\" ... and they are equal\")\n else:\n print(\" ... and they NOT are equal!!!\")\n\n ds_to_check.attrs = self._attrs", "def compare(obj_a, obj_b):\n\n return tuple_to_string(obj_a) == tuple_to_string(obj_b)", "def match_attribute_names(*arrays):\n rep = arrays[0].sdbtype.full_rep\n result = [arrays[0]]\n for a in arrays[1:]:\n renames = []\n reserved = list(a.att_names) # reserved att names\n for r in a.sdbtype.full_rep:\n nm = r[0]\n if _att_match(rep, r):\n reserved.append(nm)\n continue\n newname = _find_rename(rep, r, reserved)\n if newname is None:\n raise ValueError(\"Cannot rename %s in %s\" % (nm, a))\n renames.extend((nm, newname))\n reserved.append(newname)\n if renames:\n a = a.attribute_rename(a, *renames)\n result.append(a)\n return tuple(result)", "def equality_check(a, b):\n\n def check_item(x, y, attr):\n if isinstance(x, hoomd.operation._HOOMDGetSetAttrBase):\n equality_check(x, y)\n return\n if isinstance(x, Mapping):\n for k, v in x.items():\n assert k in y, f\"For attr {attr}, key difference {k}\"\n check_item(v, y[k], \".\".join((attr, str(k))))\n return\n if not isinstance(x, str) and hasattr(x, \"__len__\"):\n assert len(x) == len(y)\n for i, (v_x, v_y) in enumerate(zip(x, y)):\n check_item(v_x, v_y, attr + f\"[{i}]\")\n return\n if isinstance(x, float):\n assert numpy.isclose(x, y), f\"attr '{attr}' not equal:\"\n return\n assert x == y, f\"attr '{attr}' not equal:\"\n\n if not isinstance(a, hoomd.operation._HOOMDGetSetAttrBase):\n return a == b\n assert type(a) == type(b)\n\n _check_obj_attr_compatibility(a, b)\n\n for attr in a.__dict__:\n if attr in a._skip_for_equality:\n continue\n\n if attr == \"_param_dict\":\n param_keys = a._param_dict.keys()\n b_param_keys = b._param_dict.keys()\n # Check key equality\n assert param_keys == b_param_keys, \"Incompatible param_dict keys:\"\n # Check item equality\n for key in param_keys:\n check_item(a._param_dict[key], b._param_dict[key], key)\n continue\n\n if attr == \"_typeparam_dict\":\n keys = a._typeparam_dict.keys()\n b_keys = b._typeparam_dict.keys()\n # Check key equality\n assert keys == b_keys, \"Incompatible _typeparam_dict:\"\n # Check item equality\n for key in keys:\n for type_, value in a._typeparam_dict[key].items():\n check_item(value, b._typeparam_dict[key][type_], \".\".join(\n (key, str(type_))))\n continue\n\n check_item(a.__dict__[attr], b.__dict__[attr], attr)", "def get_attributes(self, attributes, default=''):\n if isinstance(attributes, str):\n attributes = [attributes]\n\n attrs = [getattr(self, attr, default) for attr in attributes]\n\n if len(attrs) == 1:\n return attrs[0]\n\n return tuple(attrs)", "def getattrnames(instance):\n return tuple(sorted([attr_name for attr_name in vars(instance).keys()\n if not attr_name.startswith('_')]))", "def test_attributes_equal(self):\n test1 = self.Test({ 'id': 2, 'name': 'Poop Head' })\n test2 = self.Test({ 'id': 2, 'name': 'Poop Head' })\n self.assertEqual(test1, test2)", "def _compare_attributes_of_interpolate1(self, first: Node, second: Node) -> bool:\n # If some of attributes 'mode', 'align_corners', 'antialias', 'pads_begin', 'pads_end' are different,\n # then attributes of nodes are not identical.\n op = Interpolate(graph=first.graph, attrs={})\n for attr in ['mode', 'align_corners', 'antialias', 
'pads_begin', 'pads_end']:\n if first.soft_get(attr, default=op.attrs[attr]) != second.soft_get(attr, default=op.attrs[attr]):\n return False\n return True", "def assertNodesEqual(self, first, second):\n def get_attrs(l):\n result = []\n for n in l:\n result.append((n.service, n.address, n.version, n.properties))\n return result\n self.assertEqual(get_attrs(first), get_attrs(second))", "def _compare_ioc_properties(old: Dict[str, IOC], new: Dict[str, IOC]):\n new_iocs = set()\n changed_iocs = set()\n removed_iocs = set()\n\n _attributes = [\"macros\", \"pvs\", \"pvsets\", \"simlevel\", \"restart\", \"autostart\"]\n\n for ioc_name in new.keys():\n if ioc_name not in old.keys():\n # If not in previously then add it to new iocs\n new_iocs.add(ioc_name)\n elif any(getattr(old[ioc_name], attr) != getattr(new[ioc_name], attr) for attr in _attributes):\n # If any attributes have changed, add to changed iocs\n changed_iocs.add(ioc_name)\n\n for ioc_name in old.keys():\n if ioc_name not in new:\n removed_iocs.add(ioc_name)\n\n return new_iocs, changed_iocs, removed_iocs", "def _exact_compare(tree1, tree2):\n attrs = ['name', 'length', 'support']\n for n1, n2 in zip(tree1.postorder(), tree2.postorder()):\n for attr in attrs:\n if getattr(n1, attr, None) != getattr(n2, attr, None):\n return False\n return True", "def __eq__(self, other):\n\t\treturn all((getattr(self, attr, None) == getattr(other, attr, None) for attr in self.attrs))", "def _mergeAttributes(this, other, attrName):\n attr1 = getattr(this, attrName)\n attr2 = getattr(other, attrName)\n if attr1 is not None and attr2 is not None:\n raise AttributeError(\n \"Cannot merge {} and {}, the attribute `{}` has been assigned on both\"\n \"instances.\".format(this, other, attrName)\n )\n return attr1 if attr1 is not None else attr2", "def changed_attrs(old_version, new_version, interesting_attrs):\n # Use an OrderedDict so that we preserve the order from interesting_attrs\n changed = OrderedDict()\n for attr in interesting_attrs:\n if attr in old_version and attr not in new_version:\n changed[attr] = [old_version[attr], None]\n elif attr in new_version and attr not in old_version:\n changed[attr] = [None, new_version[attr]]\n elif old_version[attr] != new_version[attr]:\n changed[attr] = [old_version[attr], new_version[attr]]\n return changed", "def __cmp__(self, other):\n return cmp(type(self), type(other)) or cmp(\n (self.name, self.propertiesstr), (other.name, other.propertiesstr)\n )", "def verify_get_attr(self, indata, outdata):\n decoded = {}\n for key, val in outdata.items():\n if isinstance(val, bytes):\n decoded[key.decode()] = val\n else:\n decoded[key] = base64.b64decode(val)\n\n self.log.info(\"Verifying get_attr output:\")\n self.log.info(\" get_attr data: %s\", indata)\n self.log.info(\" set_attr data: %s\", decoded)\n\n for attr, value in indata.items():\n if value != decoded.get(attr.decode(), None):\n self.fail(\n \"FAIL: Value does not match after get({}), Expected \"\n \"val={} and received val={}\".format(attr, value,\n decoded.get(attr.decode(), None)))", "def _entuple(r):\n return tuple(getattr(r, n) for n in r.__names__)", "def __eq__(self, other):\n for attr in self._attrs_to_save:\n try:\n if getattr(self, attr) != getattr(other, attr):\n return False\n except AttributeError:\n return False\n return True", "def test_attrs(self):\n for self_attr, wrapper_attr in [(\"reactor\", \"_reactor\"),\n (\"client\", \"_client\")]:\n self.assertIdentical(getattr(self, self_attr),\n getattr(self.wrapper, wrapper_attr))", "def 
verify_get_attr(self, indata, outdata):\n decoded = {}\n for key, val in outdata.items():\n if isinstance(val, bytes):\n # The API returns the values as bytes already.\n decoded[key.decode()] = val\n else:\n # The JSON output encodes the bytes as base64, so\n # we need to decode them for comparison.\n decoded[key] = base64.b64decode(val)\n\n self.log.info(\"Verifying get_attr output:\")\n self.log.info(\" get_attr data: %s\", indata)\n self.log.info(\" set_attr data: %s\", decoded)\n\n for attr, value in indata.items():\n if value != decoded.get(attr.decode(), None):\n self.fail(\n \"FAIL: Value does not match after get({}), Expected \"\n \"val={} and received val={}\".format(attr, value,\n decoded.get(attr.decode(), None)))", "def csv_attribute_unpacker(self, attribute_tuples: List[Tuple[str, str]], separator: str = \",\") \\\n -> Tuple[List[str], List[str]]:\n\n if not attribute_tuples:\n raise ValueError(\"The list of tuples containing the attributes is missing.\")\n\n join_attributes_set: set = set()\n selection_attributes_set: set = set()\n\n for j_attribute_string, s_attribute_string in attribute_tuples:\n for j_attribute in j_attribute_string.split(separator):\n join_attributes_set.add(j_attribute.strip())\n\n for operator in self.operators:\n s_attribute_string = s_attribute_string.replace(separator + operator + separator, operator)\n\n for s_attribute in s_attribute_string.split(separator):\n for operator in self.operators:\n if operator in s_attribute:\n s_attribute = s_attribute.split(operator)[0].strip()\n selection_attributes_set.add(s_attribute)\n break\n\n return list(join_attributes_set), list(selection_attributes_set)", "def _compare_attributes_of_interpolate4(self, first: Node, second: Node) -> bool:\n # If some of attributes 'mode', 'coordinate_transformation_mode', 'nearest_mode', 'antialias', 'cube_coeff'\n # are different, then attributes of first and second are not identical.\n for attr in self.default_values_for_opset4.keys():\n default_value = self.default_values_for_opset4[attr]\n if first.soft_get(attr, default=default_value) != second.soft_get(attr, default=default_value):\n return False\n\n # If attributes 'pads_begin' or 'pads_end' of nodes first and second are different, then attributes\n # of first and second are not identical.\n for attr in ['pads_begin', 'pads_end']:\n if not np.array_equal(first.soft_get(attr, default=self.default_pads),\n second.soft_get(attr, default=self.default_pads)):\n return False\n return True", "def test_attributes(self):\n attributes = storage.attributes()[\"Review\"]\n b = Review()\n for k, v in attributes.items():\n self.assertTrue(hasattr(b, k))\n self.assertEqual(type(getattr(b, k, None)), v)", "def __eq__(self, other):\n return (\n isinstance(other, Attribute)\n and self.name == other.name\n and self.type == other.type\n and self.is_required == other.is_required\n )", "def _get_object_prop(self, vm, attributes):\n result = vm\n for attribute in attributes:\n try:\n result = getattr(result, attribute)\n except (AttributeError, IndexError):\n return None\n return result", "def _compare_and_set_attributes(curr, dag, muts, phen, comparison):\n params = dag.node[curr]\n\n # Get the children of this node\n children = dag.successors(curr)\n\n assert len(children) == 2, \"Tree node with #children != 2.\"\n\n x_params = dag.node[children[0]]\n y_params = dag.node[children[1]]\n x_key = x_params['dataset']\n y_key = y_params['dataset']\n value = None\n\n if x_key is None:\n if y_key is None:\n # Neither child has a dataset.\n 
params['dataset'] = None\n else:\n # Y has a dataset, but not X.\n params['genes'] = y_params['genes']\n params['dataset'] = y_key\n params['function'] = compare.ds_y\n params['value'] = y_params['value']\n else:\n if y_key is None:\n # X has a dataset, but not Y.\n params['genes'] = x_params['genes']\n params['dataset'] = x_key\n params['function'] = compare.ds_x\n params['value'] = x_params['value']\n else:\n # Both have datasets. This is the normal case.\n params['genes'] = x_params['genes'] + y_params['genes']\n function, dataset, value, *etc = compare.best_combination(\n muts[x_key], muts[y_key], phen, comparison)\n params['function'] = function\n params['dataset'] = curr\n muts[curr] = dataset\n params['value'] = value\n\n return value", "def _compare_scalars(self, old, new, name=None):\n # Explicitly excluded arguments\n if old != new:\n return {'---': old, '+++': new}\n else:\n return None", "def _compare_model_version_tuples( # pylint: disable=too-many-return-statements\n model_version_1: Optional[Tuple[str, str]] = None,\n model_version_2: Optional[Tuple[str, str]] = None,\n) -> int:\n if model_version_1 is None or model_version_2 is None:\n if model_version_2 is not None:\n return -1\n if model_version_1 is not None:\n return 1\n return 0\n\n model_id_1, version_1 = model_version_1\n\n model_id_2, version_2 = model_version_2\n\n if model_id_1 < model_id_2:\n return -1\n\n if model_id_2 < model_id_1:\n return 1\n\n if Version(version_1) < Version(version_2):\n return 1\n\n if Version(version_2) < Version(version_1):\n return -1\n\n return 0", "def areAttributeValuesEqual(self, sAttr, sPrefix, oValue1, oValue2):\n # Just in case someone uses it directly.\n if oValue1 == oValue2:\n return True;\n\n #\n # Timestamps can be both string (param) and object (db)\n # depending on the data source. 
Compare string values to make\n # sure we're doing the right thing here.\n #\n if sPrefix == 'ts':\n return str(oValue1) == str(oValue2);\n\n #\n # Some generic code handling ModelDataBase children.\n #\n if isinstance(oValue1, list) and isinstance(oValue2, list):\n if len(oValue1) == len(oValue2):\n for i, _ in enumerate(oValue1):\n if not isinstance(oValue1[i], ModelDataBase) \\\n or type(oValue1) is not type(oValue2):\n return False;\n if not oValue1[i].isEqual(oValue2[i]):\n return False;\n return True;\n\n elif isinstance(oValue1, ModelDataBase) \\\n and type(oValue1) is type(oValue2):\n return oValue1[i].isEqual(oValue2[i]);\n\n _ = sAttr;\n return False;", "def isEqualEx(self, oOther, asExcludeAttrs):\n for sAttr in self.getDataAttributes():\n if sAttr not in asExcludeAttrs \\\n and getattr(self, sAttr) != getattr(oOther, sAttr):\n # Delegate the final decision to an overridable method.\n if not self.areAttributeValuesEqual(sAttr, self.getHungarianPrefix(sAttr),\n getattr(self, sAttr), getattr(oOther, sAttr)):\n return False;\n return True;", "def __eq__(self, other):\n try:\n return (self.tag == other.tag and self.attributes == other.attributes)\n except:\n return False", "def in_attr_list(self, attrs):\n for other in attrs:\n if other.matches(self): return True\n return False", "def _check_attrs(ds_in, dset_attrs):\n attrs = dset_attrs['attrs']\n for key, value in attrs.items():\n src_value = ds_in.attrs.get(key)\n if src_value:\n if isinstance(src_value, bytes):\n src_value = src_value.decode('utf-8')\n\n if src_value != value:\n msg = ('Attr {} value ({}) does not match '\n 'source value ({}), using source value.'\n .format(key, value, src_value))\n logger.warning(msg)\n warn(msg)\n\n dset_attrs['attrs'][key] = src_value\n\n return dset_attrs", "def test_get_attributes(self):\n pass", "def get_comparison_data(self, context):\n if context is None:\n operand1 = [x for x in self[0].select()]\n operand2 = [x for x in self[1].select()]\n else:\n operand1 = [x for x in self[0].select(context.copy())]\n operand2 = [x for x in self[1].select(context.copy())]\n\n if self.parser.compatibility_mode:\n # Boolean comparison if one of the results is a single boolean value (1.)\n try:\n if isinstance(operand1[0], bool):\n if len(operand1) == 1:\n return [(operand1[0], self.boolean_value(operand2))]\n if isinstance(operand2[0], bool):\n if len(operand2) == 1:\n return [(self.boolean_value(operand1), operand2[0])]\n except IndexError:\n return []\n\n # Converts to float for lesser-greater operators (3.)\n if self.symbol in ('<', '<=', '>', '>='):\n return [\n (float(self.data_value(value1)), float(self.data_value(value2)))\n for value1 in operand1 for value2 in operand2\n ]\n\n return [(self.data_value(value1), self.data_value(value2))\n for value1 in operand1 for value2 in operand2]", "def compare(self, width, height, x, y):\n attr = [width, height, x, y]\n key = [\"width\", \"height\", \"x\", \"y\"]\n\n for x in range(len(attr)):\n if isinstance(attr[x], int) is not True:\n raise TypeError(key[x] + \" must be an integer\")\n\n for x in range(len(attr[:2])):\n if attr[x] <= 0:\n raise ValueError(key[x] + \" must be > 0\")\n\n for y in range(2, len(attr)):\n if attr[y] < 0:\n raise ValueError(key[y] + \" must be >= 0\")", "def test_ignore_dups(self):\n class Test(pyperry.Base): pass\n Test.attributes('id', 'poop', 'poop')\n\n self.assertEqual(Test.defined_attributes, set(['id', 'poop']))", "def test_attributes(self):\n comp = str(self.test1)\n attr = ['BaseModel', 'id', 'created_at', 
'updated_at']\n counter = 0\n for a in attr:\n if a in attr:\n counter += 1\n self.assertTrue(counter == 4)", "def __get__(self,obj,objtype):\n if not obj:\n return [getattr(objtype,a) for a in self.attribs]\n else:\n return [getattr(obj,a) for a in self.attribs]", "def test_attribute():\n params = dict(name=\"test\", type_=str, is_required=True)\n\n assert Attribute(**params) == Attribute(**params)\n assert Attribute(**params) is not None\n assert Attribute(**params) != Attribute(name=\"another\", type_=int, is_required=True)\n assert (\n str(Attribute(**params))\n == \"Attribute(name=test,type=<class 'str'>,is_required=True)\"\n )", "def compare_contact(c1, c2, unique_id, attrs):\r\n changes = {}\r\n if c1 != c2:\r\n for key in attrs:\r\n if c1[unique_id] != c2[unique_id]:\r\n raise Exception('bad contact comparaison unique_id do not match!')\r\n # copy the unique_id\r\n changes[unique_id] = c1[unique_id]\r\n # copy all values that changed\r\n if c1[key] != c2[key]:\r\n changes[key] = c1[key]\r\n return changes", "def as_tuple(self):\n return self.value, self.name", "def get_by_attr(self, attr: object, values: List[object] = list()) -> Tuple[Optional[AdvertsData], Optional[Exception]]:\n return self.get_objects_by_attr(table=AdvertsData, attr=attr, values=values)", "def crossover_attr(self, t1, t2, body_attr):\n assert isinstance(t1, ast.AST)\n assert isinstance(t2, ast.AST)\n assert isinstance(body_attr, str)\n\n if not getattr(t1, body_attr, None) or not getattr(t2, body_attr, None):\n return False\n\n if self.crossover_branches(t1, t2):\n return t1, t2\n\n if self.log > 1:\n print(f\"Checking {t1}.{body_attr} x {t2}.{body_attr}\")\n\n body_1 = getattr(t1, body_attr)\n body_2 = getattr(t2, body_attr)\n\n # If both trees have the attribute, we can cross their bodies\n if self.can_cross(t1, body_attr) and self.can_cross(t2, body_attr):\n if self.log:\n print(f\"Crossing {t1}.{body_attr} x {t2}.{body_attr}\")\n\n new_body_1, new_body_2 = self.cross_bodies(body_1, body_2)\n setattr(t1, body_attr, new_body_1)\n setattr(t2, body_attr, new_body_2)\n return True\n\n # Strategy 1: Find matches in class/function of same name\n for child_1 in body_1:\n if hasattr(child_1, 'name'):\n for child_2 in body_2:\n if (hasattr(child_2, 'name') and\n child_1.name == child_2.name):\n if self.crossover_attr(child_1, child_2, body_attr):\n return True\n\n # Strategy 2: Find matches anywhere\n for child_1 in random.sample(body_1, len(body_1)):\n for child_2 in random.sample(body_2, len(body_2)):\n if self.crossover_attr(child_1, child_2, body_attr):\n return True\n\n return False", "def get_data_class_attr_list(self, o):\n alist = None # Attributes to store\n ff = None # Load filter function\n for cl in self.data_classes:\n if isinstance(o, cl):\n alist = self.data_classes[cl][0]\n ff = self.data_classes[cl][1]\n break\n return (alist, ff)", "def test_adding_attributes(self):\n self.assertEqual(self.compound.get_attribute(\"What\"), \"Everything\")", "def _error_on_conflicting_sign_attrs(templ):\n\n # Nothing to do\n if templ.parameters.asymDetail.scheme.scheme == TPM2_ALG.NULL:\n return\n\n is_both_set = bool(templ.objectAttributes & TPMA_OBJECT.SIGN_ENCRYPT) and bool(\n templ.objectAttributes & TPMA_OBJECT.DECRYPT\n )\n\n # One could smarten this up to behave like tpm2-tools and trun down the attribute, but for now\n # error on bad attribute sets\n if is_both_set:\n raise ParserAttributeError(\n \"Cannot set both SIGN_ENCRYPT and DECRYPT in objectAttributes\"\n )", "def get_attribute_diffs(self, omci_copy, 
onu_copy, me_map):\n results = list()\n ro_set = {AA.R}\n\n # Get class ID's that are in both\n class_ids = {cls_id for cls_id, _ in omci_copy.items()\n if isinstance(cls_id, int) and cls_id in onu_copy}\n\n for cls_id in class_ids:\n # Get unique instances of a class\n olt_cls = omci_copy[cls_id]\n onu_cls = onu_copy[cls_id]\n\n # Weed out read-only and table attributes. Attributes on onu may be read-only.\n # These will only show up it the OpenOMCI (OLT-side) database if it changed\n # and an AVC Notification was sourced by the ONU\n # TODO: These class IDs could be calculated once at ONU startup (at device add)\n if cls_id in me_map:\n ro_attrs = {attr.field.name for attr in me_map[cls_id].attributes\n if attr.access == ro_set}\n table_attrs = {attr.field.name for attr in me_map[cls_id].attributes\n if isinstance(attr.field, OmciTableField)}\n\n else:\n # Here if partially defined ME (not defined in ME Map)\n from pyvoltha.adapters.extensions.omci.omci_cc import UNKNOWN_CLASS_ATTRIBUTE_KEY\n ro_attrs = {UNKNOWN_CLASS_ATTRIBUTE_KEY}\n\n # Get set of common instance IDs\n inst_ids = {inst_id for inst_id, _ in olt_cls.items()\n if isinstance(inst_id, int) and inst_id in onu_cls}\n\n for inst_id in inst_ids:\n omci_attributes = {k for k in six.iterkeys(olt_cls[inst_id][ATTRIBUTES_KEY])}\n onu_attributes = {k for k in six.iterkeys(onu_cls[inst_id][ATTRIBUTES_KEY])}\n\n # Get attributes that exist in one database, but not the other\n sym_diffs = (omci_attributes ^ onu_attributes) - ro_attrs\n results.extend([(cls_id, inst_id, attr) for attr in sym_diffs])\n\n # Get common attributes with different values\n common_attributes = (omci_attributes & onu_attributes) - ro_attrs\n results.extend([(cls_id, inst_id, attr) for attr in common_attributes\n if olt_cls[inst_id][ATTRIBUTES_KEY][attr] !=\n onu_cls[inst_id][ATTRIBUTES_KEY][attr]])\n return results", "def _attrlist(self,obj, attrs):\n vlist = [obj.__getattribute__(attr) for attr in attrs]\n return vlist", "def __eq__(self, other):\n return np.all([\n self.__getattribute__(name) == other.__getattribute__(name)\n for name in self._fields\n ])", "def compare(obj_a, obj_b):\n\n return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) == 'NVC')", "def get_results(instance: Algorithm) -> Dict[str, Any]:\n all_attributes = dir(instance)\n attrs = {\n v: getattr(instance, v)\n for v in all_attributes\n if v.endswith(\"_\") and not v.startswith(\"__\") and not isinstance(getattr(instance, v), types.MethodType)\n }\n return attrs", "def testattributes(self):\n for attr in AmuletAbility.attributes:\n a = AmuletAbility('Attribute', attr=attr)\n self.assert_(attr in str(a))\n self.assertEqual(a.attribute, attr)\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))", "def get_objs_with_attr_match(self, attribute_name, attribute_value, location=None, exact=False): \n from src.objects.models import ObjAttribute\n lstring = \"\"\n if location:\n lstring = \", db_obj__db_location=location\" \n attrs = eval(\"ObjAttribute.objects.filter(db_key=attribute_name%s)\" % lstring)\n if exact: \n return [attr.obj for attr in attrs if attribute_value == attr.value]\n else:\n return [attr.obj for attr in attrs if utils.to_unicode(attribute_value) in str(attr.value)]", "def _named_attrs(self, parts:dict) -> \\\n (QA4SMNamedAttributes, list, QA4SMNamedAttributes):\n\n if not self.ismetr():\n raise IOError(self.varname, '{} is not in form of a QA4SM metric variable.')\n\n if self.g == 0:\n a = QA4SMAttributes(self.attrs)\n 
ref_ds = QA4SMNamedAttributes(a.ref_dc - a._offset_id_dc,\n a.get_ref_names()['short_name'], self.attrs)\n return ref_ds, None, None\n else:\n dss = []\n ref_ds = QA4SMNamedAttributes(parts['ref_id'], parts['ref_ds'], self.attrs)\n ds = QA4SMNamedAttributes(parts['sat_id0'], parts['sat_ds0'], self.attrs)\n dss.append(ds)\n if self.g == 3:\n ds = QA4SMNamedAttributes(parts['sat_id1'], parts['sat_ds1'], self.attrs)\n dss.append(ds)\n mds = QA4SMNamedAttributes(parts['mds_id'], parts['mds'], self.attrs)\n else:\n mds = None\n return ref_ds, dss, mds", "def _get_attributes(instance, typeobj):\r\n for key in dir(instance):\r\n if key.startswith(\"__\"):\r\n continue\r\n value = getattr(instance, key)\r\n if type(value) == typeobj:\r\n yield value", "def match_length(cls, a, b):\n if len(a) == len(b):\n return (a, b)\n elif len(a) < len(b):\n return (cls(a, len(b)), b)\n else:\n return (a, cls(b, len(a)))", "def attr_matches(self, text):\n m = re.match(r\"(\\w+(\\.\\w+)*)\\.(\\w*)\", text)\n if not m:\n return []\n expr, attr = m.group(1, 3)\n object = eval(expr, self.namespace)\n words = dir(object)\n if hasattr(object,'__class__'):\n words.append('__class__')\n words = words + get_class_members(object.__class__)\n matches = []\n n = len(attr)\n for word in words:\n if word[:n] == attr:\n matches.append(\"%s.%s\" % (expr, word))\n return matches", "def __cmp__(self, other):\n return cmp(MetaTestCase._cmp_str(self), MetaTestCase._cmp_str(other))", "def compare(this, other, keys):\n for key in keys:\n assert this[key] == other[key]", "def cmp ( self, object1, object2 ):\n return cmp( self.get_raw_value( object1 ),\n self.get_raw_value( object2 ) )", "def compare_frames(frame1, frame2):\n for attr in (\"naxes\", \"axes_type\", \"axes_order\", \"unit\", \"axes_names\"):\n assert getattr(frame1, attr) == getattr(frame2, attr)", "def __eq__(self, other):\n comparable_fields = ['uuid', 'name', 'email']\n return all([getattr(self, field) == getattr(other, field)\n for field in comparable_fields])", "def test_called_two(self):\n self.test_attribute.is_down = mock.Mock(return_value=False)\n second_att, second_sla = self.make_att_sla(43)\n second_att.is_down = mock.Mock(return_value=False)\n self.run_mock_analyzer((self.test_attribute, second_att))\n eq_(self.obj.analyze_attribute.call_args_list[0][0],\n (self.test_attribute, ))\n eq_(self.obj.analyze_attribute.call_args_list[1][0], (second_att, ))", "def assert_hasattributes(obj, attributes):\n \n for attr in attributes:\n # print(attr)\n assert(hasattr(obj, attr))", "def get_attributes(self, *args, i_order=None):\n\n if i_order is None:\n out = [getattr(self, arg) for arg in args]\n else:\n out = [getattr(self, arg)[i_order] for arg in args]\n\n if len(out) == 1:\n out = out[0]\n\n return out", "def __eq__(self, other):\n \n if not tools.data_are_equal(self.attrs, other.attrs):\n print('here')\n return False\n \n return tools.data_are_equal(self.components, other.components)", "def test_that_class_teacher_attribute_homework_done_is_the_same_with_instance_of_this_class():\n temp_1 = opp_teacher.homework_done\n temp_2 = Teacher.homework_done\n assert temp_1 == temp_2", "def get_attribs(self, attribs: List[str]) -> Iterable[Tuple[str, str]]:\n binfo = self.build_info\n for attrib in attribs:\n try:\n val = self._get_attrib(attrib, binfo)\n except Exception as e:\n logger.error(f'could not get attribute {attrib}: {e}')\n raise e\n if self.type_strict and not isinstance(val, str):\n raise ValueError(f'wrong value found for attribute: {attrib}')\n if val is 
not None:\n yield ((attrib, val))\n elif self.exist_strict:\n raise ValueError(f'no such attribute: {attrib}')", "def _get_active_attributes_and_uniforms(self):\n # This match a name of the form \"name[size]\" (= array)\n regex = re.compile(\"\"\"(?P<name>\\w+)\\s*(\\[(?P<size>\\d+)\\])\\s*\"\"\")\n # Get how many active attributes and uniforms there are\n cu = gl.glGetProgramParameter(self._handle, gl.GL_ACTIVE_UNIFORMS)\n ca = gl.glGetProgramParameter(self.handle, gl.GL_ACTIVE_ATTRIBUTES)\n # Get info on each one\n attributes = []\n uniforms = []\n for container, count, func in [(attributes, ca, gl.glGetActiveAttrib),\n (uniforms, cu, gl.glGetActiveUniform)]:\n for i in range(count):\n name, size, gtype = func(self._handle, i)\n m = regex.match(name) # Check if xxx[0] instead of xx\n if m:\n name = m.group('name')\n for i in range(size):\n container.append(('%s[%d]' % (name, i), gtype))\n else:\n container.append((name, gtype))\n #return attributes, uniforms\n return set([v[0] for v in attributes] + [v[0] for v in uniforms])", "def map(self, attr1, attr2):\n return dict(zip(getattr(self, attr1), getattr(self, attr2)))", "def map(self, attr1, attr2):\n return dict(zip(getattr(self, attr1), getattr(self, attr2)))", "def attribute_is_equal(self, attr_name, expected, observed):\n if attr_name != \"metrics\":\n return super().attribute_is_equal(attr_name, expected, observed)\n\n expected_names = [m.metric.mp_metric_name for m in expected]\n return (\n len(expected) == len(observed)\n and all(name in expected_names for name in observed)\n and all(m.value == observed.get(m.metric.mp_metric_name) for m in expected)\n )", "def sql_attribute_unpacker(self, where_string_list: List[str]) -> Tuple[List[str], List[str]]:\n\n if not where_string_list or len(where_string_list) == 0:\n raise ValueError(\"The list of strings containing the attributes is missing.\")\n\n join_attributes_set: set = set()\n selection_attributes_set: set = set()\n\n for where_string in where_string_list:\n attrs = re.split(\" AND \", where_string, flags=re.IGNORECASE)\n\n for index, attr in enumerate(attrs):\n if re.match(r'.+\\s*=\\s*[^\\d\"\\']*$', attr):\n join_attributes_set.add(attr.strip())\n else:\n for operator in self.operators:\n if operator in attr:\n attr = attr.split(operator)[0].strip()\n selection_attributes_set.add(attr)\n break\n\n return list(join_attributes_set), list(selection_attributes_set)", "def __is_hard_match(self, obj):\n for attr in self.list:\n try:\n if getattr(obj, attr) != getattr(self, attr):\n return False\n except AttributeError:\n pass\n return True", "def __gt__(self, other):\n return (self.__class__.__name__, self._values()) > (other.__class__.__name__, other._values())", "def about_attribute(self, name):\n for cdef in self.getmro():\n if name in cdef.attrs:\n s_result = cdef.attrs[name].s_value\n if s_result != s_ImpossibleValue:\n return s_result\n else:\n return None\n return None", "def getPair(self, args):\r\n return self.name, self.getValue(args)", "def test_attrs():\n assert hasattr(constants.Planck_constant, \"value\")\n assert hasattr(constants.Planck_constant, \"units\")\n assert hasattr(constants.Planck_constant, \"name\")\n assert hasattr(constants.Planck_constant, \"error\")", "def __eq__(self, other):\n if not isinstance(other, Attribute):\n return False\n\n return self.__dict__ == other.__dict__", "def get_class_attr_list(self, o):\n alist = None # Attributes to store\n ff = None # Load filter function\n for cl in self.classes:\n if isinstance(o, cl):\n alist = 
self.classes[cl][0]\n ff = self.classes[cl][1]\n break\n if isinstance(o, Block._ComponentDataClass):\n # If you're here you are trying to serialize an element of an\n # indexed block at the top level. We do want to allow that, so\n # we'll pretend it's a block.\n alist = self.classes[Block][0]\n ff = self.classes[Block][1]\n return (alist, ff)", "def _process_attrs(attrs):\n new_attrs = OrderedDict()\n for attr in attrs:\n col = attr\n if isinstance(attr, tuple):\n col, attr = attr\n # special cases\n if attr == 'class_name':\n attr = '__class__.__name__'\n if attr == 'repr':\n attr = repr\n new_attrs[col] = attr\n\n return new_attrs", "def check_attributes(self):\n self.assertEqual(type(self.amenity_1.name), str)", "def _builtin_eq(arg1, arg2, **kwdargs):\n try:\n result = unify_value(arg1, arg2, {})\n return [(result, result)]\n except UnifyError:\n return []\n # except VariableUnification:\n # raise VariableUnification(location = database.lineno(location))" ]
[ "0.6573749", "0.64668816", "0.6218231", "0.61928684", "0.61631376", "0.6161874", "0.61611706", "0.61574703", "0.5891995", "0.58485514", "0.5843817", "0.5834111", "0.57660866", "0.57439977", "0.5732433", "0.5712868", "0.56471366", "0.5642409", "0.5622416", "0.5610146", "0.55761474", "0.55702764", "0.55648893", "0.5556978", "0.5552826", "0.5535728", "0.5497517", "0.5476264", "0.54568684", "0.5446643", "0.54087114", "0.5404759", "0.5394253", "0.53839976", "0.5383702", "0.53835714", "0.5381407", "0.53397065", "0.5287393", "0.5268561", "0.5195137", "0.51731104", "0.5164135", "0.5157216", "0.515688", "0.515247", "0.51353836", "0.5132055", "0.51129395", "0.51099366", "0.5109115", "0.5107528", "0.5106954", "0.51034385", "0.5088264", "0.5067677", "0.5066371", "0.50508004", "0.50458026", "0.5041188", "0.50363934", "0.50337005", "0.5032355", "0.50234", "0.50184566", "0.5013352", "0.49941424", "0.4981972", "0.4978468", "0.49757487", "0.4966222", "0.49659532", "0.4943478", "0.49337396", "0.493336", "0.49329072", "0.49313146", "0.4930823", "0.49298185", "0.49208695", "0.4917447", "0.49162462", "0.48991168", "0.48962888", "0.4894846", "0.4892468", "0.48881796", "0.48881796", "0.48866722", "0.4885838", "0.48714206", "0.48626602", "0.4857856", "0.48564294", "0.4854361", "0.48531047", "0.48441014", "0.48425388", "0.48411605", "0.48394904" ]
0.73285353
0
Given a list of tuples comprised of (subcomparison method, attr name for comparison), returns any Difference tuple returned by each method using the given attr of obj1 and obj2 as arguments (if that method is not None)
def sub_comparison(obj1,obj2,translate): return [Difference(f"{obj1.__class__.__name__} > {meth.__name__}",result) for (meth,attr) in translate if (result := meth(getattr(obj1,attr),getattr(obj2,attr))) is not None]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def attr_comparison(obj1,obj2,attrs):\n return [Difference(f\"{obj1.__class__.__name__}.{attr}\",(result1,result2)) for attr in attrs if (result1 := getattr(obj1,attr)) != (result2 := getattr(obj2,attr))]", "def with_cmp(attrs):\n def attrs_to_tuple(obj):\n \"\"\"\n Create a tuple of all values of *obj*'s *attrs*.\n \"\"\"\n return tuple(getattr(obj, a) for a in attrs)\n\n def eq(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) == attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def ne(self, other):\n result = eq(self, other)\n if result is NotImplemented:\n return NotImplemented\n else:\n return not result\n\n def lt(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) < attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def le(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) <= attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def gt(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) > attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def ge(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) >= attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def hash_(self):\n return hash(attrs_to_tuple(self))\n\n def wrap(cl):\n cl.__eq__ = eq\n cl.__ne__ = ne\n cl.__lt__ = lt\n cl.__le__ = le\n cl.__gt__ = gt\n cl.__ge__ = ge\n cl.__hash__ = hash_\n\n return cl\n return wrap", "def deep_cmp(obj1, obj2):\n pass", "def diffs(self):\n diffs = []\n # XXX i know, we are using the ZODB, so sorry for the cheesy eval()\n # uhm, some logic is not right here as well, we need to look at keys\n # in both the before and after sets :(\n if not self.before or not self.after:\n return []\n before = eval(self.before)\n # pfft!\n if not before:\n return []\n after = eval(self.after)\n for k,v in before.items():\n if k in ['objectClass','userPassword']:\n continue\n try:\n if k == 'uniqueMember':\n added, removed = uniqueMemberDiff(\n v, after['uniqueMember'] )\n diffs.append( {'attribute' : k,\n 'added' : added,\n 'removed' : removed,\n }\n )\n elif str(v) != str(after[k]):\n diffs.append( { 'attribute' : k,\n 'before' : before[k],\n 'after' : after[k] }\n )\n except KeyError:\n pass\n return diffs", "def diff(*args):\n return reduce(lambda x, y: x - y, args)", "def BackfillComparisons (cls):\n\n def applyconvert (cls, derived):\n for (opn, opx) in derived:\n opx.__name__ = opn\n opx.__doc__ = getattr(int, opn).__doc__\n setattr(cls, opn, opx)\n\n applyconvert(cls, (\n ('__gt__', lambda self, other: not (self.__lt__(other) or self.__eq__(other))),\n ('__le__', lambda self, other: self.__lt__(other) or self.__eq__(other)),\n ('__ge__', lambda self, other: not self.__lt__(other))\n ))\n applyconvert(cls, (\n ('__ne__', lambda self, other: not self.__eq__(other)),\n ))\n return cls", "def DataDiff(source, target, compare_list_as_value=True, depth=0, no_difference_value=None):\n # Ensure recursion doesnt go out of control\n if depth > 150:\n raise Exception('DataDiff recurlsion depth has hit limit (50), aborting.')\n\n # If we are not working with 2 different containers we can inspect, then do a simple check\n if type(source) not in (list, tuple, dict) or type(target) not in (list, tuple, dict):\n # If the types are different, the data is different (and cant be compared more)\n if type(source) != type(target):\n return (source, target)\n # Else, theyre the same types, if the values are different\n elif source != 
target:\n return (source, target)\n # Else, theyre the same types and value\n else:\n # This should only happen if this is a fresh DataDiff() call, depth==0\n if depth == 0:\n return (no_difference_value, no_difference_value)\n else:\n raise Exception('This should never happen, having a mismatching value different in anywhere but depth=0')\n\n\n if type(source) in (list, tuple):\n source_diff = []\n elif type(source) == dict:\n source_diff = {}\n else:\n raise Exception('Unhandled source_diff data type: %s' % type(source))\n\n if type(target) in (list, tuple):\n target_diff = []\n elif type(target) == dict:\n target_diff = {}\n else:\n raise Exception('Unhandled target_diff data type: %s' % type(target))\n\n # Check for incompatible types, and just return them both as theyre totally different\n if type(source_diff) != type(target_diff):\n return (source, target)\n\n # If we're handling a Dictionary compare\n if type(source_diff) == dict:\n # Process the source keys first\n for key in source.keys():\n _CompareDictValue(key, source, target, source_diff, target_diff, compare_list_as_value, no_difference_value, depth)\n\n # Process the target keys next, skipping any source keys we already processed\n for key in target.keys():\n # Skip any keys we already processed in source\n if key in source:\n continue\n\n # Reverse target/source, so that the reverse comparison/set is done\n _CompareDictValue(key, target, source, target_diff, source_diff, compare_list_as_value, no_difference_value, depth)\n\n # Else, if we're handling a List compare\n elif type(source_diff) == list:\n # If lists must be compared in total because the order of a list is important\n if compare_list_as_value:\n if source != target:\n return (list(source), list(target))\n\n # Else, compare each element of the list\n else:\n for count in range(0, len(source)):\n if count >= len(target):\n source_diff.append(source[count])\n elif source[count] != target[count]:\n source_diff.append(source[count])\n target_diff.append(target[count])\n\n # If the target has more elements than the source, add the rest \n if len(target) > len(source):\n target_diff += target[-(len(source) - len(target)):]\n\n else:\n raise Exception('Unspecified type handler for data: %s. Only dict and list/tuple types are accepted.')\n\n return (source_diff, target_diff)", "def _compare_and_set_attributes(curr, dag, muts, phen, comparison):\n params = dag.node[curr]\n\n # Get the children of this node\n children = dag.successors(curr)\n\n assert len(children) == 2, \"Tree node with #children != 2.\"\n\n x_params = dag.node[children[0]]\n y_params = dag.node[children[1]]\n x_key = x_params['dataset']\n y_key = y_params['dataset']\n value = None\n\n if x_key is None:\n if y_key is None:\n # Neither child has a dataset.\n params['dataset'] = None\n else:\n # Y has a dataset, but not X.\n params['genes'] = y_params['genes']\n params['dataset'] = y_key\n params['function'] = compare.ds_y\n params['value'] = y_params['value']\n else:\n if y_key is None:\n # X has a dataset, but not Y.\n params['genes'] = x_params['genes']\n params['dataset'] = x_key\n params['function'] = compare.ds_x\n params['value'] = x_params['value']\n else:\n # Both have datasets. 
This is the normal case.\n params['genes'] = x_params['genes'] + y_params['genes']\n function, dataset, value, *etc = compare.best_combination(\n muts[x_key], muts[y_key], phen, comparison)\n params['function'] = function\n params['dataset'] = curr\n muts[curr] = dataset\n params['value'] = value\n\n return value", "def _iterativediff(t1, t2, subdir):\n if t1 is None:\n t1 = {}\n if t2 is None:\n t2 = {}\n\n for e1 in t1:\n realname = subdir + pycompat.fsencode(e1.name)\n\n if e1.type == pygit2.GIT_OBJ_TREE:\n try:\n e2 = t2[e1.name]\n if e2.type != pygit2.GIT_OBJ_TREE:\n e2 = None\n except KeyError:\n e2 = None\n\n stack.append((realname + b'/', e1, e2))\n else:\n n1, fl1 = self.find(realname)\n\n try:\n e2 = t2[e1.name]\n n2, fl2 = other.find(realname)\n except KeyError:\n e2 = None\n n2, fl2 = (None, b'')\n\n if e2 is not None and e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n\n if not match(realname):\n continue\n\n if n1 != n2 or fl1 != fl2:\n result[realname] = ((n1, fl1), (n2, fl2))\n elif clean:\n result[realname] = None\n\n for e2 in t2:\n if e2.name in t1:\n continue\n\n realname = subdir + pycompat.fsencode(e2.name)\n\n if e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n elif match(realname):\n n2, fl2 = other.find(realname)\n result[realname] = ((None, b''), (n2, fl2))", "def compare(a, b, attrs, f):\n for attr in attrs:\n if not f(getattr(a, attr), getattr(b, attr)):\n return False\n return True", "def cmpAttributeValues(self, dcObj, ignoreOrder=True, **kwargs):\n rL = []\n floatRelTolerance = kwargs.get(\"floatRelTolerance\", 1.0e-05)\n floatAbsTolerance = kwargs.get(\"floatAbsTolerance\", 1.0e-04)\n try:\n sa = set(self.getAttributeList())\n sb = set(dcObj.getAttributeList())\n atNameComList = list(sa & sb)\n #\n lenEq = self.getRowCount() == dcObj.getRowCount()\n if not lenEq:\n return [(atName, False) for atName in atNameComList]\n #\n for atName in atNameComList:\n dataType, _ = self.__getAttributeInfo(atName)\n if dataType in [\"string\", \"integer\"]:\n if ignoreOrder:\n same = sorted(self.getAttributeValueList(atName)) == sorted(dcObj.getAttributeValueList(atName))\n else:\n same = self.getAttributeValueList(atName) == dcObj.getAttributeValueList(atName)\n elif dataType in [\"float\"]:\n aVL = self.getAttributeValueList(atName)\n bVL = dcObj.getAttributeValueList(atName)\n if ignoreOrder:\n for aV, bV in zip(sorted(aVL), sorted(bVL)):\n same = self.__isClose(aV, bV, relTol=floatRelTolerance, absTol=floatAbsTolerance)\n if not same:\n break\n else:\n for aV, bV in zip(aVL, bVL):\n same = self.__isClose(aV, bV, relTol=floatRelTolerance, absTol=floatAbsTolerance)\n if not same:\n logger.info(\"%s %s (rel=%r) (abs=%r) %r (%r)\", self.getName(), atName, aV * floatRelTolerance, floatAbsTolerance, aV, abs(aV - bV))\n break\n rL.append((atName, same))\n #\n return rL\n except Exception as e:\n if self._raiseExceptions:\n raise e\n return rL", "def complete_comparisons(cls):\n assert cls.__lt__ is not object.__lt__, \"{0} must define < and ideally ==\".format(cls.__name__)\n if cls.__eq__ is object.__eq__:\n cls.__eq__ = lambda self, other: not (cls.__lt__(self, other) or cls.__lt__(other, self))\n cls.__ne__ = lambda self, other: not cls.__eq__(self, other)\n cls.__gt__ = lambda self, other: cls.__lt__(other, self)\n cls.__le__ = lambda self, other: not cls.__lt__(other, self)\n cls.__ge__ = lambda self, other: not cls.__lt__(self, other)\n return cls", "def createFromTwoTuples(cls, tuple1, tuple2, **kwargs):\n return 
cls([c2 - c1 for (c1, c2) in zip(tuple1, tuple2)], **kwargs)", "def none_comparison(func):\n @functools.wraps(func)\n def inner(obj1,obj2):\n if obj1 is not None and obj2 is not None:\n return func(obj1, obj2)\n if obj1 is None and obj2 is None:\n return []\n if obj1 is not None and obj2 is None:\n return Difference(f\"Second {obj1.__class__.__name__} is None\",(obj1,None))\n return Difference(f\"First {obj2.__class__.__name__} is None\",(None,obj2))\n return inner", "def _compare_list(self, name, actual, expect):\n raise NotImplementedError(\"base class, not implement!\")", "def comparisons(self, ctx: Context) -> Iterator[AnnotatedExpression]:\n for type, expr_group in ctx.groupby_type():\n if type in (bool, Callable):\n continue\n for operator in self.compare_operators:\n for left, right in combinations(expr_group, 2):\n yield AnnotatedExpression(\n ast.Compare(\n left=left.expr, ops=[operator()], comparators=[right.expr]\n ),\n TypeAnnotation(bool),\n )", "def _compare_scalars(self, old, new, name=None):\n # Explicitly excluded arguments\n if old != new:\n return {'---': old, '+++': new}\n else:\n return None", "def get_comparison_data(self, context):\n if context is None:\n operand1 = [x for x in self[0].select()]\n operand2 = [x for x in self[1].select()]\n else:\n operand1 = [x for x in self[0].select(context.copy())]\n operand2 = [x for x in self[1].select(context.copy())]\n\n if self.parser.compatibility_mode:\n # Boolean comparison if one of the results is a single boolean value (1.)\n try:\n if isinstance(operand1[0], bool):\n if len(operand1) == 1:\n return [(operand1[0], self.boolean_value(operand2))]\n if isinstance(operand2[0], bool):\n if len(operand2) == 1:\n return [(self.boolean_value(operand1), operand2[0])]\n except IndexError:\n return []\n\n # Converts to float for lesser-greater operators (3.)\n if self.symbol in ('<', '<=', '>', '>='):\n return [\n (float(self.data_value(value1)), float(self.data_value(value2)))\n for value1 in operand1 for value2 in operand2\n ]\n\n return [(self.data_value(value1), self.data_value(value2))\n for value1 in operand1 for value2 in operand2]", "def diff(before: list, after: list) -> (list, list):\n additions = [item for item in after if item not in before]\n removals = [item for item in before if item not in after]\n return additions, removals", "def _compare_elements(self, old, new):\n res = None\n # We want to go through the tree post-order\n if isinstance(old, dict):\n res_dict = self.compare_dicts(old, new)\n if (len(res_dict) > 0):\n res = res_dict\n # Now we are on the same level\n # different types, new value is new\n elif (type(old) != type(new)):\n res = {'---': old, '+++': new}\n # recursive arrays\n # we can be sure now, that both new and old are\n # of the same type\n elif (isinstance(old, list)):\n res_arr = self._compare_arrays(old, new)\n if (len(res_arr) > 0):\n res = res_arr\n # the only thing remaining are scalars\n else:\n scalar_diff = self._compare_scalars(old, new)\n if scalar_diff is not None:\n res = scalar_diff\n\n return res", "def comparison(op):\n def comp(*args):\n if args:\n item = args[0]\n for o in args[1:]:\n if op(item, o):\n item = o\n else:\n return Boolean(False)\n return Boolean(True)\n else:\n return Boolean(True)\n return comp", "def format_comparison(objs):\n def formatter(comp):\n if not isinstance(comp, tuple):\n return str(comp)\n output = []\n return \"\\n\".join([comp.type] + [\" \"+errmessage for errmessage in output])\n\n results = map(formatter,objs)\n return \"\\n\".join(results)\n \n 
#obj1,obj2 = comp\n\n\n ### Sections\n #for i,s1,s2 in diffs:\n # if s1 and s2:\n # output.append(f\"Section {i} does not match:\")\n # result = compare_sections(s1,s2)\n # output.extend(almethods.linepadder(result))\n # else:\n # if s1:\n # output.append(f\"Door 2 missing Section {i}\")\n # else:\n # output.append(f\"Door 1 missing Section {i}\")", "def _PairUpAttributes(attributes):\n names = sorted(set(attr.id for attr in attributes))\n getters = {}\n setters = {}\n for attr in attributes:\n if attr.is_fc_getter:\n getters[attr.id] = attr\n elif attr.is_fc_setter and 'Replaceable' not in attr.ext_attrs:\n setters[attr.id] = attr\n return [(getters.get(id), setters.get(id)) for id in names]", "def test_comparison_overrides(self):\n\n # adding these methods directly to each class to avoid decoration\n # by the testlib decorators.\n class H1(object):\n def __init__(self, value='abc'):\n self.value = value\n def __nonzero__(self):\n return False\n def __hash__(self):\n return hash(self.value)\n def __eq__(self, other):\n if isinstance(other, type(self)):\n return self.value == other.value\n return False\n class H2(object):\n def __init__(self, value='abc'):\n self.value = value\n def __nonzero__(self):\n return False\n def __hash__(self):\n return hash(self.value)\n def __eq__(self, other):\n if isinstance(other, type(self)):\n return self.value == other.value\n return False\n class H3(object):\n def __init__(self, value='abc'):\n self.value = value\n def __nonzero__(self):\n return False\n def __hash__(self):\n return hash(self.value)\n def __eq__(self, other):\n if isinstance(other, type(self)):\n return self.value == other.value\n return False\n class H6(object):\n def __init__(self, value='abc'):\n self.value = value\n def __nonzero__(self):\n return False\n def __hash__(self):\n return hash(self.value)\n def __eq__(self, other):\n if isinstance(other, type(self)):\n return self.value == other.value\n return False\n\n \n mapper(H1, t1, properties={\n 'h2s': relation(H2, backref='h1'),\n 'h3s': relation(H3, secondary=t4, backref='h1s'),\n 'h1s': relation(H1, secondary=t5, backref='parent_h1'),\n 't6a': relation(H6, backref='h1a',\n primaryjoin=t1.c.id==t6.c.ht1a_id),\n 't6b': relation(H6, backref='h1b',\n primaryjoin=t1.c.id==t6.c.ht1b_id),\n })\n mapper(H2, t2)\n mapper(H3, t3)\n mapper(H6, t6)\n\n s = create_session()\n for i in range(3):\n h1 = H1()\n s.save(h1)\n\n h1.h2s.append(H2())\n h1.h3s.extend([H3(), H3()])\n h1.h1s.append(H1())\n\n s.flush()\n self.assertEquals(t1.count().scalar(), 4)\n\n h6 = H6()\n h6.h1a = h1\n h6.h1b = h1\n\n h6 = H6()\n h6.h1a = h1\n h6.h1b = x = H1()\n assert x in s\n\n h6.h1b.h2s.append(H2())\n\n s.flush()\n\n h1.h2s.extend([H2(), H2()])\n s.flush()\n\n h1s = s.query(H1).options(eagerload('h2s')).all()\n self.assertEqual(len(h1s), 5)\n\n self.assert_unordered_result(h1s, H1,\n {'h2s': []},\n {'h2s': []},\n {'h2s': (H2, [{'value': 'abc'},\n {'value': 'abc'},\n {'value': 'abc'}])},\n {'h2s': []},\n {'h2s': (H2, [{'value': 'abc'}])})\n\n h1s = s.query(H1).options(eagerload('h3s')).all()\n\n self.assertEqual(len(h1s), 5)\n h1s = s.query(H1).options(eagerload_all('t6a.h1b'),\n eagerload('h2s'),\n eagerload_all('h3s.h1s')).all()\n self.assertEqual(len(h1s), 5)", "def difference(*colls):\n\n # Get all the leaf paths for each collection: make each path a tuple\n leaf_paths_by_coll = list(map(lambda c: list(map(tuple, get_all_leaf_paths(c))), colls))\n\n # Find the union of all leaf paths: merge all the paths and keep only the unique paths\n union_leaf_paths = 
list(distinct(concat(*leaf_paths_by_coll)))\n\n # Get the values corresponding to these leaf paths in every collection: if a leaf path doesn't exist, assumes None\n values_by_coll = list(map(lambda lp: list(map(lambda coll: tz.get_in(lp, coll), colls)), union_leaf_paths))\n\n # Filter out the leaf paths that have identical values across the collections\n keep_leaf_paths = list(map(0, filter(lambda t: not allequal(t[1]), zip(union_leaf_paths, values_by_coll))))\n keep_values = list(map(1, filter(lambda t: not allequal(t[1]), zip(union_leaf_paths, values_by_coll))))\n\n # Rearrange to construct a list of dictionaries -- one per original collection.\n # Each of these dictionaries maps a 'kept' leaf path to its corresponding\n # value in the collection\n differences = list(map(lambda vals: dict(zip(keep_leaf_paths, vals)), list(zip(*keep_values))))\n\n return differences", "def compare(*fields, **kwargs):\n\tfrom sqlalchemy import select, cast, Date\n\tfrom sqlalchemy.orm import object_session\n\tfrom sqlalchemy.sql import column\n\t\n\tsession = object_session(fields[0])\n\tt = column('t')\n\tR = Record.__table__\n\tdef sel(field):\n\t\tT = session.query(Record).filter(Record.field_id==field.id).first().__table__\n\t\treturn select([t,column('x')]).select_from(R.join(T)).where(R.c.field_id==field.id).alias()\n\n\tfor i,f in enumerate(fields):\n\t\tif i==0:\n\t\t\ts0 = sel(f)\n\t\t\ts = select([s0.c.t,s0.c.x]).order_by(t)\n\t\telse:\n\t\t\ts1 = sel(f)\n\t\t\tif kwargs.get('datecast',False):\n\t\t\t\ts = s.column(s1.c.x).where(cast(s0.c.t,Date)==cast(s1.c.t,Date))\n\t\t\telse:\n\t\t\t\ts = s.column(s1.c.x).where(s0.c.t==s1.c.t)\n\t\t\tif kwargs.get('diff',False):\n\t\t\t\ts = s.where(func.round(s0.c.x)!=func.round(s1.c.x))\n\tif kwargs.get('plot',False):\n\t\tif len(fields)!=2:\n\t\t\tprint \"Works only with exactly 2 fields as input.\"\n\t\t\treturn None\n\t\timport matplotlib.pyplot as plt\n\t\ta = float(fields[0].mult) * fields[0].units.convert(fields[1].units)\n\t\tb = float(fields[1].mult) * fields[1].units.convert(fields[0].units)\n\t\tl = Session.execute(s).fetchall()\n\t\tfig = plt.figure(figsize=(6.2,6))\n\t\tplt.scatter([float(r[1])*a for r in l],[float(r[2])*b for r in l])\n\t\tplt.xlabel(fields[0].name+' '+str(fields[0].station_id))\n\t\tplt.ylabel(fields[1].name+' '+str(fields[1].station_id))\n\t\ttry: \n\t\t\tx = kwargs['xlim']\n\t\t\ty = x\n\t\texcept:\n\t\t\tx = plt.xlim()\n\t\t\ty = plt.ylim()\n\t\tplt.plot(x,x)\n\t\tfig.axes[0].set_xlim(x)\n\t\tfig.axes[0].set_ylim(y)\n\t\tfig.show()\n\telse:\n\t\treturn session.execute(s).fetchall()", "def difference(A, B, *C):\n return setutils(\"difference\", A, B, *C)", "def do_list_merge(li1, li2=None, attr=None, unique_fn=None, set_fn=set):\n if not li1 and not li2:\n return []\n elif li2 and not li1:\n li1, li2 = li2, li1\n\n new_list = li1[:]\n\n if li2 is None:\n pass\n\n elif attr is None and unique_fn is None:\n new_list.extend(li2)\n\n else:\n if attr is not None:\n if isinstance(attr, basestring):\n def unique_fn(d):\n return d[attr]\n\n if unique_fn is not None:\n unique_fn = GlobalFns(unique_fn)\n\n comparables_1 = {unique_fn(el): idx for idx, el in enumerate(li1)}\n if len(set_fn(comparables_1)) < len(comparables_1):\n raise ValueError(\"li1 is not unique wrt. unique_fn\")\n\n comparables_2 = [unique_fn(el) for el in li2]\n if len(set_fn(comparables_2)) < len(comparables_2):\n raise ValueError(\"li2 is not unique wrt. 
unique_fn\")\n\n for idx2, cmp_2 in enumerate(comparables_2):\n el2 = li2[idx2]\n if cmp_2 in comparables_1:\n idx1 = comparables_1[cmp_2]\n new_list[idx1] = el2\n else:\n new_list.append(el2)\n\n return new_list", "def compare_values(\n cls: Type[Object_T],\n ours: Optional[Object_T],\n theirs: Optional[Object_T],\n *,\n our_schema: s_schema.Schema,\n their_schema: s_schema.Schema,\n context: ComparisonContext,\n compcoef: float,\n ) -> float:\n similarity = 1.0\n\n if ours is not None and theirs is not None:\n if type(ours) is not type(theirs):\n similarity /= 1.4\n else:\n our_name = context.get_obj_name(our_schema, ours)\n their_name = theirs.get_name(their_schema)\n if our_name != their_name:\n similarity /= 1.2\n else:\n # If the new and old versions share a reference to\n # an object that is being deleted, then we must\n # delete this object as well.\n if (type(ours), our_name) in context.deletions:\n return 0.0\n\n elif ours is not None or theirs is not None:\n # one is None but not both\n similarity /= 1.2\n\n if similarity < 1.0:\n return compcoef\n else:\n return 1.0", "def GenerateDiff(self, args):\n raise NotImplementedError(\n \"abstract method -- subclass %s must override\" % self.__class__)", "def diff(rev_1, rev_2):\n assert isinstance(rev_1, AbstractRevision)\n assert isinstance(rev_2, AbstractRevision)\n if rev_1.created_at == rev_2.created_at:\n print 'should not compare object to itself.'\n return None\n\n set_1 = Set((\n (k, v) for k, v in zip(\n rev_1.get_values().keys(),\n rev_1.get_values().values()\n )\n if k != u'id' and k != u'created_at' and k != 'tracked_model_id'\n ))\n set_2 = Set((\n (k, v) for k, v in zip(\n rev_2.get_values().keys(),\n rev_2.get_values().values()\n )\n if k != u'id' and k != u'created_at' and k != 'tracked_model_id'\n ))\n\n # new values\n diff = set_1 - set_2 # elements in s but not in t\n # common values\n intersection = set_1 & set_2 # elements common to s and t\n # pairs of changed values\n sym_diff = set_1 ^ set_2 # elements in s and t but not in both\n # changed values - set for consistency\n mod = sym_diff - diff\n return diff, intersection, sym_diff, mod", "def compare(self, *args):\n return _ida_hexrays.carglist_t_compare(self, *args)", "def GenerateDiff(self, args):\r\n raise NotImplementedError(\r\n \"abstract method -- subclass %s must override\" % self.__class__)", "def compare_changes(obj, **kwargs):\n changes = {}\n for key, value in obj.items():\n if key in kwargs:\n if value != kwargs[key]:\n changes[key] = kwargs[key]\n return changes", "def compare_changes(obj, **kwargs):\n changes = {}\n for k, v in obj.items():\n if k in kwargs:\n if v != kwargs[k]:\n changes[k] = kwargs[k]\n return changes", "def _compare_results(self, results_a, results_b):\n results = []\n\n # Cursory check to remove FILE_INPUT_PATH key from results since it is\n # a custom added field for test cases\n if INPUT_FILE_PATH in results_a:\n results_a = dict(results_a)\n del results_a[INPUT_FILE_PATH]\n if INPUT_FILE_PATH in results_b:\n results_b = dict(results_b)\n del results_b[INPUT_FILE_PATH]\n\n # Begin comparing results\n if self._field_names:\n for field_name in self._field_names:\n try:\n comparer = self._compare_results_field(results_a, results_b, field_name)\n except:\n comparer = ResultComparer(field_name)\n logger.error(traceback.format_exc())\n results.append(comparer)\n else:\n for ignore_field in self._ignore_field_names:\n results_a.pop(ignore_field, None)\n results_b.pop(ignore_field, None)\n all_field_names = 
set(results_a.keys()).union(list(results_b.keys()))\n for field_name in all_field_names:\n try:\n comparer = self._compare_results_field(results_a, results_b, field_name)\n except:\n comparer = ResultComparer(field_name)\n logger.error(traceback.format_exc())\n results.append(comparer)\n\n return results", "def compare(cls, data_hist, ref_hist, params):\n raise NotImplementedError(\"This is an abstract function that needs to be implemented for each comparison function\")", "def _call_isDifferent(vecObj, vec2):\n res = vecObj.isDifferent(vec2)\n return res", "def compareToRef(ref, innerProdFun = innerProd):\n def compareFun(x):\n return compare(ref, x, innerProdFun)\n return compareFun", "def dynamic_comparison(v1, op, v2):\n assert op in ['gt', 'lt']\n\n operator_map = {'gt': operator.gt,\n 'lt': operator.lt}\n\n return operator_map[op](v1, v2)", "def gen_comparison_pairs(self, a, b, subset=None):\n # union of the keys of the two records\n # the ordering of the first record takes precedence\n # an alternative option would be to sort them, lexicographically or with a custom criteria\n keys_union = {**a, **b}.keys()\n\n if subset:\n keys_comp_a_b = self.gen_comparison_keys_subset(subset)\n else:\n keys_comp_a_b = self.gen_comparison_keys_common(keys_union)\n\n for key_comp, key_a, key_b in keys_comp_a_b:\n yield key_comp, (a.get(key_a, {}), b.get(key_b, {}))", "def bench_compare_multiple(logger, *param_groups, **fn_names):\n title_str, ans_str, time_str = \"Testing {name}:\",\"\\t{name}({params}) = {ans}\",\"\\tTime = {time}\"\n for name, fn in fn_names.items():\n str_params = {'name': name}\n print(title_str.format(**str_params))\n for pg in param_groups:\n str_params |= {'params': pg}\n try:\n start = time.time()\n result = fn(*pg)\n exec = time.time() - start\n except TypeError:\n start = time.time()\n result = fn(pg)\n exec = time.time() - start\n str_params |= {'time': exec, 'ans': result}\n print(ans_str.format(**str_params))\n print(time_str.format(**str_params))", "def find_relationships(self, fig1, fig2):\r\n \r\n rels = []\r\n \r\n # relationship based on # of objects\r\n if len(fig1) == len(fig2):\r\n rels.append({'obj': 'all', 'attr': 'count', 'type': 'match'})\r\n else:\r\n rels.append({'obj': 'all', 'attr': 'count', 'type': 'mismatch'})\r\n \r\n for obj, attrs in fig1.items():\r\n if not obj in fig2:\r\n # object has been removed in fig2\r\n rels.append({'obj': obj, 'attr': 'all', 'type': 'removed'})\r\n continue\r\n \r\n for obj in fig2:\r\n if not obj in fig1:\r\n # object is only present in fig2\r\n rels.append({'obj': obj, 'attr': 'all', 'type': 'added'})\r\n continue\r\n \r\n for attr in fig2[obj]:\r\n rel = {'obj': obj, 'attr': attr}\r\n \r\n if attr in fig1[obj] and fig1[obj][attr] == fig2[obj][attr]:\r\n rel['type'] = 'match'\r\n else:\r\n partial_match = False\r\n for subvalue in fig2[obj][attr]:\r\n if attr in fig1[obj] and subvalue in fig1[obj][attr]:\r\n partial_match = True\r\n \r\n if partial_match:\r\n rel['type'] = 'partial'\r\n else:\r\n rel['type'] = 'mismatch'\r\n rel['old_values'] = ','.join(fig1[obj].get(attr, ['missing']))\r\n rel['new_values'] = ','.join(fig2[obj][attr])\r\n if rel['new_values'].isdigit() and rel['old_values'].isdigit():\r\n rel['diff'] = float(rel['new_values']) - float(rel['old_values'])\r\n del rel['old_values']\r\n del rel['new_values']\r\n \r\n rels.append(rel)\r\n \r\n return rels", "def test_ddiff_v1(self):\n print \"\\n\"\n for d in ddiff_v1(a, b): print d\n self.assertEqual(d, \"+FUN\")", "def compare(a, b):\n return a - b", "def 
getmethparlist(ob):\n defText = callText = \"\"\n # bit of a hack for methods - turn it into a function\n # but we drop the \"self\" param.\n # Try and build one for Python defined functions\n args, varargs, varkw = inspect.getargs(ob.__code__)\n items2 = args[1:]\n realArgs = args[1:]\n defaults = ob.__defaults__ or []\n defaults = [\"=%r\" % (value,) for value in defaults]\n defaults = [\"\"] * (len(realArgs)-len(defaults)) + defaults\n items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]\n if varargs is not None:\n items1.append(\"*\" + varargs)\n items2.append(\"*\" + varargs)\n if varkw is not None:\n items1.append(\"**\" + varkw)\n items2.append(\"**\" + varkw)\n defText = \", \".join(items1)\n defText = \"(%s)\" % defText\n callText = \", \".join(items2)\n callText = \"(%s)\" % callText\n return defText, callText", "def getmethparlist(ob):\n defText = callText = \"\"\n # bit of a hack for methods - turn it into a function\n # but we drop the \"self\" param.\n # Try and build one for Python defined functions\n args, varargs, varkw = inspect.getargs(ob.__code__)\n items2 = args[1:]\n realArgs = args[1:]\n defaults = ob.__defaults__ or []\n defaults = [\"=%r\" % (value,) for value in defaults]\n defaults = [\"\"] * (len(realArgs)-len(defaults)) + defaults\n items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]\n if varargs is not None:\n items1.append(\"*\" + varargs)\n items2.append(\"*\" + varargs)\n if varkw is not None:\n items1.append(\"**\" + varkw)\n items2.append(\"**\" + varkw)\n defText = \", \".join(items1)\n defText = \"(%s)\" % defText\n callText = \", \".join(items2)\n callText = \"(%s)\" % callText\n return defText, callText", "def __cmp__(self, other):\n # note compare order deliberatly compares other first, because we want the opposite\n # of what normally be returned by the these tuples\n if isinstance(other, Operation):\n return cmp((other.is_ready, other.queue_priority, other.seq),(self.is_ready, self.queue_priority,self.seq))\n else:\n raise TypeError('Operations can only be compared to other Operation')", "def diff(lhs, rhs, profiles=None):\n profiles = profiles if profiles else globalProfile()\n if isinstance(lhs, int) and isinstance(rhs, int):\n diffTxn(lhs, rhs, profiles)\n elif isinstance(lhs, list) or isinstance(rhs, list):\n diffTxns(lhs, rhs, profiles)\n else:\n display(HTML(ERROR_TEXT.format(\n\"\"\"\nInvalid arguments:<br>\ndiff expects either a pair of txns or a pair of list of txns<br>\nusage 1: diff(&lt;txnId1&gt;, &lt;txnId2&gt;) - compares two transactions with id txnId1 vs txnId2<br>\nusage 2: diff(&lt;List of txns&gt;, &lt;List of txns&gt;) - compares stats for the first list of txns vs the second.<br>\n\"\"\"\n )))", "def differences(data: list) -> list:\n differences = []\n iterable, copy = tee(data)\n next(copy) # adjusts copy of my iterable up 1 element\n for x, y in zip(iterable, copy):\n differences.append(abs(x - y))\n\n return differences", "def test_ddiff_v2(self):\n print \"\\n\"\n for d in ddiff_v2(a, b): print d\n self.assertEqual(d, \"+FUN\")", "def query(func, tuple_=False):\n\n def structures(self, *args, **kwargs):\n objects = func(self)\n original = {s: n for n, s in enumerate(objects.structures)}\n if len(args) == 1:\n return {objects.get(args[0])} if args[0] in objects.ids else set()\n for k, v in kwargs.items():\n objects = filter_objects(objects, k, v)\n if tuple_:\n return tuple(sorted(\n objects.structures, key=lambda s: original[s]\n ))\n else:\n return set(objects.structures)\n return structures", "def 
compare(self, *args):\n return _ida_hexrays.cdo_t_compare(self, *args)", "def csv_attribute_unpacker(self, attribute_tuples: List[Tuple[str, str]], separator: str = \",\") \\\n -> Tuple[List[str], List[str]]:\n\n if not attribute_tuples:\n raise ValueError(\"The list of tuples containing the attributes is missing.\")\n\n join_attributes_set: set = set()\n selection_attributes_set: set = set()\n\n for j_attribute_string, s_attribute_string in attribute_tuples:\n for j_attribute in j_attribute_string.split(separator):\n join_attributes_set.add(j_attribute.strip())\n\n for operator in self.operators:\n s_attribute_string = s_attribute_string.replace(separator + operator + separator, operator)\n\n for s_attribute in s_attribute_string.split(separator):\n for operator in self.operators:\n if operator in s_attribute:\n s_attribute = s_attribute.split(operator)[0].strip()\n selection_attributes_set.add(s_attribute)\n break\n\n return list(join_attributes_set), list(selection_attributes_set)", "def all_safe_methods(obj):\n return tuple(set(all_methods(obj)).difference(__leave_alone_methods))", "def all_safe_methods(obj):\n return tuple(set(all_methods(obj)).difference(__leave_alone_methods))", "def compare_descriptors(query_descriptors, db_descriptors, descriptor_comp_methods, descriptor_names, weights): \n scores = np.zeros((len(query_descriptors[0]), len(db_descriptors[0])))\n num_descriptor = 0\n try:\n for query_descriptor, db_descriptor, descriptor_name in zip(query_descriptors, db_descriptors, descriptor_names):\n d_score = descriptor_comp_methods[num_descriptor](query_descriptor, db_descriptor)\n if descriptor_name != \"text\":\n d_score = d_score / (16*16)\n\n scores += weights[num_descriptor] * d_score\n #print(weights[num_descriptor], d_score)\n num_descriptor += 1\n except TypeError:\n print(\"Maybe you've missed to use some --weights in your arguments\")\n print('Try at least \"--weights 1\" to continue the execution')\n sys.exit(\"ABORTING ##2\")\n return scores", "def _perform_pairwise_tests(labels, dists, tail_type, num_permutations):\r\n result = []\r\n\r\n # Convert our notion of tail type into the format expected by\r\n # PyCogent.\r\n if tail_type == 'two-sided':\r\n tail_type = None\r\n\r\n # Compare each pair of distributions, keeping track of the number of actual\r\n # tests that were successfully performed so that we can correct for\r\n # multiple comparisons.\r\n num_tests = 0\r\n for g1_idx, (g1_label, g1_dist) in enumerate(zip(labels[:-1], dists[:-1])):\r\n for g2_label, g2_dist in zip(\r\n labels[(g1_idx + 1):], dists[(g1_idx + 1):]):\r\n if ((len(g1_dist) == 1 and len(g2_dist) == 1) or\r\n (len(g1_dist) < 1 or len(g2_dist) < 1)):\r\n # Not enough data to run the test.\r\n obs_t, param_p_val, nonparam_p_val = nan, nan, nan\r\n else:\r\n obs_t, param_p_val, _, nonparam_p_val = mc_t_two_sample(\r\n g1_dist, g2_dist, tails=tail_type,\r\n permutations=num_permutations)\r\n result.append([g1_label, g2_label, obs_t, param_p_val, None,\r\n nonparam_p_val, None])\r\n if obs_t is not nan:\r\n num_tests += 1\r\n\r\n # Correct the p-values for multiple comparisons, now that we know how many\r\n # tests succeeded.\r\n for stat in result:\r\n stat[4] = stat[3] if stat[3] is nan else min(stat[3] * num_tests, 1)\r\n stat[6] = stat[5] if stat[5] is nan else min(stat[5] * num_tests, 1)\r\n return result", "def compare(self, *args):\n return _ida_hexrays.operand_locator_t_compare(self, *args)", "def cmp ( self, object1, object2 ):\n return cmp( self.get_raw_value( object1 ),\n 
self.get_raw_value( object2 ) )", "def compareFn(impl1, impl2):\n for (v1, v2) in zip(\n [extractDigits(f.strip()) for f in impl1.split(\",\")],\n [extractDigits(f.strip()) for f in impl2.split(\",\")],\n ):\n res = comparePair(v1, v2)\n if res:\n return res\n return 0", "def testExpected_a_and_self_distr_byRef_classifiers(self):\n\t\t#Get first filter opts obj + set the classifier objects specifically\n\t\tfilterObjA = self.filterOptObj #First set\n\t\tclassifiersA = filteredAtomComboObjMaps.getClassifiersFromOptsObj(self.classifierOpts)\n\t\tfilterObjA.classificationObjs = classifiersA\n\n\t\t#Get second filter opts obj; use byReference classifiers\n\t\tself.useGroups = [ [0,0] ]\n\t\tself.createTestObjs()\n\t\tfilterObjB = self.filterOptObj\n\t\tfilterObjB.classificationOpts = None #Force to use the objects\n\t\tclassifiersB = classifierObjsHelp.getByReferenceClassifiers(classifiersA)\n\t\tfilterObjB.classificationObjs = classifiersB\n\n\t\t#Run the functions - binValGetterA must always be run first\n\t\tbinValGetterA = optsObjMapHelp.getMultiDimBinValGetterFromOptsObjs([filterObjA])\n\t\tbinValGetterB = optsObjMapHelp.getMultiDimBinValGetterFromOptsObjs([filterObjB])\n\n\t\tactValsA = binValGetterA.getValsToBin(self.sparseMatrixCalculator)\n\t\tactValsB = binValGetterB.getValsToBin(self.sparseMatrixCalculator)\n\n\t\t#Compare actual and expected\n\t\tdistAA, distBB, distCC = 0,0,0\n\t\tdistAB, distAC, distBC = 1,2,1\n\t\tdistBA, distCA, distCB = distAB, distAC, distBC\n\n\t\texpValsA = [ (3,), (2,), (1,) ]\n\t\texpValsB = [ (distAA,), (distAB,), (distAC,), (distBA,), (distBB,), (distBC,),\n\t\t (distCA,), (distCB,), (distCC,) ]\n\n\t\tfor expIter,actIter in it.zip_longest(expValsA, actValsA):\n\t\t\t[self.assertAlmostEqual(exp,act) for exp,act in it.zip_longest(expIter,actIter)]\n\n\t\tfor expIter,actIter in it.zip_longest(expValsB, actValsB):\n\t\t\t[self.assertAlmostEqual(exp,act) for exp,act in it.zip_longest(expIter,actIter)]", "def difference(list1, list2):\n new_list = []\n for rule1 in list1:\n in_list2 = False\n literals1 = [x.string() for x in rule1]\n for rule2 in list2:\n literals2 = [x.string() for x in rule2]\n if literals1 == literals2:\n in_list2 = True\n if not in_list2:\n new_list.append(rule1)\n return new_list", "def cmpWork(subInfo1, subInfo2):\n work1 = subInfo1[WORK]\n work2 = subInfo2[WORK]\n return work1 < work2", "def cmpWork(subInfo1, subInfo2):\n work1 = subInfo1[WORK]\n work2 = subInfo2[WORK]\n return work1 < work2", "def difference(seq, *seqs):\n yield from differenceby(None, seq, *seqs)", "def test_neg_sub():\n c=[1,2]\n def myfunc(x,y):\n f1=1-x-y-2\n return -f1\n\n f_obj=ADiff(myfunc)\n res=f_obj.Jac(c)\n\n expectAns={'diff': [1,1], 'value': 4}\n\n assert res==expectAns", "def _pairwise_differences(\n X, y,\n *,\n classes=None,\n ordered=False,\n operation=lambda x: x.mean(axis=0)):\n if classes is None:\n classes = np.unique(y)\n\n n_classes = len(classes)\n # All pairwise combinations\n n_class_pairs = n_classes * (n_classes - 1) // 2\n\n # Cache the average vector of each class\n class_averages = group_by(\n X, y, category_orders=classes, operation=operation)\n\n # Compute the actual pairwise differences\n M = np.zeros((n_class_pairs * (1 if not ordered else 2), X.shape[1]))\n index_to_pair_dict = {}\n\n # Make sure to use range(n_classes) when indexing instead of classes,\n # to allow for arbitrary class labels.\n for index, (i, j) in enumerate(combinations(range(n_classes), 2)):\n difference = class_averages[i] - class_averages[j]\n if ordered:\n # 
Clip negative values to 0\n # Assign i - j to index and j - i to index + n_class_pairs\n M[index] = np.clip(difference, 0, None)\n index_to_pair_dict[index] = (i, j)\n M[index + n_class_pairs] = np.clip(-difference, 0, None)\n index_to_pair_dict[index + n_class_pairs] = (j, i)\n else:\n M[index] = np.abs(difference)\n index_to_pair_dict[index] = (i, j)\n\n return M, index_to_pair_dict", "def diff_measure(diff_function,measureA,measureB,obj,Qfilter=None,**options):\n\tresultA=diff_function(measure=measureA,obj=obj,Qfilter=Qfilter,**options)\n\tresultB=diff_function(measure=measureB,obj=obj,Qfilter=Qfilter,**options)\n\ttry:\n\t\tresult=resultA-resultB\n\texcept:\n\t\tresult=np.nan\n\treturn result", "def compare_data_info(lst):\n\n # Check data information is the same across the list of given objects\n for ind, f_obj in enumerate(lst[:-1]):\n if get_data_info(f_obj) != get_data_info(lst[ind+1]):\n return False\n\n # If no data info comparisons fail, return that objects have consistent information\n return True", "def diffs(current, target):\r\n \r\n additions = [val for val in target if val not in current]\r\n deletions = [val for val in current if val not in target]\r\n\r\n return additions, deletions", "def compare(self, **kwargs):\n\n source_params = {'sid': kwargs.get('source_sid'),\n 'did': kwargs.get('source_did'),\n 'scid': kwargs.get('source_scid')\n }\n\n target_params = {'sid': kwargs.get('target_sid'),\n 'did': kwargs.get('target_did'),\n 'scid': kwargs.get('target_scid')\n }\n\n if 'source_tid' in kwargs:\n source_params['tid'] = kwargs['source_tid']\n if 'target_tid' in kwargs:\n target_params['tid'] = kwargs['target_tid']\n\n source = self.fetch_objects_to_compare(**source_params)\n\n target = self.fetch_objects_to_compare(**target_params)\n\n # If both the dict have no items then return None.\n if not (source or target) or (\n len(source) <= 0 and len(target) <= 0):\n return None\n\n return compare_dictionaries(source, target,\n self.node_type,\n self.blueprint.COLLECTION_LABEL,\n self.keys_to_ignore)", "def _exact_compare(tree1, tree2):\n attrs = ['name', 'length', 'support']\n for n1, n2 in zip(tree1.postorder(), tree2.postorder()):\n for attr in attrs:\n if getattr(n1, attr, None) != getattr(n2, attr, None):\n return False\n return True", "def diff(self, other, match=lambda x: True, clean=False):\n result = {}\n\n def _iterativediff(t1, t2, subdir):\n \"\"\"compares two trees and appends new tree nodes to examine to\n the stack\"\"\"\n if t1 is None:\n t1 = {}\n if t2 is None:\n t2 = {}\n\n for e1 in t1:\n realname = subdir + pycompat.fsencode(e1.name)\n\n if e1.type == pygit2.GIT_OBJ_TREE:\n try:\n e2 = t2[e1.name]\n if e2.type != pygit2.GIT_OBJ_TREE:\n e2 = None\n except KeyError:\n e2 = None\n\n stack.append((realname + b'/', e1, e2))\n else:\n n1, fl1 = self.find(realname)\n\n try:\n e2 = t2[e1.name]\n n2, fl2 = other.find(realname)\n except KeyError:\n e2 = None\n n2, fl2 = (None, b'')\n\n if e2 is not None and e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n\n if not match(realname):\n continue\n\n if n1 != n2 or fl1 != fl2:\n result[realname] = ((n1, fl1), (n2, fl2))\n elif clean:\n result[realname] = None\n\n for e2 in t2:\n if e2.name in t1:\n continue\n\n realname = subdir + pycompat.fsencode(e2.name)\n\n if e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n elif match(realname):\n n2, fl2 = other.find(realname)\n result[realname] = ((None, b''), (n2, fl2))\n\n stack = []\n _iterativediff(self._tree, 
other._tree, b'')\n while stack:\n subdir, t1, t2 = stack.pop()\n # stack is populated in the function call\n _iterativediff(t1, t2, subdir)\n\n return result", "def equality_check(a, b):\n\n def check_item(x, y, attr):\n if isinstance(x, hoomd.operation._HOOMDGetSetAttrBase):\n equality_check(x, y)\n return\n if isinstance(x, Mapping):\n for k, v in x.items():\n assert k in y, f\"For attr {attr}, key difference {k}\"\n check_item(v, y[k], \".\".join((attr, str(k))))\n return\n if not isinstance(x, str) and hasattr(x, \"__len__\"):\n assert len(x) == len(y)\n for i, (v_x, v_y) in enumerate(zip(x, y)):\n check_item(v_x, v_y, attr + f\"[{i}]\")\n return\n if isinstance(x, float):\n assert numpy.isclose(x, y), f\"attr '{attr}' not equal:\"\n return\n assert x == y, f\"attr '{attr}' not equal:\"\n\n if not isinstance(a, hoomd.operation._HOOMDGetSetAttrBase):\n return a == b\n assert type(a) == type(b)\n\n _check_obj_attr_compatibility(a, b)\n\n for attr in a.__dict__:\n if attr in a._skip_for_equality:\n continue\n\n if attr == \"_param_dict\":\n param_keys = a._param_dict.keys()\n b_param_keys = b._param_dict.keys()\n # Check key equality\n assert param_keys == b_param_keys, \"Incompatible param_dict keys:\"\n # Check item equality\n for key in param_keys:\n check_item(a._param_dict[key], b._param_dict[key], key)\n continue\n\n if attr == \"_typeparam_dict\":\n keys = a._typeparam_dict.keys()\n b_keys = b._typeparam_dict.keys()\n # Check key equality\n assert keys == b_keys, \"Incompatible _typeparam_dict:\"\n # Check item equality\n for key in keys:\n for type_, value in a._typeparam_dict[key].items():\n check_item(value, b._typeparam_dict[key][type_], \".\".join(\n (key, str(type_))))\n continue\n\n check_item(a.__dict__[attr], b.__dict__[attr], attr)", "def rgetattr(obj, attr, *args):\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split('.'))", "def visit_Compare(self, node):\n self.generic_visit(node)\n if len(node.ops) > 1:\n raise NotImplementedError(\"Multiple comparisons not supported\")\n\n op, comparator = node.ops[0], node.comparators[0]\n if isinstance(op, ast.In):\n # Special case: `contains` reverses the operands.\n return to_call(to_attribute(self.operator, 'contains'),\n [comparator, node.left])\n elif isinstance(op, ast.NotIn):\n # Special case: there is no `not_contains`.\n return to_call(to_attribute(self.operator, 'not_'), [\n to_call(to_attribute(self.operator, 'contains'),\n [comparator, node.left])\n ])\n else:\n # General case\n return to_call(self.op_to_function(op), [node.left, comparator])", "def eval_det(pred_all, gt_all, ovthresh=0.25, use_07_metric=False):\n pred = {} # map {classname: pred}\n gt = {} # map {classname: gt}\n for img_id in pred_all.keys():\n for classname, bbox, score in pred_all[img_id]:\n if classname not in pred: pred[classname] = {}\n if img_id not in pred[classname]:\n pred[classname][img_id] = []\n if classname not in gt: gt[classname] = {}\n if img_id not in gt[classname]:\n gt[classname][img_id] = []\n pred[classname][img_id].append((bbox,score))\n for img_id in gt_all.keys():\n for classname, bbox in gt_all[img_id]:\n if classname not in gt: gt[classname] = {}\n if img_id not in gt[classname]:\n gt[classname][img_id] = []\n gt[classname][img_id].append(bbox)\n\n rec = {}\n prec = {}\n ap = {}\n for classname in gt.keys():\n print('Computing AP for class: ', classname)\n rec[classname], prec[classname], ap[classname] = eval_det_cls(pred[classname], gt[classname], ovthresh, 
use_07_metric)\n print(classname, ap[classname])\n \n return rec, prec, ap", "def test_find_diff(self):\n\n # Ensure lists and sets are handled appropriately\n self.assertEqual(\"unchanged\",\n utils.find_diff_of_lists_and_sets([3, 2], [2, 3, 2]))\n self.assertEqual([[1], [2, 3], [4]],\n utils.find_diff_of_lists_and_sets([1, 2, 3], [2, 3, 4]))\n self.assertEqual(\"unchanged\",\n utils.find_diff_of_lists_and_sets({3, 2}, {2, 3}))\n self.assertEqual([[1], [2, 3], [4]],\n utils.find_diff_of_lists_and_sets({1, 2, 3}, {2, 3, 4}))\n self.assertEqual(\"unchanged\",\n utils.find_diff_of_lists_and_sets({2, 3}, [2, 3]))\n self.assertEqual([[1], [2, 3], [4]],\n utils.find_diff_of_lists_and_sets([1, 2, 3], {2, 3, 4}))\n self.assertEqual([None, {1, 2}],\n utils.find_diff_of_lists_and_sets(None, {1, 2}))\n self.assertEqual(\"unchanged\",\n utils.find_diff_of_lists_and_sets(None, None))\n\n # Ensure ints and floats are handled appropriately\n self.assertEqual(1, utils.find_diff_of_numbers(5, 4))\n self.assertEqual(1.0, utils.find_diff_of_numbers(5.0, 4.0))\n self.assertEqual(1.0, utils.find_diff_of_numbers(5.0, 4))\n self.assertEqual(\"unchanged\", utils.find_diff_of_numbers(5.0, 5.0))\n self.assertEqual(\"unchanged\", utils.find_diff_of_numbers(5, 5.0))\n self.assertEqual([4, None],\n utils.find_diff_of_numbers(4, None))\n self.assertEqual(\"unchanged\", utils.find_diff_of_numbers(None, None))\n\n # Ensure strings are handled appropriately\n self.assertEqual(\"unchanged\",\n utils.find_diff_of_strings_and_bools(\"Hello\", \"Hello\"))\n self.assertEqual([\"Hello\", \"team\"],\n utils.find_diff_of_strings_and_bools(\"Hello\", \"team\"))\n self.assertEqual(\"unchanged\",\n utils.find_diff_of_strings_and_bools(None, None))\n\n # Ensure dates are handled appropriately\n a = datetime(2021, 6, 28)\n b = datetime(2021, 6, 27, 1)\n self.assertEqual(\"unchanged\", utils.find_diff_of_dates(a, a))\n self.assertEqual(\"+23:00:00\", utils.find_diff_of_dates(a, b))\n self.assertEqual(\"-23:00:00\", utils.find_diff_of_dates(b, a))\n self.assertEqual([\"06/28/21 00:00:00\", None], utils.find_diff_of_dates(a, None))\n self.assertEqual(\"unchanged\", utils.find_diff_of_numbers(None, None))\n\n # Ensure that differencing dictionaries is handled appropriately\n dict1 = {\n \"a\": 0.25,\n \"b\": 0.0,\n \"c\": [1, 2],\n \"d\": datetime(2021, 6, 28),\n \"e\": \"hi\",\n \"f\": \"hi2\"\n }\n dict2 = {\n \"a\": 0.25,\n \"b\": 0.01,\n \"c\": [2, 3],\n \"d\": datetime(2021, 6, 27, 1),\n \"e\": \"hihi\",\n \"g\": 15\n }\n expected_diff = {\n \"a\": \"unchanged\",\n \"b\": -0.01,\n \"c\": [[1], [2], [3]],\n \"d\": \"+23:00:00\",\n \"e\": [\"hi\", \"hihi\"],\n \"f\": [\"hi2\", None],\n \"g\": [None, 15]\n }\n self.assertDictEqual(expected_diff, utils.find_diff_of_dicts(dict1, dict2))", "def get_pairwise_matches(pos1, descs1, pos2, descs2, up_to=30):\n assert pos1.shape[0] * pos2.shape[0] < 1e8, \\\n \"Too many points: increase cornerness threshold\"\n assert pos1.shape[0] > 10 and pos1.shape[0] > 10, \\\n \"Not enough points: lower cornerness threshold\"\n # get the similarities between all descriptors\n sims = np.dot(descs1, descs2.T)\n # get the best matches\n mi2 = sims.argmax(axis=1).squeeze()\n ms = sims.max(axis=1).squeeze()\n bmi1 = ms.argsort()[::-1][:up_to]\n bmi2 = mi2[bmi1]\n # return their positions\n bp1 = pos1[bmi1]\n bp2 = pos2[bmi2]\n return bp1, bp2", "def changed_attrs(old_version, new_version, interesting_attrs):\n # Use an OrderedDict so that we preserve the order from interesting_attrs\n changed = OrderedDict()\n 
for attr in interesting_attrs:\n if attr in old_version and attr not in new_version:\n changed[attr] = [old_version[attr], None]\n elif attr in new_version and attr not in old_version:\n changed[attr] = [None, new_version[attr]]\n elif old_version[attr] != new_version[attr]:\n changed[attr] = [old_version[attr], new_version[attr]]\n return changed", "def _attributes(self, ext1, ext2):\n errorlist = []\n for attr in ['data', 'mask', 'variance', 'OBJMASK', 'OBJCAT']:\n attr1 = getattr(ext1, attr, None)\n attr2 = getattr(ext2, attr, None)\n if (attr1 is None) ^ (attr2 is None):\n errorlist.append(f'Attribute error for {attr}: '\n f'{attr1 is not None} v {attr2 is not None}')\n elif attr1 is not None:\n if isinstance(attr1, Table):\n if len(attr1) != len(attr2):\n errorlist.append(f'attr lengths differ: '\n f'{len(attr1)} v {len(attr2)}')\n else: # everything else is pixel-like\n if attr1.dtype.name != attr2.dtype.name:\n errorlist.append(f'Datatype mismatch for {attr}: '\n f'{attr1.dtype} v {attr2.dtype}')\n if attr1.shape != attr2.shape:\n errorlist.append(f'Shape mismatch for {attr}: '\n f'{attr1.shape} v {attr2.shape}')\n if 'int' in attr1.dtype.name:\n try:\n assert_most_equal(attr1, attr2, max_miss=self.max_miss)\n except AssertionError as e:\n errorlist.append(f'Inequality for {attr}: '+str(e))\n else:\n try:\n assert_most_close(attr1, attr2, max_miss=self.max_miss,\n rtol=self.rtol, atol=self.atol)\n except AssertionError as e:\n errorlist.append(f'Mismatch for {attr}: '+str(e))\n return errorlist", "def svn_diff_fns2_invoke_token_compare(_obj, diff_baton, ltoken, rtoken):\n return _diff.svn_diff_fns2_invoke_token_compare(_obj, diff_baton, ltoken, rtoken)", "def methods_of(obj):\r\n result = []\r\n for i in dir(obj):\r\n if callable(getattr(obj, i)) and not i.startswith('_'):\r\n result.append((i, getattr(obj, i)))\r\n return result", "def _unpack_pre_snap_posts(wrapper: CallableT) -> Tuple[List[List[Contract]], List[Snapshot], List[Contract]]:\n preconditions = getattr(wrapper, \"__preconditions__\") # type: List[List[Contract]]\n snapshots = getattr(wrapper, \"__postcondition_snapshots__\") # type: List[Snapshot]\n postconditions = getattr(wrapper, \"__postconditions__\") # type: List[Contract]\n\n return preconditions, snapshots, postconditions", "def _build_comparator(op, merge_op, shortcut, return_root_if_empty_other):\n def comparator(self, other, return_inds=False):\n \"\"\"`plist` comparison operator. **Comparisons filter plists.**\n\n **IMPORTANT:** `plist` comparisons all filter the `plist` and return a new\n `plist`, rather than a truth value.\n\n `comparator` is not callable directly from `plist`. It implements the various\n python comparison operations: `==`, `<`, `>`, etc. 
The comparison operators\n can be called directly with their corresponding 'magic' functions,\n `plist.__eq__`, `plist.__lt__`, `plist.__gt__`, etc., but are generally just\n called implicitly.\n\n Examples:\n `plist` comparators can filter on leaf values:\n ```python\n foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])\n assert (foos.aslist() ==\n [{'foo': 0, 'bar': 0},\n {'foo': 1, 'bar': 1},\n {'foo': 2, 'bar': 0}])\n zero_bars = foos.bar == 0\n assert (zero_bars.aslist() ==\n [{'foo': 0, 'bar': 0},\n {'foo': 2, 'bar': 0}])\n nonzero_bars = foos.bar != 0\n assert (nonzero_bars.aslist() ==\n [{'foo': 1, 'bar': 1}])\n ```\n\n They can also filter on other plists so long as the structures are\n compatible:\n ```python\n assert ((foos == zero_bars).aslist() ==\n [{'foo': 0, 'bar': 0},\n {'foo': 2, 'bar': 0}])\n assert ((foos.foo > foos.bar).aslist() ==\n [{'foo': 2, 'bar': 0}])\n ```\n\n The same is true when comparing against lists with compatible structure:\n ```python\n assert ((foos.foo == [0, 1, 3]).aslist() ==\n [{'foo': 0, 'bar': 0},\n {'foo': 1, 'bar': 1}])\n ```\n\n This all generalizes naturally to plists that have been grouped:\n ```python\n by_bar_foo = foos.bar.groupby().foo.groupby()\n assert (by_bar_foo.aslist() ==\n [[[{'foo': 0, 'bar': 0}],\n [{'foo': 2, 'bar': 0}]],\n [[{'foo': 1, 'bar': 1}]]])\n nonzero_by_bar_foo = by_bar_foo.bar > 0\n assert (nonzero_by_bar_foo.aslist() ==\n [[[],\n []],\n [[{'bar': 1, 'foo': 1}]]])\n zero_by_bar_foo = by_bar_foo.foo != nonzero_by_bar_foo.foo\n assert (zero_by_bar_foo.aslist() ==\n [[[{'foo': 0, 'bar': 0}],\n [{'foo': 2, 'bar': 0}]],\n [[]]])\n assert ((by_bar_foo.foo == [[[0], [3]], [[1]]]).aslist() ==\n [[[{'foo': 0, 'bar': 0}],\n []],\n [[{'foo': 1, 'bar': 1}]]])\n ```\n\n Lists with incompatible structure are compared to `self` one-at-a-time,\n resulting in set-like filtering where the two sets are merged with an 'or':\n ```python\n\n assert ((foos.foo == [0, 1, 3, 4]).aslist() ==\n [{'foo': 0, 'bar': 0},\n {'foo': 1, 'bar': 1}])\n\n assert ((by_bar_foo.foo == [0, 1, 3, 4]).aslist() ==\n [[[{'foo': 0, 'bar': 0}],\n []],\n [[{'foo': 1, 'bar': 1}]]])\n ```\n\n When comparing against an empty list, `==` always returns an empty list, but\n all other comparisons return `self`:\n ```python\n assert ((foos.foo == []).aslist() == [])\n assert ((foos.foo < []).aslist() ==\n [{'foo': 0, 'bar': 0},\n {'foo': 1, 'bar': 1},\n {'foo': 2, 'bar': 0}])\n assert ((by_bar_foo == nonzero_by_bar_foo).aslist() ==\n [[[],\n []],\n [[{'foo': 1, 'bar': 1}]]])\n assert ((by_bar_foo.foo > nonzero_by_bar_foo.foo).aslist() ==\n [[[{'foo': 0, 'bar': 0}],\n [{'foo': 2, 'bar': 0}]],\n [[]]])\n ```\n\n Note that `plist.nonempty` can be used to remove empty internal `plist`s\n after filtering a grouped `plist`:\n ```python\n assert ((by_bar_foo == nonzero_by_bar_foo).nonempty(-1).aslist() ==\n [[[{'foo': 1, 'bar': 1}]]])\n ```\n\n Args:\n other: Object to compare against.\n return_inds: Optional bool. When `True`, causes the comparison to return\n the plist indices of the matching items. When `False`\n (the default), causes the comparison to return a plist of the\n matching values.\n\n Returns:\n A new plist, filtered from `self` and `other` according to the operation\n provided to `_build_comparator`, if `return_inds` is `False`. 
Otherwise,\n returns the corresponding indices into self.\n \"\"\"\n if self is other:\n return shortcut(self, return_inds)\n inds = []\n if isinstance(other, list):\n if len(self) == len(other):\n for i, (x, o) in enumerate(zip(self, other)):\n if isinstance(x, plist):\n child_inds = comparator(x, o, return_inds=True)\n inds.append(child_inds)\n elif op(x, o):\n inds.append(i)\n elif len(other) > 0:\n inds = comparator(self, other[0], return_inds=True)\n for o in other[1:]:\n inds = _merge_indices(inds, comparator(self, o, return_inds=True), merge_op)\n else:\n # len(other) == 0\n if return_inds:\n inds = self.lfill(pepth=-1) if return_root_if_empty_other else []\n else:\n return self.__root__ if return_root_if_empty_other else plist()\n else:\n for i, x in enumerate(self):\n if isinstance(x, plist):\n child_inds = comparator(x, other, return_inds=True)\n inds.append(child_inds)\n elif op(x, other):\n inds.append(i)\n\n if return_inds:\n return inds\n\n return self.__root__[inds]\n\n return comparator", "def compare_all_signals(signal_list, reference_result, new_result):\n for signal in signal_list:\n if signal[u'signalName'] in reference_result.names():\n \n reference_signal = reference_result.data(signal[u'signalName'])\n \n try:\n new_result_signal = new_result.data(signal[u'signalName'])\n except:\n error_message = (u'Signal ' + signal[u'signalName'] +\n u' of the model ' +\n \" NOT FOUND!!!!!!\")\n raise EDRISError.ComparisonError(error_message) \n \n comparison_method = get_comparison_method(signal)\n parameters = get_comparison_parameters(signal)\n \n if not comparison_method(reference_signal, new_result_signal,\n parameters):\n error_message = (u'Signal ' + signal[u'signalName'] +\n u' of the model ' +\n # model_name +\n \" has a large deviation from the reference \"\n \"signal, please check if everything is ok\")\n raise EDRISError.ComparisonError(error_message)\n else:\n print(\"Signal\" + signal[u'signalName'] + \" NOT FOUND in reference result\")", "def minusRes(res1, res2):\n return [(x - y) for x, y in zip(res1, res2)]", "def difference(self, *args, **keys):\n diffs = super(InstrumentContext, self).difference(*args, **keys)\n for diff in diffs:\n diff.instrument = self.instrument\n return diffs", "def test_perform_pairwise_tests_multi_comp(self):\r\n # Verified with R's t.test function.\r\n exp = [['foo', 'bar', -6.5999999999999996, 0.0070804795641244006,\r\n 0.021241438692373202, nan, nan], ['foo', 'baz',\r\n -\r\n 9.7979589711327115, 0.00060818494446333643, 0.0018245548333900093,\r\n nan, nan], ['bar', 'baz', -3.0, 0.05766888562243732,\r\n 0.17300665686731195, nan, nan]]\r\n obs = _perform_pairwise_tests(self.labels2, self.dists2, 'two-sided',\r\n 0)\r\n self.compare_multiple_level_array(obs, exp)", "def GetDiffParams(expr='d', min_match_ratio=0.6, min_match_size=2, dbg=False):\n assert expr in EXPRS\n assert min_match_size in xrange(1, 5)\n assert min_match_ratio > 0.0 and min_match_ratio < 1.0\n return (expr, min_match_ratio, min_match_size, dbg)", "def diff_list(self, other):\n assert (isinstance(other, type(self)))\n result = []\n\n if not len(self.mechs):\n return result\n\n for this_mech in self.mechs:\n assert (isinstance(this_mech, SingleMechStats))\n this_name = this_mech.Name\n other_mech = other.mech_data_by_name(this_name)\n try:\n if other_mech == None:\n print \"Mech %s not found! 
Not played before?\" % this_name\n other_mech = SingleMechStats()\n other_mech.zero()\n\n #print \"names: self, other: \" , this_name, other_mech\n #print \"self\" , this_mech.data\n #print \"other\", other_mech.data\n\n diff = this_mech\n\n #print \"************ diff - \",diff\n #print \"************\" , diff.data\n else:\n diff = this_mech - other_mech\n #if this_mech.MatchesPlayed != other_mech.MatchesPlayed:\n # result.append(this_mech-other_mech)\n if diff.Wins + diff.Losses > 0:\n #print \"Appending\"\n result.append(diff)\n #print \"result:\", \"\\n\".join([str(x) for x in result])\n except AttributeError:\n print \"Mech not played before!\"\n print \"names: self, other: \", this_name, other_mech\n print \"self, other: \", this_mech.data, other.data\n raise\n print \"FINAL result:\", \"\\n\".join([str(x) for x in result])\n\n return result", "def cmp ( self, object1, object2 ):\n return cmp( object1[ self.index ], object2[ self.index ] )", "def compare(*args):\n return _ida_hexrays.compare(*args)", "def _diff(self, param, diff):\n pass", "def compare(obj_a, obj_b):\n\n return tuple_to_string(obj_a) == tuple_to_string(obj_b)", "def svn_diff_fns_invoke_token_compare(_obj, diff_baton, ltoken, rtoken):\n return _diff.svn_diff_fns_invoke_token_compare(_obj, diff_baton, ltoken, rtoken)", "def _cmp_dispatcher(other_method_name):\n\n def dispatched_cmp(self, other):\n try:\n other_method = getattr(other, other_method_name)\n except AttributeError:\n return False\n return other_method(self)\n return dispatched_cmp", "def diffDicts(d1, d2, hashfunc, both_ref=None):\n o_map = {}\n for o in d1:\n o_map[hashfunc(o)] = o\n both = []\n only_d2 = []\n for o in d2:\n hashd = hashfunc(o)\n if hashd in o_map:\n if both_ref is d2:\n both.append(o)\n o_map.pop(hashd)\n else:\n both.append(o_map.pop(hashd))\n else:\n only_d2.append(o)\n only_d1 = o_map.values()\n return (only_d1, only_d2, both)", "def _DiffFunc(self, before=None, after=None, sort=True):\n before = before if before is not None else self._size_infos[0]\n after = after if after is not None else self._size_infos[1]\n return diff.Diff(before, after, sort=sort)" ]
[ "0.6840443", "0.57526386", "0.5622923", "0.54995364", "0.54768133", "0.5458876", "0.5385492", "0.5371001", "0.5325865", "0.5241565", "0.51938784", "0.5181397", "0.51578325", "0.51564384", "0.5137712", "0.50981116", "0.50660944", "0.50567687", "0.50435317", "0.5015092", "0.5006091", "0.50037897", "0.49938032", "0.49924865", "0.49335596", "0.4925117", "0.49073595", "0.4897069", "0.48746058", "0.48631263", "0.4859487", "0.4854815", "0.48513603", "0.48400187", "0.48272586", "0.48158136", "0.48154673", "0.47974128", "0.4779612", "0.47775492", "0.47733888", "0.47671202", "0.47602904", "0.47485903", "0.47418082", "0.4738107", "0.4738107", "0.47370166", "0.4728499", "0.47171524", "0.4716284", "0.47135237", "0.4710414", "0.4710397", "0.4706936", "0.4706936", "0.47051543", "0.46958125", "0.46817875", "0.4679653", "0.46774697", "0.46742302", "0.4672815", "0.4670379", "0.4670379", "0.46631494", "0.465884", "0.46445146", "0.46445134", "0.4643646", "0.46367398", "0.46324238", "0.46293926", "0.46262217", "0.4618218", "0.46145603", "0.4612896", "0.46121374", "0.46094006", "0.46054935", "0.46052083", "0.4604692", "0.4604197", "0.46018234", "0.45987824", "0.45956627", "0.45921645", "0.4590084", "0.45852408", "0.45802632", "0.45798722", "0.45788875", "0.45783296", "0.457828", "0.4572965", "0.45728275", "0.4561553", "0.45520318", "0.45405608", "0.4540142" ]
0.7408652
0
Postmortem, using a custom debug function if passed
def post_mortem(*args, debug_fn: Optional[Callable] = None, **kwargs) -> None:
    if debug_fn is None:
        import pdb
        debug_fn = pdb.post_mortem
    debug_fn()
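A minimal usage sketch for the post_mortem helper in this record, with the helper repeated (plus its typing imports) so the block runs on its own; the print_traceback hook and the deliberate division by zero are illustrative assumptions, not part of the dataset. Passing a zero-argument callable as debug_fn overrides the default drop into pdb.post_mortem.

import traceback
from typing import Callable, Optional

def post_mortem(*args, debug_fn: Optional[Callable] = None, **kwargs) -> None:
    if debug_fn is None:
        import pdb
        debug_fn = pdb.post_mortem
    debug_fn()

def print_traceback() -> None:
    # Non-interactive stand-in for pdb.post_mortem: dump the active traceback and return.
    traceback.print_exc()

try:
    1 / 0
except ZeroDivisionError:
    # A custom hook keeps this runnable in scripts/CI; omit debug_fn to get an interactive pdb session.
    post_mortem(debug_fn=print_traceback)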
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug():", "def debug_run(self):\n raise NotImplementedError", "def xpm(Pdb=Pdb):\n info = sys.exc_info()\n print(traceback.format_exc())\n post_mortem(info[2], Pdb)", "def after_step(context, step):\n if context.config.userdata.getbool(\"debug\") and step.status == \"failed\":\n spost_mortem(step.exc_traceback)", "def debug(self, *args, **kwargs):", "def debug(state: bool, /) -> None:", "def debug() -> bool:", "def do_debug(self, arg):\n orig_trace = sys.gettrace()\n if orig_trace:\n sys.settrace(None)\n globals = self.curframe.f_globals\n locals = self.curframe_locals\n Config = self.ConfigFactory\n\n class PdbppWithConfig(self.__class__):\n def __init__(self_withcfg, *args, **kwargs):\n kwargs.setdefault(\"Config\", Config)\n super(PdbppWithConfig, self_withcfg).__init__(*args, **kwargs)\n\n # Backport of fix for bpo-31078 (not yet merged).\n self_withcfg.use_rawinput = self.use_rawinput\n\n local.GLOBAL_PDB = self_withcfg\n local.GLOBAL_PDB._use_global_pdb_for_class = self.__class__\n\n prev_pdb = local.GLOBAL_PDB\n p = PdbppWithConfig(self.completekey, self.stdin, self.stdout)\n p._prompt = \"({}) \".format(self._prompt.strip())\n self.message(\"ENTERING RECURSIVE DEBUGGER\")\n self._flush_sticky_messages()\n try:\n with self._custom_completer():\n sys.call_tracing(p.run, (arg, globals, locals))\n except Exception:\n exc_info = sys.exc_info()[:2]\n self.error(traceback.format_exception_only(*exc_info)[-1].strip())\n finally:\n local.GLOBAL_PDB = prev_pdb\n self.message(\"LEAVING RECURSIVE DEBUGGER\")\n\n if orig_trace:\n sys.settrace(orig_trace)\n self.lastcmd = p.lastcmd", "def debug():\n assert current_app.debug == False, \"Don't panic! You're here by request of debug()\"", "def __debug(msg):\n\n pass", "def pm(conn):\n #pdb.post_mortem(conn.root.getconn()._last_traceback)\n redir = redirected_stdio(conn)\n try:\n conn.modules.pdb.post_mortem(conn.root.getconn()._last_traceback)\n finally:\n redir.restore()", "def debug_option(args, run):\n run.debug = True", "def debug(self, *args: Any, **kwargs) -> None:\n ...", "def debug(msg):\n #print(msg)\n pass\n #end debug", "def debug(self):\n self._debug = True\n self.run()\n self._debug = False", "def gdb_breakpoint():\n _gdb_python_call_gen('gdb_breakpoint')()", "def _debug_trace():\n from PyQt4.QtCore import pyqtRemoveInputHook\n from pdb import set_trace\n pyqtRemoveInputHook()\n set_trace()", "def debug_on(*skip_exceptions):\n if not skip_exceptions:\n skip_exceptions = ()\n\n def decorator(f):\n global DEBUG\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n if not DEBUG:\n return f(*args, **kwargs)\n try:\n return f(*args, **kwargs)\n except Exception as e:\n for skip in skip_exceptions:\n if isinstance(e, skip):\n raise e\n print '\\n'\n for line in traceback.format_tb(sys.exc_info()[2]):\n print line\n print str(e.__class__.__name__) + ': ' + str(e) + '\\n'\n pdb.post_mortem(sys.exc_info()[2])\n raise e\n return wrapper\n\n return decorator", "def debugging_tests():\n logging.warning(\"Running debugging tests...\")\n pass", "def debug(self, message):\r\n pass", "def pdb(item, item2=None):\n import pdb # noqa\n pdb.set_trace() # noqa", "def debug():\n # written before I knew about the pdb module\n caller = currentframe().f_back\n method_name = caller.f_code.co_name\n line_no = getframeinfo(caller).lineno\n print(method_name + \": line \" + str(line_no))\n code.interact(local=dict(globals(), **caller.f_locals))", "def debug(msg):", "def debuggable(f):\n \n debugging = f.func_globals.get(\"DEBUGGING\",False)\n if debugging: 
return f\n\n f.func_code = __transform_codeobjects__(f.func_code,__debuggable__)\n return f", "def debug(self):\r\n debug = _DebugResult()\r\n self._wrapped_run(debug, True)\r\n self._tearDownPreviousClass(None, debug)\r\n self._handleModuleTearDown(debug)", "def report_debug(self, rc):\n pass", "def fault_debug(value: bool = False) -> None:", "def debugger(self, force=False):\r\n from IPython.utils.warn import error\r\n if not (force or self.call_pdb):\r\n return\r\n\r\n if not hasattr(sys, 'last_traceback'):\r\n error('No traceback has been produced, nothing to debug.')\r\n return\r\n\r\n from pudb import pm\r\n\r\n with self.readline_no_record:\r\n pm()", "def pdb_view(request):\n import pdb; pdb.set_trace()\n return HttpResponse(\"This works.\")", "def debug(msg):\n if(CONFIG['debug']):\n logIt(msg)", "def __smartdebug__(co,func_globals):\n\n from byteplay import Code,SetLineno,Label,LOAD_GLOBAL,POP_JUMP_IF_FALSE,POP_JUMP_IF_TRUE,JUMP_FORWARD\n code = Code.from_code(co)\n instructions = code.code\n\n # First, find all the \"if DEBUG:\" and \"if not DEBUG\"\n # We collect in reverse order so that we can update\n # in place more easily\n debugs = []\n for offset,op_arg in enumerate(instructions):\n if op_arg == (LOAD_GLOBAL,'DEBUG') and instructions[offset+1][0] in (POP_JUMP_IF_FALSE,POP_JUMP_IF_TRUE):\n debugs.insert(0,offset)\n\n # We want the bounds of the DEBUG true part and DEBUG false part for each\n # most ifs look like\n # LOAD_GLOBAL DEBUG\n # POP_JUMP_IF_FALSE L1 (sense may be reversed with _TRUE)\n # ...\n # JUMP_FORWARD L2\n # L1:\n # ...\n # L2:\n # They look different at the ends of loops, but I'm skipping those\n def back_one(x):\n while x > 0:\n opcode = instructions[x][0]\n if opcode != SetLineno and not isinstance(opcode,Label):\n break\n x -= 1\n return x\n def offset_of(L):\n for off,(op,_) in enumerate(instructions):\n if op is L: return off\n return None\n def true_false(x):\n pop_jump,L1 = instructions[x+1]\n O1 = offset_of(L1)\n if O1 < x: return None # Jumping backward, Loop if\n OJF = back_one(O1)\n jf,L2 = instructions[OJF]\n if jf != JUMP_FORWARD: return None # Not my pattern\n O2 = offset_of(L2)\n if pop_jump == POP_JUMP_IF_FALSE:\n return ((x+2,OJF),(OJF+1,O2),(x,O2))\n return ((OJF+1,O2),(x+2,OJF),(x,O2))\n \n\n while debugs:\n x = debugs[0]\n del debugs[0]\n bounds = true_false(x)\n if not bounds: continue\n (t0,t1),(f0,f1),(a,b) = bounds\n if func_globals.get('DEBUG',False):\n using = instructions[t0:t1]\n else:\n using = instructions[f0:f1]\n instructions[a:b] = using\n\n return code.to_code()", "def _debug():\n return _DEBUG", "def debug(self):\n raise NotImplementedError", "def main(config, debug):\n config.debug = debug\n if config.debug:\n click.echo('Debug info...')", "def setDebug():\n\tglobal debug\n\tdebug = True", "def toggle_remote_debug():\n import sys\n import os\n\n debug_on = len(sys.argv) >= 2 and '--remote-debug' in sys.argv[1]\n\n if debug_on:\n egg_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"pycharm-debug-py3k.egg\"))\n sys.path.append(egg_path)\n import pydevd\n pydevd.settrace('localhost', port=9090)\n\n yield\n\n if debug_on:\n import pydevd\n pydevd.stoptrace()", "def debug(debug_string, to_debug):\n if to_debug:\n print(\"DEBUG {0}: {1}\".format(strftime('%H:%M:%S'), debug_string))", "def debug(self, msg, *args, **kwargs):\n pass", "def compute_debug(self):", "def DEBUG(*args, **kwargs):\n if __name__ != \"__main__\":\n print(*args, **kwargs)", "def main(debug):\n click.echo('Debug mode is 
{{}}'.format(debug))", "def run_example_debug_cmd(example_module_name, example_argv):\n return run_example_debug(example_module_name, example_argv)", "def set_trace():\n import pdb\n import sys\n stdout = sys.stdout\n sys.stdout = sys.__stdout__\n pdb.Pdb().set_trace(sys._getframe().f_back)", "def debugger(self):\n\n if not self.rc.pdb:\n return\n pdb.pm()", "def gdb(*args):\n _gdb_python_call_gen('gdb', *args)()", "def in_debugger():\n return bool(sys.gettrace())", "def debugLog(message):\n if debugFlag != None:\n print \"#debug: \" + str(message)", "def set_debug_flag(flag):\n pma._pma_set_debug_flag(flag)", "def debugprint(debugobject, debugstring):\n if CMDLINEARGS.debug:\n print \"===== \" + debugstring + \" =====\"\n pprint.pprint(debugobject)\n print \"===== \" + debugstring + \" =====\"\n print \"\"", "def debugprint(debugobject, debugstring):\n if CMDLINEARGS.debug:\n print \"===== \" + debugstring + \" =====\"\n pprint.pprint(debugobject)\n print \"===== \" + debugstring + \" =====\"\n print \"\"", "def debug(debug=False):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if debug:\n print(f\"Calling {func.__name__.upper()}\")\n value = func(*args, **kwargs)\n return value\n else:\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator", "def debug_print(function):\n def debug(thing):\n print(function(thing))\n return thing\n return debug", "def test_func(debug: bool) -> None:\n click.echo(debug)", "def exec_debug(command_string) :\n\t\t\tline += cmd_string + \"\\n\"\n\t\t\treturn line", "def debug_print(debug_data):\n if DEBUG_MODE == \"true\":\n pp.pprint(debug_data)", "def pdb_option(args, run):\n run.pdb = True", "def ddebug(msg, err=None): # pragma: no cover\n import os\n if err:\n err = ''.join(traceback.format_exception(*err))\n else:\n err = ''\n sys.__stdout__.write(\"({}) {} {}\".format(os.getpid(), msg, err)+'\\n')\n sys.__stdout__.flush()", "def debug(func):\n\[email protected](func)\n\tdef wrapper_debug(*args, **kwargs):\n\t\targs_repr = [repr(a) for a in args] \n\t\tkwargs_repr = [f\"{k}={v}\" for k, v in kwargs.items()] \n\t\tsignature = \", \".join(args_repr + kwargs_repr) \n\n\t\tprint(f\"Calling {func.__name__} ({signature})\")\n\n\t\tvalue = func(*args, **kwargs)\n\t\tprint(f\"{func.__name__!r} returned {value!r}\") \n\t\t\n\t\treturn value\n\n\treturn wrapper_debug", "def settrace_patch(tracefunc: Any) -> None:\n global _is_debugger_active\n _is_debugger_active = bool(tracefunc)\n try:\n _original_settrace(tracefunc)\n except Exception:\n # IDEs, such as PyCharm, may ban calls to settrace().\n # http://pydev.blogspot.com/2007/06/why-cant-pydev-debugger-work-with.html\n # In such cases, do nothing.\n pass", "def output_debug_info(self):", "def mock_utils_debugger(mocker):\n\n def call_orig_func(func, *args, **kwargs):\n \"\"\"\n Helper to mock pdf.runcall interface\n \"\"\"\n return func(*args, **kwargs)\n\n debugger_mock = mocker.patch(\"radish.utils.get_debugger\")\n debugger_mock.return_value.runcall = mocker.MagicMock(side_effect=call_orig_func)\n return debugger_mock.return_value", "def debug(state, message):\n if state:\n print(message)", "def enter_pdb():\n import sys, pdb\n sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__\n pdb.set_trace()", "def debug(target=None):\n logger.verbose(True)\n man = Manager()\n man.mode_dbg = True\n man.init_components(target)\n man.start_app()", "def _debug_wrap(func):\n\n def wrapper(*args, **kwargs):\n _debug_print(f\"{datetime.datetime.now()} - About to run: 
{func.__name__}\")\n ret_val = func(*args, **kwargs)\n _debug_print(f\"{datetime.datetime.now()} - Completed run: {func.__name__}\")\n return ret_val\n\n return wrapper", "def print_debug(msg):\n if IS_DEBUG:\n print(msg)", "def setup_debugging():\n import sys\n sys.path.append('/root/pycharm-debug-py3k.egg')\n import pydevd\n pydevd.settrace('192.168.4.47', port=5422, stdoutToServer=True, stderrToServer=True, suspend=False)", "def debugPrint(dbg, msg):\n if(dbg):\n print(msg)", "def debug(func):\n\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n args_repr = [repr(a) for a in args]\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()]\n signature = \", \".join(args_repr + kwargs_repr)\n print(f\"Calling {func.__name__}({signature})\")\n value = func(*args, **kwargs)\n print(f\"{func.__name__!r} returned {value!r}\")\n return value\n return wrapper_debug", "def debug(func):\n\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n args_repr = [repr(a) for a in args]\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()]\n signature = \", \".join(args_repr + kwargs_repr)\n print(f\"Calling {func.__name__}({signature})\")\n value = func(*args, **kwargs)\n print(f\"{func.__name__!r} returned {value!r}\")\n return value\n\n return wrapper_debug", "def debug(self):\r\n self.setUp()\r\n getattr(self, self._testMethodName)()\r\n self.tearDown()\r\n while self._cleanups:\r\n function, args, kwargs = self._cleanups.pop(-1)\r\n function(*args, **kwargs)", "def handle_admindebugon(bot, event):\n event.chan.data.debug = True;\n event.chan.save()\n event.reply(\"debugging is enabled for %s\" % event.channel)", "def _debuglog(self, string):\n\t\tif self.debug:\n\t\t\tsys.stderr.write(\"MemCached: %s\\n\" % string)", "def debug(func):\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n if args and not kwargs:\n print(\"~ input of {}: args: {}\".format(func.__name__, args))\n elif not args and kwargs:\n print(\"~ input of {}: kwargs: {}\".format(func.__name__, kwargs))\n elif args and kwargs:\n print(\"~ input of {}: args: {}, kwargs: {}\".format(func.__name__, args, kwargs))\n else:\n print(\"~ input of {}: NO_ARGS\".format(func.__name__))\n output = func(*args, **kwargs) # stores the result of the function\n print(\"~ output of {}:\".format(func.__name__), output)\n return output\n\n return decorated", "def debug(func):\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n caller = sys._getframe().f_back.f_code.co_name\n args_repr = [repr(a) for a in args]\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()]\n signature = \", \".join(args_repr + kwargs_repr)\n logger.debug(f\"{caller} called {func.__name__}({signature})\")\n value = func(*args, **kwargs)\n logger.debug(f\"{func.__name__!r} returned {value!r}\")\n return value\n return wrapper_debug", "def debug(self, input):\n # Pass the debug information that you may think is important for your\n # evaluators\n debug_info = 'debug info'\n return debug_info", "def debug(self, input):\n # Pass the debug information that you may think is important for your\n # evaluators\n debug_info = 'debug info'\n return debug_info", "def debug(string):\n if verbose:\n print string\n return", "def set_debug(state):\n global _DEBUG\n _DEBUG = bool(state)", "def debug(line):\n sys.stderr.write(line + \"\\n\")\n sys.stderr.flush()", "def _debug_print(message):\n\n if _debug == True:\n print(message)", "def debug(self):\n #breakpoint() # infinite loop\n print(self.ttl)", "def hook_debug(module, input, 
output):\n print('Hooking ' + module.__class__.__name__)\n print('output size:', output.data.size())\n return output", "def debug(msg):\n if not DEBUG_ON:\n return\n print(\"DEBUG:\" + str(msg))", "def test_debug(self, test_debug: Debug):\n\n self._test_debug = test_debug", "def debug(self, msg=\"\"):\n if self.verbose:\n print(\"Debug: \" + msg)", "def debug(string):\n if conf.DEBUG:\n outputs.print_debug(string)", "def test_debug(self) -> Debug:\n return self._test_debug", "def debug():\n return int(DEBUG)", "def debug(verbose, bot, proxy, no_browsers=False, exp_config=None):\n debugger = DebugDeployment(Output(), verbose, bot, proxy, exp_config, no_browsers)\n log(header, chevrons=False)\n debugger.run()", "def toggle_debug(self):\n self.__debug = not self.__debug", "def NeedsDebugInfo(self):\n return True", "def debug(text, *args, **kwargs):\n logging.debug(resolve(text, *args, **kwargs))", "def debug(msg):\n return log().debug(msg)", "def debugPrint(text: str):\r\n if DEBUG:\r\n print(text)", "def checkDebug(message):\n if debug == True:\n print(message)", "def debug(func):\n if VERBOSE > 0:\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n args_repr = [repr(a) for a in args]\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()]\n signature = \", \".join(args_repr + kwargs_repr)\n\n print(f\"Calling {func.__name__}({signature})\\n\")\n value = func(*args, **kwargs)\n print(f\"{func.__name__!r} returned {value!r}\\n\")\n\n return value\n\n return wrapper_debug\n else:\n return func", "def debug(self, msg):\n debug(msg)", "def debug(statement,level=0):\n if config['debug']:\n if level <= config['debug_level']:\n print(statement)", "def set_debug(flag):\n global debug\n debug = flag\n XLM.XLM_Object.debug = flag\n XLM.xlm_library.debug = flag\n XLM.ms_stack_transformer.debug = flag\n XLM.stack_transformer.debug = flag\n XLM.excel2007.debug = flag" ]
[ "0.7278622", "0.69504553", "0.68875915", "0.6863419", "0.6848077", "0.6839908", "0.6770106", "0.66839147", "0.66665906", "0.66077083", "0.660491", "0.6530755", "0.6507357", "0.6416765", "0.63856596", "0.63856345", "0.6353304", "0.63263094", "0.6322992", "0.6319444", "0.63086915", "0.6280563", "0.6278247", "0.6272527", "0.6190115", "0.61791235", "0.6174057", "0.61706305", "0.61599326", "0.6112147", "0.6110951", "0.6091526", "0.60777193", "0.6058383", "0.60381263", "0.6033594", "0.6031773", "0.60268134", "0.6009691", "0.59975445", "0.5993259", "0.5990199", "0.5982645", "0.5953938", "0.5947851", "0.5927745", "0.59177613", "0.5893969", "0.58871233", "0.58871233", "0.5871119", "0.5867714", "0.58660865", "0.58629763", "0.58622134", "0.5846834", "0.58466065", "0.5842392", "0.5832003", "0.583131", "0.58140206", "0.5805578", "0.5798892", "0.57960534", "0.57934177", "0.579262", "0.5788096", "0.57818884", "0.5776865", "0.576532", "0.57638824", "0.5761428", "0.5760148", "0.5750597", "0.5741894", "0.5740338", "0.5740338", "0.57380456", "0.57378703", "0.57331294", "0.572784", "0.57244", "0.5724171", "0.57143307", "0.5697687", "0.569171", "0.5680831", "0.5669032", "0.56618816", "0.5652258", "0.56439537", "0.5640852", "0.56406766", "0.5628491", "0.56282467", "0.56206125", "0.56158465", "0.56148607", "0.5610583", "0.56093454" ]
0.8282989
0
Process batch and produce inputs for the model.
def process_batch(batch):
    args = get_args()

    tokens = batch['text'].long().cuda().contiguous()
    types = batch['types'].long().cuda().contiguous()
    labels = batch['label'].long().cuda().contiguous()
    attention_mask = batch['padding_mask'].float().cuda().contiguous()
    if args.fp16:
        attention_mask = attention_mask.half()

    return tokens, types, labels, attention_mask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_batch(self, inputs):\n for key, ipt in inputs.items():\n inputs[key] = ipt.to(self.device)\n\n # we only feed the image with frame_id 0 through the depth encoder\n features = self.models[\"encoder\"](inputs[\"color_aug\", 0, 0])\n outputs = self.models[\"depth\"](features)\n\n outputs.update(self.predict_poses(inputs, features))\n\n self.generate_images_pred(inputs, outputs)\n losses = self.compute_losses(inputs, outputs)\n\n return outputs, losses", "def process(self, data_batch: Any, data_samples: Sequence[dict]) -> None:", "def process_inputs(self, inputs):", "def get_inputs_(self, batch, **kwargs):\n raise NotImplementedError", "def forward(self, batch: Mapping[str, Any], **kwargs) -> Mapping[str, Any]:\n output = self._process_input(batch, **kwargs)\n output = self._process_output(output)\n return output", "def preprocess_model_inputs(self, data_batch):\n\n unique_ids, input_ids, input_mask, segment_ids, start_positions, \\\n end_positions, cls_index, p_mask, is_impossible = data_batch\n\n x = {\n \"input_ids\": input_ids,\n \"attention_mask\": input_mask,\n \"token_type_ids\": segment_ids,\n \"cls_index\": cls_index,\n \"p_mask\": p_mask\n }\n\n y = {\n \"unique_ids\": unique_ids,\n \"start_positions\": start_positions,\n \"end_positions\": end_positions,\n \"is_impossible\": is_impossible,\n }\n\n return x, y", "def process(self, data_batch: Any, predictions: Sequence[dict]) -> None:\n self.results.extend(_to_cpu(predictions))", "def preprocess_model_inputs(self, data_batch):\n\n return data_batch, np.array([])", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, batch):\n raise NotImplementedError", "def feed_inputs(self):\n feed_dict = collections.defaultdict(list)\n for i in range(self._pipe.batch_size):\n data = self.example_to_data(self._buffer.get())\n for k, v in data.items():\n feed_dict[k].append(v)\n for k, v in self.features.items():\n self._pipe.feed_input(self.features[k], feed_dict[k])", "def instantiate_batch(self, inputs):\n return inputs", "def process_batch(self, inputs):\n # Otherwise, we only feed the image with frame_id 0 through the depth encoder\n features, raw_hrnet_features = self.models[\"encoder\"](inputs[\"color_aug\", 0, 0])\n outputs = self.models[\"depth_decoder\"](features)\n\n if self.opt.use_dc:\n lambda_ = 1.0\n outputs['domain_classifier'] = self.models['domain_classifier'](raw_hrnet_features, lambda_)\n\n if self.opt.use_pose_net and \"real\" in self.syn_or_real:\n outputs.update(self.predict_poses(inputs, features))\n\n # convert estimated disparity from neural network to depth\n self.generate_images_pred_local(inputs, outputs)\n\n # loss functions\n losses = self.compute_losses_local(inputs, outputs)\n\n return outputs, losses", "def processInputs(self):", "def process(self, data_batch: Sequence[Dict],\n data_samples: Sequence[Dict]) -> None:\n for data_sample in data_samples:\n pred_labels = data_sample.get('pred_instances').get(self.key).cpu()\n gt_labels = data_sample.get('gt_instances').get(self.key).cpu()\n\n result = dict(\n pred_labels=pred_labels.flatten(),\n gt_labels=gt_labels.flatten())\n self.results.append(result)", "def predict_batch(self, model, context, data=None):\n pass", "def process_batch(self, data):\n [embedding_batch] = self._sess.run([self._embedding_tensor],\n feed_dict={self._features_tensor: data})\n return embedding_batch", "def preprocess(batch):\n batch_size = batch[\"idx\"].shape[0]\n input_ids = np.zeros((batch_size, max_len), dtype=np.int32)\n type_ids = 
np.zeros((batch_size, max_len), dtype=np.int32)\n\n for i in range(batch_size):\n sentence_a = batch[key_a][i]\n sentence_b = batch[key_b][i]\n tokens_a = tokenizer.EncodeAsIds(sentence_a)\n tokens_b = tokenizer.EncodeAsIds(sentence_b)[1:] # Strip start token\n\n ex_input_ids = (tokens_a + tokens_b)[:max_len]\n ex_type_ids = ([0] * len(tokens_a) + [1] * len(tokens_b))[:max_len]\n\n input_ids[i, :len(ex_input_ids)] = ex_input_ids\n type_ids[i, :len(ex_type_ids)] = ex_type_ids\n\n return {\n \"input_ids\": input_ids,\n \"type_ids\": type_ids,\n \"idx\": batch[\"idx\"].astype(np.int32),\n \"label\": batch[\"label\"],\n }", "def process_state_batch(self, batch):\n return batch", "def process_state_batch(self, batch):\n return batch", "def handle_batch(self, batch: Mapping[str, Any]) -> None:\n self.batch = {**batch, **self.forward(batch)}", "def process(self, data_batch: Sequence[dict],\n data_samples: Sequence[dict]) -> None:\n for data_sample in data_samples:\n # predicted keypoints coordinates, [1, K, D]\n pred_coords = data_sample['pred_instances']['keypoints']\n # ground truth data_info\n gt = data_sample['gt_instances']\n # ground truth keypoints coordinates, [1, K, D]\n gt_coords = gt['lifting_target']\n # ground truth keypoints_visible, [1, K, 1]\n mask = gt['lifting_target_visible'].astype(bool).reshape(1, -1)\n # instance action\n img_path = data_sample['target_img_path']\n _, rest = osp.basename(img_path).split('_', 1)\n action, _ = rest.split('.', 1)\n\n result = {\n 'pred_coords': pred_coords,\n 'gt_coords': gt_coords,\n 'mask': mask,\n 'action': action\n }\n\n self.results.append(result)", "def _prepare_batch(self, batch):\n try:\n import dgl\n except:\n raise ImportError('This class requires dgl.')\n\n inputs, labels, weights = batch\n dgl_graphs = [\n graph.to_dgl_graph(self_loop=self._self_loop) for graph in inputs[0]\n ]\n inputs = dgl.batch(dgl_graphs).to(self.device)\n _, labels, weights = super(MPNNModel, self)._prepare_batch(\n ([], labels, weights))\n return inputs, labels, weights", "def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool, ) -> List[Dict[str, paddle.Tensor]]:\n input_images = paddle.stack(\n [self.preprocess(x[\"image\"]) for x in batched_input], axis=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input,\n image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"],\n image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None), )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output, )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"], )\n masks = masks > self.mask_threshold\n outputs.append({\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n })\n return outputs", "def input_fn():\n bos_id = tf.constant(BOS_ID, tf.int32)\n eos_id = tf.constant(EOS_ID, tf.int32)\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading 
doesn't matter.\n d = tf.data.TFRecordDataset(input_files)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.map(lambda record: _decode_record(record, name_to_features))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n tf.concat([[bos_id], src_ids, [eos_id]], 0),\n tf.concat([tgt_ids, [eos_id]], 0),\n label))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n src_ids[:FLAGS.max_sequence_length],\n tgt_ids[:FLAGS.max_sequence_length],\n label\n ))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n tf.concat([src_ids, tgt_ids], 0),\n tf.concat([tf.zeros_like(src_ids), tf.ones_like(tgt_ids)], 0),\n label\n ))\n\n d = d.map(lambda input_ids, segment_ids, label_ids: (\n input_ids,\n segment_ids,\n tf.ones_like(input_ids),\n label_ids\n ))\n\n def batching_func(x):\n return x.padded_batch(\n batch_size,\n # The entry is the source line rows;\n # this has unknown-length vectors. The last entry is\n # the source row size; this is a scalar.\n padded_shapes=(\n tf.TensorShape([None]), # src\n tf.TensorShape([None]), # tgt\n tf.TensorShape([None]),\n tf.TensorShape([])), # src_len\n # Pad the source sequences with eos tokens.\n # (Though notice we don't generally need to do this since\n # later on we will be masking out calculations past the true sequence.\n padding_values=(\n PAD_ID, # src\n PAD_ID,\n PAD_ID,\n 0)) # src_len -- unused\n\n batched_dataset = batching_func(d)\n features = batched_dataset.map(lambda input_ids, segment_ids, input_mask, label:\n {\n \"input_ids\": input_ids,\n \"segment_ids\": segment_ids,\n \"input_mask\": input_mask,\n \"label_ids\": label\n\n })\n\n return features", "def infer_batch(self, input_seq, logger):\r\n return self.forward(input_seq, None)", "def eval_batch(self, outputs, target):\n raise NotImplementedError", "def transform_batch(self, inputs_batch, target_ids_batch, targets_batch):\n # extract one-hot encoded feature vectors and reshape them\n # so we can feed them to the RNN\n batch_inputs = inputs_batch.toarray()\n batch_inputs = batch_inputs.reshape(\n self.batch_size, self.max_num_ans, self.encoding_dim)\n # targets_batch is a list of lists, which we need to flatten\n batch_targets = [i for sublist in targets_batch for i in sublist]\n batch_targets = np.array(batch_targets, dtype=np.float32)\n # during learning, the data for each student in a batch gets shuffled together\n # hence, we need a vector of indices to locate their predictions after learning\n batch_target_ids = target_ids_batch.toarray()\n batch_target_ids = np.array(\n batch_target_ids.reshape(-1),\n dtype=np.int32)\n\n return batch_inputs, batch_target_ids, batch_targets", "def _data_generation(self, batch_data):\n # Initialization\n batch_x = []\n batch_y = defaultdict(list)\n\n for ind, item_data in batch_data.iterrows():\n img_path = os.path.join(self.img_dir, \"images\", \"rgb\", item_data[\"name\"])\n img = cv2.imread(img_path)\n try:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n except Exception as error:\n print(img_path)\n print(error)\n not_valid_mask = self.read_masks_borders(item_data[\"name\"])\n img[not_valid_mask] = 0\n\n # getmasks\n targets = np.zeros((img.shape[0], img.shape[1], len(self.classes)))\n for i, c in enumerate(self.classes):\n mask_path = os.path.join(self.img_dir, \"labels\", c, item_data[\"name\"])\n mask = cv2.imread(\n mask_path.replace(\".jpg\", \".png\"), cv2.IMREAD_GRAYSCALE\n )\n mask[not_valid_mask[:, :, 0]] = 0\n mask = mask > 0\n targets[:, :, i] = mask\n\n res = self.reshape_func(image=img, mask=targets)\n img, targets = 
res['image'], res['mask']\n if self.do_aug:\n res = self.aug(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n\n for i, c in enumerate(self.classes):\n batch_y[c].append(targets[:, :, i])\n\n batch_x.append(img)\n\n batch_x = np.array(batch_x, np.float32)\n batch_y = {k: np.array(v, np.float32) for k, v in batch_y.items()}\n batch_y = {k: np.expand_dims(v, axis=-1) for k, v in batch_y.items()}\n\n return (\n imagenet_utils.preprocess_input(batch_x, \"channels_last\", mode=\"tf\"),\n batch_y\n )", "def train(self, batch):\n pass", "def multiple_batch(self, handler, dataset):\n iterator = tf.data.Iterator.from_string_handle(handler, dataset.output_types, dataset.output_shapes)\n (src_ids, tgt_input_ids, tgt_output_ids, src_seq_len, tgt_seq_len) = (iterator.get_next())\n return BatchedInput(iterator=None,\n batched_dataset=None,\n handle=None,\n initializer=None,\n source=src_ids,\n target_input=tgt_input_ids,\n target_output=tgt_output_ids,\n source_sequence_length=src_seq_len,\n target_sequence_length=tgt_seq_len)", "def _batch_inference(self, batched_inputs, detected_instances=None):\n if detected_instances is None:\n detected_instances = [None] * len(batched_inputs)\n\n outputs = []\n all_scores = []\n all_boxes = []\n inputs, instances = [], []\n for idx, input, instance in zip(count(), batched_inputs, detected_instances):\n inputs.append(input)\n instances.append(instance)\n if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1:\n output, all_score, all_box = self.model.inference(\n inputs, instances if instances[0] is not None else None, do_postprocess=False\n )\n outputs.extend(output)\n all_scores.extend(all_score)\n all_boxes.extend(all_box)\n inputs, instances = [], []\n return outputs, all_scores, all_boxes", "def preprocess(batch):\n batch_size = batch[\"idx\"].shape[0]\n input_ids = np.zeros((batch_size, max_len), dtype=np.int32)\n type_ids = np.zeros((batch_size, max_len), dtype=np.int32)\n\n for i in range(batch_size):\n sentence_a = batch[key_a][i]\n tokens_a = tokenizer.EncodeAsIds(sentence_a)\n input_ids[i, :len(tokens_a)] = tokens_a[:max_len]\n\n return {\n \"input_ids\": input_ids,\n \"type_ids\": type_ids,\n \"idx\": batch[\"idx\"].astype(np.int32),\n \"label\": batch[\"label\"],\n }", "def process_batch(self, batch):\n # extend with current batch\n self._extend(batch)\n\n # unpack and compute bounds\n length = len(self.obs)\n c = self.c\n\n # normally we cannot compute samples for the last c elements, but\n # in the terminal case, we halluciante values where necessary\n end = length if batch.terminal else length - c\n\n # collect samples to return in a FeudalBatch\n feudal_batch = FeudalBatch()\n for t in range(c, end):\n\n # state difference\n s_diff = self.s[t + c] - self.s[t]\n\n # intrinsic reward\n ri = 0\n # note that this for loop considers s and g values\n # 1 timestep to c timesteps (inclusively) ago\n for i in range(1, c + 1):\n ri_s_diff = self.s[t] - self.s[t - i]\n if np.linalg.norm(ri_s_diff) != 0:\n ri += cosine_similarity(ri_s_diff, self.g[t - i])\n ri /= c\n\n # sum of g values used to derive w, input to the linear transform\n gsum = np.zeros_like(self.g[t - c])\n for i in range(t - c, t + 1):\n gsum += self.g[i]\n\n # add to the batch\n feudal_batch.add(self.obs[t], self.a[t], self.returns[t], s_diff,\n ri, gsum, self.features[t])\n\n # in the terminal case, set reset flag\n if batch.terminal:\n self.last_terminal = True\n # in the general case, forget all but the last 2 * c elements\n # reason being that the first c 
of those we have already computed\n # a batch for, and the second c need those first c\n else:\n twoc = 2 * self.c\n self.obs = self.obs[-twoc:]\n self.a = self.a[-twoc:]\n self.returns = self.returns[-twoc:]\n self.s = self.s[-twoc:]\n self.g = self.g[-twoc:]\n self.features = self.features[-twoc:]\n\n return feudal_batch.get_batch()", "def on_predict_batch_begin(self, batch, logs=None):", "def __predict_batch(self, model: AutoModel, batch: Tuple):\n input_ids_batch = batch[0]\n token_type_ids_batch = batch[1]\n attention_mask_batch = batch[2]\n\n output = model(\n input_ids=input_ids_batch,\n token_type_ids=token_type_ids_batch,\n attention_mask=attention_mask_batch,\n )\n\n logits = output.logits\n preds_batch = np.argmax(torch.softmax(logits, dim=1).detach().numpy(), axis=1)\n preds_batch_list = list(preds_batch)\n\n return preds_batch_list", "def process_state_batch(self, batch):\n # batch = np.squeeze(batch, axis=1)\n batch = np.array([np.concatenate(obs, axis=-1) for obs in batch])\n return batch", "def __batch_node(self, dataset, level):\n if isinstance(dataset, de.BatchDataset):\n return\n for input_op in dataset.input:\n self.__batch_node(input_op, level + 1)", "def run_batch(self, batch_x, batch_y):\n raise NotImplementedError()", "def evaluate_batch(self, batch: TorchData, model: nn.Module) -> Dict[str, Any]:\n pass", "def iter_batch(self):\n\n # model initialization\n self._set_train()\n\n if not self.batch_process:\n self.batch_process = self._train_batch()\n return self.batch_process.__next__()\n else:\n try:\n return self.batch_process.__next__()\n except StopIteration:\n # update the state if StopIteration\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | Average loss: { self.epoch_loss.avg }\")\n\n # update epoch and reset the epoch_loss\n self.epoch_loss.reset()\n self.epoch += 1\n\n # reset the batch process\n del self.batch_process\n self.batch_process = self._train_batch()\n return self.batch_process.__next__()", "def build_inputs(self):\n if self.mode == \"inference\":\n # In inference mode, images and inputs are fed via placeholders.\n image_feed = tf.placeholder(dtype=tf.string, shape=[], name=\"image_feed\")\n input_feed = tf.placeholder(\n dtype=tf.int64,\n shape=[None], # batch_size\n name=\"input_feed\")\n\n # Process image and insert batch dimensions.\n images = tf.expand_dims(self.load_image(image_feed), 0)\n input_seqs = tf.expand_dims(input_feed, 1)\n\n # No target sequences or input mask in inference mode.\n target_seqs = None\n input_mask = None\n else:\n def _load_example(serialized_example):\n encoded_image, caption = input_ops.parse_example(\n serialized_example,\n image_feature=self.config.image_feature_name,\n caption_feature=self.config.caption_feature_name)\n image = self.load_image(encoded_image)\n\n # strings.split expects a batch\n input_seqs, target_seqs, input_mask = input_ops.pad_caption_to_input(\n caption)\n return image, input_seqs, target_seqs, input_mask\n\n def _load_dataset(filename):\n return tf.data.TFRecordDataset(filename, buffer_size=16 * 1024 * 1024)\n\n df = tf.data.Dataset.list_files(\n self.config.input_file_pattern, shuffle=self.mode == \"train\")\n df = df.apply(\n tf.data.experimental.parallel_interleave(\n _load_dataset, cycle_length=64, sloppy=True))\n\n if self.mode == \"train\":\n df = df.repeat()\n df = df.shuffle(1024)\n\n df = df.apply(\n tf.data.experimental.map_and_batch(\n _load_example,\n self.config.batch_size,\n num_parallel_batches=8,\n drop_remainder=True))\n df = df.prefetch(8)\n images, input_seqs, 
target_seqs, input_mask = df.make_one_shot_iterator(\n ).get_next()\n\n self.images = images\n self.input_seqs = input_seqs\n self.target_seqs = target_seqs\n self.input_mask = input_mask", "def __call__(self):\n if self.numbatches is None:\n pool = self.pooler()\n if self.batchsize is None:\n self.batchsize = self.pooler.nInPool()\n self.numbatches = self.pooler.nInPool()//self.batchsize\n for i in xrange(self.numbatches):\n pool = self.pooler()\n self._reset_batch()\n if self.samplemethod == 'balance' and len(self.keysamplers)>0:\n batchinds,keyids = self._samplebalanced(pool)\n elif self.samplemethod == 'uniform':\n batchinds,keyids = self._sampleuniform(pool)\n else:\n batchinds,keyids = self._samplesequential(i)\n batch = self._extractInds(pool,batchinds,keyids)\n for k in batch:\n batch[k][np.isnan(batch[k])] = self.nanreplacement\n yield batch", "def _process_batch(self, batch):\n x, y, _ = batch\n x = x.float()\n y = F.one_hot(y, num_classes=2).float()\n return x, y", "def batch_iter(input_data,batch_size):\r\n batch_ids,batch_mask,batch_segment,batch_label=[],[],[],[]\r\n for features in input_data:\r\n if len(batch_ids) == batch_size:\r\n yield batch_ids,batch_mask,batch_segment,batch_label\r\n batch_ids, batch_mask, batch_segment, batch_label = [], [], [], []\r\n\r\n batch_ids.append(features['input_ids'])\r\n batch_mask.append(features['input_mask'])\r\n batch_segment.append(features['segment_ids'])\r\n batch_label.append(features['label_ids'])\r\n\r\n if len(batch_ids) != 0:\r\n yield batch_ids, batch_mask, batch_segment, batch_label", "def get_batch_inputs(self, inputs, batch_size=None):\n total_num = inputs.shape[0]\n batch_size = batch_size or self.batch_size\n for i in range(0, total_num, batch_size):\n yield inputs[i:i + batch_size]", "def move_and_process_input(batch):\n x, y = batch\n x = x.to(device).float()\n y = torch.as_tensor(y).to(device)\n x = x.permute(0, -1, 1, 2, 3)\n return x, y", "def build_inputs(self):\n # in prediction mode, we use a batch size of one\n batch_size = self.config.batch_size\n \n if self.mode == \"prediction\":\n batch_size = 1\n \n # In inference mode, images and inputs are fed via placeholders.\n image_feed = tf.placeholder(dtype=tf.string, shape=[], name=\"image_feed\") # shape: scalar value\n\n #image_fn_feed = tf.placeholder(dtype=tf.string, shape=[], name=\"image_fn_feed\")\n \n #image_filename_queue = tf.train.string_input_producer([image_fn_feed]) # list of files to read\n \n #reader = tf.WholeFileReader()\n #_, image_feed = reader.read(image_filename_queue)\n \n \n text_feed = tf.placeholder(dtype=tf.int64,\n shape=[None, self.config.sentence_length], # shape 2D tensor - variable size (first dimension sentence sequence, second dimension token sequence (actually fixed size))\n name=\"text_feed\")\n \n # arbitrary labels (not used)\n mi_label = tf.constant(-1, dtype=tf.int64) \n sc_label = tf.constant(-1.0, dtype=tf.float32) \n\n image = self.process_image(image_feed)\n\n # Process image and insert batch dimensions.\n images = tf.expand_dims(self.process_image(image_feed), 0)\n input_seqs = tf.expand_dims(text_feed, 0) \n mi_labels = tf.expand_dims(mi_label, 0)\n sc_labels = tf.expand_dims(sc_label, 0)\n input_mask = tf.expand_dims(tf.constant([1], dtype=tf.int32) , 0)\n \n else:\n # Prefetch serialized SequenceExample protos.\n input_queue = input_ops.prefetch_input_data(\n self.reader,\n self.config.input_file_pattern,\n is_training=self.is_training(),\n batch_size=batch_size,\n values_per_shard=self.config.values_per_input_shard,\n 
input_queue_capacity_factor=self.config.input_queue_capacity_factor,\n num_reader_threads=self.config.num_input_reader_threads,\n mode=self.mode)\n\n # Image processing and random distortion. Split across multiple threads\n # with each thread applying a slightly different distortion.\n assert self.config.num_preprocess_threads % 2 == 0\n images_and_texts = []\n for thread_id in range(self.config.num_preprocess_threads):\n serialized_sequence_example = input_queue.dequeue()\n encoded_image, text, mi, sc = input_ops.parse_sequence_example(\n serialized_sequence_example,\n image_feature=self.config.image_feature_name,\n sentences_feature=self.config.sentences_feature_name,\n sentence_length=self.config.sentence_length,\n mi_feature=self.config.mi_feature_name,\n sc_feature=self.config.sc_feature_name)\n image = self.process_image(encoded_image, thread_id=thread_id)\n images_and_texts.append([image, text, mi, sc])\n\n # Batch inputs.\n queue_capacity = (2 * self.config.num_preprocess_threads *\n batch_size)\n images, input_seqs, mi_labels, sc_labels, input_mask = (\n input_ops.batch_with_dynamic_pad(images_and_texts,\n batch_size=batch_size,\n queue_capacity=queue_capacity))\n \n #print('Shapes') \n #print('Shape images: ' + str(images.get_shape()))\n #print('Shape input_seqs: ' + str(input_seqs.get_shape())) \n #print('Shape input_mask: ' + str(input_mask.get_shape())) \n\n self.images = images\n self.input_seqs = input_seqs\n if self.mode == \"prediction\":\n self.mi_labels = None\n self.sc_labels = None\n else:\n self.mi_labels = mi_labels\n self.sc_labels = sc_labels\n self.input_mask = input_mask", "def forward_step(self, batch):\n input_ids = torch.as_tensor(batch.input_ids).to(self.device).reshape((1, -1)) # batch.get('input_ids').to(self.device)\n attention_mask = torch.as_tensor(batch.attention_mask).to(self.device).reshape((1, -1)) # batch.get('attention_mask').to(self.device)\n outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)[0]\n\n _, _, num_label = outputs.shape\n \"\"\"\n outputs : (batch, seq_length, feat_dim) => (seq_length, feat_dim)\n labels : (batch, seq_length) => (seq_length,)\n \"\"\"\n outputs = outputs.view(-1, num_label)\n labels = torch.argmax(outputs, dim=1) # torch.argmax(outputs, dim=1)\n batch_losses = self.criterion(outputs, labels)\n loss = torch.mean(batch_losses) # mean average\n self.batch_output = [input_ids, outputs]\n return loss", "def evaluate_batch(self, pipelines):", "def preprocess(self, inputs: InputsType, batch_size: int = 1, **kwargs):\n chunked_data = self._get_chunk_data(\n map(self.pipeline, inputs), batch_size)\n yield from map(self.collate_fn, chunked_data)", "def _prepare_batch(self, batch):\n try:\n import dgl\n except:\n raise ImportError(\"This class requires DGL to be installed.\")\n\n inputs, labels, weights = batch\n dgl_graphs = [graph.to_dgl_graph() for graph in inputs[0]]\n inputs = dgl.batch(dgl_graphs).to(self.device)\n _, labels, weights = super(LCNNModel, self)._prepare_batch(\n ([], labels, weights))\n return inputs, labels, weights", "def handle(self, data, context):\n \n model_input = self.preprocess(data)\n model_out = self.inference(model_input)\n return self.postprocess(model_out)", "def call(self, inputs, **kwargs):\n\n # unpack all the requires model inputs, some might be empty tensors:\n [queries, values, queries_mask, values_mask, ids, permutation,\n absolute_positions, relative_positions, pointer_labels, \n logits_labels, partial_pos, pointer_probs, log_probs,\n object_detections, 
object_features, object_boxes] = inputs\n\n y = self.detection_embedding(object_detections, **kwargs)\n values = self.dense(tf.concat([\n object_features, object_boxes, y], 2), **kwargs)\n a = position_encoding(tf.shape(queries)[1], self.hidden_size)\n b = self.word_embedding(queries, **kwargs)\n if self.mode == 'decoder':\n b = tf.matmul(absolute_positions, b)\n if self.decoder_pos_emb:\n b = a + b \n elif self.mode == 'pt' and self.decoder_pos_emb:\n # we do need positional encoding for Permutation Transformer\n b = a + b\n \n return [b, values, queries_mask, values_mask, ids, permutation,\n absolute_positions, relative_positions,\n pointer_labels, logits_labels, \n partial_pos, pointer_probs, log_probs,\n object_detections, object_features, object_boxes]", "def _train_batch(self):\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n result = self._batch_iter(source, target, i)\n\n # yield\n yield result", "def forward(self, batch_inputs):\n\n batch_seq_wordpiece_tokens_repr, batch_seq_cls_repr = self.bert_encoder(\n batch_inputs['wordpiece_tokens'])\n batch_seq_tokens_repr = batched_index_select(batch_seq_wordpiece_tokens_repr,\n batch_inputs['wordpiece_tokens_index'])\n\n results = {}\n\n entity_feature = self.entity_span_extractor(batch_seq_tokens_repr,\n batch_inputs['span_mention'])\n entity_feature = self.ent2hidden(entity_feature)\n\n subj_pos = torch.LongTensor([-1, 0, 1, 2, 3]) + 3\n obj_pos = torch.LongTensor([-3, -2, -1, 0, 1]) + 3\n\n if self.device > -1:\n subj_pos = subj_pos.cuda(device=self.device, non_blocking=True)\n obj_pos = obj_pos.cuda(device=self.device, non_blocking=True)\n\n subj_pos_emb = self.position_embedding(subj_pos)\n obj_pos_emb = self.position_embedding(obj_pos)\n pos_emb = torch.cat([subj_pos_emb, obj_pos_emb], dim=1).unsqueeze(0).repeat(\n batch_inputs['wordpiece_tokens_index'].size()[0], 1, 1)\n\n span_mention_attention_repr = self.attention_encoder(inputs=entity_feature,\n query=batch_seq_cls_repr,\n feature=pos_emb)\n results['span_mention_repr'] = self.mlp_head2(self.mlp_head1(span_mention_attention_repr))\n\n if self.momentum:\n return results\n\n zero_loss = torch.Tensor([0])\n zero_loss.requires_grad = True\n if self.device > -1:\n zero_loss = zero_loss.cuda(device=self.device, non_blocking=True)\n\n if sum([len(masked_index) for masked_index in batch_inputs['masked_index']]) == 0:\n results['masked_token_loss'] = zero_loss\n else:\n masked_wordpiece_tokens_repr = []\n all_masked_label = []\n for masked_index, masked_position, masked_label, seq_wordpiece_tokens_repr in zip(\n batch_inputs['masked_index'], batch_inputs['masked_position'],\n batch_inputs['masked_label'], batch_seq_wordpiece_tokens_repr):\n masked_index_tensor = torch.LongTensor(masked_index)\n masked_position_tensor = torch.LongTensor(masked_position)\n\n if self.device > -1:\n masked_index_tensor = masked_index_tensor.cuda(device=self.device,\n non_blocking=True)\n masked_position_tensor = masked_position_tensor.cuda(device=self.device,\n non_blocking=True)\n\n masked_wordpiece_tokens_repr.append(\n torch.cat([\n seq_wordpiece_tokens_repr[masked_index_tensor],\n self.global_position_embedding(masked_position_tensor)\n ],\n dim=1))\n all_masked_label.extend(masked_label)\n\n masked_wordpiece_tokens_input = torch.cat(masked_wordpiece_tokens_repr, dim=0)\n masked_wordpiece_tokens_output = self.masked_token_decoder(\n self.masked_token_mlp(\n masked_wordpiece_tokens_input)) + self.masked_token_decoder_bias\n\n all_masked_label_tensor = 
torch.LongTensor(all_masked_label)\n if self.device > -1:\n all_masked_label_tensor = all_masked_label_tensor.cuda(device=self.device,\n non_blocking=True)\n results['masked_token_loss'] = self.masked_token_loss(masked_wordpiece_tokens_output,\n all_masked_label_tensor)\n\n all_spans = []\n all_spans_label = []\n all_seq_tokens_reprs = []\n for spans, spans_label, seq_tokens_repr in zip(batch_inputs['spans'],\n batch_inputs['spans_label'],\n batch_seq_tokens_repr):\n all_spans.extend(spans)\n all_spans_label.extend(spans_label)\n all_seq_tokens_reprs.extend(seq_tokens_repr for _ in range(len(spans)))\n\n assert len(all_spans) == len(all_seq_tokens_reprs) and len(all_spans) == len(\n all_spans_label)\n\n if len(all_spans) == 0:\n results['span_loss'] = zero_loss\n else:\n if self.span_batch_size > 0:\n all_span_loss = []\n for idx in range(0, len(all_spans), self.span_batch_size):\n batch_ents_tensor = torch.LongTensor(\n all_spans[idx:idx + self.span_batch_size]).unsqueeze(1)\n if self.device > -1:\n batch_ents_tensor = batch_ents_tensor.cuda(device=self.device,\n non_blocking=True)\n\n batch_seq_tokens_reprs = torch.stack(all_seq_tokens_reprs[idx:idx +\n self.span_batch_size])\n\n batch_spans_feature = self.ent2hidden(\n self.entity_span_extractor(batch_seq_tokens_reprs,\n batch_ents_tensor).squeeze(1))\n\n batch_spans_label = torch.LongTensor(all_spans_label[idx:idx +\n self.span_batch_size])\n if self.device > -1:\n batch_spans_label = batch_spans_label.cuda(device=self.device,\n non_blocking=True)\n\n span_outputs = self.entity_span_decoder(\n self.entity_span_mlp(batch_spans_feature), batch_spans_label)\n\n all_span_loss.append(span_outputs['loss'])\n results['span_loss'] = sum(all_span_loss) / len(all_span_loss)\n else:\n all_spans_tensor = torch.LongTensor(all_spans).unsqueeze(1)\n if self.device > -1:\n all_spans_tensor = all_spans_tensor.cuda(device=self.device, non_blocking=True)\n all_seq_tokens_reprs = torch.stack(all_seq_tokens_reprs)\n all_spans_feature = self.entity_span_extractor(all_seq_tokens_reprs,\n all_spans_tensor).squeeze(1)\n\n all_spans_feature = self.ent2hidden(all_spans_feature)\n\n all_spans_label = torch.LongTensor(all_spans_label)\n if self.device > -1:\n all_spans_label = all_spans_label.cuda(device=self.device, non_blocking=True)\n\n entity_typing_outputs = self.entity_span_decoder(\n self.entity_span_mlp(all_spans_feature), all_spans_label)\n\n results['span_loss'] = entity_typing_outputs['loss']\n\n return results", "def _process_batch(sess, original_images, semantic_predictions, image_names,\n image_heights, image_widths, image_id_offset, save_dir,\n raw_save_dir, train_id_to_eval_id=None):\n (original_images,\n semantic_predictions,\n image_names,\n image_heights,\n image_widths) = sess.run([original_images, semantic_predictions,\n image_names, image_heights, image_widths])\n\n num_image = semantic_predictions.shape[0]\n for i in range(num_image):\n image_height = np.squeeze(image_heights[i])\n image_width = np.squeeze(image_widths[i])\n original_image = np.squeeze(original_images[i])\n semantic_prediction = np.squeeze(semantic_predictions[i])\n crop_semantic_prediction = semantic_prediction[:image_height, :image_width]\n\n # Save image.\n save_annotation.save_annotation(\n original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),\n add_colormap=False)\n\n # Save prediction.\n save_annotation.save_annotation(\n crop_semantic_prediction, save_dir,\n _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,\n 
colormap_type=FLAGS.colormap_type)\n\n if FLAGS.also_save_raw_predictions:\n image_filename = os.path.basename(image_names[i])\n\n if train_id_to_eval_id is not None:\n crop_semantic_prediction = _convert_train_id_to_eval_id(\n crop_semantic_prediction,\n train_id_to_eval_id)\n save_annotation.save_annotation(\n crop_semantic_prediction, raw_save_dir, image_filename,\n add_colormap=False)", "def flow(self, batch_size=32):\n nb_batches = int(len(self.image_ids_in_subset) / batch_size) + 1\n while True:\n # Before each epoch we shuffle the images' ids\n random.shuffle(self.image_ids_in_subset)\n\n for i in range(nb_batches):\n # We first get all the image ids for the next batch\n current_bach = self.image_ids_in_subset[i*batch_size:(i+1)*batch_size]\n X_batch = []\n Y_batch = []\n\n for image_id in current_bach:\n # Load the image and resize it. We get a PIL Image object\n img = image.load_img(self.get_img_path(int(image_id)), grayscale=False, target_size=(cfg.IMAGE.IMG_SIZE, cfg.IMAGE.IMG_SIZE))\n # Cast the Image object to a numpy array and put the channel has the last dimension\n img_arr = image.img_to_array(img, data_format='channels_last')\n X_batch.append(img_arr)\n # Y_batch.append(self.id_to_label[image_id])\n Y_batch.append(self.get_labels(image_id))\n\n # resize X_batch in (batch_size, IMG_HEIGHT, IMG_WIDTH, 3)\n X_batch = np.reshape(X_batch, (-1, cfg.IMAGE.IMG_SIZE, cfg.IMAGE.IMG_SIZE, 3))\n # resize Y_batch in (None, nb_classes)\n Y_batch = np.reshape(Y_batch, (-1, self.nb_classes))\n\n # substract mean values from imagenet\n X_batch = preprocess_input(X_batch, data_format='channels_last')\n yield(X_batch, Y_batch)", "def generate_layer_outputs(self, input_batch: Union[np.ndarray, List[np.ndarray], Tuple[np.ndarray]]):\n logger.info(\"Generating layer-outputs for %d input instances\", len(input_batch))\n\n input_dict = create_input_dict(self.model, input_batch)\n\n layer_output_dict = self.layer_output.get_outputs(input_dict)\n\n self.save_input_output.save(input_batch, layer_output_dict)\n\n logger.info('Layer-outputs generated for %d input instances', len(input_batch))", "def process(self, inputs):\n output = None\n return output", "def _get_batch(self):\n # index = self._index[self._current]\n # im_path = self._imdb.image_path_from_index(0)\n # im_path = 'data/demo/dog.jpg'\n # with open(im_path, 'rb') as fp:\n # img_content = fp.read()\n\n batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1]))\n batch_label = [] \n global imgi\n # img = mx.nd.array(imgi)\n # imgr = mx.img.imdecode(img_content)\n data = self._data_augmentation(imgi)\n batch_data[0] = data\n \n self._data = {'data': batch_data}\n self._label = {'label': None}", "def feed_batch(self, generated_batch, generated_labels):\n _, self.act2, _ = self.inference_net(generated_batch.cuda(self.gpu_id))\n self.g_labels = generated_labels", "def _process_batch_data(\n self, tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]]\n ) -> Tuple[tf.Tensor, Optional[tf.Tensor], Optional[tf.Tensor]]:\n # encode each attribute present in tf_batch_data\n text_output = None\n text_sequence_lengths = None\n batch_encoded = {}\n for attribute in tf_batch_data.keys():\n if attribute in SENTENCE_FEATURES_TO_ENCODE + STATE_LEVEL_FEATURES:\n (\n attribute_features,\n _text_output,\n _text_sequence_lengths,\n ) = self._encode_features_per_attribute(tf_batch_data, attribute)\n\n batch_encoded[attribute] = attribute_features\n if attribute == TEXT:\n text_output = _text_output\n text_sequence_lengths = 
_text_sequence_lengths\n\n # if both action text and action name are present, combine them; otherwise,\n # return the one which is present\n\n if (\n batch_encoded.get(ACTION_TEXT) is not None\n and batch_encoded.get(ACTION_NAME) is not None\n ):\n batch_action = batch_encoded.pop(ACTION_TEXT) + batch_encoded.pop(\n ACTION_NAME\n )\n elif batch_encoded.get(ACTION_TEXT) is not None:\n batch_action = batch_encoded.pop(ACTION_TEXT)\n else:\n batch_action = batch_encoded.pop(ACTION_NAME)\n # same for user input\n if (\n batch_encoded.get(INTENT) is not None\n and batch_encoded.get(TEXT) is not None\n ):\n batch_user = batch_encoded.pop(INTENT) + batch_encoded.pop(TEXT)\n elif batch_encoded.get(TEXT) is not None:\n batch_user = batch_encoded.pop(TEXT)\n else:\n batch_user = batch_encoded.pop(INTENT)\n\n batch_features = [batch_user, batch_action]\n # once we have user input and previous action,\n # add all other attributes (SLOTS, ACTIVE_LOOP, etc.) to batch_features;\n for key in batch_encoded.keys():\n batch_features.append(batch_encoded.get(key))\n\n batch_features = tf.concat(batch_features, axis=-1)\n\n return batch_features, text_output, text_sequence_lengths", "def call(self, inputs, *args, **kwargs):\n batch_dims = inputs.shape[:nest_utils.get_outer_rank(inputs, self._spec)]\n num_batch_elems = tf.reduce_prod(batch_dims)\n transformed_inputs = tf.reshape(inputs, (num_batch_elems, -1))\n result = self._batch(transformed_inputs, *args, **kwargs)\n return tf.reshape(result, inputs.shape)", "def compute_forward(self, batch, stage):\n batch = batch.to(self.device)\n wavs, lens = batch.sig\n\n # Feature extraction and normalization\n feats = self.modules.compute_features(wavs)\n feats = self.modules.mean_var_norm(feats, lens)\n\n # Embeddings + speaker classifier\n embeddings = self.modules.embedding_model(feats)\n outputs = self.modules.classifier(embeddings)\n\n return outputs", "def batch_generate(self, inputs, labels, batch_size=64):\n inputs_image, inputs, labels = check_inputs_labels(inputs, labels)\n arr_x = inputs\n arr_y = labels\n len_x = inputs_image.shape[0]\n batch_size = check_int_positive('batch_size', batch_size)\n batches = int(len_x / batch_size)\n rest = len_x - batches*batch_size\n res = []\n for i in range(batches):\n if isinstance(arr_x, tuple):\n x_batch = tuple([sub_items[i*batch_size: (i + 1)*batch_size] for sub_items in arr_x])\n else:\n x_batch = arr_x[i*batch_size: (i + 1)*batch_size]\n if isinstance(arr_y, tuple):\n y_batch = tuple([sub_labels[i*batch_size: (i + 1)*batch_size] for sub_labels in arr_y])\n else:\n y_batch = arr_y[i*batch_size: (i + 1)*batch_size]\n adv_x = self.generate(x_batch, y_batch)\n # Black-attack methods will return 3 values, just get the second.\n res.append(adv_x[1] if isinstance(adv_x, tuple) else adv_x)\n\n if rest != 0:\n if isinstance(arr_x, tuple):\n x_batch = tuple([sub_items[batches*batch_size:] for sub_items in arr_x])\n else:\n x_batch = arr_x[batches*batch_size:]\n if isinstance(arr_y, tuple):\n y_batch = tuple([sub_labels[batches*batch_size:] for sub_labels in arr_y])\n else:\n y_batch = arr_y[batches*batch_size:]\n adv_x = self.generate(x_batch, y_batch)\n # Black-attack methods will return 3 values, just get the second.\n res.append(adv_x[1] if isinstance(adv_x, tuple) else adv_x)\n\n adv_x = np.concatenate(res, axis=0)\n return adv_x", "def get_batch(batch_data, config):\n N = len(batch_data['obs_traj_rel'])\n P = config.P\n OF = config.flow_size\n T_in = config.obs_len\n T_pred = config.pred_len\n\n returned_inputs = []\n 
traj_obs_gt = np.zeros([N, T_in, P], dtype='float32')\n traj_pred_gt = np.zeros([N, T_pred, P], dtype='float32')\n # --- xy input\n for i, (obs_data, pred_data) in enumerate(zip(batch_data['obs_traj_rel'],\n batch_data['pred_traj_rel'])):\n for j, xy in enumerate(obs_data):\n traj_obs_gt[i, j, :] = xy\n for j, xy in enumerate(pred_data):\n traj_pred_gt[i, j, :] = xy\n returned_inputs.append(traj_obs_gt)\n # ------------------------------------------------------\n # Social component (through optical flow)\n if config.add_social:\n obs_flow = np.zeros((N, T_in, OF),dtype ='float32')\n # each batch\n for i, flow_seq in enumerate(batch_data['obs_optical_flow']):\n for j , flow_step in enumerate(flow_seq):\n obs_flow[i,j,:] = flow_step\n returned_inputs.append(obs_flow)\n # -----------------------------------------------------------\n # Person pose input\n if config.add_kp:\n obs_kp = np.zeros((N, T_in, KP, 2), dtype='float32')\n # each bacth\n for i, obs_kp_rel in enumerate(batch_data['obs_kp_rel']):\n for j, obs_kp_step in enumerate(obs_kp_rel):\n obs_kp[i, j, :, :] = obs_kp_step\n return returned_inputs,traj_pred_gt", "def on_predict_batch_begin(self, step, logs=None):", "def featurize_batch(\n self, input_record_list: Sequence[InputRecord]\n ) -> Sequence[OutputRecord]:\n return [self.featurize(record) for record in input_record_list]", "def process(self, conn):\n batch = msgpack.unpackb(self._request(conn), raw=False)\n ids = list(batch.keys())\n self.logger.debug(f'Received job ids: {ids}')\n\n # validate request\n validated = []\n errors = []\n for i, byte in enumerate(batch.values()):\n try:\n data = self._unpack(byte)\n obj = self.req_schema.parse_obj(data)\n validated.append(obj)\n self.logger.debug(f'{obj} passes the validation')\n except ValidationError as err:\n errors.append((i, self._pack(err.errors())))\n self.logger.info(\n f'Job {ids[i]} validation error',\n extra={'Validation': err.errors()}\n )\n except (json.JSONDecodeError,\n msgpack.ExtraData, msgpack.FormatError, msgpack.StackError) as err:\n errors.append((i, self._pack(str(err))))\n self.logger.info(f'Job {ids[i]} error: {err}')\n\n # inference\n self.logger.debug(f'Validated: {validated}, Errors: {errors}')\n result = []\n if validated:\n result = self.infer(validated)\n assert len(result) == len(validated), (\n 'Wrong number of inference results. 
'\n f'Expcet {len(validated)}, get{len(result)}.'\n )\n\n # validate response\n for data in result:\n self.resp_schema.parse_obj(data)\n\n # add errors information\n err_ids = ''\n result = [self._pack(data) for data in result]\n for index, err_msg in errors:\n err_ids += ids[index]\n result.insert(index, err_msg)\n\n # build batch job table\n resp = dict(zip(ids, result))\n if err_ids:\n resp['error_ids'] = err_ids\n self._response(conn, resp)", "def forward(self, batch):\n self.output = np.dot(np.array(batch), self.weights) + self.biases", "def process_batch(self, X, y):\n # normalize to [-1.0, 1.0]\n X = X / 127.5 - 1.0\n\n for i in range(X.shape[0]):\n # scaling and bias for contrast and brightness augmentation\n scale = 1.0 + 0.1 * np.random.randn()\n bias = 0.0 + 0.1 * np.random.randn()\n X[i] = np.clip(scale*X[i] + bias, -1.0, 1.0)\n\n # transformations for geometric augmentations\n angle = 6.0 * np.random.randn()\n zoom = 1 + 0.1 * np.random.randn()\n translation = 2.0 * np.random.randn()\n shear = 0.1 * np.random.randn()\n\n trafo = skimage.transform.AffineTransform(\n translation = translation,\n rotation = np.deg2rad(angle),\n scale = (zoom, zoom),\n shear = shear)\n centered_trafo = (self.postshift + (trafo + self.preshift))\n X[i] = skimage.transform.warp(X[i], centered_trafo, mode = \"edge\", order = 1)\n return X, y", "def _prepare_batch(self, batch):\n try:\n from torch_geometric.data import Batch\n except:\n raise ValueError(\"This class requires PyTorch Geometric to be installed.\")\n\n inputs, labels, weights = batch\n pyg_graphs = [graph.to_pyg_graph() for graph in inputs[0]]\n inputs = Batch.from_data_list(pyg_graphs)\n _, labels, weights = super(GATModel, self)._prepare_batch(([], labels,\n weights))\n return inputs, labels, weights", "def postprocessing(batch, vocab):\n\n return batch", "def process(self):\n self.extract()\n self.transform()\n self.load()", "def preprocessing_pipeline(self):\n self.__multilabel_processing()\n self.__split_dataset()\n self.__save_datasets()", "def predict_batch_generator(self):\n input = np.zeros((self.batch_size, self.max_seq_len,\n self.embedding_size))\n seq_lengths = np.zeros((self.batch_size), dtype=np.intp)\n unique_counts = np.zeros((self.batch_size), dtype=np.intp)\n i = 0\n\n fi = open(self.config.parsed_predict_file)\n sample_gen = self.predict_sample_generator(fi)\n self.load_embedding()\n\n for sequence, seq_length, unique_count in sample_gen:\n seq_lengths[i], unique_counts[i] = seq_length, unique_count\n if seq_lengths[i] > self.max_seq_len:\n seq_lengths[i] = self.max_seq_len\n sequence = sequence[:seq_lengths[i]]\n input[i, 0:seq_lengths[i], :] = self.embedding[sequence, :]\n\n i += 1\n\n if i == self.batch_size:\n yield input, seq_lengths, unique_counts\n input = np.zeros(\n (self.batch_size, self.max_seq_len,\n self.embedding_size)\n )\n i = 0\n\n if i < self.batch_size:\n yield input[:i, :, :], seq_lengths[:i], unique_counts[:i]\n\n fi.close()", "def processBatchFunc(inputBatch): #this is redefined automatically, no need to do any special code here\n with tf.Session(graph=graph, config=config) as sess:\n sess.run(init)\n \n (outValue, ) = sess.run([outputs], feed_dict={inputs: inputBatch})\n \n #print \"count now is: %d\" % count\n\n return outValue", "def process_batch(self, batch):\n # shapes are [time, ...original dims...]\n v_global = np.stack(batch[:,0]) # [time, agents, l_state_one_agent]\n # note that *_local objects have shape\n # [time, agents, ...original dim...]\n obs_others = np.stack(batch[:,1]) # 
[time,agents,h,w,c] or [time, agents, obs_others]\n v_local = np.stack(batch[:,2]) # [time,agents,l]\n actions = np.stack(batch[:,3]) # [time,agents]\n reward = np.stack(batch[:,4]) # [time]\n reward_local = np.stack(batch[:,5]) # [time,agents]\n v_global_next = np.stack(batch[:,6]) # [time, agents, l_state_one_agent]\n obs_others_next = np.stack(batch[:,7]) # [time,agents,h,w,c]\n v_local_next = np.stack(batch[:,8]) # [time,agents,l]\n done = np.stack(batch[:,9]) # [time]\n goals = np.stack(batch[:,10]) # [time, agents, l_goal]\n\n batch = None\n \n n_steps = v_global.shape[0]\n \n # For all global quantities, for each time step,\n # duplicate values <n_agents> times for\n # batch processing of all agents\n reward = np.repeat(reward, self.n_agents, axis=0)\n\n # In-place reshape for *_local quantities,\n # so that one time step for one agent is considered\n # one batch entry\n if self.experiment == 'sumo':\n obs_others.shape = (n_steps*self.n_agents, self.h_obs,\n self.w_obs, self.c_obs)\n obs_others_next.shape = (n_steps*self.n_agents, self.h_obs,\n self.w_obs, self.c_obs)\n elif self.experiment == 'particle':\n obs_others.shape = (n_steps*self.n_agents, self.l_obs_others)\n obs_others_next.shape = (n_steps*self.n_agents, self.l_obs_others)\n v_local.shape = (n_steps*self.n_agents, self.l_obs)\n reward_local.shape = (n_steps*self.n_agents)\n v_local_next.shape = (n_steps*self.n_agents, self.l_obs)\n\n actions_1hot, actions_others_1hot = self.process_actions(n_steps, actions)\n \n return n_steps, v_global, obs_others, v_local, actions_1hot, actions_others_1hot, reward, reward_local, v_global_next, obs_others_next, v_local_next, done, goals", "def update_batch(self, *args, **kwargs):\n pass", "def _get_batch_data(self, batch):\n try:\n encoders = [ encoder for encoder in self._data_encoder ]\n except:\n encoders = (self._data_encoder,)\n\n try:\n data_batches = [ encoder.transform_batch(rec for _, rec in batch.iterrows())\n for encoder in encoders ]\n except AttributeError:\n data_batches = [\n [ self._get_data(record, encoder) for _, record in batch.iterrows() ]\n for encoder in encoders ]\n\n try:\n batches = [ np.array(encoder.finalize_batch(batch))\n for encoder, batch in zip(encoders, data_batches)]\n except AttributeError:\n batches = [ np.array(batch) for batch in data_batches ]\n\n return batches if len(batches) > 1 else batches[0]", "def preprocess(self, requests):\r\n input_batch = None\r\n for idx, data in enumerate(requests):\r\n text = data.get(\"data\")\r\n if text is None:\r\n text = data.get(\"body\")\r\n input_text = text.decode('utf-8')\r\n\r\n ################input处理\r\n question = input_text\r\n entity = self.NER(question)\r\n print('your question:{}\\nentity:{}'.format(question,entity))\r\n ################处理完毕\r\n return [entity]", "def __call__(self, inputs):\n for layer in self.prenet_layers:\n f = layer(inputs)\n if self._enable_dropout:\n inputs = tf.layers.dropout(\n f, rate=self._dropout, training=True\n )\n else:\n inputs = tf.layers.batch_normalization(\n f, momentum=0.1, epsilon=1e-5\n )\n inputs = self._activation_fn(inputs)\n return inputs", "def batch_generator(batch_size):\n\n # Infinite loop.\n while True:\n # Get a list of random indices for images in the training-set.\n idx = np.random.randint(100,size=batch_size)\n \n # Get the pre-computed transfer-values for those images.\n # These are the outputs of the pre-trained image-model.\n transf_values = np.array([transfer_values[_] for _ in idx])\n\n # For each of the randomly chosen images there are\n # 
at least 5 captions describing the contents of the image.\n # Select one of those captions at random and get the\n # associated sequence of integer-tokens.\n tokens = [caps_markedwords[_] for _ in idx]\n\n # Count the number of tokens in all these token-sequences.\n num_tokens = [len(t) for t in tokens]\n \n # Max number of tokens.\n max_tokens = np.max(num_tokens)\n # Pad all the other token-sequences with zeros\n # so they all have the same length and can be\n # input to the neural network as a numpy array.\n tokens_padded = pad_sequences(tokens,\n maxlen=max_tokens,\n padding='post',\n truncating='post')\n \n # Further prepare the token-sequences.\n # The decoder-part of the neural network\n # will try to map the token-sequences to\n # themselves shifted one time-step.\n decoder_input_data = tokens_padded[:, 0:-1]\n decoder_output_data = tokens_padded[:, 1:]\n\n # Dict for the input-data. Because we have\n # several inputs, we use a named dict to\n # ensure that the data is assigned correctly.\n x_data = \\\n {\n 'decoder_input': decoder_input_data,\n 'transfer_values_input': transf_values\n }\n\n\n # Dict for the output-data.\n y_data = \\\n {\n 'decoder_output': decoder_output_data\n }\n \n yield (x_data, y_data)", "def predict_on_batch(self, X):\n len_unpadded = len(X)\n if self.pad_batches:\n X = pad_features(self.batch_size, X)\n\n if not self._restored_model:\n self.restore()\n with self.eval_graph.graph.as_default():\n\n # run eval data through the model\n n_tasks = self.n_tasks\n output = []\n with self._get_shared_session(train=False).as_default():\n feed_dict = self.construct_feed_dict(X)\n data = self._get_shared_session(train=False).run(\n self.eval_graph.output, feed_dict=feed_dict)\n batch_output = np.asarray(data[:n_tasks], dtype=float)\n # reshape to batch_size x n_tasks x ...\n if batch_output.ndim == 3:\n batch_output = batch_output.transpose((1, 0, 2))\n elif batch_output.ndim == 2:\n batch_output = batch_output.transpose((1, 0))\n else:\n raise ValueError('Unrecognized rank combination for output: %s' %\n (batch_output.shape,))\n output.append(batch_output)\n\n outputs = np.array(\n from_one_hot(np.squeeze(np.concatenate(output)), axis=-1))\n\n outputs = np.copy(outputs)\n outputs = np.reshape(outputs, (len(X), n_tasks))\n outputs = outputs[:len_unpadded]\n return outputs", "def __call__(\n self, data_batch: Dict[str, List[str]]\n ) -> Tuple[\n BatchEncoding,\n List[Dict[str, Union[int, str]]],\n List[SquadExample],\n List[SquadFeatures],\n ]:\n self._check_values_len(data_batch)\n concatenated_batch, evidences = self._concatenate_batch(data_batch)\n dataset, examples, features = load_examples(\n concatenated_batch, self.tokenizer, evaluate=True, output_examples=True\n )\n\n input_ids = [torch.unsqueeze(instance[0], 0) for instance in dataset]\n attention_mask = [torch.unsqueeze(instance[1], 0) for instance in dataset]\n token_type_ids = [torch.unsqueeze(instance[2], 0) for instance in dataset]\n\n output = {\n \"input_ids\": torch.cat(input_ids, axis=0),\n \"attention_mask\": torch.cat(attention_mask, axis=0),\n \"token_type_ids\": torch.cat(token_type_ids, axis=0),\n }\n output = BatchEncoding(output)\n\n return output, evidences, examples, features", "def forward_batch(self,batcher,phase=0):\n mapped_results={}\n inputs=batcher.get_batched_input(mapper=self)\n for type_ in inputs.keys():\n mapper = self.mappers[type_]\n\n mapped_results[type_] = mapper.forward_batch(inputs[type_],phase=0)\n return mapped_results", "def generate_inputs_and_wrap_model(config_path, 
checkpoint_path, input_config):\n\n model = get_detector(cfg, checkpoint_path, device=\"cpu\")\n one_img, one_meta = preprocess_example_input(input_config)\n tensor_data = [one_img]\n model.forward = partial(model.forward, img_metas=[[one_meta]], return_loss=False)\n\n return model, tensor_data", "def training_step(self, batch, batch_nb):\n # batch\n input_ids, attention_mask, token_type_ids, labels, emph_probs = batch\n inputs = {\n 'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'labels': labels,\n }\n\n # XLM and RoBERTa don't use segment_ids\n if self.hparams.model_type != 'distilbert':\n inputs['token_type_ids'] = (\n token_type_ids if self.hparams.model_type in ['bert', 'xlnet'] else None\n )\n\n # forward and loss\n loss, _ = self.forward(**inputs)\n\n # logs\n logs = {\n 'train_loss': loss,\n 'lr': self.lr_scheduler.get_last_lr()[-1],\n }\n\n # output dict\n output = {\n 'loss': loss,\n 'progress_bar': logs,\n 'log': logs\n }\n return output", "def inference(self, input_batch):\r\n inferences = []\r\n # Handling inference for token_classification.\r\n batch_size = len(input_batch)\r\n\r\n num_rows = batch_size\r\n for i in range(num_rows):\r\n inferences.append({'entity':input_batch[i]})\r\n logger.info(\"Model predicted: '%s'\", input_batch)\r\n\r\n return inferences", "def multi_input_generator(df, batch_size, source_dir,shuffle=True):\n\n idx = 0\n\n while True:\n if shuffle:\n batch = df.sample(n=batch_size, replace=False)\n else:\n batch = df.loc[idx:(idx*batch_size), :] #attention:works only with batch_size=1\n\n batch_input1 = []\n batch_input2 = []\n batch_output = []\n\n # Read in each input, perform preprocessing and get labels\n for i in batch.index:\n\n full_path = source_dir + str(batch.loc[i].dx) + \"/\" + str(batch.loc[i].aug_id)\n input1 = get_input(full_path)\n input2 = [batch.loc[i].age, batch.loc[i].sex]\n output = batch.loc[i].dx\n\n input_pre = preprocess_input(input1)\n batch_input1 += [ input_pre ]\n batch_input2 += [ input2 ]\n batch_output += [ output ]\n\n # flatten the image list so that it looks like the tensorflow iterator\n batch_input1 = [val for sublist in batch_input1 for val in sublist]\n\n # Return a tuple of ([input,input],output) to feed the network\n batch_x1 = np.array(batch_input1)\n batch_x2 = np.array(batch_input2, dtype=\"float32\")\n batch_y = lb.transform(np.array(batch_output)).astype(\"float32\")\n\n yield[batch_x1, batch_x2], batch_y\n idx += 1\n\n if idx >= len(df):\n break", "def apply_to_batch(self, batch_dict):\r\n \r\n self._last_batch = batch_dict\r\n \r\n if isinstance(self.model,NMTModelWithMLTM):\r\n y_pred = self.model(x_source=batch_dict['x_source'], \r\n x_mltm=batch_dict['x_source_mltm_vector'],\r\n x_source_lengths=batch_dict['x_source_length'], \r\n target_sequence=batch_dict['x_target'])\r\n else:\r\n y_pred = self.model(x_source=batch_dict['x_source'], \r\n x_source_lengths=batch_dict['x_source_length'], \r\n target_sequence=batch_dict['x_target'])\r\n self._last_batch['y_pred'] = y_pred\r\n \r\n attention_batched = np.stack(self.model.decoder._cached_p_attn).transpose(1, 0, 2)\r\n self._last_batch['attention'] = attention_batched", "def __getitem__(self, batch_index):\n batch_images = np.zeros(shape=(self.batch_size, *MODEL_INPUT_SIZE, MODEL_INPUT_CHANNELS), dtype=np.float32)\n # For ages use -1 instead of zeros, because for black images age should be 0 months\n batch_ages = np.full(shape=(self.batch_size, 1), fill_value=-1, dtype=np.float32)\n batch_males = np.zeros(shape=(self.batch_size, 1), 
dtype=np.uint8)\n\n # Generate image indexes of the batch\n batch_image_indexes = self.image_indexes[batch_index * self.batch_size:(batch_index + 1) * self.batch_size]\n\n for item_number, batch_image_index in enumerate(batch_image_indexes):\n image_id = self.image_ids[batch_image_index][0]\n age = self.ages[batch_image_index]\n male = self.males[batch_image_index]\n\n image_path = self.images_path / f'{image_id}.png'\n image = skimage.io.imread(str(image_path))\n image = normalized_image(image)\n\n if self.is_train:\n augmented_image = augmentate_image(image)\n else:\n augmented_image = image\n\n augmented_image = augmented_image * 255\n augmented_image = np.stack((augmented_image,) * MODEL_INPUT_CHANNELS, axis=-1)\n batch_images[item_number, ...] = augmented_image\n\n batch_ages[item_number, ...] = age\n batch_males[item_number, ...] = male\n\n batch_images = preprocess_input(batch_images)\n return [batch_images, batch_males], batch_ages", "def _batch_train(self, batch, training_step, step):\n lstm_size = (self.batch_size, self.Qmain.h_size)\n batch_mem = np.zeros(lstm_size)\n batch_carry = np.zeros(lstm_size)\n input_shape = (self.batch_size,\n self.trace_length,\n self.observation_size)\n m_data = np.vstack(batch[:, 0])\n m_data = m_data.reshape(input_shape)\n t_data = np.vstack(batch[:, 4])\n t_data = t_data.reshape(input_shape)\n q_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(m_data)]\n q1_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(t_data)]\n\n # Batch predict\n self.Qmain.trace_length.assign(self.trace_length)\n self.Qmain.dropout_rate.assign(0.0)\n self.Qtarget.trace_length.assign(self.trace_length)\n self.Qtarget.dropout_rate.assign(0.0)\n\n # Save the graph just the first time\n if training_step == 0:\n tf.summary.trace_on()\n\n # T batch predict\n pred = self.Qmain.model.predict(q_input,\n batch_size=self.batch_size)\n Q = pred[0]\n batch_bus = pred[1]\n batch_line = pred[2]\n batch_disp = pred[3]\n\n ## Log graph once and disable graph logging\n if training_step == 0:\n with self.tf_writer.as_default():\n tf.summary.trace_export(self.name + \"-graph\", step)\n\n # T+1 batch predict\n Qn, *_ = self.Qtarget.model.predict(q1_input,\n batch_size=self.batch_size)\n \n # Compute batch Q update to Qtarget\n for i in range(self.batch_size):\n idx = i * (self.trace_length - 1)\n a = batch[idx][1]\n grid = a[0]\n batch_bus[i][:] = a[1][:]\n batch_line[i][:] = a[2][:]\n batch_disp[i][:] = a[3][:]\n r = batch[idx][2]\n d = batch[idx][3]\n Q[i][grid] = r\n if d == False:\n Q[i][grid] += DISCOUNT_FACTOR * Qn[i][grid]\n\n # Batch train\n batch_x = [batch_mem, batch_carry, m_data]\n batch_y = [\n Q,\n batch_bus, batch_line, batch_disp,\n batch_mem, batch_carry\n ]\n loss = self.Qmain.model.train_on_batch(batch_x, batch_y)\n loss = loss[0]\n\n # Log to tensorboard\n self._tf_log_summary(loss, step)", "def forward_batch(self, *args, batchsize=16, retain_inputs=False,\n calc_score=False, converter=concat_examples):\n # data may be \"train_x array\" or \"chainer dataset\"\n data = args[0]\n data, _ = self._check_X_y(data)\n\n input_list = None\n output_list = None\n total_score = 0\n for i in range(0, len(data), batchsize):\n inputs = converter(data[i:i + batchsize], device=self.device)\n if not isinstance(inputs, tuple):\n inputs = (inputs,)\n #print('forward batch inputs', len(inputs), inputs)\n #print('forward batch inputs', len(inputs[0]))\n outputs = self._forward(*inputs, calc_score=calc_score)\n if not isinstance(outputs, tuple):\n outputs = (outputs,)\n # 
Init\n if retain_inputs:\n if input_list is None:\n input_list = [[] for _ in range(len(inputs))]\n for j, input in enumerate(inputs):\n input_list[j].append(cuda.to_cpu(input))\n if output_list is None:\n output_list = [[] for _ in range(len(outputs))]\n for j, output in enumerate(outputs):\n # print(j, 'output', type(output), output.shape)\n output_list[j].append(cuda.to_cpu(output.data))\n if calc_score:\n # switch accuracy or loss depends on situation.\n if self.compute_accuracy:\n total_score += self.accuracy * outputs[0].shape[0]\n else:\n total_score += self.loss * outputs[0].shape[0]\n\n if retain_inputs:\n self.inputs = [numpy.concatenate(input) for input in input_list]\n if calc_score:\n self.total_score = cuda.to_cpu(total_score.data) / len(data)\n\n result = [numpy.concatenate(output) for output in output_list]\n if len(result) == 1:\n return result[0]\n else:\n return result", "def process_state_batch(self, batch):\n return np.squeeze(batch, axis=1)", "def generate_train_batch(self):\n\n patients_indices = self.get_indices()\n patients_for_batch = [self._data[i] for i in patients_indices]\n\n data = np.zeros((self.batch_size, 1, *self.patch_size), dtype=np.short)\n labels = np.empty(self.batch_size, dtype=np.float32)\n\n # iterate over patients_for_batch and include them in the batch\n for i, j in enumerate(patients_for_batch):\n patient_data_ct = np.load(j).astype(np.short)\n\n data[i] = self.preprocess_func(patient_data_ct).astype(np.short)\n path = str(j).split('/')[-1].replace('.npy', '')\n labels[i] = float(self.age_info[path])\n\n return {'data': np.array(data), 'label': np.array(labels)}", "def batchify(batch):\n\n PAD_ID = batch[0]['<PAD>']\n inputs_list = [ex['input'] for ex in batch]\n max_length_list = []\n for docs in inputs_list:\n max_length = max([len(doc[1]) for doc in docs])\n max_length_list.append(max_length)\n inputs = []\n for index,docs in enumerate(inputs_list):\n bat_size = len(docs)\n tp_vecs = torch.zeros((bat_size,max_length_list[index]),dtype=torch.long)\n tp_vecs += PAD_ID\n for k,doc in enumerate(docs):\n for j,word in enumerate(doc[1]):\n tp_vecs[k,j] = word\n tp_list = [doc[0] for doc in docs]\n tp_list = torch.tensor(tp_list,dtype=torch.long)\n inputs.append([tp_list,tp_vecs])\n week_index_list = torch.tensor([ex['target'][0] for ex in batch],dtype=torch.long)\n word_index_list = torch.tensor([ex['target'][1] for ex in batch],dtype=torch.long)\n targets = (week_index_list,word_index_list)\n return inputs,targets", "def process_images(self):\n self.processed_content_image = tf.keras.applications.vgg19.preprocess_input(\n self.content_image)\n self.processed_style_image = tf.keras.applications.vgg19.preprocess_input(\n self.style_image)", "def process_sample_train(self):\n raise NotImplementedError" ]
[ "0.75249064", "0.7168187", "0.7088052", "0.69800156", "0.69082737", "0.6876216", "0.67433715", "0.6727425", "0.67233706", "0.67233706", "0.6711491", "0.66323096", "0.6620821", "0.662077", "0.6583442", "0.65692496", "0.65633017", "0.6563035", "0.65559244", "0.65559244", "0.6536702", "0.653374", "0.6516111", "0.6514095", "0.6508423", "0.64978176", "0.64807796", "0.6473708", "0.6464474", "0.63613826", "0.6359474", "0.6350067", "0.63382083", "0.62930757", "0.62770957", "0.62702566", "0.62561965", "0.6247787", "0.62384415", "0.62379247", "0.62294334", "0.62223923", "0.6216352", "0.6216083", "0.6211379", "0.6201846", "0.61894125", "0.618487", "0.61845386", "0.61347854", "0.6134363", "0.6130888", "0.6127552", "0.6125413", "0.61201704", "0.6117388", "0.6111203", "0.6107685", "0.610539", "0.60979676", "0.6094118", "0.60738134", "0.6064429", "0.6063173", "0.60569525", "0.6054127", "0.605063", "0.6033299", "0.6029822", "0.6024761", "0.6024591", "0.6024157", "0.602383", "0.60195154", "0.60180634", "0.6016187", "0.60132873", "0.60081697", "0.600515", "0.6002645", "0.5995537", "0.59799767", "0.5973487", "0.59666985", "0.5956527", "0.5956202", "0.5952439", "0.5951134", "0.59508556", "0.5938358", "0.59382", "0.5935826", "0.59351885", "0.59345347", "0.5932086", "0.5931291", "0.5925199", "0.592301", "0.59209245", "0.5913846" ]
0.6349032
32
Simple forward step with cross-entropy loss.
def _cross_entropy_forward_step(batch, model):
    timers = get_timers()

    # Get the batch.
    timers('batch-generator', log_level=2).start()
    try:
        batch_ = next(batch)
    except BaseException:
        batch_ = batch
    tokens, types, labels, attention_mask = process_batch(batch_)
    timers('batch-generator').stop()

    # Forward model.
    output_tensor = model(tokens, attention_mask, tokentype_ids=types)

    return output_tensor, partial(cross_entropy_loss_func, labels)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward_train(self, preds_T: torch.Tensor) -> torch.Tensor:\n fake_label = preds_T.data.max(1)[1]\n return F.cross_entropy(preds_T, fake_label)", "def train_step(self, X_batch: np.ndarray, Y_batch: np.ndarray):\n\n # Almost the same as previous task, calculates the cross entropy loss for multiple classes using the softmax loss equation provided in the assignment.\n targets = Y_batch\n outputs = self.model.forward(X_batch)\n self.model.backward(X_batch, outputs, targets)\n \n self.model.w += -self.learning_rate*self.model.grad\n \n loss = cross_entropy_loss(targets, outputs)\n return loss", "def _forward(self):\n\n tf.summary.image(\"image\", tensor=tf.reshape(self.x, (self.batch_size, 28, 28, 1)), max_outputs=10)\n x = self.x\n\n # x = layers.dropout(self.x, keep_prob=0.7)\n # with tf.variable_scope(\"layer1\") as scope:\n h = tf.nn.relu(layers.fully_connected(x, num_outputs=self.input_size // 2, activation_fn=None))\n # tf.summary.histogram(\"moving_mean1\", tf.get_variable(scope + \"moving_mean\"))\n # with tf.variable_scope(\"layer2\") as scope:\n # h = tf.nn.relu(layers.fully_connected(h, num_outputs=32, activation_fn=None))\n # tf.summary.histogram(\"moving_mean2\", tf.get_variable(\"moving_mean\"))\n # with tf.variable_scope(\"layer3\") as scope:\n self.logits = layers.fully_connected(h, num_outputs=10, activation_fn=None)\n # tf.summary.histogram(\"moving_mean3\", tf.get_variable(\"moving_mean\"))\n\n self.probability = tf.nn.softmax(self.logits)\n self.prediction = tf.argmax(self.probability, axis=1)", "def forward_train(self, *args, **kwargs):\n pass", "def forward(self, x):\n x = self.first_deconv(x)\n x = self.first_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.second_deconv(x)\n x = self.second_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.third_deconv(x)\n x = self.third_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.fourth_deconv(x)\n x = self.fourth_batch_norm(x)\n\n x = self.fifth_deconv(x)\n x = self.fifth_batch_norm(x)\n\n x = self.sixth_deconv(x)\n x = self.sixth_batch_norm(x)\n\n x = self.seventh_deconv(x)\n\n # sigmoid_out = nn.functional.sigmoid(x)\n tanh_out = nn.functional.tanh(x)\n\n out = (tanh_out + 1) * 255 / 2\n\n # print 'out.shape =', out.shape\n\n return out", "def train_step(input, target, model, loss_fn, optimizer, **unused):\r\n model.train()\r\n output = model(input)\r\n loss = loss_fn(output, target)\r\n optimizer.backward(loss)\r\n optimizer.step()", "def forward(self, x):\n x = self.feature_extractor(x)\n batch_size, hidden = x.size()\n\n x = self.layer_1(x)\n x = torch.relu(x)\n x = self.layer_2(x)\n x = torch.relu(x)\n x = self.layer_3(x)\n\n x = torch.log_softmax(x, dim=1)\n return x", "def forward(self, X, training=False):\n pass", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)):\n if l == 0:\n z = self.layers[l].forward(x)\n else:\n z = self.layers[l].forward(a)\n a = self.activations[l].forward(z)\n\n # output from softmax layer\n out = a\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def training_step(self, x):\n self.train()\n rec_error, feat, y = self.forward(x)\n # Reconstruction Loss\n rec_loss = torch.mean(rec_error)\n loss = rec_loss\n\n self.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.eval()\n print('Rec Loss: {}'.format(rec_loss.cpu().data))\n print()\n return loss, feat, y", "def crossentropy_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n return 
-torch.log2(probability_fn(args))", "def forward(self, x):\r\n y = self.en_fc1(x)\r\n y = F.relu(y)\r\n y = self.en_fc2(y)\r\n y = F.relu(y)\r\n y = self.en_fc3(y)\r\n y = F.relu(y)\r\n\r\n mean = self.en_mu(y)\r\n stddev_p = self.en_log(y)\r\n \r\n n = x.shape[0]\r\n z = torch.randn(n,self.latent_dim)\r\n std = torch.exp(stddev_p/2.0)\r\n z = z.mul(std) + mean\r\n \r\n xhat = self.de_fc1(z)\r\n xhat = F.relu(xhat)\r\n xhat = self.de_fc2(xhat)\r\n xhat = F.relu(xhat)\r\n xhat = self.de_fc3(xhat)\r\n xhat = F.sigmoid(xhat)\r\n \r\n return y,mean,stddev_p,z,xhat", "def forward(self, input, target):\n target = target.squeeze_()\n return self.ratio * F.cross_entropy(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)", "def training_step(self, train_batch, batch_idx):\n x, y = train_batch\n logits = self.forward(x)\n loss = self.cross_entropy_loss(logits, y)\n logs = {\"train_loss\": loss}\n return {\"loss\": loss, \"log\": logs}", "def training_step(self, x):\n self.train() # Sets network to train mode\n rec_error, feat, y = self.forward(x)\n # Reconstruction Loss\n rec_loss = torch.mean(rec_error)\n loss = rec_loss\n\n self.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.eval() # Sets network to evaluation mode\n print('Rec Loss: {}'.format(rec_loss.cpu().data))\n print()\n return loss, feat, y", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob,\n }\n _, step, loss = sess.run([train_op, global_step, cnn.loss], feed_dict)", "def training_step(self, input_tensor: torch.Tensor, target_tensor: torch.Tensor):\n self.forward(input_tensor)\n self.backprop(target_tensor)", "def forward(self, x):\n h = self.linear1(x)\n h = torch.nn.functional.relu(h)\n h = self.linear2(h)\n h = torch.nn.functional.relu(h)\n h = self.linear3(h)\n y_pred = torch.sigmoid(h)\n return y_pred", "def step(self, inputs=None, targets=None):\n if not self.training:\n self.train_mode()\n\n outputs, loss = self.forward(\n inputs=inputs,\n targets=targets\n )\n\n self.update(\n loss=loss,\n inputs=inputs,\n targets=targets,\n outputs=outputs\n )\n\n return outputs, loss", "def forward(self, observation: Tensor) -> Tensor:\n pass", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: opts[\"dropout_keep_prob\"]\n }\n _, step, loss, accuracy = sess.run(\n [train_op, global_step, cnn.loss, cnn.accuracy],\n feed_dict)", "def train_step(x_batch, y_batch):\n\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss,\n cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n logger.info(\"{}: step {}, loss {:g}, acc {:g}\".format(\n time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)", "def forward(self, x):\n h = self.linear1(x)\n h = self.linear2(h)\n y_pred = torch.sigmoid(h)\n return y_pred", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.x: x_batch,\n cnn.y_: y_batch,\n step_time_placeholder : last_step_time,\n cnn.keep_prob : FLAGS.keep_prob\n }\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.cross_entropy, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc 
{:g}\".format(time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)\n train_summary_writer.flush()", "def forward(self, x):\n h_relu = self.linear1(x).clamp(min=0)\n y_pred = self.linear2(h_relu)\n return y_pred", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, loss, accuracy = sess.run(\n [train_op, global_step, cnn.loss, cnn.accuracy],\n feed_dict)", "def forward_tensor(self, x):\n pass", "def train_step(x_batch, y_batch, x_batch_lex):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n # lexicon\n cnn.input_x_lexicon: x_batch_lex,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, summaries, loss, accuracy, neg_r, neg_p, f1_neg, f1_pos, avg_f1 = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy,\n cnn.neg_r, cnn.neg_p, cnn.f1_neg, cnn.f1_pos, cnn.avg_f1],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n # print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n #print(\"{}: step {}, loss {:g}, acc {:g}, neg_r {:g} neg_p {:g} f1_neg {:g}, f1_pos {:g}, f1 {:g}\".\n # format(time_str, step, loss, accuracy, neg_r, neg_p, f1_neg, f1_pos, avg_f1))\n train_summary_writer.add_summary(summaries, step)", "def forwardpass_train(self, X):\n # hidden_1\n h1_input = np.dot(X, self.W1) + self.b1\n h1_output = functions.relu(h1_input)\n # hidden_2\n h2_input = np.dot(h1_output, self.W2) + self.b2\n h2_output = functions.relu(h2_input)\n # output\n o_input = np.dot(h2_output, self.W3) + self.b3\n final_output = functions.softmax(o_input)\n return h1_input, h1_output, h2_input, h2_output, final_output", "def train_step(x_batch, y_batch):\r\n feed_dict = {\r\n cnn.input_x: x_batch,\r\n cnn.input_y: y_batch,\r\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\r\n }\r\n\r\n _, step, summaries, loss, accuracy, predictions,y_actual = sess.run(\r\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy, cnn.predictions,cnn.y],\r\n feed_dict)\r\n\r\n time_str = datetime.datetime.now().isoformat()\r\n # print(\"train_f1_score:\", f1_score(y_actual, predictions, average=None))\r\n # print (predictions)\r\n # print(y_actual)\r\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\r\n return accuracy\r\n\r\n train_summary_writer.add_summary(summaries, step)", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)", "def forward(self,x):\n\t\th_relu = self.linear1(x).clamp(min=0)\n\t\ty_pred = self.linear2(h_relu)\n\t\treturn y_pred", "def forward(self, x):\n #batch_size = x.shape[0]\n out = self.model(x)\n return out", "def training_step(self, *args: Any, **kwargs: Any) -> Tensor:\n batch = args[0]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n # by default, the train step logs every `log_every_n_steps` steps where\n # `log_every_n_steps` is a parameter to the `Trainer` object\n self.log(\"train_loss\", loss, on_step=True, on_epoch=False)\n 
self.train_metrics(y_hat_hard, y)\n\n return cast(Tensor, loss)", "def step(self, x, y, learning_rate=1e-3):\n \n # Input transformation\n \"\"\"\n Input is represented with M-dimensional vectors\n We convert them to (N, M) matrices such that columns are one-hot \n representations of the input\n \"\"\"\n x = self.one_hot(x, self.N)\n y = self.one_hot(y, self.N)\n\n \n # Forward propagation\n \"\"\"\n Returns\n -------\n embedding: array\n (D, M) matrix where columns are word embedding from U matrix\n logits: array\n (N, M) matrix where columns are output logits\n prob: array\n (N, M) matrix where columns are output probabilities\n \"\"\"\n \n ### YOUR CODE HERE ###\n #Omran:\n #U and V of dimension (D, N) and (N, D) respectively\n\n embedding = np.dot(self.U, x)\n logits = np.dot(self.V, embedding)\n prob = self.softmax(logits,0)# take care of the axis, I am not quite sure how you will implement it\n \n assert embedding.shape == (self.D, x.shape[1])\n assert logits.shape == (self.N, x.shape[1])\n assert prob.shape == (self.N, x.shape[1])\n \n \n # Loss calculation\n \"\"\"\n Returns\n -------\n loss: int\n Cross-entropy loss using true values and probabilities\n \"\"\"\n \n ### YOUR CODE HERE ###\n loss = self.loss(y, prob)\n \n # Backward propagation\n \"\"\"\n Returns\n -------\n d_U: array\n (N, D) matrix of partial derivatives of loss w.r.t. U\n d_V: array\n (D, N) matrix of partial derivatives of loss w.r.t. V\n \"\"\"\n \n ### YOUR CODE HERE ###\n #I am not quite sure of this!!\n \n# difference = np.sum(np.subtract(prob, y), axis=1)\n difference = prob - y\n d_V = difference @ embedding.T\n# print(self.N, self.D)\n# print(difference.shape)\n# print(d_V.shape)\n d_U = (self.V.T @ difference) @ x.T\n# d_U = self.V.T @ np.outer(difference, x)\n \n assert d_V.shape == (self.N, self.D)\n assert d_U.shape == (self.D, self.N)\n \n \n # Update the parameters\n \"\"\"\n Updates the weights with gradient descent such that W_new = W - alpha * dL/dW, \n where alpha is the learning rate and dL/dW is the partial derivative of loss w.r.t. 
\n the weights W\n \"\"\"\n \n ### YOUR CODE HERE ###\n self.V = self.V - learning_rate * d_V\n self.U = self.U - learning_rate * d_U\n\n return loss, d_U, d_V", "def forward(self, x):\n \n x = F.relu(self.conv1_bn(self.conv1(self.conv0_bn(x))))\n x = F.relu(self.conv2_bn(self.conv2(x)))\n x = F.relu(self.conv3_bn(self.conv3( self.maxpool2(x))))\n x = F.relu(self.conv4_bn(self.conv4( self.maxpool3(x))))\n x = self.maxpool4(x) \n x = x.view(-1, 1184)\n x = F.relu(self.fc1(x))\n x = self.dense1_bn(x)\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x)", "def forward(self, x):\r\n out = self.conv(x) # out with shape\r\n out, _ = self.lstm(out.permute(2, 0, 1, 3, 4).contiguous().view(self.seq_len, -1, 64 * 1 * 4))\r\n out = torch.mean(out.permute(1, 0, 2), 1) # [batch, num_directions * hidden_size]\r\n if self.embed:\r\n return out\r\n else:\r\n logits = self.linear(out)\r\n return logits, out", "def forward(self, x, target, k, a, m):\n x = x.view(-1, 28*28)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n y = F.relu(self.fc3(x))\n loss = self.loss(y, target, k, a, m)\n return y, loss", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n # print(x_batch[0])\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n if step%100==0:\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: self.cfg['dropout_keep_prob']\n }\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n self.logger.debug(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)", "def prox_soft(X, step): \n return torch.sign(X) * nn.functional.relu(torch.abs(X) - step)", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)", "def train_step(x_batch, y_batch):\r\n feed_dict = {\r\n cnn.input_x: x_batch,\r\n cnn.input_y: y_batch,\r\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\r\n }\r\n _, step, summaries, loss, accuracy = sess.run(\r\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],\r\n feed_dict)\r\n time_str = datetime.datetime.now().isoformat()\r\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\r\n train_summary_writer.add_summary(summaries, step)", "def forward(self, y_pred: torch.Tensor, y_actual: torch.Tensor) -> torch.Tensor:\n raise NotImplementedError()", "def forward(self, x):\n # action\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = self.conv3(x)\n x = F.relu(x)\n x = x.view(-1, 32 * 7 * 7)\n x = self.linear1(x)\n x = F.relu(x)\n\n mean = self.mu(x) # N, 
num_actions\n logstd = self.logstd.expand_as(mean)\n std = torch.exp(logstd)\n action = torch.normal(mean, std)\n\n # value\n v = self.critic_linear(x)\n\n # action prob on log scale\n logprob = log_normal_density(action, mean, std=std, log_std=logstd)\n return v, action, logprob, mean", "def forward(self, x):\n y_pred = self.net(x)\n return y_pred", "def forward_train(self, preds_T: torch.Tensor) -> torch.Tensor:\n raise NotImplementedError", "def forward(self,x):\n \n out,_ = self.lstm(x,self.hidden)\n out = out.detach()\n x = out[:,-1,:][-1]\n x = self.relu(x)\n if self.stochastic:\n x= x+ (x- self.memory[0]).detach()* torch.rand(x.shape) * self.sigmoid(self.SM.gamma[0])\n self.memory[0] = x.clone().detach()\n\n x = self.relu(self.layer1(x))\n if self.stochastic:\n x= x+ (x- self.memory[1]).detach()* torch.rand(x.shape) * self.sigmoid(self.SM.gamma[1])\n self.memory[1] = x.clone().detach()\n\n x = self.relu(self.layer2(x))\n if self.stochastic:\n x= x+ (x- self.memory[2]).detach()* torch.rand(x.shape) * self.sigmoid(self.SM.gamma[2])\n self.memory[2] = x.clone().detach()\n\n x = self.relu(self.layer3(x))\n if self.stochastic:\n x= x+ (x- self.memory[3]).detach()* torch.rand(x.shape) * self.sigmoid(self.SM.gamma[3])\n self.memory[3] = x.clone().detach()\n\n x = self.relu(self.layer4(x))\n return x", "def forward(self, x):\n batch_size, channels, width, height = x.size()\n\n # Input Layer: (batch_size, 1, 28, 28) -> (batch_size, 1*28*28)\n x = x.view(batch_size, -1)\n\n # Layer 1: (batch_size, 1*28*28) -> (batch_size, 128)\n x = self.layer_1(x)\n x = torch.relu(x)\n\n # Layer 2: (batch_size, 128) -> (batch_size, 256)\n x = self.layer_2(x)\n x = torch.relu(x)\n\n # Layer 3: (batch_size, 256) -> (batch_size, 10)\n x = self.layer_3(x)\n x = torch.log_softmax(x, dim=1)\n\n return x", "def predict_step(self, *args: Any, **kwargs: Any) -> Tensor:\n batch = args[0]\n x = batch[\"image\"]\n y_hat: Tensor = self(x).softmax(dim=1)\n return y_hat", "def forward(self, x):\n h_relu = self.linear1(x).clamp(min=0)\n h_relu2 = self.linear2(h_relu)\n h_relu3 = self.linear3(h_relu2)\n y_pred = self.linear4(h_relu3)\n return y_pred", "def forward(self, x):\n x, self.hidden = self.lstm(x, self.hidden)\n self.detach_hidden()\n x = self.dropout(x)\n x = self.out(x)\n return x", "def forward(self, x):\n\n x = self.first_conv_layer(x)\n x = self.second_conv_layer(x)\n x = self.third_conv_layer(x)\n x = self.fourth_conv_layer(x)\n\n #print 'x.shape=', x.shape\n x = x.view(-1, 5 * 5 * 64)\n x = F.relu(self.fc1(x))\n\n sigmoid_out = nn.functional.sigmoid(x)\n\n return sigmoid_out", "def forward(self, x):\n\n x = self.first_conv_layer(x)\n x = self.second_conv_layer(x)\n x = self.third_conv_layer(x)\n x = self.fourth_conv_layer(x)\n x = self.fifth_conv_layer(x)\n\n '''\n x = x.view(-1, 4 * 4 * 512)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n '''\n\n sigmoid_out = nn.functional.sigmoid(x)\n\n return sigmoid_out", "def forward(self, x):\n h_relu = self.linear1(x).clamp(min=0)\n h_relu2 = self.linear2(h_relu)\n y_pred = self.linear4(h_relu2)\n return y_pred", "def forward(self, x):\n y_pred = self.linear(x)\n return y_pred", "def forward(self, x):\n y_pred = self.linear(x)\n return y_pred", "def forward_propagation(self):\n pred_y = argmax(self.model.predict(train_x), axis=1)\n\n accuracy_func = Accuracy()\n accuracy_func.update_state(pred_y, train_y)\n self.accuracy = accuracy_func.result().numpy()", "def train_step(x_batch, y_batch):\r\n feed_dict = {\r\n rnn.input_x: x_batch,\r\n rnn.input_y: y_batch,\r\n 
rnn.dropout_keep_prob: FLAGS.dropout_keep_prob\r\n }\r\n _, step, loss, accuracy = sess.run(\r\n [train_op, global_step, rnn.loss, rnn.accuracy],\r\n feed_dict)\r\n return step, loss, accuracy", "def forward(self,y_out, y_truth): \n result = None\n #########################################################################\n # TODO: #\n # Implement the forward pass and return the output of the BCE loss. #\n #########################################################################\n\n result = -1 * (np.multiply(y_truth, np.log(y_out)) + np.multiply((1 - y_truth), np.log(1 - y_out)))\n \n \n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n \n return result", "def cross_entropy_loss():\n return nn.CrossEntropyLoss()", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.cnn.extractor.forward(x)\n return self.cnn.regressor(x)", "def forward(self, x):\n for layer in self.hidden_layers:\n x = F.relu(layer(x))\n x = self.dropout(x)\n x = self.output(x)\n\n return F.log_softmax(x, dim=1)", "def forward(self, input):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n assert input.shape[1] == self.n_neurons, \"The shape of the input tensor is not correct.\"\n\n bn_fct = CustomBatchNormManualFunction()\n out = bn_fct.apply(input, self.gamma, self.beta, self.eps)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def forward(self, x: torch.Tensor):\n x = self.linear1(x)\n x = torch.relu(x)\n x = self.linear2(x)\n x = self.dropout(x)\n return x", "def forward(self, x):\n if self.train():\n # N(1, alpha)\n epsilon = torch.randn(x.size()) * self.alpha + 1\n \n epsilon = Variable(epsilon)\n if x.is_cuda:\n epsilon = epsilon.cuda()\n \n return x * epsilon\n else:\n return x", "def forward(self, x):\n x1 = x[:, 0, :, :].reshape((-1, 1, obs_size * 2 + 1, obs_size * 2 + 1))\n x2 = x[:, 1, :, :].reshape((-1, (obs_size * 2 + 1) ** 2))\n if x2.shape[0] == 1:\n x2 = np.tile(x2, (minibatch_size, 1))\n h = F.relu(self.bn1(self.conv1(x)))\n h = F.relu(self.bn2(self.conv2(x)))\n h = F.relu(self.bn3(self.conv3(x)))\n h = self.l(h)\n return DiscreteActionValue(h)", "def forward(self, src, device):\n\n src = torch.as_tensor(src).float().to(device)\n\n\n # Set initial hidden and cell states \n h0 = torch.zeros(self.num_layers, src.shape[0], self.hidden_dim).to(device)\n c0 = torch.zeros(self.num_layers, src.shape[0], self.hidden_dim).to(device)\n\n # shape of lstm_out: [batch_size, input_size, hidden_dim]\n # shape of self.hidden: (a, b), where a and b both have shape (num_layers, batch_size, hidden_dim).\n lstm_out, self.hidden = self.lstm(src, (h0, c0)) \n \n # Only take the output from the final timetep\n # Can pass on the entirety of lstm_out to the next layer if it is a seq2seq prediction\n #print(lstm_out.size())\n y_pred = self.linear(lstm_out[:, -1, :].view(src.shape[0], -1))\n return y_pred", "def forward(self, x):\n h_relu_1 = self.linear1(x).clamp(min=0)\n h_relu_2 = self.linear2(h_relu_1).clamp(min=0)\n y_pred = self.linear3(h_relu_2)\n return y_pred", "def forward(self, inputs, target_oneHot):\n\n N = inputs.size()[0]\n\n # predicted probabilities for each pixel along channel\n inputs = F.softmax(inputs, dim=1)\n\n # Numerator Product\n inter = inputs * target_oneHot\n # Sum over all pixels N x C x H x W => N x C\n inter = inter.view(N, self.classes, -1).sum(2)\n\n # Denominator\n union = inputs + 
target_oneHot - (inputs * target_oneHot)\n # Sum over all pixels N x C x H x W => N x C\n union = union.view(N, self.classes, -1).sum(2)\n\n loss = inter / union\n\n ## Return average loss over classes and batch\n # return 1 - loss.mean()\n return -(loss.mean() - 1.)", "def train_step(x_batch, y_batch, len_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.doc_len: len_batch,\n cnn.dropout_keep_prob: dropout_keep_prob,\n }\n _, step, summaries, loss, accuracy, acc_max = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy_sigmoid, cnn.accuracy_max],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print((\"{}: step {}, loss {:g}, acc {:g}, acc_max {:g}\".format(time_str, step, loss, accuracy, acc_max)))\n train_summary_writer.add_summary(summaries, step)", "def forward(self, x):\n h_relu = self.linear1(x).clamp(min=0)\n y_pred = self.linear2(h_relu)\n return y_pred", "def forward(self, x):\n h_relu = self.linear1(x).clamp(min=0)\n y_pred = self.linear2(h_relu)\n return y_pred", "def forward(self, x):\n h_relu = self.linear1(x).clamp(min=0)\n y_pred = self.linear2(h_relu)\n return y_pred", "def forward_pass(self, x, targets=None):\n self.x = x\n if targets is None:\n loss = None\n else:\n self.targets = targets\n\n result = x\n for layer in self.layers:\n result = layer.forward_pass(result)\n\n # softamax activation on input\n self.y = softmax(result)\n\n if targets is not None:\n loss = self.loss_func(self.y, self.targets)\n\n return loss, self.y", "def train(self, inputs, labels):\n # make sure that the amount of data and label is match\n assert inputs.shape[0] == labels.shape[0]\n\n n = inputs.shape[0]\n self.X = inputs\n for epochs in range(self.num_step):\n for idx in range(n):\n # operation in each training step:\n # 1. forward passing\n # 2. compute loss\n # 3. 
propagate gradient backward to the front\n self.input = inputs[idx:idx+1, :]\n self.output = self.forward(inputs[idx:idx+1, :])\n self.error = self.output - labels[idx:idx+1, :] #derevative of cross entropy\n self.backward(idx)\n\n if epochs % self.print_interval == 0:\n print('Epochs {}: '.format(epochs))\n self.test(inputs, labels)\n\n print('Training finished')\n self.test(inputs, labels)", "def train_step(model, model_0, mu:int, optimizer, train_data, loss_f):\n \n total_loss=0\n \n for idx, (features,labels) in enumerate(train_data):\n \n optimizer.zero_grad()\n \n predictions= model(features)\n \n loss=loss_f(predictions,labels)\n loss+=mu/2*difference_models_norm_2(model,model_0)\n total_loss+=loss\n \n loss.backward()\n optimizer.step()\n \n return total_loss/(idx+1)", "def forward(self, batch):\n self.output = np.dot(np.array(batch), self.weights) + self.biases", "def forward(self, X):\r\n # input layer\r\n self.ff[0] = X\r\n # hidden layer\r\n for x in range(1, np.shape(self.ff)[0]-1):\r\n self.ff[x] = self.hid_transfer(self.weights[x-1].dot(self.ff[x-1]) + self.bias[x-1])\r\n # output layer\r\n self.ff[-1] = self.out_transfer(self.weights[-1].dot(self.ff[-2]) + self.bias[-1])", "def forward(self, x):\n # x = state\n \n x = F.relu(self.input(x))\n x = self.output(x)\n \n return x", "def run_non_targeted_attack(step_size, image, model, n_iterations, eps, loss=nn.CrossEntropyLoss()):\n # Here we do not care about the value of the target label\n label = torch.zeros(1, 1)\n # Record our loss values\n losses = []\n # Create PyTorch tensor variables\n x, y = Variable(image, requires_grad=True), Variable(label)\n # Perform our gradient ascent\n for _ in range(n_iterations):\n # Reset the gradients\n zero_gradients(x)\n # Forward propagation\n out = model(x)\n # Our prediction\n y.data = out.data.max(1)[1]\n # Compute our loss\n loss_tensor = loss(out, y)\n # Record our loss\n losses.append(loss_tensor.data[0])\n # Back propagation\n loss_tensor.backward()\n # Fixed norm n_iterations not to stay trapped around a local minima\n normed_grad = step_size * torch.sign(x.grad.data)\n # Perform our gradient ascent step\n step_adv = x.data + normed_grad\n # Compute our adversarial noise\n attacking_noise = step_adv - image\n # Clamp our adversarial noise\n attacking_noise = torch.clamp(attacking_noise, -eps, eps)\n # Compute our adversarial image\n adversarial_image = image + attacking_noise\n # Normalize it to feed it to inception\n adversarial_image = torch.clamp(adversarial_image, 0.0, 1.0)\n x.data = adversarial_image\n return adversarial_image, attacking_noise, losses", "def train_step(x, y):\n with tf.GradientTape() as tape:\n predictions = self.model(x)\n loss = self.loss_object(y, predictions)\n gradients = tape.gradient(loss, self.model.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n\n self.train_loss(loss)\n self.train_acc(y, predictions)", "def forward(self, x):\n if self.train():\n # N(0,1)\n epsilon = Variable(torch.randn(x.size()))\n if x.is_cuda:\n epsilon = epsilon.cuda()\n \n # Clip alpha\n self.log_alpha.data = torch.clamp(self.log_alpha.data, max=self.max_alpha)\n alpha = self.log_alpha.exp()\n \n # N(1, alpha)\n epsilon = epsilon * alpha\n \n return x * epsilon\n else:\n return x", "def forward(self, labels: torch.Tensor) -> torch.Tensor:\n raise NotImplementedError", "def train_step(images, targets):\n # Save all operations\n with tf.GradientTape() as tape:\n # Make prediction\n predictions = model(images)\n # Compute 
loss\n loss = tf.keras.losses.categorical_crossentropy(targets, predictions)\n # Compute gradients\n gradients = tape.gradient(loss, model.trainable_variables)\n # Update model\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))", "def forward(self, x):\n x = self.conv1(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x)\n x = self.conv2(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x) \n x = self.maxpool(x) \n return x", "def forward(self, x):\n batch_size = x.shape[0]\n x = x.mean(dim=-1).mean(dim=-1)\n init_pose = self.init_pose.expand(batch_size, -1)\n init_shape = self.init_shape.expand(batch_size, -1)\n init_cam = self.init_cam.expand(batch_size, -1)\n pred_pose = init_pose\n pred_shape = init_shape\n pred_cam = init_cam\n for _ in range(self.n_iter):\n xc = torch.cat([x, pred_pose, pred_shape, pred_cam], 1)\n xc = self.fc1(xc)\n xc = self.drop1(xc)\n xc = self.fc2(xc)\n xc = self.drop2(xc)\n pred_pose = self.decpose(xc) + pred_pose\n pred_shape = self.decshape(xc) + pred_shape\n pred_cam = self.deccam(xc) + pred_cam\n pred_rotmat = rot6d_to_rotmat(pred_pose).view(batch_size, 24, 3, 3)\n out = pred_rotmat, pred_shape, pred_cam\n return out", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n x = self.pool1(F.relu(self.batch1(self.conv1(x))))\n x = self.pool2(F.relu(self.batch2(self.conv2(x))))\n x = F.relu(self.batch3a(self.conv3a(x)))\n x = self.pool3(F.relu(self.batch3b(self.conv3b(x))))\n x = F.relu(self.batch4a(self.conv4a(x)))\n x = self.pool4(F.relu(self.batch4b(self.conv4b(x))))\n x = F.relu(self.batch5a(self.conv5a(x)))\n x = self.pool5(F.relu(self.batch5b(self.conv5b(x))))\n x = self.avgpool(x)\n x = x.reshape(x.shape[0], -1)\n out = self.fc1(x)\n\n# raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n model_output = None\n #######################################################################\n # Student code begins\n #######################################################################\n\n (N,C,H,W) = x.shape\n\n conv_features = self.conv_layers(x)\n \n flat_features = conv_features.reshape(-1, 500)\n model_output = self.fc_layers(flat_features)\n\n\n #######################################################################\n # Student code ends\n #######################################################################\n return model_output", "def forward(self, x: torch.tensor) -> torch.tensor:\n # flatten image input\n x = x.flatten(start_dim=1)\n # add hidden layer, with relu activation function\n x = self.relu(self.fc1(x))\n x = self.drop(x)\n \n x = self.relu(self.fc2(x))\n x = self.drop(x)\n \n x = self.relu(self.fc3(x))\n x = self.drop(x)\n \n x = self.fc4(x)\n x = self.sigmoid(self.classifier(x))\n \n return x.squeeze(), None", "def forward(self, x, c):\n batch_size, _, _, _ = x.size()\n mu, logvar = self.recognition_model(x, c)\n std = torch.exp(0.5 * logvar)\n eps = torch.randn([batch_size, self.latent_size]).to('cuda' if torch.cuda.is_available() else 'cpu')\n z_hat = eps * std + mu\n x_hat = self.generation_model(z_hat, c)\n return x_hat, mu, logvar", "def forward(self, x, c):\n batch_size, _, _, _ = x.size()\n mu, logvar = self.recognition_model(x, c)\n std = torch.exp(0.5 * logvar)\n eps = torch.randn([batch_size, 
self.latent_size]).to('cuda' if torch.cuda.is_available() else 'cpu')\n z_hat = eps * std + mu\n x_hat = self.generation_model(z_hat, c)\n return x_hat, mu, logvar", "def run_non_targeted_attack_v2(step_size, image, model, n_iterations, eps, loss=nn.CrossEntropyLoss()):\n # Here we do not care about the value of the target label\n label = torch.zeros(1, 1)\n # Record our loss values\n losses = []\n # Create PyTorch tensor variables\n x, y = Variable(image, requires_grad=True), Variable(label)\n # Perform our gradient ascent\n for _ in range(n_iterations):\n # Reset the gradients\n zero_gradients(x)\n # Forward propagation\n out = model(x)\n # Our prediction\n y.data = out.data.max(1)[1]\n # Compute our loss\n loss_tensor = loss(out, y)\n # Record our loss\n losses.append(loss_tensor.data[0])\n # Back propagation\n loss_tensor.backward()\n # Inverse the gradient\n inv_grad = torch.Tensor(np.linalg.pinv(x.grad[0][0].data.numpy()))\n # Fixed norm n_iterations not to stay trapped around a local minima\n normed_grad = step_size * inv_grad\n # Perform our gradient ascent step\n step_adv = x.data + normed_grad\n # Compute our adversarial noise\n attacking_noise = step_adv - image\n # Clamp our adversarial noise\n attacking_noise = torch.clamp(attacking_noise, -eps, eps)\n # Compute our adversarial image\n adversarial_image = image + attacking_noise\n # Normalize it to feed it to inception\n adversarial_image = torch.clamp(adversarial_image, 0.0, 1.0)\n x.data = adversarial_image\n return adversarial_image, attacking_noise, losses", "def training_step( # type: ignore[override]\n self, batch: Dict[str, Any], batch_idx: int\n ) -> Tensor:\n x = batch[\"image\"]\n y = batch[\"label\"].view(-1, 1)\n y_hat = self.forward(x)\n\n loss = F.mse_loss(y_hat, y)\n\n self.log(\"train_loss\", loss) # logging to TensorBoard\n self.train_metrics(y_hat, y)\n\n return loss", "def forward(self,x):\n x = x.transpose(1,2).contiguous()\n x = F.leaky_relu(self.fc1(x), 0.2)\n x = F.leaky_relu(self.bn2(self.fc2(x)), 0.2)\n x = F.leaky_relu(self.bn3(self.fc3(x)), 0.2)\n x = torch.sigmoid(self.fc4(x))\n return x.transpose(1,2)", "def run_non_targeted_attack_v3(step_size, image, model, n_iterations, eps, loss=nn.CrossEntropyLoss()):\n # Here we do not care about the value of the target label\n label = torch.zeros(1, 1)\n # Record our loss values\n losses = []\n # Create PyTorch tensor variables\n x, y = Variable(image, requires_grad=True), Variable(label)\n # Perform our gradient ascent\n for _ in range(n_iterations):\n # Reset the gradients\n zero_gradients(x)\n # Forward propagation\n out = model(x)\n # Our prediction\n y.data = out.data.max(1)[1]\n # Compute our loss\n loss_tensor = loss(out, y)\n # Record our loss\n losses.append(loss_tensor.data[0])\n # Back propagation\n loss_tensor.backward()\n # Fixed norm n_iterations not to stay trapped around a local minima\n normed_grad = step_size * x.grad.data\n # Perform our gradient ascent step\n step_adv = x.data + normed_grad\n # Compute our adversarial noise\n attacking_noise = step_adv - image\n # Clamp our adversarial noise\n attacking_noise = torch.clamp(attacking_noise, -eps, eps)\n # Compute our adversarial image\n adversarial_image = image + attacking_noise\n # Normalize it to feed it to inception\n adversarial_image = torch.clamp(adversarial_image, 0.0, 1.0)\n x.data = adversarial_image\n return adversarial_image, attacking_noise, losses", "def forward(inputs,weights,function=sigmoid,step=-1):\n if step == 0:\n return inputs\n elif step == -1:\n step = len(weights) 
#go to output layer \n output = np.append(1, inputs)\n for i in range(step):\n output = np.append(1, function(np.dot(weights[i], output))) #calculating activation\n return output[1:]", "def forward(self, inputs: Tensor, targets: Tensor, **kwargs) -> Tensor:\n return NotImplemented", "def forward_train(self, img, img_metas, **kwargs):", "def forward(self, x, y):\n x1 = x.clone()\n x1[range(x1.size(0)), y] = -float(\"Inf\")\n x_gt = x[range(x.size(0)), y].unsqueeze(1)\n x_topk = torch.topk(x1, 15, dim=1)[0] # 15 Negative classes to focus on, its a hyperparameter\n x_new = torch.cat([x_gt, x_topk], dim=1)\n\n return self.ce(x_new, torch.zeros(x_new.size(0)).cuda().long())" ]
[ "0.7079546", "0.7007037", "0.6882113", "0.67776626", "0.6773821", "0.6759933", "0.6734602", "0.67251396", "0.66867614", "0.66795677", "0.6665437", "0.66561353", "0.6633398", "0.66322726", "0.6624705", "0.6619393", "0.6619258", "0.6606617", "0.65844935", "0.65608215", "0.6553423", "0.65445954", "0.65419996", "0.6540261", "0.653904", "0.6534821", "0.65156615", "0.6515253", "0.65108985", "0.6510812", "0.65035003", "0.65003633", "0.64975095", "0.64973074", "0.64943695", "0.64898735", "0.64851886", "0.64844644", "0.6481503", "0.6474218", "0.646114", "0.64573914", "0.64380896", "0.64370966", "0.6422519", "0.64172286", "0.64171654", "0.6411342", "0.6410802", "0.64104176", "0.6395168", "0.63891774", "0.6386267", "0.63763267", "0.63750315", "0.63672304", "0.63672304", "0.63652664", "0.63612884", "0.6360982", "0.6360634", "0.63589334", "0.6350028", "0.634912", "0.63490254", "0.6334838", "0.6333466", "0.63307667", "0.6321092", "0.6320832", "0.6315231", "0.6313745", "0.6313745", "0.6313745", "0.63103086", "0.63059515", "0.630035", "0.62986845", "0.6295414", "0.62899685", "0.6289727", "0.6283768", "0.62832326", "0.6279324", "0.62785935", "0.6277887", "0.627731", "0.62765163", "0.62754494", "0.62742054", "0.62735844", "0.62735844", "0.6272278", "0.6270537", "0.6259336", "0.62569225", "0.6256669", "0.62529373", "0.62498695", "0.62324595" ]
0.7801202
0
Data loader. Note that batch size is the local (per GPU) batch size.
def build_data_loader(dataset, micro_batch_size, num_workers, drop_last,
                      task_collate_fn=None):
    # Sampler.
    world_size = mpu.get_data_parallel_world_size()
    rank = mpu.get_data_parallel_rank()
    sampler = torch.utils.data.distributed.DistributedSampler(
        dataset, num_replicas=world_size, rank=rank)

    # Data loader. Note that batch size is the per GPU batch size.
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=micro_batch_size,
                                              sampler=sampler,
                                              shuffle=False,
                                              num_workers=num_workers,
                                              drop_last=drop_last,
                                              pin_memory=True,
                                              collate_fn=task_collate_fn)

    return data_loader
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_loader(\n self, batch_size: int = 1, iter_steps: int = 0, batch_as_list: bool = True\n ) -> DataLoader:\n data = self.data\n datasets = []\n\n for _, dat in data.items():\n datasets.append(dat.dataset())\n\n if len(datasets) < 1:\n raise FileNotFoundError(\n \"no datasets available for this model to create a loader from\"\n )\n\n return DataLoader(\n *datasets,\n batch_size=batch_size,\n iter_steps=iter_steps,\n batch_as_list=batch_as_list,\n )", "def get_loader(self, batch_size=1, num_threads=3):\n\n gen_func, gen_types, gen_shapes = self.get_batch_gen(\n self, self.steps_per_epoch, batch_size)\n\n loader = tf.data.Dataset.from_generator(gen_func, gen_types, gen_shapes)\n\n loader = loader.map(map_func=self.transform,\n num_parallel_calls=num_threads)\n\n if ('batcher' not in self.model_cfg.keys() or\n self.model_cfg.batcher == 'DefaultBatcher'):\n loader = loader.batch(batch_size)\n\n length = len(self.dataset) / batch_size + 1 if len(\n self.dataset) % batch_size else len(self.dataset) / batch_size\n length = length if self.steps_per_epoch is None else self.steps_per_epoch\n\n return loader, int(length)", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def get_data_loader(batch_size=10, num_workers=2):\n \n data_loader = torch.utils.data.DataLoader(dataset=TempuckeyDataSet(),\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate)\n return data_loader", "def get_batch_data(train_loader, device, unlabelled_data, batch_size):\n labelled_data, labelled_target = next(iter(train_loader))\n labelled_target = labelled_target[3]\n unlabelled_data = unlabelled_data.to(device)\n labelled_target = labelled_target.to(device=device, dtype=torch.int64)\n labelled_data = labelled_data.to(device)\n labelled_target = labelled_target.squeeze()\n # print(\"labelled_target\", labelled_target.shape)\n\n labelled_data = labelled_data.view(batch_size, 1, 3750)\n unlabelled_data = unlabelled_data.view(batch_size, 1, 3750)\n\n labelled_data = pp.Preprocessor().forward(labelled_data)\n unlabelled_data = pp.Preprocessor().forward(unlabelled_data)\n\n labelled_data = labelled_data.view(batch_size, 1, 3750)\n unlabelled_data = unlabelled_data.view(batch_size, 1, 3750)\n\n return labelled_data, labelled_target, unlabelled_data", "def data_loader(root, batch_size=64):\n input_transform = get_transform()\n dataset = CustomDataset(root, input_transform)\n return data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=False)", "def load_data(batch_size=batch_size):\n trainset = LibriSpeechDataset(training_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds))\n testset = LibriSpeechDataset(validation_set, 
int(LIBRISPEECH_SAMPLING_RATE * n_seconds), stochastic=False)\n\n train_loader = DataLoader(trainset, batch_size=batch_size, num_workers=1, shuffle=True, drop_last=True)\n test_loader = DataLoader(testset, batch_size=1, num_workers=1, drop_last=True)\n\n return train_loader, test_loader", "def get_loader(data, json, batch_size, shuffle, num_workers):\n dataset = FinNumDataset(data, json)\n\n data_loader = torch.utils.data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate_fn)\n return data_loader", "def get_loader(dataset='train.txt', crop_size=128, image_size=28, batch_size=2, mode='train', num_workers=1): \n transform = [] \n if mode == 'train': \n transform.append(transforms.RandomHorizontalFlip()) \n transform.append(transforms.CenterCrop(crop_size)) \n transform.append(transforms.Resize(image_size)) \n transform.append(transforms.ToTensor()) \n transform.append(transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))) \n transform = transforms.Compose(transform) \n train_data=MyDataset(txt=dataset, transform=transform) \n data_loader = DataLoader(dataset=train_data, \n batch_size=batch_size, \n shuffle=(mode=='train'), \n num_workers=num_workers) \n return data_loader", "def dataloaders():\n # train data path\n data_train = '../dataset/train/'\n # set transformations\n train_transforms = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n \n train_data = datasets.ImageFolder(data_train, transform = train_transforms)\n trainloader = torch.utils.data.DataLoader(train_data, batch_size = 16, shuffle = True)\n \n return trainloader", "def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=True,\n num_workers=multiprocessing.cpu_count(),\n )", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def _load_dataset(self, data_path, augmentation, batch_size):\n if path.split(data_path)[1] == \"\":\n # Deal with edge case where there's a \"/\" at the end of the path.\n data_path = path.split(data_path)[0]\n\n if path.split(data_path)[1].endswith(\"training\"):\n dataset_name = \"training dataset\"\n else:\n dataset_name = \"validation dataset\"\n\n start_time = time.time()\n self._update_status(\"Loading {}.\".format(dataset_name))\n\n\n dataset = MapillaryDataset(data_path, augmentation, self.iaa)\n data_loader = DataLoader(dataset,\n batch_size,\n shuffle=True)\n\n self._update_status(\"{} loaded. 
({} ms)\".format(\n dataset_name.capitalize(),\n int((time.time() - start_time) * 1000)))\n\n return data_loader", "def test_cached_dataloader(self):\n\n v = [\"data\", \"target\", \"model_out_sqnet\"]\n\n for data, target in self.train_loader:\n b, c, h, w = data[v[0]].shape\n assert data[v[1]].shape == (b, )\n assert data[v[2]].shape == (b, 100)\n assert data[v[1]].shape == target.shape", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def _get_batch_data(batch, ctx):\n data, label = batch\n return (mx.gluon.utils.split_and_load(data, ctx),\n mx.gluon.utils.split_and_load(label, ctx),\n data.shape[0])", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def load_batch(self):\r\n\r\n #if we've seen all the data, start again with them in a new random order\r\n if self.batchcounter+self.batchsize > self.num_data:\r\n self.batchcounter = 0\r\n self.epochs += 1\r\n self._permutation = np.random.permutation(self.num_data)\r\n\r\n this_perm = self._permutation[self.batchcounter:self.batchcounter+self.batchsize]\r\n\r\n self.X_batch = self.X[this_perm]\r\n self.likelihood.set_data(self.Y[this_perm])\r\n if self.has_uncertain_inputs:\r\n self.X_variance_batch = self.X_variance[this_perm]\r\n\r\n self.batchcounter += self.batchsize\r\n\r\n self.data_prop = float(self.batchsize)/self.num_data\r\n\r\n self._compute_kernel_matrices()\r\n self._computations()", "def load_data(dataset, root, batch_size, workers):\n # Data transform\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n train_transform = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n query_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])\n\n # Construct data loader\n index = dataset.index(\"IF\")\n sub = 
dataset[index:]\n if sub == 'IF100':\n train_dir = os.path.join(root, 'train-IF100')\n elif sub == 'IF50':\n train_dir = os.path.join(root, 'train-IF50')\n elif sub == 'IF20':\n train_dir = os.path.join(root, 'train-IF20')\n elif sub == 'IF10':\n train_dir = os.path.join(root, 'train-IF10')\n elif sub == 'IF1':\n train_dir = os.path.join(root, 'train-IF1')\n else:\n print('train path error')\n return\n # train_dir = os.path.join(root, 'train')\n query_dir = os.path.join(root, 'query')\n database_dir = os.path.join(root, 'database')\n\n train_dataset = ImagenetDataset(\n train_dir,\n transform=train_transform,\n targets_transform=Onehot(100),\n )\n\n train_dataloader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=workers,\n pin_memory=True,\n )\n\n query_dataset = ImagenetDataset(\n query_dir,\n transform=query_transform,\n targets_transform=Onehot(100),\n )\n\n query_dataloader = DataLoader(\n query_dataset,\n batch_size=batch_size,\n num_workers=workers,\n pin_memory=True,\n )\n\n database_dataset = ImagenetDataset(\n database_dir,\n transform=query_transform,\n targets_transform=Onehot(100),\n )\n\n database_dataloader = DataLoader(\n database_dataset,\n batch_size=batch_size,\n num_workers=workers,\n pin_memory=True,\n )\n\n return train_dataloader, query_dataloader, database_dataloader", "def get_dataloaders(args):\n if args.dataset == 'heat':\n dataset_class = heat.HeatDiffusionDataset\n else:\n raise ValueError(f'Unknown dataset {args.dataset}')\n train_dataset = dataset_class(\n dataset_class.get_train_path(args.data_path), args, train=True)\n if args.dist:\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset)\n else:\n train_sampler = torch.utils.data.RandomSampler(train_dataset)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, num_workers=args.workers,\n sampler=train_sampler, pin_memory=True, drop_last=args.drop_last)\n if not args.no_eval:\n validation_dataset = dataset_class(\n dataset_class.get_validation_path(args.data_path), args, train=False)\n if args.dist:\n validation_sampler = torch.utils.data.distributed.DistributedSampler(\n validation_dataset, shuffle=False)\n else:\n validation_sampler = torch.utils.data.SequentialSampler(\n validation_dataset)\n validation_loader = torch.utils.data.DataLoader(\n validation_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=validation_sampler,\n pin_memory=True, drop_last=args.drop_last)\n\n test_dataset = dataset_class(\n dataset_class.get_test_path(args.data_path), args, train=False)\n if args.dist:\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, shuffle=False)\n else:\n test_sampler = torch.utils.data.SequentialSampler(\n test_dataset)\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=test_sampler,\n pin_memory=True, drop_last=args.drop_last)\n else:\n validation_loader = None\n test_loader = None\n\n # Update the data shape if needed.\n if args.data_shape is None:\n args.data_shape = train_dataset.get_shape()\n if args.data_target_shape is None:\n args.data_target_shape = train_dataset.get_target_shape()\n\n return train_loader, validation_loader, test_loader", "def train_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_train, **self.dl_kwargs)", "def _custom_data_loader(self) -> DataLoader:\n dataloaders = DataLoader(self.dataset, 
batch_size=1)\n return dataloaders", "def create_dataloader(data):\r\n input_ids = torch.LongTensor([sent['input_ids'] for sent in data])\r\n input_mask = torch.LongTensor([sent['input_mask'] for sent in data])\r\n segment_ids = torch.LongTensor([sent['segment_ids'] for sent in data])\r\n label_ids = torch.LongTensor([sent['label_ids'] for sent in data])\r\n\r\n dataset = TensorDataset(input_ids, input_mask, segment_ids, label_ids)\r\n\r\n train_sampler = RandomSampler(dataset)\r\n dataloader = DataLoader(dataset, sampler=train_sampler, batch_size=BATCH_SIZE)\r\n\r\n return dataloader", "def build_training_data_loader(self) -> DataLoader:\n pass", "def get_train_loader(batch_size, train_set, train_sampler):\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, sampler=train_sampler, num_workers=4)\n\n return train_loader", "def batch_fit(self, data_loader: torch.utils.data.DataLoader, epochs: int):\n pass", "def get_loaders(img_size=CONFIG[\"matrix_size\"], batch_size=CONFIG[\"batch_size\"],\n used_keypoints=CONFIG[\"used_keypoints\"], interpolation_frames=CONFIG[\"interpolation_frames\"],\n noise_frames=CONFIG[\"noise_frames\"], all_data=None, all_labels=None):\n\n if all_data is None or all_labels is None:\n all_data, all_labels = load_video_data_labels(interpolation_frames, noise_frames, used_keypoints, img_size)\n\n p = np.random.permutation(len(all_data))\n train_len = int(len(p) / 80)\n others_len = int((len(p) - train_len) / 2)\n\n train_data, train_labels = all_data[p[:train_len]], all_labels[p[:train_len]]\n val_data = all_data[p[train_len:train_len + others_len]]\n val_labels = all_labels[p[train_len:train_len + others_len]]\n test_data, test_labels = all_data[p[-others_len:]], all_labels[p[-others_len:]]\n\n # Transform to tensor\n train_data_tensor, train_labels_tensor = torch.from_numpy(train_data), torch.from_numpy(train_labels)\n val_data_tensor, val_labels_tensor = torch.from_numpy(val_data), torch.from_numpy(val_labels)\n test_data_tensor, test_labels_tensor = torch.from_numpy(test_data), torch.from_numpy(test_labels)\n\n # Data Loader for easy mini-batch return in training, load the Dataset from the numpy arrays\n train_loader = DataLoader(TensorDataset(train_data_tensor, train_labels_tensor), batch_size=batch_size)\n val_loader = DataLoader(TensorDataset(val_data_tensor, val_labels_tensor), batch_size=batch_size)\n test_loader = DataLoader(TensorDataset(test_data_tensor, test_labels_tensor), batch_size=batch_size)\n\n data = {\"train_data\": train_data,\n \"train_labels\": train_labels,\n \"val_data\": val_data,\n \"val_labels\": val_labels,\n \"test_data\": test_data,\n \"test_labels\": test_labels,\n \"all_data\": all_data[p],\n \"all_labels\": all_labels[p]}\n\n return data, train_loader, val_loader, test_loader", "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader", "def data_loader(data, train=True):\n\n loader_config = {\n 'batch_size':64,\n 'shuffle':train\n }\n \n return torch.utils.data.DataLoader(data, **loader_config)", "def train_dataloader(self, batch_size: Optional[int] = None) -> DataLoader:\n dataset = TabularDataset(\n task=self.config.task,\n data=self.train,\n 
categorical_cols=self.config.categorical_cols,\n continuous_cols=self.config.continuous_cols,\n embed_categorical=(not self.do_leave_one_out_encoder()),\n target=self.target,\n )\n return DataLoader(\n dataset,\n batch_size if batch_size is not None else self.batch_size,\n shuffle=True if self.train_sampler is None else False,\n num_workers=self.config.num_workers,\n sampler=self.train_sampler,\n pin_memory=self.config.pin_memory,\n )", "def split_and_load(batch_data, num_gpus):\n return [batch_data[i].data[0] for i in range(num_gpus)], \\\n [batch_data[i].label[0].as_in_context(mx.gpu(i)) for i in range(num_gpus)]", "def __init__(self, dataset, batch_size, n_threads=4,\n\t ten_crop=False, data_path='/home/dataset/', logger=None):\n\t\tself.dataset = dataset\n\t\tself.batch_size = batch_size\n\t\tself.n_threads = n_threads\n\t\tself.ten_crop = ten_crop\n\t\tself.data_path = data_path\n\t\tself.logger = logger\n\t\tself.dataset_root = data_path\n\t\t\n\t\tself.logger.info(\"|===>Creating data loader for \" + self.dataset)\n\t\t\n\t\tif self.dataset in [\"cifar100\"]:\n\t\t\tself.train_loader, self.test_loader = self.cifar(\n\t\t\t\tdataset=self.dataset)\n\n\t\telif self.dataset in [\"cifar10\"]:\n\t\t\tself.train_loader, self.test_loader = self.cifar(\n dataset=self.dataset)\n\t\t\n\t\telif self.dataset in [\"imagenet\"]:\n\t\t\tself.train_loader, self.test_loader = self.imagenet(\n\t\t\t\tdataset=self.dataset)\n\t\telse:\n\t\t\tassert False, \"invalid data set\"", "def batch_loader(data_set: Union[IterableDataset, Dataset],\n batch_size: bool,\n shuffle=False) -> DataLoader:\n return DataLoader(\n data_set,\n batch_size=batch_size,\n collate_fn=lambda x: x,\n shuffle=shuffle\n )", "def data_loader(self, dataset=None, shuffle=True, size=None):\n self.log.info(\"Loading data\")\n assert dataset is not None, \"Please provide a dataset (folder name)\"\n data_path = os.path.join(config.DATA_DIR, dataset)\n # Load dataset using pickle\n with open(data_path, 'rb') as file:\n image_paths, labels = pickle.load(file)\n one_hot_labels = []\n images = []\n for i, (path, label) in enumerate(zip(image_paths, labels)):\n try:\n # One-hot encode the vectors\n one_hot = [0, 0, 0, 0]\n one_hot[label] = 1.0\n # Clean up image, normalize (mean to 0, std to 1)\n image = Image.open(path)\n image = np.asarray(image, dtype=np.float32) / 255\n except: # If there is some issue reading in data, skip datapoint\n continue\n one_hot_labels.append(np.asarray(one_hot, dtype=np.float32))\n images.append(image)\n # Shuffle data before cutting it into test and src\n if shuffle:\n x = list(zip(images, one_hot_labels))\n random.shuffle(x)\n images, one_hot_labels = zip(*x)\n self.log.info(\"Separating data into test and src.\")\n split_idx = int(config.TRAIN_TEST_SPLIT * len(one_hot_labels))\n train_input = images[:split_idx]\n train_target = one_hot_labels[:split_idx]\n test_input = images[split_idx:]\n test_target = one_hot_labels[split_idx:]\n if size:\n assert size < len(train_input), \"Final dataset size too big, not enough data\"\n train_input = train_input[:size]\n train_target = train_target[:size]\n self.log.info(\" -- test : {}\".format(len(test_target)))\n self.log.info(\" -- src: {}\".format(len(train_target)))\n # Convert to nparray before sending over\n return np.array(train_input), \\\n np.array(train_target), \\\n np.array(test_input), \\\n np.array(test_target)", "def load_data(root, num_seen, batch_size, num_workers):\n CIFAR10.init(root, num_seen)\n query_dataset = CIFAR10('query', 
transform=query_transform())\n seen_dataset = CIFAR10('seen', transform=train_transform())\n unseen_dataset = CIFAR10('unseen', transform=train_transform())\n retrieval_dataset = CIFAR10('retrieval', transform=train_transform())\n\n query_dataloader = DataLoader(\n query_dataset,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n seen_dataloader = DataLoader(\n seen_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n unseen_dataloader = DataLoader(\n unseen_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n retrieval_dataloader = DataLoader(\n retrieval_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n return query_dataloader, seen_dataloader, unseen_dataloader, retrieval_dataloader", "def load_data():\n global batch_size, num_batches\n # import data\n data, labels = original_clean()\n test_data = data[:test_size, :]\n test_labels = labels[:test_size]\n\n data = data[test_size:, :]\n\n # make landmarks with points with most neighbors\n N = NearestNeighbors(n_neighbors=k_start).fit(data).kneighbors_graph(data).todense()\n N = np.array(N)\n num_connections = N.sum(axis=0).argsort()[::-1] # see how many neighbors each point has\n top_landmarks_idxs = num_connections[:num_lm] # sort in descending order\n land_marks = data[top_landmarks_idxs, :] # pick the top ones\n data = np.delete(data, top_landmarks_idxs, axis=0) # delete the landmarks\n # find the nearest landmarks for the landmarks\n landmark_neighbors = NearestNeighbors(n_neighbors=k_lm).fit(land_marks).kneighbors_graph(land_marks).todense()\n # break data into batches, create empty holders\n batch_loader = np.zeros((num_batches, batch_size + num_lm, n))\n batch_graph = np.zeros((num_batches, batch_size + num_lm, batch_size + num_lm))\n # create the full neighborhood graph for each batch\n for i in range(num_batches):\n holder = data[batch_size * i: batch_size * (i + 1)]\n # find the nearest landmarks for the rest of the points\n holder_graph = NearestNeighbors(n_neighbors=k_other).fit(land_marks).kneighbors_graph(holder).todense()\n for j in range(batch_size): # copy over the holder graph\n for l in range(num_lm):\n if holder_graph[j, l] == 1:\n batch_graph[i, j, l + batch_size] = 1\n batch_graph[i, l + batch_size, j] = 1\n for j in range(num_lm): # copy over landmark neighbors\n for l in range(j, num_lm):\n if landmark_neighbors[j, l] == 1 and j != l:\n batch_graph[i, j + batch_size, l + batch_size] = 1\n batch_graph[i, l + batch_size, j + batch_size] = 1\n holder = np.concatenate((holder, land_marks))\n batch_loader[i] = holder\n batch_size += num_lm # adjust the batch size\n return batch_loader, data, batch_graph, landmark_neighbors, test_data, test_labels, land_marks", "def load_data(data_feeder):\n return data_feeder(BATCH_SIZE,\n SEQ_LEN,\n OVERLAP,\n Q_LEVELS,\n Q_ZERO,\n Q_TYPE)", "def get_data_loaders(args, tokenizer):\n personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache, args.train_lang)\n _ = personachat.pop(\"test\", None)\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": [], \"valid\": []}\n\n if args.train_lang in [\"En\", \"Fr\", \"It\", \"Id\", \"Jp\", \"Ko\", \"Zh\"]: #monolingual data\n for dataset_name, dataset in personachat.items():\n for dial in dataset[args.train_lang]: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], 
dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lm_labels = True)\n datasets[dataset_name].append(instance) \n else: #multilingual data\n for dataset_name, dataset in personachat.items():\n for lang, dials in dataset.items():\n for dial in dials: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lang_id=\"<{}>\".format(lang.lower()), lm_labels = True)\n datasets[dataset_name].append(instance) #all langs together\n\n\n logger.info(\"Build train and validation dataloaders\")\n train_dataset = DatasetTrain(datasets[\"train\"])\n valid_dataset = DatasetTrain(datasets[\"valid\"])\n\n #logger.info(\"Build train and validation dataloaders\")\n #train_dataset, valid_dataset = TensorDataset(*tensor_datasets[\"train\"]), TensorDataset(*tensor_datasets[\"valid\"])\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None\n valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None\n train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, shuffle=(not args.distributed), collate_fn=collate_fn)\n valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.valid_batch_size, shuffle=False, collate_fn=collate_fn)\n\n # logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[0].shape))\n # #logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[1].shape))\n # logger.info(\"Valid dataset (Batch, Candidates, Seq length): {}\".format(valid_dataset.tensors[0].shape))\n logger.info(\"Train dataset length: {}\".format(len(train_dataset)))\n logger.info(\"Valid dataset length: {}\".format(len(valid_dataset)))\n return train_loader, valid_loader, train_sampler, valid_sampler", "def load_torch_data(load_data_func):\n\n def torch_loader(dataset, data_path, batch_size, shuffle=True, cuda_device=None, num_workers=1):\n (train_data, val_data), (train_labels, val_labels), label_names = load_data_func(dataset, data_path)\n\n kwargs = {'num_workers': num_workers, 'pin_memory': True} if cuda_device is not None else {}\n kwargs['drop_last'] = True\n\n if type(train_data) == numpy.ndarray:\n train_dataset = TensorDataset(torch.from_numpy(train_data), torch.from_numpy(train_labels))\n val_dataset = TensorDataset(torch.from_numpy(val_data), torch.from_numpy(val_labels))\n elif type(train_data) == scipy.sparse.csr.csr_matrix:\n from sklearn.feature_extraction.text import TfidfTransformer\n tfidf_trans = TfidfTransformer(norm=None)\n tfidf_trans.fit(train_data)\n train_dataset = SparseDataset(train_data, tfidf_trans.idf_)\n val_dataset = SparseDataset(val_data, tfidf_trans.idf_)\n else:\n train_dataset = torchvision.datasets.ImageFolder(train_data)\n val_dataset = torchvision.datasets.ImageFolder(val_data)\n\n train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, **kwargs)\n val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, **kwargs)\n\n return train_loader, val_loader, label_names\n\n return torch_loader", "def create_dataset_sampler_loader(file_path, cuda, batch_size, hvd):\n # When supported, use 'forkserver' to spawn dataloader workers\n # instead of 'fork' to prevent issues with Infiniband implementations\n # that are not fork-safe.\n kwargs = {'num_workers': 1, 'pin_memory': True} 
if cuda else {}\n if (kwargs.get('num_workers', 0) > 0 and hasattr(mp, '_supports_context')\n and 'forkserver' in mp.get_all_start_methods()):\n kwargs['multiprocessing_context'] = 'forkserver'\n\n # create dataset\n dataset = MNISTDataset(file_path)\n # Horovod: use DistributedSampler to partition the training data\n sampler = Data.distributed.DistributedSampler(\n dataset, num_replicas=hvd.size(), rank=hvd.rank())\n loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, sampler=sampler, **kwargs)\n return dataset, sampler, loader", "def get_data_loaders(data, batch_size, ratio=0.8, num_workers=1):\n train_size = int(len(data) * ratio)\n val_size = len(data) - train_size\n train_set, val_set = random_split(data, [train_size, val_size])\n data_train_loader = DataLoader(train_set, batch_size=batch_size, num_workers=num_workers, shuffle=True)\n data_val_loader = DataLoader(val_set, batch_size=batch_size, num_workers=num_workers, shuffle=True)\n return data_train_loader, data_val_loader", "def _init_train_loader(self):\n # Choose the right dataset type\n if self.config_args[\"num_members\"] > 1:\n class_dataset_wrapper = dataset_wrapper.MixMoDataset\n else:\n class_dataset_wrapper = dataset_wrapper.MSDADataset\n\n # Load augmentations\n self.traindatasetwrapper = class_dataset_wrapper(\n dataset=self.train_dataset,\n num_classes=int(self.config_args[\"data\"][\"num_classes\"]),\n num_members=self.config_args[\"num_members\"],\n dict_config=self.config_args[\"training\"][\"dataset_wrapper\"],\n properties=self.properties\n )\n\n # Build standard sampler\n _train_sampler = torch.utils.data.sampler.RandomSampler(\n data_source=self.traindatasetwrapper, ## only needed for its length\n num_samples=None,\n replacement=False,\n )\n\n # Wrap it with the repeating sampler used for multi-input models\n batch_sampler = batch_repetition_sampler.BatchRepetitionSampler(\n sampler=_train_sampler,\n batch_size=self.batch_size,\n num_members=self.config_args[\"num_members\"],\n drop_last=True,\n config_batch_sampler=self.config_args[\"training\"][\"batch_sampler\"]\n )\n\n self.train_loader = torch.utils.data.DataLoader(\n self.traindatasetwrapper,\n batch_sampler=batch_sampler,\n num_workers=self.num_workers,\n batch_size=1,\n shuffle=False,\n sampler=None,\n drop_last=False,\n pin_memory=True,\n )", "def get_loader(path, batch_size, device, directory_name):\n if str(device) == \"cpu\":\n path = \"/home/amit/PycharmProjects/ML_Project_1/nsynth-test\"\n # audio samples are loaded as an int16 numpy array\n # rescale intensity range as float [-1, 1]\n toSelectCols = transforms.Lambda(lambda x: x[0:16000])\n toFloat = transforms.Lambda(lambda x: x / np.iinfo(np.int16).max + 1)\n # use instrument_family and instrument_source as classification targets\n dataset = NSynth(\n path,\n transform=transforms.Compose([toSelectCols, toFloat]),\n blacklist_pattern=[\"synth_lead\"], # blacklist string instrument\n categorical_field_list=[\"instrument_family\", \"instrument_source\"])\n\n print(path, \"Length: \", len(dataset))\n plot_waveforms(dataset[0][0], \"1-D_audio_waveform.png\", \"1-D audio waveform\", directory_name)\n return dataset, torch_data.DataLoader(dataset, batch_size=batch_size, shuffle=True)", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n 
batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_data_loader_from_data(cls, batch_size, X, Y, **kwargs):\n X_torch = torch.from_numpy(X).float()\n\n if (\n \"classification_problem\" in kwargs\n and kwargs[\"classification_problem\"] == False\n ):\n Y_torch = 
torch.from_numpy(Y).float()\n else:\n Y_torch = torch.from_numpy(Y).long()\n dataset = TensorDataset(X_torch, Y_torch)\n kwargs.pop(\"classification_problem\", None)\n return DataLoader(dataset, batch_size=batch_size, **kwargs)", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def get_loader(config, image_path, crop_size, batch_size,sampler, num_workers=2, mode='train', augmentation_prob=0.4):\r\n\r\n dataset = ImageFolder(config)\r\n data_loader = data.DataLoader(dataset=dataset, batch_size=batch_size, num_workers=num_workers, sampler=sampler)\r\n \r\n return data_loader", "def load_batch(batch_name):\n data_dict = unpickle('./datasets/cifar-10-batches-py/' + batch_name)\n X = data_dict[b'data'] / 255\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).reshape(10000, 3072).transpose(1,0)\n y = data_dict[b'labels']\n Y = make_one_hot(y)\n return X, Y, y", "def prepare_data(self,d):\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters", "def _local_train(self, dataloader_with_memory, num_updates):\n # Local train\n _size = len(dataloader_with_memory)\n self.model = self.model.train()\n for _batch in range(num_updates):\n X, y = dataloader_with_memory.get_samples()\n X, y = X.to(self._device), y.to(self._device)\n if _batch == 0:\n # Initialize the batch-size using the first batch to avoid\n # edge cases with drop_last=False\n _batch_size = X.shape[0]\n _num_batches_per_epoch = (_size // _batch_size) + int(\n (_size % _batch_size) != 0\n )\n # Compute prediction and loss\n _pred = self.model(X)\n _loss = self._loss(_pred, y)\n\n # Backpropagation\n _loss.backward()\n self._optimizer.step()\n self._optimizer.zero_grad()\n self.num_batches_seen += 1\n _loss, _current_epoch = (\n _loss.item(),\n self.num_batches_seen // _num_batches_per_epoch,\n )\n\n if self.log:\n if _batch % self.log_period == 0:\n print(\n f\"loss: {_loss:>7f} after {self.num_batches_seen:>5d}\"\n f\" batches of data amounting to {_current_epoch:>5d}\"\n \" epochs.\"\n )\n self.writer.add_scalar(\n f\"client{self.client_id}/train/Loss\",\n _loss,\n self.num_batches_seen,\n )\n\n if _current_epoch > self.current_epoch:\n # At each epoch we look at the histograms of all the\n # 
network's parameters\n for name, p in self.model.named_parameters():\n self.writer.add_histogram(\n f\"client{self.client_id}/{name}\", p, _current_epoch\n )\n\n self.current_epoch = _current_epoch", "def batch(data_path):\n train, _, _ = get_datasets(\n data_path=data_path,\n nb_nodes=7,\n task_type=\"classification\",\n nb_classes=2,\n split=None,\n k_fold=None,\n seed=1234,\n )\n for batch in torch.utils.data.DataLoader(\n train, shuffle=False, batch_size=25, drop_last=False\n ):\n return batch", "def load_data(self, dataset_path: str,\n is_training: bool,\n epoch_num: int,\n batch_size: int,\n data_param: dict) -> (tf.Tensor, int):\n pass", "def get_data_loader (imgs_path, labels, extra_info=None, transform=None, params=None):\n\n\n dt = BuildDataset(imgs_path, labels, extra_info, transform)\n\n # Checking the params values. If it's not defined in params of if params is None, the default values are described\n # below:\n batch_size = 30\n shuf = True\n num_workers = 4\n pin_memory = True\n\n # However, if the params is defined, we used the values described on it:\n if (params is not None):\n if ('batch_size' in params.keys()):\n batch_size = params['batch_size']\n if ('shuf' in params.keys()):\n shuf = params['shuf']\n if ('num_workers' in params.keys()):\n num_workers = params['num_workers']\n if ('pin_memory' in params.keys()):\n pin_memory = params['pin_memory']\n\n # Calling the dataloader\n dl = data.DataLoader (dataset=dt, batch_size=batch_size, shuffle=shuf, num_workers=num_workers,\n pin_memory=pin_memory)\n\n return dl", "def build_dataloaders(dataset, batch_size, train_test_split=0.1, train_shuffle=True, eval_shuffle=True):\n # 데이터셋 길이\n dataset_len = len(dataset)\n\n # 학습, 평가 데이터 나누기\n eval_len = int(dataset_len * train_test_split)\n train_len = dataset_len - eval_len\n\n train_dataset, eval_dataset = random_split(dataset, (train_len, eval_len))\n\n train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=train_shuffle)\n eval_loader = DataLoader(eval_dataset, batch_size=batch_size, shuffle=eval_shuffle)\n\n\n logging.info(f'''train_dataloader size: {len(train_loader.dataset)} | shuffle: {train_shuffle}\n eval_dataloader size: {len(eval_loader.dataset)} | shuffle: {eval_shuffle}''')\n\n return train_loader, eval_loader", "def torch_dataset_loader(dataset, batch_size, shuffle, kwargs):\n loader = DataLoader(TorchData(dataset),\n batch_size=batch_size,\n shuffle=shuffle,\n **kwargs)\n return loader", "def load_data(dataset_class, batch_size, shuffle=True, num_workers=4):\n loader = torch.utils.data.TensorDataset(dataset_class.data.float(),\n dataset_class.labels.long(),\n dataset_class.adjacent_matrix.float())\n\n loader_dataset = torch.utils.data.DataLoader(loader,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers)\n return loader_dataset", "def train_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def get_dataloader(dataset, batchsize):\n\n train_set_size = int(0.8 * dataset.__len__())\n train_indices = np.random.choice(np.arange(dataset.__len__()),\n train_set_size, replace=False)\n train_sampler = SubsetRandomSampler(train_indices)\n\n val_indices = np.setdiff1d(np.arange(dataset.__len__()),\n train_indices, assume_unique=True)\n val_sampler = SubsetRandomSampler(val_indices)\n\n trainloader = DataLoader(dataset, batch_size=batchsize,\n sampler=train_sampler, num_workers=2)\n valloader = DataLoader(dataset, batch_size=batchsize,\n sampler=val_sampler, num_workers=2)\n\n return trainloader, valloader", "def 
get_loader(csv_file, img_dir, image_size, batch_size, mode='val', dataset='vg'):\n\n if dataset == 'VG':\n \n transform = transforms.Compose([\n transforms.Resize(image_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n dataset = GeneratedVGDataset(csv_file, img_dir, transform)\n else:\n \traise Exception(\"currently only VG generated images dataset is provided \")\n\n shuffle = True if mode == 'train' else False\n \n data_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=4)\n\n return data_loader", "def get_loader(config):\n train_transform = [T.Resize((256, 128)), T.RandomHorizontalFlip(), T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]\n train_transform = T.Compose(train_transform)\n\n test_transform = [T.Resize((256, 128)), T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]\n test_transform = T.Compose(test_transform)\n\n # Datasets.\n if config.source_dataset in ['duke'] and config.target_dataset in ['market']:\n source_image_dir = config.duke_image_dir\n target_image_dir = config.market_image_dir\n elif config.source_dataset in ['market'] and config.target_dataset in ['duke']:\n source_image_dir = config.market_image_dir\n target_image_dir = config.duke_image_dir\n else:\n assert 'Dataset not support!'\n source_set = ReidDataset(source_image_dir, train_transform)\n target_set = ReidDataset(target_image_dir, train_transform, config.expanding_cam)\n test_set = ReidDataset(source_image_dir, test_transform)\n\n # Dataloader.\n source_loader = data.DataLoader(dataset=source_set, batch_size=config.batch_size,\n num_workers=config.num_workers, shuffle=True, pin_memory=True, drop_last=True)\n\n target_loader = data.DataLoader(dataset=target_set, batch_size=config.batch_size,\n num_workers=config.num_workers, shuffle=True, pin_memory=True, drop_last=True)\n\n test_loader = data.DataLoader(dataset=test_set, batch_size=config.batch_size, num_workers=config.num_workers,\n shuffle=False, pin_memory=True, drop_last=False)\n\n return {'source_loader': source_loader, 'target_loader': target_loader, 'test_loader': test_loader}", "def init_loaders(self, *args, **kwargs):\n\n # Convert the data to Dataset\n dataset_dict = self.init_datasets(*args, **kwargs)\n\n # If the Dataset implements collate_fn, that is used. 
Otherwise, default_collate is used\n if hasattr(dataset_dict[\"train\"], \"collate_fn\") and callable(\n getattr(dataset_dict[\"train\"], \"collate_fn\")\n ):\n collate_fn = dataset_dict[\"train\"].collate_fn\n else:\n collate_fn = default_collate\n\n # If 'iters_per_epoch' is defined, then a fixed number of random sample batches from the training set\n # are drawn per epoch.\n # Otherwise, an epoch is defined by a full run through all of the data in the dataloader.\n #\n if self.config_dict.get(\"iters_per_epoch\") is not None:\n num_samples = (\n self.config_dict[\"iters_per_epoch\"] * self.config_dict[\"batch_size\"]\n )\n loaders_dict = {}\n for key in dataset_dict.keys():\n if key == \"train\":\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_sampler=BatchSampler(\n RandomSampler(\n dataset_dict[key],\n replacement=True,\n num_samples=num_samples,\n ),\n batch_size=self.config_dict[\"batch_size\"],\n drop_last=False,\n ),\n collate_fn=collate_fn,\n )\n else:\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n else:\n loaders_dict = {\n key: DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n for key in data_dict.keys()\n }\n\n return loaders_dict", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def get_data_loader(target_classes, batch_size):\n classes = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n ########################################################################\n # The output of torchvision datasets are PILImage images of range [0, 1].\n # We transform them to Tensors of normalized range [-1, 1].\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n trainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\n # Get the list of indices to sample from\n relevant_train_indices = get_relevant_indices(\n trainset,\n classes,\n target_classes)\n # Split into train and validation\n np.random.seed(1000) # Fixed numpy random seed for reproducible shuffling\n np.random.shuffle(relevant_train_indices)\n split = int(len(relevant_train_indices) * 0.8)\n relevant_train_indices, relevant_val_indices = relevant_train_indices[:split], relevant_train_indices[split:]\n train_sampler = SubsetRandomSampler(relevant_train_indices)\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n num_workers=0, sampler=train_sampler)\n val_sampler = SubsetRandomSampler(relevant_val_indices)\n val_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n num_workers=0, sampler=val_sampler)\n testset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=transform)\n relevant_test_indices = get_relevant_indices(testset, classes, target_classes)\n test_sampler = SubsetRandomSampler(relevant_test_indices)\n test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size,\n num_workers=0, sampler=test_sampler)\n return train_loader, val_loader, test_loader, 
classes", "def load_data(is_train, num_par=4):\n if is_train:\n src = FLAGS.train_data_path\n else:\n src = FLAGS.dev_data_path\n\n if src is None:\n raise ValueError(\"Missing data path\")\n\n if FLAGS.dataset == \"boolq\":\n return load_boolq_file(src, num_par)\n else:\n return load_nli_file(src, num_par)", "def get_dataloader(data_folder, model_name, data_name, size=\"default\"):\n training_set = None\n validation_set = None\n\n if model_name == \"Howe_Patterson\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder)\n validation_set = Dataset_full(partition['validation'], data_folder)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"Deep_Sleep\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, 
down_sample_annotation=False)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n # TODO combined dataset https://discuss.pytorch.org/t/train-simultaneously-on-two-datasets/649/17\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"ConvNet_IID\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition_IID_windows.pkl')))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition_IID_windows.pkl'))\n if data_name == \"SHHS\":\n training_set = Dataset_IID_window_SHHS(partition['train'], data_folder)\n validation_set = Dataset_IID_window_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_IID_window(partition['train'], data_folder)\n validation_set = Dataset_IID_window(partition['validation'], data_folder)\n elif data_name == \"philips\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_IID_window(partition[0]['train'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['train'], data_folder[1]))\n validation_set = ConcatDataset(\n Dataset_IID_window(partition[0]['validation'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['validation'], data_folder[1]))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n\n else:\n print(\"{} wrong model for dataloader\".format(model_name))\n exit()\n\n return training_set, validation_set", "def make_loader(dataset, train_batch_size, validation_split=0.2):\n # number of samples in train and test set\n train_len = int(len(dataset) * (1 - validation_split))\n test_len = len(dataset) - train_len\n train_set, test_set = torch.utils.data.random_split(dataset, [train_len, test_len])\n # create train_loader\n train_loader = torch.utils.data.DataLoader(\n train_set, batch_size=train_batch_size, shuffle=True,\n )\n # create test_loader\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False,)\n return train_loader, test_loader", "def get_data_loaders(img_dir, img_height, img_width, batch_size=8):\n total_count = sum([len(files) for r, d, files in os.walk(img_dir)])\n\n data_transform = torchvision.transforms.Compose(\n [\n transforms.Resize((img_height, img_width)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ]\n )\n \n # build a dataset of images from the img_dir directory\n im_folder = torchvision.datasets.ImageFolder(img_dir, transform=data_transform)\n 
model_dataset = td.datasets.WrapDataset(im_folder)\n\n dataset_loader = torch.utils.data.DataLoader(model_dataset, batch_size=batch_size)\n\n return dataset_loader, total_count", "def fit(self, data_loader):\n train_data, valid_data = data_loader.load()\n\n self.compile(self.optimizer, self.loss)\n super().fit(\n x=train_data,\n validation_data=valid_data,\n validation_steps=32, # validate 32 batches at a time\n validation_freq=1, # validate every 1 epoch\n epochs=self.hparams.num_epochs,\n shuffle=False, # dataset instances already handle shuffling\n )\n self.save()", "def __init__(self, data_path):\r\n\t\tfile_names = ['data_batch_%d' % i for i in range(1,6)]\r\n\t\tfile_names.append('test_batch')\r\n\r\n\t\tX = []\r\n\t\ty = []\r\n\t\tfor file_name in file_names:\r\n\t\t\twith open(data_path + file_name) as fin:\r\n\t\t\t\tdata_dict = cPickle.load(fin)\r\n\t\t\tX.append(data_dict['data'].ravel())\r\n\t\t\ty = y + data_dict['labels']\r\n\r\n\t\tself.X = np.asarray(X).reshape(60000, 32*32*3)\r\n\t\tself.y = np.asarray(y)\r\n\r\n\t\tfin = open(data_path + 'batches.meta')\r\n\t\tself.LABEL_NAMES = cPickle.load(fin)['label_names']\r\n\t\tfin.close()", "def load_preprocess_training_batch(batch_id, batch_size):\n path, dataset = select_dataset(training = True)\n data = dataset_lib.get_data(batch_id, dataset=dataset, path=path)\n features = [np.array(x[1]) for x in data]\n labels = np.array([x[0] for x in data])\n\n # Return the training data in batches of size <batch_size> or less\n return batch_features_labels(features, labels, batch_size)", "def get_data_loader(image_type, image_dir, \n image_size=128, batch_size=16, num_workers=0):\n \n # resize and normalize the images\n transform = transforms.Compose([transforms.Resize(image_size), # resize to 128x128\n transforms.ToTensor()])\n\n # get training and test directories\n image_path = './' + image_dir\n train_path = os.path.join(image_path, image_type)\n test_path = os.path.join(image_path, 'test_{}'.format(image_type))\n\n # define datasets using ImageFolder\n train_dataset = datasets.ImageFolder(train_path, transform)\n test_dataset = datasets.ImageFolder(test_path, transform)\n\n # create and return DataLoaders\n train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)\n test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)\n\n return train_loader, test_loader", "def create_loader(self):\n # load data to memory.\n if self.is_cifar100:\n (x_train, y_train), (x_test,\n y_test) = tf.keras.datasets.cifar100.load_data()\n else:\n (x_train, y_train), (x_test,\n y_test) = tf.keras.datasets.cifar10.load_data()\n\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n\n x_train, y_train = shuffle_dataset(x_train, y_train)\n n_probe = int(math.floor(x_train.shape[0] * FLAGS.probe_dataset_hold_ratio))\n\n # TODO(zizhaoz): add other noise types.\n if 'asymmetric' in self.dataset_name:\n assert 'cifar100' not in self.dataset_name, 'Asymmetric only has CIFAR10'\n (x_train, y_train, y_gold), (x_probe, y_probe) = load_asymmetric(\n x_train,\n y_train,\n noise_ratio=self.noise_ratio,\n n_val=n_probe,\n random_seed=FLAGS.seed)\n elif 'uniform' in self.dataset_name:\n (x_train, y_train, y_gold), (x_probe,\n y_probe) = load_train_val_uniform_noise(\n x_train,\n y_train,\n n_classes=self.num_classes,\n noise_ratio=self.noise_ratio,\n n_val=n_probe)\n else:\n assert self.dataset_name in ['cifar10', 'cifar100']\n\n if not 
self.split_probe and x_probe is not None:\n # Usually used for supervised comparison.\n tf.logging.info('Merge train and probe')\n x_train = np.concatenate([x_train, x_probe], axis=0)\n y_train = np.concatenate([y_train, y_probe], axis=0)\n y_gold = np.concatenate([y_gold, y_probe], axis=0)\n\n conf_mat = sklearn_metrics.confusion_matrix(y_gold, y_train)\n conf_mat = conf_mat / np.sum(conf_mat, axis=1, keepdims=True)\n tf.logging.info('Corrupted confusion matirx\\n {}'.format(conf_mat))\n x_test, y_test = shuffle_dataset(x_test, y_test)\n self.train_dataset_size = x_train.shape[0]\n self.val_dataset_size = x_test.shape[0]\n if self.split_probe:\n self.probe_size = x_probe.shape[0]\n\n input_tuple = (x_train, y_train.squeeze())\n self.train_dataflow = self.create_ds(input_tuple, is_train=True)\n self.val_dataflow = self.create_ds((x_test, y_test.squeeze()),\n is_train=False)\n if self.split_probe:\n self.probe_dataflow = self.create_ds((x_probe, y_probe.squeeze()),\n is_train=True)\n\n tf.logging.info('Init [{}] dataset loader'.format(self.dataset_name))\n verbose_data('train', x_train, y_train)\n verbose_data('test', x_test, y_test)\n if self.split_probe:\n verbose_data('probe', x_probe, y_probe)\n\n return self", "def train_dataloader(self):\r\n\r\n # transformation\r\n train_transform = Compose(\r\n [\r\n ApplyTransformToKey(\r\n key='video',\r\n transform=Compose(\r\n [\r\n UniformTemporalSubsample(8),\r\n Lambda(lambda x: x / 255.0),\r\n Normalize((0.45, 0.45, 0.45), (0.225, 0.225, 0.225)),\r\n RandomShortSideScale(min_size=256, max_size=320),\r\n RandomCrop(244),\r\n RandomHorizontalFlip(p=0.5),\r\n ]\r\n )\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = pv.data.Kinetics(\r\n data_path=os.path.join(self._DATA_PATH, \"train\"),\r\n clip_sampler=pv.data.make_clip_sampler(\"random\", self._CLIP_DURATION),\r\n decode_audio=False,\r\n transform=train_transform\r\n )\r\n return torch.utils.data.DataLoader(\r\n train_dataset,\r\n batch_size=self._BATCH_SIZE,\r\n num_workers=self._NUM_WORKERS,\r\n )", "def data_batch(self, batch_size, input_size, seed=None):\n listing = self.listing\n if seed:\n listing, _ = train_test_split(self.listing, random_state=seed, test_size=0.25)\n image_list = [item + '_orig.jpg' for item in listing]\n label_list = [item + '_contour.png' for item in listing]\n image_files, label_files = tf.convert_to_tensor(image_list), tf.convert_to_tensor(label_list)\n queue = tf.train.slice_input_producer([image_files, label_files],\n shuffle=True)\n img_contents = tf.read_file(queue[0])\n label_contents = tf.read_file(queue[1])\n image = tf.image.decode_jpeg(img_contents, channels=3)\n label = tf.image.decode_png(label_contents, channels=1)\n image, label = default_image_prep(image, label, input_size)\n return tf.train.batch([image, label],\n batch_size=batch_size)", "def load(self, handler, name, size, \n batch_size=None, shuffle=False, \n sample_transform=None, batch_transform=None):\n if sample_transform is None:\n sample_transform = self.sample_transform\n if batch_transform is None:\n batch_transform = self.batch_transform\n dataset = DatasetIterator(name, size, handler, \n shuffle=shuffle,\n transform=sample_transform)\n if batch_size is None:\n return dataset\n batches = BatchIterator(dataset, \n batch_size=batch_size, \n transform=batch_transform)\n return batches", "def load_data_in_folder(self):\n if self.data_filenames:\n print('removing existing data files')\n for f in tqdm(self.data_filenames):\n os.remove(f)\n print('loading files in data folder')\n n = 
len(self.filenames)\n idx_max = n // self.batch_size\n for idx in tqdm(range(0, idx_max-1)):\n data = []\n for f in self.filenames[idx:idx+self.batch_size]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def make_standard_loader(self, dataset):\n return torch.utils.data.DataLoader(\n dataset,\n batch_size=self.batch_size,\n shuffle=False,\n drop_last=False,\n pin_memory=not (cfg.DEBUG > 0),\n num_workers=self.num_workers,\n )", "def train(self, data_loader):\n step = 0\n train_data, valid_data = data_loader()\n\n # Allow to call `next` builtin indefinitely.\n valid_data = iter(valid_data.repeat())\n\n for epoch in range(self.hparams.num_epochs):\n for x, y in train_data:\n\n with tf.GradientTape() as g:\n train_loss = self.loss(y, self(x))\n\n grads = g.gradient(train_loss, self.trainable_variables)\n self.optimizer.apply_gradients(zip(grads, self.trainable_variables))\n\n # Validate every 1000 training steps.\n if step % 1000 == 0:\n x, y = next(valid_data)\n valid_loss = self.loss(y, self(x))\n print(\n f\"step {step} (train_loss={train_loss} valid_loss={valid_loss})\"\n )\n step += 1\n\n print(f\"epoch {epoch} finished\")\n self.save()", "def get_driving_data_loaders(batch_size, train_dataset, valid_dataset, test_dataset, num_workers=0): \n\n valid_loader = DataLoader(dataset=valid_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=True)\n\n train_loader = DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n drop_last=True, \n shuffle=True)\n\n test_loader = DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=False)\n\n return train_loader, valid_loader, test_loader", "def get_loader(root_folder, batch_size=16, shuffle=False, num_workers=0, pin_memory=False):\n cal101_dset = get_dataset(root_folder) \n\n # train test split \n split_ratio = 0.2 \n dataset_size = len(cal101_dset)\n indices = np.arange(dataset_size)\n np.random.shuffle(indices)\n split = int(np.floor(split_ratio * dataset_size))\n train_indices, val_indices = indices[split:], indices[:split]\n\n train_sampler = data.SubsetRandomSampler(train_indices)\n valid_sampler = data.SubsetRandomSampler(val_indices) \n\n train_loader = data.DataLoader( cal101_dset, batch_size=batch_size, \n shuffle=shuffle,num_workers=num_workers, sampler=train_sampler, pin_memory=pin_memory)\n validation_loader = data.DataLoader(cal101_dset, batch_size=batch_size,\n shuffle=shuffle,num_workers=num_workers, sampler=valid_sampler, pin_memory=pin_memory)\n\n return train_loader, validation_loader", "def load_array(data_arrays, batch_size, is_train=True): #@save\n dataset = tf.data.Dataset.from_tensor_slices(data_arrays)\n if is_train:\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.batch(batch_size)\n return dataset", "def load_array(data_arrays, batch_size, is_train=True): #@save\n dataset = tf.data.Dataset.from_tensor_slices(data_arrays)\n if is_train:\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.batch(batch_size)\n return dataset", "def load_array(data_arrays, batch_size, is_train=True):\n dataset = data.TensorDataset(*data_arrays)\n return data.DataLoader(dataset, batch_size, shuffle=is_train)", "def 
data_loaders(dataset_path):\n dataset_path = dataset_path\n news_stock_dataset = NewsStockDataLoader(dataset_path)\n \n dataset_size = len(news_stock_dataset)\n indices = list(range(dataset_size))\n training_split = int(0.8 * dataset_size)\n validation_split = int(0.9 * dataset_size)\n\n np.random.seed(96)\n np.random.shuffle(indices)\n\n train_indices = indices[:training_split]\n valid_indices = indices[training_split:validation_split]\n test_indices = indices[validation_split:]\n\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(valid_indices)\n test_sampler = SubsetRandomSampler(test_indices)\n \n collate = PadSequence()\n\n training_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"training_batch_size\"),\n sampler = train_sampler,\n collate_fn = collate)\n\n validation_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"validation_batch_size\"),\n sampler = valid_sampler,\n collate_fn = collate)\n\n testing_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"testing_batch_size\"),\n sampler= test_sampler,\n collate_fn = collate)\n \n return training_loader, validation_loader, testing_loader", "def get_loader(sentences, conversation_length, sentence_length, vocab, batch_size=100, data=None, shuffle=True):\n\n def collate_fn(data):\n \"\"\"\n Collate list of data in to batch\n\n Args:\n data: list of tuple(source, target, conversation_length, source_length, target_length)\n Return:\n Batch of each feature\n - source (LongTensor): [batch_size, max_conversation_length, max_source_length]\n - target (LongTensor): [batch_size, max_conversation_length, max_source_length]\n - conversation_length (np.array): [batch_size]\n - source_length (LongTensor): [batch_size, max_conversation_length]\n \"\"\"\n # Sort by conversation length (descending order) to use 'pack_padded_sequence'\n data.sort(key=lambda x: x[1], reverse=True)\n\n # Separate\n sentences, conversation_length, sentence_length = zip(*data)\n\n # return sentences, conversation_length, sentence_length.tolist()\n return sentences, conversation_length, sentence_length\n\n dataset = DialogDataset(sentences, conversation_length,\n sentence_length, vocab, data=data)\n\n data_loader = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n collate_fn=collate_fn)\n\n return data_loader", "def data_loader(origin_data, batch_size, num_epochs=1):\n data = {}\n for key, value in origin_data.items():\n data[key] = np.copy(value)\n\n data_size = len(data['text_len'])\n num_batches_per_epoch = int((data_size-1)/batch_size) + 1\n\n for epoch in range(num_epochs):\n # shuffle the dataset at the begging of each epoch\n shuffle_indices = np.random.permutation(np.arange(data_size))\n for key, value in data.items():\n data[key] = value[shuffle_indices]\n\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n\n max_text_len = max(data['text_len'][start_index:end_index])\n\n yield (data['text'][start_index:end_index, :max_text_len],\n data['text_len'][start_index:end_index],\n data['label'][start_index:end_index],\n data['raw'][start_index:end_index])", "def create_loader(dataset: Dataset, cfg: trainer_configs.BaseDatasetConfig, batch_size: int, *,\r\n collate_fn: Optional[Callable[[List[Any]], Any]] = None) -> DataLoader:\r\n # return DataLoader(\r\n # dataset, batch_size=batch_size, 
num_workers=cfg.num_workers,\r\n # drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r\n return DataLoader(\r\n dataset, batch_size=batch_size, shuffle=cfg.shuffle, num_workers=cfg.num_workers,\r\n drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r", "def __init__(self, data_path, batch_size, **kwargs):\n super().__init__(batch_size, **kwargs)\n\n _, num_classes, X_train, y_train, X_val, y_val = load_cifar10_shard(shard_num=data_path, **kwargs)\n\n self.training_data_size = len(X_train)\n self.validation_data_size = len(X_val)\n self.num_classes = num_classes\n self.train_loader = self.create_loader(X=X_train, y=y_train, shuffle=True)\n self.val_loader = self.create_loader(X=X_val, y=y_val, shuffle=False)", "def get_placement_loader(\n foldername,\n dtype=\"train\",\n batch_size=1,\n sample_ratio=1.0,\n shuffle=True,\n stateless=True,\n augment=False,\n background_subtract=None,\n num_channels=2,\n radius=2,\n num_workers=4,\n use_cuda=True,\n):\n\n def _collate_fn(batch):\n \"\"\"A custom collate function.\n\n This is to support variable length suction labels.\n \"\"\"\n # imgs = [b[0] for b in batch]\n # labels = [b[1] for b in batch]\n # imgs = torch.stack(imgs, dim=0)\n # return [imgs, labels]\n imgs = [b[0] for b in batch]\n labels = [b[1] for b in batch]\n imgs = torch.cat(imgs, dim=0)\n labels = [l for sublist in labels for l in sublist]\n return [imgs, labels]\n\n num_workers = min(num_workers, multiprocessing.cpu_count())\n root = os.path.join(config.ml_data_dir, foldername, dtype)\n\n dataset = PlacementDataset(\n root,\n sample_ratio,\n stateless,\n augment,\n background_subtract,\n num_channels,\n radius,\n )\n\n loader = DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n collate_fn=_collate_fn,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n return loader", "def provide_data(self, batch_size):\n with tf.name_scope('inputs'):\n with tf.device('/cpu:0'):\n dataset = self.dataset.provide_dataset()\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.map(self._map_fn, num_parallel_calls=4)\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(1)\n\n iterator = tf.data.make_initializable_iterator(dataset)\n tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,\n iterator.initializer)\n\n data, one_hot_labels = iterator.get_next()\n data.set_shape([batch_size, None, None, None])\n one_hot_labels.set_shape([batch_size, None])\n return data, one_hot_labels", "def get_train_data(batch_size=8):\n transform_train = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomFlipLeftRight(),\n transforms.RandomColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n transforms.RandomLighting(0.1),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n ])\n\n img_folder, img_file = get_data_path()\n td = MultilabelDataset(data_folder=img_folder, data_file=img_file)\n train_data = DataLoader(td.transform_first(transform_train), batch_size=batch_size, shuffle=True)\n return train_data", "def _training(self, data_loader: torch.utils.data.DataLoader,\n data_size: int):\n\n self.model.train()\n total_loss = torch.Tensor([0])\n with tqdm(total=data_size//self.batch_size) as pbar:\n for _, ((row, col), val) in enumerate(data_loader):\n self.optimizer.zero_grad()\n\n row = row.long()\n if isinstance(col, list):\n col = tuple(c.long() for c in col)\n else:\n col = col.long()\n\n preds = self.model(row, col)\n loss = self.loss_function(preds)\n 
loss.backward()\n\n self.optimizer.step()\n\n total_loss += loss.item()\n batch_loss = loss.item() / row.size()[0]\n\n pbar.update(1)\n\n total_loss /= data_size\n return total_loss" ]
[ "0.7391102", "0.7355233", "0.7307113", "0.7191186", "0.7188778", "0.71511465", "0.7052301", "0.69866663", "0.6968007", "0.69573814", "0.6949311", "0.69279605", "0.6880958", "0.68485147", "0.6814167", "0.68017983", "0.6782323", "0.6779926", "0.67751354", "0.67704856", "0.6745013", "0.6740169", "0.6737448", "0.6736633", "0.6726307", "0.6723672", "0.6716955", "0.6709136", "0.66960186", "0.6678685", "0.6673559", "0.66705656", "0.664284", "0.6636292", "0.6608212", "0.66001356", "0.6593245", "0.65886694", "0.6574975", "0.65695554", "0.65671265", "0.6559682", "0.65487146", "0.65396124", "0.65396124", "0.65396124", "0.65396124", "0.65396124", "0.65396124", "0.65396124", "0.65396124", "0.65396124", "0.65366423", "0.6527227", "0.65202916", "0.6517068", "0.65111464", "0.65063065", "0.6497467", "0.64958894", "0.64943606", "0.6490334", "0.648695", "0.6485285", "0.6483524", "0.6483464", "0.6477901", "0.64706796", "0.64674634", "0.6465372", "0.6462955", "0.64504766", "0.6443212", "0.6441604", "0.6436795", "0.64363444", "0.64286256", "0.6415378", "0.6413892", "0.64127415", "0.640681", "0.64050835", "0.6398545", "0.63932335", "0.63883215", "0.63816106", "0.63814247", "0.6360945", "0.6357817", "0.6357817", "0.63565934", "0.6356431", "0.6352173", "0.63497007", "0.6342616", "0.6341671", "0.63410383", "0.6335064", "0.6325008", "0.6319916" ]
0.6592258
37
Build a looped dataloader with infinite size.
def _build_infinite_size_dataloader(dataloader):
    iterator = dataloader.__iter__()
    while True:
        try:
            yield iterator.__next__()
        except StopIteration:
            iterator = dataloader.__iter__()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, len):\n self.data = []\n i = 0\n while i < len:\n i += 1\n self.data.append(self.Data())\n self.length = len", "def __iter__(self):\n\t\tfor i, data in enumerate(self.dataloader):\n\t\t\tif i * self.opt.batch_size >= self.opt.max_dataset_size:\n\t\t\t\tbreak\n\t\t\tyield data", "def make_data(self, limit: int):", "def one_shot_iterator(dataloader):\n while True:\n for data in dataloader:\n yield data", "def __iter__(self):\n for i, data in enumerate(self.dataloader):\n if i * self.opt.batch_size >= self.opt.max_dataset_size:\n break\n yield data", "def __init__(self, size: int = 100):\n self.data = [None] * size\n self.size = size", "def dynamic(seq: List[int]):\n return Data._create_dataset(seq, pad=False)", "def create_dataloader(data):\r\n input_ids = torch.LongTensor([sent['input_ids'] for sent in data])\r\n input_mask = torch.LongTensor([sent['input_mask'] for sent in data])\r\n segment_ids = torch.LongTensor([sent['segment_ids'] for sent in data])\r\n label_ids = torch.LongTensor([sent['label_ids'] for sent in data])\r\n\r\n dataset = TensorDataset(input_ids, input_mask, segment_ids, label_ids)\r\n\r\n train_sampler = RandomSampler(dataset)\r\n dataloader = DataLoader(dataset, sampler=train_sampler, batch_size=BATCH_SIZE)\r\n\r\n return dataloader", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def data_generator(delta=1, batch_size=32):\n while True:\n yield generate_samples(delta=delta, n=batch_size)", "def training_start(self, dataloader):\n self.datasize = len(dataloader)", "def data_loader(\n self, batch_size: int = 1, iter_steps: int = 0, batch_as_list: bool = True\n ) -> DataLoader:\n data = self.data\n datasets = []\n\n for _, dat in data.items():\n datasets.append(dat.dataset())\n\n if len(datasets) < 1:\n raise FileNotFoundError(\n \"no datasets available for this model to create a loader from\"\n )\n\n return DataLoader(\n *datasets,\n batch_size=batch_size,\n iter_steps=iter_steps,\n batch_as_list=batch_as_list,\n )", "def open_dataset(dataset_path, batch_size, img_shape, infinite=True):\n dataset = generate_paths()\n\n dataset_gen = dataset_generator(\n dataset,\n batch_size=batch_size, infinite=infinite,\n img_shape=img_shape\n )\n steps = len(dataset) // batch_size\n return dataset_gen, steps", "def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=True,\n num_workers=multiprocessing.cpu_count(),\n )", "def build_dataloader(cfg, augmentor=None, mode='train', dataset=None, rank=None,\n dataset_class=VolumeDataset, dataset_options={}, cf=collate_fn_train):\n assert mode in ['train', 'val', 'test']\n print('Mode: ', mode)\n\n if mode == 
'train':\n batch_size = cfg.SOLVER.SAMPLES_PER_BATCH\n elif mode == 'val':\n batch_size = cfg.SOLVER.SAMPLES_PER_BATCH * 4\n else:\n cf = collate_fn_test # update the collate function\n batch_size = cfg.INFERENCE.SAMPLES_PER_BATCH * cfg.SYSTEM.NUM_GPUS\n\n if dataset is None: # no pre-defined dataset instance\n if cfg.MODEL.TARGET_OPT_MULTISEG_SPLIT is not None:\n dataset_class = VolumeDatasetMultiSeg\n dataset = get_dataset(cfg, augmentor, mode, rank, dataset_class, dataset_options)\n\n sampler = None\n num_workers = cfg.SYSTEM.NUM_CPUS\n if cfg.SYSTEM.DISTRIBUTED:\n num_workers = cfg.SYSTEM.NUM_CPUS // cfg.SYSTEM.NUM_GPUS\n if cfg.DATASET.DISTRIBUTED == False:\n sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n\n # In PyTorch, each worker will create a copy of the Dataset, so if the data\n # is preload the data, the memory usage should increase a lot.\n # https://discuss.pytorch.org/t/define-iterator-on-dataloader-is-very-slow/52238/2\n img_loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=False, collate_fn=cf,\n sampler=sampler, num_workers=num_workers, pin_memory=True)\n\n return img_loader", "def RandomDataloader(num_batches,\n batch_size,\n seq_width,\n min_len,\n max_len):\n for batch_num in range(num_batches):\n\n # All batches have the same sequence length\n seq_len = random.randint(min_len, max_len)\n seq = np.random.binomial(1, 0.5, (seq_len, batch_size, seq_width))\n seq = torch.from_numpy(seq)\n\n # The input includes an additional channel used for the delimiter\n inp = torch.zeros(seq_len + 1, batch_size, seq_width + 1)\n inp[:seq_len, :, :seq_width] = seq\n inp[seq_len, :, seq_width] = 1.0 # delimiter in our control channel\n outp = seq.clone()\n\n yield inp.float(), outp.float()", "def generate_dataset():\n num_list = 10\n return [generate_list() for _ in range(num_list)]", "def some_payloaded_data(length=1000000, size=32, var=0):\n for datum in some_simple_data(length):\n yield DataWithPayload(datum, some_payload(size, var))", "def _buffered_func(dataset, size):\n\n class _EndSignal(object):\n pass\n\n end = _EndSignal()\n\n def _read_worker(r, q):\n for d in r:\n q.put(d)\n q.put(end)\n\n def _data_reader():\n r = dataset()\n q = multiprocessing.Queue(maxsize=size)\n t = multiprocessing.Process(\n target=_read_worker, args=(\n r,\n q, ))\n t.daemon = True\n t.start()\n e = q.get()\n while e != end:\n yield e\n e = q.get()\n\n return _data_reader", "def load(self, handler, name, size, \n batch_size=None, shuffle=False, \n sample_transform=None, batch_transform=None):\n if sample_transform is None:\n sample_transform = self.sample_transform\n if batch_transform is None:\n batch_transform = self.batch_transform\n dataset = DatasetIterator(name, size, handler, \n shuffle=shuffle,\n transform=sample_transform)\n if batch_size is None:\n return dataset\n batches = BatchIterator(dataset, \n batch_size=batch_size, \n transform=batch_transform)\n return batches", "def _get_dataloader(samples, batch_size):\n print(\"Cogiendo dataloader\")\n return DataLoader(samples, shuffle=True, batch_size=batch_size)", "def build(self, block_size):", "def dynamic_batch(data, max_frames_in_batch=12000):\n buf = []\n longest_frames = 0\n for sample in data:\n assert \"feat\" in sample\n assert isinstance(sample[\"feat\"], torch.Tensor)\n new_sample_frames = sample[\"feat\"].size(0)\n longest_frames = max(longest_frames, new_sample_frames)\n frames_after_padding = longest_frames * (len(buf) + 1)\n if frames_after_padding > max_frames_in_batch:\n 
yield buf\n buf = [sample]\n longest_frames = new_sample_frames\n else:\n buf.append(sample)\n if len(buf) > 0:\n yield buf", "def __init__(self, size = 0):\n self.data = []\n self.size = size", "def train_dynamic(batch_size=10):\n \n return", "def static_batch(data, batch_size=16):\n buf = []\n for sample in data:\n buf.append(sample)\n if len(buf) >= batch_size:\n yield buf\n buf = []\n if len(buf) > 0:\n yield buf", "def batch(self, data, size):\n\n return [data[x : x + size] for x in range(0, len(data), size)]", "def init_batch(self, src):\n batch, bos = src.size(1), self.src_dict.get_bos()\n return src.data.new(1, batch).fill_(bos)", "def train_dataloader(self, batch_size: Optional[int] = None) -> DataLoader:\n dataset = TabularDataset(\n task=self.config.task,\n data=self.train,\n categorical_cols=self.config.categorical_cols,\n continuous_cols=self.config.continuous_cols,\n embed_categorical=(not self.do_leave_one_out_encoder()),\n target=self.target,\n )\n return DataLoader(\n dataset,\n batch_size if batch_size is not None else self.batch_size,\n shuffle=True if self.train_sampler is None else False,\n num_workers=self.config.num_workers,\n sampler=self.train_sampler,\n pin_memory=self.config.pin_memory,\n )", "def get_data_loader(batch_size=10, num_workers=2):\n \n data_loader = torch.utils.data.DataLoader(dataset=TempuckeyDataSet(),\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate)\n return data_loader", "def repeater(data_loader):\n for loader in itertools.repeat(data_loader):\n for data in loader:\n yield data", "def data_gen(voc_size, batch, nbatches, seq_len = 15):\r\n for i in range(nbatches):\r\n # (batch_size, seq_len)\r\n data = torch.from_numpy(\r\n np.random.randint(1, voc_size, size=(batch, seq_len)))\r\n data[:, 0] = 1 # add start token\r\n src = Variable(data, requires_grad=False)\r\n tgt = Variable(data, requires_grad=False)\r\n yield Batch(src, tgt, 0) # Accessed by next function one by one\r", "def data_gen(\n v: int, batch: int, nbatches: int, device: torch.device = torch.device(\"cpu\")\n) -> Iterator[Batch]: # TODO bad name\n for i in range(nbatches):\n data = np.random.randint(1, v, size=(batch, 10))\n data[:, 0] = 1\n src: LongTensorType = torch.from_numpy(data)\n tgt: LongTensorType = torch.from_numpy(data)\n src, tgt = src.to(device), tgt.to(device)\n yield Batch(src, tgt, 0)", "def lazy_read_file(self):\n store = zarr.DirectoryStore(self.fpath)\n z_array = zarr.open(store=store, mode='r')\n self.da_input = da.from_array(z_array)\n self.data = self.da_input\n self.data_dim = self.data.shape\n self.chunk_size = z_array.chunks", "def __init__ (self, initial_length=4, infinite=False) :\r\n # Array<T> buff_;\r\n # int nextPut_; // Points where next put will occur\r\n # int nextGet_; // Points to where next get will occur\r\n # bool empty_; // nextPut_==nextGet is either empty or full\r\n # bool infinite_; // Puts into empty cause a doubling\r\n \r\n self.buff_ = [None for x in xrange(initial_length)]\r\n self.nextPut_ = 0\r\n self.nextGet_ = 0\r\n self.empty_ = True\r\n self.infinite_ = infinite", "def train_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_train, **self.dl_kwargs)", "def create_dataloader(datafile, dataset_type, batch_size, mechanism, shuffle=False):\n dataset = MazeDataset(datafile, dataset_type)\n assert dataset.num_actions == mechanism.num_actions\n return torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, 
shuffle=shuffle, num_workers=0)", "def chunk_generator(input_file, chunksize = 100000, dataset_name = \"\") :\n\n with h5py.File(input_file, 'r', libver = 'latest') as f :\n dataset = f[dataset_name]\n for x in range(0, dataset.size, chunksize) :\n yield dataset[x:x+chunksize]", "def get_dataloader(self, mode, label_mode, batch_size):\n cached_features_file = self._feature_file(mode, label_mode)\n logger.info('Loading features from cached file %s', cached_features_file)\n features = torch.load(cached_features_file)\n\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)\n all_emph_probs = torch.tensor([f.emph_probs for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_emph_probs)\n\n if mode == 'train':\n sampler = RandomSampler(dataset)\n else:\n sampler = SequentialSampler(dataset)\n\n dataloader = DataLoader(dataset=dataset, batch_size=batch_size, sampler=sampler)\n return dataloader", "def load_data(data_feeder):\n return data_feeder(BATCH_SIZE,\n SEQ_LEN,\n OVERLAP,\n Q_LEVELS,\n Q_ZERO,\n Q_TYPE)", "def __init__(self, buffer_size=3000):\n self.buffer = []\n self.buffer_size = buffer_size", "def __init__(self, buffer_size=3000):\n self.buffer = []\n self.buffer_size = buffer_size", "def train_dataloader(self) -> data.DataLoader:\n # Random weighted sampler to approach the imbalanced dataset\n self.weights = [1.0 / i for i in self.weights]\n\n _sample_weights = [0] * len(self.datasets['train'])\n\n for idx, (_, label) in enumerate(self.datasets['train']):\n _weight = self.weights[label]\n _sample_weights[idx] = _weight\n\n random_sampler = data.WeightedRandomSampler(_sample_weights,\n len(self.datasets['train']), replacement=False)\n\n return data.DataLoader(dataset=self.datasets['train'], batch_size=self.batch_size,\n num_workers=self.num_workers, pin_memory=False,\n sampler=random_sampler)", "def __init__(self, maxSize=3):\r\n try:\r\n if maxSize % 2 == 1 and maxSize >= 3:\r\n self._maxSize = maxSize\r\n else:\r\n raise ValueError(\"maxSize must be an odd integer >= 3\")\r\n except ValueError:\r\n raise\r\n self._data = np.ndarray(0)", "def build_dataloader(bs, shfle):\n # change get_labels to correct version (classification vs regression)\n dataset = TensorDataset(rand_data(), get_labels())\n dataset = TensorDataset(rand_data(), get_regression_labels())\n\n return DataLoader(dataset, batch_size=bs, shuffle=shfle, num_workers=0)", "def get_one_epoch_dataloader(dataset, micro_batch_size=None):\n args = get_args()\n\n if micro_batch_size is None:\n micro_batch_size = args.micro_batch_size\n num_workers = args.num_workers\n\n # Use megatron's sampler with consumed samples set to 0 as\n # this is only for evaluation and don't intend to resume half way.\n # Also, set the drop last to false as don't intend to remove\n # the last batch\n batch_sampler = MegatronPretrainingSampler(\n total_samples=len(dataset),\n consumed_samples=0,\n micro_batch_size=args.micro_batch_size,\n data_parallel_rank=mpu.get_data_parallel_rank(),\n data_parallel_size=mpu.get_data_parallel_world_size(),\n drop_last=False)\n\n return torch.utils.data.DataLoader(dataset,\n batch_sampler=batch_sampler,\n num_workers=num_workers,\n pin_memory=True)", "def 
get_dataloader(self):\n shuffle = True if self.mode == \"train\" else False\n return DataLoader(self.get_dataset(), batch_size=self.batch_size, shuffle = shuffle, \n collate_fn=create_mini_batch)", "def buffered(reader, size):\n\n class EndSignal():\n pass\n\n end = EndSignal()\n\n def read_worker(r, q):\n for d in r:\n q.put(d)\n q.put(end)\n\n def data_reader():\n r = reader()\n q = Queue(maxsize=size)\n t = Thread(\n target=read_worker, args=(\n r,\n q, ))\n t.daemon = True\n t.start()\n e = q.get()\n while e != end:\n yield e\n e = q.get()\n\n return data_reader", "def build_finetuning_dataloader(cfg: DictConfig,\n tokenizer: PreTrainedTokenizerBase,\n device_batch_size: int) -> DataLoader:\n _validate_config(cfg.dataset)\n\n # Use EOS as the pad token if none exists\n if tokenizer.pad_token is None: # type: ignore\n tokenizer.pad_token = tokenizer.eos_token\n\n dataset = None # for pyright\n if cfg.dataset.get('remote') is not None:\n dataset = dataset_constructor.build_from_streaming(\n tokenizer=tokenizer,\n local=cfg.dataset.local,\n remote=cfg.dataset.get('remote', None),\n split=cfg.dataset.get('split'),\n shuffle=cfg.dataset.get('shuffle', False),\n predownload=cfg.dataset.get('predownload', 100_000),\n keep_zip=cfg.dataset.get('keep_zip', False),\n download_retry=cfg.dataset.get('download_retry', 2),\n download_timeout=cfg.dataset.get('download_timeout', 60),\n validate_hash=cfg.dataset.get('validate_hash', None),\n shuffle_seed=cfg.dataset.get('shuffle_seed', 9176),\n num_canonical_nodes=cfg.dataset.get('num_canonical_nodes', 128),\n batch_size=device_batch_size,\n )\n\n collate_fn, dataloader_batch_size = _build_collate_fn(\n cfg.dataset, tokenizer, device_batch_size)\n\n return DataLoader(\n dataset,\n collate_fn=collate_fn,\n batch_size=dataloader_batch_size,\n drop_last=cfg.drop_last,\n num_workers=cfg.num_workers,\n pin_memory=cfg.get('pin_memory', True),\n prefetch_factor=cfg.get('prefetch_factor', 2),\n persistent_workers=cfg.get('persistent_workers', True),\n timeout=cfg.get('timeout', 0),\n )\n\n else:\n backend, _, _ = parse_uri(cfg.dataset.hf_name)\n if backend not in ['', None]:\n if cfg.dataset.get('split') is None:\n raise ValueError(\n 'When using a HuggingFace dataset from a URL, you must set the ' + \\\n '`split` key in the dataset config.'\n )\n dataset = _build_hf_dataset_from_remote(cfg, tokenizer)\n else:\n dataset = dataset_constructor.build_from_hf(\n cfg.dataset,\n max_seq_len=cfg.dataset.max_seq_len,\n tokenizer=tokenizer,\n )\n\n collate_fn, dataloader_batch_size = _build_collate_fn(\n cfg.dataset, tokenizer, device_batch_size)\n\n if cfg.drop_last:\n world_size = dist.get_world_size()\n minimum_dataset_size = world_size * dataloader_batch_size\n if hasattr(dataset, '__len__'):\n full_dataset_size = len(dataset)\n if full_dataset_size < minimum_dataset_size:\n raise ValueError(\n f'Your dataset (name={cfg.dataset.hf_name}, split={cfg.dataset.split}) '\n +\n f'has {full_dataset_size} samples, but your minimum batch size '\n +\n f'is {minimum_dataset_size} because you are running on {world_size} gpus and '\n +\n f'your per device batch size is {dataloader_batch_size}. 
Please increase the number '\n +\n f'of samples in your dataset to at least {minimum_dataset_size}.'\n )\n\n assert dataset is not None\n return DataLoader(\n dataset,\n collate_fn=collate_fn,\n batch_size=dataloader_batch_size,\n drop_last=cfg.drop_last,\n sampler=dist.get_sampler(dataset,\n drop_last=cfg.drop_last,\n shuffle=cfg.dataset.shuffle),\n num_workers=cfg.num_workers,\n pin_memory=cfg.get('pin_memory', True),\n prefetch_factor=cfg.get('prefetch_factor', 2),\n persistent_workers=cfg.get('persistent_workers', True),\n timeout=cfg.get('timeout', 0),\n )", "def data_loader(origin_data, batch_size, num_epochs=1):\n data = {}\n for key, value in origin_data.items():\n data[key] = np.copy(value)\n\n data_size = len(data['text_len'])\n num_batches_per_epoch = int((data_size-1)/batch_size) + 1\n\n for epoch in range(num_epochs):\n # shuffle the dataset at the begging of each epoch\n shuffle_indices = np.random.permutation(np.arange(data_size))\n for key, value in data.items():\n data[key] = value[shuffle_indices]\n\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n\n max_text_len = max(data['text_len'][start_index:end_index])\n\n yield (data['text'][start_index:end_index, :max_text_len],\n data['text_len'][start_index:end_index],\n data['label'][start_index:end_index],\n data['raw'][start_index:end_index])", "def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, shuffle_buffer_size=10000):\n\n if shuffle:\n dataset = Shuffle(dataset, buffer_size=shuffle_buffer_size)\n\n dataset = Batch(dataset, batch_size=batch_size, drop_last=drop_last)\n dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\n return dataset", "def data_source():\n dataset = [0.1, 0.2, 0.3, 0.4, 0.5]\n while True:\n time.sleep(2)\n yield random.choice(dataset)", "def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=False,\n num_workers=multiprocessing.cpu_count(),\n )", "def Repeat(dataset, count=None):\n return dataset.repeat(count=count)", "def next_batch(self, batch_size=8):\n raise NotImplementedError()", "def __init__(self, maxlen, dtype):\n self._start_index = np.int64(0)\n self._len = np.int64(0)\n self._maxlen = np.array(maxlen)\n initial_len = 10 if np.isinf(self._maxlen) else self._maxlen\n self._buffer = np.zeros(shape=(initial_len,), dtype=dtype)", "def __init__(self, size):\n self.data = list()\n self.data_len = 0\n self.start_idx = -1\n self.size = size\n self.average = None", "def next_batch(self, batch_size):\n raise NotImplementedError", "def test_variable_length():\n size = 1350\n batch_size = 4\n dataset = datasets.digit(\n split=\"train\", epochs=1, batch_size=batch_size, dataset_dir=DATASET_DIR,\n )\n assert dataset.batches_per_epoch == (size // batch_size + bool(size % batch_size))\n\n x, y = dataset.get_batch()\n assert x.dtype == object\n assert x.shape == (batch_size,)\n for x_i in x:\n assert x_i.ndim == 1\n assert 1148 <= len(x_i) <= 18262\n assert y.shape == (batch_size,)", "def __init__(self, maxlen, shape, dtype='float32'):\r\n self.maxlen = maxlen\r\n self.start = 0\r\n self.length = 0\r\n self.data = np.zeros((maxlen,) + shape).astype(dtype)", "def get_loader(data, json, batch_size, shuffle, num_workers):\n dataset = FinNumDataset(data, json)\n\n data_loader = torch.utils.data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n 
collate_fn=collate_fn)\n return data_loader", "def configure_for_performance(ds):\r\n ds = ds.batch(BATCH_SIZE)\r\n ds = ds.prefetch(buffer_size=AUTOTUNE)\r\n return ds", "def load_chunk(self, start): # TODO parallelize this whole process\n self.X = queue.Queue()\n n = 0 # number of loaded batches\n print('stop loading : %s' % self.stop_loading())\n print('start + n : %s' % str(start + n))\n while (not self.stop_loading()) and (start + n) < self.size:\n print('load')\n self.X.put(np.load(self.data_filenames[start+n]))\n n += 1\n print('return chunk')\n return n", "def static(seq: List[int]):\n return Data._create_dataset(seq, pad=True)", "def build_dataset(self, data_filename):\n\n # Load all data\n print(\"Loading target-context pairs from {}\".format(data_filename))\n self.data = pd.read_csv(data_filename,\n delimiter=self.delimiter,\n dtype='int32',\n header=None,\n engine='python').values\n\n # Force an adjustment to the node indices\n self.data += self.force_offset\n\n n_total = len(self.data)\n self.split_sizes = [int(n_total * split) for split in self.splits]\n self.split_offset = [0] + self.split_sizes[:-1]\n self.data_index = [0] * self.n_splits", "def train_dataloader(self):\r\n\r\n # transformation\r\n train_transform = Compose(\r\n [\r\n ApplyTransformToKey(\r\n key='video',\r\n transform=Compose(\r\n [\r\n UniformTemporalSubsample(8),\r\n Lambda(lambda x: x / 255.0),\r\n Normalize((0.45, 0.45, 0.45), (0.225, 0.225, 0.225)),\r\n RandomShortSideScale(min_size=256, max_size=320),\r\n RandomCrop(244),\r\n RandomHorizontalFlip(p=0.5),\r\n ]\r\n )\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = pv.data.Kinetics(\r\n data_path=os.path.join(self._DATA_PATH, \"train\"),\r\n clip_sampler=pv.data.make_clip_sampler(\"random\", self._CLIP_DURATION),\r\n decode_audio=False,\r\n transform=train_transform\r\n )\r\n return torch.utils.data.DataLoader(\r\n train_dataset,\r\n batch_size=self._BATCH_SIZE,\r\n num_workers=self._NUM_WORKERS,\r\n )", "def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]", "def make_dataset(\n root_path: Union[str, Path] = \"~/.cache/\", max_workers: int = 4\n ):\n\n make_dataset(root_path, max_workers=max_workers)", "def next_batch(self, batch_size, batch_wrap=True, shuffle=True):\n start = self.i_in_epoch\n if self.epochs_completed == 0 and start == 0 and shuffle:\n self.shuffle()\n\n data_batch = [0] * self.nb_data\n if start + batch_size >= self.d_size:\n # Finished epoch\n self.epochs_completed += 1\n self.i_in_epoch = 0\n for idx_dt in range(self.nb_data):\n data_batch[idx_dt] = self.data_list[idx_dt][start:self.d_size]\n if shuffle:\n self.shuffle()\n\n if batch_wrap:\n # Start next epoch\n self.i_in_epoch = batch_size - (self.d_size - start)\n end = self.i_in_epoch\n\n for idx_dt in range(self.nb_data):\n data_new_part = self.data_list[idx_dt][0:end]\n # e.g.shape of two inputs: (58, 12), (70, 12)\n data_batch[idx_dt] = np.vstack([data_batch[idx_dt], data_new_part])\n return data_batch\n else:\n self.i_in_epoch += batch_size\n end = self.i_in_epoch\n for idx_dt in range(self.nb_data):\n data_batch[idx_dt] = self.data_list[idx_dt][start:end]\n return data_batch", "def next_batch(self, batch_size):\n batch_data = np.zeros([batch_size,] + list(self.example_shape))\n for i in range(batch_size):\n index = self.q.pop()\n batch_data[i,...] 
= self.data[index]\n if len(self.q)==0:\n self.__new_epoch()\n\n return batch_data", "def batch_loader(data_set: Union[IterableDataset, Dataset],\n batch_size: bool,\n shuffle=False) -> DataLoader:\n return DataLoader(\n data_set,\n batch_size=batch_size,\n collate_fn=lambda x: x,\n shuffle=shuffle\n )", "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n\n if os.path.isdir(filenames):\n num_records = len(os.listdir(filenames))\n print(\"Loading from directory. \" + str(num_records) + \" tfRecords found.\")\n files = tf.data.Dataset.list_files(filenames + \"/\" + \"*.tfrecord\").shuffle(num_records)\n dataset = files.apply(\n tf.contrib.data.parallel_interleave(\n lambda x: tf.data.TFRecordDataset(x, num_parallel_reads=256, buffer_size=8*1024*1024),\n cycle_length=32, sloppy=True)\n )\n else:\n print(\"Loading from single tfRecord...\")\n dataset = tf.data.TFRecordDataset(filenames + \".tfrecord\").repeat()\n \n dataset = dataset.map(self.parser, num_parallel_calls=128)\n \n if self.subset == 'train':\n min_queue_examples = int(\n Cifar10DataSet.num_examples_per_epoch(self.subset) * 0.4)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)\n \n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n dataset = dataset.prefetch(10)\n \n iterator = dataset.make_one_shot_iterator()\n seq_batch, input_batch, map_batch, transformation_batch = iterator.get_next()\n\n return seq_batch, input_batch, map_batch, transformation_batch", "def data_repeated(data):\n\n def gen(count):\n for _ in range(count):\n yield data\n\n yield gen", "def __init__(self, dataloader):\n self._dataloader = dataloader\n\n self._iterator = iter(self._dataloader)", "def _next_batch(self):\n if not self.use_cache or self.first_run:\n while self.buffer_label.shape[0] < self.batch_size:\n # read more samples into buffer\n if self.next_read_index >= self.num_samples:\n if self.use_cache:\n self._dump_cache()\n self.first_run = False\n return None\n self._read_buffer(self.next_read_index)\n self.next_read_index += 1\n # get the next batch from buffer\n data, self.buffer_data = self.buffer_data[:self.batch_size], self.buffer_data[self.batch_size:]\n label, self.buffer_label = self.buffer_label[:self.batch_size], self.buffer_label[self.batch_size:]\n if self.use_cache:\n self.cache.append((data, label))\n else:\n next_batch = self._get_data_from_cache()\n if next_batch is None:\n return None\n else:\n data, label = next_batch\n\n return mx.io.DataBatch(data=[mx.nd.array(data), mx.ndarray.ones((self.batch_size, 1), dtype='int32') * self.npoints], label=[mx.nd.array(label)])", "def next_batch(self, batch_size):\r\n raise NotImplementedError", "def test_dataloader(self) -> data.DataLoader:\n return data.DataLoader(dataset=self.datasets['test'], batch_size=self.batch_size,\n num_workers=self.num_workers, shuffle=False, pin_memory=False)", "def make_data_iterator(input):\n assert isinstance(input, DataLoader)\n data_iterator = iter(input)\n return data_iterator", "def build(self):\n # open json, len 161,260\n at_json = open_json(self.json_names[0])\n link_json = open_json(self.json_names[1])\n # if need preprocessing, do it\n if self.args.img_preprocessing:\n print(\"resize imgs\")\n for i in tqdm(range(len(link_json))):\n image_url = \"image/\" + link_json[i][\"image_url_4x\"].split('/')[-1]\n img = Image.open(image_url)\n img = img.resize((224, 224))\n img.save(image_url)\n\n # 
create dataset\n itemlen = 0\n previd = 0\n for i in tqdm(range(len(link_json))):\n image_url = link_json[i][\"image_url_4x\"].split('/')[-1]\n uid = image_url.split('-')[0]\n if previd != uid:\n self.label.append(list(at_json[i].values())[2:])\n if i != 0:\n self.itemlen.append(itemlen)\n itemlen = 0\n self.input.append(f\"{self.frontpath}dataset/image/\" + image_url)\n previd = uid\n itemlen += 1\n self.itemlen.append(itemlen)\n self.separate()\n self.dataset = {\n 'train': self.train,\n 'validation': self.val,\n 'test': self.test\n }\n\n print('finished dataset')", "def make_linear_batch_data(self):\n self.reset_batch()\n g = SampleGenerator()\n for i in range(self.batch_size):\n # Draw a random sample on the interval [0,1]\n x = np.random.random()\n y = g.generate_linear_samples(x)\n self.x_data.append(x)\n self.y_data.append(y)", "def pack_unpack_extreme():\n # this will create a huge array, and then use the\n # blosc.BLOSC_MAX_BUFFERSIZE as chunk-szie\n pack_unpack(300, chunk_size=blosc.BLOSC_MAX_BUFFERSIZE,\n progress=simple_progress)", "def __init__(self):\n self.data = [[] for i in range(self._MOD)]", "def __init__(self):\n self.container = [[-1]*1000 for _ in range(1000)]", "def loadInMemoryOneBatch(fileName,batchSize):\n\n inputFile = open(fileName)\n\n while True:\n objects = []\n allDone = False\n while True:\n line = inputFile.readline()\n if line:\n objects.append(line)\n if len(objects) == batchSize:\n break\n else:\n allDone = True\n break\n yield objects\n if allDone == True:\n break", "def _init_al_dataset(self):\n\n self._init_dataset()\n\n train_dataset = self.datasets['train']\n\n dataset_size = len(train_dataset)\n self.budget = math.ceil(self.budget_frac*dataset_size)\n Sampler.__init__(self, config, self.budget) # TODO: Weird place to initialise this\n\n all_indices = set(np.arange(dataset_size))\n k_initial = math.ceil(len(all_indices)*self.initial_budget_frac)\n initial_indices = random.sample(list(all_indices), k=k_initial)\n\n sampler_init = data.sampler.SubsetRandomSampler(initial_indices) # need to sample from training dataset\n\n self.labelled_dataloader = data.DataLoader(train_dataset, sampler=sampler_init, batch_size=self.batch_size, drop_last=True)\n self.val_dataloader = data.DataLoader(self.datasets['valid'], batch_size=self.batch_size, drop_last=False)\n self.test_dataloader = data.DataLoader(self.datasets['test'], batch_size=self.batch_size, drop_last=False)\n\n return all_indices, initial_indices", "def __init__(self):\n self.num_mini_batches = 0", "async def _load_next_chunk(self):\n raise NotImplementedError", "def generate_batch():\n\n # Initialize variables\n example = np.zeros(self.batch_size)\n labels = np.zeros((self.batch_size, 1))\n alphas = np.zeros(self.batch_size)\n n_items = 0\n index = 0\n\n while index < len(data):\n reduced_window = random.randint(0, self.window_size)\n if data[index] is not None:\n\n left = max(0, index - self.window_size + reduced_window)\n right = min((index + self.window_size + 1 -\n reduced_window), len(data) - 1)\n for pos2 in range(left, right, 1):\n\n if n_items == self.batch_size:\n queue.put((example, labels, index))\n example = np.zeros(self.batch_size)\n labels = np.zeros((self.batch_size, 1))\n n_items = 0\n\n if pos2 != index and data[pos2] is not None:\n example[n_items] = data[pos2]\n labels[n_items] = data[index]\n alpha = self.learning_rate - \\\n (self.learning_rate - 0.001) * (index / self.n_words)\n alphas[n_items] = max(0.001, alpha)\n n_items += 1\n index += 1\n\n # Poison pills\n for _ in 
range(n_workers):\n queue.put(None)", "def __iter__(self):\n logging.info(\"DataLoader __iter__()\")\n if self.enable_prefetch:\n self.join()\n self.start_async()\n else:\n self.start()\n return self", "def __iter__(self):\n logging.info(\"DataLoader __iter__()\")\n if self.enable_prefetch:\n self.join()\n self.start_async()\n else:\n self.start()\n return self", "def create_train_dataloader(configs):\n train_lidar_aug = OneOf([\n Random_Rotation(limit_angle=np.pi / 4, p=1.0),\n Random_Scaling(scaling_range=(0.95, 1.05), p=1.0),\n ], p=0.66)\n train_dataset = KittiDataset(configs, mode='train', lidar_aug=train_lidar_aug, hflip_prob=configs.hflip_prob,\n num_samples=configs.num_samples)\n train_sampler = None\n if configs.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, batch_size=configs.batch_size, shuffle=(train_sampler is None),\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=train_sampler)\n\n return train_dataloader, train_sampler", "def generate_batches(source,n=20):\n blist=[]\n with open(source) as f_source:\n start=next_n_lines(f_source, n); string=gen_string(start); blist.append(string)\n while start!=[]: start=next_n_lines(f_source, n); string=gen_string(start); blist.append(string)\n print('2. Generation of batches completed!')\n return blist", "def prepare_batches(self, data):\n batches = []\n start, end = 0, 100\n if len(data) > 100:\n while True:\n data_batch = data[start:end]\n if not data_batch:\n break\n temp = end + 100\n start, end = end, temp\n if data_batch:\n batches.append(data_batch)\n else:\n batches.append(data)\n return batches", "def datasubset(loader, start, count, batch_size):\n # Note: start is the start index of batch, not image\n smaller_dataset = []\n end_idx = count / batch_size\n for batch_idx, (orig_images, labels) in enumerate(loader):\n if start <= batch_idx < end_idx:\n smaller_dataset.append((orig_images, labels))\n if batch_idx > end_idx:\n break\n return smaller_dataset", "def __init__(self, dataset_dir, listfile=None, period_length=48.0):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n self._data = [(x, int(y)) for (x, y) in self._data]\n self._period_length = period_length", "def range(n: int, *, parallelism: int = -1) -> Dataset[int]:\n return read_datasource(\n RangeDatasource(), parallelism=parallelism, n=n, block_format=\"list\"\n )", "def __iter__(self):\n\n # Open the data reader\n self.data.open()\n\n starts = np.arange(self.start, self.stop, self.chunksize)\n for a, b in zip_longest(starts, starts[1:], fillvalue=self.stop):\n yield self.data.read(a, b, **self.kwargs)", "def get_dataloader(self, cid, batch_size=None, type=\"train\"):\n dataset = self.get_dataset(cid, type)\n batch_size = len(dataset) if batch_size is None else batch_size\n data_loader = DataLoader(dataset, batch_size=batch_size)\n return data_loader", "def __init__(self, dataset_dir, listfile=None, period_length=48.0):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(\",\") for line in self._data]\n self._data = [(x, int(y)) for (x, y) in self._data]\n self._period_length = period_length", "def __init__(self, size):\n self._storage = []\n self._maxsize = int(size)\n self._next_idx = 0" ]
[ "0.62806386", "0.62140703", "0.62047887", "0.6165703", "0.6139719", "0.5913532", "0.5898376", "0.58914024", "0.5870807", "0.58379287", "0.5795319", "0.5757869", "0.5757244", "0.57561076", "0.57423776", "0.5669886", "0.5666476", "0.5624543", "0.5618841", "0.5613911", "0.5603851", "0.55625093", "0.55617183", "0.5551853", "0.55263984", "0.5503303", "0.5502659", "0.54529005", "0.5445508", "0.5438684", "0.5415972", "0.54122823", "0.5408283", "0.5387838", "0.5386066", "0.53657436", "0.5344487", "0.5342493", "0.5340172", "0.5326661", "0.532479", "0.532479", "0.5320737", "0.53170085", "0.53134394", "0.5312126", "0.5309265", "0.52977836", "0.52958244", "0.5291137", "0.5285517", "0.5278001", "0.52576447", "0.52415", "0.5234347", "0.5232117", "0.5229181", "0.52206135", "0.52146417", "0.5212416", "0.5206998", "0.5184529", "0.5184517", "0.51822597", "0.51571155", "0.51563066", "0.5153912", "0.5153401", "0.5132232", "0.513124", "0.5131099", "0.51289105", "0.5119765", "0.5119247", "0.51153696", "0.5112273", "0.51050216", "0.5102187", "0.5101872", "0.509893", "0.5097591", "0.5096996", "0.5096923", "0.50963557", "0.5090654", "0.50863785", "0.5085292", "0.50794506", "0.5079396", "0.5079396", "0.50787336", "0.50744075", "0.5072973", "0.5070434", "0.5070022", "0.5065148", "0.50631243", "0.5056636", "0.50564444", "0.5056001" ]
0.83487415
0
Training and validation dataloaders.
def _build_train_valid_dataloaders(train_dataset, valid_dataset,
                                   task_collate_fn=None):
    args = get_args()

    print_rank_0('building train and validation dataloaders ...')
    # Training dataset.
    train_dataloader = build_data_loader(train_dataset, args.micro_batch_size,
                                         args.num_workers, not args.keep_last,
                                         task_collate_fn)
    # Set the training iterations.
    args.train_iters_per_epoch = len(train_dataloader)
    args.train_iters = args.epochs * args.train_iters_per_epoch
    # Validation dataset. For this dataset, we do not need to set up
    # shuffling so we can just use a simple infinite loop.
    valid_dataloader_ = build_data_loader(valid_dataset, args.micro_batch_size,
                                          args.num_workers, not args.keep_last,
                                          task_collate_fn)
    valid_dataloader = _build_infinite_size_dataloader(valid_dataloader_)

    # Now that we've built the data loaders, set batch_size arguments
    # to the actual batch size the model will see for this dataset.
    # This is necessary so pipeline transfers know what size they are
    # and the LR schedule, which is based on samples seen, gets set
    # correctly.
    args.orig_micro_batch_size = args.micro_batch_size
    args.orig_global_batch_size = args.global_batch_size
    if hasattr(train_dataset, 'sample_multiplier'):
        # If our dataset has a sample_multiplier attribute, that means
        # each "sample" from the dataset actually has multiple samples
        # that will collapse into the batch dimension (for example in
        # the RACE dataset that has several options), we need to
        # account for that when setting the micro batch size.
        args.micro_batch_size *= train_dataset.sample_multiplier
        args.global_batch_size *= train_dataset.sample_multiplier

    return train_dataloader, valid_dataloader
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data_loaders(args, tokenizer):\n personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache, args.train_lang)\n _ = personachat.pop(\"test\", None)\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": [], \"valid\": []}\n\n if args.train_lang in [\"En\", \"Fr\", \"It\", \"Id\", \"Jp\", \"Ko\", \"Zh\"]: #monolingual data\n for dataset_name, dataset in personachat.items():\n for dial in dataset[args.train_lang]: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lm_labels = True)\n datasets[dataset_name].append(instance) \n else: #multilingual data\n for dataset_name, dataset in personachat.items():\n for lang, dials in dataset.items():\n for dial in dials: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lang_id=\"<{}>\".format(lang.lower()), lm_labels = True)\n datasets[dataset_name].append(instance) #all langs together\n\n\n logger.info(\"Build train and validation dataloaders\")\n train_dataset = DatasetTrain(datasets[\"train\"])\n valid_dataset = DatasetTrain(datasets[\"valid\"])\n\n #logger.info(\"Build train and validation dataloaders\")\n #train_dataset, valid_dataset = TensorDataset(*tensor_datasets[\"train\"]), TensorDataset(*tensor_datasets[\"valid\"])\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None\n valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None\n train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, shuffle=(not args.distributed), collate_fn=collate_fn)\n valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.valid_batch_size, shuffle=False, collate_fn=collate_fn)\n\n # logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[0].shape))\n # #logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[1].shape))\n # logger.info(\"Valid dataset (Batch, Candidates, Seq length): {}\".format(valid_dataset.tensors[0].shape))\n logger.info(\"Train dataset length: {}\".format(len(train_dataset)))\n logger.info(\"Valid dataset length: {}\".format(len(valid_dataset)))\n return train_loader, valid_loader, train_sampler, valid_sampler", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def get_dataloaders(logging, batch_size):\n # Load Data\n logging.info(\"Reading Train and Test data...\")\n 
train_df = pd.read_csv(\"C:/tmp/avila_classification/data/avila-tr.txt\", header=None)\n test_df = pd.read_csv(\"C:/tmp/avila_classification/data/avila-ts.txt\", header=None)\n\n # Fix column names\n col_names = ['col_' + str(j + 1) for j in range(train_df.shape[1] - 1)]\n indep_cols = col_names.copy()\n col_names.append('y')\n\n logging.debug(\"Assigning columns\")\n train_df.columns = col_names\n test_df.columns = col_names\n\n # Encode dependent variable column\n le = LabelEncoder()\n le.fit(train_df['y'])\n logging.debug(f\"Classes: {le.classes_}\")\n logging.debug(f\"Transformed Classes: {le.transform(le.classes_)}\")\n\n train_df['y_enc'] = le.transform(train_df['y'])\n test_df['y_enc'] = le.transform(test_df['y'])\n\n # train_df.head()\n logging.debug(f\"Shape of train data: {train_df.shape}\")\n logging.debug(f\"Shape of test data: {test_df.shape}\")\n\n # Create train and validation dataloaders\n train_ds = AvilaDataset(data_frame=train_df, indep_cols=indep_cols, dep_col='y_enc')\n valid_ds = AvilaDataset(data_frame=test_df, indep_cols=indep_cols, dep_col='y_enc')\n\n # Should be some exponent of 2 (128, 256)\n # batch_size = 256\n train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True)\n valid_dl = DataLoader(valid_ds, batch_size=batch_size, shuffle=False)\n\n return train_dl, valid_dl, le", "def train(self, train_loader):\n pass", "def train_with_loader(self, data, validating_data=None, scheduler=None, epochs=1):\n print('Training...')\n for epoch in range(epochs):\n self.train()\n for train_in, train_out in data:\n self.compute_loss(train_in, train_out, is_guess=False, training=True)\n self.eval()\n if validating_data:\n with torch.no_grad():\n valid_loss = self.compute_loss_loader(validating_data).item()\n print('Average validation error at step ',epoch+1,': ', valid_loss)\n if scheduler and valid_loss:\n scheduler.step()", "def prepare_dataloaders(data,\n seq_len,\n batch_size=64,\n validation_set=False,\n validation_size=0.1,\n random_seed=42):\n vocab = set(data)\n token2id = {k: v for v, k in enumerate(vocab)}\n id2token = {k: v for v, k in token2id.items()}\n data_range = range(0, len(data) - seq_len, seq_len)\n\n data = [token2id[t] for t in data]\n data = np.array([data[i:i + seq_len] for i in data_range])\n tensor_data = torch.from_numpy(data)\n\n if validation_set:\n np.random.seed(random_seed)\n idx = np.random.choice(\n range(len(tensor_data)), size=len(tensor_data), replace=False)\n split = int(len(idx) * (1 - validation_size))\n train_idx = idx[:split]\n valid_idx = idx[split:]\n\n train_data = TensorDataset(torch.LongTensor(tensor_data[train_idx]))\n valid_data = TensorDataset(torch.LongTensor(tensor_data[valid_idx]))\n\n train_loader = DataLoader(\n train_data, shuffle=True, batch_size=batch_size)\n valid_loader = DataLoader(\n valid_data, shuffle=True, batch_size=batch_size)\n\n return train_loader, valid_loader, vocab, token2id, id2token\n else:\n train_data = TensorDataset(torch.LongTensor(tensor_data))\n train_loader = DataLoader(\n train_data, shuffle=True, batch_size=batch_size)\n return train_loader, vocab, token2id, id2token", "def create_trainval_dataloaders(params):\n # ----------------Create Dataset objects and Dataloaders----------------\n mr_dataset_train, tokenizer = get_dataset(params, run_mode=\"train\")\n params.vocab_size = tokenizer.get_vocab_size()\n print(\"SystemLog: Vocab size used for training is %d\" % (params.vocab_size))\n print(\"SystemLog: Number of items in the train dataset=%d\" % len(mr_dataset_train))\n 
sys.stdout.flush()\n # Collate Function pads the sequences to have a uniform length for the entire batch\n mr_dataloader_train = DataLoader(mr_dataset_train, batch_size=params.batch_size,\n shuffle=True, num_workers=params.num_workers, collate_fn=CollateMRSequence(params.architecture))\n\n mr_dataset_valid, _ = get_dataset(params, run_mode=\"valid\")\n print(\"SystemLog: Number of items in the valid dataset=%d\" % len(mr_dataset_valid))\n mr_dataloader_valid = DataLoader(mr_dataset_valid, batch_size=params.batch_size_validation,\n shuffle=False, num_workers=0, collate_fn=CollateMRSequence(params.architecture))\n\n return mr_dataset_train, mr_dataloader_train, mr_dataset_valid, mr_dataloader_valid", "def get_dataloaders(args):\n if args.dataset == 'heat':\n dataset_class = heat.HeatDiffusionDataset\n else:\n raise ValueError(f'Unknown dataset {args.dataset}')\n train_dataset = dataset_class(\n dataset_class.get_train_path(args.data_path), args, train=True)\n if args.dist:\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset)\n else:\n train_sampler = torch.utils.data.RandomSampler(train_dataset)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, num_workers=args.workers,\n sampler=train_sampler, pin_memory=True, drop_last=args.drop_last)\n if not args.no_eval:\n validation_dataset = dataset_class(\n dataset_class.get_validation_path(args.data_path), args, train=False)\n if args.dist:\n validation_sampler = torch.utils.data.distributed.DistributedSampler(\n validation_dataset, shuffle=False)\n else:\n validation_sampler = torch.utils.data.SequentialSampler(\n validation_dataset)\n validation_loader = torch.utils.data.DataLoader(\n validation_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=validation_sampler,\n pin_memory=True, drop_last=args.drop_last)\n\n test_dataset = dataset_class(\n dataset_class.get_test_path(args.data_path), args, train=False)\n if args.dist:\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, shuffle=False)\n else:\n test_sampler = torch.utils.data.SequentialSampler(\n test_dataset)\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=test_sampler,\n pin_memory=True, drop_last=args.drop_last)\n else:\n validation_loader = None\n test_loader = None\n\n # Update the data shape if needed.\n if args.data_shape is None:\n args.data_shape = train_dataset.get_shape()\n if args.data_target_shape is None:\n args.data_target_shape = train_dataset.get_target_shape()\n\n return train_loader, validation_loader, test_loader", "def get_driving_data_loaders(batch_size, train_dataset, valid_dataset, test_dataset, num_workers=0): \n\n valid_loader = DataLoader(dataset=valid_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=True)\n\n train_loader = DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n drop_last=True, \n shuffle=True)\n\n test_loader = DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=False)\n\n return train_loader, valid_loader, test_loader", "def train(self, data_loader):\n step = 0\n train_data, valid_data = data_loader()\n\n # Allow to call `next` builtin indefinitely.\n valid_data = iter(valid_data.repeat())\n\n for epoch in range(self.hparams.num_epochs):\n for x, y in train_data:\n\n with tf.GradientTape() as g:\n train_loss = self.loss(y, self(x))\n\n grads = 
g.gradient(train_loss, self.trainable_variables)\n self.optimizer.apply_gradients(zip(grads, self.trainable_variables))\n\n # Validate every 1000 training steps.\n if step % 1000 == 0:\n x, y = next(valid_data)\n valid_loss = self.loss(y, self(x))\n print(\n f\"step {step} (train_loss={train_loss} valid_loss={valid_loss})\"\n )\n step += 1\n\n print(f\"epoch {epoch} finished\")\n self.save()", "def fit(self, data_loader):\n train_data, valid_data = data_loader.load()\n\n self.compile(self.optimizer, self.loss)\n super().fit(\n x=train_data,\n validation_data=valid_data,\n validation_steps=32, # validate 32 batches at a time\n validation_freq=1, # validate every 1 epoch\n epochs=self.hparams.num_epochs,\n shuffle=False, # dataset instances already handle shuffling\n )\n self.save()", "def train_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_train, **self.dl_kwargs)", "def get_dataloaders(data_dir,train_batch_size,val_batch_size,aug_flag):\n # Create the dataset object.\n transformed_dataset = PersonDataset(data_dir,False)\n # dataloader for train and validation\n validation_split = 0.2\n shuffle_dataset = True\n #random seed to keep the train-val split constant for inference purpose\n random_seed= 42\n # create indices for training and validation splits.\n dataset_size = len(transformed_dataset)\n # we create the indices using python range function and store it into a list\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split*dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices,val_indices = indices[split:],indices[:split]\n # create dataloaders...\n train_sampler = SubsetRandomSampler(train_indices)\n val_sampler = SubsetRandomSampler(val_indices)\n train_aug,val_aug = aug_flag,False\n train_loader = DataLoader(PersonDataset(data_dir,train_aug), batch_size=train_batch_size, shuffle=False, num_workers=0,sampler = train_sampler)\n val_loader = DataLoader(PersonDataset(data_dir,val_aug), batch_size=val_batch_size, shuffle=False, num_workers=0,sampler = val_sampler)\n\n # dictionary for data loaders..\n dataloaders = {\"train\" :train_loader,\n \"val\":val_loader\n }\n return dataloaders", "def prepare_dataloader(opt, dataobj):\n\n def load_data(name):\n with open(name, 'rb') as f:\n data = pickle.load(f)\n num_types = 1 # There is no event type prediction, hence using a dummy value, this will basically be a constant value field\n return data, num_types\n\n print('[Info] Loading train data...')\n train_data, num_types = load_data(opt.data + 'train_ny.pkl')\n print('[Info] Loading dev data...')\n val_data, _ = load_data(opt.data + 'val_ny.pkl')\n print('[Info] Loading test data...')\n test_data, _ = load_data(opt.data + 'test_ny.pkl')\n\n trainloader = get_dataloader(train_data, opt.batch_size, shuffle=True)\n validationloader = get_dataloader(val_data, opt.batch_size, shuffle=True)\n testloader = get_dataloader(test_data, opt.batch_size, shuffle=False)\n return trainloader, validationloader, testloader, num_types", "def train_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def load_dataloaders(args):\n logger.info(\"Loading dataloaders...\")\n p_path = os.path.join(\"./data/\", \"df_unencoded.pkl\")\n train_path = os.path.join(\"./data/\", \"df_encoded.pkl\")\n if (not os.path.isfile(p_path)) and (not os.path.isfile(train_path)):\n df = get_data(args, load_extracted=False)\n elif os.path.isfile(p_path) and (not 
os.path.isfile(train_path)):\n df = get_data(args, load_extracted=True)\n elif os.path.isfile(train_path):\n df = load_pickle(\"df_encoded.pkl\")\n \n # Train-Test split\n msk = np.random.rand(len(df)) < args.train_test_ratio\n trainset = df[msk]\n testset = df[~msk]\n \n trainset = text_dataset(trainset, args)\n max_features_length = trainset.max_x_len\n max_seq_len = trainset.max_y_len\n train_length = len(trainset)\n train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True,\\\n num_workers=0, collate_fn=Pad_Sequence(), pin_memory=False)\n \n testset = text_dataset(testset, args)\n test_length = len(testset)\n test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=True,\\\n num_workers=0, collate_fn=Pad_Sequence(), pin_memory=False)\n return train_loader, train_length, max_features_length, max_seq_len, test_loader, test_length", "def create_dataloaders(data_dir):\n\n trng_dataset = datasets.ImageFolder(data_dir / TRNG_FOLDER,\n transform=flowernet.trng_transform)\n trng_dataloader = torch.utils.data.DataLoader(trng_dataset,\n batch_size=64,\n shuffle=True)\n\n valn_dataset = datasets.ImageFolder(data_dir / VALN_FOLDER,\n transform=flowernet.pred_transform)\n valn_dataloader = torch.utils.data.DataLoader(valn_dataset,\n batch_size=64,\n shuffle=True)\n\n return trng_dataloader, valn_dataloader", "def get_dataloaders(datasets, split, args, is_eval=False):\n dataloaders = []\n for task, dataset in datasets.items():\n if is_eval:\n num_rows = dataset.num_rows if args.eval_rows == -1 else args.eval_rows\n else:\n num_rows = dataset.num_rows if args.train_rows == -1 else args.train_rows\n all_input_ids = np.zeros([num_rows, args.max_length])\n all_attention_mask = np.zeros([num_rows, args.max_length])\n all_token_type_ids = np.zeros([num_rows, args.max_length])\n for i in range(num_rows):\n features = dataset[i]\n curr_len = len(features[\"attention_mask\"])\n all_input_ids[i,:curr_len] = features[\"input_ids\"]\n all_attention_mask[i,:curr_len] = features[\"attention_mask\"]\n all_token_type_ids[i,:curr_len] = features[\"token_type_ids\"]\n all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)\n all_attention_mask = torch.tensor(all_attention_mask, dtype=torch.long)\n all_token_type_ids = torch.tensor(all_token_type_ids, dtype=torch.long)\n all_label = torch.tensor(dataset[:num_rows][\"label\"], dtype=torch.long)\n if task == \"stsb\":\n all_label = all_label.float()\n \n data = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label)\n if split in [\"train\", \"support\"]:\n sampler = RandomSampler(data)\n dataloader = DataLoader(data, sampler=sampler, batch_size=args.train_batch_size)\n else:\n sampler = SequentialSampler(data)\n dataloader = DataLoader(data, sampler=sampler, batch_size=args.eval_batch_size)\n dataloaders.append(dataloader)\n return dataloaders", "def train(\n train_dataloader: torch.utils.data.dataloader,\n model: nn.Module,\n loss_function: nn.Module,\n optimizer: torch.optim.Optimizer,\n device: torch.device,\n scheduler=None,\n):\n model.train()\n\n total_loss = 0\n\n for step, batch in tqdm(enumerate(train_dataloader)):\n\n if step % 50 == 0 and not step == 0:\n print(\" Batch {:>5,} of {:>5,}.\".format(step, len(train_dataloader)))\n\n data = batch[\"data\"].to(device)\n label = batch[\"label\"].to(device)\n model.zero_grad()\n\n preds = model(data)\n\n loss = loss_function(preds, label)\n\n total_loss = total_loss + loss.item()\n\n loss.backward()\n\n optimizer.step()\n # scheduler.step()", "def 
validation_start(self, dataloader):\n self.val_datasize = len(dataloader)", "def test_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_test, **self.dl_kwargs)", "def train(self, data):\n pass", "def val_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_valid, **self.dl_kwargs)", "def validation_step__multiple_dataloaders(self, batch, batch_idx, dataloader_idx, **kwargs):\n x, y = batch\n x = x.view(x.size(0), -1)\n y_hat = self(x)\n\n loss_val = self.loss(y, y_hat)\n\n # acc\n labels_hat = torch.argmax(y_hat, dim=1)\n val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n val_acc = torch.tensor(val_acc).type_as(x)\n\n output = OrderedDict({\n f'val_loss_{dataloader_idx}': loss_val,\n f'val_acc_{dataloader_idx}': val_acc,\n })\n return output", "def test_cached_dataloader(self):\n\n v = [\"data\", \"target\", \"model_out_sqnet\"]\n\n for data, target in self.train_loader:\n b, c, h, w = data[v[0]].shape\n assert data[v[1]].shape == (b, )\n assert data[v[2]].shape == (b, 100)\n assert data[v[1]].shape == target.shape", "def make_dataloaders(params):\r\n transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465],\r\n [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor()])\r\n\r\n trainset = torchvision.datasets.CIFAR10(root=params['path'], train=True, transform=transform_train)\r\n testset = torchvision.datasets.CIFAR10(root=params['path'], train=False, transform=transform_validation)\r\n\r\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, num_workers=4)\r\n testloader = torch.utils.data.DataLoader(testset, batch_size=params['batch_size'], shuffle=False, num_workers=4)\r\n return trainloader, testloader", "def data_loaders(dataset_path):\n dataset_path = dataset_path\n news_stock_dataset = NewsStockDataLoader(dataset_path)\n \n dataset_size = len(news_stock_dataset)\n indices = list(range(dataset_size))\n training_split = int(0.8 * dataset_size)\n validation_split = int(0.9 * dataset_size)\n\n np.random.seed(96)\n np.random.shuffle(indices)\n\n train_indices = indices[:training_split]\n valid_indices = indices[training_split:validation_split]\n test_indices = indices[validation_split:]\n\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(valid_indices)\n test_sampler = SubsetRandomSampler(test_indices)\n \n collate = PadSequence()\n\n training_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"training_batch_size\"),\n sampler = train_sampler,\n collate_fn = collate)\n\n validation_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"validation_batch_size\"),\n sampler = valid_sampler,\n collate_fn = collate)\n\n testing_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"testing_batch_size\"),\n sampler= test_sampler,\n collate_fn = collate)\n \n return training_loader, validation_loader, testing_loader", "def test_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset 
= ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def dataloaders():\n # train data path\n data_train = '../dataset/train/'\n # set transformations\n train_transforms = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n \n train_data = datasets.ImageFolder(data_train, transform = train_transforms)\n trainloader = torch.utils.data.DataLoader(train_data, batch_size = 16, shuffle = True)\n \n return trainloader", "def build_validation_data_loader(self) -> DataLoader:\n pass", "def test_dataloader(self) -> data.DataLoader:\n return data.DataLoader(dataset=self.datasets['test'], batch_size=self.batch_size,\n num_workers=self.num_workers, shuffle=False, pin_memory=False)", "def __epoch_train(self, data_loader, writer):\n self.model.train()\n for i, encode in enumerate(data_loader, 1):\n # update model\n encode = {k: v.to(self.device) for k, v in encode.items()}\n self.optimizer.zero_grad()\n loss = self.model(**encode)[0]\n if self.n_gpu > 1:\n loss = loss.mean()\n if self.args.fp16:\n with self.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n # optimizer and scheduler step\n self.optimizer.step()\n self.scheduler.step()\n # log instantaneous accuracy, loss, and learning rate\n inst_loss = loss.cpu().detach().item()\n inst_lr = self.optimizer.param_groups[0]['lr']\n writer.add_scalar('train/loss', inst_loss, self.__step)\n writer.add_scalar('train/learning_rate', inst_lr, self.__step)\n if self.__step % PROGRESS_INTERVAL == 0:\n LOGGER.info('[epoch %i] * (training step %i) loss: %.3f, lr: %0.8f'\n % (self.__epoch, self.__step, inst_loss, inst_lr))\n self.__step += 1\n # break\n if self.__step >= self.args.total_step:\n LOGGER.info('reached maximum step')\n return True\n return False", "def _get_data(\n self, train_dataset: TensorDataset, validation_dataset: TensorDataset\n ):\n return (\n DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True),\n DataLoader(validation_dataset, batch_size=self.batch_size * 2),\n )", "def training_start(self, dataloader):\n self.datasize = len(dataloader)", "def get_data_loaders():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_2sentences_finalgenerated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = 
utterance[\"history\"][-(2*2+5):]\n \n #history_complete.append(history)\n if len(persona) == 4:\n if len(history) > (len(persona)+3):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def CrossCheck(dataloader):", "def create_dataloader(data):\r\n input_ids = torch.LongTensor([sent['input_ids'] for sent in data])\r\n input_mask = torch.LongTensor([sent['input_mask'] for sent in data])\r\n segment_ids = torch.LongTensor([sent['segment_ids'] for sent in data])\r\n label_ids = torch.LongTensor([sent['label_ids'] for sent in data])\r\n\r\n dataset = TensorDataset(input_ids, input_mask, segment_ids, label_ids)\r\n\r\n train_sampler = RandomSampler(dataset)\r\n dataloader = DataLoader(dataset, sampler=train_sampler, batch_size=BATCH_SIZE)\r\n\r\n return dataloader", "def build_training_data_loader(self) -> DataLoader:\n pass", "def train(self, trainData):\n pass", "def train(args, data_loader, model, global_stats):\n # Initialize meters + timers\n train_loss = AverageMeter()\n epoch_time = Timer()\n \n for batch_idx, (input_idxs, target_idxs, input_tokens, target_tokens) in enumerate(data_loader):\n # input_idxs and target_idxs have dim (batch_size x max_len)\n # they are NOT sorted by length\n\n lengths = (input_idxs != 0).long().sum(dim=1)\n sorted_lengths, order = torch.sort(lengths, descending=True)\n\n input_variable = Variable(input_idxs[order, :][:, :max(lengths)])\n target_variable = Variable(target_idxs[order, :])\n \n model.optimizer.zero_grad()\n output_log_probs, output_ses = model(input_variable,\n list(sorted_lengths),\n targets=target_variable)\n \n batch_size = input_variable.shape[0]\n flattened_outputs = output_log_probs.view(batch_size * model.max_length, -1)\n \n batch_loss = model.citerion(flattened_outputs, target_variable.contiguous().view(-1))\n batch_loss.backward()\n model.optimizer.step()\n \n model.updates += 1\n \n train_loss.update(batch_loss[0], batch_size)\n \n if batch_idx % args.display_iter == 0:\n logger.info('train: Epoch = %d | iter = %d/%d | ' %\n (global_stats['epoch'], batch_idx, len(data_loader)) +\n 'loss = %.2f | elapsed time = %.2f (s)' %\n (train_loss.avg, global_stats['timer'].time()))\n train_loss.reset()\n \n logger.info('train: Epoch %d done. 
Time for epoch = %.2f (s)' %\n (global_stats['epoch'], epoch_time.time()))\n \n # Checkpoint\n if args.checkpoint:\n model.checkpoint(args.model_file + '.checkpoint',\n global_stats['epoch'] + 1)", "def learnDataset(self, data_loader):\n\n print(\"learning dataset\")\n # we have 127940 sentences in total\n count = 0\n for sample in data_loader:\n input_sentence = sample[\"input\"][0]\n target_sentence = sample[\"target\"][0]\n\n # NOTE: target_word & input_word are actually indecies of words, instead of word strings\n # NOTE: the first word has index 1\n first_target = int(target_sentence[1])\n first_input = int(input_sentence[1])\n\n self.emiss_factors[0][(first_input, first_target)] += 1\n\n prev_target = first_target\n for word_idx in range(2, 16):\n # note that word_idx is 0 is always <BOS>\n target_word = int(target_sentence[word_idx])\n input_word = int(input_sentence[word_idx])\n\n self.emiss_factors[word_idx - 1][(input_word, target_word)] += 1\n self.trans_factors[word_idx - 2][(prev_target, target_word)] += 1\n prev_target = target_word\n\n print(\"{}/127940\".format(count), end = \"\\r\")\n count += 1\n print(\"127940/127940\")\n\n # all data updated, no need to do any insertion\n for i in range(15):\n self.emiss_factors[i].fixed()\n for i in range(14):\n self.trans_factors[i].fixed()", "def dataio_prepare(hparams, tokenizer):\n\n # 1. Define datasets\n data_folder = hparams[\"data_folder\"]\n\n train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"train_csv\"], replacements={\"data_root\": data_folder},\n )\n\n if hparams[\"sorting\"] == \"ascending\":\n # we sort training data to speed up training and get better results.\n train_data = train_data.filtered_sorted(\n sort_key=\"duration\",\n key_max_value={\"duration\": hparams[\"avoid_if_longer_than\"]},\n )\n # when sorting do not shuffle in dataloader ! otherwise is pointless\n hparams[\"dataloader_options\"][\"shuffle\"] = False\n\n elif hparams[\"sorting\"] == \"descending\":\n train_data = train_data.filtered_sorted(\n sort_key=\"duration\",\n reverse=True,\n key_max_value={\"duration\": hparams[\"avoid_if_longer_than\"]},\n )\n # when sorting do not shuffle in dataloader ! otherwise is pointless\n hparams[\"dataloader_options\"][\"shuffle\"] = False\n\n elif hparams[\"sorting\"] == \"random\":\n pass\n\n else:\n raise NotImplementedError(\n \"sorting must be random, ascending or descending\"\n )\n\n valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"valid_csv\"], replacements={\"data_root\": data_folder},\n )\n # We also sort the validation data so it is faster to validate\n valid_data = valid_data.filtered_sorted(sort_key=\"duration\")\n\n test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"test_csv\"], replacements={\"data_root\": data_folder},\n )\n\n # We also sort the validation data so it is faster to validate\n test_data = test_data.filtered_sorted(sort_key=\"duration\")\n\n datasets = [train_data, valid_data, test_data]\n\n # 2. Define audio pipeline:\n @sb.utils.data_pipeline.takes(\"wav\")\n @sb.utils.data_pipeline.provides(\"sig\")\n def audio_pipeline(wav):\n info = torchaudio.info(wav)\n sig = sb.dataio.dataio.read_audio(wav)\n resampled = torchaudio.transforms.Resample(\n info.sample_rate, hparams[\"sample_rate\"],\n )(sig)\n return resampled\n\n sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)\n\n # 3. 
Define text pipeline:\n @sb.utils.data_pipeline.takes(\"wrd\")\n @sb.utils.data_pipeline.provides(\n \"tokens_list\", \"tokens_bos\", \"tokens_eos\", \"tokens\"\n )\n def text_pipeline(wrd):\n tokens_list = tokenizer.sp.encode_as_ids(wrd)\n yield tokens_list\n tokens_bos = torch.LongTensor([hparams[\"bos_index\"]] + (tokens_list))\n yield tokens_bos\n tokens_eos = torch.LongTensor(tokens_list + [hparams[\"eos_index\"]])\n yield tokens_eos\n tokens = torch.LongTensor(tokens_list)\n yield tokens\n\n sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)\n\n # 4. Set output:\n sb.dataio.dataset.set_output_keys(\n datasets, [\"id\", \"sig\", \"tokens_bos\", \"tokens_eos\", \"tokens\"],\n )\n return train_data, valid_data, test_data", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def learnDataset(self, data_loader):\n print(\"learning dataset\")\n\n count = 0\n for sample in data_loader:\n input_sentence = sample[\"input\"][0]\n target_sentence = sample[\"target\"][0]\n\n prev_word = None\n for word_idx in range(1, 16):\n target_word = int(target_sentence[word_idx])\n self.model_parts[word_idx - 1].populateFactors(\n input_sentence, target_word, prev_word\n )\n prev_word = target_word\n\n print(\"{}/127940\".format(count), end = \"\\r\")\n count += 1\n print(\"127940/127940\")\n\n print(\"before fixed\", list(self.model_parts[0].factors[0].d.keys())[:10])\n for i in range(15):\n self.model_parts[i].fixed()\n print(\"after fixed\", self.model_parts[0].factors[0].keys[:10])", "def train_dataloader(self):\r\n\r\n # transformation\r\n train_transform = Compose(\r\n [\r\n ApplyTransformToKey(\r\n key='video',\r\n transform=Compose(\r\n [\r\n UniformTemporalSubsample(8),\r\n Lambda(lambda x: x / 255.0),\r\n Normalize((0.45, 0.45, 0.45), (0.225, 0.225, 0.225)),\r\n RandomShortSideScale(min_size=256, max_size=320),\r\n RandomCrop(244),\r\n RandomHorizontalFlip(p=0.5),\r\n ]\r\n )\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = pv.data.Kinetics(\r\n data_path=os.path.join(self._DATA_PATH, \"train\"),\r\n clip_sampler=pv.data.make_clip_sampler(\"random\", self._CLIP_DURATION),\r\n decode_audio=False,\r\n transform=train_transform\r\n )\r\n return torch.utils.data.DataLoader(\r\n train_dataset,\r\n batch_size=self._BATCH_SIZE,\r\n num_workers=self._NUM_WORKERS,\r\n )", "def prepare_data(self,d):\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n 
train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters", "def get_dataloaders(self,\n dataset_locations_dict,\n batch_size=32,\n test_only=False):\n # if test_only:\n # test_dataset = TweetSentimentDataset(csv_path=dataset_locations_dict[\"TEST\"],\n # transform=None,\n # freq_threshold=5,\n # vocab_file=dataset_locations_dict[\"VOCAB\"],\n # create_vocab=False)\n # return get_dataloader(test_dataset,\n # test_dataset.vocab,\n # batch_size=1,shuffle=False,num_workers=0,\n # add_collate_fn=True)\n \n train_val_dataset = TweetSentimentDataset(csv_path=dataset_locations_dict[\"TRAIN_TEST\"],\n transform=None,\n freq_threshold=5,\n vocab_file=dataset_locations_dict[\"VOCAB\"],\n create_vocab=False)\n \n # test_dataset = TweetSentimentDataset(csv_path=dataset_locations_dict[\"TEST\"],\n # transform=None,\n # freq_threshold=5,\n # vocab_file=dataset_locations_dict[\"VOCAB\"],\n # create_vocab=False)\n \n train_ds_len = int(0.9*len(train_val_dataset))\n \n val_ds_len = int(0.05*len(train_val_dataset))\n \n test_ds_len = len(train_val_dataset)-train_ds_len-val_ds_len\n \n train_dataset,val_dataset,test_dataset = random_split(train_val_dataset,\n lengths=[train_ds_len,val_ds_len,test_ds_len],\n generator=torch.Generator().manual_seed(seed))\n \n train_dataloader = get_dataloader(train_dataset,\n train_val_dataset.vocab,\n batch_size=batch_size,shuffle=True,num_workers=0,\n add_collate_fn=True)\n val_dataloader = get_dataloader(val_dataset,\n train_val_dataset.vocab,\n batch_size=batch_size,shuffle=False,num_workers=0,\n add_collate_fn=True)\n test_dataloader = get_dataloader(test_dataset,\n train_val_dataset.vocab,\n batch_size=batch_size,shuffle=False,num_workers=0,\n add_collate_fn=True)\n \n # test_dataset.df.to_csv('sentiment_analysis_test_dataset_4990.csv')\n print(f\"Training Dataset size : {len(train_dataset)}\\n\")\n print(f\"Validation Dataset size : {len(val_dataset)}\\n\")\n print(f\"Test Dataset size : {len(test_dataset)}\\n\")\n \n if test_only:\n return test_dataloader\n return train_dataloader,val_dataloader,test_dataloader", "def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None, noise_level=0, net_id=None, total=0):\n if dataset in ('mnist', 'femnist', 'fmnist', 'cifar10','cifar100', 'svhn', 'generated', 'covtype', 'a9a', 'rcv1', 'SUSY','tinyimagenet'):\n if dataset == 'mnist':\n dl_obj = MNIST_truncated\n\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n elif dataset == 'femnist':\n dl_obj = FEMNIST\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n elif dataset == 'fmnist':\n dl_obj = FashionMNIST_truncated\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., 
noise_level, net_id, total)])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n elif dataset == 'svhn':\n dl_obj = SVHN_custom\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n\n elif dataset == 'cifar10':\n dl_obj = CIFAR10_truncated\n\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n transforms.Lambda(lambda x: F.pad(\n Variable(x.unsqueeze(0), requires_grad=False),\n (4, 4, 4, 4), mode='reflect').data.squeeze()),\n transforms.ToPILImage(),\n transforms.RandomCrop(32),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)\n ])\n # data prep for test set\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n \n elif dataset == 'cifar100':\n dl_obj = CIFAR100_truncated\n\n normalize = transforms.Normalize(mean=[0.5070751592371323, 0.48654887331495095, 0.4409178433670343],\n std=[0.2673342858792401, 0.2564384629170883, 0.27615047132568404])\n # transform_train = transforms.Compose([\n # transforms.RandomCrop(32),\n # transforms.RandomHorizontalFlip(),\n # transforms.ToTensor(),\n # normalize\n # ])\n transform_train = transforms.Compose([\n # transforms.ToPILImage(),\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n transforms.ToTensor(),\n normalize\n ])\n # data prep for test set\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n normalize])\n\n elif dataset == 'tinyimagenet': \n # random_ids = np.random.randint(1000, size=datasize)\n # train_indices = random_ids\n\n imagenet_mean = [0.485, 0.456, 0.406]\n imagenet_std = [0.229, 0.224, 0.225]\n\n train_dl = torch.utils.data.DataLoader(\n torchvision.datasets.ImageFolder(datadir +\"/train\",\n transform=transforms.Compose([\n transforms.Resize(32), \n transforms.ToTensor(),\n # Phuong 09/26 change (mean, std) -> same as pretrained imagenet\n transforms.Normalize(mean=imagenet_mean, std=imagenet_std)])),\n #Phuong 09/26 drop_last=False -> True\n batch_size=train_bs, drop_last=True)\n \n test_dl = torch.utils.data.DataLoader(\n torchvision.datasets.ImageFolder(datadir +\"/test\",\n transform=transforms.Compose([\n transforms.Resize(32), \n transforms.ToTensor(),\n # Phuong 09/26 change (mean, std) -> same as pretrained imagenet\n transforms.Normalize(mean=imagenet_mean, std=imagenet_std)])),\n #Phuong 09/26 drop_last=False -> True\n batch_size=test_bs, drop_last=True)\n\n return train_dl, test_dl, None, None\n\n\n else:\n dl_obj = Generated\n transform_train = None\n transform_test = None\n\n\n train_ds = dl_obj(datadir, dataidxs=dataidxs, train=True, transform=transform_train, download=True)\n test_ds = dl_obj(datadir, train=False, transform=transform_test, download=True)\n\n train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last= dataset in ['cifar100'])\n test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False)\n\n return train_dl, test_dl, train_ds, test_ds", "def trainer(model, train_dataloader, val_dataloader, num_epochs, path_to_save='/home/atharva',\n checkpoint_path='/home/atharva',\n checkpoint=100, train_batch=1, test_batch=1, device='cuda:0'): # 2 Marks.\n 
#torch.backends.cudnn.benchmark = True #Comment this if you are not using a GPU...\n # set the network to training mode.\n model.cuda() # if gpu available otherwise comment this line. \n # your code goes here. \n def accuracy(y1,y2):\n aa = list((y1==y2).astype('int'))\n acc = sum(aa) / len(aa)\n del aa\n return acc\n training_acc = []\n training_loss = []\n val_acc = []\n val_loss = []\n\n #Train the model on the train_dataloader.\n from torch.nn import CrossEntropyLoss\n criterion = CrossEntropyLoss()\n for epoch in range(num_epochs): # loop over the dataset multiple times\n preds = []\n labels = []\n \n for i in range(len(train_dataloader)):\n # get the inputs; data is a list of [inputs, labels]\n data_dict = train_dataloader[i]\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n output = model(data_dict['statement'], data_dict['justification'], data_dict['credit_history'])\n loss = criterion(outputs, data_dict['label'])\n loss.backward()\n optimizer.step()\n preds.append(output)\n labels.append(data_dict['label'])\n\n #Calculate the metrics, that is the loss and accuracy for the training phase per epoch and store them in a list.\n training_acc.append(accuracy(preds.numpy(), labels.numpy()))\n training_loss.append(criterion(preds, label))\n\n #Validating\n preds = []\n labels = []\n for i in range(len(val_dataloader)):\n # get the inputs; data is a list of [inputs, labels]\n data_dict = val_dataloader[i]\n\n # forward + backward + optimize\n outputs = model(data_dict['statement'], data_dict['justification'], data_dict['credit_history'])\n loss = criterion(outputs, labels)\n preds.append(output)\n labels.append(data_dict['label'])\n\n val_acc.append(accuracy(preds.numpy(), labels.numpy()))\n val_loss.append(criterion(preds, label))\n\n #Save your model at the maximum validation accuracy obtained till the latest epoch.\n if val_acc[-1] > max(val_acc[:-1]):\n #Save model\n torch.save(model.state_dict(), save_path)\n\n #Checkpoint at the 100th epoch\n if epoch%100 == 0:\n #make a checkpoint\n torch.save(model.state_dict(), save_path)\n\n\n\n \n\n\n plt.plot(training_acc)\n plt.plot(val_acc)\n plt.plot(training_loss)\n plt.plot(val_loss)\n plt.show()", "def train(self, training_data):\n pass", "def get_loaders(train_dataset, val_dataset, test_dataset, batch_size=128):\n train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=8,\n shuffle=True)\n\n val_loader = DataLoader(val_dataset, batch_size=batch_size, num_workers=8,\n shuffle=False)\n\n test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=8,\n shuffle=False)\n\n return train_loader, val_loader, test_loader", "def trainer(model,\n optimizer,\n dataset,\n count_of_epoch=5,\n batch_size=64,\n callback=None,\n progress=None):\n iterations = range(count_of_epoch)\n\n if progress is not None:\n iterations = progress(iterations)\n\n for it in iterations:\n\n batch_generator = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True)\n\n train_epoch(\n \tmodel=model,\n train_generator=batch_generator,\n optimizer=optimizer,\n callback=callback)\n\n return", "def run_step(self):\n assert self.model.training, \"[SimpleTrainer] model was changed to eval mode!\"\n start = time.perf_counter()\n \"\"\"\n If your want to do something with the data, you can wrap the dataloader.\n \"\"\"\n data = next(self._data_loader_iter)\n data_time = time.perf_counter() - start\n\n \"\"\"\n If your want to do something with the losses, you can wrap 
the model.\n \"\"\"\n loss_dict = self.model(data)\n losses = sum(loss for loss in loss_dict.values())\n self._detect_anomaly(losses, loss_dict)\n\n metrics_dict = loss_dict\n metrics_dict[\"data_time\"] = data_time\n self._write_metrics(metrics_dict)\n \n validation_data = next(self.validation_data_loader_iter)\n val_losses_dict = self.model(validation_data)\n val_losses = sum(loss for loss in val_losses_dict.values())\n self._detect_anomaly(val_losses, val_losses_dict)\n\n val_metrics_dict = val_losses_dict\n val_metrics_dict[\"data_time\"] = data_time\n self._write_validation_metrics(val_metrics_dict)\n\n \"\"\"\n If you need accumulate gradients or something similar, you can\n wrap the optimizer with your custom `zero_grad()` method.\n \"\"\"\n self.optimizer.zero_grad()\n losses.backward()\n\n \"\"\"\n If you need gradient clipping/scaling or other processing, you can\n wrap the optimizer with your custom `step()` method.\n \"\"\"\n self.optimizer.step()", "def _init_train_loader(self):\n # Choose the right dataset type\n if self.config_args[\"num_members\"] > 1:\n class_dataset_wrapper = dataset_wrapper.MixMoDataset\n else:\n class_dataset_wrapper = dataset_wrapper.MSDADataset\n\n # Load augmentations\n self.traindatasetwrapper = class_dataset_wrapper(\n dataset=self.train_dataset,\n num_classes=int(self.config_args[\"data\"][\"num_classes\"]),\n num_members=self.config_args[\"num_members\"],\n dict_config=self.config_args[\"training\"][\"dataset_wrapper\"],\n properties=self.properties\n )\n\n # Build standard sampler\n _train_sampler = torch.utils.data.sampler.RandomSampler(\n data_source=self.traindatasetwrapper, ## only needed for its length\n num_samples=None,\n replacement=False,\n )\n\n # Wrap it with the repeating sampler used for multi-input models\n batch_sampler = batch_repetition_sampler.BatchRepetitionSampler(\n sampler=_train_sampler,\n batch_size=self.batch_size,\n num_members=self.config_args[\"num_members\"],\n drop_last=True,\n config_batch_sampler=self.config_args[\"training\"][\"batch_sampler\"]\n )\n\n self.train_loader = torch.utils.data.DataLoader(\n self.traindatasetwrapper,\n batch_sampler=batch_sampler,\n num_workers=self.num_workers,\n batch_size=1,\n shuffle=False,\n sampler=None,\n drop_last=False,\n pin_memory=True,\n )", "def val_dataloader(self) -> data.DataLoader:\n return data.DataLoader(dataset=self.datasets['valid'], batch_size=self.batch_size,\n num_workers=self.num_workers, shuffle=False, pin_memory=False)", "def build_dataloaders(dataset, batch_size, train_test_split=0.1, train_shuffle=True, eval_shuffle=True):\n # 데이터셋 길이\n dataset_len = len(dataset)\n\n # 학습, 평가 데이터 나누기\n eval_len = int(dataset_len * train_test_split)\n train_len = dataset_len - eval_len\n\n train_dataset, eval_dataset = random_split(dataset, (train_len, eval_len))\n\n train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=train_shuffle)\n eval_loader = DataLoader(eval_dataset, batch_size=batch_size, shuffle=eval_shuffle)\n\n\n logging.info(f'''train_dataloader size: {len(train_loader.dataset)} | shuffle: {train_shuffle}\n eval_dataloader size: {len(eval_loader.dataset)} | shuffle: {eval_shuffle}''')\n\n return train_loader, eval_loader", "def train(self,\n dataloader_train: DataLoader,\n dataloader_val: Optional[DataLoader] = None\n ) -> Tuple[List[float], List[float]]:\n loss_tr_epochs = []\n loss_val_epochs = []\n f1_best = .0\n lrs = []\n self.model.to(self.device)\n\n for epoch in range(self.n_epochs):\n tr_loss_mean = .0\n tr_loss_cum = .0\n step = 
-1\n\n # Training\n # -----------------------------\n self.model.train()\n self.model.zero_grad()\n for i, batch in enumerate(dataloader_train):\n # Estimate gradients and accumulate them\n tr_loss = self._estimate_gradients(batch)\n tr_loss_cum += tr_loss\n\n # Update params every acumulated steps\n if (i + 1) % self.accumulate_grad_every == 0:\n self._update_network_params()\n if self.scheduler is not None:\n lrs.append(self.scheduler.get_last_lr()[0])\n step += 1\n else:\n continue\n\n if step % self.print_every == 0:\n tr_loss_mean = tr_loss_cum/(i+1)\n print(f\"- Epoch: {epoch}/{self.n_epochs - 1}\",\n f\"- Step: {step:3}/{(len(dataloader_train)// self.accumulate_grad_every) - 1}\",\n f\"- Training Loss: {tr_loss_mean:.6f}\")\n\n loss_tr_epochs.append(tr_loss_mean)\n print(f\"- Epoch: {epoch}/{self.n_epochs - 1} - Training Loss: {tr_loss_mean}\")\n\n # Plot training curve\n plt.plot(loss_tr_epochs)\n plt.xlabel('#Epochs')\n plt.ylabel('Error')\n plt.legend(['training'])\n\n # Validation\n # -----------------------------\n if dataloader_val is not None:\n val_loss, f1, report_ent, report_toks = self.evaluate(dataloader_val,\n epoch=epoch,\n verbose=True)\n loss_val_epochs.append(val_loss)\n\n if f1 > f1_best:\n f1_best = f1\n self._save_model()\n self._write_report_to_file(report_ent, report_toks, epoch,\n tr_loss_mean, val_loss)\n\n # Plot val curve\n plt.plot(loss_val_epochs)\n plt.legend(['training', 'validation'])\n\n plt.tight_layout()\n plt.savefig(os.path.join(self.output_dir, 'error_curves.jpg'))\n plt.close()\n\n # Plot learning rate curve\n plt.plot(lrs)\n plt.xlabel('#Batches')\n plt.ylabel('Learning rate')\n plt.tight_layout()\n plt.savefig(os.path.join(self.output_dir, 'learning_rate.jpg'))\n plt.close()\n return loss_tr_epochs, loss_val_epochs", "def dataio_prepare(hparams):\n data_folder = hparams[\"data_folder\"]\n\n train_data = dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"train_data\"],\n replacements={\"data_root\": data_folder}, )\n\n if hparams[\"sorting\"] == \"ascending\":\n # we sort training data to speed up training and get better results.\n train_data = train_data.filtered_sorted(sort_key=\"duration\")\n # when sorting do not shuffle in dataloader ! otherwise is pointless\n hparams[\"train_dataloader_opts\"][\"shuffle\"] = False\n\n elif hparams[\"sorting\"] == \"descending\":\n train_data = train_data.filtered_sorted(\n sort_key=\"duration\", reverse=True)\n # when sorting do not shuffle in dataloader ! otherwise is pointless\n hparams[\"train_dataloader_opts\"][\"shuffle\"] = False\n\n elif hparams[\"sorting\"] == \"random\":\n pass\n\n else:\n raise NotImplementedError(\n \"sorting must be random, ascending or descending\")\n\n valid_data = dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"valid_data\"],\n replacements={\"data_root\": data_folder}, )\n valid_data = valid_data.filtered_sorted(sort_key=\"duration\")\n\n test_data = dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"test_data\"],\n replacements={\"data_root\": data_folder}, )\n test_data = test_data.filtered_sorted(sort_key=\"duration\")\n\n datasets = [train_data, valid_data, test_data]\n\n # Defining tokenizer and loading it\n tokenizer = transformers.BertTokenizer.from_pretrained('bert-base-chinese')\n\n # 2. Define audio pipeline:\n @data_pipeline.takes(\"wav\")\n @data_pipeline.provides(\"sig\")\n def audio_pipeline(wav):\n sig = dataio.read_audio(wav)\n return sig\n\n dataset.add_dynamic_item(datasets, audio_pipeline)\n\n # 3. 
Define text pipeline:\n @data_pipeline.takes(\"transcript\")\n @data_pipeline.provides(\"wrd\", \"tokens_list\", \"tokens\")\n def text_pipeline(wrd):\n wrd = \"\".join(wrd.split(\" \"))\n yield wrd\n tokens_list = tokenizer(wrd)[\"input_ids\"]\n yield tokens_list\n tokens = numpy.array(tokens_list, dtype=\"int64\")\n yield tokens\n\n dataset.add_dynamic_item(datasets, text_pipeline)\n\n # 4. Set output:\n dataset.set_output_keys(\n datasets,\n [\"id\", \"sig\", \"wrd\", \"tokens\"], )\n\n # 5. If Dynamic Batching is used, we instantiate the needed samplers.\n train_batch_sampler = None\n valid_batch_sampler = None\n if hparams[\"dynamic_batching\"]:\n from sampler import DynamicBatchSampler # noqa\n\n dynamic_hparams = hparams[\"dynamic_batch_sampler\"]\n num_buckets = dynamic_hparams[\"num_buckets\"]\n\n train_batch_sampler = DynamicBatchSampler(\n train_data,\n dynamic_hparams[\"max_batch_len\"],\n num_buckets=num_buckets,\n length_func=lambda x: x[\"duration\"],\n shuffle=dynamic_hparams[\"shuffle_ex\"],\n batch_ordering=dynamic_hparams[\"batch_ordering\"], )\n\n valid_batch_sampler = DynamicBatchSampler(\n valid_data,\n dynamic_hparams[\"max_batch_len\"],\n num_buckets=num_buckets,\n length_func=lambda x: x[\"duration\"],\n shuffle=dynamic_hparams[\"shuffle_ex\"],\n batch_ordering=dynamic_hparams[\"batch_ordering\"], )\n\n return (train_data, valid_data, test_data, tokenizer, train_batch_sampler,\n valid_batch_sampler, )", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def fetch_dataloader(types, data_dir, hyper_params, train_idx=None, val_idx=None):\n dataloaders = {}\n \n # TODO: write this to hyper_params, make hyper_params an out variable? then save? 
yes, AND: when ONLY test is requested, load from hyperparams!\n # TODO: also, add 3rd variation of types: for testing, only read it from hyper_params (DO I NEED TO READ HYPER_PARAMS FOR JUST TESTING?)\n if train_idx is not None:\n mean, std = mean_std_calc(DataLoader(Subset(Heart2DSegmentationDataset(str(Path(data_dir) / \"train_heart_scans\"), hyper_params.endo_or_epi), train_idx)))\n hyper_params.mean = mean.item()\n hyper_params.std = std.item()\n else:\n if 'train' in types:\n mean, std = mean_std_calc(DataLoader(Heart2DSegmentationDataset(str(Path(data_dir) / \"train_heart_scans\"), hyper_params.endo_or_epi)))\n hyper_params.mean = mean.item()\n hyper_params.std = std.item()\n else:\n mean, std = torch.tensor(hyper_params.mean), torch.tensor(hyper_params.std)\n \n print(\"Mean: {}, Std: {}\".format(mean.item(), std.item()))\n # borrowed from http://pytorch.org/tutorials/advanced/neural_style_tutorial.html\n # and http://pytorch.org/tutorials/beginner/data_loading_tutorial.html\n train_transformer = transforms.Compose([\n transforms.Normalize(mean=[mean.item()], std=[std.item()])\n ])\n \n eval_transformer = transforms.Compose([\n transforms.Normalize(mean=[mean.item()], std=[std.item()])\n ])\n\n for split in ['train', 'val', 'test']:\n if split in types:\n path = str(Path(data_dir) / \"{}_heart_scans\".format(split if split != 'val' else 'train'))\n\n if split == 'train':\n if train_idx is not None:\n dl = DataLoader(Subset(Heart2DSegmentationDataset(path, hyper_params.endo_or_epi, train_transformer), train_idx), \n batch_size=hyper_params.batch_size, \n shuffle=True,\n num_workers=hyper_params.num_workers,\n pin_memory=hyper_params.cuda)\n else:\n dl = DataLoader(Heart2DSegmentationDataset(path, hyper_params.endo_or_epi, train_transformer), \n batch_size=hyper_params.batch_size, \n shuffle=True,\n num_workers=hyper_params.num_workers,\n pin_memory=hyper_params.cuda)\n else:\n if (split == 'val') and (val_idx is not None): \n dl = DataLoader(Subset(Heart2DSegmentationDataset(path, hyper_params.endo_or_epi, eval_transformer), val_idx), \n batch_size=hyper_params.batch_size, \n shuffle=False,\n num_workers=hyper_params.num_workers,\n pin_memory=hyper_params.cuda) \n else:\n dl = DataLoader(Heart2DSegmentationDataset(path, hyper_params.endo_or_epi, eval_transformer), \n batch_size=hyper_params.batch_size, \n shuffle=False,\n num_workers=hyper_params.num_workers,\n pin_memory=hyper_params.cuda)\n\n dataloaders[split] = dl\n\n return dataloaders", "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader", "def get_loaders(opt):\n train_samples, val_samples = get_train_val_metadata(opt.dataset_dir,\n opt.validation_cities,\n opt.patch_size,\n opt.stride)\n print('train samples : ', len(train_samples))\n print('val samples : ', len(val_samples))\n\n logging.info('STARTING Dataset Creation')\n\n full_load = full_onera_loader(opt.dataset_dir, opt)\n\n train_dataset = OneraPreloader(opt.dataset_dir,\n train_samples,\n full_load,\n opt.patch_size,\n opt.augmentation)\n val_dataset = OneraPreloader(opt.dataset_dir,\n val_samples,\n full_load,\n opt.patch_size,\n False)\n\n logging.info('STARTING Dataloading')\n\n train_loader = 
torch.utils.data.DataLoader(train_dataset,\n batch_size=opt.batch_size,\n shuffle=True,\n num_workers=opt.num_workers)\n val_loader = torch.utils.data.DataLoader(val_dataset,\n batch_size=opt.batch_size,\n shuffle=False,\n num_workers=opt.num_workers)\n return train_loader, val_loader", "def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=True,\n num_workers=multiprocessing.cpu_count(),\n )", "def __init__(self, dataloader):\n self._dataloader = dataloader\n\n self._iterator = iter(self._dataloader)", "def on_epoch_end(self):\n if self.empty():\n print(\"WARNING: {}: dataloaders are not loaded yet, cannot process data\".format(__file__))\n\n self.dl_idx = range(len(self.dataloaders))\n self.dl_data_idx = []\n for idx in self.dl_idx:\n # Extract data indices\n data_idx_list = self.__get_data_idx(idx)\n if self.shuffle: # Shuffle dataloader data list\n np.random.shuffle(data_idx_list)\n # Store Data indices\n self.dl_data_idx.append(data_idx_list)\n\n if self.shuffle: # Shuffle dataloader list\n np.random.shuffle(self.dl_idx)\n\n if self.balance_data:\n self.__balance_generator_data()", "def _tokenize_data(train_dataset, dev_dataset, use_spacy, pool):\n tokenizer = SQuADDataTokenizer(use_spacy)\n\n tic = time.time()\n print('Train examples [{}] transformation started.'.format(len(train_dataset)))\n train_examples = list(tqdm.tqdm(tokenizer.run_async(pool, train_dataset),\n total=len(train_dataset)))\n print('Train examples transformed [{}/{}] in {:.3f} sec'.format(len(train_examples),\n len(train_dataset),\n time.time() - tic))\n tic = time.time()\n print('Dev examples [{}] transformation started.'.format(len(dev_dataset)))\n dev_examples = list(tqdm.tqdm(tokenizer.run_async(pool, dev_dataset),\n total=len(dev_dataset)))\n print('Dev examples transformed [{}/{}] in {:.3f} sec'.format(len(dev_examples),\n len(dev_dataset),\n time.time() - tic))\n return train_examples, dev_examples", "def load_data(batch_size=batch_size):\n trainset = LibriSpeechDataset(training_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds))\n testset = LibriSpeechDataset(validation_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds), stochastic=False)\n\n train_loader = DataLoader(trainset, batch_size=batch_size, num_workers=1, shuffle=True, drop_last=True)\n test_loader = DataLoader(testset, batch_size=1, num_workers=1, drop_last=True)\n\n return train_loader, test_loader", "def data_loaders(args):\n\n transform = transforms.Compose([\n transforms.Resize(64),\n transforms.ToTensor(),\n lambda image: (image - 0.5) * 2\n ])\n\n train_mnist = datasets.MNIST(\n root=args.database_root,\n train=True,\n download=True,\n transform=transform\n )\n train_loader = DataLoader(\n dataset=train_mnist,\n batch_size=args.train_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n test_mnist = datasets.MNIST(\n root=args.database_root,\n train=False,\n download=True,\n transform=transform\n )\n test_loader = DataLoader(\n dataset=test_mnist,\n batch_size=args.test_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n return train_loader, test_loader", "def train(self, data_dict, label_dict):\n loaders = self.init_loaders(data_dict, label_dict)\n best_performance = 1e18\n loss_dict = self.init_loss_dict()\n performance_dict = self.init_performance_dict()\n\n for epoch in range(self.config_dict[\"num_epochs\"]):\n print(\"Epoch {}/{}\".format(epoch, self.config_dict[\"num_epochs\"] - 1))\n print(\"-\" * 10)\n\n if self.scheduler 
is not None:\n self.scheduler.step()\n\n for phase in [\"train\", \"val\"]:\n self.model.train(phase == \"train\")\n running_loss_dict = self.init_running_loss_dict(\n list(loss_dict[phase].keys())\n )\n output_dict = self.init_output_dict()\n i = 0\n for the_data in loaders[phase]:\n i += 1\n batch_loss_dict = {}\n inputs, labels = self.transform_batch(the_data)\n\n # zero parameter gradients\n self.optimizer.zero_grad()\n\n # forward\n outputs = self.model(inputs)\n\n output_dict = self.update_output_dict(output_dict, outputs, labels)\n\n batch_loss_dict[\"loss\"] = self.criterion(outputs, labels)\n if phase == \"train\":\n batch_loss_dict[\"loss\"].backward()\n self.optimizer.step()\n\n for key in batch_loss_dict.keys():\n running_loss_dict[key] += batch_loss_dict[key].item()\n\n # Compute epoch losses and update loss dict\n epoch_loss_dict = {\n key: running_loss_dict[key] / i for key in running_loss_dict.keys()\n }\n loss_dict[phase] = self.update_metric_dict(\n loss_dict[phase], epoch_loss_dict\n )\n\n # Compute epoch performance and update performance dict\n epoch_statistics = self.compute_epoch_statistics(output_dict)\n performance_dict[phase] = self.update_metric_dict(\n performance_dict[phase], epoch_statistics\n )\n\n print(\"Phase: {}:\".format(phase))\n self.print_metric_dict(epoch_loss_dict)\n self.print_metric_dict(epoch_statistics)\n\n if phase == \"val\":\n best_model_condition = epoch_loss_dict[\"loss\"] < best_performance\n if best_model_condition:\n print(\"Best model updated\")\n best_performance = epoch_loss_dict[\"loss\"]\n best_model_wts = copy.deepcopy(self.model.state_dict())\n\n print(\"Best val performance: {:4f}\".format(best_performance))\n self.model.load_state_dict(best_model_wts)\n result_dict = {\n phase: {**performance_dict[phase], **loss_dict[phase]}\n for phase in performance_dict.keys()\n }\n return result_dict", "def train(self, args_hpo, index):\n\n PrintColors.prYellow(f'\\n===== training with: {args_hpo} index={index}')\n PrintColors.prGreen(f'---- in mode: {self.configurations.execution_mode}, tag: {self.configurations.tag} ----')\n ''' ============ LOAD DATA ================================================================================ '''\n starting_time = time.time()\n dataset_reader = self.dataset_reader(\n candidate_types=self.configurations.clustering_mode,\n word_indexer={'words': PretrainedTransformerIndexer(self.configurations.pretrained_model_name)},\n mode=self.configurations.loading_mode)\n ''' .read returns list of instances '''\n train_data, val_data, test_data = (dataset_reader.read(folder) for folder in\n [self.configurations.train_data_dir,\n self.configurations.val_data_dir,\n self.configurations.test_data_dir])\n\n # count state pairs\n preceeds = dict()\n\n for instance in train_data:\n for ind in range(len(instance.fields['squeezed_labels'].tokens) - 1):\n [event_1, event_2] = [instance.fields['squeezed_labels'].tokens[i].text for i in [ind, ind + 1]]\n scenario = self.dataset_reader.scenario_of_label(event_1)\n if scenario not in preceeds:\n preceeds[scenario] = dict()\n if (event_1, event_2) not in preceeds[scenario]:\n preceeds[scenario][(event_1, event_2)] = 0\n preceeds[scenario][(event_1, event_2)] += 1\n\n pretrained_tokenizer = PretrainedTransformerTokenizer(self.configurations.pretrained_model_name)\n supply_token_indices(train_data + val_data, 'story', pretrained_tokenizer)\n\n ''' build vocabulary and associate it with datasets '''\n vocabulary = Vocabulary.from_instances(train_data + val_data)\n 
train_data.index_with(vocabulary), val_data.index_with(vocabulary)\n\n train_data_loader = DataLoader(dataset=train_data, batch_size=args_hpo.batch_size)\n val_data_loader = DataLoader(dataset=val_data, batch_size=args_hpo.batch_size)\n\n ''' ============ DEFINE MODEL ============================================================================= '''\n ''' i keep .to() here instead of in model.__init__() to accomadate better abstraction '''\n event_labels = [i for i in range(vocabulary.get_vocab_size('scr_labels'))\n if '#' in vocabulary.get_token_from_index(i, 'scr_labels')]\n participant_labels = [i for i in range(vocabulary.get_vocab_size('scr_labels'))\n if '@' in vocabulary.get_token_from_index(i, 'scr_labels')]\n model = self.model(args_hpo, vocabulary, configurations=self.configurations,\n preceeds=preceeds,\n event_indices=event_labels,\n participant_indices=participant_labels).to(self.configurations.device)\n\n ''' ============ DEFINE TRAINER =========================================================================== '''\n ''' -- serialization --------------------------------------------------- '''\n if not os.path.exists(os.path.join(*['.', 'models'])):\n os.mkdir(os.path.join(*['.', 'models']))\n if index == 0:\n for file in os.listdir(os.path.join(*['.', 'models'])):\n path = os.path.join(*['.', 'models', file])\n if os.path.isfile(path):\n os.remove(path)\n else:\n shutil.rmtree(path)\n serialization_path = 'models_{}_{}'.format(self.configurations.tag, index)\n serialization_path_longer = os.path.join(*['.', 'models', serialization_path])\n vocab_path = 'vocab_{}_{}'.format(self.configurations.tag, index)\n vocab_dir_longer = os.path.join(*['.', 'models', vocab_path])\n if not os.path.exists(serialization_path_longer):\n os.mkdir(serialization_path_longer)\n model_checkpointer = Checkpointer(serialization_dir=serialization_path_longer, num_serialized_models_to_keep=1)\n ''' -- logging ---------------------------------------------------------- '''\n tensorboard_writer = TensorboardWriter(serialization_dir='tensorboard', summary_interval=1)\n if index == 0:\n shutil.rmtree(os.path.join(*['.', 'tensorboard', 'log']))\n\n optimizer = torch.optim.Adam(model.parameters(), lr=args_hpo.lr, weight_decay=args_hpo.l2)\n trainer = GradientDescentTrainer(\n model=model,\n optimizer=optimizer,\n data_loader=train_data_loader,\n validation_data_loader=val_data_loader,\n # note: this is the metric for early stopping\n validation_metric='-loss',\n patience=self.configurations.patience,\n num_epochs=self.configurations.max_epochs,\n serialization_dir=serialization_path_longer,\n checkpointer=model_checkpointer,\n cuda_device=self.configurations.device,\n grad_norm=args_hpo.clip,\n tensorboard_writer=tensorboard_writer,\n learning_rate_scheduler=ReduceOnPlateauLearningRateScheduler(optimizer=optimizer)\n )\n\n ''' trainer saves the model, but the vocabulary needs to be saved, too '''\n vocabulary.save_to_files(vocab_dir_longer)\n\n ''' check the metric names to synchronize with the class '''\n metrics = trainer.train()\n test_metrics = model.test(test_data=test_data, dataset_reader=dataset_reader)\n metrics.update(test_metrics)\n metrics['time_consumed(hrs)'] = round((time.time() - starting_time) / 3600, 4)\n\n return metrics", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = 
load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def train_epoch(self, data_loader):\n raise NotImplementedError", "def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=False,\n num_workers=multiprocessing.cpu_count(),\n )", "def init_dataset(validation_dataset_name):\n transform = transforms.Compose([transforms.ToPILImage(),transforms.ToTensor(),transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n \n if validation_dataset_name == 'datasetRAP':\n # validation = 8317 images = 166 batches of 50 images + 1 batch of 17 images\n dataset_valid = loader_rapdataset_yiqiang.RAPDataset(0,False,'/storage/Datasets/Rap-PedestrianAttributeRecognition/',transform)\n labels = loader_rapdataset_yiqiang.ATTRIBUTES\n datset_attr_nbr = 92\n elif validation_dataset_name == 'datasetPETA':\n dataset_valid = loader_peta_dataset.PETADataset(False, '/storage/Datasets/PETA-PEdesTrianAttribute', transform)\n labels = loader_peta_dataset.ATTRIBUTES\n datset_attr_nbr = 104\n elif validation_dataset_name == 'datasetRAPPETA':\n dataset_valid = loader_rap_plus_peta_dataset.RAPPlusPETADataset(False, '/storage/Datasets/Rap-PedestrianAttributeRecognition/', '/storage/Datasets/PETA-PEdesTrianAttribute', transform)\n labels = [peta_label for rap_label,peta_label in loader_rap_plus_peta_dataset.ATTRIBUTES]\n datset_attr_nbr = 49\n\n print (\"Dataset valid size :\", dataset_valid.__len__())\n print (\"Dataset Attributes number :\", datset_attr_nbr)\n assert (len(labels) == datset_attr_nbr)\n\n dataloader_valid = DataLoader(dataset_valid, batch_size=Param_Batchsize, shuffle=True, num_workers=Param_Nb_Workers)\n\n return dataloader_valid, dataset_valid", "def prepare(self):\n bcolz.set_nthreads(2)\n\n # step 0: load only when not loaded yet\n if TRAINING in self.data and VALIDATION in self.data: return\n\n # step 1: load the file names\n patients = sorted(glob.glob(self.location+'/*.*/'))\n print len(patients), \"patients\"\n\n # step 1: load the file names\n # make a stratified validation set\n # note, the seed decides the validation set, but it is deterministic in the names\n random.seed(317070)\n patient_names = [self.patient_name_from_file_name(f) for f in patients]\n validation_patients = random.sample(patient_names, int(VALIDATION_SET_SIZE*len(patient_names)))\n\n labels_as_dict = defaultdict(list)\n\n with open(paths.LUNA_LABELS_PATH, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(reader) # skip the header\n for row in reader:\n label = (float(row[1]), float(row[2]), float(row[3]), float(row[4]))\n labels_as_dict[str(row[0])].append(label)\n\n # make the static data empty\n for s in self.datasets:\n self.data[s] = []\n self.labels[s] = []\n self.names[s] = []\n self.spacings[s] = []\n self.origins[s] = []\n\n with gzip.open(paths.INTERMEDIATE_DATA_PATH + 'spacings.pkl.gz') as f:\n spacings = cPickle.load(f)\n\n with gzip.open(paths.INTERMEDIATE_DATA_PATH + 'origins.pkl.gz') as f:\n origins = 
cPickle.load(f)\n\n # load the filenames and put into the right dataset\n for i, patient_folder in enumerate(patients):\n patient_id = str(patient_folder.split(path.sep)[-2])\n if patient_id in validation_patients:\n dataset = VALIDATION\n else:\n dataset = TRAIN\n\n\n label = labels_as_dict[patient_id]\n if self.only_positive and not label:\n continue\n\n self.data[dataset].append(patient_folder)\n self.labels[dataset].append(label)\n self.names[dataset].append(patient_id)\n self.spacings[dataset].append(spacings[patient_id])\n self.origins[dataset].append(origins[patient_id])\n\n # give every patient a unique number\n last_index = -1\n for set in self.datasets:\n self.indices[set] = range(last_index+1,last_index+1+len(self.data[set]))\n if len(self.indices[set]) > 0:\n last_index = self.indices[set][-1]\n print set, len(self.indices[set]), \"samples\"", "def validate(self):\n\n # start validate\n self.model.eval()\n preds, labels = [], []\n for batch_idx, data in enumerate(self.valid_dataloader):\n # calculate and log losses\n losses_report, valid_preds, valid_labels = self.forward_one_batch(\n data)\n self._update_losses(losses_report, train=False)\n\n preds.append(valid_preds)\n labels.append(valid_labels)\n\n preds = np.concatenate(preds, axis=0)\n labels = np.concatenate(labels, axis=0)\n if IS_REG:\n preds = disc(preds)\n # calculate and log metrics\n metrics_report = self.evaluate_metrics(preds, labels)\n self._update_metrics(metrics_report, train=False)\n\n # TODO: lr scheduler step setting\n self.lr_scheduler.step(self.valid_loss_meters['CrossEntropyLoss'].avg)\n\n # end validate\n self.model.train()", "def train_test_loaders(dataset, validation_ratio=0.2, **kwargs):\n dataset_size = len(dataset)\n test_size = int(np.floor(validation_ratio * dataset_size))\n train_size = dataset_size - test_size\n print('TRAIN SIZE {}'.format(train_size))\n print('TEST SIZE {}'.format(test_size))\n train_dataset, test_dataset = random_split(dataset, (train_size, test_size),\n generator=torch.Generator().manual_seed(RANDOM_SEED))\n train_loader = torch.utils.data.DataLoader(train_dataset, **kwargs)\n test_loader = torch.utils.data.DataLoader(test_dataset, **kwargs)\n return train_loader, test_loader", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n 
\"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def _train(self):\n step = 0\n for epoch in range(self.opts.num_epochs):\n self.hub.publish(Topic.EPOCH, epoch)\n for i, data in enumerate(self.loader):\n # Compute loss ...\n # NOTE(ycho): if one of the callbacks require training loss,\n # e.g. for logging, simply register a hook to the loss module\n # rather than trying to extract them here.\n loss = self.loss_fn(self.model, data)\n self.hub.publish(Topic.TRAIN_LOSS, loss)\n\n # Backprop + Optimize ...\n self.optim.zero_grad()\n loss[\"total\"].backward()\n self.optim.step()\n\n # Emit `step` event.\n # == logging, saving, evaluation\n self.hub.publish(Topic.STEP, step)\n step += 1\n\n if step >= self.opts.train_steps:\n return", "def load_data_wrapper():\r\n \r\n global training_inputs, training_results\r\n global validation_inputs, validation_results\r\n global test_inputs, test_results\r\n global num_samples, numpixels, num_test_samples\r\n \r\n tr_d, va_d, te_d = load_data()\r\n \r\n num_samples=len(tr_d[0])\r\n training_inputs=zeros([num_samples,numpixels])\r\n training_results=zeros([num_samples,10]) \r\n for j in range(num_samples):\r\n training_inputs[j,:] = reshape(tr_d[0][j], (numpixels))\r\n training_results[j,:] = vectorized_result(tr_d[1][j])\r\n# validation_inputs = [reshape(x, (numpixels)) for x in va_d[0]]\r\n# validation_results = [vectorized_result(y) for y in va_d[1]]\r\n\r\n num_test_samples=len(te_d[0])\r\n test_inputs=zeros([num_test_samples,numpixels])\r\n test_results=zeros([num_test_samples,10]) \r\n for j in range(num_test_samples):\r\n test_inputs[j,:] = reshape(te_d[0][j], (numpixels))\r\n test_results[j,:] = vectorized_result(te_d[1][j])", "def prep_data():\n loader = DLoader()\n cap = loader.visitor_cnt\n\n pass", "def _train_task(self, train_loader, val_loader):\n if self._task == 0:\n epochs = 90\n optimizer = factory.get_optimizer(self._network.parameters(), self._opt_name, 0.1, 0.001)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [50, 60], gamma=0.1)\n self._train(train_loader, val_loader, epochs, optimizer, scheduler)\n return\n\n # Training on all new + examplars\n print(\"Training\")\n self._finetuning = False\n epochs = 60\n optimizer = factory.get_optimizer(self._network.parameters(), self._opt_name, 0.1, 0.001)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [40, 50], gamma=0.1)\n self._train(train_loader, val_loader, epochs, optimizer, scheduler)\n\n # Fine-tuning on sub-set new + examplars\n print(\"Fine-tuning\")\n self._old_model = self._network.copy().freeze()\n\n self._finetuning = True\n self._build_examplars(train_loader,\n n_examplars=self._k // (self._n_classes - self._task_size))\n train_loader.dataset.set_idxes(self.examplars) # Fine-tuning only on balanced dataset\n\n optimizer = factory.get_optimizer(self._network.parameters(), self._opt_name, 0.01, 0.001)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [10, 20], gamma=0.1)\n self._train(train_loader, val_loader, 40, optimizer, scheduler)", "def 
get_data_loaders_4sentence():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_1generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"]\n #history_complete.append(history)\n if len(history_splitted) > (len(persona)-1):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def load_datset_train_tokenization(dataset,tokenizer,tokenizer_trainer,batch_size : int):\n def make_batch_iter(dataset):\n for i in range(0, len(dataset), batch_size):\n yield dataset[i : i + batch_size][\"code\"]\n tokenizer.train_from_iterator(make_batch_iter(), trainer=tokenizer_trainer, length=len(dataset))\n return tokenizer", "def post_process_datasets(self, train_data, val_data, test_data, info=None):\n return train_data, val_data, test_data, info", "def _load_data(self):\n if self._name in BALANCE_DATASET:\n _loader = dataset_loaders[self._name]\n xnp, y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.y_ts = y[test_idx]\n\n else:\n _loader = dataset_loaders[self._name]\n xnp, xp, y = _loader()\n # self.xnp, self.xp, self.y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.xp_tr = xp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.xp_ts = xp[test_idx]\n self.y_ts = y[test_idx]", "def train(args: dict, lr_index: int, dataloaders: Tuple[DataLoader]):\n train_dataloader, val_dataloader = dataloaders\n device = torch.device('cuda') if args.gpu else torch.device('cpu')\n model = get_model(args, device)\n if args.label_weights is None:\n label_weights = None\n else:\n label_weights = args.label_weights.to(device)\n loss_criterion = nn.CrossEntropyLoss(weight=label_weights)\n optimizer = torch.optim.AdamW(\n model.parameters(), lr=args.learning_rates[lr_index]\n )\n train_losses = []\n val_losses = []\n stats = {\"true\": [], \"pred\": []}\n for epoch in range(args.epochs):\n start = time.time()\n train_loss = train_epoch(\n model, loss_criterion, optimizer, train_dataloader, device\n )\n val_loss, epoch_stats = val_epoch(\n model, loss_criterion, val_dataloader, device\n )\n 
train_losses.append(train_loss)\n val_losses.append(val_loss)\n stats[\"true\"] += epoch_stats[\"true\"]\n stats[\"pred\"] += epoch_stats[\"pred\"]\n duration = (time.time() - start) / 60 # epoch duration in minutes\n print_training_update(\n epoch, duration, lr_index, (train_loss, val_loss), epoch_stats\n )\n recent_10_percent = int(0.9*args.epochs)\n return (\n model,\n np.mean(train_losses[recent_10_percent:]),\n np.mean(val_losses[recent_10_percent:]),\n training_metric(stats)\n )", "def __init__(self):\n # self.model = get_pretrained_model()\n self.tokenizer = get_tokenizer()\n self.model = transformers.Trainer(model=get_pretrained_model())\n self.summarizer = pipeline(\"summarization\") # ~1.2 GB download the first time this is run.", "def run():\r\n \r\n LABEL = data.LabelField(use_vocab=True)\r\n TEXT = data.Field(sequential=True, tokenize=lambda x:x.split(), lower=True, fix_length=config.MAX_LENGTH)\r\n\r\n### 1/5\r\n dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n format='csv', \r\n fields=[('text', TEXT),('label', LABEL)], \r\n skip_header=True)\r\n # split the dataset, 8:2\r\n train_dataset, valid_dataset = dataset.split(split_ratio=[0.8,0.2], random_state=random.getstate())\r\n \r\n test_data = data.TabularDataset(path=config.TEST_DATASET_FNAME,\r\n format='csv', \r\n fields=[('text', TEXT),('label', LABEL)], \r\n skip_header=True)\r\n \r\n### 2\r\n# train_dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n# valid_dataset = data.TabularDataset(path=config.VAL_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n \r\n# test_data = data.TabularDataset(path=config.TEST_DATASET_FNAME,\r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True)\r\n \r\n### 3/4\r\n# train_dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n \r\n# dataset = data.TabularDataset(path=config.TEST_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True)\r\n# # split the dataset, 5:5\r\n# valid_dataset, test_data = dataset.split(split_ratio=[0.5,0.5], random_state=random.getstate())\r\n\r\n### 5\r\n\r\n\r\n\r\n # load embeddings\r\n vectors_data = load_vectors(config.EMBEDDING_FNAME)\r\n\r\n TEXT.build_vocab(train_dataset, vectors=vectors_data)\r\n LABEL.build_vocab(train_dataset)\r\n print ('vector size:',TEXT.vocab.vectors.size())\r\n embedding_pretrained_matrix = TEXT.vocab.vectors\r\n \r\n # create torch device\r\n print(\"To device...\")\r\n USE_CUDA = torch.cuda.is_available()\r\n device = torch.device(\"cuda\" if USE_CUDA else \"cpu\")\r\n\r\n train_it, valid_it = data.BucketIterator.splits((train_dataset, valid_dataset),\r\n batch_sizes=(config.TRAIN_BATCH_SIZE,config.VAL_BATCH_SIZE), \r\n device=device, \r\n sort_key=lambda x: len(x.text),\r\n sort_within_batch=False,\r\n shuffle=True,\r\n repeat=False)\r\n test_it = data.BucketIterator(test_data, \r\n batch_size=config.TEST_BATCH_SIZE, \r\n sort_key=lambda x: len(x.text), \r\n shuffle=False,\r\n device=device)\r\n \r\n \r\n # fetch model\r\n vocab_size = len(TEXT.vocab) # TEXT.vocab.vectors.size()\r\n# pretrained_vec = TEXT.vocab.vectors\r\n \r\n # selecte network \r\n x = import_module('networks.'+config.NETWORK)\r\n model = 
x.Model(vocab_size,embedding_pretrained=embedding_pretrained_matrix)\r\n \r\n # send model to device\r\n model.to(device)\r\n\r\n # initialize Adam optimizer\r\n optimizer = torch.optim.Adam(model.parameters(), lr=config.LEARNING_RATE)\r\n\r\n # if you have multiple GPUs, move model to DataParallel to use multiple GPUs\r\n if torch.cuda.device_count() > 1:\r\n model = nn.DataParallel(model)\r\n \r\n params_list = []\r\n # train and validate for all epochs\r\n for epoch in range(config.EPOCHS):\r\n epoch_start_time = time.time()\r\n\r\n ###----Train--------\r\n train_outputs, train_labels, train_loss = engine.train_fn(train_it, model, optimizer, device)\r\n train_outputs = torch.Tensor(train_outputs)\r\n _, train_predicted = torch.max(train_outputs, dim=1)\r\n train_parameters_dict = metrics_func.performance_evaluation_func(train_predicted,train_labels,epoch=str(epoch))\r\n # save train parameters\r\n params_list.append(train_parameters_dict)\r\n train_f1 = train_parameters_dict['f1_score_macro']\r\n train_prec = train_parameters_dict['precision_macro']\r\n train_recall = train_parameters_dict['precision_macro']\r\n print('\\n')\r\n print(f\" Train Epoch: {epoch}, F1 = {train_f1},precision = {train_prec},recall = {train_recall}\")\r\n ###------------\r\n \r\n # validate\r\n val_outputs, val_labels, valid_loss = engine.evaluate_fn(valid_it, model, device)\r\n val_outputs = torch.Tensor(val_outputs)\r\n _, val_predicted = torch.max(val_outputs, dim=1) \r\n # calculate evaluation parameters\r\n val_parameters_dict = metrics_func.performance_evaluation_func(val_predicted, val_labels, epoch=str(epoch),flag='val')\r\n # save evaluation parameters\r\n params_list.append(val_parameters_dict)\r\n \r\n val_f1 = val_parameters_dict['f1_score_macro']\r\n val_prec = val_parameters_dict['precision_macro']\r\n val_recall = val_parameters_dict['recall_macro']\r\n print(f\"Val Epoch: {epoch},F1 = {val_f1},precision = {val_prec}, recall = {val_recall}\")\r\n \r\n ###-------Test-----------------------\r\n test_outputs, test_labels, test_loss = engine.evaluate_fn(test_it, model, device)\r\n test_outputs = torch.Tensor(test_outputs)\r\n _, test_predicted = torch.max(test_outputs, dim=1) \r\n # calculate evaluation parameters\r\n test_parameters_dict = metrics_func.performance_evaluation_func(test_predicted, test_labels, epoch=str(epoch),flag='test')\r\n # save evaluation parameters\r\n params_list.append(test_parameters_dict)\r\n \r\n test_f1 = test_parameters_dict['f1_score_macro']\r\n test_prec = test_parameters_dict['precision_macro']\r\n test_recall = test_parameters_dict['recall_macro']\r\n print(f\"test Epoch: {epoch},F1 = {test_f1},precision = {test_prec}, recall = {test_recall}\")\r\n \r\n lr_scheduler = LRScheduler(optimizer)\r\n lr_scheduler(valid_loss)\r\n \r\n \r\n # simple early stopping\r\n# val_f1 = float(val_f1)\r\n #f1 = (float(train_f1) + float(val_f1)) / 2\r\n val_loss = float(valid_loss)\r\n early_stopping(val_loss, model)\r\n if early_stopping.early_stop:\r\n print(\"Early stopping\")\r\n break\r\n # obtain the model parameters saved at early stopping\r\n# model.load_state_dict(torch.load('checkpoint.pt'))\r\n\r\n# save_model_func(model, epoch, path='outputs')\r\n \r\n metrics_func.save_parameters_txt(params_list)", "def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = 
DataLoader(test_dd_loader, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader", "def train(model, ids, data, scaler):\n if model.model_type == 'torch':\n train_ids, val_ids, _, _ = train_test_split(ids, ids, test_size=0.1, random_state=1)\n train_loader = double_loader(data, train_ids, batch_size=model.batch_size)\n val_loader = double_loader(data, val_ids, batch_size=len(val_ids))\n \n regressor = copy.deepcopy(model.model) \n optimiser = model.optimiser(regressor.parameters(), lr=model.lr)\n loss_function = torch.nn.MSELoss()\n name = model.name.replace(' ','_')\n early_stopping = EarlyStopping(name,regressor)\n \n for epoch in range(model.num_epochs):\n #train\n for (sol,solv,targets) in train_loader:\n if model.data_type == 'sentences':\n sol, solv = sol.to(device), solv.to(device)\n targets = targets.view(-1,1)\n targets = scaler.transform(targets)\n optimiser.zero_grad()\n outputs = regressor(sol,solv).to(device)\n cuda_targets = targets.to(device)\n loss = loss_function(outputs, cuda_targets)\n loss.backward()\n optimiser.step()\n #evaluate\n for (sol,solv,targets) in val_loader:\n if model.data_type == 'sentences':\n sol, solv = sol.to(device), solv.to(device)\n targets = targets.view(-1,1)\n targets = scaler.transform(targets)\n outputs = regressor(sol,solv).to(device)\n cuda_targets = targets.to(device)\n loss = loss_function(outputs, cuda_targets)\n val_loss = loss.item()\n #early stopping\n early_stopping.store(val_loss, regressor)\n if early_stopping.stop:\n #print(\"Stopping at epoch \"+str(epoch))\n break\n regressor.load_state_dict(torch.load('checkpoints/'+name+'.pt'))\n else:\n regressor = sklearn.base.clone(model.model)\n targets = scaler.transform(data[1][ids])\n regressor.fit(data[0][ids], targets)\n return regressor", "def get_loader(dataset='train.txt', crop_size=128, image_size=28, batch_size=2, mode='train', num_workers=1): \n transform = [] \n if mode == 'train': \n transform.append(transforms.RandomHorizontalFlip()) \n transform.append(transforms.CenterCrop(crop_size)) \n transform.append(transforms.Resize(image_size)) \n transform.append(transforms.ToTensor()) \n transform.append(transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))) \n transform = transforms.Compose(transform) \n train_data=MyDataset(txt=dataset, transform=transform) \n data_loader = DataLoader(dataset=train_data, \n batch_size=batch_size, \n shuffle=(mode=='train'), \n num_workers=num_workers) \n return data_loader", "def learn(self):\n metrics_hist = dict()\n max_runs = 3\n for run in range(max_runs):\n all_indices, initial_indices = self._init_al_dataset()\n\n metrics_hist[str(run)] = dict()\n\n current_indices = list(initial_indices)\n \n for split in self.data_splits_frac:\n print(f'\\nRUN {run} - SPLIT - {split*100:0.0f}%')\n\n # Initialise models\n self._init_models(mode='svaal')\n\n # Do some label stuff\n unlabelled_indices = np.setdiff1d(list(all_indices), current_indices)\n unlabelled_sampler = data.sampler.SubsetRandomSampler(unlabelled_indices)\n unlabelled_dataloader = data.DataLoader(self.datasets['train'],\n sampler=unlabelled_sampler,\n batch_size=64,\n drop_last=False)\n\n print(f'Labelled: {len(current_indices)} Unlabelled: {len(unlabelled_indices)} Total: {len(all_indices)}')\n\n # TODO: Make the SVAAL allow 100% labelled and 0% unlabelled to pass through it. 
Breaking out of loop for now when data hits 100% labelled.\n if len(unlabelled_indices) == 0:\n break\n\n metrics, svae, discriminator = self.train(dataloader_l=self.labelled_dataloader,\n dataloader_u=unlabelled_dataloader,\n dataloader_v=self.val_dataloader,\n dataloader_t=self.test_dataloader,\n mode='svaal') \n print(f'Test Eval.: F1 Scores - Macro {metrics[0]*100:0.2f}% Micro {metrics[1]*100:0.2f}%') \n \n # Record performance at each split\n metrics_hist[str(run)][str(split)] = metrics\n\n \n sampled_indices = self.sample_adversarial(svae, discriminator, unlabelled_dataloader, indices=unlabelled_indices, cuda=True) # TODO: review usage of indices arg\n current_indices = list(current_indices) + list(sampled_indices)\n sampler = data.sampler.SubsetRandomSampler(current_indices)\n self.labelled_dataloader = data.DataLoader(self.datasets['train'], sampler=sampler, batch_size=self.batch_size, drop_last=True)\n \n # write results to disk\n with open('results.json', 'w') as fj:\n json.dump(metrics_hist, fj, indent=4)", "def __init__(self):\n print ('Initializing Data reader object...')\n data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels = self.readDataFromFile()\n test_10k_x, test_10k_y, training_55k_x, training_55k_y, validation_5k_x, validation_5k_y = self.dataTransform(\n data_Test_Image, data_Test_Labels, data_Train_Images, data_Train_Labels)\n self.train = zip(training_55k_x, training_55k_y)\n self.valid = zip(validation_5k_x, validation_5k_y)\n self.test = zip(test_10k_x, test_10k_y)\n\n self.train_position = 0\n print ('Initialized!')" ]
[ "0.68130106", "0.6807883", "0.67034125", "0.6500614", "0.6431421", "0.63997084", "0.62811214", "0.6247464", "0.6246502", "0.6224758", "0.6217445", "0.61995816", "0.6196357", "0.613247", "0.6116584", "0.6099218", "0.60890913", "0.60885996", "0.6040629", "0.60174674", "0.59817773", "0.59638256", "0.5936083", "0.5931423", "0.59292465", "0.5925677", "0.59212947", "0.5899031", "0.5885288", "0.58727807", "0.5869287", "0.5837708", "0.583685", "0.5834299", "0.58322275", "0.5823677", "0.581379", "0.5796107", "0.5789411", "0.5785577", "0.57710457", "0.57685405", "0.5767739", "0.5757041", "0.57562447", "0.57460225", "0.5727176", "0.57271135", "0.5715334", "0.57139665", "0.5706105", "0.5698149", "0.5692103", "0.56903565", "0.568793", "0.56817216", "0.56693834", "0.5669293", "0.56691825", "0.5669086", "0.56686705", "0.5663566", "0.5663132", "0.5657058", "0.5654558", "0.5653309", "0.5651346", "0.56472975", "0.56442684", "0.5626139", "0.5624193", "0.5621712", "0.5609582", "0.5605024", "0.56008136", "0.5574019", "0.5571343", "0.5568024", "0.55663353", "0.55587065", "0.55587065", "0.55587065", "0.55587065", "0.55587065", "0.5556375", "0.55446076", "0.55428153", "0.5541441", "0.5540703", "0.5538998", "0.553637", "0.5521006", "0.5517754", "0.55128294", "0.5510254", "0.55101883", "0.5497091", "0.5490224", "0.54898334", "0.548523" ]
0.6290276
6
Main finetune function used across all tasks.
def finetune(train_valid_datasets_provider, model_provider, model_type=ModelType.encoder_or_decoder, forward_step=_cross_entropy_forward_step, end_of_epoch_callback_provider=None, task_collate_fn=None): args = get_args() timers = get_timers() assert args.rampup_batch_size is None, \ 'batch size scaling is not supported for finetuning' # Train and validation data loaders. timers('train/valid/test dataset/dataloder', log_level=0).start() if args.epochs > 0: train_dataset, valid_dataset = train_valid_datasets_provider() train_dataloader, valid_dataloader = _build_train_valid_dataloaders( train_dataset, valid_dataset, task_collate_fn) else: args.train_iters = 0 timers('train/valid/test dataset/dataloder').stop() # Build calback function. timers('callback function', log_level=0).start() end_of_epoch_callback = None if end_of_epoch_callback_provider is not None: end_of_epoch_callback = end_of_epoch_callback_provider() timers('callback function').stop() # Build model, optimizer and learning rate scheduler. timers('model and optimizer', log_level=0).start() model, optimizer, opt_param_scheduler = setup_model_and_optimizer(model_provider, model_type) timers('model and optimizer').stop() # If pretrained checkpoint is provided and we have not trained for # any iteration (i.e., iteration is zero), then load the pretrained # checkpoint. timers('pretrained checkpoint', log_level=0).start(barrier=True) if args.iteration == 0 and args.pretrained_checkpoint is not None: original_load = args.load args.load = args.pretrained_checkpoint original_rng = args.no_load_rng args.no_load_rng = True _ = load_checkpoint(model, None, None) args.load = original_load args.no_load_rng = original_rng # This is critical when only model is loaded. We should make sure # main parameters are also updated. optimizer.reload_model_params() timers('pretrained checkpoint').stop() # Print setup timing. print_rank_0('done with setups ...') timers.log(['train/valid/test dataset/dataloder', 'callback function', 'model and optimizer', 'pretrained checkpoint'], barrier=True) print_rank_0('training ...') # Finetune the model. if args.epochs > 0: _train(model, optimizer, opt_param_scheduler, forward_step, train_dataloader, valid_dataloader, end_of_epoch_callback) # Or just evaluate. else: if end_of_epoch_callback is not None: print_rank_0('evaluation only mode, setting epoch to -1') end_of_epoch_callback(model, epoch=-1, output_predictions=True) print_rank_0('done :-)')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tasks():", "def task():", "def main():\n run_test_all()", "def finetune(ft_ds, model, task, epochs=10, eval_ds=None):\n\n print('==========FINETUNE==========')\n\n # Filter out undesired examples with excluded_label\n ds = ft_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.finetune_preprocess)\n ds = ds.shuffle(1000)\n ds = ds.batch(FLAGS.finetune_bs)\n\n # loss, metrics, optimizers\n train_loss= tf.keras.metrics.Mean(name='train_loss')\n train_sup_acc = tf.keras.metrics.Accuracy(name='train_supervised_accuracy')\n criterion_sup = tf.nn.softmax_cross_entropy_with_logits \n optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) \n for epoch in range(epochs): \n train_loss.reset_states()\n train_sup_acc.reset_states()\n for x in ds:\n with tf.GradientTape() as tape:\n image = x['image']\n labels = x[task['name']]\n out = model(image, mode='supervised', sup_layers=1, training=True)\n # print(tf.math.argmax(out, axis=-1))\n metrics.update_supervised_accuracy(train_sup_acc, labels, out)\n loss = criterion_sup(tf.one_hot(labels, depth=task['num_classes']), out)\n loss = tf.math.reduce_mean(loss)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(\n filter(lambda gv: gv[0] is not None, zip(gradients, model.trainable_variables))\n )\n train_loss.update_state(loss)\n print('supervised loss')\n print(train_loss.result())\n print('supervised accuracy')\n print(train_sup_acc.result())\n\n # Evaluate results on eval_ds if possible\n if eval_ds: \n evaluate(eval_ds, model, task)", "def task():\n pass", "def task():\n pass", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def run():\n main()", "def main():\n # get arguments from command line\n args = parse_arguments()\n\n # checks on the output file\n # if args.stats_only:\n # assert args.output, \"The output file was not provided\"\n if args.output and os.path.exists(args.output):\n warnings.warn(\"Overwriting task file \" + args.output, UserWarning)\n os.remove(args.output)\n\n # initialize the task\n task = Task(\n args.database, args.on,\n across=args.across,\n by=args.by,\n filters=args.filters,\n regressors=args.regressors,\n verbose=args.verbose)\n\n if args.stats_only:\n task.print_stats()\n else:\n if args.tempdir and not os.path.exists(args.tempdir):\n os.makedirs(args.tempdir)\n\n # generate triplets and unique pairs\n task.generate_triplets(\n output=args.output,\n threshold=args.threshold,\n tmpdir=args.tempdir,\n seed=args.seed)", "def finetuned():\n launch_training_on_all_splits(experiment='full', splits=NAME_SPLIT, base_model='ft', dropout=0.7304, learning_rate=0.0000976)", "def run(args):\n # CONFIG\n run_name = get_run_name(args)\n logger.info(f'*** Starting run {run_name} ***')\n data_dir = f'gs://{args.bucket_name}/{args.project_name}/finetune/finetune_data/{args.finetune_data}'\n output_dir = f'gs://{args.bucket_name}/{args.project_name}/finetune/runs/{run_name}'\n\n # Get configs\n pretrained_model_config_path = get_model_config_path(args)\n model_config = get_model_config(pretrained_model_config_path)\n\n # Meta data/label mapping\n input_meta_data = get_input_meta_data(data_dir)\n label_mapping = get_label_mapping(data_dir)\n logger.info(f'Loaded training data meta.json file: {input_meta_data}')\n\n # Calculate steps, warmup steps and eval steps\n train_data_size = input_meta_data['train_data_size']\n num_labels = input_meta_data['num_labels']\n max_seq_length = input_meta_data['max_seq_length']\n if 
args.limit_train_steps is None:\n steps_per_epoch = int(train_data_size / args.train_batch_size)\n else:\n steps_per_epoch = args.limit_train_steps\n warmup_steps = int(args.num_epochs * train_data_size * args.warmup_proportion/ args.train_batch_size)\n if args.limit_eval_steps is None:\n eval_steps = int(math.ceil(input_meta_data['eval_data_size'] / args.eval_batch_size))\n else:\n eval_steps = args.limit_eval_steps\n\n # some logging\n if args.init_checkpoint is None:\n logger.info(f'Finetuning on dataset {args.finetune_data} using default pretrained model {args.model_class}')\n else:\n logger.info(f'Finetuning on dataset {args.finetune_data} using pretrained model in {args.init_checkpoint} of type {args.model_class}')\n logger.info(f'Running {args.num_epochs} epochs with {steps_per_epoch:,} steps per epoch')\n logger.info(f'Using warmup proportion of {args.warmup_proportion}, resulting in {warmup_steps:,} warmup steps')\n logger.info(f'Using learning rate: {args.learning_rate}, training batch size: {args.train_batch_size}, num_epochs: {args.num_epochs}')\n\n # Get model\n classifier_model, core_model = get_model(args, model_config, steps_per_epoch, warmup_steps, num_labels, max_seq_length)\n optimizer = classifier_model.optimizer\n loss_fn = get_loss_fn(num_labels)\n try:\n if ',' in args.validation_freq:\n validation_freq = args.validation_freq.split(',')\n validation_freq = [int(v) for v in validation_freq]\n else:\n validation_freq = int(args.validation_freq)\n except:\n raise ValueError(f'Invalid argument for validation_freq!')\n logger.info(f'Using a validation frequency of {validation_freq}')\n\n # Restore checkpoint\n if args.init_checkpoint:\n checkpoint_path = f'gs://{args.bucket_name}/{args.project_name}/pretrain/runs/{args.init_checkpoint}'\n checkpoint = tf.train.Checkpoint(model=core_model)\n checkpoint.restore(checkpoint_path).assert_existing_objects_matched()\n logger.info(f'Successfully restored checkpoint from {checkpoint_path}')\n\n # Run keras compile\n logger.info(f'Compiling keras model...')\n classifier_model.compile(\n optimizer=optimizer,\n loss=loss_fn,\n metrics=get_metrics())\n logger.info(f'... 
done')\n\n # Create all custom callbacks\n summary_dir = os.path.join(output_dir, 'summaries')\n summary_callback = tf.keras.callbacks.TensorBoard(summary_dir, profile_batch=0)\n time_history_callback = keras_utils.TimeHistory(\n batch_size=args.train_batch_size,\n log_steps=args.time_history_log_steps,\n logdir=summary_dir)\n custom_callbacks = [summary_callback, time_history_callback]\n if args.save_model:\n logger.info('Using save_model option...')\n checkpoint_path = os.path.join(output_dir, 'checkpoint')\n checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1)\n custom_callbacks.append(checkpoint_callback)\n if args.early_stopping_epochs > 0:\n logger.info(f'Using early stopping of after {args.early_stopping_epochs} epochs of val_loss not decreasing')\n early_stopping_callback = tf.keras.callbacks.EarlyStopping(patience=args.early_stopping_epochs, monitor='val_loss')\n custom_callbacks.append(early_stopping_callback)\n\n # Generate dataset_fn\n train_input_fn = get_dataset_fn(\n os.path.join(data_dir, 'tfrecords', 'train.tfrecords'),\n max_seq_length,\n args.train_batch_size,\n is_training=True)\n eval_input_fn = get_dataset_fn(\n os.path.join(data_dir, 'tfrecords', 'dev.tfrecords'),\n max_seq_length,\n args.eval_batch_size,\n is_training=False)\n\n # Add mertrics callback to calculate performance metrics at the end of epoch\n performance_metrics_callback = Metrics(\n eval_input_fn,\n label_mapping,\n os.path.join(summary_dir, 'metrics'),\n eval_steps,\n args.eval_batch_size,\n validation_freq)\n custom_callbacks.append(performance_metrics_callback)\n\n # Run keras fit\n time_start = time.time()\n logger.info('Run training...')\n history = classifier_model.fit(\n x=train_input_fn(),\n validation_data=eval_input_fn(),\n steps_per_epoch=steps_per_epoch,\n epochs=args.num_epochs,\n validation_steps=eval_steps,\n validation_freq=validation_freq,\n callbacks=custom_callbacks,\n verbose=1)\n time_end = time.time()\n training_time_min = (time_end-time_start)/60\n logger.info(f'Finished training after {training_time_min:.1f} min')\n\n # Write training log\n all_scores = performance_metrics_callback.scores\n all_predictions = performance_metrics_callback.predictions\n if len(all_scores) > 0:\n final_scores = all_scores[-1]\n logger.info(f'Final eval scores: {final_scores}')\n else:\n final_scores = {}\n full_history = history.history\n if len(full_history) > 0:\n final_val_loss = full_history['val_loss'][-1]\n final_loss = full_history['loss'][-1]\n logger.info(f'Final training loss: {final_loss:.2f}, Final validation loss: {final_val_loss:.2f}')\n else:\n final_val_loss = None\n final_loss = None\n data = {\n 'created_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'run_name': run_name,\n 'final_loss': final_loss,\n 'final_val_loss': final_val_loss,\n 'max_seq_length': max_seq_length,\n 'num_train_steps': steps_per_epoch * args.num_epochs,\n 'eval_steps': eval_steps,\n 'steps_per_epoch': steps_per_epoch,\n 'training_time_min': training_time_min,\n 'data_dir': data_dir,\n 'output_dir': output_dir,\n 'all_scores': all_scores,\n 'all_predictions': all_predictions,\n 'num_labels': num_labels,\n 'label_mapping': label_mapping,\n **full_history,\n **final_scores,\n **vars(args),\n }\n # Write run_log\n f_path_training_log = os.path.join(output_dir, 'run_logs.json')\n logger.info(f'Writing training log to {f_path_training_log}...')\n save_to_json(data, f_path_training_log)\n # Write bert config\n model_config.id2label = 
label_mapping\n model_config.label2id = {v:k for k, v in label_mapping.items()}\n model_config.max_seq_length = max_seq_length\n model_config.num_labels = num_labels\n f_path_bert_config = os.path.join(output_dir, 'bert_config.json')\n logger.info(f'Writing BERT config to {f_path_bert_config}...')\n save_to_json(model_config.to_dict(), f_path_bert_config)", "def task_4_3_1():\n # TODO Task 4.3.1: Your code goes here\n pass", "def train_entry_point():", "def task_4_2_1():\n # TODO Task 4.2.1: Your code goes here\n pass", "def task_4_3_2():\n # TODO Task 4.3.2: Your code goes here\n pass", "def task5(self):\n\n pass", "def main():\n tng.api.runner()", "def task_4_3_3():\n # TODO Task 4.3.3: Your code goes here\n pass", "def main():\r\n\r\n print(\"\\nTask 4a:\")\r\n days = cold_days([1, -5, 3, 0, -6, -3, 15, 0])\r\n print(days)\r\n\r\n print(\"\\nTask 4b:\")\r\n A = [-70, 30, 0, 90, 23, -12, 95, 12]\r\n result = cap_data(A, -50, 50)\r\n print(result)\r\n\r\n print(\"\\nTask 4c:\")\r\n for i in range(4):\r\n print(generate_testdata(10, -5, 10))\r\n\r\n print(\"\\nTask 4d:\")\r\n temp = [1, 5, 3]\r\n rain = [0, 30, 120]\r\n humidity = [30, 50, 65]\r\n wind = [3, 5, 7]\r\n weather = create_db(temp, rain, humidity, wind)\r\n print(weather)\r\n\r\n print(\"\\nTask 4e:\")\r\n print_db(weather)\r\n\r\n print(\"\\nTask 4f:\")\r\n temp = [1, 3, 4, -5, -6, -7, -8, -9, 3, 0]\r\n rain = [0, 20, 30, 0, 10, 30, 50, 0, 5, 2]\r\n print(strange_weather(temp, rain))", "def complete_run():\n pass", "def main():\n # Create the flow\n with Flow('pickle flow') as flow:\n db_table = create_table()\n weather_data = get_weather(LAT_NYC, LON_NYC, 2018)\n parsed_data = parse_weather(weather_data)\n populated_table = store_weather(parsed_data)\n populated_table.set_upstream(db_table)\n\n # Run the flow\n flow.run()", "def main():\n spark_it_up()", "def main(run_tests=False):\n if run_tests:\n print(\"Running Tests\")\n tests = UarmTests()\n object_locations = tests.run_tests()\n print(\"Object locations polar: \", object_locations)\n find_and_touch_test_loop(object_locations)\n else:\n # set mutex flags to false\n camera_event.clear()\n data_ready.clear()\n camera_started.clear()\n\n # create and call thread for UArm to execute find and touch task\n ddpg_thread = threading.Thread(target=execute_find_and_touch_task)\n ddpg_thread.start()\n\n # use main thread for camera execution\n camera_exec()\n\n ddpg_thread.join()\n print(\"uarm done searching\")", "def main():\r\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ldc_analysis.settings\")\r\n # hourly_sm_reading_histogram()\r\n # reading_count_histogram()\r\n sm_reading_exception_count_histogram()", "def main(_):\n\n params = create_params()\n\n assert params[\"train_dataset_path\"]\n assert params[\"eval_dataset_path\"]\n\n input_fn = input_fn_from_files(\n params[\"train_dataset_path\"])\n eval_input_fn = input_fn_from_files(\n params[\"eval_dataset_path\"])\n\n feature_columns = create_feature_columns(params)\n\n model_fn = create_model_fn(feature_columns)\n estimator = create_tpu_estimator(model_fn, feature_columns, params)\n\n for cycle_index in range(params[\"train_epochs\"]):\n tf.logging.info(\"Starting a training cycle: {}/{}\".format(\n cycle_index + 1, params[\"train_epochs\"]))\n estimator.train(input_fn=input_fn, steps=params[\"steps_per_epoch\"])\n tf.logging.info(\"Beginning evaluation.\")\n eval_results = estimator.evaluate(eval_input_fn,\n steps=params[\"num_eval_steps\"])\n tf.logging.info(\"Evaluation complete.\")\n\n recall_1 = 
float(eval_results[\"recall@1\"])\n recall_5 = float(eval_results[\"recall@5\"])\n loss = float(eval_results[\"loss\"])\n tf.logging.info(\n \"Iteration {}: recall@1 = {:.4f}, recall@5 = {:.4f}, Loss = {:.4f}\"\n .format(cycle_index + 1, recall_1, recall_5, loss))", "def main(_):\n # Set FLAGS defaults.\n words = FLAGS.words\n if FLAGS.vocab_size == -1:\n FLAGS.__setattr__(\"vocab_size\", word_default_vocab_size if words else char_default_vocab_size)\n if FLAGS.num_samples == -1:\n FLAGS.__setattr__(\"num_samples\", word_default_num_samples if words else char_default_num_samples)\n if FLAGS.tensorboard_logdir is None:\n FLAGS.__setattr__(\"tensorboard_logdir\", FLAGS.train_dir)\n\n if FLAGS.words:\n data_utils._START_VOCAB = data_utils.START_VOCAB_WORD\n\n # Check compatibility with word2vec file\n if FLAGS.word_embeddings:\n # For now, assume the embedding size is 300. If variable, reprogram.\n print(\"Setting LSTM size to 300 to conform to the word2vec file\")\n FLAGS.__setattr__(\"size\", 300)\n\n # Start task according to flags.\n if FLAGS.self_test:\n self_test()\n elif FLAGS.decode:\n decode()\n else:\n train_distributed() if FLAGS.distributed else train_not_distributed()", "def main():\n flags = PARSER.parse_args()\n\n if flags.to == 'savedmodel':\n to_savedmodel(input_shape=flags.input_shape,\n model_fn=unet_fn,\n src_dir=flags.checkpoint_dir,\n dst_dir='./saved_model',\n input_names=['IteratorGetNext'],\n output_names=['total_loss_ref'],\n use_amp=flags.use_amp,\n use_xla=flags.use_xla,\n compress=flags.compress)\n if flags.to == 'tensorrt':\n ds = Dataset(data_dir=flags.data_dir,\n batch_size=1,\n augment=False,\n gpu_id=0,\n num_gpus=1,\n seed=42)\n iterator = ds.test_fn(count=1).make_one_shot_iterator()\n features = iterator.get_next()\n\n sess = tf.Session()\n\n def input_data():\n return {'input_tensor:0': sess.run(features)}\n\n to_tensorrt(src_dir=flags.savedmodel_dir,\n dst_dir='./tf_trt_model',\n precision=flags.precision,\n feed_dict_fn=input_data,\n num_runs=1,\n output_tensor_names=['Softmax:0'],\n compress=flags.compress)\n if flags.to == 'onnx':\n to_onnx(src_dir=flags.savedmodel_dir,\n dst_dir='./onnx_model',\n compress=flags.compress)", "def main() -> None:\n\n task_results = {}\n for task in (Task.SINGLE_SEQUENCE, Task.MULTI_SEQUENCE):\n task_results[task] = []\n for category in CO3D_CATEGORIES[: (20 if task == Task.SINGLE_SEQUENCE else 10)]:\n for single_sequence_id in (\n (0, 1) if task == Task.SINGLE_SEQUENCE else (None,)\n ):\n category_result = evaluate_dbir_for_category(\n category, task=task, single_sequence_id=single_sequence_id\n )\n print(\"\")\n print(\n f\"Results for task={task}; category={category};\"\n + (\n f\" sequence={single_sequence_id}:\"\n if single_sequence_id is not None\n else \":\"\n )\n )\n pretty_print_nvs_metrics(category_result)\n print(\"\")\n\n task_results[task].append(category_result)\n _print_aggregate_results(task, task_results)\n\n for task in task_results:\n _print_aggregate_results(task, task_results)", "def task():\n\n\tprint('Example task executed.')", "def do_workload(self):\n pass", "def task2_3():", "def main(args):\n\n print(now(), \"benchmark_test.py running.\")\n out_dir = args.output\n if not args.from_scratch:\n print(\n f\"Testing poisons from {args.poisons_path}, in the transfer learning setting...\\n\".format()\n )\n\n ####################################################\n # Frozen Feature Extractor (ffe)\n print(\"Frozen Feature Extractor test:\")\n args.num_poisons = 25\n args.trainset_size = 2500\n 
args.val_period = 20\n args.optimizer = \"SGD\"\n args.lr = 0.01\n args.lr_schedule = [30]\n args.epochs = 40\n\n args.end2end = False\n\n # white-box attack\n args.output = os.path.join(out_dir, \"ffe-wb\")\n args.model = \"resnet18\"\n args.model_path = whitebox_modelpath\n poison_test.main(args)\n\n # grey box attack\n args.model = \"resnet18\"\n args.model_path = greybox_modelpath\n args.output = os.path.join(out_dir, \"ffe-gb\")\n poison_test.main(args)\n\n # black box attacks\n args.output = os.path.join(out_dir, \"ffe-bb\")\n\n args.model = \"MobileNetV2\"\n args.model_path = blackbox_modelpath[0]\n poison_test.main(args)\n\n args.model_path = blackbox_modelpath[1]\n args.model = \"VGG11\"\n poison_test.main(args)\n ####################################################\n\n ####################################################\n # End-To-End Fine Tuning (e2e)\n print(\"End-To-End Fine Tuning test:\")\n args.num_poisons = 25\n args.trainset_size = 2500\n args.val_period = 20\n args.optimizer = \"SGD\"\n args.lr = 0.01\n args.lr_schedule = [30]\n args.epochs = 40\n\n args.end2end = True\n\n # white-box attack\n args.output = os.path.join(out_dir, \"e2e-wb\")\n args.model = \"resnet18\"\n args.model_path = whitebox_modelpath\n poison_test.main(args)\n\n # grey box attack\n args.model = \"resnet18\"\n args.model_path = greybox_modelpath\n args.output = os.path.join(out_dir, \"e2e-gb\")\n poison_test.main(args)\n\n # black box attacks\n args.output = os.path.join(out_dir, \"e2e-bb\")\n\n args.model = \"MobileNetV2\"\n args.model_path = blackbox_modelpath[0]\n poison_test.main(args)\n\n args.model = \"VGG11\"\n args.model_path = blackbox_modelpath[1]\n poison_test.main(args)\n ####################################################\n\n else:\n print(\n f\"Testing poisons from {args.poisons_path}, in the from scratch training setting...\\n\".format()\n )\n\n ####################################################\n # From Scratch Training (fst)\n args.num_poisons = 500\n args.trainset_size = 50000\n args.val_period = 20\n args.optimizer = \"SGD\"\n args.lr = 0.1\n args.lr_schedule = [100, 150]\n args.epochs = 200\n args.model_path = \"\"\n args.output = os.path.join(out_dir, \"fst\")\n\n args.model = \"resnet18\"\n poison_test.main(args)\n\n args.model = \"MobileNetV2\"\n poison_test.main(args)\n\n args.model = \"VGG11\"\n poison_test.main(args)\n ####################################################", "def main():\n\n parser = ArgumentParser()\n parser.add_argument('--config', '-c', type=str, required=True, help='Path to config file')\n parser.add_argument('--snapshot_path', '-s', type=str, required=True, default='', help='Path to model snapshot')\n parser.add_argument('--output_dir', '-o', type=str, required=True, default='', help='Path to output directory')\n args = parser.parse_args()\n\n assert exists(args.config)\n assert exists(args.snapshot_path + '.index')\n\n if not exists(args.output_dir):\n makedirs(args.output_dir)\n\n task_monitor = get_monitor(args.config, snapshot_path=args.snapshot_path)\n\n converted_snapshot_path = join(args.output_dir, CKPT_FILE_NAME)\n task_monitor.eliminate_train_ops(converted_snapshot_path)\n\n converted_model_path = '{}-{}'.format(converted_snapshot_path,\n int(basename(args.snapshot_path).split('-')[-1]))\n task_monitor.save_model_graph(converted_model_path, args.output_dir)\n\n task_monitor.freeze_model_graph(converted_model_path,\n join(args.output_dir, PB_FILE_NAME),\n join(args.output_dir, FROZEN_FILE_NAME))", "def main():\n parser = 
argparse.ArgumentParser()\n parser.add_argument('--allData', default='', action='store', nargs='+',\n help=\"add train test validation dataset together\")\n parser.add_argument('--topic', default='', help=\"target topic\")\n parser.add_argument('--contentWordNumber', default='', help=\"threshold for content Word Number\")\n parser.add_argument('--returnNSents', default='', help=\"top N sentences\")\n\n args = parser.parse_args()\n data = loadData(args.allData, args.topic, args.contentWordNumber)\n allDataDic, targetTweets = data.readData()\n \"\"\"\n ##modify at def_processing, re-run below line##\n outputDB = data.processing(allDataDic)\n with open(ROOT.DATA_ROOT+'/'+'allDataDic.txt', 'w') as file:\n file.write(json.dumps(outputDB))\n \"\"\"\n\n targData = data.processing(targetTweets)\n outputDB = json.load(open(ROOT.DATA_ROOT + '/allDataDic.txt'))\n contentWords = data.tfidf(outputDB, targData)\n return targetTweets,targData, contentWords", "def main():\n\n # Chdir into script directory so to properly resolve relative paths in configuration\n os.chdir(os.path.dirname(os.path.realpath(__file__)) + \"/\")\n\n # Disable proxy as we access localhost, both to avoid overhead and issues with proxy misconfiguration\n os.environ['NO_PROXY'] = '*'\n\n # Stop any GraphDB server that we previously started and is possibly still around due to script interruption/crash\n shell(f\"{cmd_graphdb} stopall\")\n\n # Generate synthetic traces, both for populating the repositories and for the {sf, sp, pf, pp} tests\n prepare_traces()\n \n # Generate central repositories (if needed)\n for size, approach in itertools.product(sizes, approaches):\n prepare_repository(size, approach)\n \n # Run experiments (if needed)\n for size, approach in itertools.product(sizes, approaches):\n run_experiments(size, approach)", "def finetuning_single(phase,token2id_dict,id2embedding_dict,inference,dataloaders,model,optimizer,device,weighted_sampling,criterion,classification,auxiliary_loss=False,attn_loss=False,epoch_count=None,new_task_epochs=None,trial=None,goal='IC',save_path_dir=None): #b/c it is single, models_list contains one model only\n running_loss = 0.0\n \n# outputs_list = []\n# representations_list = []\n# labels_list = []\n# modality_list = []\n# indices_list = []\n# task_names_list = []\n# attn_coefs_list = []\n# sentence_lens_list = []\n# class_labels_list = []\n# class_predictions_list = []\n \n \"\"\" Initialize Dictionaries to Store Results \"\"\" \n outputs_dict = dict()\n representations_dict = dict()\n attn_coefs_dict = dict()\n labels_dict = dict()\n sentence_lens_dict = dict()\n class_labels_dict = dict()\n class_predictions_dict = dict()\n epoch_bleu = dict()\n epoch_rouge = dict()\n epoch_meteor = dict()\n\n for dest_lang in token2id_dict.keys():\n outputs_dict[dest_lang] = list()\n attn_coefs_dict[dest_lang] = list()\n representations_dict[dest_lang] = list()\n labels_dict[dest_lang] = list()\n sentence_lens_dict[dest_lang] = list()\n class_labels_dict[dest_lang] = list()\n class_predictions_dict[dest_lang] = list()\n epoch_bleu[dest_lang] = 0\n epoch_rouge[dest_lang] = 0\n epoch_meteor[dest_lang] = 0\n\n batch_num = 0\n batch = 0\n #class label is that in IC setting, but class label is answer in VQA setting\n for inputs, text_indices, sentence_lens, class_labels, languages, document_level_text_indices, document_level_sentence_lens in tqdm(dataloaders[phase]):\n \"\"\" Weaning Off of Teacher Forcing in a Linear Manner \"\"\"\n #sampling_prob = (0.4/30000)*(batch+1)*(epoch_count+1)\n #uniform_value = 
np.random.uniform(0,1)\n #sampling = True if uniform_value < sampling_prob else False\n sampling = False\n batch += 1\n \"\"\" Send Data to Device \"\"\"\n inputs = inputs.to(device)\n class_labels = class_labels.to(device)\n #print(text_indices)\n with torch.set_grad_enabled('train1' in phase):# and inference == False): #('train' in phase and inference == False)\n \"\"\" Image Captioning Path \"\"\"\n if goal == 'IC':\n \"\"\" Perform Forward Pass i.e. Encoder and Decoder \"\"\"\n current_labels_dict = dict() #text\n# current_class_labels_dict = dict()\n# current_class_predictions_dict = dict()\n current_outputs_dict = dict()\n# current_attn_coefs_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()): #, sorted_indices, attn_coefs, class_predictions\n outputs, representations = model(inputs,current_text_indices,current_sentence_lens,token2id_dict[dest_lang],id2embedding_dict[dest_lang],dest_lang,phase,sampling,device) #outputs is B x S x Words\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = current_text_indices.to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n #current_text_indices = current_text_indices[:,1:] # B x (S-1)\n if phase == 'train1':\n attn_coefs = 5\n class_predictions = 6\n loss = calculate_IC_loss(criterion,outputs,current_text_indices[:,1:],class_predictions,class_labels,attn_coefs,auxiliary_loss,attn_loss)\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \"\"\" Store Results \"\"\"\n current_labels_dict[dest_lang] = current_text_indices[:,1:].cpu().detach().numpy()\n# current_class_labels_dict[dest_lang] = class_labels\n# current_class_predictions_dict[dest_lang] = class_predictions\n current_outputs_dict[dest_lang] = outputs.cpu().detach().numpy() #text\n# current_attn_coefs_dict[dest_lang] = attn_coefs\n# current_representations_dict[dest_lang] = representations\n #\"\"\" Detach Outputs and Attn Coefs To Avoid Memory Leakage \"\"\"\n #outputs = outputs.detach()\n #attn_coefs = attn_coefs.detach()\n current_text_indices.detach()\n elif goal == 'VQA':\n \"\"\" Perform Forward Pass and Get Answers \"\"\"\n outputs, representations, attn_coefs, class_predictions = model(inputs,text_indices,sentence_lens,id2embedding_dict,phase,device)\n \"\"\" Calculate MSE Loss \"\"\"\n #criterion = nn.MSELoss()\n #class_labels = class_labels.type(torch.float)\n \"\"\" Calculate CrossEntropyLoss \"\"\"\n criterion = nn.CrossEntropyLoss()\n class_labels = class_labels.type(torch.long)\n #print(outputs,outputs.shape)\n loss = criterion(outputs,class_labels)\n elif goal == 'Supervised': #encoder supervised pre-training\n h, representations, class_predictions = model(inputs)#,text_indices,sentence_lens,id2embedding_dict,phase,device)\n criterion = nn.CrossEntropyLoss()\n class_labels = class_labels.type(torch.long)\n loss = criterion(class_predictions,class_labels)\n elif goal == 'Text_Supervised':\n #h, class_predictions = model.supervised_forward(text_indices,sentence_lens,token2id_dict,id2embedding_dict,phase,device)\n criterion = nn.CrossEntropyLoss()\n class_labels = class_labels.type(torch.long)\n current_class_labels_dict = dict()\n current_class_predictions_dict = dict()\n# current_representations_dict = dict()\n 
total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()):\n class_predictions = model.supervised_forward(current_text_indices,current_sentence_lens,token2id_dict[dest_lang],id2embedding_dict[dest_lang],phase,device)\n loss = criterion(class_predictions,class_labels)\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n\n current_class_labels_dict[dest_lang] = class_labels.cpu().detach().numpy()\n current_class_predictions_dict[dest_lang] = class_predictions.cpu().detach().numpy()\n# current_representations_dict[dest_lang] = h\n #loss = criterion(class_predictions,class_labels)\n #print(loss)\n elif goal == 'Language_Change_Detection':\n criterion = nn.BCEWithLogitsLoss()\n class_labels = class_labels.type(torch.long)\n current_class_labels_dict = dict()\n current_class_predictions_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()):\n \"\"\" Forward Pass \"\"\"\n replacement_predictions, replacement_labels = model.language_change_detection_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,device)\n #replacement_labels = replacement_labels.type(torch.float) #needed for BCELoss\n \"\"\" Instance-Wise Loss Because Each Sentence is of a Different Length \"\"\"\n loss = 0\n for i,(replacement_prediction,replacement_label) in enumerate(zip(replacement_predictions,replacement_labels)):\n current_loss = criterion(replacement_prediction,replacement_label)\n loss = loss + current_loss\n if i == len(replacement_predictions)-1:\n loss = loss / len(replacement_predictions)\n #loss = torch.mean(torch.tensor([criterion(replacement_prediction,replacement_label) for replacement_prediction,replacement_label in zip(replacement_predictions,replacement_labels)]))\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \n \"\"\" Store Representations and Labels \"\"\"\n current_class_predictions_dict[dest_lang] = [predictions.cpu().detach().numpy() for predictions in replacement_predictions]\n current_class_labels_dict[dest_lang] = [labels.cpu().detach().numpy() for labels in replacement_labels]\n# current_representations_dict[dest_lang] = h \n elif goal == 'Language_Detection':\n criterion = nn.CrossEntropyLoss(ignore_index=0)\n class_labels = class_labels.type(torch.long)\n current_class_labels_dict = dict()\n current_class_predictions_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()):\n \"\"\" Forward Pass \"\"\"\n replacement_predictions, replacement_labels = model.language_detection_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,device)\n #replacement_labels = replacement_labels.type(torch.long) #needed for CrossEntropyLoss\n \"\"\" Instance-Wise Loss Because Each Sentence is of a Different Length \"\"\"\n# loss = 0\n# for i,(replacement_prediction,replacement_label) in enumerate(zip(replacement_predictions,replacement_labels)):\n# replacement_label = replacement_label.type(torch.long)\n# current_loss = 
criterion(replacement_prediction,replacement_label)\n# loss = loss + current_loss\n# if i == len(replacement_predictions)-1:\n# loss = loss / len(replacement_predictions)\n #print(replacement_predictions.shape,replacement_labels.shape)\n loss = criterion(replacement_predictions.permute(0,2,1),replacement_labels)\n #print(loss)\n total_loss = total_loss + loss\n #print(dest_lang,total_loss)\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \n \"\"\" Store Representations and Labels \"\"\"\n current_class_predictions_dict[dest_lang] = [predictions.cpu().detach().numpy() for predictions in replacement_predictions]\n current_class_labels_dict[dest_lang] = [labels.cpu().detach().numpy() for labels in replacement_labels]\n# current_representations_dict[dest_lang] = h\n elif goal == 'MLM':\n criterion = nn.CrossEntropyLoss(reduction='none')\n# current_labels_dict = dict() #text\n# current_outputs_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()): #, sorted_indices, attn_coefs, class_predictions\n outputs, replacement_predictions = model.MLM_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,device) #outputs is B x S x Words\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = current_text_indices.to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n current_text_indices = current_text_indices[:,1:] # B x (S-1)\n \"\"\" Obtain Applicable Loss Locations (i.e., Where Token Was Masked) \"\"\"\n token_loss_mask = torch.where(replacement_predictions == 1,torch.tensor(1,device=device),torch.tensor(0,device=device)).type(torch.bool)\n #print(outputs.shape)\n #if phase == 'train1':\n \"\"\" Obtain Each Token's Loss \"\"\"\n token_loss = criterion(outputs.permute(0,2,1),current_text_indices)\n \"\"\" Retrieve Only Relevant Losses (Masked) \"\"\"\n loss = torch.mean(token_loss.masked_select(token_loss_mask))\n \"\"\" Aggregate Loss Across Languages \"\"\"\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \n del current_text_indices\n del token_loss\n del token_loss_mask\n# \"\"\" Store Results \"\"\"\n# current_labels_dict[dest_lang] = current_text_indices.cpu().detach().numpy()\n# current_outputs_dict[dest_lang] = outputs.cpu().detach().numpy() #text\n elif goal == 'ELECTRA':\n generator_criterion = nn.CrossEntropyLoss(reduction='none')\n discriminator_criterion = nn.BCEWithLogitsLoss(reduction='none')\n# current_labels_dict = dict() #text\n# current_outputs_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()): #, sorted_indices, attn_coefs, class_predictions\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = current_text_indices.to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Perform Forward Pass Through ELECTRA \"\"\"\n generator_outputs, generator_labels, discriminator_outputs, discriminator_labels = model.ELECTRA_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,sampling,device) #outputs is B x S x Words\n \"\"\" Remove '/START' Index from Target 
Indices \"\"\"\n current_text_indices = current_text_indices[:,1:] # B x (S-1)\n \"\"\" Generator Loss Mask (i.e., Only Consider Originally Masked Tokens ) \"\"\"\n generator_token_loss_mask = torch.where(generator_labels == 1,torch.tensor(1,device=device),torch.tensor(0,device=device)).type(torch.bool)\n \"\"\" Discrimiantor Loss Mask (i.e., Do Not Consider Padded Regions ) \"\"\"\n discriminator_labels = discriminator_labels.view_as(discriminator_outputs) \n discriminator_token_loss_mask = torch.ones_like(discriminator_labels)\n for i,sentence_len in zip(range(discriminator_token_loss_mask.shape[0]),current_sentence_lens):\n discriminator_token_loss_mask[i,sentence_len:] = 0\n \n #if phase == 'train1':\n \"\"\" Obtain Each Generator Token's Loss \"\"\"\n generator_token_loss = generator_criterion(generator_outputs.permute(0,2,1),current_text_indices) # B x S\n #print(generator_token_loss.shape,generator_token_loss_mask.shape)\n \"\"\" Retrieve Only Relevant Loss (Masked) \"\"\"\n generator_loss = torch.mean(generator_token_loss.masked_select(generator_token_loss_mask)) #scalar\n \n \"\"\" Obtain Each Discriminator Token's Loss \"\"\" \n discriminator_token_loss = discriminator_criterion(discriminator_outputs,discriminator_labels) # B x S\n #print(discriminator_token_loss.shape,discriminator_token_loss_mask.shape)\n \"\"\" Retrieve Only Relevant Loss (Masked) \"\"\"\n discriminator_loss = torch.mean(discriminator_token_loss.masked_select(discriminator_token_loss_mask.type(torch.bool))) #scalar\n \n #print(generator_loss,discriminator_loss)\n \"\"\" Aggregate Loss Across Languages \"\"\"\n total_loss = total_loss + generator_loss + discriminator_loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \"\"\" Store Results \"\"\"\n# current_labels_dict[dest_lang] = discriminator_labels.cpu().detach().numpy()\n# current_outputs_dict[dest_lang] = discriminator_outputs.cpu().detach().numpy() #text\n elif goal == 'MARGE':\n# current_labels_dict = dict() #text\n# current_outputs_dict = dict()\n #total_loss = 0\n #for (dest_lang,current_text_indices),current_sentence_lens,current_languages in zip(text_indices.items(),sentence_lens.values(),languages.values()): #, sorted_indices, attn_coefs, class_predictions\n \"\"\" Randomly Choose Target Lang for This Mini-Batch \"\"\"\n #lang_list = list(text_indices.keys())\n #target_lang = random.sample(lang_list,1).item()\n #target_lang = 'de' #option to change based on dataset (MUST CHANGE IN PAD COLLATE)\n outputs, target_lang = model(text_indices,sentence_lens,languages,document_level_text_indices,document_level_sentence_lens,token2id_dict,id2embedding_dict,phase,device)\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = text_indices[target_lang].to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n current_text_indices = current_text_indices[:,1:] # B x (S-1)\n #if phase == 'train1':\n \"\"\" Obtain Each Token's Loss \"\"\"\n loss = criterion(outputs.permute(0,2,1),current_text_indices)\n #print(loss)\n #\"\"\" Aggregate Loss Across Languages \"\"\"\n #total_loss = total_loss + loss\n #\"\"\" Average Loss if This is Final Loss Collected \"\"\"\n #if dest_lang == list(text_indices.keys())[-1]:\n # loss = total_loss / len(text_indices)\n# print(loss)\n# \"\"\" Store Results \"\"\"\n# current_labels_dict[target_lang] = 
current_text_indices.cpu().detach().numpy()\n# current_outputs_dict[target_lang] = outputs.cpu().detach().numpy() #text\n \n\n \"\"\" Backpropagation and Update Step \"\"\"\n if phase == 'train1': #only perform backprop for train1 phase \n loss.backward()\n \n \"\"\" Network Parameters \"\"\"\n if isinstance(optimizer,tuple):\n optimizer[0].step()\n \"\"\" Task-Instance Parameters \"\"\"\n optimizer[1].step() \n optimizer[0].zero_grad()\n optimizer[1].zero_grad()\n else:\n optimizer.step()\n optimizer.zero_grad()\n \n \"\"\" Calculate Metrics \"\"\"\n if goal == 'IC':\n if phase == 'train1':\n running_loss += loss.item() * inputs.shape[0]\n elif goal == 'VQA':\n running_loss += loss.item() * inputs.shape[0] \n elif goal in ['Supervised','Text_Supervised','Language_Change_Detection','Language_Detection','MLM','ELECTRA','MARGE']:\n running_loss += loss.item() * inputs.shape[0] \n \n# \"\"\" These Need to be Language Specific \"\"\"\n \n if goal in ['IC']:\n batch_bleu = calculate_bleu_score(current_outputs_dict,current_labels_dict,token2id_dict)\n batch_rouge = calculate_rouge_score(current_outputs_dict,current_labels_dict,token2id_dict)\n batch_meteor = calculate_meteor_score(current_outputs_dict,current_labels_dict,token2id_dict) \n \n for dest_lang in batch_bleu.keys():\n epoch_bleu[dest_lang] = epoch_bleu[dest_lang] + (1/batch)*(batch_bleu[dest_lang] - epoch_bleu[dest_lang])\n epoch_rouge[dest_lang] = epoch_rouge[dest_lang] + (1/batch)*(batch_rouge[dest_lang] - epoch_rouge[dest_lang])\n epoch_meteor[dest_lang] = epoch_meteor[dest_lang] + (1/batch)*(batch_meteor[dest_lang] - epoch_meteor[dest_lang])\n \n if phase in ['val']:\n for dest_lang in text_indices.keys():\n predicted_sentences = convert_predicted_ids_to_sentences(current_outputs_dict[dest_lang],token2id_dict[dest_lang],dest_lang)\n target_sentences = convert_target_ids_to_sentences(current_labels_dict[dest_lang],token2id_dict[dest_lang],dest_lang)\n outputs_dict[dest_lang].extend(predicted_sentences)\n labels_dict[dest_lang].extend(target_sentences)\n \n elif goal in ['Language_Change_Detection','Language_Detection']:\n for dest_lang in text_indices.keys():\n if goal in ['Language_Change_Detection','Language_Detection']:\n \"\"\" Store Batch Data in The Dictionaries \"\"\"\n class_labels_dict[dest_lang].extend(current_class_labels_dict[dest_lang]) #.cpu().detach().numpy())\n class_predictions_dict[dest_lang].extend(current_class_predictions_dict[dest_lang]) #.cpu().detach().numpy())\n \n# elif goal in ['Text_Supervised']:\n## current_class_labels = current_class_labels_dict[dest_lang]\n## current_class_predictions = current_class_predictions_dict[dest_lang]\n## current_class_labels = current_class_labels.cpu().detach().numpy()\n## current_class_predictions = current_class_predictions.cpu().detach().numpy()\n# \n# \"\"\" Store Batch Data in The Dictionaries \"\"\"\n# #sentence_lens_dict[dest_lang].extend(current_sentence_lens)\n# class_labels_dict[dest_lang].extend(current_class_labels_dict[dest_lang]) #.cpu().detach().numpy())\n# class_predictions_dict[dest_lang].extend(current_class_predictions_dict[dest_lang]) #.cpu().detach().numpy())\n#\n# elif goal in ['MARGE']:\n# labels_dict[target_lang].extend(current_labels_dict[target_lang]) #.cpu().detach().numpy())\n# outputs_dict[target_lang].extend(current_outputs_dict[target_lang]) #.cpu().detach().numpy())\n# break # because only one target language per minibatch \n# if goal not in ['Supervised','Text_Supervised','Language_Change_Detection','Language_Detection']:\n## if 
current_labels_dict[dest_lang].data.dtype != torch.long:\n## current_labels_dict[dest_lang].data = current_labels_dict[dest_lang].data.type(torch.long)\n# \n## current_text_indices = current_labels_dict[dest_lang]\n## current_outputs = current_outputs_dict[dest_lang]\n## current_attn_coefs = current_attn_coefs_dict[dest_lang]\n## current_representations = current_representations_dict[dest_lang]\n# \"\"\" Store Batch Data in The Dictionaries \"\"\" \n# labels_dict[dest_lang].extend(current_labels_dict[dest_lang]) #.cpu().detach().numpy())\n# outputs_dict[dest_lang].extend(current_outputs_dict[dest_lang]) #.cpu().detach().numpy())\n## attn_coefs_dict[dest_lang].extend(current_attn_coefs.cpu().detach().numpy())\n## representations_dict[dest_lang].extend(current_representations.cpu().detach().numpy())\n## elif goal in ['Text_Supervised']:\n## current_representations = current_representations_dict[dest_lang]\n## representations_dict[dest_lang].extend(current_representations.squeeze().cpu().detach().numpy()) \n## else:\n## current_representations = current_representations_dict[dest_lang]\n## if goal in ['Language_Change_Detection','Language_Detection']:\n## current_representations = [representations.cpu().detach().numpy() for representations in current_representations]\n## else:\n## current_representations = current_representations.cpu().detach().numpy()\n## representations_dict[dest_lang].extend(current_representations) \n# \n## modality_list.append(modality)\n## indices_list.append(indices)\n## task_names_list.append(task_names)\n \n batch_num += 1\n #if batch_num == 2:\n # break\n \n #outputs_list, labels_list, modality_list, indices_list, task_names_list, pids_list = flatten_arrays(outputs_list,labels_list,modality_list,indices_list,task_names_list,pids_list)\n if goal == 'IC':\n if phase == 'train1':\n epoch_loss = running_loss / len(dataloaders[phase].dataset)\n else:\n epoch_loss = 0 #filler\n elif goal in ['VQA','Supervised','Text_Supervised','Language_Change_Detection','Language_Detection','MLM','ELECTRA','MARGE']:\n epoch_loss = running_loss / len(dataloaders[phase].dataset) \n \n \"\"\" Removed Recently \"\"\"\n #representations_list = np.concatenate(representations_list)\n \n if goal == 'IC':\n \"\"\" BLEU Score Evaluation \"\"\"\n# epoch_bleu = calculate_bleu_score(outputs_dict,labels_dict,token2id_dict)\n# epoch_rouge = calculate_rouge_score(outputs_dict,labels_dict,token2id_dict)\n# epoch_meteor = calculate_meteor_score(outputs_dict,labels_dict,token2id_dict) \n return epoch_loss, epoch_bleu, epoch_rouge, epoch_meteor, outputs_dict, labels_dict #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list\n elif goal == 'VQA':\n \"\"\" Accuracy of Answers \"\"\"\n epoch_acc = calculate_answer_accuracy(outputs_dict,class_labels_dict)\n return epoch_loss, epoch_acc #representations_list, labels_list #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list\n elif goal in ['Supervised','Text_Supervised','Language_Change_Detection','Language_Detection']:\n if goal in ['Language_Change_Detection','Language_Detection']:\n epoch_acc = calculate_language_detection_accuracy(class_predictions_dict,class_labels_dict,goal)\n else:\n \"\"\" Accuracy of Answers \"\"\"\n epoch_acc = calculate_answer_accuracy(class_predictions_dict,class_labels_dict)\n return epoch_loss, epoch_acc #representations_list, labels_list #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, 
sentence_lens_list\n elif goal in ['MLM','ELECTRA','MARGE']:\n return epoch_loss#, outputs_dict, labels_dict #representations_list, labels_list #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list", "def task1(self):\n \n pass", "def main():\n args = parse_args()\n\n make_session = bootstrap(\n 'sqlite:///{}'.format(DB_NAME),\n )\n session = make_session()\n\n for vase in session.query(Vase).order_by(Vase.produced_start):\n for query_str in make_searches(vase):\n pass", "def main():\n vunit = vunit_pkg.VUnit.from_argv()\n vunit = map_sources(vunit)\n run_tests(vunit)", "def main():\n if VERBOSE:\n logging.basicConfig(format='%(levelname)s:\\t%(message)s',\n level=logging.DEBUG)\n\n # Run tests\n stderr_lbs()\n nocleanup()\n multiple_lbs()\n basic_kivaloo()\n\n # Final shutdown of any remaining servers\n kivaloo.servers.Server_kvlds.shutdown()\n kivaloo.servers.Server_lbs.shutdown()", "def task_process(args):\n if args.mode == 'change model':\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n os.system('rm -rf ctpn_change_{}x{}.onnx'.format(h, w))\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n os.system('{} change_model.py --input_path={}/ctpn_{}x{}.onnx --output_path={}/ctpn_change_{}x{}.onnx' \\\n .format(args.interpreter, args.src_dir, h, w,args.res_dir, h, w)) \n if args.mode == 'preprocess':\n for i in range(config.center_len):\n os.system('mkdir -p {}_{}x{}'.format(args.res_dir, config.center_list[i][0], config.center_list[i][1]))\n os.system('{} ctpn_preprocess.py --src_dir={} --save_path={}' \\\n .format(args.interpreter, args.src_dir, args.res_dir))\n if args.mode == 'ais_infer':\n fps_all = 0\n os.system('mkdir -p {}/inf_output'.format(args.res_dir))\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n\n os.system('{} --model={} --input={}_{}x{} --dymHW {},{} --device {} --batchsize={} --output={}/inf_output' \\\n .format(args.interpreter, args.om_path, args.src_dir ,h , w, h, w,args.device, args.batch_size, args.res_dir))\n\n sumary_path = glob.glob('{}/inf_output/*ary.json'.format(args.res_dir))[0]\n with open(sumary_path, 'r') as f:\n output = json.load(f)\n throughput = output['throughput'] \n fps_all = fps_all + throughput * config.center_count[i]\n os.system('rm -f {}'.format(sumary_path))\n os.system('mv {}/inf_output/*/*.bin {}'.format(args.res_dir, args.res_dir))\n os.system('rm {}/inf_output -rf'.format(args.res_dir))\n fps_all = fps_all / config.imgs_len\n print(\"====performance data====\")\n print('CTPN bs{} models fps:{}'.format(args.batch_size, fps_all))", "def main():\r\n parser = get_parser()\r\n config = parser.parse_args(['--cfg', 'config.yaml'])\r\n result_filing.init_config_vars(config)\r\n run_id = config.info.run_id\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n\r\n operation = config.info.operation_type\r\n logger.info(\"Selected operation type %s.\"%(operation))\r\n if operation == const.TRAIN_OP:\r\n train.train_model(config)\r\n elif operation == const.DEPLOY_OP:\r\n test.test_model(config)", "def task_stagnant(task):", "def main():\n pass", "async def main():\n if os.environ['Reset'] == 'True':\n Scanner.enable_all_projects()\n if os.environ['Edit'] == 'True':\n Scanner.edit()\n await Scanner.Scan()", "def test_solve_task(self):\n pass", "def run(self):\n\t\tlog = logging.getLogger()\n\t\tsuccess = True\n\t\tself.task[\"custom\"] = 
str(self.task[\"custom\"])\n\t\tself.db = CuckooDatabase()\n\n\t\t# Generate analysis results storage folder path with current task id.\n\t\tresults_path = CuckooConfig().get_analysis_results_path()\n\t\tsave_path = os.path.join(results_path, str(self.task[\"id\"]))\n\n\t\tif (self.task[\"custom\"] == \"sleep\"):\n\t\t\timport time\n\t\t\t# sleep longer than default timeout of hsn2-cuckoo\n\t\t\ttime.sleep(905)\n\t\t# Additional check to verify that the are not saved results with the\n\t\t# same task ID.\n\t\tif os.path.exists(save_path):\n\t\t\tlog.error(\"There are already stored results for current task \" \\\n\t\t\t\t\t \"with ID %d at path \\\"%s\\\". Abort.\"\n\t\t\t\t\t % (self.task[\"id\"], save_path))\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\t\treturn False\n\n\t\t# Check if target file exists.\n\t\tlog.debug(os.path.exists(self.task[\"custom\"]))\n\t\tif not os.path.exists(self.task[\"custom\"]):\n\t\t\tlog.error(\"Cannot find custom file \\\"%s\\\". Abort.\"\n\t\t\t\t\t % self.task[\"custom\"])\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\t\treturn False\n\n\t\t# Check if target is a directory.\n\t\tif os.path.isdir(self.task[\"custom\"]):\n\t\t\tlog.error(\"Specified target \\\"%s\\\" is a directory. Abort.\"\n\t\t\t\t\t % self.task[\"custom\"])\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\t\treturn False\n\n\t\t# 4. Extract appropriate log archive as mock logs analysis results\n\t\t# Modified _save_results so that it extracts the tar file passed in target\n\t\tself._save_results(self.task[\"custom\"], save_path)\n\n\t\t# 5. Update task in database with proper status code.\n\t\tif success:\n\t\t\tself.db.complete(self.task[\"id\"], True)\n\t\telse:\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\tlog.info(\"Analyis completed.\")\n\n\t\treturn True", "def main():\n\n # set up output directory and file\n output_file_folder = \"output/{}\".format(args.experiment_name)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = \"{}/{}.csv\".format(output_file_folder, args.model_name)\n args.checkpoint_name = \"{}/{}.pt\".format(output_file_folder, args.model_name + \"_best_model\")\n\n # read lcquad merged data\n if args.dataset_name == \"lcquad\":\n df_train = pd.read_csv(\"./data/lcquad/gold_db/train_gold.csv\")\n df_valid = pd.read_csv(\"./data/lcquad/gold_db/valid_gold.csv\")\n df_test = pd.read_csv(\"./data/lcquad/gold_db/lcquad_test_sorted.csv\")\n args.gold_file_name = \"lcquad/lcquad_gt_5000.csv\"\n # elif args.dataset_name == \"qald9\":\n # df_train = pd.read_csv(\"./data/qald-9/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/qald-9/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/qald-9/test_gold.csv\")\n # args.gold_file_name = \"qald/qald_data_gt.csv\"\n # elif args.dataset_name == \"webqsp\":\n # df_train = pd.read_csv(\"./data/webqsp/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/webqsp/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/webqsp/test_gold.csv\")\n # args.gold_file_name = \"webqsp/webqsp_data_gt.csv\"\n\n train_data = read_data_file(df_train, device, \"train\")\n valid_data = read_data_file(df_valid, device, \"valid\")\n test_data = read_data_file(df_test, device, \"test\")\n\n # train model and evaluate\n if args.model_name == \"pure\":\n model = PureNameLNN(args.alpha, -1, False)\n elif args.model_name == \"ctx\":\n model = None\n elif args.model_name == 'type':\n model = None\n elif args.model_name == \"pure_ctx\":\n model = None\n elif args.model_name == 
\"pure_type\":\n model = None\n elif args.model_name == \"ctx_type\":\n model = None\n\n model = model.to(device)\n print(\"model: \", args.model_name, args.alpha)\n\n # training\n train(model, train_data, valid_data, test_data, args.checkpoint_name, args.num_epoch, args.margin, args.learning_rate)", "def main():\n args = get_args()\n\n src_dir = args.input\n\n if os.path.exists(args.output):\n print(\"output directory already exists\")\n sys.exit(1)\n os.makedirs(args.output)\n copy_submission_dir(args.input, args.output, args.submitter)\n src_dir = args.output\n\n config = checker.Config(\n args.version,\n args.extra_model_benchmark_map)\n\n if not args.nodelete_empty_dirs:\n delete_empty_dirs(os.path.join(src_dir))\n\n os.chdir(src_dir)\n\n infer_scenario_results(args.submitter, args.noinfer_low_accuracy_results, config)\n\n return 0", "def main():\n try:\n res = requests.get('http://localhost:9200')\n pprint(json.loads(res.content.decode('utf-8')))\n except requests.exceptions.ConnectionError:\n print(\"ERROR: ELASTICSEARCH Server is not running!\")\n exit(-1)\n\n # scrapeAndSaveNewsArticles()\n # generateNewsDocsCSV() # may need to be modified based on how scrapeAndSave function file output\n if not es_client.indices.exists(index='huffpost_news_index'):\n print(\"PLEASE WAIT... LOADING DOCUMENTS INTO INVERTED INDEX\")\n indexDocsToES('huffpost_news_index')", "def main():\n\n\tgdl = TwitterDataLoader()\t\n\tgdl.load_twitter_data_to_db(truncate_table=False, skip_loaded_files=True)", "def main(args):\n \n ## Load & Preprocess data \n if args.data_name == 'amsterdam': \n file_name = '../data/amsterdam/test_longitudinal_data.csv'\n ori_data = data_preprocess(file_name, args.max_seq_len)\n \n # Divide the data into training and testing\n divided_data, _ = data_division(ori_data, seed = args.seed, divide_rates = [args.train_rate, 1-args.train_rate])\n \n train_data = np.asarray(divided_data[0])\n test_data = np.asarray(divided_data[1])\n\n print('Finish data loading: ' + str(args.data_name)) \n \n ## Run hider algorithm\n if args.hider_model == 'timegan':\n generated_data = timegan.timegan(train_data)\n elif args.hider_model == 'add_noise':\n generated_data = add_noise.add_noise(train_data, args.noise_size) \n print('Finish hider algorithm training') \n \n ## Define enlarge data and its labels\n enlarge_data = np.concatenate((train_data, test_data), axis = 0)\n enlarge_data_label = np.concatenate((np.ones([train_data.shape[0],]), np.zeros([test_data.shape[0],])), axis = 0)\n \n # Mix the order\n idx = np.random.permutation(enlarge_data.shape[0])\n enlarge_data = enlarge_data[idx]\n enlarge_data_label = enlarge_data_label[idx]\n \n ## Run seeker algorithm\n reidentified_data = knn_seeker(generated_data, enlarge_data)\n \n print('Finish seeker algorithm training') \n \n ## Evaluate the performance\n # 1. Feature prediction\n feat_idx = np.random.permutation(train_data.shape[2])[:args.feature_prediction_no]\n ori_feat_pred_perf = feature_prediction(train_data, test_data, feat_idx)\n new_feat_pred_perf = feature_prediction(generated_data, test_data, feat_idx)\n \n feat_pred = [ori_feat_pred_perf, new_feat_pred_perf]\n \n print('Feature prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_feat_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_feat_pred_perf, 4)))\n \n # 2. 
One step ahead prediction\n ori_step_ahead_pred_perf = one_step_ahead_prediction(train_data, test_data)\n new_step_ahead_pred_perf = one_step_ahead_prediction(generated_data, test_data)\n \n step_ahead_pred = [ori_step_ahead_pred_perf, new_step_ahead_pred_perf]\n \n print('One step ahead prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_step_ahead_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_step_ahead_pred_perf, 4)))\n \n # 3. Reidentification score\n reidentification_score = reidentify_score(enlarge_data_label, reidentified_data)\n \n print('Reidentification score: ' + str(np.round(reidentification_score, 4)))\n \n shutil.rmtree('tmp')\n \n return feat_pred, step_ahead_pred, reidentification_score", "def __main() :\n launchTests()", "def RUN(self):", "def run_everything(self):\n try:\n if self.database == \"genome\":\n self.genome_deprecation()\n return\n\n record = self.ncbi_search(self.database, self.term)\n count = record[\"count\"]\n self.original_count = count\n\n self.main_organizer(count, record[\"qkey\"], record[\"webenv\"])\n except ProgramDone:\n return", "def main(seed, filter_, num_classes, setup, model_name, images_dir, precision_mode, test):\n f1, f2 = filter_\n model_name = 'flex_random_seed_{}_resnet_manual_highres_center_only_f1_{}_f2_{}'.format(seed, f1, f2)\n frozen_graph_filepath = './Models/Frozen_graphs/{}_{}/'.format(f1,f2) + model_name + '_frozen_graph.pb'\n frozen_graph, x_tensor, y_tensor = trt_frozen_graph_and_tensors(\n model_name=model_name, \n frozen_graph_filepath=frozen_graph_filepath, \n precision_mode=precision_mode\n )\n\n elapsed_time_full_dataset = []\n sum_of_confusion_matrices = np.zeros((6, 6))\n \n with tf.compat.v1.Session(graph=frozen_graph) as sess:\n for image_file in [img for img in os.listdir(images_dir) if img.endswith('.JPG')]:\n\n img = Image.open(images_dir + image_file)\n sx,sy = img.size\n\n print(\"Image size is %i x %i\" % (sx,sy)) # sx = 4912, sy = 3264\n print(\"Loading image %s\" % image_file)\n\n img_np = np.array(img)/255.0\n del img\n\n print(\"Predicting for image %s (%i x %i pixel)\" % (image_file,sx,sy))\n\n start = time.time()\n predictions_flex = sess.run(y_tensor, feed_dict={x_tensor:np.expand_dims(img_np, 0)})\n elapsed = time.time() - start\n elapsed_time_full_dataset.append(elapsed)\n del img_np #deleting afterwards to not take the deleting time into account\n\n print(\"Prediction took %f seconds (inference on full image)\" % elapsed)\n print(\"Merging predictions\")\n # merge the predictions on the quarter images\n predictions_flex_combined = np.zeros(predictions_flex.shape)\n\n elapsed = time.time()-start\n if embedded_version:\n print(\"Prediction took %f seconds (inference on split up image)\" % elapsed)\n\n if embedded_version:\n predictions_flex = predictions_flex_combined\n\n if save_annotations:\n print(\"Computing annotations...\")\n annotations = []\n d = 4\n for x in range(100, sx-101, d):\n for y in range(100, sy-101, d):\n x0 = int(round(float(x-100)/4) + 15)\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n annotations.append((probs_flex, x, y))\n\n if test: # add a prefix for test to not replace real experiments\n model_name = 'TEST_' + model_name\n\n # saving annotations\n annotation_dir = images_dir.replace('Data', 'Results/seeds/annotations_trt') + image_file\n annotate_and_save(annotations, d, annotation_dir, model_name, precision_mode)\n classes_image = annotate_and_save_per_class(\n annotations, \n d, \n annotation_dir, \n model_name, 
\n precision_mode\n )\n\n labels = load_labels(annotation_dir)\n confusion_matrix = np.zeros((num_classes, num_classes))\n for (c_name, x, y) in labels:\n if 100 <= x < sx-101 and 100 <= y < sy-101:\n x0 = int(round(float(x-100)/4) + 15 )\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n\n predicted_class = np.argmax(probs_flex)\n c = train_model.get_classes().index(c_name)\n confusion_matrix[c, predicted_class] += 1\n print(confusion_matrix)\n sum_of_confusion_matrices += confusion_matrix\n\n print(sum_of_confusion_matrices)\n sum_of_cm_fp = './Results/seeds/preds_trt/{}/{}_{}/sum_of_cm_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n elapsed_time_fp = './Results/seeds/elapsed_trt/{}/{}_{}/time_taken_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n\n\n np.save(sum_of_cm_fp, sum_of_confusion_matrices)\n np.save(elapsed_time_fp, elapsed_time_full_dataset)\n tf.reset_default_graph()", "def runall():\n sclogic.runall()", "def main():\r\n \r\n from TweetProcessor import TweetProcessor\r\n \r\n consumer_key = ''\r\n consumer_secret = ''\r\n tweepy_base_filter = \"Filter:links -Filter:retweets\"\r\n \r\n hashtags = [\r\n \"#covid-19\", \"#covid19\", \"#covid\", \"#coronavirus\", \"#corona\",\r\n \"#covid_19\"\r\n ]\r\n \r\n vt_keys = [\"\"]\r\n batch_size = 5000\r\n \r\n for i in range(len(hashtags)):\r\n \r\n try:\r\n tweepy_filter = hashtags[i] + \" \" + tweepy_base_filter\r\n print(\"starting pull with this filter: \" + str(tweepy_filter))\r\n \r\n tp = TweetProcessor(consumer_key, consumer_secret,\r\n tweepy_filter, vt_keys, batch_size)\r\n \r\n tp.run()\r\n\r\n except Exception as e: \r\n with open(\"tweetProcessorLog.txt\", \"a\") as file:\r\n file.write(\"\\n\" + str(datetime.now()) + \", error: \" + str(e))\r\n \r\n \r\n if e != \"Twitter error response: status code = 429\":\r\n raise e\r\n\r\n \r\n print(\"ERROR OCCURED: waiting for 15 minutes to avoid hitting tweepy request limit\")\r\n print(e)\r\n time.sleep(15 * 60)", "def main():\n run_nutanix_vm_creation_module()", "def main():\n\n experiment_config_path = _parse_input()\n all_experiments = read_experiments_config(experiment_config_path)\n\n for experiment_name, experiment_config in all_experiments.items():\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n results, model = perform_experiment(experiment_config)\n weights_file_name = save_model_weights(experiment_name, model)\n testing_layers_files = save_layers_logs(results['Layers Testing Output'], 'Testing')\n training_layers_files = save_layers_logs(results['Layers Training Output'], 'Training')\n\n results.pop('Layers Training Output')\n results.pop('Layers Testing Output')\n print(\"Testing Data Confusion Matrix\")\n print(np.array2string(results['Confusion Matrix']))\n results['Confusion Matrix'] = str(results['Confusion Matrix'].tolist())\n print(\"Experiment Results:\")\n print(json.dumps(results, indent=2, sort_keys=True))\n\n results_file = save_experiment_log(results, experiment_name)\n upload_to_s3([], [], [results_file], [weights_file_name], testing_layers_files + training_layers_files)", "def task4(self):\n\n pass", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def main(self):\n\n self._setup_task_manager()\n self._setup_source_and_destination()\n self.task_manager.blocking_start(waiting_func=self.waiting_func)\n 
self._cleanup()", "def main():\n test_merge_quick_sort()\n test_compare()", "def main():\n data_loader = TinyPerformanceLoader()\n data_loader.archive_corpus()", "def run_all_tasks(data_dir):\n print(\"Training and testing for all tasks ...\")\n for t in range(20):\n run_task(data_dir, task_id=t + 1)", "def task3(self):\n\n pass", "def run_offenseval_task_a(training_data, test_data):\n #grid_search_svm(training_data, test_data)\n compare_classifiers(classifiers(), training_data, test_data, dev_stage=True)\n #compare_classifiers(classifiers(), training_data, test_data, dev_stage=False)", "def _run_local_tests(self, *args, **kwargs):\n pass", "def _main_helper(self):\n asyncio.create_task(self._main())", "def main():\n parser = optparse.OptionParser(usage='%prog [options]', version=\"0.0.1\")\n parser.add_option('--settings', \\\n help='Python path to settings module. If this isn\\'t provided, the DJANGO_SETTINGS_MODULE enviroment variable will be used.')\n\n parser.add_option('-v', '--verbose', action='store_true', dest='verbose', \\\n default=False, help='Verbose output.')\n options = parser.parse_args()[0]\n if options.settings:\n os.environ[\"DJANGO_SETTINGS_MODULE\"] = options.settings\n else:\n os.environ[\"DJANGO_SETTINGS_MODULE\"] = \"settings\"\n\n probe_all()", "def main(houseid, restart):\r\n if str(restart) == \"yes\":\r\n os.system('echo \">>>>>>>RESTARTING MODEL<<<<<<<<<<<<<<<\"')\r\n try:\r\n os.remove(\"data/train.csv\")\r\n except:\r\n pass\r\n\r\n try:\r\n os.remove(\"tmp/clean_data/clean_data.csv\")\r\n except:\r\n pass\r\n\r\n try:\r\n os.remove(\"tmp/selected_features/selected_features_data.pkl\")\r\n except:\r\n pass\r\n\r\n try:\r\n os.remove(\"tmp/split_sets/split_dataset.pkl\")\r\n except:\r\n pass\r\n\r\n try:\r\n os.remove(\"tmp/models/model_elasticnet.pkl\")\r\n except:\r\n pass\r\n\r\n try:\r\n os.remove(\"tmp/models/model_mlp.pkl\")\r\n except:\r\n pass\r\n\r\n try:\r\n os.remove(\"tmp/models/model_lasso.pkl\")\r\n except:\r\n pass\r\n\r\n try:\r\n os.remove(\"tmp/models/model_randomforest.pkl\")\r\n except:\r\n pass\r\n\r\n try:\r\n os.remove(\"model/selected_model.pkl\")\r\n except:\r\n pass\r\n\r\n try:\r\n for f in glob.glob(\"predictions/*\"):\r\n os.remove(f)\r\n except:\r\n pass\r\n\r\n # os.system(\"rm tmp/clean_data/* tmp/selected_features/* tmp/models/* tmp/split_sets/* ../predictions/* data/* ../model/*\")\r\n os.system(\r\n \"PYTHONPATH=. luigi --module pipeline.task_7_prediction Prediction --idx \"\r\n + str(houseid)\r\n + \" --local-scheduler\"\r\n )\r\n print(\r\n os.system(\r\n \"echo 'Your prediction is:' & cat predictions/prediction-id-\"\r\n + str(houseid)\r\n + \".txt\"\r\n ),\r\n file=sys.stdout,\r\n )\r\n else:\r\n os.system(\r\n \"PYTHONPATH=. 
luigi --module pipeline.task_7_prediction Prediction --idx \"\r\n + str(houseid)\r\n + \" --local-scheduler > /dev/null\"\r\n )\r\n print(\r\n os.system(\r\n \"echo 'Your prediction is:' & cat predictions/prediction-id-\"\r\n + str(houseid)\r\n + \".txt\"\r\n ),\r\n file=sys.stdout,\r\n )", "def setup_to_finetune(model):\r\n # for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:\r\n # layer.trainable = False\r\n for layer in model.layers[:]:\r\n layer.trainable = True\r\n model.compile(optimizer=Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])", "def main():\n lake_drivers = Dynamic_Lake_Drivers()\n #lake_drivers.prepare_orography_ICE5G_0k_uncorrected()\n #lake_drivers.prepare_orography_ICE5G_0k_corrected()\n #lake_drivers.prepare_orography_ICE6G_21k_corrected()\n #lake_drivers.prepare_river_directions_with_depressions_from_glac1D()\n #lake_drivers.evaluate_glac1D_ts1900_basins()\n #import time\n # start = time.time()\n #lake_drivers.evaluate_ICE6G_lgm_basins()\n # end = time.time()\n # print(end - start)\n #lake_drivers.prepare_basins_from_glac1D()\n #lake_drivers.extract_lake_volumes_from_glac1D_basins()\n #lake_drivers.connect_catchments_for_glac1D()\n lake_drivers.connect_catchments_for_transient_run()\n #lake_drivers.extract_volumes_for_transient_run()\n #lake_drivers.add_10min_rmouth_to_transient_data()\n #lake_drivers.expand_transient_data_catchments_to_include_rmouth()\n #lake_drivers.remove_no_data_values_from_upscaled_MERIT_correction_set()\n #lake_drivers.remove_disconnected_points_from_slm()", "def train_on_tasks(config):\n seed = config['seed']\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n tasks = config.pop('tasks')\n\n task_vis_params = config.pop('vis_params')\n\n # all_stats = []\n transfer_matrix = defaultdict(list)\n total_steps = 0\n\n if 'learner' in config:\n learner = config.pop('learner')\n else:\n learner_path = config.pop('learner_path')\n learner = torch.load(learner_path)\n task_level_tuning = config.pop('task_level_tuning')\n if task_level_tuning:\n ray_params = config.pop('ray_params')\n local_mode = config.pop('local_mode')\n redis_address = config.pop('redis_address')\n all_analysis = []\n selected_tags = []\n for t_id, (task, vis_p) in enumerate(zip(tasks, task_vis_params)):\n #todo sync transfer matrix\n static_params = dict(\n t_id=t_id, task=task, tasks=tasks, vis_p=vis_p,\n transfer_matrix=transfer_matrix, total_steps=total_steps\n )\n\n if task_level_tuning:\n if not ray.is_initialized():\n if local_mode:\n ray.init(local_mode=local_mode)\n else:\n ray.init(redis_address,\n log_to_driver=False,\n logging_level=logging.ERROR)\n\n config['static_params'] = static_params\n config['learner_path'] = learner_path\n config['seed'] += t_id\n\n # reporter = CLIReporter(max_progress_rows=10)\n # print(reporter._metric_columns)\n # print(reporter.DEFAULT_COLUMNS)\n # reporter.add_metric_column('avg_acc_val')\n # reporter.add_metric_column('total_params')\n # reporter.add_metric_column('fw_t')\n # reporter.add_metric_column('data_t')\n # reporter.add_metric_column('eval_t')\n # reporter.add_metric_column('epoch_t')\n # reporter.add_metric_column('total_t')\n # ray_params['progress_reporter'] = reporter\n analysis = tune.run(train_t, config=config, **ray_params)\n\n all_analysis.append(analysis)\n\n def get_key(trial):\n # return trial.last_result['avg_acc_val_so_far']\n return trial.last_result['best_val']\n best_trial = max(analysis.trials, key=get_key)\n for trial in 
analysis.trials:\n if trial != best_trial:\n trial_path = trial.logdir\n shutil.rmtree(trial_path)\n # am = np.argmax(list(map(get_key, analysis.trials)))\n # print(\"BEST IS {}: {}\".format(am, best_trial.last_result['avg_acc_val']))\n\n # t = best_trial.last_result['duration_iterations']\n total_steps = best_trial.last_result['total_steps']\n selected_tags.append(best_trial.experiment_tag)\n best_learner_path = os.path.join(best_trial.logdir, 'learner.pth')\n learner = torch.load(best_learner_path, map_location='cpu')\n shutil.rmtree(best_trial.logdir)\n\n #todo UPDATE LEARNER AND SAVE\n torch.save(learner, learner_path)\n else:\n rescaled, t, metrics, b_state_dict, \\\n stats = train_single_task(config=deepcopy(config), learner=learner,\n **static_params)\n\n # all_stats.append(stats)\n # update_rescaled(list(rescaled.values()), list(rescaled.keys()), tag,\n # g_task_vis, False)\n\n if task_level_tuning:\n return all_analysis, selected_tags\n else:\n save_path = path.join(tune.get_trial_dir(), 'learner.pth')\n logger.info('Saving {} to {}'.format(learner, save_path))\n torch.save(learner, save_path)", "def run(_):\n pass", "def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)", "def run_task(data_dir, task_id):\n print(\"Train and test for task %d ...\" % task_id)\n\n print(\"We are going to use this\")\n \n\n # Parse data\n train_files = glob.glob('%s/qa3_*_train.txt' % (data_dir, task_id))\n test_files = glob.glob('%s/qa3_*_test.txt' % (data_dir, task_id))\n\n dictionary = {\"nil\": 0}\n train_story, train_questions, train_qstory = parse_babi_task(train_files, dictionary, False)\n test_story, test_questions, test_qstory = parse_babi_task(test_files, dictionary, False)\n\n general_config = BabiConfig(train_story, train_questions, dictionary)\n\n\n # #### R: this line build a empty model to train\n # memory, model, loss = build_model(general_config)\n\n # if general_config.linear_start:\n # train_linear_start(train_story, train_questions, train_qstory, memory, model, loss, general_config)\n # else:\n # train(train_story, train_questions, train_qstory, memory, model, loss, general_config)\n\n\n\n # memory, model, loss = build_model(general_config)\n\n # this line\n test(test_story, test_questions, test_qstory, memory, model, loss, general_config)", "def main():\n analyze_perturbations()", "def main(_):\n build_vocabularies(source_languages, target_languages)\n build_buckets(buckets, max_vocab_size, source_languages, target_languages)\n\n # Load bucketed data (all in memory)\n # print \"Loading data into memory (all buckets)\"\n # data_set = []\n # for bucket_id in range(len(buckets)):\n # data_set.append(load_data(bucket_id))\n\n # Start session\n with tf.Session() as sess:\n # Build model\n model = MultiTaskMT(source_languages, target_languages, max_vocab_size, FLAGS.hidden,\n FLAGS.num_layers, buckets, FLAGS.learning_rate,\n FLAGS.learning_rate_decay_factor, FLAGS.max_gradient_norm,\n FLAGS.batch_size)\n ckpt = tf.train.get_checkpoint_state(FLAGS.log_dir)\n data_set, prev_b = None, None\n if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):\n print \"Reading model parameters from %s. Restoring vars!\" % ckpt.model_checkpoint_path\n model.saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n print \"Created model with fresh parameters. 
Initializing vars!\"\n sess.run(tf.initialize_all_variables())\n\n # Start Training Loop\n writer = tf.train.SummaryWriter(\"log/graph\", sess.graph)\n print \"Entering Training Loop\"\n step_time, loss = 0.0, 0.0\n while True:\n # Set training index variables\n current_step = sess.run(model.global_step)\n previous_losses = []\n\n # Pick a bucket for training run\n b = np.random.choice(3)\n\n # Load data set into memory\n # if prev_b != b:\n # data_set = load_data(b)\n # prev_b = b\n\n # Initialize cycle counter\n cycle_count = int(current_step) / int(len(order))\n\n # Evaluate model every so often.\n if cycle_count % FLAGS.eval_every == 0:\n eval_data = [('de-en-eval.de', 'de-en-eval.en'), ('en-es-eval.en', 'en-es-eval.es'),\n ('en-fr-eval.en', 'en-fr-eval.fr')]\n for b in range(len(buckets)):\n for s, t in eval_data:\n source_data, target_data = [], []\n path = os.path.join(FLAGS.bucket_dir, str(b))\n with tf.gfile.GFile(os.path.join(path, s)) as src:\n with tf.gfile.GFile(os.path.join(path, t)) as trg:\n source, target = src.readline(), trg.readline()\n while source and target:\n source_data.append([int(x) for x in source.split()])\n target_data.append([int(x) for x in target.split()])\n source, target = src.readline(), trg.readline()\n src_embeddings, trg_embeddings = model.eval(sess, source_data, target_data,\n s[-2:], t[-2:], b)\n random.shuffle(target_data)\n shuf_src, shuf_trg = model.eval(sess, source_data, target_data, s[-2:],\n t[-2:], b)\n\n cos_dist, euc_dist = get_distance_metrics(src_embeddings, trg_embeddings)\n shuf_cos, shuf_euc = get_distance_metrics(shuf_src, shuf_trg)\n\n print \"Average Cosine Distance for %s-%s in bucket %s is %s\" % (s[-2:],\n t[-2:], b,\n cos_dist)\n print \"Average Euclidean Distance for %s-%s in bucket %s is %s\" % (s[-2:],\n t[-2:],\n b,\n euc_dist)\n print \"Average Cosine Distance for Random %s-%s in bucket %s is %s\" % (\n s[-2:], t[-2:], b, shuf_cos\n )\n print \"Average Euclidean Distance for Random %s-%s in bucket %s is %s\" % (\n s[-2:], t[-2:], b, shuf_euc\n )\n print \"Cosine Distance between Random and Real is the same: %s\" % (\n str(shuf_cos == cos_dist)\n )\n print \"Euclidean Distance between Random and Real is the same: %s\" % (\n str(shuf_euc == euc_dist)\n )\n with open(os.path.join(FLAGS.eval_dir, '%s_%s-%s.cos' % (str(b), s[-2:], t[-2:])), 'a+') as f:\n f.write(\"%s\\t%s\\n\" % (str(cycle_count), str(cos_dist)))\n\n with open(os.path.join(FLAGS.eval_dir, '%s_%s-%s.euc' % (str(b), s[-2:], t[-2:])), 'a+') as f:\n f.write(\"%s\\t%s\\n\" % (str(cycle_count), str(euc_dist)))\n\n with open(os.path.join(FLAGS.eval_dir, 'random_%s_%s-%s.cos' % (str(b), s[-2:], t[-2:])), 'a+') as f:\n f.write(\"%s\\t%s\\n\" % (str(cycle_count), str(shuf_cos)))\n\n with open(os.path.join(FLAGS.eval_dir, 'random_%s_%s-%s.euc' % (str(b), s[-2:], t[-2:])), 'a+') as f:\n f.write(\"%s\\t%s\\n\" % (str(cycle_count), str(shuf_euc)))\n\n # Iterate through order cycle\n for p in order:\n print \"Loading data for\", p\n data_set = load_data(p, b)\n # Get a batch and make a step.\n start_time = time.time()\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(data_set[p], b)\n step_loss, embeddings, _ = model.step(sess, encoder_inputs, decoder_inputs,\n target_weights, p, b, False)\n step_time += (time.time() - start_time) / FLAGS.check_every\n loss += step_loss / FLAGS.check_every\n current_step += 1\n progress(current_step % FLAGS.check_every, FLAGS.check_every,\n \" Step %s\" % (current_step / FLAGS.check_every))\n\n # Once in a while, we save 
checkpoint, and print statistics.\n if current_step % FLAGS.check_every == 0:\n # Print statistics for the previous chunk.\n print \"\"\n perplexity = math.exp(loss) if loss < 1000 else float('inf')\n print (\"Global step %d, Learning rate %.4f, Step-time %.2f, Perplexity %.2f\" %\n (model.global_step.eval(), model.learning_rate.eval(), step_time,\n perplexity))\n\n with open(os.path.join(FLAGS.eval_dir, 'loss.txt'), 'a+') as loss_file:\n loss_file.write(\"%s %s\\n\" % (str(model.global_step.eval()), str(perplexity)))\n\n # Decrease learning rate if no improvement was seen over last 3 times.\n if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):\n sess.run(model.learning_rate_decay_op)\n previous_losses.append(loss)\n\n # Save checkpoint and zero timer and loss.\n checkpoint_path = os.path.join(FLAGS.log_dir, \"translate.ckpt\")\n model.saver.save(sess, checkpoint_path, global_step=model.global_step)\n step_time, loss = 0.0, 0.0\n sys.stdout.flush()\n print \"\"\n cycle_count += 1", "def main():\n args = load_args()\n\n perturbation_file = args.perturbation_file\n vm_params = load_yaml(args.vm_params_location)\n processes = args.n_processes\n verbose = args.verbose\n\n if args.perturbation:\n if args.model:\n perturbation_model = pd.read_csv(args.model)\n generate_velocity_model_perturbation_file_from_model(\n vm_params, perturbation_model, perturbation_file, processes, verbose\n )\n elif args.parameter_file:\n common_params, layer_params = load_parameter_file(args.parameter_file)\n generate_velocity_model_perturbation_file_from_config(\n common_params, layer_params, perturbation_file, processes, verbose\n )\n else:\n create_constant_vm_file(\n perturbation_file, vm_params[\"nx\"] * vm_params[\"ny\"] * vm_params[\"nz\"]\n )\n\n if args.fault_damage_zone:\n apply_fault_damage_zone(\n srf_location=args.srf_location,\n vm_params=vm_params,\n pert_f_location=perturbation_file,\n depth_km=args.depth_km,\n max_depth_km=args.max_depth_km,\n width_km=args.width_km,\n max_width_km=args.max_width_km,\n min_damage_velocity=args.max_velocity_drop,\n n_processes=processes,\n )", "def main():\n datasets = {}\n for dataset_name in tqdm(SOURCE_DATASET_NAMES, desc=\"Processing datasets and fitting base models\"):\n logger.info(f\"processing dataset {dataset_name}\")\n clusters_path: Optional[str] = None\n if dataset_name not in PAIRWISE_ONLY_DATASETS:\n clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + \"_clusters.json\")\n train_pairs_path = None\n val_pairs_path = None\n test_pairs_path = None\n else:\n train_pairs_path = os.path.join(DATA_DIR, dataset_name, \"train_pairs.csv\")\n val_pairs_path = os.path.join(DATA_DIR, dataset_name, \"val_pairs.csv\")\n if not os.path.exists(val_pairs_path):\n val_pairs_path = None\n test_pairs_path = os.path.join(DATA_DIR, dataset_name, \"test_pairs.csv\")\n\n logger.info(f\"loading dataset {dataset_name}\")\n anddata = ANDData(\n signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_signatures.json\"),\n papers=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_papers.json\"),\n name=dataset_name,\n mode=\"train\",\n specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_specter.pickle\"),\n clusters=clusters_path,\n block_type=BLOCK_TYPE,\n train_pairs=train_pairs_path,\n val_pairs=val_pairs_path,\n test_pairs=test_pairs_path,\n train_pairs_size=N_TRAIN_PAIRS_SIZE,\n val_pairs_size=N_VAL_TEST_SIZE,\n test_pairs_size=N_VAL_TEST_SIZE,\n preprocess=True,\n )\n\n logger.info(f\"featurizing 
{dataset_name}\")\n train, val, test = featurize(\n anddata,\n FEATURIZER_INFO,\n n_jobs=N_JOBS,\n use_cache=True,\n chunk_size=100,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,\n nan_value=NAN_VALUE,\n )\n X_train, y_train, nameless_X_train = train\n X_val, y_val, nameless_X_val = val\n X_test, y_test, nameless_X_test = test\n\n dataset = {}\n dataset[\"anddata\"] = anddata\n dataset[\"X_train\"] = X_train\n dataset[\"y_train\"] = y_train\n dataset[\"X_val\"] = X_val\n dataset[\"y_val\"] = y_val\n dataset[\"X_test\"] = X_test\n dataset[\"y_test\"] = y_test\n dataset[\"nameless_X_train\"] = nameless_X_train\n dataset[\"nameless_X_val\"] = nameless_X_val\n dataset[\"nameless_X_test\"] = nameless_X_test\n dataset[\"name\"] = anddata.name\n datasets[dataset_name] = dataset\n\n anddatas = [\n datasets[dataset_name][\"anddata\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in PAIRWISE_ONLY_DATASETS\n ]\n\n X_train = np.vstack([datasets[dataset_name][\"X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n y_train = np.hstack([datasets[dataset_name][\"y_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n X_val = np.vstack(\n [datasets[dataset_name][\"X_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n y_val = np.hstack(\n [datasets[dataset_name][\"y_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n\n nameless_X_train = np.vstack([datasets[dataset_name][\"nameless_X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n nameless_X_val = np.vstack(\n [\n datasets[dataset_name][\"nameless_X_val\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in {\"augmented\"}\n ]\n )\n\n logger.info(\"fitting pairwise\")\n union_classifier = PairwiseModeler(n_iter=N_ITER, monotone_constraints=MONOTONE_CONSTRAINTS)\n union_classifier.fit(X_train, y_train, X_val, y_val)\n\n nameless_union_classifier = None\n if USE_NAMELESS_MODEL:\n logger.info(\"nameless fitting pairwise for \" + str(SOURCE_DATASET_NAMES))\n nameless_union_classifier = PairwiseModeler(\n n_iter=N_ITER,\n monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS,\n )\n nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)\n logger.info(\"nameless pairwise fit for \" + str(SOURCE_DATASET_NAMES))\n\n logger.info(\"fitting clusterer for\")\n union_clusterer = Clusterer(\n FEATURIZER_INFO,\n union_classifier.classifier,\n cluster_model=FastCluster(),\n search_space=search_space,\n n_jobs=N_JOBS,\n nameless_classifier=nameless_union_classifier.classifier if nameless_union_classifier is not None else None,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO if nameless_union_classifier is not None else None,\n )\n union_clusterer.fit(anddatas)\n print(\n \"best clustering parameters:\",\n union_clusterer.best_params,\n )\n\n models = {}\n models[\"clusterer\"] = union_clusterer\n\n with open(\n f\"full_union_model_script_dump_average_{FEATURIZER_VERSION}.pickle\",\n \"wb\",\n ) as _pickle_file:\n pickle.dump(models, _pickle_file)\n logger.info(\"Done.\")", "def run_task(self) -> Task:", "def run_all_tests():\n remove_dbs()\n run_training_tests()\n run_custom_training_tests()\n run_training_save_tests()\n run_validation_tests()\n run_feature_extraction_tests()", "def runner():\r\n logging.basicConfig(filename='log/anonymizer.log', level=logging.INFO)\r\n\r\n task = find_job_smallest_colset()\r\n if task is None:\r\n return False\r\n\r\n df = anonymizer.retrieve_data()\r\n print(\"Data fetched with {0} 
columns, processing now\".format(len(task[\"columns\"])))\r\n\r\n df_filenames = anonymizer.identify_treat_1st_2nd_class_identifier(df[task[\"columns\"]], len(task[\"columns\"]), retrieve=\"df\")\r\n store_dfs_in_HANA(df_filenames, task[\"table_name\"])\r\n\r\n status = r.delete(task[\"job_id\"])\r\n logging.info(\"Job finished with status {0}\".format(status))\r\n return True", "def main(ft_setups, ft_strategies):\n\n num_procs = 16\n\n # initialize level parameters\n level_params = dict()\n level_params['restol'] = 1e-09\n\n # initialize step parameters\n step_params = dict()\n step_params['maxiter'] = 50\n\n # initialize space transfer parameters\n space_transfer_params = dict()\n space_transfer_params['finter'] = True\n space_transfer_params['rorder'] = 2\n space_transfer_params['iorder'] = 6\n\n # initialize sweeper parameters\n sweeper_params = dict()\n sweeper_params['quad_type'] = 'RADAU-RIGHT'\n sweeper_params['num_nodes'] = [3]\n\n # initialize controller parameters\n controller_params = dict()\n controller_params['logger_level'] = 30\n\n for setup in ft_setups:\n if setup == 'HEAT':\n # initialize problem parameters\n problem_params = dict()\n problem_params['nu'] = 0.5\n problem_params['freq'] = 1\n problem_params['nvars'] = [255, 127]\n problem_params['bc'] = 'dirichlet-zero'\n\n level_params['dt'] = 0.5\n\n space_transfer_params['periodic'] = False\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = heatNd_forced # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 8.0\n\n elif setup == 'ADVECTION':\n # initialize problem parameters\n problem_params = dict()\n problem_params['c'] = 1.0\n problem_params['nvars'] = [256, 128]\n problem_params['freq'] = 2\n problem_params['order'] = 2\n problem_params['bc'] = 'periodic' # boundary conditions\n\n level_params['dt'] = 0.125\n\n space_transfer_params['periodic'] = True\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = advectionNd # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = generic_implicit # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 2.0\n\n else:\n raise NotImplementedError('setup not implemented')\n\n # do a reference run without any faults to see how things would look like (and to get maxiter/ref_niter)\n ft.strategy = 'NOFAULT'\n\n controller = controller_nonMPI_hard_faults(\n num_procs=num_procs, 
controller_params=controller_params, description=description\n )\n\n # get initial values on finest level\n P = controller.MS[0].levels[0].prob\n uinit = P.u_exact(t0)\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')\n ref_niter = max([item[1] for item in sortedlist_stats])\n\n print('Will sweep over %i steps and %i iterations now...' % (num_procs, ref_niter))\n\n # loop over all strategies\n for strategy in ft_strategies:\n ft_iter = range(1, ref_niter + 1)\n ft_step = range(0, num_procs)\n\n print('------------------------------------------ working on strategy ', strategy)\n\n iter_count = np.zeros((len(ft_step), len(ft_iter)))\n\n # loop over all steps\n xcnt = -1\n for step in ft_step:\n xcnt += 1\n\n # loop over all iterations\n ycnt = -1\n for iter in ft_iter:\n ycnt += 1\n\n ft.hard_step = step\n ft.hard_iter = iter\n ft.strategy = strategy\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')\n niter = max([item[1] for item in sortedlist_stats])\n iter_count[xcnt, ycnt] = niter\n\n print(iter_count)\n\n np.savez(\n 'data/' + setup + '_results_hf_' + strategy,\n iter_count=iter_count,\n description=description,\n ft_step=ft_step,\n ft_iter=ft_iter,\n )", "def preprocess_main():", "def runTests(self):\n \n pass", "def Run():\r\n pass", "def task(self):", "def task(self):", "def main():\n args, config = parse_args()\n\n \"\"\"\n Log on wandb for track of experiments\n \"\"\"\n wandb.init(project=\"adaptive-finetuning-resnet\", name=f'Inference_{config.VERSION}', config=config)\n\n \"\"\"\n Set config GPUs and torch cuda device\n \"\"\"\n config.GPUS = str(0)\n torch.cuda.set_device(0)\n\n \"\"\"\n Create the model, put it to GPU and then create dataloader\n \"\"\"\n model = eval(config.MODULE)(config=config.NETWORK)\n model = model.cuda()\n\n val_loader = make_dataloader(config, mode='val', distributed=False)\n\n \"\"\"\n Load the model with pretrained weights\n \"\"\"\n assert config.NETWORK.PRETRAINED_MODEL != '', \"For inference, there must be pre-trained weights\"\n\n pretrain_state_dict = torch.load(config.NETWORK.PRETRAINED_MODEL, map_location = lambda storage, loc: storage)['net_state_dict']\n smart_model_load(model, pretrain_state_dict, loading_method=config.NETWORK.PRETRAINED_LOADING_METHOD)\n\n \"\"\"\n Pass the model and val loader for validation\n \"\"\"\n print(\"Inference started!!\")\n val_accuracy = do_validation(config, model, val_loader)\n print(f\"Inference complete!!\\nAccuracy:{val_accuracy}\")\n\n wandb.log({'Accuracy': val_accuracy})", "def main():\n\tpass", "def test_run(self):\n engine = Engine(self.config_file, self.api_token, 23)\n engine.msg_wait_iterations = 0\n\n # Put some stuff on the task queue\n self.setup_helper.add_volumetric_tasks(self.aws_creds[\"access_key\"],\n self.aws_creds['secret_key'],\n self.upload_queue_url, engine.backend)\n\n engine.join()\n engine.run()\n\n # Check for tile to exist\n s3 = boto3.resource('s3')\n ingest_bucket = s3.Bucket(self.ingest_bucket_name)\n\n with tempfile.NamedTemporaryFile() as test_file:\n with open(test_file.name, 'wb') as raw_data:\n ingest_bucket.download_fileobj(VOLUMETRIC_CUBOID_KEY, raw_data)\n with 
open(test_file.name, 'rb') as raw_data:\n # Using an empty CloudVolume dataset so all values should be 0.\n # dtype set in boss-v0.2-test.json under chunk_processor.params.info.data_type\n cuboid = self.s3_object_to_cuboid(raw_data.read(), 'uint8')\n unique_vals = np.unique(cuboid)\n assert 1 == len(unique_vals)\n assert 0 == unique_vals[0]", "async def setup(self):\n pass", "def main():\n\n while True:\n print(\"Let's explore some US bikeshare data!\")\n city, month, day = get_filters()\n df = load_data(city, month, day)\n # printing filter\n print(f\"Month: {month}, Day: {day}\")\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n display_records(df)\n restart = prompts.yes_no_prompt(\"\\nWould you like to restart?\\n\").launch()\n if not restart:\n break\n system(\"clear\")", "def run_all_default_tasks():\n for func in DEFAULT_TASKS_KEY:\n func()", "def main():\n parser = argparse.ArgumentParser(\n description=\"\"\"Lookup and Store Tweets utility. Fetches a tweet from\n the Twitter API given its GUID. Stores or updates the author\n Profile and Tweet in the db.\"\"\"\n )\n parser.add_argument(\n \"tweetGUIDs\",\n metavar=\"TWEET_GUID\",\n nargs=\"+\",\n help=\"\"\"List of one or more Tweet GUIDs to lookup, separated by spaces.\n The Tweet 'GUID' in the local db is equivalent to the Tweet 'ID'\n on the Twitter API.\"\"\",\n )\n parser.add_argument(\n \"-u\",\n \"--update-all-fields\",\n action=\"store_true\",\n help=\"\"\"If supplied, update all fields when updating an existing\n local Tweet record. Otherwise, the default behavior is to\n only update the favorite and retweet counts of the record.\"\"\",\n )\n args = parser.parse_args()\n\n APIConn = authentication.getAppOnlyConnection()\n tweets.lookupTweetGuids(\n APIConn, args.tweetGUIDs, onlyUpdateEngagements=not (args.update_all_fields)\n )", "def main():\n # process CLI arguments\n argparser = argparse.ArgumentParser(description=\"\"\"Script for classifying\ntweets according to their sentiment polarity\"\"\")\n\n subparsers = argparser.add_subparsers(help=\"type of operation to perform\", dest = \"mode\")\n # training options\n tr_parser = subparsers.add_parser(TRAIN, help = \"train the model\")\n tr_parser.add_argument(\"-d\", \"--dev-set\", help = \"development set\",\n type = argparse.FileType('r'))\n tr_parser.add_argument(\"-l\", \"--lexicon\", help = \"sentiment lexicon to use for sampling\",\n type = str, action = \"append\", default = [])\n _add_cmn_options(tr_parser)\n # testing options\n test_parser = subparsers.add_parser(TEST, help = \"test the model\")\n test_parser.add_argument(\"-d\", \"--debug\", help = \"output debug information\", \\\n action = \"store_true\")\n test_parser.add_argument(\"-v\", \"--verbose\", help = \"output scores along with predicted labels\",\n action = \"store_true\")\n test_parser.add_argument(\"--scikit\", help = \"use supervised scikit classifier istead of deep\",\n action = \"store_true\")\n _add_cmn_options(test_parser)\n # evaluation options (train and test at the same time)\n ev_parser = subparsers.add_parser(EVALUATE, help = \"evaluate trained model\")\n _add_cmn_options(ev_parser)\n ev_parser.add_argument(\"-v\", \"--verbose\", help = \"output errors along with evaluation\",\n action = \"store_true\")\n args = argparser.parse_args()\n # perform the requied action\n if args.mode == TRAIN:\n classifier = SentimentClassifier(a_path = None)\n if args.dev_set is None:\n dev_set = None\n else:\n dev_set = _read_dataset([args.dev_set])\n lexica = 
[_read_lexicon(ilex) for ilex in args.lexicon]\n pos, pos_re, neg, neg_re = _merge_lexica(lexica)\n classifier.train(_read_dataset(args.files), a_path=args.model,\n a_dev_set=dev_set, a_pos_re=pos_re, a_pos=pos,\n a_neg_re=neg_re, a_neg=neg)\n elif args.mode == TEST:\n # load model from default location\n y = \"\"; score = 0.\n if args.model:\n classifier = SentimentClassifier(args.model)\n else:\n classifier = SentimentClassifier()\n for ifile in args.files:\n for ifields in iterlines(ifile, TEST_TOPIC_IDX):\n if args.debug:\n classifier.debug(list(ifields[TXT_IDX]))\n else:\n y, score = classifier.predict(list(ifields[TXT_IDX]))\n if args.verbose:\n ifields.append(str(score))\n ifields.append(y)\n print(TAB.join(ifields))\n else:\n raise NotImplementedError\n # for ifile in a_files:\n # macro_MAE, micro_MAE = evaluate(classify(classifier, ifile), args.verbose, lambda x: x)\n # print(\"{:20s}{:.7}\".format(\"Macro-averaged MAE:\", macro_MAE), file = sys.stderr)\n # print(\"{:20s}{:.7}\".format(\"Micro-averaged MAE:\", micro_MAE), file = sys.stderr)\n return 0", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def task(ctx, config):\n pass" ]
[ "0.6263237", "0.6186667", "0.6184663", "0.60012114", "0.59752226", "0.59752226", "0.5871787", "0.58552486", "0.58440095", "0.58113", "0.579075", "0.57533723", "0.5727783", "0.57255954", "0.572342", "0.5712383", "0.5693964", "0.5688711", "0.567766", "0.56723386", "0.56669414", "0.5653863", "0.5635792", "0.56251514", "0.56023914", "0.55962884", "0.55865896", "0.5585294", "0.558345", "0.55782145", "0.55651", "0.5561043", "0.5559984", "0.55556655", "0.5553881", "0.55469966", "0.55407494", "0.5523066", "0.5508146", "0.5503226", "0.549945", "0.5496782", "0.54942065", "0.5458639", "0.54512984", "0.54448926", "0.54439884", "0.5442724", "0.5441703", "0.5441405", "0.54412913", "0.54366577", "0.5431818", "0.5431127", "0.5428524", "0.5427512", "0.5423094", "0.5416195", "0.5413223", "0.540926", "0.54072714", "0.54055893", "0.5400621", "0.5395344", "0.5391259", "0.53909063", "0.5388057", "0.5386581", "0.5381875", "0.5368102", "0.5364666", "0.53572565", "0.5351529", "0.5351474", "0.53339314", "0.53319186", "0.5330953", "0.53287786", "0.53266317", "0.53238696", "0.532318", "0.53229886", "0.5320402", "0.5309466", "0.53068554", "0.5297613", "0.5297194", "0.52954525", "0.52920693", "0.5291692", "0.5291692", "0.52859426", "0.52840567", "0.5277008", "0.5274158", "0.52740943", "0.52733827", "0.5271923", "0.5271121", "0.52710116", "0.5268772" ]
0.0
-1
Get absolute path to resource, works for dev and for PyInstaller
def resource_path(relative_path):
    try:
        # PyInstaller creates a temp folder and stores path in _MEIPASS
        base_path = sys._MEIPASS
    except Exception:
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_resource_path():\n return os.path.join(os.path.dirname(__file__), \"resources\") + os.path.sep", "def resourcePath(relative):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(os.path.join(os.path.dirname(sys.modules[__name__].__file__), 'assets'))\r\n\r\n return os.path.join(base_path, relative)", "def resourcePath(relative, dirname=\"data\"):\n # first look in pyinstaller bundle\n if hasattr(sys, \"_MEIPASS\"):\n path = os.path.join(sys._MEIPASS, dirname)\n \n else:\n # then look in py2app bundle\n path = os.environ.get(\"RESOURCEPATH\", None)\n if path is None:\n # then look in source code directory\n path = os.path.join(RESOURCE_BASE, dirname)\n \n path = os.path.join(path, relative)\n \n return path", "def resource_path(relative_path) :\n\n try :\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except :\n base_path = os.path.abspath(\".\")\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)\n\t# \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n\t# base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n\t# return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n\r\n except:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path=None):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n if not relative_path:\n return base_path\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\"../..\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\"..\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = 
os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.dirname(os.path.realpath(__file__))\n \n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS # pylint: disable=no-member\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\"\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except AttributeError:\n base_path = abspath(\".\")\n\n return join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n print(\"resource_path:\", os.path.join(base_path, relative_path))\n except Exception:\n base_path = os.path.abspath(\".\")\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS \n base_path = sys._MEIPASS\n _BINARY_DIST = True\n #print sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n\t\ttry:\r\n\t\t\t# PyInstaller creates a temp folder and stores path in _MEIPASS\r\n\t\t\tbase_path = sys._MEIPASS\r\n\t\texcept Exception:\r\n\t\t\tbase_path = os.path.abspath(\".\")\r\n\r\n\t\treturn os.path.join(base_path, relative_path)", "def resource_path(relative_path) :\n\n try :\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except :\n base_path = os.path.abspath(\".\")\n return os.path.join(base_path, relative_path)", "def get_absolute_resource_path(resource_path):\n return pkg_resources.resource_filename(\n cloudify_agent.__name__,\n os.path.join('resources', resource_path)\n )", "def resource_path(relative_path):\n # base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n # return 
os.path.join(base_path, relative_path)\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), relative_path)", "def path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\"./\")\n\n print(\"[RESOURCE]\", relative_path)\n rPath = os.path.join(base_path, relative_path)\n return rPath", "def resource_path(relative_path):\r\n try:\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(self, relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path= getattr(sys,'MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(self,relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\\\\Visual_Ressources\\\\\"+self.language+\"\\\\\") \n # \".\"\n # 'Content\\\\Back_End\\\\'\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\r\n return os.path.join(base_path, relative_path)", "def resource_path(self, relative_path):\r\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n path = os.path.join(base_path, relative_path)\n return path", "def resource_path(name):\n return os.path.join(\n os.path.dirname(__file__), 'images', 'resource', name)", "def resourcePath(relative_path):\n if hasattr(sys, '_MEIPASS'):\n return os.path.join(sys._MEIPASS, relative_path)\n return os.path.join(os.path.abspath(\".\"), relative_path)", "def resource_path(relative_path):\n if hasattr(sys, '_MEIPASS'):\n return os.path.join(sys._MEIPASS, relative_path)\n return os.path.join(os.path.abspath('.'), relative_path)", "def _get_resource_path(filename, path=Path.TEST):\n return os.path.normpath(os.path.join(path.value, filename))", "def get_resources_abs_path() -> pathlib.Path:\n return PathManager._ROOT.joinpath(\n PathManager._TILINGS_GUI, PathManager._RESOURCES\n )", "def resource_path(relative_path):\n try:\n if hasattr(sys, '_MEIPASS'):\n return 
os.path.join(sys._MEIPASS, relative_path)\n return os.path.join(os.path.abspath(\".\"), relative_path)\n except:\n pass", "def resource_path(relative_path):\n try:\n if hasattr(sys, '_MEIPASS'):\n return os.path.join(sys._MEIPASS, relative_path)\n return os.path.join(os.path.abspath(\".\"), relative_path)\n except:\n pass", "def get_resource_dir(cls) -> str:\n return os.path.join(\n os.path.realpath(os.path.dirname(__file__)),\n os.pardir,\n os.pardir,\n os.pardir,\n \"gem5\",\n \"resources\",\n )", "def resourcePath(self,relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n output = base_path + relative_path\n return output", "def get_resource(filename: str, path: str | None = None) -> str:\n root = Path(__file__).parent\n full_path = root if path is None else root / Path(path)\n return str(full_path / filename)", "def resource_path(relative_path):\n return os.path.join(BASEPATH, relative_path)", "def resource_path(p=()):\n # map a string to a tuple containing the string to provide the obvious shortcut\n if isinstance(p, str):\n p = (p,)\n return os.path.join(os.path.dirname(os.path.realpath(__file__)), *p)", "def absPath(myPath):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n return os.path.join(base_path, os.path.basename(myPath))\n except Exception:\n base_path = os.path.abspath(os.path.dirname(__file__))\n return os.path.join(base_path, myPath)", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def get_resource(self, rsc_path):\n\n\t\ttry:\n\t\t\tfrom pkg_resources import resource_filename\n\t\t\treturn resource_filename(__name__, rsc_path)\n\t\texcept ImportError:\n\t\t\treturn os.path.join(os.path.dirname(__file__), rsc_path)", "def _localfile(name):\n return os.path.abspath(resource_filename(__name__, name))", "def get_recipe_resource():\n return os.getenv(\"DKU_CUSTOM_RESOURCE_FOLDER\")", "def get_resource_filename(local_filename):\n return os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"resources\", local_filename\n )", "def get_resource(res_name, res_type=\"icons\"):\n own_path = os.path.dirname(__file__)\n resource_path = os.path.abspath(os.path.join(own_path, os.pardir, \"resources\", res_type))\n return os.path.join(resource_path, res_name)", "def get_resource_base_path(self): # real signature unknown; restored from __doc__\n return \"\"", "def resource(request):\n local_path = os.path.dirname(request.module.__file__)\n return lambda *args: get_resource_path(args, local_path)", "def _resource(path): # pragma: NO COVER\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def get_recipe_resource():\n return os.getenv(\"SKU_CUSTOM_RECIPE_RESOURCE_FOLDER\")", "def resource(self, *path):\n # TODO(vadimsh): Verify that file exists. 
Including a case like:\n # module.resource('dir').join('subdir', 'file.py')\n return self._module.RESOURCE_DIRECTORY.join(*path)", "def _abs_path(fn):\n return os.path.join(os.path.dirname(__file__), fn)", "def resource_path(self, resource):\n return str(self.path.joinpath(resource))", "def resource_path(self, resource):\n return str(self.path.joinpath(resource))", "def resource_path(relative_path):\n if hasattr(sys, '_MEIPASS'):\n return os.path.join(sys._MEIPASS, \"TopasGraphSim\", relative_path)\n\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir, os.pardir, relative_path)", "def getResource(resname, loc = None):\n # check the HOME for personal config file\n prv_filename = os.path.join(os.getenv(\"HOME\"), \".aphla\", resname)\n if os.path.exists(prv_filename):\n return prv_filename\n elif loc and resource_exists(loc, resname):\n # use the config within distribution\n return resource_filename(loc, resname)\n else:\n return None", "def GetResourcePath(self, resource_name, check=True):\n path = os.path.join(self.resources_dir, resource_name)\n if check:\n file_utils.CheckPath(path, 'resource')\n return path", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def get_resource(resource_path):\n\n return pkg_resources.resource_string(\n cloudify_agent.__name__,\n os.path.join('resources', resource_path)\n )", "def bundle_path(self, app):\n return (\n self.platform_path / self.output_format / safe_formal_name(app.formal_name)\n )", "def get_path():\n return path.abspath(path.dirname(path.dirname(__file__)))", "def absPath(path):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), path)", "def launcher_path() -> Optional[str]:\n return u.resource(LAUNCHER_SCRIPT)", "def _path(name: str):\n return os.path.join(ASSET_PATH, name)", "def widget_path(relative_path):\n real_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),\"..\"))\n base_path = getattr(sys, '_MEIPASS', real_path)\n return os.path.join(base_path, relative_path)", "def _file_path(self, file: str) -> str:\n return os.path.abspath(f\"tests/resources/{file}\")", "def test_resource_path(self):\n\n # Without arguments\n resources_root_path = os.path.abspath(os.path.join(\n MY_DIRECTORY, '..', '..', 'resources'\n ))\n self.assertEqual(resources_root_path, paths.resource())", "def GetSrc():\n return os.path.abspath(os.path.join(_THIS_DIR, os.pardir, os.pardir,\n os.pardir))", "def get_agent_resource_local_path(ctx, agent_config, resource):\n if agent_config.get(resource):\n origin = agent_config[resource]\n else:\n resource_path = DEFAULT_AGENT_RESOURCES.get(resource)\n if not resource_path:\n raise NonRecoverableError('no such resource: {0}'.format(resource))\n if resource == 'agent_package_path':\n origin = resource_path.format(agent_config['distro'],\n agent_config['distro_codename'])\n else:\n origin = resource_path.format(agent_config['distro'])\n ctx.logger.debug('resource origin: {0}'.format(origin))\n return origin", "def resource_filename(name):\n return pkg_resources.resource_filename(__name__, name)", "def asset_path(bundle_key: str) -> str:\n asset_base_path = current_app.config.get('ASSET_BASE_PATH', '')\n asset_file = current_app.config.get('assets', {}).get(bundle_key)\n if not asset_file:\n raise LookupError(f\"Missing asset file for {bundle_key}.\")\n return os.path.join(asset_base_path, asset_file)", "def get_bundled_schema_path():\n return str(data.load_resource(\"schema\"))", "def path(self) -> str:\n return 
self.src + \"/\"" ]
[ "0.8370706", "0.8281029", "0.8249505", "0.81395245", "0.81143504", "0.80990154", "0.8094557", "0.8080119", "0.8068112", "0.8067861", "0.806291", "0.806291", "0.806291", "0.806291", "0.806291", "0.806291", "0.806291", "0.80574095", "0.80550015", "0.80407953", "0.80257154", "0.8007452", "0.79747736", "0.7951895", "0.79437083", "0.7927402", "0.7884059", "0.788364", "0.78746396", "0.78711206", "0.78366745", "0.78366745", "0.78366745", "0.78366745", "0.78366745", "0.7831737", "0.78184", "0.7816456", "0.7801747", "0.7769968", "0.7759422", "0.7650801", "0.7620746", "0.7585498", "0.75051534", "0.7499348", "0.7499348", "0.7481732", "0.7459538", "0.7458743", "0.74318534", "0.73730826", "0.73582643", "0.72450906", "0.7236445", "0.72145814", "0.7186117", "0.71325016", "0.7131516", "0.71238446", "0.70901495", "0.70883083", "0.70719856", "0.70614535", "0.7045942", "0.69866765", "0.69866765", "0.6982334", "0.6981496", "0.6931782", "0.6917148", "0.68754506", "0.68338764", "0.6825664", "0.6802021", "0.67897403", "0.67522585", "0.67378074", "0.67342407", "0.6733692", "0.6719603", "0.67040336", "0.66820043", "0.6623042", "0.6601861", "0.659843" ]
0.8045328
29
Treats wildcards as errors
def custom_score(stats):
    convention = stats['convention']
    error = stats['error']
    refactor = stats['refactor']
    warning = stats['warning']
    statement = stats['statement']
    wildcards = stats['by_msg'].get('wildcard-import', False)
    if wildcards:
        warning = warning - wildcards
        error = error + wildcards
        return 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
    return stats['global_note']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_wildcard_all(self):\n with self.assertRaisesRegex(\n ValueError, \"WILDCARD_ALL passed with other key information\"):\n _path.RootOper.Foo(_defs.WILDCARD_ALL, 4)", "def test_wildcard_at_opening_of_string(self):\n with self.assertRaises(index.QueryError):\n wildcard_escape(\"*nope\")\n\n with self.assertRaises(index.QueryError):\n Q_(\"match\", \"title\", \"*nope\")", "def _wildcardformat(regxval):\n if regxval == None:\n return None\n else:\n try:\n return regxval.replace(\"*\",\"%\").replace(\"?\",\"_\")\n except AttributeError:\n return regxval", "def test_handle_wildcard(self):\n sequence1 = 'ATCG'\n sequence2 = 'ATNG'\n sequence3 = 'NNCN'\n self.assertEqual(handle_wildcard(sequence1), ['ATCG'])\n self.assertEqual(handle_wildcard(sequence2), [\"%AT_G%\"])\n self.assertEqual(handle_wildcard(sequence3), [\"%__C_%\"])", "def test_searchWildcard(self):\n self.assertFalse(\n self.server.search_UID([b'2:3'], self.seq, self.msg, (1, 1234)))\n # 2:* should get translated to 2:<max UID> and then to 1:2\n self.assertTrue(\n self.server.search_UID([b'2:*'], self.seq, self.msg, (1, 1234)))\n self.assertTrue(\n self.server.search_UID([b'*'], self.seq, self.msg, (1, 1234)))", "def test_asterisk(self):\n with self.assertRaises(ValidationError):\n field_name_validator('logstash*')", "def test_match_any_wildcard_is_present(self):\n qs = \"Foo t*\"\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertTrue(wildcard, \"Wildcard should be detected\")\n self.assertEqual(qs, qs_escaped, \"The querystring should be unchanged\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"wildcard\", title=qs)),\n \"Wildcard Q object should be generated\",\n )", "def test_multiple_match_any_wildcard_in_literal(self):\n qs = '\"Fo*o t*\"'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped, r'\"Fo\\*o t\\*\"', \"Both wildcards should be escaped\"\n )\n self.assertFalse(wildcard, \"Wildcard should not be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"match\", title=r'\"Fo\\*o t\\*\"')),\n \"Wildcard Q object should not be generated\",\n )", "def test_match_any_wildcard_in_literal(self):\n qs = '\"Foo t*\"'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped, r'\"Foo t\\*\"', \"Wildcard should be escaped\"\n )\n self.assertFalse(wildcard, \"Wildcard should not be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"match\", title=r'\"Foo t\\*\"')),\n \"Wildcard Q object should not be generated\",\n )", "def test_asterisk(self):\n with self.assertRaises(ValidationError):\n db_name_validator('logstash*')", "def test_mixed_wildcards_in_literal(self):\n qs = '\"Fo? t*\"'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped, r'\"Fo\\? t\\*\"', \"Both wildcards should be escaped\"\n )\n self.assertFalse(wildcard, \"Wildcard should not be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"match\", title=r'\"Fo\\? 
t\\*\"')),\n \"Wildcard Q object should not be generated\",\n )", "def test_wildcards_inside_outside_multiple_literals(self):\n qs = '\"Fo?\" s* \"yes*\" o?'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped,\n r'\"Fo\\?\" s* \"yes\\*\" o?',\n \"Wildcards in literal should be escaped\",\n )\n self.assertTrue(wildcard, \"Wildcard should be detected\")\n\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"wildcard\", title=r'\"Fo\\?\" s* \"yes\\*\" o?')),\n \"Wildcard Q object should be generated\",\n )", "def test_searchWildcardHigh(self):\n self.assertTrue(\n self.server.search_UID([b'1235:*'], self.seq, self.msg, (1234, 1)))", "def test_pattern_fail_load(self):\n\t\tsettings.put('output.file_name_pattern', '[type]-[id]-[title]-[author-[subreddit]-[source_alias]')\n\t\ttp = self.sess.query(sql.Post).filter(sql.Post.title == 'test').first()\n\t\twith self.assertRaises(Exception, msg='Failed to catch broken pattern!'):\n\t\t\tng.choose_file_name(tp.urls[0], tp, sql.session(), album_size=1)", "def EscapeWildcards(string: Text) -> Text:\n precondition.AssertType(string, Text)\n return string.replace(\"%\", r\"\\%\").replace(\"_\", r\"\\_\")", "def test_wildcards_both_inside_and_outside_literal(self):\n qs = '\"Fo? t*\" said the *'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped,\n r'\"Fo\\? t\\*\" said the *',\n \"Wildcards in literal should be escaped\",\n )\n self.assertTrue(wildcard, \"Wildcard should be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"wildcard\", title=r'\"Fo\\? t\\*\" said the *')),\n \"Wildcard Q object should be generated\",\n )", "def test_mult_specifiers_missing(self):\n template = '{0} too few {1}'\n value_count = 3\n msg = ('The formatter contains too few \"{}\" '\n 'specifiers for the number of source fields.')\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def test_add_patterns_warns_if_spaczz_type_unrecognized(ruler: SpaczzRuler) -> None:\n with pytest.warns(PatternTypeWarning):\n ruler.add_patterns([{\"label\": \"GPE\", \"pattern\": \"Montana\", \"type\": \"invalid\"}])", "def test_wildcard(word_list):\n failure=False\n\n # test 1\n hand = {'a': 1, 'r': 1, 'e': 1, 'j': 2, 'm': 1, '*': 1}\n word = \"e*m\"\n\n if is_valid_word(word, hand, word_list):\n print(\"FAILURE: test_is_valid_word() with wildcards\")\n print(\"\\tExpected False, but got True for word: '\" + word + \"' and hand:\", hand)\n\n failure = True\n\n # test 2\n hand = {'n': 1, 'h': 1, '*': 1, 'y': 1, 'd':1, 'w':1, 'e': 2}\n word = \"honey\"\n\n if is_valid_word(word, hand, word_list):\n print(\"FAILURE: test_is_valid_word() with wildcards\")\n print(\"\\tExpected False, but got True for word: '\"+ word +\"' and hand:\", hand)\n\n failure = True\n\n # test 3\n hand = {'n': 1, 'h': 1, '*': 1, 'y': 1, 'd':1, 'w':1, 'e': 2}\n word = \"h*ney\"\n\n if not is_valid_word(word, hand, word_list):\n print(\"FAILURE: test_is_valid_word() with wildcards\")\n print(\"\\tExpected True, but got False for word: '\"+ word +\"' and hand:\", hand)\n\n failure = True\n\n # test 4\n hand = {'c': 1, 'o': 1, '*': 1, 'w': 1, 's':1, 'z':1, 'y': 2}\n word = \"c*wz\"\n\n if is_valid_word(word, hand, word_list):\n print(\"FAILURE: test_is_valid_word() with wildcards\")\n print(\"\\tExpected False, but got True for word: '\"+ word +\"' and hand:\", hand)\n\n failure = True \n\n # dictionary of words and scores WITH wildcards\n words = 
{(\"h*ney\", 7):290, (\"c*ws\", 6):176, (\"wa*ls\", 7):203}\n for (word, n) in words.keys():\n score = get_word_score(word, n)\n if score != words[(word, n)]:\n print(\"FAILURE: test_get_word_score() with wildcards\")\n print(\"\\tExpected\", words[(word, n)], \"points but got '\" + \\\n str(score) + \"' for word '\" + word + \"', n=\" + str(n))\n failure=True \n\n if not failure:\n print(\"SUCCESS: test_wildcard()\")", "def wildcard(s, star_min=1):\n\n def _feed_parts(input_parts):\n for part in input_parts:\n if part == \"*\":\n if star_min == 0:\n yield \".*\"\n elif star_min == 1:\n yield \".+\"\n else:\n yield f\".{{{star_min},}}\"\n elif part == \"?\":\n yield \".\"\n else:\n yield re.escape(part)\n\n return \"\".join(_feed_parts(re.split(r'([\\?\\*])', s)))", "def _regexify_matching_pattern(rule_pattern: str, wildcard_optional=False) -> str:\n return rule_pattern.replace(\"*\", f\"(.{'+*'[wildcard_optional]})\")", "def test_TSE_common(self):\n\n regexpr = re.compile(r'^Error [0-9]{4}: [\\w :-]+$')\n for code in TwitterSearchException._error_codes:\n self.assertTrue( regexpr.match( str(TwitterSearchException(code)) ), \"Odd string patterns detected\")\n\n foo = \"someString\"\n tse = \"%s\" % TwitterSearchException(2000,foo)\n self.assertTrue( regexpr.match(tse) and tse[len(foo)*-1:] == foo )", "def _patternToRegEx(self,pattern):\n if (pattern == \"*\"):\n # special case that matches anything\n regex = \".*?\"\n else:\n regex = pattern\n if (regex.find(\".\") >= 0):\n regex = regex.replace(\".\", \"\\.\")\n #endIf\n \n asteriskIndex = regex.find(\"*\")\n if (asteriskIndex < 0):\n # no wildcard in pattern\n regex = \"%s$\" % regex\n elif (asteriskIndex + 1 != len(regex)):\n raise TraceSpecificationException(\"Invalid entity pattern: %s. A wildcard character may only be used to terminate a pattern.\" % pattern)\n else:\n # remove * and add \".*?\"\n regex = \"%s.*?\" % regex[:-1]\n #endIf\n #endIf\n return regex", "def validate(**vkargs):\r\n depr('Use route wildcard filters instead.')\r\n def decorator(func):\r\n @functools.wraps(func)\r\n def wrapper(*args, **kargs):\r\n for key, value in vkargs.iteritems():\r\n if key not in kargs:\r\n abort(403, 'Missing parameter: %s' % key)\r\n try:\r\n kargs[key] = value(kargs[key])\r\n except ValueError:\r\n abort(403, 'Wrong parameter format for: %s' % key)\r\n return func(*args, **kargs)\r\n return wrapper\r\n return decorator", "def not_found(error):\n pass", "def test_filterSamples_strict(self):\n with self.assertRaises(ValueError):\n self.overview_map.filterSamples(['PC.356', 'abc123'])\n\n with self.assertRaises(ValueError):\n self.empty_map.filterSamples(['foo'])", "def wildcard(pattern):\n wildcards = pattern.count('?')\n alphabet = ['0', '1']\n\n def xcombinations(items, length):\n if length == 0:\n yield []\n else:\n for i in xrange(len(items)):\n for sc in xcombinations(items, length - 1):\n yield [items[i]] + sc\n\n for combination in xcombinations(alphabet, wildcards):\n buff = ''\n for c in pattern:\n if c == '?':\n buff += combination.pop()\n else:\n buff += c\n yield buff", "def test_no_greplist_raises(self):\n line_no_matches_ngreps(self.line)", "def _is_wildcard_match(s, wildcard):\n\n wildcard = wildcard.strip()\n glob_pat = re.compile(r'\\*(:(?P<type>\\w+))?$')\n m = glob_pat.match(wildcard)\n if m:\n if m.group('type'):\n type_to_meth = globals()['__builtins__']\n type_to_meth = {k:v for k,v in type_to_meth.items()\n if k in ['str','int','float','bool']}\n try:\n return isinstance(s, type_to_meth[m.group('type')])\n except 
KeyError:\n raise InvalidWildcardError(\"{} is an invalid type in {}\".format(\n m.group('type'), wildcard))\n return True\n raise InvalidWildcardError(wildcard)", "def test_filterSamples_strict(self):\r\n with self.assertRaises(ValueError):\r\n self.overview_map.filterSamples(['PC.356', 'abc123'])\r\n\r\n with self.assertRaises(ValueError):\r\n self.empty_map.filterSamples(['foo'])", "def add_command_wildcard(self, pattern):\n self._command_wildcards.append(pattern)", "def check_errors(stderr):\n for ee in err_regex:\n if ee['re'].search(stderr) is not None:\n raise RuntimeError(ee['message'])", "def test_validate_and_write_error_pattern_raises(req):\n handle = StringIO()\n req.get('http://fake/', text=u'ID list is empty')\n r = requests.get('http://fake/')\n config = core.Config()\n\n with pytest.raises(BadPatternError):\n core._validate_and_write(r, handle, 'FAKE', config)\n\n req.get('http://fake/', text=u'Error: CEFetchPApplication::proxy_stream(): Failed to retrieve sequence: NC_405534')\n r = requests.get('http://fake/')\n with pytest.raises(BadPatternError):\n core._validate_and_write(r, handle, 'FAKE', config)", "def _check_params(pattern, unknown):\n try:\n pattern = str(pattern)\n unknown = str(unknown)\n except ValueError as error:\n print \"Do something with error: %s\" % error\n raise\n return pattern, unknown", "def test_single_specifier_needed(self):\n template = '{0} one too many {1}'\n value_count = 1\n msg = ('The formatter should only contain one '\n '\"{}\" specifier for the source field.')\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def test_single_specifier_missing(self):\n template = 'missing'\n value_count = 1\n msg = 'The formatter should contain one \"{}\" specifier.'\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def test_createWithSingleWildcard(self):\n m = MessageSet(None)\n self.assertEqual(str(m), \"*\")\n self.assertEqual(len(m), 1)\n self.assertRaises(TypeError, list, m)", "def match(pattern: List[str], source: List[str]) -> List[str]:\n sind = 0 # current index we are looking at in the source list\n pind = 0 # current index we are looking at in the pattern list\n result: List[str] = [] # to store the substitutions that we will return if matched\n acc = ''\n\n # keep checking as long as we haven't hit the end of both pattern and source\n while sind != len(source) or pind != len(pattern): \n # Your job is to fill out the body fo this loop\n # 1) if we reached the end of the pattern but not source \n if pind == len(pattern):\n return None\n # 2) if the current thing in the pattern is a %\n elif pattern[pind] == '%':\n pind += 1 # moving from % to next word \n while sind != len(source):\n if pind != len(pattern) and pattern[pind] == source[sind]:\n break \n else: \n if acc == \"\": \n acc += source[sind] # if it is the first character do not add a space \n else: \n acc += \" \"\n acc += source[sind]\n sind += 1\n result.append(acc)\n acc = ''\n # 3) if we reached the end of the source but not the pattern\n elif sind == len(source):\n return None \n # 4) if the current thing in the pattern is an _\n elif pattern[pind] == '_':\n result.append(source[sind])\n sind += 1\n pind += 1\n #appending is for lists and adding is for strings\n # 5) if the current thing in the pattern is the same as the current thing \n # in the source\n elif pattern[pind] == source[sind]:\n sind += 1\n pind += 1\n # 6) else : this will happen if none of the 
other conditions are met\n # it indicates the current thing it pattern doesn't match the current\n # thing in source\n else: \n return None\n return result", "def wildcard_match(item, base, wildcard):\n if wildcard.startswith(\"**/\"):\n wildcard = wildcard[3:]\n for base_element in base.split(\"/\"):\n if fnmatch.fnmatch(base_element, wildcard):\n return True\n return False\n else:\n return fnmatch.fnmatch(item, wildcard)", "def check_tie(results):\n best_pattern = ''\n greater_rate = 0\n for item in results:\n rate = 0\n for index, value in enumerate(item.split(config['pattern_separator'])):\n if(value == config['wildcard']):\n rate += index\n\n if(rate > greater_rate):\n greater_rate = rate\n best_pattern = item\n\n return best_pattern", "def __init__(self, error_search=\"error\"):\n self.error_search = error_search", "def test_invalid_type_input(self):\n\n with self.assertRaises(TypeError):\n sv.match('div', \"not a tag\")\n\n with self.assertRaises(TypeError):\n sv.select('div', \"not a tag\")\n\n with self.assertRaises(TypeError):\n sv.filter('div', \"not a tag\")\n\n with self.assertRaises(TypeError):\n sv.comments('div', \"not a tag\")", "def err(reason):\n raise AliasException(reason)", "def _filter_return_errors(self, url, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n if url in entry[\"request\"][\"url\"] and temp not in matches and entry[\"response\"][\"status\"] >= 400:\r\n print \"\\nRequest failed w/ \" + str(entry[\"response\"][\"status\"]) + \" error:\\n\" + entry[\"request\"][\"url\"]\r\n if entry[\"response\"][\"content\"].get(\"text\"):\r\n print \"RESPONSE: \" + str(entry[\"response\"][\"content\"][\"text\"].encode('ascii', 'ignore'))\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n matches.append(temp)\r\n return matches", "def test_bad_filter_names(tool):\n\n for cmd in (\"filter\", \"stats\", \"report\"):\n for argname in (\"rfilt\", \"rsel\", \"cfilt\", \"csel\"):\n # 'report' command don't have 'cfilt' and 'csel' arguments.\n if cmd == \"report\" and argname.startswith(\"c\"):\n continue\n # Need only one good testdata path.\n args = f\"--{argname} 'bad_filter' {tool.good_paths[0]}\"\n with pytest.raises(Exceptions.Error):\n tool.command(cmd, args)", "def test_raises_useful_exception(self):\n exp = Expression(r'inalid (\\d]', {}, [], lambda x: x)\n with self.assertRaises(exp.InvalidPattern):\n assert not exp.pattern", "def test_name(self):\n\n self.check_search(\n dict(name=u'flamethrower'),\n [u'Flamethrower'],\n 'searching by name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'durp'),\n [],\n 'searching for a nonexistent name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'quICk AttACk'),\n [u'Quick Attack'],\n 'case is ignored',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'thunder'),\n [ u'Thunder', u'Thunderbolt', u'Thunder Wave',\n u'ThunderShock', u'ThunderPunch', u'Thunder Fang'],\n 'no wildcards is treated as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'*under'),\n [u'Thunder'], # not ThunderShock, etc.!\n 'splat wildcard works and is not used as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'b?te'),\n [u'Bite'], # not Bug Bite!\n 'question wildcard works and is not used as substring',\n exact=True,\n )", "def test_exception_both(self):\n for word in ['pod', 'container']:\n pp.pod_or_container = word\n with 
self.assertRaisesRegex(Exception, \"in both left and right sides\"):\n pp.replace_type('<<pod 123|pod 321>>')", "def manifest_with_many_types_of_errors_helper(error_log):\n assert '\"invalid_authz\"' in error_log\n assert '\"invalid_int\"' in error_log\n assert '\"invalid_md5\"' in error_log\n assert '\"invalid_url\"' in error_log", "def is_wildcard(obj):\n return isinstance(obj, Symbol) and obj == Symbol('*')", "def test_extra_substitutions(modpath):\n retcode, out = flake8(\n join(modpath, \"RST305/sphinx-substitutions\"),\n substitutions=\"bar\",\n )\n assert not retcode, out", "def test_no_greplist_raises(self):\n line_matches_greps(self.line)", "def test_add_patterns_raises_error_if_not_spaczz_pattern(ruler: SpaczzRuler) -> None:\n with pytest.raises(ValueError):\n ruler.add_patterns([{\"label\": \"GPE\", \"pattern\": \"Montana\"}])", "def test_url_pattern(self):\n\t\turl = URLFilter()\n\t\turl.set_limit(\"goog*\")\n\t\tself.assertTrue(url.check(Object(get_urls=lambda: ['google.com'])))", "def test_exception_neither(self):\n for word in ['pod', 'container']:\n pp.pod_or_container = word\n with self.assertRaisesRegex(Exception, \"in either side\"):\n pp.replace_type('<<container 123|container 321>>')", "def test_bad_names(self):\n self.do_test_bad_name('', 'tmp/frog')\n self.do_test_bad_name('.b', 'tmp/frog')\n self.do_test_bad_name('a b', 'tmp/frog') # FAILS\n self.do_test_bad_name('a-b', 'tmp/frog') # FAILS", "def scan_error(self, line: int, message: str):\n self.report(line, \"\", message)", "def test_prepare_source_excepts(value, exception, pattern):\n with pytest.raises(exception, match=pattern):\n PseudoPotentialData.prepare_source(value)", "def test_raises_for_duplicates():\n with pytest.raises(ValueError, match='duplicate'):\n alias('name', ('duplicate', 'duplicate'))", "def regexp_error_msg(self, regexp_error_msg):\n\n self._regexp_error_msg = regexp_error_msg", "def test_lengthWithWildcardRange(self):\n self.assertRaises(TypeError, len, MessageSet(1, None))", "def error(self, *args, **kwargs):", "def metachar_demo():\n\n strings = [\n \"[email protected]\",\n \"[email protected]\",\n \"[email protected]\"\n ]\n regex = re.compile(r'[a-z]+@[a-z]*\\.[a-z]{3,6}')\n\n for string in strings:\n if not regex.search(string):\n print(f'{string} does not match!')", "def test_wrong_input(self):\n\n test_float = 2954.02\n test_list = [\"anagram\", \"gramana\"]\n with pytest.raises(AttributeError) as exc_info:\n is_anagram(test_float, test_list)\n expected_error_msg = \"Words must be strings!\"\n assert exc_info.match(expected_error_msg)", "def test_typeerror_in_case_of_string(self):\n eq_(None,grepit(\"\",\"\",\"\"))", "def check_imported_panda_wild_path(self, wild_id, sourcepath):\n if wild_id not in sourcepath:\n raise IdError(\"ERROR: %s: file path and wild id don't match: %s\"\n % (sourcepath, wild_id))", "def precious(*patterns: Any) -> Any: # type: ignore\n strings: List[str] = []\n for pattern in each_string(*patterns):\n if not isinstance(pattern, AnnotatedStr):\n pattern = AnnotatedStr(pattern)\n pattern.precious = True\n strings.append(pattern)\n if len(patterns) == 1 and isinstance(patterns[0], str):\n assert len(strings) == 1\n return strings[0]\n return strings", "def test_bad_input_data(tool):\n\n for cmd in (\"filter\", \"report\", \"start\", \"stats\"):\n for args in tool.bad_paths:\n if cmd == \"filter\":\n args = f\"--rfilt 'index!=0' {args}\"\n with pytest.raises(Exceptions.Error):\n tool.command(cmd, args)", "def resolve_filenames(expr):\n log.error('Cannot resolve: 
%s', expr)", "def match(path):\r\n url_args = re_match(path).groupdict()\r\n for name, wildcard_filter in filters:\r\n try:\r\n url_args[name] = wildcard_filter(url_args[name])\r\n except ValueError:\r\n raise HTTPError(400, 'Path has wrong format.')\r\n return url_args", "def check_funny_chars_in_names(names, is_full_qualified_name=True):\n if names and len(names) > 0:\n for name in names:\n if ('\\t' in name or '\\n' in name or '!' in name or ',' in name or\n (is_full_qualified_name and name.count('.') > 1) or (not is_full_qualified_name and name.count('.') > 0)):\n raise Exception('Name has an invalid character \"\\\\t\" \"\\\\n\" \"!\" \",\" \".\": \"%s\"' % name)", "def test_useless_alias():\n with pytest.raises(ValueError, match='duplicate'):\n alias('name', ('name',))", "def _format_wildcard(self, condition, value, query_type=\"sql\"):\n def _format(operator, param, match, match_replace=None):\n if match_replace:\n match = set([i.replace(*match_replace) for i in match])\n if operator in (\"IN\", \"NOT IN\"):\n match_str = \"(%s)\" % \", \".join([\"'%s'\" % i for i in match])\n ret = [\"%s %s %s\" % (param, operator, match_str)]\n else:\n ret = [\"%s %s '%s'\" % (param, operator, i) for i in match]\n return ret\n\n def _format_in(param, match):\n return _format(\"IN\", param, match)\n\n def _format_not_in(param, match):\n return _format(\"NOT IN\", param, match)\n\n def _format_like(param, match):\n return _format(\"LIKE\", param, match, match_replace=('*', '%'))\n\n def _format_not_like(param, match):\n return _format(\"NOT LIKE\", param, match, match_replace=('*', '%'))\n\n d = {\n \"sql\": {\n \"IN\": _format_in,\n \"NOT IN\": _format_not_in,\n \"CONTAINS\": _format_like,\n \"NOT CONTAINS\": _format_not_like,\n }\n }\n\n d_negate = {\n \"sql\": {\n \"IN\": \"NOT IN\",\n \"NOT IN\": \"IN\",\n \"CONTAINS\": \"NOT CONTAINS\",\n \"NOT CONTAINS\": \"CONTAINS\",\n }\n }\n\n try:\n d[query_type]\n except KeyError:\n raise exception.CollectorException(lang=query_type)\n\n # _expand_wildcards iterates over a list\n if not isinstance(value, list):\n value = [value]\n do_proportion, d_condition = self._expand_wildcards(value)\n logger.debug(\"Wildcard expanding result: %s\" % d_condition)\n\n r = []\n r_negate = []\n for mtype, mset in d_condition.iteritems():\n r.extend(d[query_type][mtype](condition, mset))\n if do_proportion:\n aux = d[query_type][d_negate[query_type][mtype]](condition,\n mset)\n r_negate.extend(aux)\n # Sort the results to get an expected output (unittest)\n r.sort()\n r_negate.sort()\n return r, r_negate", "def get_regex_mismatch_error_text(field_name, source_regex):\n\n\treturn(\"Value entered for '{0}' does not match regex '{1}'\"\n\t\t .format(field_name, source_regex.pattern))", "def test_wrong_input_type(self):\n with self.assertRaises(TypeError):\n votes_to_percentages(['not', 'a', 'queryset'])\n with self.assertRaises(TypeError):\n votes_to_percentages(Disposable.objects.all())", "def test_regex_bad_case_sensitivity(self):\n with self.assertRaises(despydb.UnknownCaseSensitiveError):\n self.dbh.get_regex_clause(\"'ABC'\", 'a.*', 'F')", "def filter(self, pattern):\n if isinstance(pattern, REGEX_TYPE):\n func = tools.filter_regex\n elif pattern.startswith('/'):\n pattern = re.compile(pattern.strip('/'))\n func = tools.filter_regex\n else:\n func = tools.filter_wildcard\n\n return SeeResult(func(self, pattern))", "def advanced_search(self, pattern):\n pass", "def refined_errors(self):\r\n errs = []\r\n for err in self.errors:\r\n if err['typo'].lower() not in 
self.terms:\r\n errs.append(err)\r\n return errs", "def test_pattern_with_asterix_dot_prefix(create_user):\n emails = [\"[email protected]\"]\n patterns = [\"*.bar.com\"]\n assert create_user.preprocess_pattern(emails, patterns) == True", "def insensitive_glob(pattern):\n def either(c):\n return '[%s%s]' % (c.lower(), c.upper()) if c.isalpha() else c\n\n file_list = sorted(glob.glob(''.join(map(either, pattern))))\n\n return file_list", "def clean_errors(self):\n self._vim.eval('clearmatches()')\n self._errors = []\n self._matches = []\n # Reset Syntastic notes - TODO: bufdo?\n self._vim.current.buffer.vars['ensime_notes'] = []", "def search_invalid_parameters(error):\n current_app.logger.info(str(error))\n return render_template(\"search.html\", query=error.query, error=error), 400", "def test_handle_raise_value_error(self) -> None:\n with pytest.raises(ValueError) as excinfo:\n FileLookup.handle(\"foo\")\n assert (\n str(excinfo.value) == \"Query 'foo' doesn't match regex: \"\n \"^(?P<codec>[base64|json|json-parameterized|parameterized|\"\n \"parameterized-b64|plain|yaml|yaml-parameterized]:.+$)\"\n )", "def t_error(t):\n print(\"Illegal character '%s'\" % repr(t.value[0]))\n t.lexer.skip(1)", "def test_regex_constraint(self):\n from petstore_api.model import apple\n\n # Test with valid regex pattern.\n inst = apple.Apple(\n cultivar=\"Akane\"\n )\n assert isinstance(inst, apple.Apple)\n\n inst = apple.Apple(\n cultivar=\"Golden Delicious\",\n origin=\"cHiLe\"\n )\n assert isinstance(inst, apple.Apple)\n\n # Test with invalid regex pattern.\n err_regex = r\"Invalid value `.+?`, must match regular expression `.+?` at \\('args\\[0\\]', 'cultivar'\\)\"\n with self.assertRaisesRegex(\n petstore_api.ApiValueError,\n err_regex\n ):\n inst = apple.Apple(\n cultivar=\"!@#%@$#Akane\"\n )\n\n err_regex = r\"Invalid value `.+?`, must match regular expression `.+?` at \\('args\\[0\\]', 'origin'\\)\"\n with self.assertRaisesRegex(\n petstore_api.ApiValueError,\n err_regex\n ):\n inst = apple.Apple(\n cultivar=\"Golden Delicious\",\n origin=\"!@#%@$#Chile\"\n )", "def __reWildcard(self, regexp, string):\n regexp = re.sub(\"\\*+\", \"*\", regexp)\n match = True\n if regexp.count(\"*\") == 0:\n if regexp == string:\n return True\n else:\n return False\n blocks = regexp.split(\"*\")\n start = \"\"\n end = \"\"\n if not regexp.startswith(\"*\"):\n start = blocks[0]\n if not regexp.endswith(\"*\"):\n end = blocks[-1]\n if start != \"\":\n if string.startswith(start):\n blocks = blocks[1:]\n else:\n return False\n if end != \"\":\n if string.endswith(end):\n blocks = blocks[:-1]\n else:\n return False\n blocks = [block for block in blocks if block != \"\"]\n if blocks == []:\n return match\n for block in blocks:\n i = string.find(block)\n if i == -1:\n return False\n string = string[i + len(block):]\n return match", "def sanitize_input(term: str) -> str:\n return term.strip().replace(\"*\", \"\").replace(\"'\", \"\\\\'\").replace(\"~\", \"\")", "def testWildcardGet(self):\n def _check(ignored):\n self.assertFilesEqual(self.testDir.child('testRemoveFile'),\n FilePath('testRemoveFile'),\n 'testRemoveFile get failed')\n self.assertFilesEqual(self.testDir.child('testRenameFile'),\n FilePath('testRenameFile'),\n 'testRenameFile get failed')\n\n d = self.runCommand('get testR*')\n return d.addCallback(_check)", "def findParsingFailure(self, s):\n\n rest = s\n matches = []\n for i in range(len(self.reParts)):\n thisre = '\\s*' + self.reParts[i] + '(.*)'\n m = re.match(thisre, rest, re.VERBOSE|re.IGNORECASE)\n if 
not m:\n if i == 0:\n dtype = self.name\n else:\n dtype = self.dtypes[i-1][0]\n raise RuntimeError('Cannot parse field %d (%s) at: %s; previous matches: %s' % (i, dtype, rest, ';'.join(matches)))\n newRest = m.groups()[-1]\n matchedText = rest[:-len(newRest)]\n matches.append(matchedText)\n rest = newRest\n raise RuntimeError('Hunh? Failed to find parsing error in %s' % s)", "def _checkSSFormatArg(ssformat):\n if ssformat == '':\n raise ShortStrException('ssformat argument cannot be the empty string')\n\n if not isinstance(ssformat, str):\n raise ShortStrException('ssformat argument must be a string with only characters *, c, l, u, and d')\n\n for c in ssformat:\n if c not in '*clud':\n raise ShortStrException('ssformat argument must be a string with only characters *, c, l, u, and d')", "def handle_errors(self, err_list):\n for (etype, err_cde, err_str, err_value, src_line) in err_list:\n if etype == 'isa':\n self.isa_error(err_cde, err_str)\n elif etype == 'gs':\n self.gs_error(err_cde, err_str)\n elif etype == 'st':\n self.st_error(err_cde, err_str)\n elif etype == 'seg':\n self.seg_error(err_cde, err_str, err_value, src_line)", "def search(self, pattern):\n raise NotImplementedError()", "def register_errors(app):\n # TODO\n pass", "def test_raise_error_unknown_field_filtered_files():\n\n files = ['Unihan_Variants.txt']\n\n options = {'input_files': files, 'fields': ['kDefinition']}\n\n with pytest.raises(KeyError) as excinfo:\n process.Packager(options)\n excinfo.match('Field ([a-zA-Z].*) not found in file list.')", "def _get_retriable_errors(out: List[str]) -> List[str]:\n return [\n line for line in out\n if any(error in line for error in RETRIABLE_ERRORS)\n ]", "def test_syntax_converter_expand_search_patterns_alone(self):\n spi_search = \"find t bob sam\"\n inv_search = \"title:bob and title:sam\"\n self._compare_searches(inv_search, spi_search)", "def error_check(command):\r\n\r\n # TODO\r", "def execute(subs,patterns,wm):\n res = [] \n\n for p in patterns:\n tmp = substitute(subs, p)\n if tmp not in wm:\n res.append(tmp)\n \n return res", "def _restricted_search_mentions(val: str):\n try:\n val = str(val)\n except ValueError:\n raise argparse.ArgumentTypeError(f\"{val} could not be parsed to a string\")\n\n if not val.startswith('@'):\n return '@' + val\n return val", "def glob_to_regex(glob):\n res = \"\"\n for c in glob:\n if c == \"*\":\n res = res + \".*\"\n elif c == \"?\":\n res = res + \".\"\n else:\n res = res + re.escape(c)\n\n # \\A anchors at start of string, \\Z at end of string\n return re.compile(r\"\\A\" + res + r\"\\Z\", re.IGNORECASE)" ]
[ "0.6498623", "0.645085", "0.6279789", "0.6237322", "0.58695716", "0.57996774", "0.56907517", "0.5679271", "0.5674153", "0.55909884", "0.5565901", "0.5562286", "0.5515654", "0.5460359", "0.5418555", "0.5417609", "0.5408707", "0.53870875", "0.53637373", "0.53242457", "0.5293056", "0.52761424", "0.5276024", "0.5242383", "0.524235", "0.52177113", "0.521262", "0.5208669", "0.5205224", "0.5189475", "0.5173933", "0.5170795", "0.516431", "0.5162394", "0.5159902", "0.51101804", "0.50848454", "0.50594974", "0.50305444", "0.5003991", "0.49865937", "0.49861047", "0.4981612", "0.49537233", "0.49483413", "0.49475268", "0.49204993", "0.49055442", "0.49045888", "0.49033207", "0.48862612", "0.48815766", "0.48746502", "0.48706645", "0.4866408", "0.48660085", "0.48584357", "0.48556793", "0.4843128", "0.48407257", "0.4830137", "0.48272896", "0.48212978", "0.481482", "0.4784064", "0.4779967", "0.47745934", "0.4764156", "0.47614092", "0.47611347", "0.47599122", "0.47575468", "0.47574598", "0.47522682", "0.4744582", "0.4736423", "0.4735982", "0.47354582", "0.4733061", "0.4716816", "0.4713492", "0.47123766", "0.47107875", "0.47073862", "0.47065106", "0.4704855", "0.47029474", "0.46993414", "0.4694192", "0.46920988", "0.4687113", "0.4676978", "0.4661464", "0.46583414", "0.4656321", "0.4652615", "0.46519163", "0.4650364", "0.46493262", "0.46481615", "0.464411" ]
0.0
-1
Returns the data representation of the timer, will be used to send it over the web socket.
def get_list_data(self): key = 'timer' if self.repeated: key += '_repeat' return '%s %s' % (key, self.data.get_list_data())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_timer_data(self):\n return {\"sRef\": self._timer_service_ref_entry.get_text(),\n \"begin\": int(datetime.strptime(self._timer_begins_entry.get_text(), self._TIME_STR).timestamp()),\n \"end\": int(datetime.strptime(self._timer_ends_entry.get_text(), self._TIME_STR).timestamp()),\n \"name\": self._timer_name_entry.get_text(),\n \"description\": self._timer_desc_entry.get_text(),\n \"dirname\": \"\",\n \"eit\": self._timer_event_id_entry.get_text(),\n \"disabled\": int(not self._timer_enabled_switch.get_active()),\n \"justplay\": self._timer_action_combo_box.get_active_id(),\n \"afterevent\": self._timer_after_combo_box.get_active_id(),\n \"repeated\": self.get_repetition_flags()}", "def serialize(self):\n if self._serialized_timer is None:\n self._serialized_timer = self._enclave_wait_timer.serialize()\n\n return self._serialized_timer", "def timer(self):\n\n res = self.read_block(REG_TIMER, 4)\n\n return (res[3] << 24) + (res[2] << 16) + (res[1] << 8) + (res[0] << 0)", "def get_time(self, async = False):\n\n\t\tself._send_message(\"TIME\", \"\\x00\")\n\n\t\tif not async:\n\t\t\treturn EndpointSync(self, \"TIME\").get_data()", "def getTime(sock):\n MagicNo = 0x497E .to_bytes(2, \"big\")\n PacketType = 0x0002 .to_bytes(2, \"big\")\n if sock is s_english:\n LanguageCode = 0x0001\n flag = \"english\"\n elif sock is s_maori:\n LanguageCode = 0x0002\n flag = \"maori\"\n elif sock is s_german:\n LanguageCode = 0x0003\n flag = \"german\"\n date = datetime.datetime.today()\n LanguageCode = LanguageCode.to_bytes(2, \"big\")\n year = date.year.to_bytes(2, \"big\")\n month = (date.month).to_bytes(1, \"big\")\n day = date.day.to_bytes(1, \"big\")\n hour = date.hour.to_bytes(1, \"big\")\n minute = date.minute.to_bytes(1, \"big\")\n if flag == \"english\":\n text = f\"The current time is {date.hour}:{date.minute}\"\n elif flag == \"maori\":\n text = f\"Ko te wa o tenei wa {date.hour}:{date.minute}\"\n else:\n text = f\"Die Uhrzeit ist {date.hour}:{date.minute}\"\n\n lengthNow = len(text)\n length = lengthNow.to_bytes(1, \"big\")\n\n bytelist = [\n MagicNo,\n PacketType,\n LanguageCode,\n year,\n month,\n day,\n hour,\n minute,\n length,\n ]\n\n out = bytearray()\n\n for byteset in bytelist:\n out += byteset\n\n out.extend(text.encode(\"utf-8\"))\n\n return out", "def time(self):\n return self.raw[\"logTime\"]", "def _get_time(self):\n # get the current time in UTC (make sure we are timezone aware)\n now_utc = datetime.datetime.now(pytz.UTC)\n \n # convert to our local timezone\n timenow = now_utc.astimezone(self.timezone)\n \n # save the data to our data\n self.data['year'][0] = timenow.year\n self.data['month'][0] = timenow.month\n self.data['day'][0] = timenow.day\n self.data['hour'][0] = timenow.hour\n self.data['minute'][0] = timenow.minute\n self.data['second'][0] = timenow.second\n \n return", "def time(self):\n return self.time_array", "def on_timer(context, data_type, data):\n pass", "def heartbeat():\n return jsonify(int(time.time()))", "def gettime(self):\n return self.t", "def time(self) -> int:\n return self.raw[\"time\"]", "def getTime(self):\n return self.time", "def _send_data(self, data, time):\n pass", "def time(self):\n return signal_base_get_time(self.obj)", "def data(self) -> datetime:\n return self._data", "def recv_ts(self) -> int:\n pass", "def read_clock():\n return json.loads(_pump_output(\"read_clock\"))", "def output(self):\n return {\n \"time\": self.time,\n \"dmx\": self.dmx\n }", "def getTime(self, request, context):\n\t\t\n date = re.split(\"\\s\", 
datetime.utcnow().strftime(\"%Y %m %d %H %M %S\"))\n\n return droneconnect_pb2.Time(year = int(date[0]), month = int(date[1]), day = int(date[2]), hour = int(date[3]), minute = int(date[4]), second = int(date[5]))", "def getTimestamp(self):\r\n\t\treturn self.data['timestamp']", "def time(self):\n raise \"use method time of class ReactorNet\"\n #return _cantera.reactor_time(self.__reactor_id)", "def time(self):\n\t\treturn self._time", "def get_time(self):\n return self._ticks", "def time(self):\n return _cantera.reactornet_time(self.__reactornet_id)", "def get_time(self):\n return \"%02u:%02u:%02u (%d)\" % self.rtc.datetime()[4:8]", "def timer(self):\n\n # Start timer, if it hasn't been started already\n if self.start is None:\n self.start = datetime.datetime.now()\n return 0\n # End the timer, calculate elapsed time, format it, and return it\n else:\n self.end = datetime.datetime.now()\n elapsed = self.end - self.start\n minutes, seconds = divmod(elapsed.total_seconds(), 60)\n minutes = int(minutes)\n seconds = int(seconds)\n if seconds < 10:\n seconds = '0' + str(seconds)\n result = str(minutes) + ':' + str(seconds)\n return result", "def sendTime(self):\n timestamp = datetime.datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\")\n self.send(timestamp)", "def __create_msg(self, ping):\n now = rospy.get_rostime()\n output = {\n \"info\": {},\n \"timestamp\": int(now.secs * 1e3 + now.nsecs * 1e-6),\n \"data\": ping.T.tolist()\n }\n return json.dumps(output)", "def makeIdleData():\n return simplejson.dumps([\"idle\",[]])", "def servertime(self):\r\n return servertime.Servertime(self)", "def get_timed(self):\n ret = self.send(\"?T\", recv=True)\n ret = int(ret, 10)\n # FIXME: range?\n assert 1 <= ret <= 9999\n return ret", "def get_time(self) -> float:\n raise NotImplementedError()", "def getdata(self):\n return self.cwt", "def encode(self):\r\n tint = long(self.time)\r\n tfrac = long((self.time - tint)*1000000)\r\n return struct.pack(Format.Event, tsec, tfrac, self.eventType,\r\n self.eventCode, self.eventValue)", "def getRxTime(self):\n return self.rx_time", "def data(self):\n\t\tself.dworker()\n\t\treturn self.d", "def time(self):\n return self._time", "def get_time(self):\n return self.time", "def request_realtime_info(self):\n self.socket_datastream.sendto(b\"!r\", self.ip_port_arduino_datastream)\n self.socket_datastream.sendto(b\"!s\", self.ip_port_arduino_datastream)", "def output(self):\n time = \"%04d-%02d-%02d %02d:%02d:%02d\" % (self.year, self.month, \\\n self.date, self.hours, self.minutes, self.seconds)\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"type\": 4,\n \"enabled\": 1,\n \"abstime\": time\n }", "def _time(self):\n return self.r.eval(self.LUA_TIME, 1, 1)", "def time(self):\n return parse_time(self['timestamp'])", "def timers(self):\n return self['timers']", "def timers(self):\n return self.client.call('GET', self.name + '/timers')", "def timestamp(self):\n return self._data.get('timestamp')", "def json(self):\n beat = self.beat + 1.4 # replace with hjd\n w, h = self.getWidth(), self.getHeight()\n \n return {\n \"_time\": beat,\n \"_duration\": self.dur,\n #\"_lineIndex\": 0,\n #\"_type\": 0,\n #\"_width\": 0,\n \"_customData\": {\n # to undo the local rotation z transform we have to take trig parts of it and multiply them by the dimensions of the wall, then add them to the position\n \"_position\": [self.l + math.cos(math.radians(self.lrot[2] - 90)) * h / 2, self.d + math.sin(math.radians(self.lrot[2]-90)) * h / 2 + h / 2],\n \"_scale\": [w, h],\n 
\"_rotation\": self.rot,\n \"_localRotation\": self.lrot\n }\n }", "def get_data(self): # TODO: add smooth possibility\n return self.data", "def _send_time(self):\n if 'time' not in self.loopback_guard:\n content = {'time': self.time.isoformat()}\n self.send_action('set_time', content)", "def _get_meas_times_web_service(self, last_meas_time):\n subst = ''\n if self._segment and self._segment_value:\n if self._segment['partition_value_type'] == 'int':\n subst = self._segment_value['value_int']\n elif self._segment['partition_value_type'] == 'varchar':\n subst = self._segment_value['value_varchar']\n data_fetch_command_bind_parameter = self._segment['data_fetch_command_bind_parameter']\n else:\n data_fetch_command_bind_parameter = ''\n subst = ''\n\n #meas_times = self._outer_conn.query(last_meas_time, data_fetch_command_bind_parameter, subst, 'get_meas_times', None)\n ret_data = self._outer_conn.query(last_meas_time, data_fetch_command_bind_parameter, subst)\n self._web_service_data = dict()\n meas_times = {'header':'meas_time', 'data': list()}\n for meas_time, meas_data in ret_data.iteritems():\n meas_times['data'].append([meas_time])\n self._web_service_data[meas_time] = meas_data \n \n return meas_times", "def time_encoded(self):\n # type: () -> int\n return self._time_encoded", "def get_timestamp(self, data):\n timestamp = data['timestamp']\n return timestamp", "def getTime(self) -> float:\n return self.t", "def time(self):\r\n raise NotImplementedError", "def get_operation_times(self):\n self.write(\"TIMERS?\")\n timers = {}\n timers['psu'] = int(re.search(r\"\\d+\", self.read()).group())\n timers['laser'] = int(re.search(r\"\\d+\", self.read()).group())\n timers['laser_above_1A'] = int(re.search(r\"\\d+\", self.read()).group())\n self.read() # an empty line is at the end.\n return timers", "def elapsed(self):\n done, data1 = self._request('GS')\n if done:\n if data1[0] != '3':\n raise NotCharging\n done, data2 = self._request('GU')\n if done:\n return {\n 'seconds': int(data1[1]),\n 'Wh': float(data2[0])/3600\n }\n raise EvseError", "def t(self):\n return self._data_writer.get_current_run_time_ms()", "def get_time() -> int:\n return store.time", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def recording_data(self):\n return self._get('recording/data')", "def time(self):\n raise NotImplementedError()", "def get_time(self):\n return self.widget().time()", "def get_io_time(self):\n return self._io_time", "def realtime(self):\r\n return resource.RealTime(self)", "def get_time(self):\n return self._total_time", "def get_timer_details(id):\n\twith postgres, postgres.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:\n\t\tcur.execute(\"select * from mustard.timers where id=%s\", (id,))\n\t\treturn cur.fetchone()", "def read_raw(self):\n beats = self.microblaze.read_mailbox(0x4)\n interval_ms = self.microblaze.read_mailbox(0x8 + (beats % 4)*4)\n return beats, interval_ms", "def data(self) -> RawData:\r\n\r\n return self.__data", "def ts(self):\n return self._ts", "def into_data(self) -> Dict[str, Any]:\n data = dict(producer=self.producer)\n if self.mtime_ns > 0:\n data[\"mtime\"] = str(_datetime_from_nanoseconds(self.mtime_ns))\n return data", "def get_time(self):\n return ''", "def total_timer(msg):\n start = timer()\n yield\n t = timer() - start\n _TOTAL_TIMER_DATA[msg].feed(t)", "def data(self) -> dict:\n return self._event.get('data')", "def time_return(self):\n return self.time", "def 
get_time(self):\n return self.__time", "def time(self):\n return self._begin", "def json(self):\n\t\treturn datetime.now()", "def getDate(sock):\n months = {\n \"english\": [\n \"January\",\n \"February\",\n \"March\",\n \"April\",\n \"May\",\n \"June\",\n \"July\",\n \"August\",\n \"September\",\n \"October\",\n \"November\",\n \"December\",\n ],\n \"maori\": [\n \"Kohitatea\",\n \"Hui-tanguru\",\n \"Poutu ̄-te-rangi\",\n \"Paenga-whawha\",\n \"Haratua\",\n \"Pipiri\",\n \"Hongongoi\",\n \"Here-turi-koka\",\n \"Mahuru\",\n \"Whiringa-a-nuku\",\n \"Whiringa-a-rangi\",\n \"Hakihea\",\n ],\n \"german\": [\n \"Januar\",\n \"Februar\",\n \"Marz\",\n \"April\",\n \"Mai\",\n \"Juni\",\n \"Juli\",\n \"August\",\n \"September\",\n \"Oktober\",\n \"November\",\n \"Dezember\",\n ],\n }\n\n MagicNo = 0x497E .to_bytes(2, \"big\")\n PacketType = 0x0002 .to_bytes(2, \"big\")\n if sock is s_english:\n LanguageCode = 0x0001\n flag = \"english\"\n elif sock is s_maori:\n LanguageCode = 0x0002\n flag = \"maori\"\n elif sock is s_german:\n LanguageCode = 0x0003\n flag = \"german\"\n date = datetime.datetime.today()\n LanguageCode = LanguageCode.to_bytes(2, \"big\")\n year = date.year.to_bytes(2, \"big\")\n language_months = months[flag]\n chosen_month = language_months[(date.month - 1)]\n month = date.month.to_bytes(1, \"big\")\n day = date.day.to_bytes(1, \"big\")\n hour = date.hour.to_bytes(1, \"big\")\n minute = date.minute.to_bytes(1, \"big\")\n if flag == \"english\":\n text = \"Today's date is {} {}, {}\".format(chosen_month, date.day, date.year)\n elif flag == \"maori\":\n text = \"Ko te ra o tenei ra ko {} {}, {}\".format(\n chosen_month, date.day, date.year\n )\n else:\n text = \"Heute ist der {} {}, {}\".format(chosen_month, date.day, date.year)\n\n lengthNow = len(text)\n length = lengthNow.to_bytes(1, \"big\")\n\n bytelist = [\n MagicNo,\n PacketType,\n LanguageCode,\n year,\n month,\n day,\n hour,\n minute,\n length,\n ]\n\n out = bytearray()\n\n for byteset in bytelist:\n out += byteset\n\n out.extend(text.encode(\"utf-8\"))\n\n return out", "def get_measurements(self):\r\n self.msg_send_upr.data[0] = b\"\\xff\"[0]\r\n self.send_and_flush(self.msg_send_upr)", "def data(self):\n return self._data", "def get_time_info(self):\n\n raise NotImplementedError", "def encode(self, rosMsg):\r\n try:\r\n return (rosMsg.to_sec(), {})\r\n except AttributeError:\r\n raise TypeError('Received object is not a Duration instance.')", "def time(self) -> str:\n return typing.cast(\n str,\n self._properties.get(\"time\"),\n )", "def text(message):\n room = session.get('room')\n emit('timerupdate', {'msg': message}, room=room)", "def __str__(self):\n return_text = \"Time-Triggered Frame information =>\\n\"\n return_text += \" Sender id : \" + str(self.__sender_id) + \"\\n\"\n return_text += \" Receivers ids : \" + str(self.__receivers_id) + \"\\n\"\n return_text += \" Path : \" + str(self.__paths) + \"\\n\"\n return_text += \" End_to_End : \" + str(self.__end_to_end_delay) + \" nanoseconds\\n\"\n return_text += \" Period : \" + str(self.__period) + \" nanoseconds\\n\"\n return_text += \" Starting : \" + str(self.__starting_time) + \" nanoseconds\\n\"\n return_text += \" Deadline : \" + str(self.__deadline) + \" nanoseconds\\n\"\n return_text += \" Size : \" + str(self.__size) + \" bytes\"\n return return_text", "def get_data(self):\n data = {\n \"ts\": self.drone.pos[0][0],\n \"drone\": self.drone,\n \"subject\": self.subject,\n \"peds\": self.peds, # can be None\n \"objs\": self.objs # can be None\n }\n 
self.empty_bag()\n return data", "def recv_time(self) -> float:\n return ntp_to_system_time(self.recv_timestamp)", "def net_delay_data(self):\n return self._net_delay_data", "def rcvStrTimeOut(self, num=1, tou=0.1):\r\n\t\treturn self.rcvDataTimeOut(num, tou)", "def time(self):\n # type: () -> int\n return self._time", "def get_time(self) -> int:\n t = str(self.eval(\"pyb.RTC().datetime()\").encode(\"utf-8\"))[1:-1].split(\", \")\n return int(t[4]) * 3600 + int(t[5]) * 60 + int(t[6])", "def get_time(self):\n return self._current_time_sec", "def time(self):\n self.convert_window(\"Time\", \"seconds\", [\"centuries\", \"days\", \"decades\", \"femtoseconds\", \"fortnights\", \"hours\", \"microseconds\", \"millenia\", \"milliseconds\", \"minutes\", \"months(Common)\", \"months(Synodic)\", \"nanoseconds\", \"picoseconds\", \"quarters(Common)\", \"seconds\", \"shakes\", \"weeks\", \"years(Average Gregorian)\", \"years(Common)\", \"years(Julian)\", \"years(Leap)\", \"years(Tropical)\"])", "def get_rtt_message(self):\n return self.messages[\"rtt\"].get()", "def getTimeframedData(self, website, timeframe, currentTime=time.time()):\n timeList = list(website.log.keys())\n # inside the dic from most recent to most ancient\n # reverse order\n # list of time of requests\n inFrame = []\n # getting the times within the timeframe\n for listind in range(len(timeList)):\n if (currentTime-timeList[len(timeList)-1-listind] <= timeframe):\n inFrame.append(timeList[len(timeList)-1-listind])\n # Indicators\n # Max\n maxTime = self.computeMaxResponseTime(website, inFrame)\n # Avg\n avgTime = self.computeAvgResponsetime(website, inFrame)\n # Availability\n availability = self.computeAvailability(website, inFrame)\n # Status\n status = self.computeStatus(website, currentTime)\n\n # Alert checking with 120 timeframe\n if (timeframe == 120):\n self.checkForIsDownAlert(website= website, availability= availability)\n self.checkForIsUpAlert(website=website, availability=availability)\n\n\n return {'website': website, 'frame': timeframe,'time': currentTime, 'indicators': {'maxTime': maxTime, 'avgTime': avgTime, 'availability': availability, 'status': status}}", "def gettime(self):\n interval, value = _timerfd._timerfd.gettime(self)\n interval = self._join_time(*interval)\n value = self._join_time(*value)\n return interval, value", "def get_time(self):\n\t\treturn time.time()", "async def send_data(message, nats_handler, shared_storage, logger):\n time = message.data[\"time\"]\n for i in range(shared_storage[\"data_rate\"]):\n time_struct= datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S.%f\").timetuple()\n x = time_struct.tm_mon + time_struct.tm_mday/30 + time_struct.tm_hour/720 + time_struct.tm_min/43200 + time_struct.tm_sec/2592000\n data_value = 4.2*math.sin((270000*(x + 1))*3.14/6) + 13.7\n message = nats_handler.create_message(data_value, MessageSchemas.IOT_DATA_MESSAGE)\n message.message_type = \"temperature\"\n await nats_handler.send_data(\"data.out\", message)\n await asyncio.sleep(0.5/shared_storage[\"data_rate\"])" ]
[ "0.7045231", "0.6424909", "0.63322824", "0.61530703", "0.61145544", "0.6017771", "0.592645", "0.59186465", "0.59073746", "0.5903025", "0.5891447", "0.58581436", "0.5799672", "0.5785056", "0.575003", "0.5743803", "0.5738986", "0.5706574", "0.5649952", "0.5620999", "0.5616731", "0.5606849", "0.5595778", "0.5593622", "0.5584529", "0.5584442", "0.5577718", "0.5561627", "0.5555483", "0.5553853", "0.55471677", "0.5538207", "0.5536148", "0.5513676", "0.55039215", "0.55031854", "0.54957896", "0.5467836", "0.54626304", "0.5455963", "0.54494894", "0.5443651", "0.5437653", "0.54345816", "0.5424659", "0.5422028", "0.54120797", "0.540859", "0.5404225", "0.5385586", "0.5377068", "0.53737056", "0.53676236", "0.5367555", "0.53603905", "0.5357288", "0.5356994", "0.53509796", "0.5350158", "0.5350158", "0.5350158", "0.53497076", "0.53478116", "0.53445387", "0.5342203", "0.5341307", "0.5338737", "0.53176314", "0.5314807", "0.5309566", "0.5296483", "0.5293535", "0.52933407", "0.52891076", "0.5285794", "0.5277746", "0.5263928", "0.5263774", "0.5261562", "0.52610075", "0.52596754", "0.5256898", "0.5252535", "0.52502924", "0.52431315", "0.5241716", "0.5239632", "0.5237435", "0.5234069", "0.52209926", "0.52195686", "0.521871", "0.52177995", "0.5217759", "0.52176595", "0.5213565", "0.5209078", "0.5207727", "0.5201932", "0.5198534" ]
0.63464636
2
Return a dictionary version json serializable
def to_dict(self) -> Dict[str, Any]: return {'solver_type': type(self).__name__, 'lr_policy': self.lr_policy, 'base_lr': self.base_lr, 'gamma': self.gamma, 'momentum': self.momentum, 'max_iter': self.max_iter, 'weight_decay': self.weight_decay, 'iter_size': self.iter_size, 'stepsize': self.stepsize, 'stepvalues': self.stepvalues, 'cold_start': self.cold_start, 'cold_start_lr': self.cold_start_lr, 'cold_start_duration': self.cold_start_duration, 'from_prototxt': str(self.from_prototxt)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def json(self) -> Dict[str, Union[List, Dict, str, int, float]]:", "def asdict():\n pass", "def dict(self):\n\t\treturn self.json", "def to_json(self) -> Dict[str, Any]:\n return self.__dict__", "def to_dict(self) -> dict:", "def to_obj(self):\n return dict()", "def _as_dict(self):\n return dict(self.items())", "def dict(self):\n return objToDict(self)", "def as_dict(self):\n item = {}\n item['data'] = self.data\n item['created'] = str(self.created)\n item['tags'] = list(self.tags)\n item['path'] = str(self.path)\n\n #TODO\n #is item equivalent to a json.loads(json.dumps(self)) ???\n\n return item", "def as_dict(self):\n return dict(self.items())", "def as_dict(self):\n return dict(self.as_OD())", "def as_dict(self):\n return dict(self._d)", "def serialize(self):\n return {\n \"key\": self.key,\n \"value\": self.value\n }", "def dict(self) -> Dict:\r\n return super().dict()", "def dict(self) -> Dict:\r\n return super().dict()", "def as_dict(self):\n return asdict(self)", "def to_dict(self):\n data = {}\n for key, value in self.__dict__.items():\n try:\n data[key] = value.to_dict()\n except AttributeError:\n data[key] = value\n return data", "def convert_to_json(self):\n return self.__dict__", "def to_dict(cls) -> dict:\n raise NotImplementedError()", "def to_dict(self):", "def to_dict(self) -> Dict[str, Any]:\n return self.__dict__.copy()", "def to_dict(self):\r\n raise NotImplementedError", "def to_json(self):\n return json.dumps(self.dict)", "def asdict(self) -> dict:\n return self.__asdict(self)", "def json(self) -> Union[dict, list, str, int, float]:\n raise NotImplementedError # pragma: no cover", "def to_dict(self):\n return dict(self.__data)", "def to_dict(self):\n raise NotImplementedError", "def to_dict(self):\n return to_dict(self.__dict__)", "def toJson(self):\r\n return self.__dict__", "def to_dict(self) -> Dict:\n obj = super().to_dict()\n obj['values'] = self.values\n return obj", "def to_dict(self):\n return dict(self)", "def as_dict(self) -> dict[str, Any]:\n return {\n \"type\": self.type,\n \"timestamp\": self.timestamp,\n \"data\": self.data or {},\n }", "def asdict(v: Any) -> Dict[Any, Any]:\n return to_dict(v, reuse_instances=False, convert_sets=False)", "def to_dict(self) -> Dict[str, Any]:\n data = asdict(self)\n\n data = self._to_dict_transform(data)\n\n return data", "def get_dict(self):\n return", "def _to_dict(self) -> dict:\n pass", "def to_json(self):\n return json.dumps(self._asdict())", "def json(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)", "def __json__(self):\n filtered_dict = dict()\n\n for k, item in six.iteritems(self.__dict__):\n if k.startswith('_'):\n continue\n\n if hasattr(item, '__json__'):\n filtered_dict[k] = item.__json__\n else:\n filtered_dict[k] = serialize_obj(item)\n\n return filtered_dict", "def to_dict(self):\n raise NotImplementedError('To be implemented in subclass.')", "def to_dict(self, data):\n return json.loads(json.dumps(data))", "def dict() -> Dict:\n pass", "def to_dict(self):\n pass", "def GetJSON(self):\n return json.dumps(self.GetDict())", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n 
return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def _to_dict(self):\n return self.to_dict()", "def to_json(self) -> Dict[str, Any]:\n return json.loads(self.get_state().to_json())", "def as_dict(self):\n def append(d, key, value, is_iterative, is_primitive, is_enum):\n if value is None:\n if is_iterative:\n value = []\n elif is_primitive == False and is_enum == False:\n if is_iterative:\n value = map(lambda i : i.as_dict(), value)\n else:\n value = value.as_dict()\n d[key] = value\n\n # Populate a deep dictionary.\n d = dict()\n append(d, 'file', self.__file, False, False, False)\n append(d, 'file_reference', self.__file_reference, False, False, False)\n append(d, 'name', self.__name, False, True, False)\n return d", "def serialize(self):\n return {\n\n\n }", "def as_dict(self) -> dict:\n tmp_dict = self.__dict__\n return tmp_dict", "def json(self):\n json_data = {}\n\n for item in self.iteritems():\n try:\n value = item.get_value(json_serializable=True)\n except TypeError:\n value = \"NOT_SERIALIZABLE\"\n item_data = {\n \"tags\": list(item.tags),\n \"value\": value,\n }\n if item.name in json_data:\n json_data[item.name].append(item_data)\n else:\n json_data[item.name] = [item_data]\n\n return json_data", "def to_dict(self):\n return self._dict", "def to_dict(self) -> Dict[str, Union[dict, str, int]]:\n d = {}\n\n for k, v in self.__dict__.items():\n if k in self._exempt or v is None:\n continue\n if '__dataclass_fields__' in dir(v):\n d[k] = asdict(v)\n elif inspect.isclass(v) and issubclass(v, Base):\n d[k] = v.to_dict()\n elif hasattr(v, 'value'): # we assume this is an Enum value.\n d[k] = v.value\n else:\n d[k] = v\n return d", "def to_dict(self):\n\n # Return dict\n return self.dict", "def to_dict(self):\n return {}", "def to_dict(self):\n\n def transform_value(value):\n if isinstance(value, list):\n return [transform_value(v) for v in value]\n elif isinstance(value, abc.Mapping):\n return dict([\n (k, transform_value(v))\n for k, v in 
iteritems(value)])\n else:\n return value\n\n return transform_value(dict(self))", "def to_dict(self):\n out_dict = dict(self)\n for k, v in out_dict.items():\n if v is self:\n out_dict[k] = out_dict\n elif hasattr(v, 'to_dict'):\n out_dict[k] = v.to_dict()\n elif hasattr(v, 'to_list'):\n out_dict[k] = v.to_list()\n return out_dict", "def dict(self):\n return self.__dict__" ]
[ "0.8122776", "0.8008097", "0.79259527", "0.7621561", "0.7570507", "0.7563755", "0.7456708", "0.73646736", "0.7295484", "0.72896343", "0.72842276", "0.7273497", "0.72578883", "0.7255007", "0.7255007", "0.72454506", "0.7242269", "0.72290695", "0.72273296", "0.7204922", "0.7174911", "0.71731627", "0.7166118", "0.7152538", "0.7151474", "0.7114718", "0.710103", "0.7093956", "0.70930076", "0.70805573", "0.7074352", "0.7071119", "0.70616084", "0.70524424", "0.7044171", "0.7044025", "0.7043699", "0.70344657", "0.7031844", "0.7028355", "0.7004902", "0.70047474", "0.7000036", "0.6998036", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.69963247", "0.6992851", "0.6968231", "0.69551563", "0.69479066", "0.6944068", "0.6938871", "0.6936467", "0.6932285", "0.6925924", "0.6922566", "0.69182444", "0.69051516" ]
0.0
-1
Load parameters from dictionary.
def from_dict(cls, dictionary: Dict[str, Any]): return cls(**dictionary)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n if \"data\" in load_dict:\n self._data = load_dict[\"data\"][\"data\"][0]\n self._default = self._data\n else:\n self._logger.warning(\n \"Your parameter `%s` is empty, \"\n \"I did not find any data on disk.\" % self.v_full_name\n )\n\n if \"explored_data\" in load_dict:\n self._explored_range = [\n x for x in load_dict[\"explored_data\"][\"data\"].tolist()\n ]\n self._explored = True\n\n self._locked = True", "def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n if \"data\" in load_dict:\n dump = load_dict[\"data\"]\n self._data = pickle.loads(dump)\n else:\n self._logger.warning(\n \"Your parameter `%s` is empty, \"\n \"I did not find any data on disk.\" % self.v_full_name\n )\n\n try:\n self.v_protocol = load_dict[PickleParameter.PROTOCOL]\n except KeyError:\n # For backwards compatibility\n self.v_protocol = PickleParameter._get_protocol(dump)\n\n if \"explored_data\" in load_dict:\n explore_table = load_dict[\"explored_data\"]\n\n name_col = explore_table[\"idx\"]\n\n explore_list = []\n for name_id in name_col:\n arrayname = self._build_name(name_id)\n loaded = pickle.loads(load_dict[arrayname])\n explore_list.append(loaded)\n\n self._explored_range = explore_list\n self._explored = True\n\n self._default = self._data\n self._locked = True", "def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n try:\n self._data = load_dict[\"data\" + ArrayParameter.IDENTIFIER]\n\n if \"explored_data\" + ArrayParameter.IDENTIFIER in load_dict:\n explore_table = load_dict[\"explored_data\" + ArrayParameter.IDENTIFIER]\n\n idx = explore_table[\"idx\"]\n\n explore_list = []\n\n # Recall the arrays in the order stored in the ObjectTable 'explored_data__rr__'\n for name_idx in idx:\n arrayname = self._build_name(name_idx)\n explore_list.append(load_dict[arrayname])\n\n self._explored_range = [x for x in explore_list]\n self._explored = True\n\n except KeyError:\n super(ArrayParameter, self)._load(load_dict)\n\n self._default = self._data\n self._locked = True", "def load(self,params):\n self._register.clear()\n for key in params:\n self._register[key] = params[key]", "def load_parameters(self, session, data_dict):\n for layer in self.layers:\n layer.load_parameters(session, data_dict)", "def _load_parameter(self):", "def _load(self, load_dict):\n try:\n self.v_protocol = load_dict.pop(PickleParameter.PROTOCOL)\n except KeyError:\n # For backwards compatibility\n dump = next(load_dict.values())\n self.v_protocol = PickleParameter._get_protocol(dump)\n for key in load_dict:\n val = load_dict[key]\n self._data[key] = pickle.loads(val)", "def load_dict(self, dct):\n pass", "def __loadParametersAndDefaults(self, dataPath, confFilename, nkeys, nvalues, keyType, valueType):\n params = self.loadConf(dataPath, confFilename=confFilename)\n\n # filter dict to include only recognized field names:\n for k in params.keys():\n if k not in SeriesLoader.BinaryLoadParameters._fields:\n del params[k]\n keywordParams = {'nkeys': nkeys, 'nvalues': nvalues, 'keytype': keyType, 'valuetype': valueType}\n for k, v in keywordParams.items():\n if not v:\n del keywordParams[k]\n params.update(keywordParams)\n return SeriesLoader.BinaryLoadParameters(**params)", "def load_params(self):\n return 
self.params", "def load_params(params_filename: str) -> Dict:\n \n # If no params filename is specified, return the default parameter setting.\n if not params_filename:\n return RunParams()\n\n return RunParams(**load_json(params_filename))", "def load_params():\r\n return pickle.load(open('params.p', mode='rb'))", "def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n try:\n serial_string = load_dict[\"data%s\" % SparseParameter.IDENTIFIER]\n self._data = self._reconstruct_matrix(serial_string)\n\n if \"explored_data\" + SparseParameter.IDENTIFIER in load_dict:\n explore_table = load_dict[\"explored_data\" + SparseParameter.IDENTIFIER]\n idx_col = explore_table[\"idx\"]\n explore_list = []\n for irun, name_idx in enumerate(idx_col):\n serial_string = load_dict[\n \"xspm%s%08d\" % (SparseParameter.IDENTIFIER, name_idx)\n ]\n matrix = self._reconstruct_matrix(serial_string)\n explore_list.append(matrix)\n\n self._explored_range = explore_list\n self._explored = True\n\n except KeyError as e:\n super(SparseParameter, self)._load(load_dict)\n\n self._default = self._data\n self._locked = True", "def load_parameters(self, session, data_dict):\n for w in self.weights:\n name = w.name.rsplit(':', 1)[0]\n if name in data_dict:\n session.run(w.assign(data_dict[name]))", "def set_params(self, dic):\n if dic is not None:\n for key, val in zip(dic.keys(), dic.values()):\n if key in self.__dict__.keys():\n if isinstance(self.__dict__[key], Parameter):\n if isinstance(val, Parameter):\n self.__dict__[key] = val\n else:\n d = self.__dict__[key].__dict__\n self.__dict__[key] = Parameter(val, input_dimensional=d['_input_dimensional'],\n units=d['_units'],\n description=d['_description'],\n scale_object=d['_scale_object'],\n return_dimensional=d['_return_dimensional'])\n else:\n self.__dict__[key] = val", "def load_parameters(self):\n json_data = open(\"param.json\")\n data = json.load(json_data)\n self.items = data[\"items\"]\n self.pollInterval = self.items[0]['poll_interval']", "def __init__(self, yaml_dict):\n self._params = self._get_params_from_yaml_dict(yaml_dict)", "def get_params(self, params_dict={}):\n raise NotImplementedError()", "def set_params(self, dic):\n if dic is not None:\n for key, val in zip(dic.keys(), dic.values()):\n if key in self.__dict__.keys():\n self.__dict__[key] = val\n\n if 'scale_params' in self.__dict__.keys():\n self.scale_params.set_params(dic)\n if 'atmospheric_params' in self.__dict__.keys():\n if self.atmospheric_params is not None:\n self.atmospheric_params.set_params(dic)\n\n if 'atemperature_params' in self.__dict__.keys():\n if self.atemperature_params is not None:\n self.atemperature_params.set_params(dic)\n\n if 'oceanic_params' in self.__dict__.keys():\n if self.oceanic_params is not None:\n self.oceanic_params.set_params(dic)\n\n if 'ground_params' in self.__dict__.keys():\n if self.ground_params is not None:\n self.ground_params.set_params(dic)\n\n if 'otemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n self.gotemperature_params.set_params(dic)\n\n if 'gtemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n self.gotemperature_params.set_params(dic)", "def set_params(cls, param_dict):\n for param in param_dict:\n if param in cls.params:\n cls.params[param] = param_dict[param]\n else:\n raise AttributeError(\"Invalid parameter dictionary! 
Format: {'<param>': <value>}\")", "def from_dict(cls, dikt) -> 'Parameters':\n return util.deserialize_model(dikt, cls)", "def load(proxy=None, mode=None, parameters=None, json_path=None):\n ps = load_params(json_path)\n \n try:\n ps = ps[proxy]\n except:\n raise KeyError(\"`proxy` incorrect. Try one of: ['{}']\".format(\"', '\".join(ps.keys())))\n\n try:\n ps = ps[mode]\n except:\n raise KeyError(\"`mode` incorrect. Try one of: ['{}']\".format(\"', '\".join(ps.keys())))\n \n try:\n ps = ps[parameters]\n except:\n raise KeyError(\"`parameters` incorrect. Try one of: ['{}']\".format(\"', '\".join(ps.keys())))\n \n p = params(values=ps)\n p.param_name = parameters\n \n return p", "def load_params(param_vector=[]):\n params = {}\n param_vector_default = [-1.43,0.05,7.5,0.05,1.,40.,0.6,1.,5.5]\n\n if len(param_vector) != 0:\n params['alpha'], params['sigma_M'], params['M50'], params['sigma_mpeak'], params['B'], params['A'], params['sigma_r'], params['n'], params['Mhm'] = param_vector\n else:\n params['alpha'], params['sigma_M'], params['M50'], params['sigma_mpeak'], params['B'], params['A'], params['sigma_r'], params['n'], params['Mhm'] = param_vector_default\n\n return params", "def load_params(self, path: str):\n DistributedWorker.load_params(self, path)\n\n params = torch.load(path)\n self.dqn.load_state_dict(params[\"dqn_state_dict\"])\n print(\"[INFO] loaded the model and optimizer from\", path)", "def load_params():\n with open('params.p', mode='rb') as in_file:\n return pickle.load(in_file)", "def _prepare_param_dict(self, params_dict):\n return params_dict", "def load_params_from_file(path):\n save_dict = mx.nd.load(path)\n arg_params = {}\n aux_params = {}\n for k, v in save_dict.items():\n tp, name = k.split(':', 1)\n if tp == 'arg':\n arg_params[name] = v\n if tp == 'aux':\n aux_params[name] = v\n return arg_params, aux_params", "def _load(self, load_dict):\n self._data_ = load_dict", "def test_params_loading(datadir: Path):\n config_fn = datadir / \"datapane.yaml\"\n initial_vals = dict(p1=\"a\", p3=3)\n\n assert len(dp.Params) == 0\n\n # load some values\n api._reset_runtime(initial_vals)\n assert len(dp.Params) == 2\n assert dp.Params[\"p1\"] == initial_vals[\"p1\"]\n\n # clear and load again\n api._reset_runtime({})\n assert len(dp.Params) == 0\n api._reset_runtime(initial_vals)\n\n # load from file\n dp.Params.load_defaults(config_fn=config_fn)\n # ensure values are merged\n assert len(dp.Params) == 3\n assert dp.Params[\"p1\"] == \"hello\"\n assert dp.Params[\"p2\"] == 4\n assert dp.Params[\"p3\"] == initial_vals[\"p3\"]", "def loadFromDict(self, d):\n self._data = d\n self._doDefaults()", "def _load_dict(self, kwargs):\n # TaskInfo always needs a description and task_type, but all other\n # supported (optional) parameters are loaded from kwargs to\n keyword_args = dict(kwargs)\n task_id = TaskInfo._dpop(keyword_args, \"id\")\n if task_id is not None:\n self.id = task_id\n\n priority = TaskInfo._dpop(keyword_args, \"priority\")\n if priority is None:\n priority = TaskPriority.MEDIUM\n else:\n priority = int(priority)\n\n description = TaskInfo._dpop(keyword_args, \"description\")\n if description is not None:\n self.description = description\n\n task_type = TaskInfo._dpop(keyword_args, \"type\")\n if task_type is not None:\n self.type = task_type\n\n # store unknown args so that they are not lost across\n # serialization/deserialization\n self._unknown_args = keyword_args\n\n self.set_priority(priority)", "def load_from_dict(self, dict_):\n policies = dict_.get('policies', 
None)\n super(Config, self).load_from_dict(\n {k: v for k, v in six.iteritems(dict_) if k != 'policies'})\n if policies is not None:\n self.policies = policies", "def load(self, path):\n parameters = paddle.load(path)\n self.set_dict(parameters)", "def load_parameters(gp, target):\n with open(target) as f:\n pdict = json.load(f)\n gp.likelihood.set_state(pdict['likelihood'])\n gp.kern.variance.set_state(pdict['kern_variance'])\n gp.kern.lengthscales.set_state(pdict['kern_lengthscale'])\n #for p in pdict:\n # if p == 'warp_tanh.psi':\n # gp[p] = np.array(pdict[p]).reshape(3, 3)\n # else:\n # gp[p] = pdict[p]", "def get_parameters(**kwargs):\r\n parameters = vars(global_file.params)\r\n for key, value in kwargs.items():\r\n parameters[str(key)] = value\r\n return parameters", "def _load(self):\n for k,v in self.parameters.items():\n if isinstance(v,list):\n setattr(self,k,np.array(v,dtype=np.float32))\n else:\n setattr(self,k,v)", "def _load_parameters(self, default):\n params = {}\n for (key, value) in default:\n params[key] = self._parse_parameter(value)\n \n if not os.path.exists(self._datadir):\n os.makedirs(self._datadir)\n \n # Check if the file already exists, and create a new one, using the \n # passed default values, if necessary\n paramfile = os.path.join(self._datadir, self.id.lower() + '.cfg')\n if (os.path.isfile(paramfile)):\n paramjson = open(paramfile)\n params_var = json.load(paramjson)\n params.update(params_var)\n else:\n params_var = {}\n params_var['eta'] = [params['eta']]*24\n params_var['cov'] = [params['sigma']**2]*24\n params.update(params_var)\n \n with open(paramfile, 'w') as paramjson:\n json.dump(params_var, paramjson)\n \n return params", "def load_parameters(self, params):\n # load (aka. deep copy) parameters in params into network\n c=0\n self.params = []\n names = ['W_i']\n for n,p in zip(names, params):\n self.params.append(theano.shared(name = p.name,\n value = p.get_value(borrow=True)))\n \n setattr(self, n, self.params[c])\n c+=1\n assert( len(self.params) == c )", "def _get_parameters(self, *keys):\n return {k: v for k, v in self.param.items() if k in keys}", "def restore_init_param_dict(self):\n self.param_dict = self._init_param_dict\n self._set_primary_behaviors()", "def load_params():\n file_name = filedialog.askopenfilename(\n filetypes=[(\"JSON\", \"*.json\")])\n if file_name:\n self.parent_class.classes[\"fractal\"].curve.load_from_file(\n file_name)\n self.parent_class.classes[\"fractal\"].curve.set_parent_parameters(\n )\n self.rules_frame_class.fill_entries_from_rules(\n self.parent_class.classes[\"fractal\"].rules)\n # fill the entries in rules input on load\n self.set_recursion_depth_entry(\n self.parent_class.classes[\"fractal\"].recursion_depth)\n self.set_base_length_entry(\n self.parent_class.classes[\"fractal\"].base_length)\n self.rules_frame_class.render_preview()", "def load_state_dict(self, state_dict):\n own_state = self.state_dict()\n new_state = OrderedDict()\n for name, param in state_dict.items():\n if name in own_state:\n new_state[name] = param\n\n super(EncoderImagePrecomp, self).load_state_dict(new_state)", "def load_state_dict(self, state_dict: Dict[str, torch.Tensor]):\n pass", "def load_params_from_file(self, input_file):\n\n ### FILL IN ###", "def load_standard_parameters(self):\n paradic = {'x':'0',\n 'y':'0',\n 'n_oct':'8',\n 'n_spo':'3',\n 'sigma_min':'0.8',\n 'delta_min':'0.5',\n 'sigma_in':'0.5',\n 'C_DoG':'0.015',\n 'C_edge':'10',\n 'n_bins':'36',\n 'lambda_ori':'1.5',\n 't':'0.8',\n 'n_hist':'4',\n 'n_ori':'8',\n 
'lambda_descr':'6',\n 'flag_match':'1',\n 'C_match':'0.6'}\n self.cfg['param']['paradic'] = paradic\n self.cfg.save()", "def on_load_parameters(self, filename=None):\n if filename is None:\n path, _ = QtWidgets.QFileDialog.getOpenFileName(self, \"Choose a parameter file.\", \"\", \"JSON Files (*.json)\")\n else:\n path = filename\n\n if path == '' or path is None:\n return\n\n self.param_file = path\n\n with open(self.param_file, 'r') as f:\n params = json.loads(f.read())\n\n obj_points = params['object positions']\n cam_pos = params['camera positions']\n dist_coeff = params['distortion coefficients']\n\n for p in obj_points:\n x, y = p['x'], p['y']\n lat, lon, alt = p['lat'], p['lon'], p['alt']\n self.add_known_image_points((x, y), latlonalt=(lat, lon, alt))\n\n self.camera_lat_line.setValue(float(cam_pos['lat']))\n self.camera_lon_line.setValue(float(cam_pos['lon']))\n self.camera_alt_line.setValue(float(cam_pos['alt']))\n self.cx_line.setValue(float(cam_pos['cx']))\n self.cy_line.setValue(float(cam_pos['cy']))\n self.phi_line.setValue(float(cam_pos['phi']))\n self.theta_line.setValue(float(cam_pos['theta']))\n self.psi_line.setValue(float(cam_pos['psi']))\n\n self.k1_line.setValue(float(dist_coeff['k1']))\n self.k2_line.setValue(float(dist_coeff['k2']))\n self.k3_line.setValue(float(dist_coeff['k3']))\n self.p1_line.setValue(float(dist_coeff['p1']))\n self.p2_line.setValue(float(dist_coeff['p2']))\n\n self.statusBar().showMessage(f'Loaded parameters from {self.param_file}')", "def load_params_from_file(self, fn):\n f = file(fn, 'r')\n params = json.load(f)\n return params", "def loadParameters(self, parmfile=''):\n if not parmfile:\n raise IOError(\"You need to specify a parameter filename\")\n parmdir = os.getenv('ATMOSPHERE_PARAMETERS_DIR')\n parmpath = os.join.path(parmdir, parmfile)\n # Read from file\n with open(parmpath, 'r') as parmf:\n data = pickle.load(parmf)\n # Dictionary list\n self.modtran_visits = data[0]\n # Tuple list\n self.aerosol_visits = data[1]\n # seed value\n nruns = len(self.modtran_visits)\n print('Parameters for {1} runs computed with seed = {0}'.format(data[2],\n nruns))\n # Init transmission array\n self.initTransmissionArray(nruns)", "def load_params(param_file):\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params", "def _load_command_dict(self, path=None):", "def load_params(fname):\n parmsff = {}\n # FIXME: This might fail if a parameter name is larger than 50 characters.\n # FIXME: Maybe do this with the csv module instead?\n temparr = numpy.loadtxt(fname, dtype=([('a','S50'),('b','f8')]), delimiter=',') \n for i in temparr:\n parmsff[i[0]] = i[1]\n return parmsff", "def initializeFromDict(self, inputDict):\n pass", "def load_variables(cls):\n cls._variablesDict = fileops.get_json_dict(cls.get_variables_filepath())", "def load_params(path, adjust=None, verbose=False):\n # correct the path format.\n path = path if path.endswith(\".yaml\") else path + \".yaml\"\n\n if verbose:\n print(\"loading parameters from\", path, end='... 
')\n # open and load the parameters.\n with open(path, 'r') as file:\n kwargs = yaml.full_load(file)\n if verbose:\n print(\"done\")\n\n # apply the given adjustments if needed.\n if adjust is not None and len(adjust) > 0:\n if verbose:\n print(f\"adjusts performed on parameters from {path}\", end='')\n for key in adjust:\n if verbose:\n print(\" -\", key, end='')\n kwargs[key] = adjust[key]\n if verbose:\n print()\n\n if verbose:\n show_args(dict(\n kwargs=kwargs,\n ), prt_name=False)\n\n return kwargs", "def setupParameters(self, **pars):\n \n seldict = {}\n for k,v in pars.items():\n if v != None and v != \"\":\n seldict[k] = v\n \n return seldict", "def load_yaml_params(self, params_file):\n self._update_params(params_file)", "def load_cls_params(self):\n with open('models/Final/linear_svc.p', 'rb') as model_file:\n model = pickle.load(model_file)\n self.svc = model['svc']\n self.X_scaler = model['X_scaler']\n self.parameters = model['parameters']\n\n print(self.parameters)", "def load_parameter_file(filename: str) -> Dict:\n assert isinstance(filename, str) and len(filename) > 0\n param_dict = {}\n # TODO implement search through possible parameter config file locations\n # Open up the CSV file for reaching\n with open(filename) as f:\n csvreader = csv.DictReader(f, delimiter='\\t')\n\n accepted_field_names = {'mechanism': ['mechanism', 'mechanism_id'],\n 'param_name': [\"parameter_name\", \"parameter\", \"param\", \"param_name\"],\n 'part_id': ['part_id', 'part'],\n 'param_val': [\"val\", \"value\", \"param_val\", \"parameter_value\"]\n }\n\n field_names = Parameter._get_field_names(csvreader.fieldnames, accepted_field_names)\n\n if field_names['param_name'] is None:\n warn('No param name column was found, could not load parameter')\n return param_dict\n if field_names['mechanism'] is None:\n no_mechism_column = True\n else:\n no_mechism_column = False\n\n if field_names['part_id'] is None:\n no_part_id_column = True\n else:\n no_part_id_column = False\n\n for row in csvreader:\n # TODO what about integers? float might cause numerical drift in simulations, e.g. 
cooperativity=2.001\n param_value = float(row[field_names['param_val']])\n # TODO test all these cases!\n if row[field_names['param_name']] is None or len(row[field_names['param_name']]) == 0:\n pass\n elif no_mechism_column and no_part_id_column:\n param_name = row[field_names['param_name']]\n param_dict[param_name] = param_value\n elif no_mechism_column and no_part_id_column is False:\n if row[field_names['part_id']] is not None and len(row[field_names['part_id']]) > 0:\n part_id = row[field_names['part_id']]\n param_name = row[field_names['param_name']]\n param_dict[(part_id, param_name)] = param_value\n else:\n param_name = row[field_names['param_name']]\n param_dict[param_name] = param_value\n elif no_part_id_column and no_mechism_column is False:\n if row[field_names['mechanism']] is not None and len(row[field_names['mechanism']]) > 0:\n mech_name = row[field_names['mechanism']]\n param_name = row[field_names['param_name']]\n param_dict[(mech_name, param_name)] = param_value\n else:\n param_name = row[field_names['param_name']]\n param_dict[param_name] = param_value\n else:\n if row[field_names['part_id']] is not None and len(row[field_names['part_id']]) > 0:\n if row[field_names['mechanism']] is not None and len(row[field_names['mechanism']]) > 0:\n part_id = row[field_names['part_id']]\n mech_name = row[field_names['mechanism']]\n param_name = row[field_names['param_name']]\n param_dict[(mech_name, part_id, param_name)] = param_value\n else:\n part_id = row[field_names['part_id']]\n param_name = row[field_names['param_name']]\n param_dict[(part_id, param_name)] = param_value\n else:\n if row[field_names['mechanism']] is not None and len(row[field_names['mechanism']]) > 0:\n mech_name = row[field_names['mechanism']]\n param_name = row[field_names['param_name']]\n param_dict[(mech_name, param_name)] = param_value\n else:\n param_name = row[field_names['param_name']]\n param_dict[param_name] = param_value\n\n return param_dict", "def load_params(self, params):\n params.cp_latest_filename = \"latest_checkpoint_v\"+params.version\n params.cp_load_latest_filename = \"latest_checkpoint_v\"+params.cp_load_ver\n params.cp_load_dir = params.out_dir + params.cp_load_name+ \"/checkpoints/\"\n if not hasattr(params, \"model_out_dir\"):\n params.model_out_dir = params.out_dir + params.model_name\n params.cp_save_dir = params.model_out_dir + \"/checkpoints/\"\n params.log_dir = params.model_out_dir + \"/logfiles/\"\n params.save_dir = params.model_out_dir + \"/savefiles/\"\n params.disp_dir = params.model_out_dir + \"/vis/\"\n params.num_pixels = int(np.prod(params.data_shape))\n self.params = params\n self.params_loaded = True", "def load_run_params(config_id: str = \"default\", program: str = None) -> dict:\n\n run_params_file = get_default_params_file()\n\n with run_params_file.open() as f:\n params = json.load(f)\n\n if program is None:\n return params[config_id]\n else:\n return params[config_id][program]", "def load_reco_param(source):\n if not (source is None or isinstance(source, (str, Mapping))):\n raise TypeError('`source` must be string, mapping, or None')\n\n if isinstance(source, str):\n orig_dict = from_file(source)\n\n elif isinstance(source, Mapping):\n orig_dict = source\n\n else:\n raise TypeError('Cannot load reco parameterizations from a %s'\n % type(source))\n\n valid_dimensions = ('coszen', 'energy')\n required_keys = ('dist', 'fraction', 'kwargs')\n\n # Build dict of parameterizations (each a callable) per flavintgroup\n reco_params = OrderedDict()\n for flavint_key, 
dim_dict in orig_dict.items():\n flavintgroup = NuFlavIntGroup(flavint_key)\n reco_params[flavintgroup] = {}\n for dimension in dim_dict.keys():\n dim_dist_list = []\n\n if not isinstance(dimension, str):\n raise TypeError(\"The dimension needs to be given as a string!\"\n \" Allowed: %s.\"%valid_dimensions)\n\n if dimension not in valid_dimensions:\n raise ValueError(\"Dimension '%s' not recognised!\"%dimension)\n\n for dist_dict in dim_dict[dimension]:\n dist_spec_dict = {}\n\n # allow reading in even if kwargs not present - computation of\n # transform will fail because \"loc\" and \"scale\" hard-coded\n # requirement\n for required in required_keys:\n if required not in dist_dict:\n raise ValueError(\"Found distribution property dict \"\n \"without required '%s' key for \"\n \"%s - %s!\"\n %(required, flavintgroup, dimension))\n\n for k in dist_dict.keys():\n if k not in required_keys:\n logging.warning(\n \"Unrecognised key in distribution property dict: '%s'\"%k\n )\n\n dist_spec = dist_dict['dist']\n\n if not isinstance(dist_spec, str):\n raise TypeError(\" The resolution function needs to be\"\n \" given as a string!\")\n\n if not dist_spec:\n raise ValueError(\"Empty string found for resolution\"\n \" function!\")\n\n try:\n dist = getattr(stats, dist_spec.lower())\n except AttributeError:\n try:\n import scipy\n sp_ver_str = scipy.__version__\n except:\n sp_ver_str = \"N/A\"\n raise AttributeError(\"'%s' is not a valid distribution\"\n \" from scipy.stats (your scipy\"\n \" version: '%s').\"\n %(dist_spec.lower(), sp_ver_str))\n logging.debug(\"Found %s - %s resolution function: '%s'\"\n %(flavintgroup, dimension, dist.name))\n\n dist_spec_dict['dist'] = dist\n\n frac = dist_dict['fraction']\n\n if isinstance(frac, str):\n frac_func = eval(frac)\n\n elif callable(frac):\n frac_func = frac\n\n else:\n raise TypeError(\n \"Expected 'fraction' to be either a string\"\n \" that can be interpreted by eval or a callable.\"\n \" Got '%s'.\" % type(frac)\n )\n\n dist_spec_dict['fraction'] = frac_func\n\n kwargs = dist_dict['kwargs']\n\n if not isinstance(kwargs, dict):\n raise TypeError(\n \"'kwargs' must hold a dictionary. Got '%s' instead.\"\n % type(kwargs)\n )\n\n dist_spec_dict['kwargs'] = kwargs\n for kwarg, kwarg_spec in kwargs.items():\n\n if isinstance(kwarg_spec, str):\n kwarg_eval = eval(kwarg_spec)\n\n elif callable(kwarg_spec) or isscalar(kwarg_spec):\n kwarg_eval = kwarg_spec\n\n else:\n raise TypeError(\n \"Expected kwarg '%s' spec to be either a string\"\n \" that can be interpreted by eval, a callable or\"\n \" a scalar. 
Got '%s'.\" % type(kwarg_spec)\n )\n\n dist_spec_dict['kwargs'][kwarg] = kwarg_eval\n\n dim_dist_list.append(dist_spec_dict)\n\n reco_params[flavintgroup][dimension] = dim_dist_list\n\n return reco_params", "def __init__(self, adict):\n\n self.__dict__.update(adict)\n\n for k, v in adict.items():\n if isinstance(v, dict):\n self.__dict__[k] = ParamObject(v)", "def parseBoardParameters(self, parametersFromRegistry):\n self.boardParams = dict(parametersFromRegistry)\n #for key, val in dict(parametersFromRegistry).items():\n # setattr(self, key, val)", "def set_params(self, **kwargs):\n for key, value in kwargs.items():\n if key in self.params.keys():\n self.params[key] = value\n else:\n raise KeyError", "def load_params_from_pickle_file(session: tf.Session,\n params_filename: Text) -> None:\n with open(params_filename, 'rb') as f:\n params = pickle.load(f)\n for var in tf.trainable_variables():\n session.run(var.assign(params[var.name]))", "def __init_values(self, values):\n for name, value in list(values.items()):\n if name in initializable_parameters:\n setattr(self, name, value)", "def _load(self, load_dict):\n for key in list(load_dict.keys()):\n # We delete keys over time:\n if key in load_dict:\n if SparseResult.IDENTIFIER in key:\n new_key = key.split(SparseResult.IDENTIFIER)[0]\n matrix = SparseParameter._reconstruct_matrix(load_dict[key])\n self._data[new_key] = matrix\n else:\n self._data[key] = load_dict[key]", "def __init__( self, parameters={} ):\n self.params = {}", "def load_state(self, dictionary):\n self.log_formatstr = dictionary['log_formatstr']\n self.backend_interval = dictionary['backend_interval']", "def load_parameters(self):\n with open(INTERNAL_DATA_DIR / self.name_default_params, 'r') as f:\n return yaml.load(f, Loader=yaml.FullLoader)", "def load_params(exe, prog, path, ignore_params=[]):\n if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')):\n raise ValueError(\"Model pretrain path {} does not \"\n \"exists.\".format(path))\n\n logger.info('Loading parameters from {}...'.format(path))\n\n ignore_set = set()\n state = _load_state(path)\n\n # ignore the parameter which mismatch the shape\n # between the model and pretrain weight.\n all_var_shape = {}\n for block in prog.blocks:\n for param in block.all_parameters():\n all_var_shape[param.name] = param.shape\n ignore_set.update([\n name for name, shape in all_var_shape.items()\n if name in state and shape != state[name].shape\n ])\n\n if ignore_params:\n all_var_names = [var.name for var in prog.list_vars()]\n ignore_list = filter(\n lambda var: any([re.match(name, var) for name in ignore_params]),\n all_var_names)\n ignore_set.update(list(ignore_list))\n\n if len(ignore_set) > 0:\n for k in ignore_set:\n if k in state:\n logger.warning('variable {} not used'.format(k))\n del state[k]\n fluid.io.set_program_state(prog, state)", "def from_internal_dict(cls, params):\n options = cls({}) # basic default options\n opt_dict = options.__dict__\n\n for key, val in opt_dict.items():\n options.__dict__[key] = params.get(key, val)\n\n return options", "def from_dict(self, input):\n return self(\n **{k: v for k, v in input.items() if k in signature(self).parameters}\n )", "def loadParams(self):\n\n if len(self.filParams) < 3:\n return\n\n if not os.access(self.filParams, os.R_OK):\n return\n\n print(\"Priors.loadParams INFO: loading priors from %s\" \\\n % (self.filParams))\n\n # This is a little bit painful without just using something\n # more mature like astropy.table or pandas:\n hypers = 
np.genfromtxt(self.filParams, usecols=(1,2))\n\n # Convert the angular arguments to radians\n hypers[4] = np.radians(hypers[4])\n hypers[5] = np.radians(hypers[5])\n hypers[7] = np.radians(hypers[7])\n\n # transpose into hyperparams\n self.hyper = np.transpose(hypers)\n\n # now we need to read in the function names. This only really\n # has meaning for the mixed prior...\n strNames = np.genfromtxt(self.filParams, usecols=(0), dtype='str')\n self.mixedNames = list(strNames)\n\n # Finally, read in the name of the function\n with open(self.filParams, 'r') as rObj:\n for sLine in rObj:\n if sLine.find('#') < 0:\n continue\n if sLine.find('NAME') < 0:\n continue\n\n vLine = sLine.strip().split()\n self.namePrior = vLine[-1]", "def _setup_params(self,**params):\n ### a parameter might be passed in for one of the extra_pos;\n ### if a key in the params dict is not a *parameter* of this\n ### PO, then try it on the extra_pos\n for n,p in params.items():\n if n not in self.params():\n self.set_parameter_value(n,p)\n del params[n]\n\n Parameterized._setup_params(self,**params)", "def LoadParams(file):\n global globalParams\n global globalSections\n\n # check to see whether the file exists\n try: f = open(file, 'r')\n except IOError:\n fail('ERROR: parameter file does not exist: ', file)\n else:\n f.close()\n\n\n cp = ConfigParser.ConfigParser()\n cp.optionxform = str\n cp.read(file)\n\n globalSections = cp.sections()\n\n for sec in cp.sections():\n\n for opt in cp.options(sec):\n\n value = cp.get(sec, opt)\n \n # check in turn whether this is an interger, float, or string\n if (isInt(value)):\n globalParams[sec + \".\" + opt] = int(value)\n elif (isFloat(value)):\n globalParams[sec + \".\" + opt] = float(value)\n else:\n globalParams[sec + \".\" + opt] = value.strip()", "def restore_model_params(self, in_dict):\n\n self.trained_model_params = in_dict['model_params']", "def load_bestfitparams(self, param_array):\n\n self.params = OrderedDict()\n\n for i, element in enumerate(param_array):\n self.params[self._paramnames[i]] = element\n pass", "def load_from_dict(self, dict_):\n for key, value in six.iteritems(dict_):\n setattr(self, util.as_attr(key), value)\n self._check_against_schema()", "def load_params(namespace: str, env: str) -> dict:\n config = {}\n path = f\"/{namespace}/{env}/\"\n ssm = boto3.client(\"ssm\")\n more = None\n args = {\"Path\": path, \"Recursive\": True, \"WithDecryption\": True}\n while more is not False:\n if more:\n args[\"NextToken\"] = more\n params = ssm.get_parameters_by_path(**args)\n for param in params[\"Parameters\"]:\n key = param[\"Name\"].split(\"/\")[3]\n config[key] = param[\"Value\"]\n more = params.get(\"NextToken\", False)\n return config", "def import_parameters(self, file_name):\n parameters = []\n\n with open(file_name) as in_file:\n parameters = json.load(in_file)\n\n if parameters:\n self.put_parameters(parameters)", "def load_parameters(self, filename=None):\n if not filename:\n filename = os.path.join(self.directory, 'learned_parameters.npy')\n params = numpy.load(filename)\n lasagne.layers.set_all_param_values(self.__network, params)", "def loadDict(self, sd):\n self.setName(sd.get(\"name\", None))\n self.setDriverName(sd.get(\"driverName\", None))\n self.setOptDriverName(sd.get(\"optDriverName\", None))\n self.setAuxDriverName(sd.get(\"auxDriverName\", None))\n self.setRunType(sd.get(\"runType\", None))\n self.setInputNames(sd.get(\"inputNames\", None))\n self.setOutputNames(sd.get(\"outputNames\", None))\n self.setInputTypes(sd.get(\"inputTypes\", 
None))\n self.setInputMins(sd.get(\"inputMins\", None))\n self.setInputMaxs(sd.get(\"inputMaxs\", None))\n self.inputDists = []\n if \"inputDists\" in sd:\n for distDict in sd[\"inputDists\"]:\n distr = Distribution(Distribution.UNIFORM)\n distr.loadDict(distDict)\n self.inputDists.append(distr)\n\n if not self.inputDists:\n self.inputDists = None\n self.setInputDefaults(sd.get(\"inputDefaults\", None))\n self.setSelectedOutputs(sd.get(\"outputSelections\", None))\n self.setNamesIncludeNodes(sd.get(\"namesIncludeNodes\", None))\n stats = sd.get(\"emulatorOutputStats\", None)\n for i, stat in enumerate(stats):\n self.setEmulatorOutputStatus(i, stat)\n self.setEmulatorTrainingFile(sd.get(\"emulatorTrainingFile\", None))\n self.inputsFlowsheetFixed = sd.get(\"inputsFlowsheetFixed\", None)", "def init_params(self):\n self.clear()\n self._init_load_data()\n self._init_net_delay_data()", "def load_state_dict(\n self,\n state_dict: Mapping[str, Any],\n *args,\n **kwargs,\n ) -> NamedTuple:\n return super().load_state_dict(state_dict, *args)", "def load(self) -> None:\n data = get_dictionary()\n if 'error' in data:\n quit()\n self.data = data", "def _load(mapping, **keys):\n return keys[\"loader\"](mapping, **keys)", "def loadParamsJSON(self, fromFile):\n if not fromFile.exists():\n return\n self.associatedFile = fromFile\n self.associatedFileField.setText(self.associatedFile.getPath())\n self.params = JSONObject()\n try:\n try:\n while (line = br.readLine()) != None:\n pdata.append(line)\n finally:\n br.close()\n self.params = JSONObject(pdata.__str__())\n self.savedParams = self.params.__str__()\n self.setUIfromJSON()\n self.syncJSONtoUI()\n except Exception as e:\n e.printStackTrace()", "async def load(self) -> Dict[str, Dict]:\n raise NotImplementedError()", "def _get_params_from_yaml_dict(self, yd):\n params = {}\n for step in yd.keys():\n params.update({step+'__'+k: v for k, v in yd[step].items()})\n return params", "def _set_init_param_dict(self):\n\n self.param_dict = {}\n\n try:\n suppress_warning = self._suppress_repeated_param_warning\n except AttributeError:\n suppress_warning = False\n msg = (\"\\n\\nThe param_dict key %s appears in more than one component model.\\n\"\n \"This is permissible, but if you are seeing this message you should be sure you \"\n \"understand it.\\nIn particular, double-check that this parameter does not have \"\n \"conflicting meanings across components.\\n\"\n \"\\nIf you do not wish to see this message every time you instantiate, \\n\"\n \"simply attach a _suppress_repeated_param_warning attribute \\n\"\n \"to any of your component models and set this variable to ``True``.\\n\")\n\n for component_model in self.model_dictionary.values():\n\n if not hasattr(component_model, 'param_dict'):\n component_model.param_dict = {}\n intersection = set(self.param_dict) & set(component_model.param_dict)\n if intersection != set():\n for key in intersection:\n if suppress_warning is False:\n warn(msg % key)\n\n for key, value in component_model.param_dict.iteritems():\n self.param_dict[key] = value\n\n self._init_param_dict = copy(self.param_dict)", "def create_parameter_dictionary(parameters: Union[None, Dict],\n parameter_file: Union[None, str, List[str]]) -> Union[None, Dict]:\n # empty call no parameters are loaded\n if parameters is None or parameter_file is None:\n return parameters\n\n assert isinstance(parameters, dict)\n assert isinstance(parameter_file, str) or isinstance(parameter_file, list)\n\n if isinstance(parameter_file, list):\n file_list = 
parameter_file\n else:\n file_list = [parameter_file]\n\n for file_name in file_list:\n new_parameters = Parameter.load_parameter_file(file_name)\n parameters.update(new_parameters)\n\n return parameters", "def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)", "def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)", "def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)", "def load_workflow_params() -> dict:\n\n try:\n workflow_params_file = upsearch(WORKFLOW_PARAMS_FILENAME)\n except FileNotFoundError:\n message = \"Unable to find .params file; ensure that you are in a workflow directory.\"\n raise FileNotFoundError(message)\n\n with workflow_params_file.open() as f:\n workflow_params = json.load(f)\n\n return workflow_params", "def parameters_dict(self):\n return", "def get_parameters(\n controller_cls: BaseController, name: str, df_loaded: pd.DataFrame, symbol: str\n) -> Dict[str, Any]:\n signature = inspect.signature(controller_cls) # type: ignore\n kwargs: Dict[str, Any] = {}\n\n for param in signature.parameters.values():\n if param.name in (\"ticker\", \"symbol\", \"coin\"):\n kwargs[param.name] = symbol\n elif param.name == \"data\" and name in (\"forecast\", \"qa\"):\n kwargs[\"data\"] = df_loaded\n elif (\n param.default is inspect.Parameter.empty\n and param.kind is not inspect.Parameter.VAR_KEYWORD\n ):\n for param_name, value in param_name_to_value.items():\n if param.name == param_name:\n kwargs[param.name] = value\n break\n if param.name not in kwargs:\n for param_type, value in param_type_to_value.items():\n if isinstance(param_type, tuple):\n if param.annotation in param_type:\n kwargs[param.name] = {symbol: df_loaded}\n break\n elif param.annotation is pd.DataFrame:\n kwargs[param.name] = df_loaded\n break\n elif param.annotation is param_type:\n kwargs[param.name] = value\n break\n\n return kwargs", "def get_params(name, *optionals):\n name = name.lower()\n optionals = [opt.lower() for opt in optionals]\n\n partial = os.path.join(cfg.param_dir, name+'.json')\n full = os.path.join(cfg.param_dir, '_'.join([name]+optionals)+'.json')\n\n param = {}\n if os.path.isfile(partial):\n with open(partial, 'r') as f:\n tf.logging.info('Loading parameters from %s', partial)\n _update(param, json.load(f))\n if os.path.isfile(full):\n with open(full, 'r') as f:\n tf.logging.info('Loading parameters from %s', full)\n _update(param, json.load(f))\n\n if not param:\n tf.logging.info('No parameter file found')\n return param", "def load(self):\n\n args = self.id, self.name\n self.loader.session.logger.debug(\"loading CDR%d (%r)\", *args)\n cursor = self.loader.dictionary_cursor\n cursor.execute(self.DICTIONARY_INSERT, self.entry)\n for alias in self.aliases:\n cursor.execute(self.ALIAS_INSERT, alias)\n self.loader.dictionary_conn.commit()" ]
[ "0.72479516", "0.7205833", "0.69779885", "0.69117165", "0.68091863", "0.67698336", "0.67271155", "0.66972244", "0.66640526", "0.65984803", "0.6589829", "0.65780336", "0.65641016", "0.6548739", "0.6544162", "0.6452717", "0.6443524", "0.642245", "0.6406009", "0.63931835", "0.63562715", "0.6354297", "0.6344183", "0.63150924", "0.6305116", "0.62856203", "0.6274026", "0.6258936", "0.622153", "0.62004805", "0.6180554", "0.6166029", "0.6158588", "0.6146406", "0.61432064", "0.6124302", "0.6075096", "0.60714287", "0.60672414", "0.6067014", "0.60538775", "0.60435826", "0.6036856", "0.6031396", "0.6030815", "0.6022841", "0.6012546", "0.6007524", "0.5992758", "0.5943362", "0.59379345", "0.5934158", "0.5933369", "0.59247863", "0.59193337", "0.5910651", "0.5905304", "0.5882417", "0.5860807", "0.58517295", "0.58512", "0.58449215", "0.58372253", "0.58349735", "0.58308625", "0.5826308", "0.58243", "0.5806433", "0.58060515", "0.57919633", "0.5790565", "0.5773578", "0.57728386", "0.5772493", "0.5772414", "0.5762116", "0.57610714", "0.57304394", "0.5730148", "0.5729236", "0.5722183", "0.57207495", "0.5710721", "0.5709627", "0.5704126", "0.5702642", "0.56974244", "0.56899506", "0.56878054", "0.5675669", "0.5675239", "0.56696874", "0.5661316", "0.5661316", "0.5661316", "0.566112", "0.56603557", "0.5653711", "0.56526667", "0.5650157" ]
0.56534404
98
Construct solver from Caffe solver prototxt file.
def from_caffe_solver_protoxt(cls, caffe_solver_prototxt_file: Path): solver_param = caffe_pb2.SolverParameter() with open(caffe_solver_prototxt_file, 'rt') as f: pb2.text_format.Merge(f.read(), solver_param) dictionary = {'lr_policy': solver_param.lr_policy, 'base_lr': solver_param.base_lr, 'gamma': solver_param.gamma, 'momentum': solver_param.momentum, 'max_iter': solver_param.max_iter, 'stepsize': solver_param.stepsize, 'stepvalues': solver_param.stepvalue, 'weight_decay': solver_param.weight_decay, 'iter_size': solver_param.iter_size, 'from_prototxt': caffe_solver_prototxt_file} return cls(**dictionary)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_solver(self):\n # Create a temporary solver file.\n fname = '__solver__.prototxt'\n f = open(fname, 'w')\n f.write(self.to_proto())\n f.close()\n # Get solver from file.\n solver = caffe.get_solver_from_file(fname)\n # Remove the temporary solver file and return solver.\n os.remove(fname)\n return solver", "def from_CMTSOLUTION_file(self, filename):\n with open(filename, \"rt\") as f:\n f.readline()\n f.readline()\n time_shift = float(f.readline().strip().split()[-1])\n f.readline()\n latitude = float(f.readline().strip().split()[-1])\n longitude = float(f.readline().strip().split()[-1])\n depth_in_m = float(f.readline().strip().split()[-1]) * 1e3\n\n m_rr = float(f.readline().strip().split()[-1]) / 1e7\n m_tt = float(f.readline().strip().split()[-1]) / 1e7\n m_pp = float(f.readline().strip().split()[-1]) / 1e7\n m_rt = float(f.readline().strip().split()[-1]) / 1e7\n m_rp = float(f.readline().strip().split()[-1]) / 1e7\n m_tp = float(f.readline().strip().split()[-1]) / 1e7\n\n return self(latitude, longitude, depth_in_m, m_rr, m_tt, m_pp, m_rt,\n m_rp, m_tp, time_shift)", "def from_file(csp_file_name):\n\n with open(csp_file_name, 'r') as problem_file:\n file_lines = problem_file.readlines()\n variables = {}\n constraints = Constraints()\n largest_value = 0\n # Make a list of variable names.\n for line in file_lines:\n words = line.split()\n next_variable = CSP.__get_variable_from_dictionary(variables, words[0])\n next_relation = Relation.as_function(words[1])\n next_value = words[2]\n if next_value.isdigit():\n next_value = int(next_value)\n if next_value > largest_value:\n largest_value = next_value\n constraints.add_unary_constraint(next_variable, next_relation, next_value)\n else:\n next_value = CSP.__get_variable_from_dictionary(variables, next_value)\n constraints.add_binary_constraint(next_variable, next_relation, next_value)\n # Find d and v.\n d = len(variables)\n v = largest_value\n # Set domains.\n for var in variables.values():\n var.domain = set(xrange(max(d, (v - 1))))\n new_csp = CSP(variables.values(), constraints)\n return new_csp", "def from_file(cls, fn):\n dct = store.get_dict(fn, 'trainalgorithm')\n return cls.from_dict(dct)", "def solve(ctx):\n my_solver(ctx.obj['filename'])", "def load(file_path):\n with open(file_path, \"rb\") as file:\n data = pickle.load(file)\n ocp = OptimalControlProgram(**data[\"ocp_initilializer\"])\n for key in data[\"versions\"].keys():\n if data[\"versions\"][key] != ocp.version[key]:\n raise RuntimeError(\n f\"Version of {key} from file ({data['versions'][key]}) is not the same as the \"\n f\"installed version ({ocp.version[key]})\"\n )\n out = [ocp, data[\"sol\"]]\n if \"sol_iterations\" in data.keys():\n out.append(data[\"sol_iterations\"])\n return out", "def write_solver(workdir, lr='0.0001', lrp='\"fixed\"'): \n solver = bct.CaffeSolver() \n solver.sp['base_lr'] = lr\n solver.sp['test_interval'] = '60000' \n solver.sp['lr_policy'] = lrp\n solver.write(osp.join(workdir, 'solver.prototxt'))", "def from_file(cls, configID, configDir):\n if configDir not in sys.path:\n sys.path.insert(0, configDir)\n configObj = __import__(configID)\n try:\n if configObj.config[\"solver\"] in cvx.installed_solvers():\n return cls(configID, configObj.config)\n else:\n return None\n except: # pragma: no cover\n warn(\"Could not import configuration: \" + configID)\n return None", "def from_file(cls, filename: str) -> \"NDOptimiser\":\n from autode.opt.coordinates.cartesian import CartesianCoordinates\n\n lines = open(filename, 
\"r\").readlines()\n n_atoms = int(lines[0].split()[0])\n\n title_line = NumericStringDict(lines[1])\n optimiser = cls(\n maxiter=int(title_line[\"maxiter\"]),\n gtol=GradientRMS(title_line[\"gtol\"]),\n etol=PotentialEnergy(title_line[\"etol\"]),\n )\n\n for i in range(0, len(lines), n_atoms + 2):\n raw_coordinates = np.zeros(shape=(n_atoms, 3))\n gradient = np.zeros(shape=(n_atoms, 3))\n\n for j, line in enumerate(lines[i + 2 : i + n_atoms + 2]):\n _, x, y, z, dedx, dedy, dedz = line.split()\n raw_coordinates[j, :] = [float(x), float(y), float(z)]\n gradient[j, :] = [float(dedx), float(dedy), float(dedz)]\n\n coords = CartesianCoordinates(raw_coordinates)\n coords.e = NumericStringDict(lines[i + 1])[\"E\"]\n coords.g = gradient.flatten()\n\n optimiser._history.append(coords)\n\n return optimiser", "def __init__(self, filename, num_particles, max_iteration, maxFlip, maxTabuSize, w, c1, c2):\n #Read cnf formula from file\n self.clauses, self.num_literals, self.num_clauses = self.w_clauses_from_file(filename)\n\n #Parameters of PSO\n self.num_particles = num_particles\n self.max_iteration = max_iteration\n self.w = w\n self.c1 = c1\n self.c2 = c2\n self.max_flip = maxFlip\n\n #Tabu list parameters\n self.tabuList = []\n self.maxTabuSize = maxTabuSize\n\n #Initialize particles\n self.swarm = self.init_particles(self.num_particles, self.num_literals)\n\n #Initialize global best and it's fitness\n self.global_best = self.swarm[0].position\n self.global_best_fitness = self.fitness(self.global_best)", "def __init__(self, filename):\n self.from_file(filename)\n self.parse_cell()\n self.parse_atom()\n self.apply_symops()", "def from_crystfel_file(cls, filename):\n return translate.load_crystfel(cls, filename)", "def from_crystfel_file(cls, filename):\n return translate.load_crystfel(cls, filename)", "def buildModelFromFile(fname):\n directory = os.path.dirname(fname)\n\n f = open(fname, \"r\")\n in_map = yaml.safe_load(f)\n f.close()\n\n expression = \"\"\n\n return build_model_from_dict(in_map)", "def fromfile(self, path):\n\t\tdata = filetools.read_data(path)\n\t\tprint \"File read: %i lines\" % len(data)\n\t\tself.build_matrix(data)", "def buildFromCSV(self, filepath):\r\n\t\t# TODO: Implement\r\n\t\traise NotImplementedError('This function has not yet been implemented.')\r\n\t\t# with open(filepath, 'r') as scheduleFile:\r\n\t\t# \t# Reusing Parser.parseCSVs(), but not in the intended way; ok because validation is not yet implemented\r\n\t\t# \t# TODO: Split Parser.parseCSVs() into separate people/set file parsers \r\n\t\t# \tn, people, setConstraints = Parser.parseCSVs(-1, scheduleFile, [])\r", "def cl_program_from_file(context, filename):\n return cl.Program(context, open(os.path.join(CL_PATH, filename)).read())", "def __init__(self, inputfolder, gpu_id=0, model=None):\n CaffeLoader.__init__(self, inputfolder, gpu_id=gpu_id)\n logger.info('loading deploy.prototxt from %s' % inputfolder)\n if model:\n self.setModel(model)\n if self.caffemodel and self.protofile:\n self.net = caffe.Net(self.protofile, caffe.TEST)\n else:\n logger.error('Cannot find prototxt or caffemodel file')\n sys.exit(0)", "def from_file(cls, file):\n instance = cls()\n with open(file) as f:\n for line in f:\n line = line.strip()\n if len(line) > 0 and not line.startswith('#'):\n instance.parse_and_add_clause(line)\n return instance", "def from_config(param_file, coeff_file, **kwargs):\n with open(coeff_file) as f:\n coeff_lines = f.readlines()\n coeff_lines = [line for line in coeff_lines if not line.startswith(\"#\")]\n 
element_profile = {}\n ne, nbc = coeff_lines[0].split()\n ne, nbc = int(ne), int(nbc)\n for n in range(ne):\n specie, r, w = coeff_lines[1 + n * (nbc + 1)].split()\n r, w = float(r), float(w)\n element_profile[specie] = {\"r\": r, \"w\": w}\n\n rcut_pattern = re.compile(r\"rcutfac (.*?)\\n\", re.S)\n twojmax_pattern = re.compile(r\"twojmax (\\d*)\\n\", re.S)\n quadratic_pattern = re.compile(r\"quadraticflag (.*?)(?=\\n|$)\", re.S)\n\n with zopen(param_file, \"rt\") as f:\n param_lines = f.read()\n\n rcut = float(rcut_pattern.findall(param_lines)[-1])\n twojmax = int(twojmax_pattern.findall(param_lines)[-1])\n if quadratic_pattern.findall(param_lines):\n quadratic = bool(int(quadratic_pattern.findall(param_lines)[-1]))\n else:\n quadratic = False\n\n describer = BispectrumCoefficients(\n rcutfac=rcut, twojmax=twojmax, element_profile=element_profile, quadratic=quadratic, pot_fit=True\n )\n model = SKLModel(model=LinearRegression(), describer=describer, **kwargs)\n coef = np.array(\n np.concatenate([coeff_lines[(2 + nbc * n + n) : (2 + nbc * (n + 1) + n)] for n in range(ne)]),\n dtype=np.float64,\n )\n model.model.coef_ = coef\n model.model.intercept_ = 0\n return SNAPotential(model=model)", "def init_from_file(self, filepath, batch_settings, effects_log):\n # don't forget to update the module docstring with changes here\n input_template_name = 'cost_factors_energysecurity'\n input_template_version = 0.3\n input_template_columns = {\n 'calendar_year',\n 'dollar_basis',\n 'dollars_per_bbl',\n 'oil_import_reduction_as_percent_of_total_oil_demand_reduction',\n }\n\n df = read_input_file(filepath, effects_log)\n validate_template_version_info(df, input_template_name, input_template_version, effects_log)\n\n # read in the data portion of the input file\n df = read_input_file(filepath, effects_log, skiprows=1)\n validate_template_column_names(filepath, df, input_template_columns, effects_log)\n\n df = df.loc[df['dollar_basis'] != 0, :]\n\n df = batch_settings.ip_deflators.adjust_dollars(batch_settings, df, effects_log, 'dollars_per_bbl')\n\n self._data = df.set_index('calendar_year').to_dict(orient='index')", "def from_file(path, name=None, seq_types=None):\n ext = path.split(\".\")[-1]\n if name is None:\n name = path.split(\"/\")[-1].replace(f\".{ext}\", \"\")\n with open(path, \"r\") as f:\n netlist = f.read()\n if ext == \"v\":\n return verilog_to_circuit(netlist, name, seq_types)\n elif ext == \"bench\":\n return bench_to_circuit(netlist, name)\n else:\n raise ValueError(f\"extension {ext} not supported\")", "def from_file(cls, filename):\n constructor_args = _load_serialized_mesh(filename)\n return cls(*constructor_args)", "def readsol_CBC(self,filename, lp, vs):\n\t\tf = file(filename)\r\n##\t\tfor i in range(len(lp.constraints)): f.readline()\r\n\t\tvalues = {}\r\n\t\tfor v in vs:\r\n\t\t\tvalues[v.name] = 0.0\r\n\t\t\tpass\r\n\t\tfor line in f:\r\n\t\t\tl = line.split()\r\n\t\t\tvalues[l[1]] = float(l[2])\r\n\t\t\tpass\n##\t\tfor v in vs:\r\n##\t\t\tl = f.readline().split()\r\n##\t\t\tvalues[v.name] = float(l[1])\r\n\t\tstatus = LpStatusUndefined # No status info\n\t\treturn status, values", "def preprocess(file: TextIO, args: Optional[List[str]] = None) -> MipsProgram:\n filename = os.path.abspath(file.name)\n memory = Memory()\n\n argv = [filename]\n if args:\n argv.extend(args)\n\n linesofcode: List[SourceLine] = process_file(file)\n\n labels: Dict[str, Label] = {}\n # Collect Preprocessor Directives.\n includes, eqvs, linesofcode = preprocessor_directives(linesofcode)\n\n # Gather 
.data/.text sections into separate lists\n unprocessed_labels, unprocessed_code = split_to_sections(linesofcode)\n\n # First process all the .data labels so they can be replaced in .text\n data_labels(labels, unprocessed_labels, memory)\n # Second gather the code labels,\n # this also replaces all labels in code with the correct value\n processed_code = code_labels(labels, unprocessed_code)\n\n # Cannot run a program without a main\n if not (\"main\" in labels and labels[\"main\"].location == mipsRE.TEXT_SEC):\n raise MipsException(f\"Cannot locate main label in {filename}\")\n\n registers = Registers()\n load_args(registers, memory, argv)\n\n registers[\"pc\"] = labels[\"main\"].value\n registers[\"$sp\"] = registers[\"$fp\"] = registers[\"$gp\"] = memory.ram[\"stack\"][\"stops\"]\n\n memory.extend_stack(bytes([ord(\"@\")] * Memory.PAGE_SIZE))\n\n return MipsProgram(name=filename, filenames=[filename, *includes], labels=labels, memory=memory, source=processed_code, registers=registers, eqvs=eqvs,)", "def buildSolverModel(self, lp):\n self._extract(lp)\n try:\n # Apply controls, warmstart etc. We do this here rather than in\n # callSolver() so that the caller has a chance to overwrite things\n # either using the `prepare` argument to callSolver() or by\n # explicitly calling\n # self.buildSolverModel()\n # self.callSolver()\n # self.findSolutionValues()\n # This also avoids setting warmstart information passed to the\n # constructor from actualResolve(), which would almost certainly\n # be unintended.\n model = lp.solverModel\n # Apply controls that were passed to the constructor\n for key, name in [\n (\"gapRel\", \"MIPRELSTOP\"),\n (\"timeLimit\", \"MAXTIME\"),\n (\"heurFreq\", \"HEURFREQ\"),\n (\"heurStra\", \"HEURSTRATEGY\"),\n (\"coverCuts\", \"COVERCUTS\"),\n (\"preSolve\", \"PRESOLVE\"),\n ]:\n value = self.optionsDict.get(key, None)\n if value is not None:\n model.setControl(name, value)\n\n # Apply any other controls. These overwrite controls that were\n # passed explicitly into the constructor.\n for option in self.options:\n if isinstance(option, tuple):\n name = optione[0]\n value = option[1]\n else:\n fields = option.split(\"=\", 1)\n if len(fields) != 2:\n raise PulpSolverError(\"Invalid option \" + str(option))\n name = fields[0].strip()\n value = fields[1].strip()\n try:\n model.setControl(name, int(value))\n continue\n except ValueError:\n pass\n try:\n model.setControl(name, float(value))\n continue\n except ValueError:\n pass\n model.setControl(name, value)\n # Setup warmstart information\n if self.optionsDict.get(\"warmStart\", False):\n solval = list()\n colind = list()\n for v in sorted(lp.variables(), key=lambda x: x._xprs[0]):\n if v.value() is not None:\n solval.append(v.value())\n colind.append(v._xprs[0])\n if _ismip(lp) and self.mip:\n # If we have a value for every variable then use\n # loadmipsol(), which requires a dense solution. 
Otherwise\n # use addmipsol() which allows sparse vectors.\n if len(solval) == model.attributes.cols:\n model.loadmipsol(solval)\n else:\n model.addmipsol(solval, colind, \"warmstart\")\n else:\n model.loadlpsol(solval, None, None, None)\n # Setup message callback if output is requested\n if self.msg:\n\n def message(prob, data, msg, msgtype):\n if msgtype > 0:\n print(msg)\n\n model.addcbmessage(message)\n except (xpress.ModelError, xpress.InterfaceError, xpress.SolverError) as err:\n raise PulpSolverError(str(err))", "def __init__(self, opts: dict, solver_opts: dict):\n self.name = opts.get(\"name\", \"Undefined\") # Name of the problem\n self.gp = opts.get(\"grid_points\") # Number of grid points\n self.nadir_p = opts.get(\"nadir_points\") # Nadir points\n self.eps = opts.get(\"penalty_weight\", 1e-3) # Penalty weight\n self.round = opts.get(\"round_decimals\", 9) # Decimal places to round to\n self.nadir_r = opts.get(\"nadir_ratio\", 1) # Nadir ratio\n self.logdir = opts.get(\"logging_folder\", \"logs\") # Folder to save logs\n self.early_exit = opts.get(\"early_exit\", True) # Whether to enable early exit\n self.bypass = opts.get(\"bypass_coefficient\", True) # Whether to enable bypass coefficient\n self.flag = opts.get(\"flag_array\", True) # Whether to use flag array\n self.cpu_count = opts.get(\"cpu_count\", cpu_count()) # Number of CPUs to use\n self.redivide_work = opts.get(\"redivide_work\", True) # Whether to redivide work\n self.model_fn = opts.get(\"pickle_file\", \"model.p\") # Pickle file name\n self.shared_flag = opts.get(\"shared_flag\", True) # Whether to use shared flag array\n self.output_excel = opts.get(\"output_excel\", True) # Whether to output to Excel\n self.process_logging = opts.get(\"process_logging\", False) # Whether to enable process logging\n self.process_timeout = opts.get(\"process_timeout\", None) # Timeout for processes\n self.solver_name = opts.get(\"solver_name\", \"gurobi\") # Name of solver\n self.solver_io = opts.get(\"solver_io\", \"python\") # IO mode of solver\n\n self.solver_opts = solver_opts # Solver options\n self.solver_opts[\"MIPGap\"] = solver_opts.get(\"MIPGap\", 0.0) # MIP gap\n self.solver_opts[\"NonConvex\"] = solver_opts.get(\"NonConvex\", 2) # Nonconvex setting\n\n # Remove None values from dict when user has overriden them\n for key, value in dict(self.solver_opts).items():\n if value is None or value:\n del self.solver_opts[key]\n\n self.time_created = time.strftime(\"%Y%m%d-%H%M%S\") # Time the options object was created\n self.log_name = self.name + \"_\" + str(self.time_created) # Name of log file", "def fromXmlFile(filename, plant, orderList, simulator, evaluator):\n\t\tfile = open(filename, \"r\")\n\t\tdoc = minidom.parse(file)\n\t\toptimizer = Optimizer.fromXml(doc, plant, orderList, simulator, evaluator)\n\t\tfile.close()\n\t\treturn optimizer", "def main():\n\n clues_file = \"data/part1-clues.txt\"\n parsed_clues_file = \"data/part1-parsedclues.txt\"\n cp = ClueParser()\n\n clues = loadList(clues_file)\n gold_parsed_clues = loadList(parsed_clues_file)\n assert(len(clues) == len(gold_parsed_clues))\n\n cp.train(clues, gold_parsed_clues)\n parsed_clues = cp.parseClues(clues)\n cp.evaluate(parsed_clues, gold_parsed_clues)", "def read(cls, file_name=None, lexclude=[], lonly=[], verbose=False):\n###################################################################\n\n # import\n import numpy as np\n \n # init\n \n vf = Velocity_Field()\n\n # fake 4-letters code generation using hexadecimal\n def __gen_fake_code__(n):\n \n FAKE = 
[]\n for i in np.arange(n):\n fake_code = (\"%4s\" % hex(i).split('x')[-1].replace('L', '')).replace(' ', '0')\n FAKE.append(fake_code.upper())\n \n return(np.array(FAKE))\n\n # reads psvelo file\n\n if verbose:\n print(\"-- Reading GMT psvelo file: %s \" % file_name)\n \n try:\n np_vel = np.array(np.mat(np.genfromtxt(file_name, comments='#')))\n except:\n raise IOError(\"!!! Could not read file: %s\" % file_name)\n \n # empty psvelo file\n if np_vel.size == 0:\n return( vf )\n \n if (np_vel.shape[1] == 8):\n if verbose:\n print(\"-- file %s has 8 columns\" % file_name)\n\n np_vel = np.delete(np_vel, -1, axis=1)\n np_code = np.array(np.mat(np.genfromtxt(file_name, comments='#', usecols=(7), dtype=str))).flatten()\n \n elif (np_vel.shape[1] == 3):\n\n if verbose:\n print(\"-- file %s has 3 columns\" % file_name)\n\n np_vel = np.delete(np_vel, -1, axis=1)\n np_code = np.array(np.mat(np.genfromtxt(file_name, comments='#', usecols=(2)))).flatten()\n\n elif (np_vel.shape[1] not in [3, 8]):\n np_code = __gen_fake_code__(np_vel.shape[0])\n else:\n raise IOError(\"!!! Could not decipher file content: %s\", file_name)\n\n # populates velocity field\n \n from pyacs.lib.gmtpoint import GMT_Point\n\n lgmt_points = []\n\n for i in np.arange(np_vel.shape[0]):\n\n code = np_code[i]\n \n if np_vel.shape[1] >= 7:\n lon, lat, Ve, Vn, SVe, SVn, SVen = np_vel[i, :]\n M = GMT_Point(lon=lon, lat=lat, Ve=Ve, Vn=Vn, SVe=SVe, SVn=SVn, SVen=SVen, code=code)\n else:\n lon, lat = np_vel[i, :]\n M = GMT_Point(lon=lon, lat=lat, code=code)\n\n if verbose:\n M.get_info(display=True)\n \n # tests whether site will be added\n \n if lonly != []:\n if M.code in lonly:\n lgmt_points.append(M)\n \n else:\n if lexclude != []:\n if M.code not in lexclude:\n lgmt_points.append(M)\n else:\n lgmt_points.append(M)\n \n vf.file_name = file_name\n vf.sites = lgmt_points\n \n return vf", "def __init__(self, path):\n with open(path, 'r') as bt:\n self.headers = bt.readline().split(',')\n self.data = []\n for line in bt:\n self.data.append(list(eval(line)))\n self.scores = []\n self.models = {'dtr': DecisionTreeRegressor(),\n 'br': BaggingRegressor(n_jobs=-1),\n 'rfr': RandomForestRegressor(n_jobs=-1),\n }", "def init_from_file(self, filepath, effects_log):\n # don't forget to update the module docstring with changes here\n input_template_name = 'emission_rates_vehicles'\n input_template_version = 0.2\n input_template_columns = {\n 'start_year',\n 'sourcetype_name',\n 'reg_class_id',\n 'market_class_id',\n 'in_use_fuel_id',\n 'rate_name',\n 'equation',\n }\n\n df = read_input_file(filepath, effects_log)\n validate_template_version_info(df, input_template_name, input_template_version, effects_log)\n\n # read in the data portion of the input file\n df = read_input_file(filepath, effects_log, skiprows=1)\n validate_template_column_names(filepath, df, input_template_columns, effects_log)\n\n rate_keys = zip(\n df['start_year'],\n df['sourcetype_name'],\n df['reg_class_id'],\n df['in_use_fuel_id'],\n df['rate_name']\n )\n df.set_index(rate_keys, inplace=True)\n\n self.startyear_min = min(df['start_year'])\n\n self._data = df.to_dict('index')\n\n for rate_key in rate_keys:\n rate_eq = self._data[rate_key]['equation']\n self._data[rate_key].update({'equation': compile(rate_eq, '<string>', 'eval')})", "def build_from_file(path):\n with open(path) as obj:\n raw_file = obj.read()\n file_lines = [line.split(\" \") for line in raw_file.split(\"\\n\")]\n\n vertices = {}\n faces = []\n for number, line in enumerate(file_lines):\n if line[0] == 
\"v\":\n vertices[number + 1] = tuple(map(float, line[1:]))\n if line[0] == \"f\":\n face = []\n for index in line[1:]:\n face.append(vertices[int(index)])\n face.append(vertices[int(line[1])])\n faces.append(face)\n return Object(points=faces)", "def loadCyclicSolver(statefile):\n fh = open(statefile,'r')\n cys = cPickle.load(fh)\n fh.close()\n return cys", "def load(cls, f, model, ext_unit_dict=None):\n msg = (\n \"Model object must be of type flopy.mfusg.MfUsg\\n\"\n f\"but received type: {type(model)}.\"\n )\n assert isinstance(model, MfUsg), msg\n\n if model.verbose:\n print(\"loading bcf package file...\")\n\n f_obj = get_open_file_object(f, \"r\")\n\n # dataset 0 -- header\n while True:\n line = f_obj.readline()\n if line[0] != \"#\":\n break\n\n # determine problem dimensions\n nlay = model.nlay\n dis = model.get_package(\"DIS\")\n if dis is None:\n dis = model.get_package(\"DISU\")\n njag = dis.njag\n\n # Item 1: ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET - line already read above\n if model.verbose:\n print(\" loading ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET...\")\n text_list = line_parse(line)\n ipakcb, hdry, iwdflg, wetfct, iwetit, ihdwet = (\n int(text_list[0]),\n float(text_list[1]),\n int(text_list[2]),\n float(text_list[3]),\n int(text_list[4]),\n int(text_list[5]),\n )\n\n ikvflag = type_from_iterable(\n text_list, index=6, _type=int, default_val=0\n )\n ikcflag = type_from_iterable(\n text_list, index=7, _type=int, default_val=0\n )\n\n # LAYCON array\n laycon, intercellt = cls._load_laycon(f_obj, model)\n\n # TRPY array\n if model.verbose:\n print(\" loading TRPY...\")\n trpy = Util2d.load(\n f_obj, model, (nlay,), np.float32, \"trpy\", ext_unit_dict\n )\n\n # property data for each layer based on options\n transient = not dis.steady.all()\n anis = any(t != 1 for t in trpy)\n anglex = 0\n if (not model.structured) and anis:\n if model.verbose:\n print(\"loading ANGLEX...\")\n anglex = Util2d.load(\n f_obj, model, (njag,), np.float32, \"anglex\", ext_unit_dict\n )\n\n # hy, kv, storage\n (sf1, tran, hy, vcont, sf2, wetdry, kv) = cls._load_layer_arrays(\n f_obj,\n model,\n nlay,\n ext_unit_dict,\n transient,\n laycon,\n ikvflag,\n ikcflag,\n iwdflg,\n )\n\n # Ksat mfusg\n ksat = 0\n if (not model.structured) and abs(ikcflag == 1):\n if model.verbose:\n print(\" loading ksat (njag)...\")\n ksat = Util2d.load(\n f_obj, model, (njag,), np.float32, \"ksat\", ext_unit_dict\n )\n\n f_obj.close()\n\n # set package unit number\n unitnumber, filenames = get_unitnumber_from_ext_unit_dict(\n model, cls, ext_unit_dict, ipakcb\n )\n\n # create instance of bcf object\n bcf = cls(\n model,\n ipakcb=ipakcb,\n intercellt=intercellt,\n laycon=laycon,\n trpy=trpy,\n hdry=hdry,\n iwdflg=iwdflg,\n wetfct=wetfct,\n iwetit=iwetit,\n ihdwet=ihdwet,\n ikvflag=ikvflag,\n ikcflag=ikcflag,\n tran=tran,\n hy=hy,\n vcont=vcont,\n kv=kv,\n anglex=anglex,\n ksat=ksat,\n sf1=sf1,\n sf2=sf2,\n wetdry=wetdry,\n unitnumber=unitnumber,\n filenames=filenames,\n )\n\n # return bcf object\n return bcf", "def init_from_obj_file(cls, f, scale=1, density=1):\n lines = [line.strip() for line in f.readlines()]\n vertices = []\n indexes = []\n for line in lines:\n if line.startswith(\"v\"): # vertex\n nums = list(map(float, string_to_list(line[2:])))\n vertices.append(scale * np.array(nums[:3]))\n # x.append(nums[0] * scale)\n # y.append(nums[1] * scale)\n # z.append(nums[2] * scale)\n elif line.startswith(\"f\"): # face\n nums = list(map(lambda a: int(a) - 1, string_to_list(line[2:])))\n indexes.append(nums)\n 
return cls(vertices, indexes, density)", "def __init__(self, yaml_file = 'options_modeling.yaml'):\n\n self.reproj_th = 2.5\n self.min_matched_views = 3\n self.descriptors = {'SIFT': 'sift'} # Descriptor name and module name\n self.mask_suffix = '*_mask.png'\n \n # If there is an options file, it will overwrite the defaults \n if yaml_file is not None:\n self.load(yaml_file)", "def main():\n input_source = \"../input1.txt\"\n # Make list, since the generator has to be used multiple times\n d = list(data_parser(input_source))\n return (solver_1star(d),solver_2star(d))", "def readsol_CLP(self,filename, lp, vs, variablesNames, constraintsNames, objectiveName):\n\t\tvalues = {}\n\n\t\treverseVn = {}\n\t\tfor k,n in variablesNames.iteritems():\n\t\t\treverseVn[n] = k\n\n\t\tfor v in vs:\n\t\t\tvalues[v.name] = 0.0\n\n\t\tstatus = LpStatusOptimal # status is very approximate\n\t\tf = file(filename)\n\t\tfor l in f:\n\t\t\tif len(l)<=2: break\n\t\t\tif l[:2] == \"**\":\n\t\t\t\tstatus = LpStatusInfeasible\n\t\t\t\tl = l[2:]\n\t\t\tl = l.split()\n\t\t\tvn = l[1]\n\t\t\tif vn in reverseVn:\n\t\t\t\tvalues[reverseVn[vn]] = float(l[2])\n\t\treturn status, values", "def from_txt(cls, filename, seq_length):\n contact_dict, energies = cls.get_contacts_from_txt(filename)\n\n # dot bracket list for all structurs\n all_struct_dot_bracket_dict = {}\n\n # iteratre over len and keys of the contact dict\n for struc_indx, struc_key in enumerate(contact_dict.keys()):\n\n # current structure in the contact dict\n current_entry = contact_dict[struc_key]\n\n # dot bracket list for current entry - # dot initialized\n current_entry_DB_list = \".\" * seq_length\n current_entry_DB_list = list(current_entry_DB_list)\n\n # iterate over the pair tuples in the\n for pair in current_entry:\n\n # get indices from tuple\n open_par_indx = int(pair[0])\n clos_par_indx = int(pair[1])\n\n # open par\n current_entry_DB_list[open_par_indx] = \"(\"\n\n # closed par\n current_entry_DB_list[clos_par_indx] = \"(\"\n\n # save to dot bracket dict\n all_struct_dot_bracket_dict[struc_key] = ''.join(current_entry_DB_list)\n\n return cls(subopt_folds=all_struct_dot_bracket_dict, length=seq_length)", "def __init__(self, path = None, keepFiles = 0, mip = 1,\n\t\t\tmsg = 1, cuts = 1, presolve = 1, dual = 1, strong = 5, options = []):\n\t\tLpSolver_CMD.__init__(self, path, keepFiles, mip, msg, options)\n\t\tself.cuts = cuts\n\t\tself.presolve = presolve\n\t\tself.dual = dual\n\t\tself.strong = strong", "def __init__(self, project=None):\n HyppopySolver.__init__(self, project)", "def parse_model(f_name):\n if os.path.isfile(f_name):\n with open(f_name) as f:\n w = [[], [], [], [], []]\n for i, line in enumerate(f):\n for v in line.strip().split(\" \"):\n w[i].append(float(v))\n return np.matrix(w)\n else:\n error(\"parse model - not a file: %s\" % f_name)", "def load(self, filename):\n with open(filename, 'r') as f:\n self.pca.set_params(pickle.load(f))\n self.fit = True", "def from_file(cls,\n weight_path,\n optimizer='AdaGrad',\n learning_rate=0.1,\n num_workers=1):\n return cls(weight_path=weight_path, \\\n optimizer=optimizer, learning_rate=learning_rate, init_mode='file', num_workers=num_workers)", "def __init__(self, filepath, envmap=None):\n if envmap is None:\n envmap = default_envmap\n# envmap = {'py':pytex.OuterBlock, 'pyno':pytex.NoOutBlock, 'pyans':pytex.AnswerBlock, 'pyfig':pytex.FigureBlock, 'pyi':pytex.PyInLineBlock, 'pyq':pytex.QuestionBlock, 'pyine':pytex.PyInLineNoExecuteBlock}\n self.envmap = envmap\n self.envstr = 
'|'.join(self.envmap.keys())\n self.pat = r'('+self.envstr+'){'\n self.p = re.compile(self.pat)\n self.envmap = envmap\n self.path = filepath\n basepath, filename = os.path.split(self.path)\n self.basepath = basepath\n rawlist = pytex.readfile(self.path)\n self.rawlist = rawlist\n self.lines = copy.copy(self.rawlist)\n self.ind = 0\n self.lhslist = []", "def read_file(self, file_src):\n with open(file_src, \"r\") as fobj:\n grammar = Grammar()\n settings = Settings()\n for line in fobj:\n rhs = None #right-hand-side of a rule\n lhs = None #left-hand-side of a rule\n state = \"lhs\"\n words = line.rstrip().split()\n for word in words:\n if (words.index(word) == 0 and word == \"axiom:\"):\n words.remove(word)\n grammar.axiom = ' '.join(words)\n elif (words.index(word) > 0 and words[0] == \"angle_z:\"):\n settings.angle_z_min = int(words[1])\n settings.angle_z_max = int(words[3])\n elif (words.index(word) > 0 and words[0] == \"angle_y:\"):\n settings.angle_y_min = int(words[1])\n settings.angle_y_max = int(words[3])\n elif (words.index(word) > 0 and words[0] == \"angle_x:\"):\n settings.angle_x_min = int(words[1])\n settings.angle_x_max = int(words[3])\n elif (words.index(word) > 0 and words[0] == \"branch-shortening:\"):\n settings.branch_min = float(words[1])\n settings.branch_max = float(words[3])\n #elif (words.index(word) > 0 and words[0] == \"num_sides:\"):\n #grammar.num_sides = int(words[1])\n elif (words.index(word) > 0 and words[0] == \"base_radius:\"):\n settings.base_radius = float(words[1])\n elif (words.index(word) > 0 and words[0] == \"rules:\"):\n if(state == \"lhs\"):\n lhs = word\n if(lhs not in grammar.variables):\n grammar.variables.add(lhs)\n state = \"rhs\"\n continue\n if(state == \"rhs\" and word != \"->\"):\n rhs = word\n if(\",\" in rhs):\n rhs = rhs.replace(\",\", \"\")\n grammar.rules.add(Rule(lhs,rhs))\n state = \"lhs\"\n elif (words.index(word) > 0 and words[0] == \"generations:\"):\n settings.generations = int(words[1])\n elif (words.index(word) > 0 and words[0] == \"base_length:\"):\n settings.base_length = float(words[1])\n elif (words.index(word) > 0 and words[0] == \"bark_texture:\"):\n settings.bark_path = words[1]\n elif (words.index(word) > 0 and words[0] == \"leaf_texture:\"):\n settings.leaf_path = words[1]\n return [grammar, settings]", "def parseLcalcfile(self, filecontents):\n \n lines = filecontents.split('\\n',6)\n self.coefficient_type = int(lines[0])\n self.quasidegree = int(lines[4])\n lines = self.lcalcfile.split('\\n',8+2*self.quasidegree)\n self.Q_fe = float(lines[5+2*self.quasidegree])\n self.sign = pair2complex(lines[6+2*self.quasidegree])\n\n self.kappa_fe = []\n self.lambda_fe = []\n self.mu_fe = []\n self.nu_fe = []\n\n for i in range(self.quasidegree):\n localdegree = float(lines[5+2*i])\n self.kappa_fe.append(localdegree)\n locallambda = pair2complex(lines[6+2*i])\n self.lambda_fe.append(locallambda)\n if math.fabs(localdegree-0.5)<0.00001:\n self.mu_fe.append(2*locallambda)\n elif math.fabs(localdegree-1)<0.00001:\n self.nu_fe.append(locallambda)\n else:\n self.nu_fe.append(locallambda)\n self.langlands = False\n\n \"\"\" Do poles here later\n \"\"\"\n \n self.degree = int(round(2*sum(self.kappa_fe)))\n\n self.level = int(round(math.pi**float(self.degree) * 4**len(self.nu_fe) * self.Q_fe**2 ))\n # note: math.pi was not compatible with the sage type of degree\n\n self.dirichlet_coefficients = splitcoeff(lines[-1])", "def main():\n dirname = os.path.dirname(__file__)\n input_source = os.path.join(dirname, '..', 'input1.txt')\n # Make 
list, since the generator has to be used multiple times\n d = data_parser(input_source)\n return (solver_1star(d), solver_2star(d))", "def init_from_file(self):\n self.src.load('start.00') \n self.oe1.load('start.01')\n #self.det.load('start.02')\n print('NOTE: variables loaded from start.00/start.01 files')", "def _create_solver(self):\n # https://petsc.org/release/docs/manualpages/KSP/KSPType.html\n iterative = [\n 'richardson', 'chebyshev', 'cg', 'groppcg', 'pipecg', 'pipecgrr',\n 'cgne', 'nash', 'stcg', 'gltr', 'fcg', 'pipefcg', 'gmres',\n 'pipefgmres', 'fgmres', 'lgmres', 'dgmres', 'pgmres', 'tcqmr',\n 'bcgs', 'ibcgs', 'fbcgs', 'fbcgsr', 'bcgsl', 'pipebcgs', 'cgs',\n 'tfqmr', 'cr', 'pipecr', 'lsqr', 'preonly', 'qcg', 'bicg',\n 'minres', 'symmlq', 'lcd', 'python', 'gcr', 'pipegcr', 'tsirm',\n 'cgls', 'fetidp']\n # https://petsc.org/release/docs/manualpages/PC/PCType.html\n preconditioners = [\n 'none', 'jacobi', 'sor', 'lu', 'shell', 'bjacobi', 'mg',\n 'eisenstat', 'ilu', 'icc', 'asm', 'gasm', 'ksp', 'composite',\n 'redundant', 'spai', 'nn', 'cholesky', 'pbjacobi', 'mat', 'hypre',\n 'parms', 'fieldsplit', 'tfs', 'ml', 'galerkin', 'exotic', 'cp',\n 'bfbt', 'lsc', 'python', 'pfmg', 'syspfmg', 'redistribute', 'svd',\n 'gamg', 'sacusp', 'sacusppoly', 'bicgstabcusp', 'ainvcusp',\n 'chowiluviennacl', 'rowscalingviennacl', 'saviennacl', 'bddc',\n 'kaczmarz', 'telescope']\n direct_lu = ['mumps', 'superlu_dist', 'umfpack', 'klu']\n direct_cholesky = ['mumps', 'cholmod']\n valid_solvers = iterative + direct_lu + direct_cholesky\n\n solver = self.solver_type\n preconditioner = self.preconditioner\n\n if solver not in valid_solvers:\n raise Exception(f\"{solver} solver not availabe, choose another solver\")\n if preconditioner not in preconditioners:\n raise Exception(f\"{preconditioner} not found, choose another preconditioner\")\n\n self.ksp = PETSc.KSP()\n self.ksp.create(PETSc.COMM_WORLD)\n\n if solver in direct_lu:\n self.ksp.getPC().setType('lu')\n self.ksp.getPC().setFactorSolverType(solver)\n self.ksp.setType('preonly')\n elif solver in direct_cholesky:\n self.ksp.getPC().setType('cholesky')\n self.ksp.getPC().setFactorSolverType(solver)\n self.ksp.setType('preonly')\n elif solver in preconditioners:\n self.ksp.getPC().setType(solver)\n self.ksp.setType('preonly')\n elif solver in iterative:\n self.ksp.getPC().setType(preconditioner)\n self.ksp.setType(solver)", "def init_from_file(self,file_name):\n with open(file_name, mode='rb') as file: # b is important -> binary\n file_content = file.read(1)\n x = file_content\n ct = int.from_bytes(x, byteorder='little', signed=False)\n file_content = file.read(ct)\n header = file_content.decode().split(\" \")\n vindex = header.index('-vectortype')\n vectortype = header[vindex + 1]\n\n if vectortype != 'REAL':\n print('Can\\'t initialize real vector store from ',vectortype,' vectors.')\n return\n\n #read in vectors and wrap in RealVectors\n incoming_terms, incoming_vectors = svu.readfile(file_name)\n self.init_from_lists(incoming_terms,incoming_vectors)", "def load(self, name=\"\"):\n\n self.constructed = True\n if name == \"\":\n name = \"/home/unai/Escritorio/MultiNetwork/model/model\"\n\n network_descriptors = {\"Generic\": GenericDescriptor, \"Decoder\": DecoderDescriptor, \"Discrete\": DiscreteDescriptor, \"Convolution\": ConvolutionDescriptor}\n\n if not os.path.isfile(name):\n print(\"Error at loading the model\")\n return None\n\n f = open(name, \"r+\")\n\n lines = f.readlines()\n\n i = 0\n while lines[i] != \"\\n\": # Each component is stored 
in a line\n ident, n_inp, kind, n_hidden, layers, init, act, cond_rand, taking, producing, depth, reachable, belows = lines[i][:-1].split(\"_\")\n kwargs = {}\n if int(ident[1:]) > self.last_net:\n self.last_net = int(ident[1:])\n\n self.reachable[ident] = reachable.split(\",\")\n self.comps_below[ident] = belows.split(\",\")\n\n if \"onv\" in kind: # Not working right now\n filters, sizes, layers, strides = layers.split(\"*\")\n sizes = sizes.split(\",\")\n s = np.array([[int(sz) for sz in szs.split(\"/\")] for szs in sizes])\n desc = network_descriptors[kind](int(inp), int(outp), int(n_inp), layers.split(\",\"), filters.split(\",\"), [int(x) for x in strides.split(\",\")], s, [int(x) for x in act.split(\",\")], [int(x) for x in init.split(\",\")], kwargs)\n else:\n if len(kwargs) > 0: # Not working right now\n kwargs = kwargs.split(\"-\")\n kwargs[0] = [int(x) for x in kwargs[0].split(\".\") if len(x) > 0]\n kwargs[1] = [int(x) for x in kwargs[1].split(\".\") if len(x) > 0]\n if len(cond_rand) > 0:\n cond_rand = cond_rand.split(\"-\")\n cond_rand[0] = [int(x) for x in cond_rand[0].split(\",\") if len(x) > 0]\n cond_rand[1] = [int(x) for x in cond_rand[1].split(\",\") if len(x) > 0]\n kwargs[\"conds\"] = cond_rand\n desc = network_descriptors[kind](int(taking.split(\",\")[0]), int(producing.split(\",\")[0]), int(n_inp), int(n_hidden), [int(x) for x in layers.split(\",\") if x != \"-1\"], init_functions[[int(x) for x in init.split(\",\") if x != \"-1\"]],\n act_functions[[int(x) for x in act.split(\",\") if x != \"-1\"]], **kwargs)\n\n # print(\"ident\", ident, \"n_inp\", n_inp, \"kind\", kind, \"inp\", inp, \"outp\", outp, \"layers\", layers, \"init\", init, \"act\", act, \"taking\", taking, \"producing\", producing, \"depth\", depth, \"kwargs\", kwargs)\n net = NetworkComp(desc, InOut(size=int(taking.split(\",\")[0]), data_type=taking.split(\",\")[1]), InOut(data_type=producing.split(\",\")[1], size=int(producing.split(\",\")[0])), int(depth))\n\n self.add_net(net, ident)\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Inputs\n\n ident, size, kind, depth = lines[i].split(\"_\")\n\n self.inputs[ident] = ModelComponent(None, InOut(size=int(size), data_type=kind), int(depth))\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Outputs\n\n ident, size, kind, depth, belows = lines[i].split(\"_\")\n\n self.outputs[ident] = ModelComponent(InOut(size=int(size), data_type=kind), None, int(depth))\n self.comps_below[ident] = belows.split(\",\")\n i += 1\n\n i += 1\n\n while i < len(lines): # Connections\n name, inp, outp, kind, size = lines[i].split(\"_\")\n\n if int(name[1:]) > self.last_con:\n self.last_con = int(name[1:])\n\n self.connections[name] = Connection(inp, outp, InOut(kind, int(size)), name)\n i += 1\n self.update_below()", "def load_dimacs_cnf_file(cnf_file):\n file = open(cnf_file, 'r')\n\n tVariables = -1\n tClauses = -1\n clause = []\n variables = []\n\n current_clause = []\n\n for line in file:\n data = line.split()\n\n if len(data) == 0:\n continue\n if data[0] == 'c':\n continue\n if data[0] == 'p':\n tVariables = int(data[2])\n tClauses = int(data[3])\n continue\n if data[0] == '%':\n break\n if tVariables == -1 or tClauses == -1:\n print(\"Error, unexpected data\")\n sys.exit(0)\n\n for var_i in data:\n literal = int(var_i)\n if literal == 0:\n clause.append(current_clause)\n current_clause = []\n continue\n var = literal\n if var < 0:\n var = -var\n if var not in variables:\n variables.append(var)\n current_clause.append(literal)\n\n if tVariables != len(variables):\n 
print(\"Unexpected number of variables in the problem\")\n print(\"Variables\", tVariables, \"len: \", len(variables))\n print(variables)\n sys.exit(0)\n if tClauses != len(clause):\n print(\"Unexpected number of clauses in the problem\")\n sys.exit(0)\n file.close()\n return [variables, clause]", "def __init__(self, model, **kwargs):\n super(CpoSolver, self).__init__()\n self.agent = None\n self.process_infos = CpoProcessInfos()\n self.cpostr = None\n self.expr_map = None\n self.blackbox_map = None\n self.last_result = None\n self.status = STATUS_IDLE\n self.status_lock = threading.Lock()\n self.listeners = []\n self.callbacks = []\n self.operation = None\n self.abort_supported = False\n self.model_published = False\n self.model_sent = False\n self.callbacks_registered = False\n\n # Build effective context from args\n # OO's version\n # context = config._get_effective_context(**kwargs)\n # context.params = model.merge_with_parameters(context.params)\n ## trying to fix CP#303\n ctx = config._get_effective_context()\n if model.parameters:\n ctx.params.set_other(model.parameters)\n ctx = config._get_effective_context(context=ctx, **kwargs)\n\n # If defined, limit the number of threads\n mxt = ctx.solver.max_threads\n if isinstance(mxt, int):\n # Maximize number of workers\n nbw = ctx.params.Workers\n if (nbw is None) or (nbw > mxt):\n ctx.params.Workers = mxt\n print(\"WARNING: Number of workers has been reduced to \" + str(mxt) + \" to comply with platform limitations.\")\n\n # Save attributes\n self.model = model\n self.context = ctx\n\n # Determine appropriate solver agent\n self.agent = self._get_solver_agent()\n self.abort_supported = self.agent._is_abort_search_supported()\n\n # Add configured default listeners if any\n # Note: calling solver_created() is not required as it is done by add_listener().\n lstnrs = ctx.solver.listeners\n if lstnrs is not None:\n if is_array(lstnrs):\n for lstnr in lstnrs:\n self._add_listener_from_class(lstnr)\n else:\n self._add_listener_from_class(lstnrs)", "def main() -> None:\n with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n for solution in solve(input_file):\n print(solution)", "def main() -> None:\n with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n for solution in solve(input_file):\n print(solution)", "def main() -> None:\n with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n for solution in solve(input_file):\n print(solution)", "def __init__(self,\n weights_path,\n config_path=DEFAULT_CONFIG_PATH):\n self.weights = weights_path\n self.config = process_config(config_path)\n self._load_vocab()\n self.model = CTCModel(self.config)\n self.model.load(self.weights)", "def solve_CBC(self, lp):\n\t\tif not self.executable(self.path[1]):\n\t\t\traise \"PuLP: cannot execute \"+self.path[1]\n\t\tif not self.keepFiles:\n\t\t\tpid = os.getpid()\n\t\t\ttmpLp = os.path.join(self.tmpDir, \"%d-pulp.mps\" % pid)\n\t\t\ttmpSol = os.path.join(self.tmpDir, \"%d-pulp.sol\" % pid)\n\t\telse:\n\t\t\ttmpLp = lp.name+\"-pulp.mps\"\n\t\t\ttmpSol = lp.name+\"-pulp.sol\"\n##\t\tvs, variablesNames, constraintsNames, objectiveName = lp.writeMPS(tmpLp, rename = 1)\r\n\t\tvs = lp.writeMPS(tmpLp, rename = 0)\n\t\tif not self.msg:\n\t\t\tcbc = os.popen(self.path[1]+\" - > /dev/null 2> /dev/null\",\"w\")\n\t\telse:\n\t\t\tcbc = os.popen(self.path[1]+\" -\",\"w\")\n\t\tcbc.write(\"import \"+tmpLp+\"\\n\")\n\t\tif self.presolve:\n\t\t\tcbc.write(\"presolve on\\n\")\n\t\tcbc.write(\"strong %d\\n\" % 
self.strong)\n\t\tif self.cuts:\n\t\t\tcbc.write(\"gomory on\\n\")\n\t\t\tcbc.write(\"oddhole on\\n\")\n\t\t\tcbc.write(\"knapsack on\\n\")\n\t\t\tcbc.write(\"probing on\\n\")\n\t\tfor option in self.options:\n\t\t\tcbc.write(option+\"\\n\")\n\t\tif lp.sense == LpMinimize:\n\t\t\tcbc.write(\"min\\n\")\n\t\telse:\n\t\t\tcbc.write(\"max\\n\")\n\t\tif self.mip:\n\t\t\tcbc.write(\"branch\\n\")\n\t\telse:\n\t\t\tcbc.write(\"initialSolve\\n\")\n\t\tcbc.write(\"solution \"+tmpSol+\"\\n\")\n\t\tcbc.write(\"quit\\n\")\n\t\tif cbc.close() != None:\n\t\t\traise \"PuLP: Error while trying to execute \"+self.path[1]\n\t\tif not os.path.exists(tmpSol):\n\t\t\traise \"PuLP: Error while executing \"+self.path[1]\n\t\tlp.status, values = self.readsol_CBC(tmpSol, lp, vs)\n\t\tlp.assign(values)\n\t\tif not self.keepFiles:\n\t\t\ttry: os.remove(tmpLp)\n\t\t\texcept: pass\n\t\t\ttry: os.remove(tmpSol)\n\t\t\texcept: pass\n\t\treturn lp.status", "def parse( cls, filename, verbose = False ) :\n if verbose : sys.stdout.write( \"%s.parse(%s)\\n\" % (cls.__name__, filename,) )\n\n infile = os.path.realpath( filename )\n dat = cls( verbose )\n\n with open( infile, \"rU\" ) as inf :\n expt_num = None\n for line in inf :\n if verbose :\n sys.stdout.write( line )\n\n m = dat.version_pat.search( line )\n if m :\n dat.version = m.group( 1 )\n continue\n\n m = dat.expt_pat.search( line )\n if m :\n expt_num = int( m.group( 1 ) )\n par_set = m.group( 2 ).upper()\n\n if not par_set in bmrbmb.topspin.EXPERIMENTS.keys() :\n raise Exception( \"Unknown experiment parameter set: %s\" % (m.group( 2 ),) )\n\n# adapted sweep width HSQC\n#\n if (par_set == \"HSQCETGP\") and (m.group( 3 ) is not None) :\n expt_name = \"2D 1H-13C HSQC SW small\"\n else :\n expt_name = bmrbmb.topspin.EXPERIMENTS[par_set]\n\n dat.data[expt_num] = { \"name\" : expt_name }\n\n# next line should have experiment details\n# 1 or 2D only\n#\n\n m = dat.dim_pat.search( line )\n if m :\n if expt_num is None :\n raise Exception( \"Experiment detail without parameter set\" )\n\n dims = { m.group( 1 ) : { \"nuc\" : m.group( 2 ), \"sw\" : m.group( 3 ) } }\n if m.group( 4 ) is not None :\n dims[m.group( 4 )] = { \"nuc\" : m.group( 5 ), \"sw\" : m.group( 6 ) }\n\n dat.data[expt_num][\"dims\"] = dims\n\n expt_num = None\n\n return dat", "def vaex_vertices_from_plyfile(filename):\n xyz = vertex_dict_from_plyfile(filename)\n return vx.from_dict(xyz)", "def parse_file(self):\n for num, line in enumerate(self._text):\n if \"CRYSTAL STRUCTURE SOLUTION\" in line:\n line = line.strip().strip('+').strip()\n if 'SHELXTL' in line:\n self.version = 'SHELXT ' + line.split()[-1]\n if line.strip().startswith('R1 Rweak Alpha'):\n for n in range(100):\n if not self._text[num + 1 + n]:\n break\n if self._text[num + 1]:\n self.solutions[self._text[num + 1 + n][58:76].strip()] = self._text[num + 1 + n][37:51].strip()", "def initialize(filename='params.yaml'):\n home_path = str(Path.home())\n project_path = 'Documents/SideProjects/sailboatsfactory'\n work_path = 'src/nn-core'\n params_path = join(home_path, join(project_path, work_path))\n yaml_file = join(params_path, filename)\n print(\"Reading parameters from:\", filename)\n with open(yaml_file, 'r') as f:\n my_params = load(f)\n my_params['x_scaler'] = MinMaxScaler(feature_range=(-1, 1))\n my_params['y_scaler'] = MinMaxScaler(feature_range=(-1, 1))\n\n raw = data.read(my_params)\n adjusted = adjust(raw, my_params)\n\n return adjusted, my_params", "def from_file(\n filename: str, *, formatter: Optional[ModelFormatter] = None\n ) -> 
\"Model\":\n # change the cwd to the the directory containing the file\n filename = os.path.abspath(filename)\n cwd = os.getcwd()\n dir, _ = os.path.split(filename)\n os.chdir(dir)\n\n # parse the file\n with open(filename, \"r\") as file:\n component = Model.from_string(file.read(), formatter=formatter)\n file.close()\n\n # restore the cwd\n os.chdir(cwd)\n\n return component", "def from_file(file_path: str) -> \"Configuration\":\n\n with open(file_path, encoding=\"utf-8\") as config_file:\n return Configuration(cast(Dict[str, Any], toml.load(config_file)))", "def loadfile(filename):\n with open(filename,'r') as fin:\n lines = fin.readlines()\n\n fixnames = []\n freenames = []\n\n config_list = []\n for line in [L.strip() for L in lines if L.strip() != '']:\n if not line[0]=='#':\n if 'Ebase' in line:\n Ebase = float(line.split(':')[1].strip())\n elif 'fixnames' in line.lower():\n fixnames = line.split(':')[1].strip().split()\n elif 'freenames' in line.lower():\n freenames = line.split(':')[1].strip().split()\n elif 'fixed' in line.lower():\n fixed = [float(val) for val in\n line.split(':')[1].strip().split()]\n elif 'free' in line.lower():\n free = [float(val) for val in\n line.split(':')[1].strip().split()]\n else:\n name = ''\n entry = line.split(':')\n if len(entry)==3:\n name = entry[0].strip()\n Eref = float(entry[-2])\n values = [int(val) for val in entry[-1].split()]\n nvector = values[:len(fixed)]\n mvector = values[len(fixed):]\n config_list.append(LGHconfig(nvector=nvector,mvector=mvector,\n Eref=Eref,name=name))\n return LGH(base=Ebase,fixed = fixed, free = free,\n config_list=config_list,\n fixnames=fixnames,\n freenames=freenames)", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). 
\n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def from_file(path, scale):\n from imageio import imread\n imgarr = imread(path)\n s = imgarr.shape\n extx, exty = (s[1] * scale) / 2, (s[0] * scale) / 2\n ux, uy = e.arange(-extx, extx, scale), e.arange(-exty, exty, scale)\n return Convolvable(data=e.flip(imgarr, axis=0).astype(config.precision),\n x=ux, y=uy, has_analytic_ft=False)", "def createLsystemFromFile( filename ):\n\tfp = open(filename, \"r\")\n\tlines = fp.readlines()\n\tfp.close()\n\tlsys = init()\n\tfor line in lines:\n\t\twords = line.split()\n\t\tif words[0] == 'base':\n\t\t\tsetBase(lsys, words[1])\n\t\telif words[0] == 'rule':\n\t\t\taddRule(lsys, words[1:])\n\treturn lsys", "def __init__(self, data_file, target_file, base_vocab='!abcdefghijklmnopqrstuvwqxyz'):\n raw_data = readFileIntoArray(data_file)\n raw_targets = readFileIntoArray(target_file)\n\n \"\"\" Initialize the underlying vocabulary by assigning vectors to letters \"\"\"\n self.base_vocab = base_vocab # Maybe generate this procedurally\n self.vocab = generateVocabVectors(self.base_vocab)\n\n \"\"\" Convert the targets to a vector \"\"\"\n self.targetTranslate = set(raw_targets)\n optDict = dict(zip(self.targetTranslate, range(0, len(self.targetTranslate))))\n self.targets = np.ndarray([len(raw_targets)])\n for i in range(len(raw_targets)):\n self.targets[i] = optDict[raw_targets[i]]\n self.targets = self.targets.astype(np.int32)\n\n \"\"\" Calculate the max vector length \"\"\"\n # (we won't need this once we fix our underlying chainer model)\n self.max_phrase_len = 0\n for phrase in raw_data:\n if (len(phrase) > self.max_phrase_len):\n self.max_phrase_len = len(phrase)\n self.max_vector_len = self.max_phrase_len * len(self.base_vocab)\n\n \"\"\" Convert data to vectors \"\"\"\n k = []\n for phrase in raw_data:\n k.append(stringToVector(phrase, self.vocab, self.max_vector_len))\n self.data = np.asarray(k)\n\n \"\"\" Do not yet initialize the trainer -- we can retrain it later. 
\"\"\"\n self.trainer = None", "def build_pcfg(self):\n part = 0 # 0 for grammar, 1 for lexicon\n rule = r'(\\d*\\.\\d*)\\ (.*)->(.*)[\\n]*'\n\n with open(self.grammar_txt) as file:\n for line in file:\n if line == 'Grammar\\n':\n continue\n elif line == 'Lexicon\\n':\n part = 1\n else:\n line = [s for s in re.split(rule, line) if s]\n prob, parent, child = line[0], line[1], line[2]\n if part is 0: # Grammar part\n child = tuple(i for i in child.split())\n self.grammar[parent][child] = Decimal(prob)\n else: # Lexicon part\n self.lexicon[parent][child.lower()] = Decimal(prob)\n # print_nested_dict(self.grammar)\n # print_nested_dict(self.lexicon)\n file.close()", "def load_config_from_file(config_file, protobuf):\n if not tf.io.gfile.exists(config_file):\n raise IOError(\"{} does not exist!\".format(config_file))\n with tf.gfile.Open(config_file, \"r\") as reader:\n proto = text_format.Parse(reader.read(), protobuf)\n return proto", "def __init__(self, fileformat='POSCAR', filename=None, \\\n lattice=None, atom_type=None, composition=None, coordinate=None):\n if fileformat == 'POSCAR':\n self.from_POSCAR(filename)\n elif fileformat == 'cif':\n self.from_cif(filename)\n else:\n self.from_dict(lattice, atom_type, composition, coordinate)", "def from_file(cls, file): \n try:\n import dill as pickle\n except ImportError:\n logger.error(\"Cannot import from file, dill not installed\")\n return None\n model = pickle.load(open(file,'rb'))\n if type(model) == GeologicalModel:\n logger.info('GeologicalModel initialised from file')\n return model\n else:\n logger.error('{} does not contain a geological model'.format(file))\n return None", "def __init__(self, prototxt=os.path.join(settings.DEPENDENCIES_PATH, 'face-py-faster-rcnn', 'models', 'face', 'VGG16', 'faster_rcnn_end2end', 'test.prototxt'),\n caffemodel=settings.GPU_FACE_DETECTION_CAFFE_MODEL,\n face_rect_expand_factor=FACE_RECT_EXPAND_FACTOR,\n enable_cuda=settings.CUDA_ENABLED):\n self.is_cuda_enable = enable_cuda\n self.prototxt = prototxt\n self.caffemodel = caffemodel\n self.face_rect_expand_factor = face_rect_expand_factor\n self.net = caffe.Net(self.prototxt, self.caffemodel, caffe.TEST)", "def read_from(self, filename):\n if os.path.exists(filename):\n logger.info(\"Reading parameters from file {0}\".format(filename))\n cl, icoord, ispec, ireg, xori, yori, dx, dy, nx,\\\n ny, valex, snr, varbak = np.loadtxt(filename, comments='#', unpack=True)\n\n self.cl = cl\n self.icoordchange = int(icoord)\n self.ispec = int(ispec)\n self.ireg = int(ireg)\n self.xori = xori\n self.yori = yori\n self.dx = dx\n self.dy = dy\n self.nx = int(nx)\n self.ny = int(ny)\n self.valex = valex\n self.snr = snr\n self.varbak = varbak\n\n # Compute domain limits for later use\n self.xend = self.xori + (self.nx - 1) * self.dx\n self.yend = self.yori + (self.ny - 1) * self.dy\n\n return self\n else:\n logger.error(\"File {0} does not exist\".format(filename))\n raise FileNotFoundError('File does not exist')", "def __init__(self, workdir, encut, struct_path, name=\"relax_bwmn\"): \n potcar_path = \"../pseudos/BWO_Mn_POTCAR\" \n kgrid = [2, 2, 2] \n input_param = DefaultOptimizationParameters(encut) \n relax_calc = SCFCalculation(workdir, pseudo_par=None, kgrid=kgrid, name=\"BWO_Mn_relax\", encut=encut, input_parameters=input_param) \n relax_calc.make_calculation(struct_path, potcar_path=potcar_path)", "def initFromFile(self,file):\n self.source = file\n file_reader = open(file,\"r\")\n self.isInit = True\n lineCounter = 0\n firstLine = None\n SecondLine = None\n 
ThirdLine = None\n for line in file_reader:\n if(lineCounter == 0):\n firstLine = line.split()\n self.rowsNumber = int(firstLine[0])\n self.columnsNumber = int(firstLine[1])\n self.routerRangeRadius = int(firstLine[2])\n if(lineCounter == 1):\n SecondLine = line.split()\n self.backBoneCosts = int(SecondLine[0])\n Path.backBoneCost = self.backBoneCosts\n self.routerCosts = int(SecondLine[1])\n self.budget = int(SecondLine[2])\n if(lineCounter == 2):\n ThirdLine = line.split()\n self.firstCell = Cell(int(ThirdLine[0]),int(ThirdLine[1]))\n if(lineCounter>2):\n self.map.append([])\n LINE = line\n columnCounter = 0\n for char in LINE:\n temp = Cell(len(self.map)-1,columnCounter,Cell.getCellType(char))\n self.map[len(self.map)-1].append(temp)\n if(temp.cellType == \"FLOOR\"):\n self.notComputeRouter.append(temp)\n columnCounter += 1\n lineCounter +=1\n self.isInit = True", "def loadProblem(file = \"problem.py\", variable = \"problemMatrix\"):\n\n namespace = dict()\n with open(file) as handle:\n exec(handle.read(), namespace)\n return peak.createProblem(namespace[variable])", "def __init__(self, bc_file):\r\n self.bc_file = bc_file\r\n self.beta = []\r\n self.code = []\r\n self.load_bc()", "def office_prepare_solver(parser, args, params):\n parser.parse_known_args(args)\n control.prepare_solver(params)", "def from_text_file(cls, filename):\n raise NotImplementedError()", "def loadFromFile(cls , filename):\n if FortIO.isFortranFile( filename ):\n return EclGrid( filename )\n else:\n return EclGrid.loadFromGrdecl( filename )", "def __init__(self, fileinit, filegoal):\n \n self.board = Board(filegoal)\n self.numbernodes = 0\n Io = IO(fileinit)\n Io.init_reader()\n self.boxes = []\n i = 0\n j = 0\n for line in Io.file :\n for elem in line :\n if elem == \"$\" :\n self.boxes.append(Box(i,j))\n elif elem == \"@\":\n self.char = Char(i,j)\n j+=1\n j=0\n i+=1\n self.direction = [Direction.UP,Direction.DOWN,Direction.LEFT,Direction.RIGHT]\n \n self.goalsize = len(self.board.positionGoal)\n self.listCombi = heuristic.make_combi(self.board.positionGoal,self.goalsize)\n self.mini = 10000\n Problem.__init__(self, State(self.board,self.boxes,self.char,[]))", "def load(self, filepath):\n try:\n ckpt = torch.load(filepath, map_location=self.device)\n except Exception as e:\n print('Could not load file: {}'.format(e))\n sys.exit()\n try:\n self.load_state_dict(ckpt['ae'])\n except Exception as e:\n print('Could not load model state dict: {}'.format(e))\n try:\n self.optimizer.load_state_dict(ckpt['optimizer'])\n except Exception as e:\n print('Could not load optimizer state dict: {}'.format(e))", "def __init__(self, filepath, envmap=None):\n if envmap is None:\n #print('using default_envmap:'+str(default_envmap.keys()))\n envmap = default_envmap\n self.envmap = envmap\n self.envstr = '|'.join(self.envmap.keys())\n self.pat = r'^\\\\('+self.envstr+'){'\n self.p = re.compile(self.pat)\n self.envmap = envmap\n self.path = filepath\n basepath, filename = os.path.split(self.path)\n self.basepath = basepath\n rawlist = pytex.readfile(self.path)\n self.rawlist = rawlist\n self.lines = copy.copy(self.rawlist)\n self.ind = 0\n self.lhslist = []", "def read_vasp(file_path):\r\n with open(file_path, 'r') as f:\r\n return eval(f.read())", "def office_setup_solver(parser, args, params):\n parser.parse_known_args(args)\n control.setup_solver(params)", "def __init__(self, domainfile, problemfile):\n # domain\n inp = FileStream(domainfile)\n lexer = pddlLexer(inp)\n stream = CommonTokenStream(lexer)\n parser = 
pddlParser(stream)\n tree = parser.domain()\n self.domain = DomainListener()\n walker = ParseTreeWalker()\n walker.walk(self.domain, tree)\n # problem\n inp = FileStream(problemfile)\n lexer = pddlLexer(inp)\n stream = CommonTokenStream(lexer)\n parser = pddlParser(stream)\n tree = parser.problem()\n self.problem = ProblemListener()\n walker = ParseTreeWalker()\n walker.walk(self.problem, tree)\n # variable ground space for each operator.\n # a dict where keys are op names and values\n # a dict where keys are var names and values\n # a list of possible symbols.\n self.vargroundspace = {}", "def build(self, name='d4pg'):\n program = lp.Program(name=name)\n\n with program.group('replay'):\n replay = program.add_node(lp.ReverbNode(self.replay))\n\n with program.group('counter'):\n counter = program.add_node(lp.CourierNode(self.counter))\n\n if self._max_actor_steps:\n with program.group('coordinator'):\n _ = program.add_node(lp.CourierNode(self.coordinator, counter))\n\n with program.group('learner'):\n learner = program.add_node(lp.CourierNode(self.learner, replay, counter))\n\n with program.group('evaluator'):\n program.add_node(lp.CourierNode(self.evaluator, learner, counter))\n\n if not self._num_caches:\n # Use our learner as a single variable source.\n sources = [learner]\n else:\n with program.group('cacher'):\n # Create a set of learner caches.\n sources = []\n for _ in range(self._num_caches):\n cacher = program.add_node(\n lp.CacherNode(\n learner, refresh_interval_ms=2000, stale_after_ms=4000))\n sources.append(cacher)\n\n with program.group('actor'):\n # Add actors which pull round-robin from our variable sources.\n for actor_id in range(self._num_actors):\n source = sources[actor_id % len(sources)]\n program.add_node(lp.CourierNode(self.actor, replay, source, counter))\n\n return program", "def from_file(cls, name: str, mod_path: List[str] = [\".\"],\n description: str = None) -> \"DataModel\":\n with open(name, encoding=\"utf-8\") as infile:\n yltxt = infile.read()\n return cls(yltxt, mod_path, description)", "def setup(self, solver_name: str, solver_options: Dict = {}):\n # Setup problem\n x = self.opt.decision_variables.vec()\n p = self.opt.parameters.vec()\n\n problem = {\n \"x\": x,\n \"p\": p,\n \"f\": self.opt.f(x, p),\n }\n\n # Setup constraints\n\n ## Lower bound on constraints.\n self._lbg = None\n\n ## Upper bound on constraints\n self._ubg = None\n\n if self.opt_type in CONSTRAINED_OPT:\n problem[\"g\"] = self.opt.v(x, p)\n self._lbg = self.opt.lbv\n self._ubg = self.opt.ubv\n\n # Get solver interface\n if (solver_name in self.qp_solvers) and not self.opt.has_discrete_variables():\n sol = cs.qpsol\n elif (solver_name in self.nlp_solvers) or (solver_name in self.mi_solvers):\n sol = cs.nlpsol\n else:\n raise ValueError(\n f\"solver '{solver_name}' does not support this problem type\"\n )\n\n # Check for discrete variables\n if self.opt.has_discrete_variables():\n solver_options[\"discrete\"] = self.opt.decision_variables.discrete()\n\n # Initialize solver\n\n ## Instance of the CasADi solver.\n self._solver = sol(\"solver\", solver_name, problem, solver_options)\n\n return self", "def _load_parser_file(self, filename: str, protocol: Protocol):\n with open(filename) as fp:\n grammar = fp.read()\n self._load_parser(grammar, protocol)", "def load(cls):\n \n # Loop through problems and build patient problem lists:\n probs = csv.reader(file(PROBLEMS_FILE,'U'),dialect='excel-tab')\n header = probs.next() \n for prob in probs:\n cls(dict(zip(header,prob))) # Create a problem 
instance ", "def parse_from_file (path):\n with open(path) as f:\n return NFFG.parse(f.read())", "def initialize_cplex_problem(model, num_threads=1, lpmethod=0, adv=2):\n # type: (compass.models.MetabolicModel)\n\n # Create the Problem first\n # Easier to modify existing problem and re-solve\n problem = cplex.Cplex()\n problem.set_log_stream(None) # Suppress output\n problem.set_error_stream(None) # Suppress errors\n problem.set_warning_stream(None) # Suppress Warnings\n problem.set_results_stream(None) # Suppress results to output\n\n # Set Parameters for the Cplex solver\n problem.parameters.emphasis.numerical.set(True)\n problem.parameters.threads.set(num_threads)\n problem.parameters.preprocessing.reduce.set(3) #Turning on primal and dual preprocessing also enables some reoptimization features\n problem.parameters.advance.set(adv) #Will presolve advanced basis again\n problem.parameters.barrier.convergetol.set(1e-12) #default is 1e-8, minimum is 1e-12.\n problem.parameters.simplex.tolerances.optimality.set(1e-9) #default 1e-6, minimum is 1e-9\n problem.parameters.lpmethod.set(lpmethod) #default lets CPLEX choose the method\n\n # Add variables\n reactions = list(model.reactions.values())\n problem.variables.add(\n names=[x.id for x in reactions],\n ub=[x.upper_bound for x in reactions],\n lb=[x.lower_bound for x in reactions],)\n\n # Add constraints\n\n # Add stoichiometry constraints\n c_lin_expr, c_senses, c_rhs, c_names = (\n utils.get_steadystate_constraints(model))\n\n problem.linear_constraints.add(\n lin_expr=c_lin_expr,\n senses=c_senses,\n rhs=c_rhs,\n names=c_names)\n\n # Initialize the objective\n utils.reset_objective(problem)\n\n return problem", "def solve_CLP(self, lp):\n\t\tif not self.executable(self.path[0]):\n\t\t\traise \"PuLP: cannot execute \"+self.path[0]\n\t\tif not self.keepFiles:\n\t\t\tpid = os.getpid()\n\t\t\ttmpLp = os.path.join(self.tmpDir, \"%d-pulp.mps\" % pid)\n\t\t\ttmpSol = os.path.join(self.tmpDir, \"%d-pulp.sol\" % pid)\n\t\telse:\n\t\t\ttmpLp = lp.name+\"-pulp.mps\"\n\t\t\ttmpSol = lp.name+\"-pulp.sol\"\n\t\tvs, variablesNames, constraintsNames, objectiveName = lp.writeMPS(tmpLp, rename = 1)\n\t\tif not self.msg:\n\t\t\tclp = os.popen(self.path[0]+\" - > /dev/null 2> /dev/null\",\"w\")\n\t\telse:\n\t\t\tclp = os.popen(self.path[0]+\" -\",\"w\")\n\t\tclp.write(\"import \"+tmpLp+\"\\n\")\n\t\tif self.presolve:\n\t\t\tclp.write(\"presolve on\\n\")\n\t\tfor option in self.options:\n\t\t\tclp.write(option+\"\\n\")\n\t\tif lp.sense == LpMinimize:\n\t\t\tclp.write(\"min\\n\")\n\t\telse:\n\t\t\tclp.write(\"max\\n\")\n\t\tif self.dual:\n\t\t\tclp.write(\"dualS\\n\")\n\t\telse:\n\t\t\tclp.write(\"primalS\\n\")\n\t\tclp.write(\"solution \"+tmpSol+\"\\n\")\n\t\tclp.write(\"quit\\n\")\n\t\tif clp.close() != None:\n\t\t\traise \"PuLP: Error while trying to execute \"+self.path[0]\n\t\tif not os.path.exists(tmpSol):\n\t\t\traise \"PuLP: Error while executing \"+self.path[0]\n\t\tlp.status, values = self.readsol_CLP(tmpSol, lp, vs, variablesNames, constraintsNames, objectiveName)\n\t\tlp.assign(values)\n\t\tif not self.keepFiles:\n\t\t\ttry: os.remove(tmpLp)\n\t\t\texcept: pass\n\t\t\ttry: os.remove(tmpSol)\n\t\t\texcept: pass\n\t\treturn lp.status", "def from_cheetah_file(cls, filename):\n return translate.load_cheetah(cls, filename)", "def readsol(self,filename):\n\t\tf = file(filename)\n\t\tfor i in range(3): f.readline()\n\t\tstatusString = f.readline()[18:30]\n\t\tcplexStatus = {\n\t\t\t\"OPTIMAL SOLN\":LpStatusOptimal,\n\t\t\t}\n\t\tif statusString not in 
cplexStatus:\n\t\t\traise ValueError, \"Unknow status returned by CPLEX: \"+statusString\n\t\tstatus = cplexStatus[statusString]\n\n\t\twhile 1:\n\t\t\tl = f.readline()\n\t\t\tif l[:10] == \" SECTION 2\": break\n\t\t\n\t\tfor i in range(3): f.readline()\n\t\tvalues = {}\n\t\twhile 1:\n\t\t\tl = f.readline()\n\t\t\tif l == \"\": break\n\t\t\tline = l[3:].split()\n\t\t\tif len(line):\n\t\t\t\tname = line[1]\n\t\t\t\tvalue = float(line[3])\n\t\t\t\tvalues[name] = value\n\n\t\treturn status, values" ]
[ "0.6062727", "0.5984447", "0.5803342", "0.57631385", "0.55716205", "0.5436299", "0.54261035", "0.53917223", "0.532022", "0.52781713", "0.52740806", "0.52685714", "0.52685714", "0.5249456", "0.5244701", "0.52160436", "0.51900196", "0.5182835", "0.5182635", "0.5168049", "0.5160513", "0.51493496", "0.514495", "0.5131988", "0.51242775", "0.5122958", "0.51137614", "0.51133955", "0.5113283", "0.5103981", "0.5103256", "0.510163", "0.50969976", "0.5087489", "0.50800294", "0.5069479", "0.506745", "0.50505507", "0.5046791", "0.50416607", "0.50352997", "0.5033535", "0.50269467", "0.50115716", "0.50077224", "0.5002015", "0.49760488", "0.4972094", "0.4963412", "0.49631453", "0.49513304", "0.49482772", "0.49479678", "0.4946134", "0.49457878", "0.49453148", "0.49453148", "0.49453148", "0.49419478", "0.4919267", "0.49091798", "0.48962396", "0.4891688", "0.48764768", "0.48720998", "0.48705843", "0.4870477", "0.48675734", "0.48642942", "0.48598188", "0.48578387", "0.48566794", "0.4855073", "0.48534304", "0.48485622", "0.48427394", "0.4841107", "0.48389745", "0.48383415", "0.48296756", "0.48292655", "0.48265147", "0.48256487", "0.4825596", "0.4825485", "0.48157325", "0.48134848", "0.48094276", "0.48085755", "0.4801595", "0.47964713", "0.47957614", "0.47935432", "0.4792779", "0.47874776", "0.47870457", "0.4784211", "0.47781104", "0.47769904", "0.47752598" ]
0.78207666
0
Refreshes the Job's details by querying the workspace.
def refresh(self):
    self.details = self.workspace.get_job(self.id).details
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh(self): # noqa\n data = self.connection.hgetall(self.key)\n if not data:\n raise NoSuchJobError('No such job: {0}'.format(self.key))\n self.restore(data)", "def reload(self):\n self.job_proto = self.serving_stub.GetJob(GetJobRequest(job=self.job_proto)).job", "def refresh_details(self) -> None:\n data = request(\n 'get',\n f'/api/v0/projects/{self.id}/',\n ).json()\n self.data.update(data)", "def refresh(self):\n\t\tif self.id is None:\n\t\t\tprint(\"({cls}): self.id is None, can't refresh\".format(cls=self.__class__.__name__))\n\t\t\treturn\n\n\t\tif self.needs_project_id and self.project_id is None:\n\t\t\tprint(\"({cls}): self.project_id is None, can't refresh\".format(cls=self.__class__.__name__))\n\t\t\treturn\n\n\t\tif self.needs_project_id:\n\t\t\targs = [self.project_id, self.id]\n\t\telse:\n\t\t\targs = [self.id]\n\n\t\tres = getattr(self._client, \"get_\" + self.method)(*args, raw=True)\n\t\tself._create_fields(res)", "def refresh_jobs(self):\n\n jobs = self.backend.get_jobs()\n\n if not isinstance(jobs, list):\n warning(self.iface, \"Error loading Jobs from the backend (Response status code not 200)\")\n jobs = []\n\n if not self.jobs_changed(jobs):\n return\n\n self.init_jobs()\n self.jobsTableWidget.setSortingEnabled(False)\n self.jobsTableWidget.setRowCount(len(jobs))\n row = 0\n self.jobs_table = {}\n for job in jobs:\n\n if job.updated:\n str_date = job.updated.strftime(\"%Y-%m-%d_%H-%M-%S\")\n qitem = QTableWidgetItem(str_date)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n elif job.created:\n str_date = job.created.strftime(\"%Y-%m-%d_%H-%M-%S\")\n qitem = QTableWidgetItem(str_date)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n\n if not job.title:\n qitem = QTableWidgetItem(\"Untitled Job!\")\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 0, qitem)\n else:\n qitem = QTableWidgetItem(job.title)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 0, qitem)\n\n exec_btn = QPushButton(self.jobsTableWidget)\n exec_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/execute_icon.svg')))\n\n if job.status:\n qitem = QTableWidgetItem(job.status)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 2, qitem)\n\n if job.status == \"finished\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(75, 254, 40, 160))\n disp_btn = QPushButton(self.jobsTableWidget)\n disp_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/display_icon.svg')))\n disp_btn.setIconSize(QSize(29, 29))\n self.jobsTableWidget.setCellWidget(row, 4, disp_btn)\n disp_btn.clicked.connect(lambda *args, job_id=job.id: self.job_display(job_id))\n disp_btn = QPushButton(self.jobsTableWidget)\n disp_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/download.png')))\n disp_btn.setIconSize(QSize(29, 29))\n disp_btn.clicked.connect(lambda *args, job_id=job.id: self.job_download(job_id))\n self.jobsTableWidget.setCellWidget(row, 5, disp_btn)\n iface.actionZoomIn().trigger()\n elif job.status == \"running\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(254, 178, 76, 200))\n exec_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/stop-button.png')))\n elif job.status == \"canceled\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(254, 178, 76, 200))\n elif job.status == \"error\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(254, 100, 100, 200))\n\n 
exec_btn.setIconSize(QSize(21, 21))\n self.jobsTableWidget.setCellWidget(row, 3, exec_btn)\n\n if job.status == \"running\":\n exec_btn.clicked.connect(lambda *args, job_id=job.id: self.job_stop(job_id))\n else:\n exec_btn.clicked.connect(lambda *args, job_id=job.id: self.job_execute(job_id))\n\n info_btn2 = QPushButton(self.jobsTableWidget)\n info_btn2.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/edit_icon.png')))\n info_btn2.setIconSize(QSize(25, 25))\n self.jobsTableWidget.setCellWidget(row, 6, info_btn2)\n info_btn2.clicked.connect(lambda *args, job_id=job.id: self.adapt_job(job_id))\n\n info_btn3 = QPushButton(self.jobsTableWidget)\n info_btn3.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/info_icon.png')))\n info_btn3.setIconSize(QSize(25, 25))\n self.jobsTableWidget.setCellWidget(row, 7, info_btn3)\n info_btn3.clicked.connect(lambda *args, job_id=job.id: self.job_info(job_id))\n\n info_btn4 = QPushButton(self.jobsTableWidget)\n info_btn4.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/deleteFinalBtn.png')))\n info_btn4.setIconSize(QSize(25, 25))\n self.jobsTableWidget.setCellWidget(row, 8, info_btn4)\n info_btn4.clicked.connect(lambda *args, job_id=job.id: self.delete_job_final(job_id))\n\n self.refreshButton.setEnabled(True)\n self.refreshButton_service.setEnabled(True)\n\n self.jobs_table[row] = job\n\n row += 1\n\n self.jobsTableWidget.setSortingEnabled(True)", "def on_job_update(_job):\n nonlocal job\n job = _job", "def on_job_update(_job):\n nonlocal job\n job = _job", "def on_job_update(_job):\n nonlocal job\n job = _job", "def on_job_update(_job):\n nonlocal job\n job = _job", "def update(self) -> None:\n self.previous_status = self.status\n\n jobs = self._client.describe_jobs(jobs = [ self.id ])[\"jobs\"]\n\n try:\n self.state = jobs[0]\n except IndexError:\n raise ValueError(\"Invalid or unknown job id %s\" % self.id) from None", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n aq_data.add_aq_to_db()\n DB.session.commit()\n return 'Data refreshed!'", "def resubmit(self):\n self.keep_data = True\n ManagedJob.submit(self)", "def reload_job(self):\n if self.ui['main_window'].widgets['live_preview'].get_active():\n self._update_preview()", "def refresh_jobs(self):\n jobs = self.connection.user_jobs()\n\n self.init_jobs()\n self.jobsTableWidget.setRowCount(len(jobs))\n row = 0\n for val in jobs:\n\n if \"id\" in val:\n qitem = QTableWidgetItem(val[\"id\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 0, qitem)\n\n if \"error\" in val:\n if val[\"error\"]:\n if \"message\" in val[\"error\"]:\n qitem = QTableWidgetItem(val[\"error\"][\"message\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n elif \"description\" in val:\n qitem = QTableWidgetItem(val[\"description\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n\n if \"submitted\" in val:\n qitem = QTableWidgetItem(val[\"submitted\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 2, qitem)\n\n execBtn = QPushButton(self.jobsTableWidget)\n execBtn.setText('Execute')\n\n if \"status\" in val:\n qitem = QTableWidgetItem(val[\"status\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 3, qitem)\n\n if val[\"status\"] == \"finished\":\n dispBtn = QPushButton(self.jobsTableWidget)\n dispBtn.setText('Display')\n 
self.jobsTableWidget.setCellWidget(row, 5, dispBtn)\n dispBtn.clicked.connect(lambda *args, row=row: self.job_display(row))\n\n self.jobsTableWidget.setCellWidget(row, 4, execBtn)\n execBtn.clicked.connect(lambda *args, row=row: self.job_execute(row))\n\n row += 1", "def fetchJob(self):\n \n mpDlg = MultipleValDialog(title='Get Job',\n initialvalues=('','my job1'),\n labels=('ID','Your label',),\n types=('string','string'),\n parent=self.mainwin)\n if mpDlg.result == True:\n jobid = mpDlg.results[0]\n name = mpDlg.results[1]\n else:\n return\n job = PEATSA.WebApp.Data.Job(jobid, self.connection) \n if job != None: \n print 'adding job id %s to list' %job.identification\n self.storeJob(name, job)\n self.updateJobs()\n return", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh_history(self):\n\n self.old_jobs = self.secretary_bot.history_bullshit_filter(self.old_jobs)\n self.jobs_save(self.old_jobs, 'overwrite')", "async def refresh(ctx):\n await update_tournament_list()\n res = await refresh_algorithm()\n if res == True:\n await ctx.send(\"Successfully refreshed data from sheet.\")\n else:\n await ctx.send(\":warning: Unsuccessfully refreshed data from sheet.\")", "def update(self):\n self._log.debug(\"About to update job {0}\".format(self.id))\n resp = self._api.get_job(self.id)\n\n if resp.success:\n self.submission = self._format_submission(resp.result)\n return True\n\n else:\n raise resp.result", "def refresh(self):\n connection = self._connection\n with self._refresh_lock:\n self._aiexperiment = connection.aiexperiments(self.id).fetch()", "def execute_queries():\n fetch_job_listings(engine)\n update_job_listing(engine)", "def refresh(self):\n self.dto = self.res.get()\n log.debug(f\"Refreshed {self.url}\")", "def _refresh(self):\n url = self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)\n resp = self._cb.get_object(url)\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def refresh(self):\n # exists state\n self.shoprefobj = self.sc.get_shopref_obj({'Alias': self.Alias})\n self.exists = self.sc.exists(self.shoprefobj)\n\n if not self.exists:\n raise ShopDisappearedError(\"Could not find the shop anymore!\")\n\n # data from the server\n self.infoshopobj = self.sc.get_infoshop_obj({'Alias': self.Alias})\n self.shopinfo = self.sc.get_info(self.infoshopobj)\n\n self._from_dict(self.shopinfo)", "def RefreshReport(self):\r\n report = self.data.getRefreshReport()\r\n if report: showInfo(self,report,self.data.title)", "def refresh(self):\n pass", "def refresh(self):\n pass", "def refresh(self):\n\n self._refreshed_on = time.time() * 1000", "def refresh(self):\n self.__refresh()", "def refresh(self): \n return self._config.refreshObj(self)", "def RefreshWorkspace(self, request, global_params=None):\n config = self.GetMethodConfig('RefreshWorkspace')\n return self._RunMethod(\n config, request, global_params=global_params)", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def 
refresh_queue(self):\n state = self.get_state()\n return state.refresh_queue()", "def __clear_jobs(self):\n namespace = self._config.cluster_config.namespace\n self.__logger.info(f'Clearing old jobs in current namespace: {namespace}')\n\n for job in self.__client.get(namespace=self._config.cluster_config.namespace)['items']:\n job_name = job['metadata']['name']\n self.__logger.info(f'Deleting: {job_name}')\n try:\n self.__client.custom_api.delete_namespaced_custom_object(\n PYTORCHJOB_GROUP,\n PYTORCHJOB_VERSION,\n namespace,\n PYTORCHJOB_PLURAL,\n job_name)\n except Exception as e:\n self.__logger.warning(f'Could not delete: {job_name}')\n print(e)", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\n raise NotImplementedError", "def _refresh(self):\n resp = self._cb.get_object(self._build_api_request_uri())\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def _refresh(self):\n resp = self._cb.get_object(self._build_api_request_uri())\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def updateJobData(self, jobName):\n self.jobRow.setText(jobName)\n self.updateSelectedLayer()", "def describe_job(self):\n # GET /jobs/{job_id}\n pass", "def job(username, root_wf_id, wf_id, job_id, job_instance_id):\n dashboard = Dashboard(g.master_db_url, root_wf_id, wf_id)\n job = dashboard.get_job_information(wf_id, job_id, job_instance_id)\n job_states = dashboard.get_job_states(wf_id, job_id, job_instance_id)\n job_instances = dashboard.get_job_instances(wf_id, job_id)\n\n previous = None\n\n for state in job_states:\n timestamp = state.timestamp\n state.timestamp = datetime.fromtimestamp(state.timestamp).strftime('%a %b %d, %Y %I:%M:%S %p')\n\n if previous is None:\n state.interval = 0.0\n else:\n state.interval = timestamp - previous\n\n previous = timestamp\n\n if not job:\n return 'Bad Request', 400\n\n return render_template('workflow/job/job_details.html', root_wf_id=root_wf_id, wf_id=wf_id, job_id=job_id, job=job,\n job_instances=job_instances, job_states=job_states)", "def refresh(self):\n self._refresh_method()", "def refresh(self):\n raise NotImplementedError(\"To be implemented\")", "def refresh(self):\n self.fetch(False)", "def updateList(self):\n self._recreateJobs()", "def submit(self):\n self.keep_data = False\n ManagedJob.submit(self)", "def poll_job(self, job, polling_frequency_in_sec=60):\r\n logger.Logger.info('Waiting for job to finish...')\r\n request = self.cloudsqlapi_service.operations().get(\r\n project=job['targetProject'],\r\n operation=job['name'])\r\n\r\n num_wait_sec = 0\r\n while True:\r\n result = request.execute(num_retries=2)\r\n if result['status'] == 'DONE':\r\n logger.Logger.info('Job complete.')\r\n return\r\n else:\r\n logger.Logger.info(\r\n 'Wait %d secs for project %s, wait more. 
Jobs: %s' % (\r\n num_wait_sec, job['targetProject'], result))\r\n time.sleep(polling_frequency_in_sec)\r\n num_wait_sec += polling_frequency_in_sec", "def update_job_state(self, job):", "def store_import_job_details(self, report_id: int,\n job: bigquery.LoadJob) -> None:\n self.store_document(Type._JOBS, report_id, job.to_api_repr())", "def refresh(self):\n self.update_from_file()\n self.update_from_env()", "def update(self):\n for uid, server in self.servers_online.items():\n if len(server.jobs):\n self.populate_server(server)\n for uid, server in self.servers_online.items():\n if server.jobs:\n server.jobs[0].task_time -= time_interval\n server.waiting_time -= time_interval\n if server.jobs[0].task_time <= 0:\n completed_task = server.jobs.pop(0)\n print(f\"Task '{completed_task.description}' completed\")\n self.all_tasks.remove(completed_task)\n self.servers_jobs_list[uid].pop(0)\n for uid, server in self.all_servers.items():\n if server.status:\n print(f\"{server.server_name} has {len(set(server.jobs))} job(s)\")\n else:\n print(f\"{server.server_name} is offline\")", "def update_db(self):\n for tool in self.watchdb:\n if 'jobs' not in self.watchdb[tool]:\n continue\n for jname in self.watchdb[tool]['jobs']:\n job = self.watchdb[tool]['jobs'][jname]\n if 'timeout' in job:\n # Waiting on a restart or throttled,\n # leave the current state alone\n continue\n # Mark as dead pending verification of state from qstat\n job['state'] = 'DEAD'\n\n # Update the known state of all jobs from qstat data\n xml = ET.fromstring(subprocess.check_output(\n ['/usr/bin/qstat', '-u', '*', '-xml']))\n for j in xml.iter('job_list'):\n tool = j.find('JB_owner').text\n try:\n self.read_config(tool)\n except IOError:\n logger.exception('Failed to read config for %s', tool)\n continue\n\n if tool not in self.watchdb or 'jobs' not in self.watchdb[tool]:\n # Not watching any jobs for this tool\n continue\n\n jname = j.find('JB_name').text\n if jname not in self.watchdb[tool]['jobs']:\n # Not watching this job for this tool\n continue\n\n # Update the watched job's state\n job = self.watchdb[tool]['jobs'][jname]\n job['jname'] = jname\n job['state'] = j.find('state').text\n\n since_xml = j.find('JAT_start_time')\n if since_xml is None:\n since_xml = j.find('JB_submission_time')\n job['since'] = datetime.datetime.strptime(\n since_xml.text, '%Y-%m-%dT%H:%M:%S')\n\n if 'timeout' in job:\n del job['timeout']", "def refresh(self):\n self.Refresh()", "def refresh(self):\n return self._refresh", "def test_get_refresh_job_status(self):\n pass", "def repopall(ctx):\n c = ctx.obj['client']\n if not c.login:\n return False\n\n r = requests.request(\"GET\", urljoin(c.BASE_URL, '/apiproxy/JobService.js'), params={'accesskey': c.login, 'method': 'PopulateAllSearches'})\n print(r.status_code, r.text)\n\n if r.status_code == 200:\n return True\n else:\n return False", "def work(self, job):\n pass", "def job(self, job: str):\n\n self._job = job", "def job(self, job: str):\n\n self._job = job", "def handle_wps_update(self, data):\n\n self.jobs = data", "async def _fetch_data(self) -> JobInfo:\n return await self.api.get_job()", "def resubmitJob(self):\n job, name = self.getJob()\n if job == None:\n return\n DB=self.DB \n self.matrices = job.data.allMatrices()\n for m in matrices:\n matrix=matrices[m]\n if matrix==None: return\n muts = matrix.mutationCodes()\n dbmuts = [DB.get(p).Mutations for p in DB.getRecs()]\n newmuts = list(set(dbmuts) - set(muts))\n print 'the following mutations in the project are not in the job: %s' 
%newmuts\n \n '''self.submitJob(name=name,\n pdb=pdb, pdbfile=pdbfile,\n ligandfile=self.ligandfile,\n mutations=newmuts,\n calcs=calcs, meta={'expcol':expcol}) '''\n self.log.yview('moveto', 1) \n return", "def Refresh(self):\n pass", "def poll(self, job_id):\n return self.manage.poll_job(job_id=job_id)", "def update_all():\n req_data = request.get_json()\n jobs = JobModel.get_one_job(job_id)\n if not jobs:\n return custom_response({'Error': 'Job Not Found'}, 404)\n\n data, error = job_schema.load(req_data, partial=True)\n if error:\n return custom_response(error, 400)\n\n for job in jobs:\n job.update(data)\n job_message = job_schema.dump(job)\n\n return custom_response(job_message, 200)", "def refresh(self):\n if self.is_server_process and self.cache_manager.is_refreshing():\n raise RefreshInProgressError()\n catalogs = MetadataManager(schemaspace=ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID).get_all()\n for catalog in catalogs:\n self._insert_request(self.refresh_queue, catalog, \"modify\")", "def reload(self):\n\n pass", "def refresh(self) -> None:\n self._itempage.get()", "def __call__(self):\n\n self.initialise()\n\n # //\n # // ToDo: Add exception wrappers for plugin calls\n #//\n subJobs = TrackerDB.getJobsByState(\"submitted\", self.cooloff)\n self.updateSubmitted(*subJobs.keys())\n runningJobs = TrackerDB.getJobsByState(\"running\")\n self.updateRunning(*runningJobs.keys())\n completeJobs = TrackerDB.getJobsByState(\"complete\")\n self.updateComplete(*completeJobs.keys())\n failedJobs = TrackerDB.getJobsByState(\"failed\")\n self.updateFailed(*failedJobs.keys())\n self.cleanup()\n\n return", "def watch(self, job_id, polling_frequency=1, printing_frequency=60):\n\n # Get the job name and job status\n job_name = self.job_name(job_id)\n job_status = self.job_status(job_id)\n\n # Keep track of the job status from the previous iteration (makes sense below)\n last_job_status = None\n\n # Keep track of the last time we printed to the screen\n last_print = time.time()\n\n # Issue periodic updates to the job status\n while job_status not in [\"RUNNING\", \"FAILED\", \"SUCCEEDED\"]:\n if (\n time.time() - last_print\n ) > printing_frequency or job_status != last_job_status:\n logging.info(\"Job %s (%s) is %s\", job_name, job_id, job_status)\n last_print = time.time()\n time.sleep(polling_frequency)\n last_job_status = job_status\n job_status = self.job_status(job_id)\n\n # Now just print out the logs until the job is done\n\n # Keep track of how many lines have been printed\n n_log_lines_printed = 0\n\n # Get the complete set of logs\n logs = self.get_logs(job_id)\n\n # Keep printing the logs to the screen\n while len(logs) > n_log_lines_printed or job_status == \"RUNNING\":\n while len(logs) > n_log_lines_printed:\n logging.info(logs[n_log_lines_printed])\n n_log_lines_printed += 1\n # Wait before checking for more logs\n time.sleep(polling_frequency)\n\n # Refresh the logs\n logs = self.get_logs(job_id)\n\n # Get the new job status\n job_status = self.job_status(job_id)\n\n # The job is now over\n logging.info(\"The final status of %s (%s) is %s\", job_name, job_id, job_status)", "def on_job_update(_job):\n nonlocal job, job_update_counter\n\n # Cancel the job when it updates in the `WORKING` state for the\n # second time. 
We do it just to be sure it is somewhere in the\n # middle of executions.\n if (job is not None and\n _job.state == job.state == 'WORKING'):\n my_job_gen.job_manager_class.cancel(job.id)\n\n job = _job\n job_update_counter += 1", "def refresh_from_api(self):\n self.populate_from_api(self.get_from_api())", "def refresh_config(self):\n\t\treturn Job(SDK.PrlVm_RefreshConfig(self.handle)[0])", "def main(self):\n\t\tprint \"Retreiving view 'All\",\n\t\tview_all = self.hudson.getViewByName('All')\n\t\tprint \"Done\"\n\t\tprint \"iterating over jobs\"\n\t\tfor job in view_all.jobs.values():\n\t\t\tviewname = job.name.split(\".\")[0]\n\t\t\tif job.name not in self.getJobListFromDB():\n\t\t\t\tself.addJobToDb(job.name)\n\t\t\tif viewname not in self.getViewListFromDB():\n\t\t\t\tself.addViewToDb(viewname)\n\t\t\tfor build in job.builds:\n\t\t\t\tbo = HudsonConnector.HudsonObject( self.hudson.getDataFromUrl(build['url']) )\n\t\t\t\tstamp = datetime.datetime.fromtimestamp(bo.timestamp/1000)\n\t\t\t\tif stamp > self.lastrun:\n\t\t\t\t\tif bo.result is None:\n\t\t\t\t\t\trunname = job.name+\" #%d\" % bo.number\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tprint runname.ljust(29), str(stamp).ljust(24), bo.result.capitalize()\n\t\t\t\t\t\texcept AttributeError:\n\t\t\t\t\t\t\tprint runname.ljust(29), str(stamp).ljust(24), \"Unknown\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tjobdata = { 'name':job.name, 'view':job.name.split(\".\")[0], 'start':stamp, \n\t\t\t\t\t\t\t\t\t'end':stamp + datetime.timedelta(seconds=bo.duration),\n\t\t\t\t\t\t\t\t\t'duration':bo.duration,\n\t\t\t\t\t\t\t\t\t'result':bo.result\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\tself.uploadJobState(jobdata)\n\t\tself.saveState()", "def updateJobDB(request,Q={}):\n\tuser = request.user\n\t# Get metadata\n\tresponse = agaveRequestMetadataList(user,Q=Q)\n\t# Add job if not in db\n\tfor metadata in response['result']:\n\t\tvalue = metadata['value']\n\t\tif 'jobName' in value and 'parameters' in value:\n\t\t\tlogger.info('SetName: ' + value['jobName'] + ', Parameters: [' + ', '.join(value['parameters']) + '], Length: ' + str(len(value['parameters'])))\n\t\t\tif len(value['parameters']) == 2: \n\t\t\t\tjobName = value['jobName']\n\t\t\t\tpara1name = value['parameters'][0]\n\t\t\t\tpara2name = value['parameters'][1]\n\t\t\t\tjobsInDB = Job.objects.filter(name=jobName)\n\n\t\t\t\t# Update status if not 'FINISHED'\n\t\t\t\tfor job in jobsInDB:\n\t\t\t\t\tif job.status not in ['FINISHED']:\n\t\t\t\t\t\tjobResponse = agaveRequestJobSearch(user,jobId=job.jobid)\n\t\t\t\t\t\tstatus = jobResponse['result'][0]['status']\n\t\t\t\t\t\tcolor = 'red'\n\t\t\t\t\t\tif status == 'FINISHED':\n\t\t\t\t\t\t\tcolor = 'blue'\n\t\t\t\t\t\telif status not in ['FINISHED','FAILED','STOPPED']: # Running\n\t\t\t\t\t\t\tcolor = 'orange'\n\t\t\t\t\t\t# else failed or stopped (color = 'red')\n\t\t\t\t\t\tjob.status = status\n\t\t\t\t\t\tjob.color = color\n\t\t\t\t\t\tjob.save()\n\n\t\t\t\t# Create new job entries\n\t\t\t\tjobsInDB = [job.jobid for job in Job.objects.filter(name=jobName)]\n\t\t\t\tjobsNotInDB = (set(jobsInDB) ^ set(metadata['associationIds'])) & set(metadata['associationIds'])\n\t\t\t\tfor jobId in jobsNotInDB:\n\t\t\t\t\tjobResponse = agaveRequestJobSearch(user,jobId=jobId)\n\t\t\t\t\tstatus = jobResponse['result'][0]['status']\n\t\t\t\t\tcolor = 'red'\n\t\t\t\t\tif status == 'FINISHED':\n\t\t\t\t\t\tcolor = 'blue'\n\t\t\t\t\telif status == 'RUNNING':\n\t\t\t\t\t\tcolor = 'orange'\n\t\t\t\t\tpara1value = value['paraValues'][jobId][para1name]\n\t\t\t\t\tpara2value = 
value['paraValues'][jobId][para2name]\n\t\t\t\t\tJob(name=jobName,\n\t\t\t\t\t\tjobid=jobId,\n\t\t\t\t\t\tuser=user,\n\t\t\t\t\t\tvalue=8,\n\t\t\t\t\t\tpara1name=para1name,\n\t\t\t\t\t\tpara1value=para1value,\n\t\t\t\t\t\tpara2name=para2name,\n\t\t\t\t\t\tpara2value=para2value,\n\t\t\t\t\t\tstatus=status,\n\t\t\t\t\t\tcolor=color).save()", "def fetch_job_update(self) -> Optional[Dict[str, Any]]:\n raise NotImplementedError(\"Base method not implemented\")", "def refresh(self):\n self.lease = self.blazar.lease.get(self.id)", "async def job_detail(request, job_id=None):\n current_jobs = dagobah._serialize().get('jobs', {})\n jobs = [job for job in current_jobs if str(job['job_id']) == job_id]\n if not jobs:\n raise ValueError('not find any jobs')\n return template('job_detail.html', job=jobs[0], hosts=dagobah.get_hosts())", "def refresh(self):\r\n self.metadata = self.db.read(self.path).json()", "def _update(self):\n _logme.log('Updating job.', 'debug')\n self._updating = True\n if self.done or not self.submitted:\n self._updating = False\n return\n self.queue.update()\n if self.id:\n queue_info = self.queue[self.id]\n if queue_info:\n assert self.id == queue_info.id\n self.queue_info = queue_info\n self.state = self.queue_info.state\n if self.state == 'completed':\n if not self._got_exitcode:\n self.get_exitcode()\n if not self._got_times:\n self.get_times()\n self._updating = False", "def refresh(self, parameters = {}):\n\n self.__enforce_connected()\n self.collection.refresh(self, parameters = parameters)", "def resurrectJob(job_id):\n \n with transaction() as t:\n t.cur.execute(\"\"\"update Hydra_rendertask \n set status = 'R' \n where job_id = '%d' and \n status = 'K' or status = 'F'\"\"\" % job_id)", "def test_refresh_connector_content_list_job(self):\n pass", "def get_all_jobs(self):\n all_jobs = self.job_set.all().order_by(\"-time_last_updated\", \"project__name\", \"-id\")\n # for job in all_jobs:\n # job.check_exists()\n\n # get the list of jobs listed in the database as running and update them.\n dbrunning = all_jobs.filter(state__in=['in queue', 'started'])\n for runningjob in dbrunning: runningjob.update();\n\n # get the updated list \n all_jobs = self.job_set.all().order_by(\"-time_last_updated\", \"project__name\", \"-id\")\n\n return all_jobs", "def refresh(self) -> object:\n requestor = Requestor(local_api_key=self._api_key)\n url = self.instance_url()\n response, api_key = requestor.request(method=RequestMethod.GET, url=url, params=self._retrieve_params)\n self.refresh_from(values=response, api_key=api_key)\n return self", "def _retrieve_data(self):\n # Get job results if missing in experiment data.\n if self.provider is None:\n return\n retrieved_jobs = {}\n jobs_to_retrieve = [] # the list of all jobs to retrieve from the server\n\n # first find which jobs are listed in the `job_ids` field of the experiment data\n if self.job_ids is not None:\n for jid in self.job_ids:\n if jid not in self._jobs or self._jobs[jid] is None:\n jobs_to_retrieve.append(jid)\n\n for jid in jobs_to_retrieve:\n try:\n LOG.debug(\"Retrieving job [Job ID: %s]\", jid)\n job = self.provider.retrieve_job(jid)\n retrieved_jobs[jid] = job\n except Exception: # pylint: disable=broad-except\n LOG.warning(\n \"Unable to retrieve data from job [Job ID: %s]\",\n jid,\n )\n # Add retrieved job objects to stored jobs and extract data\n for jid, job in retrieved_jobs.items():\n self._jobs[jid] = job\n if job.status() in JOB_FINAL_STATES:\n # Add job results synchronously\n self._add_job_data(job)\n else:\n # 
Add job results asynchronously\n self._add_job_future(job)", "def info(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s\" % (self.sessionid,\n self.name))\n return self.server.json_body(resp)", "def flush(self):\n #return self.job.flush()", "def refresh(self):\n resp = self._imgur._send_request(self._INFO_URL)\n self._populate(resp)\n self._has_fetched = True\n # NOTE: What if the object has been deleted in the meantime? That might\n # give a pretty cryptic error.", "def poll(self):\n\n query = f\"sacct -j {self.jobid} -o State -n -X -P\"\n if self.cluster:\n query += f\" --clusters={self.cluster}\"\n\n cmd = BuildTestCommand(query)\n cmd.execute()\n\n logger.debug(f\"Querying JobID: '{self.jobid}' Job State by running: '{query}'\")\n job_state = cmd.get_output()\n self._state = \"\".join(job_state).rstrip()\n logger.debug(f\"JobID: '{self.jobid}' job state:{self._state}\")", "def job_display(self, row):\n job_id = self.jobsTableWidget.item(row, 0).text()\n download_dir = self.connection.job_result_download(job_id)\n if download_dir:\n info(self.iface, \"Downloaded to {}\".format(download_dir))\n result = Result(path=download_dir)\n result.display()\n\n self.refresh_jobs()\n # info(self.iface, \"New Job {}\".format(job_id))", "def refresh_queue_status(self):\n \n # Get the jobid and state for all jobs pending/running/completed for the current user\n qacct_stdout=self.run_grid_command_resubmit([\"qacct\",\"-o\",getpass.getuser(),\"-j\",\"*\"])\n \n # info list should include jobid, state, cpus, time, and maxrss\n info=[]\n job_status=[]\n for line in qacct_stdout.split(\"\\n\"):\n if line.startswith(\"jobnumber\") or line.startswith(\"job_number\"):\n if job_status:\n info.append(job_status)\n job_status=[line.rstrip().split()[-1],\"NA\",\"NA\",\"NA\",\"NA\"]\n # get the states for completed jobs\n elif line.startswith(\"failed\"):\n failed_code = line.rstrip().split()[1]\n if failed_code != \"0\":\n if failed_code in [\"37\",\"100\"]:\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n elif line.startswith(\"deleted_by\"):\n if line.rstrip().split()[-1] != \"NONE\" and job_status[1] == self.job_code_terminated:\n job_status[1]=self.job_code_deleted\n elif line.startswith(\"exit_status\"):\n # only record if status has not yet been set\n if job_status[1] == \"NA\":\n exit_status = line.rstrip().split()[-1]\n if exit_status == \"0\":\n job_status[1]=self.job_code_completed\n elif exit_status == \"137\":\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n # get the current state for running jobs\n elif line.startswith(\"job_state\"):\n job_status[1]=line.rstrip().split()[-1]\n elif line.startswith(\"slots\"):\n job_status[2]=line.rstrip().split()[-1]\n elif line.startswith(\"ru_wallclock\"):\n try:\n # get the elapsed time in minutes\n job_status[3]=str(float(line.rstrip().split()[-1])/60.0)\n except ValueError:\n job_status[3]=\"NA\"\n elif line.startswith(\"ru_maxrss\"):\n job_status[4]=line.rstrip().split()[-1]+\"K\"\n \n if job_status:\n info.append(job_status)\n\n return info", "def refresh_status(self):\n\n pass", "def on_job_update(_job):\n nonlocal job\n job = _job\n # Asserts that job is either pending or canceled.\n assert job.state in ['PENDING', 'CANCELED'], (\n 'Job that canceled immediately after submission has wrong '\n 'state `{job.state}`!'\n )", "def __on_query_edited(self):\n self.__refresh_search_results()", "def on_job_update(_job):\n nonlocal job, job_update_counter\n\n # Cancel the job when 
it updates in the `WORKING` state for the\n # second time. We do it just to be sure it is somewhere in the\n # middle of execution.\n if (job is not None and\n _job.state == job.state == 'WORKING'):\n my_job_async_gen.job_manager_class.cancel(job.id)\n\n job = _job\n job_update_counter += 1" ]
[ "0.7204651", "0.6387912", "0.5996785", "0.5930605", "0.58091706", "0.5802573", "0.5802573", "0.5802573", "0.5802573", "0.5802385", "0.57152075", "0.5710763", "0.5668991", "0.5663364", "0.56334144", "0.5627976", "0.5627976", "0.5627976", "0.56269634", "0.56071436", "0.56027734", "0.55932873", "0.5577526", "0.5574648", "0.5562469", "0.55377233", "0.553554", "0.5530145", "0.5530145", "0.55272985", "0.5520948", "0.551055", "0.54885405", "0.54515344", "0.54515344", "0.54515344", "0.5444463", "0.54228425", "0.53968656", "0.53968656", "0.5393182", "0.5393182", "0.5379397", "0.5376045", "0.5370053", "0.53617966", "0.5338636", "0.53288186", "0.5314558", "0.5302585", "0.52979887", "0.52862996", "0.528595", "0.528348", "0.5280818", "0.5279777", "0.52775073", "0.5277306", "0.5241053", "0.5236746", "0.5236636", "0.5233285", "0.5233285", "0.523045", "0.5226218", "0.5218583", "0.5216586", "0.5211845", "0.52102417", "0.5209283", "0.52061594", "0.51903003", "0.5190021", "0.5158347", "0.5149394", "0.5148914", "0.51486397", "0.51473814", "0.5142646", "0.5139335", "0.51357037", "0.51146376", "0.5104131", "0.5097692", "0.50900304", "0.50899065", "0.508438", "0.5066876", "0.50657123", "0.5058972", "0.5057112", "0.505403", "0.5047129", "0.5045354", "0.5037287", "0.5035876", "0.5034274", "0.50246483", "0.5020031", "0.50189453" ]
0.80415493
0
Keeps refreshing the Job's details until it reaches a finished status.
def wait_until_completed(self, max_poll_wait_secs=30):
    self.refresh()
    poll_wait = 0.2
    while not self.has_completed():
        logger.debug(
            f"Waiting for job {self.id},"
            + f"it is in status '{self.details.status}'"
        )
        print(".", end="", flush=True)
        time.sleep(poll_wait)
        self.refresh()
        poll_wait = (
            max_poll_wait_secs
            if poll_wait >= max_poll_wait_secs
            else poll_wait * 1.5
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh(self):\n self.details = self.workspace.get_job(self.id).details", "def wait(self):\r\n self.jobs.join()", "def jobComplete(self):\n self._Finished = True\n return", "def wait_till_jobs_complete(self, collection_name: str, job_id: str, job_name: str):\n status = self.job_status(collection_name, job_id, job_name)\n while (\n status[\"status\"] == \"Running\"\n or status[\"status\"] == \"Started\"\n or status[\"status\"] == \"NotStarted\"\n ):\n status = self.job_status(collection_name, job_id, job_name)\n time.sleep(15)\n print(status)\n return \"Done\"", "def _wait_for_jobs(self):\n import googleapiclient\n\n while True:\n # always use self.lock to avoid race conditions\n with self.lock:\n if not self.wait:\n return\n active_jobs = self.active_jobs\n self.active_jobs = list()\n still_running = list()\n\n # Loop through active jobs and act on status\n for j in active_jobs:\n\n # use self.status_rate_limiter to avoid too many API calls.\n with self.status_rate_limiter:\n\n # https://cloud.google.com/life-sciences/docs/reference/rest/v2beta/projects.locations.operations/get\n # Get status from projects.locations.operations/get\n operations = self._api.projects().locations().operations()\n request = operations.get(name=j.jobname)\n logger.debug(\"Checking status for operation {}\".format(j.jobid))\n\n try:\n status = self._retry_request(request)\n except googleapiclient.errors.HttpError as ex:\n\n # Operation name not found, even finished should be found\n if ex.status == 404:\n j.error_callback(j.job)\n continue\n\n # Unpredictable server (500) error\n elif ex.status == 500:\n logger.error(ex[\"content\"].decode(\"utf-8\"))\n j.error_callback(j.job)\n\n except WorkflowError as ex:\n print_exception(ex, self.workflow.linemaps)\n j.error_callback(j.job)\n continue\n\n # The operation is done\n if status.get(\"done\", False) == True:\n\n # Derive success/failure from status codes (prints too)\n if self._job_was_successful(status):\n j.callback(j.job)\n else:\n self.print_job_error(j.job, jobid=j.jobid)\n j.error_callback(j.job)\n\n # The operation is still running\n else:\n still_running.append(j)\n\n with self.lock:\n self.active_jobs.extend(still_running)\n sleep()", "def job_supervisor(self):\n while not self.stop.is_set():\n if not self.running_jobs.empty():\n executor, job = self.running_jobs.get()\n if job.is_alive():\n self.running_jobs.put((executor, job))\n else:\n job.task_done()\n self.running_jobs.task_done()\n time.sleep(SCHEDULER.FINEDELAY)", "def track_job_to_completion(ip_address, headers, job_id):\n job_status_map = {\n \"2020\": \"Scheduled\",\n \"2030\": \"Queued\",\n \"2040\": \"Starting\",\n \"2050\": \"Running\",\n \"2060\": \"Completed\",\n \"2070\": \"Failed\",\n \"2090\": \"Warning\",\n \"2080\": \"New\",\n \"2100\": \"Aborted\",\n \"2101\": \"Paused\",\n \"2102\": \"Stopped\",\n \"2103\": \"Canceled\"\n }\n\n max_retries = 20\n sleep_interval = 60\n failed_job_status = [2070, 2090, 2100, 2101, 2102, 2103]\n job_url = 'https://%s/api/JobService/Jobs(%s)' % (ip_address, job_id)\n loop_ctr = 0\n job_incomplete = True\n print(\"Polling %s to completion ...\" % job_id)\n while loop_ctr < max_retries:\n loop_ctr += 1\n time.sleep(sleep_interval)\n job_resp = requests.get(job_url, headers=headers, verify=False)\n if job_resp.status_code == 200:\n job_status = str((job_resp.json())['LastRunStatus']['Id'])\n print(\"Iteration %s: Status of %s is %s\" % (loop_ctr, job_id, job_status_map[job_status]))\n if int(job_status) == 2060:\n job_incomplete = False\n 
print(\"Completed updating firmware successfully ... Exiting\")\n break\n elif int(job_status) in failed_job_status:\n job_incomplete = False\n print(\"Update job failed ... \")\n job_hist_url = str(job_url) + \"/ExecutionHistories\"\n job_hist_resp = requests.get(job_hist_url, headers=headers, verify=False)\n if job_hist_resp.status_code == 200:\n job_history_id = str((job_hist_resp.json())['value'][0]['Id'])\n job_hist_det_url = str(job_hist_url) + \"(\" + job_history_id + \")/ExecutionHistoryDetails\"\n job_hist_det_resp = requests.get(job_hist_det_url,\n headers=headers,\n verify=False)\n if job_hist_det_resp.status_code == 200:\n print(job_hist_det_resp.text)\n else:\n print(\"Unable to parse job execution history .. Exiting\")\n break\n else:\n print(\"Unable to poll status of %s - Iteration %s \" % (job_id, loop_ctr))\n if job_incomplete:\n print(\"Job %s incomplete after polling %s times...Check status\" % (job_id, max_retries))", "def update(self) -> None:\n self.previous_status = self.status\n\n jobs = self._client.describe_jobs(jobs = [ self.id ])[\"jobs\"]\n\n try:\n self.state = jobs[0]\n except IndexError:\n raise ValueError(\"Invalid or unknown job id %s\" % self.id) from None", "def _check_for_finished_job(self):\n raise NotImplementedError", "def on_job_update(_job):\n nonlocal job\n job = _job", "def on_job_update(_job):\n nonlocal job\n job = _job", "def on_job_update(_job):\n nonlocal job\n job = _job", "def on_job_update(_job):\n nonlocal job\n job = _job", "def _update(self):\n _logme.log('Updating job.', 'debug')\n self._updating = True\n if self.done or not self.submitted:\n self._updating = False\n return\n self.queue.update()\n if self.id:\n queue_info = self.queue[self.id]\n if queue_info:\n assert self.id == queue_info.id\n self.queue_info = queue_info\n self.state = self.queue_info.state\n if self.state == 'completed':\n if not self._got_exitcode:\n self.get_exitcode()\n if not self._got_times:\n self.get_times()\n self._updating = False", "def wait_until_job_completes(self):\n while True:\n jobflow = self.conn.describe_jobflow(self.jobid)\n if self.verbose_mode:\n print jobflow.state\n if (jobflow.state == 'COMPLETED' or jobflow.state == 'TERMINATED'\n or jobflow.state == 'FAILED'):\n break\n sleep(10)", "def wait(self, jobs):\n while True:\n try:\n for job in self.query(jobs=jobs):\n if job['status'] == 'completed':\n jobs.remove(job['uuid'])\n yield (job)\n except JobClientError as e:\n logger.error(e.message)\n\n if len(jobs) > 0:\n time.sleep(self._status_update_interval_secs)\n else:\n break", "def update_job_state(self, job):", "def job_completed(self,event):\n if event.exception:\n logger.worker.warning('The job crashed :(')\n else:\n logger.worker.warning(self.task_id+'The job finished ')\n # set job complete to true, will display complete in web interface \n self.job_complete_status[self.task_id] = True", "def track_job_to_completion(ip_address, headers, job_id, state):\n\tjob_status_map = {\n\t\t\"2020\": \"Scheduled\",\n\t\t\"2030\": \"Queued\",\n\t\t\"2040\": \"Starting\",\n\t\t\"2050\": \"Running\",\n\t\t\"2060\": \"Completed\",\n\t\t\"2070\": \"Failed\",\n\t\t\"2090\": \"Warning\",\n\t\t\"2080\": \"New\",\n\t\t\"2100\": \"Aborted\",\n\t\t\"2101\": \"Paused\",\n\t\t\"2102\": \"Stopped\",\n\t\t\"2103\": \"Canceled\"\n\t}\n\tstatus_mapping = {\n\t\t\"On\": \"Powered On\",\n\t\t\"Off\": \"Powered Off\",\n\t\t\"Cold Boot\": \"Power Cycle\",\n\t\t\"Warm Boot\": \"Reset\",\n\t\t\"ShutDown\": \"Shutdown\"\n\t}\n\n\tmax_retries = 20\n\tsleep_interval = 
30\n\tfailed_job_status = [2070, 2090, 2100, 2101, 2102, 2103]\n\tjob_url = 'https://%s/api/JobService/Jobs(%s)' % (ip_address, job_id)\n\tloop_ctr = 0\n\tjob_incomplete = True\n\tprint(\"Polling %s to completion ...\" % job_id)\n\twhile loop_ctr < max_retries:\n\t\tloop_ctr += 1\n\t\ttime.sleep(sleep_interval)\n\t\tjob_resp = requests.get(job_url, headers=headers, verify=False)\n\t\tif job_resp.status_code == 200:\n\t\t\tjob_status = str((job_resp.json())['LastRunStatus']['Id'])\n\t\t\tjob_status_str = job_status_map[job_status]\n\t\t\tprint(\"Iteration %s: Status of %s is %s\" %\n\t\t\t (loop_ctr, job_id, job_status_str))\n\t\t\tif int(job_status) == 2060:\n\t\t\t\tjob_incomplete = False\n\t\t\t\tprint(\"%s operation successful\" %status_mapping[state])\n\t\t\t\tbreak\n\t\t\telif int(job_status) in failed_job_status:\n\t\t\t\tjob_incomplete = False\n\t\t\t\tif job_status_str == \"Warning\":\n\t\t\t\t\tprint(\"Completed with errors\")\n\t\t\t\telse:\n\t\t\t\t\tprint(\"%s operation failed\" %status_mapping[state])\n\t\t\t\tjob_hist_url = str(job_url) + \"/ExecutionHistories\"\n\t\t\t\tjob_hist_resp = requests.get(job_hist_url, headers=headers, verify=False)\n\t\t\t\tif job_hist_resp.status_code == 200:\n\t\t\t\t\tget_execution_detail(job_hist_resp, headers, job_hist_url)\n\t\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"Unable to poll status of %s - Iteration %s \" % (job_id, loop_ctr))\n\tif job_incomplete:\n\t\tprint(\"Job %s incomplete after polling %s times...Check status\" %\n\t\t (job_id, max_retries))", "def queueStatus(self, job):\n self.status_pool.apply_async(self.statusJob, (job,))", "def refresh(self): # noqa\n data = self.connection.hgetall(self.key)\n if not data:\n raise NoSuchJobError('No such job: {0}'.format(self.key))\n self.restore(data)", "def update_status(cls):\n for job in cls.query.filter(cls.finished == False):\n num_hits_left = session.query(BoxHit).filter_by(training_job_id = job.id, outstanding=True).count()\n urls_left = session.query(VideoTrainingURL).filter_by(training_job_id=job.id, processed = False)\n dynamo = DynamoIngestionStatusClient()\n num_urls_left = 0\n for url in urls_left:\n dynamo_url = dynamo.get(url.url)\n if dynamo_url is None or dynamo_url['status'] == 'Failed':\n # will never be processed, so ignore for our purposes\n url.processed = True\n else:\n num_urls_left += 1\n if num_hits_left+num_urls_left == 0:\n job.finished = True\n print '*** Job ID: %s is complete ***' % str(job.id)\n\n print '------------- Stats for Job ID: %s -------------' % str(job.id)\n print 'Total URLs : %i' % VideoTrainingURL.query.filter_by(training_job_id = job.id).count()\n print 'Total HITs : %i' % BoxHit.query.filter_by(training_job_id = job.id).count()\n if not job.finished:\n print 'unprocessed URLs: %i' % num_urls_left\n print 'outstanding HITs: %i\\n' % num_hits_left\n session.flush()", "def wait_jobs_completed():\n\n logging.info(\"checking jobs\")\n time.sleep(30)\n while True:\n running_jobs = []\n for job in bq.list_jobs(state_filter='RUNNING', all_users=True):\n running_jobs.append(job)\n logging.info(\"running jobs {}\".format(len(running_jobs)))\n if not running_jobs:\n break\n time.sleep(30)", "def __work__(self):\n while not self.is_done:\n self.refreshSignal.emit()\n time.sleep(0.05)", "def wait_for_job(job) -> bool:\n job.refresh_from_db()\n is_done = False\n\n while not is_done:\n if job.end_time is None and job.success is None:\n print(f\"Polling {type(job).__name__}s. 
Currently waiting for job id: {job.id}\")\n sleep(20)\n job.refresh_from_db()\n elif job.retried and job.retried_job:\n job = job.retried_job\n elif job.success:\n return True\n else:\n print(f\"{type(job).__name__} {job.id} failed!\")\n return False\n\n return False", "def update(self):\n for uid, server in self.servers_online.items():\n if len(server.jobs):\n self.populate_server(server)\n for uid, server in self.servers_online.items():\n if server.jobs:\n server.jobs[0].task_time -= time_interval\n server.waiting_time -= time_interval\n if server.jobs[0].task_time <= 0:\n completed_task = server.jobs.pop(0)\n print(f\"Task '{completed_task.description}' completed\")\n self.all_tasks.remove(completed_task)\n self.servers_jobs_list[uid].pop(0)\n for uid, server in self.all_servers.items():\n if server.status:\n print(f\"{server.server_name} has {len(set(server.jobs))} job(s)\")\n else:\n print(f\"{server.server_name} is offline\")", "def wait_for_job(job):\n if job.state != 'RUNNING':\n job.begin()\n retry_count = 1000\n while retry_count > 0 and job.state != 'DONE':\n retry_count -= 1\n time.sleep(1)\n job.reload()\n assert not job.errors, job.errors\n return job", "def finish(self):\n old_message = None\n cooldown = 5\n while not self.queue_manager.check_finished():\n status = self.get_upload_status(0)\n datagen_workers = f\"{status.sets_being_generated} data generators, \"\n msg = f\"Waiting for {datagen_workers}{status.sets_being_loaded} uploads to finish\"\n if old_message != msg or cooldown < 1:\n old_message = msg\n self.logger.info(msg)\n self.update_running_totals()\n self.print_running_totals()\n cooldown = 5\n else:\n cooldown -= 1\n time.sleep(WAIT_TIME)\n\n self.log_failures()\n\n self.logger.info(\"\")\n self.logger.info(\" == Results == \")\n self.update_running_totals()\n self.print_running_totals()\n elapsed = format_duration(timedelta(seconds=time.time() - self.start_time))\n\n if self.run_until.sobject_name:\n result_msg = f\"{self.sobject_counts[self.run_until.sobject_name].successes} {self.run_until.sobject_name} records and associated records\"\n else:\n result_msg = f\"{self.run_until.target:,} iterations\"\n\n self.logger.info(f\"☃ Snowfakery created {result_msg} in {elapsed}.\")", "def on_job_update(_job):\n nonlocal job, job_update_counter\n\n # Cancel the job when it updates in the `WORKING` state for the\n # second time. We do it just to be sure it is somewhere in the\n # middle of execution.\n if (job is not None and\n _job.state == job.state == 'WORKING'):\n my_job_async_gen.job_manager_class.cancel(job.id)\n\n job = _job\n job_update_counter += 1", "async def _job(self):\n await asyncio.sleep(self._timeout)\n await self._callback()", "def wait_for_processing(self, job_id: str, show_progress: bool = False) -> None:\n # How often to poll Harmony for updated information during job processing.\n check_interval = 3.0 # in seconds\n # How often to refresh the screen for progress updates and animating spinners.\n ui_update_interval = 0.33 # in seconds\n\n intervals = round(check_interval / ui_update_interval)\n if show_progress:\n with progressbar.ProgressBar(max_value=100, widgets=progressbar_widgets) as bar:\n progress = 0\n while progress < 100:\n progress, status = self.progress(job_id)\n if status == 'failed':\n raise Exception('Job has failed. Call result_json() to learn more.')\n break\n if status == 'canceled':\n print('Job has been canceled.')\n break\n # This gets around an issue with progressbar. If we update() with 0, the\n # output shows up as \"N/A\". 
If we update with, e.g. 0.1, it rounds down or\n # truncates to 0 but, importantly, actually displays that.\n if progress == 0:\n progress = 0.1\n\n for _ in range(intervals):\n bar.update(progress) # causes spinner to rotate even when no data change\n sys.stdout.flush() # ensures correct behavior in Jupyter notebooks\n if progress >= 100:\n break\n else:\n time.sleep(ui_update_interval)\n else:\n progress = 0\n while progress < 100:\n progress, status = self.progress(job_id)\n if status == 'failed':\n raise Exception('Job has failed. Call result_json() to learn more.')\n break\n if status == 'canceled':\n break\n time.sleep(check_interval)", "def complete_thread_job(job):\r\n conn = psycopg2.connect(DATABASE_URL)\r\n cur = conn.cursor()\r\n cur.execute(\"\"\"\r\n UPDATE thread_tasks\r\n SET complete=true\r\n WHERE id=%s\r\n \"\"\",\r\n (job.id,))\r\n\r\n conn.commit()\r\n cur.close()\r\n conn.close()", "def complete_job(self, command_dict):\n job_uuid = command_dict['job_uuid']\n try:\n job = Job[job_uuid]\n except KeyError as e:\n # Job not found is not worth re-raising\n logger.warn(e)\n logger.warn(\"Job {} missing\".format(job_uuid))\n return\n\n logger.info(\"job {} finished with status of {}\".format(job.uuid,\n job.status))\n # Get the job log from the worker\n logger.info(\"retrieving log for job {}\".format(job.uuid))\n job_data_dir = os.path.join(self.data_dir, job.uuid)\n if(not os.path.exists(job_data_dir)):\n os.mkdir(job_data_dir)\n\n fetch_file_from_url(job.log_url(), job_data_dir)\n\n # Now get the job output data from the worker\n if(job.status == Job.STATUS_PROCESSED):\n\n logger.info(\"retrieving output for job {}\".format(job.uuid))\n fetch_file_from_url(job.download_url(), job_data_dir)\n job.status = Job.STATUS_COMPLETE\n\n job.on_primary = True\n # save job\n Job[job.uuid] = job", "def refresh_queue_status(self):\n \n # Get the jobid and state for all jobs pending/running/completed for the current user\n qacct_stdout=self.run_grid_command_resubmit([\"qacct\",\"-o\",getpass.getuser(),\"-j\",\"*\"])\n \n # info list should include jobid, state, cpus, time, and maxrss\n info=[]\n job_status=[]\n for line in qacct_stdout.split(\"\\n\"):\n if line.startswith(\"jobnumber\") or line.startswith(\"job_number\"):\n if job_status:\n info.append(job_status)\n job_status=[line.rstrip().split()[-1],\"NA\",\"NA\",\"NA\",\"NA\"]\n # get the states for completed jobs\n elif line.startswith(\"failed\"):\n failed_code = line.rstrip().split()[1]\n if failed_code != \"0\":\n if failed_code in [\"37\",\"100\"]:\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n elif line.startswith(\"deleted_by\"):\n if line.rstrip().split()[-1] != \"NONE\" and job_status[1] == self.job_code_terminated:\n job_status[1]=self.job_code_deleted\n elif line.startswith(\"exit_status\"):\n # only record if status has not yet been set\n if job_status[1] == \"NA\":\n exit_status = line.rstrip().split()[-1]\n if exit_status == \"0\":\n job_status[1]=self.job_code_completed\n elif exit_status == \"137\":\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n # get the current state for running jobs\n elif line.startswith(\"job_state\"):\n job_status[1]=line.rstrip().split()[-1]\n elif line.startswith(\"slots\"):\n job_status[2]=line.rstrip().split()[-1]\n elif line.startswith(\"ru_wallclock\"):\n try:\n # get the elapsed time in minutes\n job_status[3]=str(float(line.rstrip().split()[-1])/60.0)\n except ValueError:\n job_status[3]=\"NA\"\n elif 
line.startswith(\"ru_maxrss\"):\n job_status[4]=line.rstrip().split()[-1]+\"K\"\n \n if job_status:\n info.append(job_status)\n\n return info", "def statusJob(self, job):\n with self.thread_lock:\n name = job.name\n job_container = self.shared_dags[job]\n job_dag = job_container.getDAG()\n\n # If there is no timing, then the job is not finished\n if job_container.getTime():\n job_container.addCaveat('time: ' + job_container.getTime())\n if job.getResult() == False:\n self.active.remove(job)\n self.killJobs()\n return\n else:\n self.job_queue_count -= 1\n job_dag.delete_node(job)\n self.active.remove(job)\n if self.args.download_only:\n result = ' -Downloaded | '\n else:\n result = ' --Finished | '\n\n else:\n result = ' Launching | '\n\n # Format job name length field\n name_cnt = (self.term_width - len(job.name)) + 2 # 2 character buffer\n result = strftime(\"%H:%M\") + result + job.name + ' '*name_cnt\n\n # Format caveat length\n caveats = job_container.getCaveats()\n caveat_cnt = self.max_caveat_length - len(caveats)\n\n if caveats:\n result = result + caveats + ' '*caveat_cnt\n else:\n result = result + ' '*caveat_cnt\n\n remaining = job_dag.size()\n print(result, \"remaining: %-3d active: %-2d\" % (remaining, len(self.active)), [x.name for x in self.active])", "def work(self, job):\n pass", "def on_job_update(_job):\n nonlocal job, job_update_counter\n\n # Cancel the job when it updates in the `WORKING` state for the\n # second time. We do it just to be sure it is somewhere in the\n # middle of executions.\n if (job is not None and\n _job.state == job.state == 'WORKING'):\n my_job_gen.job_manager_class.cancel(job.id)\n\n job = _job\n job_update_counter += 1", "def wait_until_complete(self):\n self._log.debug(\"waiting for upload job %s to complete\", self._job_id)\n xpath = ManoProject.prefix_project(\"D,/rw-image-mgmt:upload-jobs/\" +\n \"rw-image-mgmt:job[rw-image-mgmt:id={}]\".\n format(quoted_key(str(self._job_id))),\n project=self._project,\n log=self._log)\n\n while True:\n query_iter = yield from self._dts.query_read(xpath)\n job_status_msg = None\n for fut_resp in query_iter:\n job_status_msg = (yield from fut_resp).result\n break\n\n if job_status_msg is None:\n raise UploadJobError(\"did not get a status response for job_id: %s\",\n self._job_id)\n\n if job_status_msg.status == \"COMPLETED\":\n msg = \"upload job %s completed successfully\" % self._job_id\n self._log.debug(msg)\n return\n\n elif job_status_msg.status == \"FAILED\":\n msg = \"upload job %s as not successful: %s\" % (self._job_id, job_status_msg.status)\n self._log.error(msg)\n raise UploadJobFailed(msg)\n\n elif job_status_msg.status == \"CANCELLED\":\n msg = \"upload job %s was cancelled\" % self._job_id\n self._log.error(msg)\n raise UploadJobCancelled(msg)\n\n yield from asyncio.sleep(.5, loop=self._loop)", "def resubmit(self):\n self.keep_data = True\n ManagedJob.submit(self)", "def run_if_refresh(self):\n if self.is_finished():\n self.status.collect = True\n self.run() # self.run_if_collect()\n elif (\n self.server.run_mode.non_modal\n or self.server.run_mode.queue\n or self.server.run_mode.modal\n ):\n self.run_static()\n else:\n self.refresh_job_status()\n if self.status.refresh:\n self.status.suspended = True\n if self.status.busy:\n self.status.refresh = True\n self.run_if_refresh()", "def test_get_refresh_job_status(self):\n pass", "def monitor(self):\r\n while True:\r\n for worker, start_time in self.workers.items():\r\n if (not worker.isAlive() or\r\n self.timeout\r\n and datetime.now() - 
start_time > self.timeout): \r\n\r\n self.work_count.get_nowait()\r\n self.jobs.task_done()\r\n del self.workers[worker]\r\n\r\n time.sleep(1)", "def reload_job(self):\n if self.ui['main_window'].widgets['live_preview'].get_active():\n self._update_preview()", "def updateRcloneJobStatus():\n global jobIds, jobStatusGauge\n\n # Check if the jobs are running, update the variables\n for jobName, jobId in jobIds.items():\n jobIsRunning = getRcloneJobRunning(jobId)\n jobIds[jobName] = jobId if jobIsRunning else None\n jobStatusGauge.labels(rclone_job=jobName).set(1 if jobIsRunning else 0)", "def on_job_update(_job):\n nonlocal job\n job = _job\n # Asserts that job is either pending or canceled.\n assert job.state in ['PENDING', 'CANCELED'], (\n 'job that canceled immediately after submission has wrong '\n 'state `%s`' % job.state\n )", "def check_jobs(self):\n # New/aborted jobs\n try:\n jobs = self.sm.get_job('%', phase = 'QUEUED')\n for job in jobs:\n self._launch_job(Job(job['job']))\n res = self.sm.get_aborted_jobs()\n aborts = [x['identifier'] for x in res]\n # Completed jobs\n for t in self.threads:\n if t.isDone() or t.name in aborts:\n self.threads.remove(t)\n # Set job status to COMPLETED\n job = Job(self.sm.get_job(t.name)[0]['job'])\n if t._Future__excpt == None:\n job.set_phase('COMPLETED')\n if t._Future__result != None:\n job.set_results(t._Future__result) \n status = True\n else:\n job.set_phase('ERROR')\n job.set_error_summary(str(t._Future__excpt[1]).replace(\"'\", \"\"))\n status = False\n job.set_end_time(datetime.utcnow().isoformat())\n self.sm.update_job(job = job, completed = status)\n except Exception, e:\n print \"Error:\", e", "def watch(self, job_id, polling_frequency=1, printing_frequency=60):\n\n # Get the job name and job status\n job_name = self.job_name(job_id)\n job_status = self.job_status(job_id)\n\n # Keep track of the job status from the previous iteration (makes sense below)\n last_job_status = None\n\n # Keep track of the last time we printed to the screen\n last_print = time.time()\n\n # Issue periodic updates to the job status\n while job_status not in [\"RUNNING\", \"FAILED\", \"SUCCEEDED\"]:\n if (\n time.time() - last_print\n ) > printing_frequency or job_status != last_job_status:\n logging.info(\"Job %s (%s) is %s\", job_name, job_id, job_status)\n last_print = time.time()\n time.sleep(polling_frequency)\n last_job_status = job_status\n job_status = self.job_status(job_id)\n\n # Now just print out the logs until the job is done\n\n # Keep track of how many lines have been printed\n n_log_lines_printed = 0\n\n # Get the complete set of logs\n logs = self.get_logs(job_id)\n\n # Keep printing the logs to the screen\n while len(logs) > n_log_lines_printed or job_status == \"RUNNING\":\n while len(logs) > n_log_lines_printed:\n logging.info(logs[n_log_lines_printed])\n n_log_lines_printed += 1\n # Wait before checking for more logs\n time.sleep(polling_frequency)\n\n # Refresh the logs\n logs = self.get_logs(job_id)\n\n # Get the new job status\n job_status = self.job_status(job_id)\n\n # The job is now over\n logging.info(\"The final status of %s (%s) is %s\", job_name, job_id, job_status)", "def on_job_update(_job):\n nonlocal job\n job = _job\n # Asserts that job is either pending or canceled.\n assert job.state in ['PENDING', 'CANCELED'], (\n 'Job that canceled immediately after submission has wrong '\n f'state `{job.state}`!')", "def on_job_update(_job):\n nonlocal job\n job = _job\n # Asserts that job is either pending or canceled.\n assert 
job.state in ['PENDING', 'CANCELED'], (\n 'Job that canceled immediately after submission has wrong '\n f'state `{job.state}`!')", "def on_job_update(_job):\n nonlocal job\n job = _job\n # Asserts that job is either pending or canceled.\n assert job.state in ['PENDING', 'CANCELED'], (\n 'Job that canceled immediately after submission has wrong '\n f'state `{job.state}`!')", "def iterate(self):\n self._prepare_workers()\n self.prepare()\n\n self.job_queue = self.get_job_queue()\n self.job_done = self.get_job_done()\n\n self.worker_informations[\"started\"] = True\n self.write_worker_informations()\n # Ici : enregistrer son worker\n\n GARGAGE_COUNT = 0\n while True:\n\n #############################################\n ### Try to retrieve a job_id in the queue ###\n #############################################\n _start_time_queue = time.time()\n must_stop = False\n\n do_print = True\n while True:\n\n try:\n job_id = self.job_queue.remove()\n except Exception as e:\n logger.fatal(e, exc_info=True)\n raise e\n\n # Ici : on peut peut etre verifier si on a pas deja fait le job\n # ce qui peut arriver, si on a mal synchroniser en entree ? => Ex : on a relancer le controller avec ces models par default ?\n # ou si on a retirer 2 fois un model random,\n\n if job_id is not None:\n # I have found something in the queue\n break\n\n must_stop, reason = self.must_stop()\n if must_stop:\n break\n\n current_time = time.time()\n if (\n self.max_queue_waiting_time is not None\n and current_time - _start_time_queue >= self.max_queue_waiting_time\n ):\n logger.info(\"queue was empty...\")\n logger.info(\"stop waiting for queue\")\n break\n else:\n if do_print:\n logger.info(\"queue was empty...\")\n logger.info(\"wait for queue for %d sec(s)\" % self.input_queue_sleeping_time)\n do_print = False # to print only time\n time.sleep(self.input_queue_sleeping_time)\n\n ###########################################\n # max_queue_waiting_time : #\n # * None <=> inf => wait forever #\n # * -1 => don't wait at all #\n # * x => wait x seconds #\n ###########################################\n\n if job_id is None:\n self.worker_informations[\"stopped\"] = True\n self.worker_informations[\"stopping_reason\"] = \"empty queue\"\n self.write_worker_informations()\n\n break\n\n if must_stop:\n self.worker_informations[\"stopped\"] = True\n self.worker_informations[\"stopping_reason\"] = reason\n self.write_worker_informations()\n logger.info(\"I must stop because %s\" % reason)\n break\n\n ###########################################\n ### Retrieve the parameters of that job ###\n ###########################################\n job_param = self.data_persister.read(key=job_id, path=\"job_param\", write_type=SavingType.json)\n\n logger.info(\"start job_id : %s\" % job_id)\n logger.info(\"\")\n\n try:\n _success = False\n start_time = time.time()\n\n #################################\n ### Send job_id and job_param ###\n #################################\n yield job_id, job_param\n\n return_time = time.time()\n _success = True\n \n except Exception as e:\n logger.fatal(e, exc_info=True)\n raise e\n\n finally:\n\n if not _success:\n ####################################\n ### It means there were an error ###\n ####################################\n self.worker_informations[\"stopped\"] = True\n self.worker_informations[\"stopping_reason\"] = \"error\"\n self.write_worker_informations()\n\n ########################\n ### Save time of job ###\n ########################\n self._all_times.append(return_time - start_time)\n\n 
##################################\n ### Do a garbage collector run ###\n ##################################\n GARGAGE_COUNT += 1\n if GARGAGE_COUNT >= self.gc_collect_freq:\n GARGAGE_COUNT = 0\n gc.collect()\n\n ###############################\n ### Add job to 'done queue' ###\n ###############################\n could_add = False\n _start_time_done_queue = time.time()\n\n do_print = True\n while True:\n could_add = self.job_done.add(data=job_id)\n\n if could_add:\n break\n\n must_stop, reason = self.must_stop()\n if must_stop:\n break\n\n current_time = time.time()\n if (\n self.max_done_queue_waiting_time is not None\n and current_time - _start_time_done_queue >= self.max_done_queue_waiting_time\n ):\n logger.info(\"done queue was full...\")\n logger.info(\"stop waiting for done queue\")\n break\n else:\n if do_print:\n logger.info(\"done queue was full...\")\n logger.info(\"wait for done queue for %d sec(s)\" % self.done_queue_sleeping_time)\n\n do_print = False # to print only once\n time.sleep(self.done_queue_sleeping_time)\n\n #############################################\n # max_done_queue_waiting_time : #\n # * None <=> inf : wait for ever #\n # * -1 : don't wait at all #\n # * x : wait for x seconds #\n #############################################\n\n # Ici : regarder si on a un flag 'stop'\n\n if not must_stop:\n must_stop, reason = self.must_stop()\n\n if must_stop:\n self.worker_informations[\"stopped\"] = True\n self.worker_informations[\"stopping_reason\"] = reason\n self.write_worker_informations()\n logger.info(\"I must stop because %s\" % reason)\n break", "def do_status(self, args):\n status = self._leet.job_status\n\n for job in self.finished_jobs:\n status.append({\"id\" : job.id,\n \"hostname\" : job.machine.hostname,\n \"plugin\": job.plugin_instance.LEET_PG_NAME,\n \"status\" : job.status})\n if status:\n pretty_jobs_status(status)\n else:\n print(\"***No jobs pending\")", "def on_job_update(_job):\n nonlocal job\n job = _job\n # Asserts that job is either pending or canceled.\n assert job.state in ['PENDING', 'CANCELED'], (\n 'Job that canceled immediately after submission has wrong '\n 'state `{job.state}`!'\n )", "def refresh_jobs(self):\n\n jobs = self.backend.get_jobs()\n\n if not isinstance(jobs, list):\n warning(self.iface, \"Error loading Jobs from the backend (Response status code not 200)\")\n jobs = []\n\n if not self.jobs_changed(jobs):\n return\n\n self.init_jobs()\n self.jobsTableWidget.setSortingEnabled(False)\n self.jobsTableWidget.setRowCount(len(jobs))\n row = 0\n self.jobs_table = {}\n for job in jobs:\n\n if job.updated:\n str_date = job.updated.strftime(\"%Y-%m-%d_%H-%M-%S\")\n qitem = QTableWidgetItem(str_date)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n elif job.created:\n str_date = job.created.strftime(\"%Y-%m-%d_%H-%M-%S\")\n qitem = QTableWidgetItem(str_date)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n\n if not job.title:\n qitem = QTableWidgetItem(\"Untitled Job!\")\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 0, qitem)\n else:\n qitem = QTableWidgetItem(job.title)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 0, qitem)\n\n exec_btn = QPushButton(self.jobsTableWidget)\n exec_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/execute_icon.svg')))\n\n if job.status:\n qitem = QTableWidgetItem(job.status)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n 
self.jobsTableWidget.setItem(row, 2, qitem)\n\n if job.status == \"finished\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(75, 254, 40, 160))\n disp_btn = QPushButton(self.jobsTableWidget)\n disp_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/display_icon.svg')))\n disp_btn.setIconSize(QSize(29, 29))\n self.jobsTableWidget.setCellWidget(row, 4, disp_btn)\n disp_btn.clicked.connect(lambda *args, job_id=job.id: self.job_display(job_id))\n disp_btn = QPushButton(self.jobsTableWidget)\n disp_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/download.png')))\n disp_btn.setIconSize(QSize(29, 29))\n disp_btn.clicked.connect(lambda *args, job_id=job.id: self.job_download(job_id))\n self.jobsTableWidget.setCellWidget(row, 5, disp_btn)\n iface.actionZoomIn().trigger()\n elif job.status == \"running\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(254, 178, 76, 200))\n exec_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/stop-button.png')))\n elif job.status == \"canceled\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(254, 178, 76, 200))\n elif job.status == \"error\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(254, 100, 100, 200))\n\n exec_btn.setIconSize(QSize(21, 21))\n self.jobsTableWidget.setCellWidget(row, 3, exec_btn)\n\n if job.status == \"running\":\n exec_btn.clicked.connect(lambda *args, job_id=job.id: self.job_stop(job_id))\n else:\n exec_btn.clicked.connect(lambda *args, job_id=job.id: self.job_execute(job_id))\n\n info_btn2 = QPushButton(self.jobsTableWidget)\n info_btn2.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/edit_icon.png')))\n info_btn2.setIconSize(QSize(25, 25))\n self.jobsTableWidget.setCellWidget(row, 6, info_btn2)\n info_btn2.clicked.connect(lambda *args, job_id=job.id: self.adapt_job(job_id))\n\n info_btn3 = QPushButton(self.jobsTableWidget)\n info_btn3.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/info_icon.png')))\n info_btn3.setIconSize(QSize(25, 25))\n self.jobsTableWidget.setCellWidget(row, 7, info_btn3)\n info_btn3.clicked.connect(lambda *args, job_id=job.id: self.job_info(job_id))\n\n info_btn4 = QPushButton(self.jobsTableWidget)\n info_btn4.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/deleteFinalBtn.png')))\n info_btn4.setIconSize(QSize(25, 25))\n self.jobsTableWidget.setCellWidget(row, 8, info_btn4)\n info_btn4.clicked.connect(lambda *args, job_id=job.id: self.delete_job_final(job_id))\n\n self.refreshButton.setEnabled(True)\n self.refreshButton_service.setEnabled(True)\n\n self.jobs_table[row] = job\n\n row += 1\n\n self.jobsTableWidget.setSortingEnabled(True)", "def submit(self):\n self.keep_data = False\n ManagedJob.submit(self)", "def refresh_jobs(self):\n jobs = self.connection.user_jobs()\n\n self.init_jobs()\n self.jobsTableWidget.setRowCount(len(jobs))\n row = 0\n for val in jobs:\n\n if \"id\" in val:\n qitem = QTableWidgetItem(val[\"id\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 0, qitem)\n\n if \"error\" in val:\n if val[\"error\"]:\n if \"message\" in val[\"error\"]:\n qitem = QTableWidgetItem(val[\"error\"][\"message\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n elif \"description\" in val:\n qitem = QTableWidgetItem(val[\"description\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n\n if \"submitted\" in val:\n qitem = QTableWidgetItem(val[\"submitted\"])\n 
qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 2, qitem)\n\n execBtn = QPushButton(self.jobsTableWidget)\n execBtn.setText('Execute')\n\n if \"status\" in val:\n qitem = QTableWidgetItem(val[\"status\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 3, qitem)\n\n if val[\"status\"] == \"finished\":\n dispBtn = QPushButton(self.jobsTableWidget)\n dispBtn.setText('Display')\n self.jobsTableWidget.setCellWidget(row, 5, dispBtn)\n dispBtn.clicked.connect(lambda *args, row=row: self.job_display(row))\n\n self.jobsTableWidget.setCellWidget(row, 4, execBtn)\n execBtn.clicked.connect(lambda *args, row=row: self.job_execute(row))\n\n row += 1", "def refresh_status(self):\n\n pass", "def handle_job_success(self, job):\n super().handle_job_success(job)\n\n self._handle_job_status(job, \"finished\")", "def jobFinished(self):\n if not self._job.result and not self._job.aborted:\n self.showErrorMessage()\n del self._job\n self.deleteLater()", "def _refresh_cache(self):\r\n if self._cache_refresh:\r\n Thread(target=self._cache_refresh).start()\r\n return \"Cache refresh in progress\"", "def query_job_progress():\n pass", "def isFinished(self):\r\n try:\r\n output = Popen(\"qstat | grep \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n if self.jobId in output:\r\n if output.split()[4] == \"Eqw\":\r\n #If the job fails, print a warning, and wait a minute so the user can check why the job fails,\r\n #before resubmitting the job.\r\n logging.warning(\"job \" + output.split()[2] + \" failed to run, resubmitting in one minute\")\r\n time.sleep(60)\r\n output = Popen(\"qdel \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n self.submit()\r\n return False\r\n else:\r\n logging.info(\"job with ID: \" + self.jobId + \" is finished.\")\r\n return True\r\n \r\n except ValueError:\r\n logging.info(\"Error: waiting for not submitted job...\")", "def finish(ch, method, properties, body) -> Union[Job, None]:\n del ch, method, properties\n # todo: add error handling\n found_job = db.Jobs().get_by_id(body)\n if not found_job:\n return\n found_job.status = \"done\"\n return db.Jobs().update(found_job)", "def poll_job(self, job, polling_frequency_in_sec=60):\r\n logger.Logger.info('Waiting for job to finish...')\r\n request = self.cloudsqlapi_service.operations().get(\r\n project=job['targetProject'],\r\n operation=job['name'])\r\n\r\n num_wait_sec = 0\r\n while True:\r\n result = request.execute(num_retries=2)\r\n if result['status'] == 'DONE':\r\n logger.Logger.info('Job complete.')\r\n return\r\n else:\r\n logger.Logger.info(\r\n 'Wait %d secs for project %s, wait more. 
Jobs: %s' % (\r\n num_wait_sec, job['targetProject'], result))\r\n time.sleep(polling_frequency_in_sec)\r\n num_wait_sec += polling_frequency_in_sec", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def monitor_and_terminate(self):\n import time\n import datetime\n\n keep_running = True\n\n while keep_running:\n\n print()\n print(datetime.datetime.now().replace(microsecond=0))\n print(self.get_monitor_string())\n\n time.sleep(30)\n\n _, status = self.reporter.get_job_status(self.info)\n if status[\"active\"]+status[\"running\"] == 0:\n keep_running = False\n\n print(\"All tasks done.\")", "def process_job():\n r = redis.StrictRedis()\n while True:\n curr_job = r.blpop('job_queue', 0)[1]\n r.hset('status', curr_job, 'processing')\n print('current job ID:', curr_job)\n # convert byte to string\n url = r.hget('urls', curr_job).decode(\"utf-8\")\n print('Current URL:', url)\n\n # if this url has not been requested before/is not in the db\n if Site.query.filter_by(url=url).first():\n r.hset('status', curr_job, 'complete')\n print('Job', curr_job, 'Completed')\n else:\n # fetches url page source\n try:\n html = str(get_html(url))\n print('Successfully retrieved HTML')\n # add results to database\n db.session.add(Site(url=url, html=html))\n db.session.commit()\n print('Added to database')\n r.hset('status', curr_job, 'complete')\n print('Job', curr_job, 'Completed')\n except ValueError:\n r.hset('status', curr_job, 'abort')\n print('Job', curr_job, 'Aborted')\n except TimeoutError:\n r.hset('status', curr_job, 'timeout')\n print('Job', curr_job, 'Timed Out')\n return", "def on_job_update(_job):\n nonlocal job\n job = _job\n\n if job.state in ['DONE', 'ERROR', 'WORKING']:\n canceled = my_job_async.job_manager_class.cancel(job.id)\n assert not canceled, (\n f'Uncancelable job is canceled in the `{job.state}` state!')", "def process(self, job_id, job_service):\n print('Monitoring job %s' % job_id)\n local_job = Job.query.get(job_id)\n remote_job = job_service.get_job(local_job.remote_job_id)\n\n # TODO: catch saga.IncorrectState\n remote_job_state = remote_job.state\n\n if local_job.last_status != remote_job_state:\n self.send_notifications(local_job, remote_job)\n self.download_files(local_job, remote_job, job_service)\n self.update_state(local_job, remote_job)\n\n # Add task back to the queue if still running\n if remote_job_state not in (saga.FAILED,\n saga.DONE,\n saga.CANCELED,\n saga.FINAL,\n saga.EXCEPTION):\n self.send((job_id, job_service))", "def wait_complete(self):\n self.join()", "def update_status(self):\n\n # first get the instances we need to check\n monitor_jobs = {}\n for _, job_node in self.get_executions_iterator():\n if job_node.is_job:\n for job_instance in job_node.instances:\n if not job_instance.simulate:\n if job_instance.host in monitor_jobs:\n monitor_jobs[job_instance.host]['names'].append(\n job_instance.name)\n else:\n monitor_jobs[job_instance.host] = {\n 'config': job_instance.monitor_config,\n 'type': job_instance.monitor_type,\n 'workdir': job_instance.workdir,\n 'names': [job_instance.name],\n 'period': job_instance.monitor_period\n }\n else:\n job_instance.set_status('COMPLETED')\n\n # nothing to do if we don't have nothing to monitor\n if not monitor_jobs:\n return\n\n # then look for the status of the instances through its name\n states = self.jobs_requester.request(monitor_jobs, self.logger)\n\n # finally set job status\n for inst_name, state in states.iteritems():\n 
self.job_instances_map[inst_name].set_status(state)\n\n # We wait to slow down the loop\n sys.stdout.flush() # necessary to output work properly with sleep\n time.sleep(LOOP_PERIOD)", "def rescheduleJob(self, job):\n with self:\n with self.queues.jobsInProgress:\n with self.queues.jobsDone:\n try:\n index = self.queues.jobsInProgress.index(job)\n except ValueError, ex:\n raise BlackboardUpdateError(\"Job not found in jobsInProgress: \" +\n job.getProperty(Props.NAME, \"(unidentified)\"))\n job = self.queues.jobsInProgress.pop(index)\n self.queues.jobsAvailable.append(job)", "async def _fetch_data(self) -> JobInfo:\n return await self.api.get_job()", "def collect(self):\n while True:\n if not self._queue.empty():\n message = self._queue.get()\n self.working_on = message['job_type']\n else:\n break\n logging.info(\"Popped off message: {}\\n\".format(str(message)))\n\n if message['job_type'] == 'STOP':\n break\n\n if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':\n raise ValueError('{} is not a recognized task type'.format(message['job_type']))\n pass\n\n \"\"\" Query all repos with repo url of given task \"\"\"\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(message['given']['git_url']))\n repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])\n\n try:\n if message['models'][0] == 'badges':\n self.badges_model(message, repo_id)\n except Exception as e:\n register_task_failure(self, logging, message, repo_id, e)\n pass", "def run(self):\r\n while True:\r\n job = self.jobs.get()\r\n\r\n work_thread = Thread(target = job)\r\n work_thread.setDaemon(True)\r\n self.work_count.put(True)\r\n self.workers[work_thread] = datetime.now()\r\n work_thread.start()", "def wait_completion(self):\r\n self.tasks.join()", "def finished(self):\n\t\telog(\"finished\")", "def reload(self):\n self.job_proto = self.serving_stub.GetJob(GetJobRequest(job=self.job_proto)).job", "def updateList(self):\n self._recreateJobs()", "def on_job_update(_job):\n nonlocal job\n job = _job\n\n if job.state in ['WORKING', 'DONE', 'ERROR']:\n canceled = my_job.job_manager_class.cancel(job.id)\n assert not canceled, (\n f'Uncancelable job is canceled in the `{job.state}` state!')", "def wait_for_job(self, value):\n logger.info('Waiting for job %s' % self.job_name)\n\n if self.provider_options.dry_run == True:\n logger.info('Dry run: continuing')\n else:\n logger.info('Checking job status...')\n provider = provider_base.get_provider(self.provider_options)\n\n while True:\n tasks = provider.lookup_job_tasks('*', job_name_list=[self.job_name], max_jobs=1)\n logger.debug('Tasks: %s' % tasks)\n\n if not tasks:\n raise RuntimeError('Job not found: %s' % self.job_name)\n\n is_running = False\n status = None\n\n # Wait until all tasks succeed; abort if any task fails or is canceled\n for task in tasks:\n status = provider.get_task_field(task, 'job-status')\n\n if status == 'RUNNING':\n is_running = True\n elif status == 'CANCELED':\n raise RuntimeException('Job %s: CANCELED' % self.job_name)\n elif status == 'FAILURE':\n error = provider.get_task_field(task, 'error-message')\n raise RuntimeException('Job %s: FAILURE. 
Error message: %s' % (self.job_name, error))\n\n if is_running:\n time.sleep(self.poll_interval)\n else:\n break\n\n logger.info('Job %s: SUCCESS' % self.job_name)\n return 'Success'", "def job_done(self, success):\n run_usage = self._attempt.get_usage()\n self._usage.append(run_usage)\n\n log.debug(\"job_done job_id=%s success=%s (last attempt %s\", self.job_id, success, self._attempt_ids[-1])\n self._attempt = None", "def wait_for_update(self):\n while \"updating_db\" in self.status():\n time.sleep(1)", "def flush(self):\n #return self.job.flush()", "async def finalize_finished_submissions_loop(monitored_jobs: Dict[js.JobKey, Set]):\n loop = asyncio.get_running_loop()\n while True:\n logger.debug(\"finalize_finished_submissions_loop\")\n logger.debug(\"monitored_jobs: %s\", monitored_jobs)\n if monitored_jobs:\n finished_jobs = []\n for job_key, job_set in list(monitored_jobs.items()):\n if not job_set:\n logger.debug(\n \"monitored_jobs with key '%s' is empty, finalizing...\", monitored_jobs\n )\n if job_key[1]: # job email was specified\n loop.run_in_executor(\n None,\n js.email.send_job_finished_email,\n job_key[0],\n job_key[1],\n \"complete\",\n )\n await set_job_status(job_key[0])\n finished_jobs.append(job_key)\n await asyncio.sleep(js.perf.SLEEP_FOR_LOOP)\n for job_key in finished_jobs:\n monitored_jobs.pop(job_key)\n await asyncio.sleep(js.perf.SLEEP_FOR_QSTAT)", "def update(self):\n self._log.debug(\"About to update job {0}\".format(self.id))\n resp = self._api.get_job(self.id)\n\n if resp.success:\n self.submission = self._format_submission(resp.result)\n return True\n\n else:\n raise resp.result", "def updater_job_status(self,request):\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/status invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\t# First check if a job is running. This will update the\n\t\t# internal field self._current_job, or if the job is finished,\n\t\t# it would return an empty string.\n\t\tinst = self.__which_job_is_running()\n\n\t\tjob = request.options.get('job','')\n\t\tresult = {}\n\t\tif job in INSTALLERS:\n\t\t\t# make a copy, not a reference!\n#\t\t\tresult = {}\n#\t\t\tfor arg in INSTALLERS[job]:\n#\t\t\t\tresult[arg] = INSTALLERS[job][arg]\n\t\t\tresult = deepcopy(INSTALLERS[job])\n\n\t\t\tif 'statusfile' in INSTALLERS[job]:\n\t\t\t\ttry:\n\t\t\t\t\tfor line in open(INSTALLERS[job]['statusfile']):\n\t\t\t\t\t\tfields = line.strip().split('=')\n\t\t\t\t\t\tif len(fields) == 2:\n\t\t\t\t\t\t\tresult['_%s_' % fields[0]] = fields[1]\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t# if we encounter that the frontend asks about the last job we\n\t\t\t# have executed -> include its properties too.\n\t\t\tif self._current_job:\n\t\t\t\tif self._current_job['job'] == job:\n\t\t\t\t\tfor f in self._current_job:\n\t\t\t\t\t\tresult[f] = self._current_job[f]\n\t\t\t\t\t\tif isinstance(result[f],str) and result[f].isdigit():\n\t\t\t\t\t\t\tresult[f] = int(result[f])\n\t\t\t\tif inst == '':\n\t\t\t\t\tresult['running'] = False\n\t\t\telse:\n\t\t\t\t# no job running but status for release was asked? 
\n\t\t\t\t# maybe the server restarted after job finished\n\t\t\t\t# and the frontend did not get that information\n\t\t\t\t# Bug #26318\n\t\t\t\tif job == 'release':\n\t\t\t\t\tresult['detail'] = '%s-%s' % (self.ucr.get('version/version'), self.ucr.get('version/patchlevel'))\n\t\t\t\telse:\n\t\t\t\t\tresult['detail'] = _('Unknown')\n\n\t\t\t# -------------- additional fields -----------------\n\n\t\t\t# elapsed time, ready to be displayed. (not seconds, but rather\n\t\t\t# the formatted string)\n\t\t\tif 'time' in result and 'started' in result:\n\t\t\t\telapsed = result['time'] - result['started']\n\t\t\t\tif elapsed < 60:\n\t\t\t\t\tresult['elapsed'] = '%ds' % elapsed\n\t\t\t\telse:\n\t\t\t\t\tmins = int(elapsed/60)\n\t\t\t\t\tsecs = elapsed - (60 * mins)\n\t\t\t\t\tif mins < 60:\n\t\t\t\t\t\tresult['elapsed'] = '%d:%02dm' % (mins,secs)\n\t\t\t\t\telse:\n\t\t\t\t\t\thrs = int(mins/60)\n\t\t\t\t\t\tmins = mins - (60*hrs)\n\t\t\t\t\t\tresult['elapsed'] = '%d:%02d:%02dh' % (hrs,mins,secs)\n\t\t\t# Purpose is now formatted in the language of the client (now that\n\t\t\t# this LANG is properly propagated to us)\n\t\t\tif 'purpose' in result:\n\t\t\t\tif result['purpose'].find('%') != -1:\n\t\t\t\t\t# make sure to not explode (Bug #26318), better show nothing\n\t\t\t\t\tif 'detail' in result:\n\t\t\t\t\t\tresult['label'] = result['purpose'] % result['detail']\n\t\t\t\telse:\n\t\t\t\t\tresult['label'] = result['purpose']\n\t\t\t# Affordance to reboot... hopefully this gets set before\n\t\t\t# we stop polling on this job status\n\t\t\tself.ucr.load()\t# make it as current as possible\n\t\t\tresult['reboot'] = self.ucr.is_true('update/reboot/required',False)\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/status returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# -----------------------------------\n\n\t\tself.finished(request.id,result)", "def _finalize_job(self, job: Job) -> None:\n self._jobs.remove(job)\n self._finalized_jobs[job.task_name][job.status] += 1", "def on_job_done(self, future):\n self.futures.append(future)", "async def wait(self):\n if self._state in (JobState.PENDING, JobState.RUNNING):\n await self._process.wait()", "def wait_until_finished(self):\n for processor in self._processors.values():\n while not processor.done:\n time.sleep(0.1)", "def _checkpoint(self,):\n self.outstanding.wait()", "def complete(self, item, line_reference, status):\n self.job.complete(item, line_reference, status)", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()" ]
[ "0.7560717", "0.6975098", "0.69365853", "0.68282324", "0.6777694", "0.6736639", "0.6731939", "0.6727907", "0.6715797", "0.6674043", "0.6674043", "0.6674043", "0.6674043", "0.66640437", "0.6595647", "0.65691817", "0.6568777", "0.6568164", "0.65348256", "0.6516131", "0.6487494", "0.64795285", "0.64691204", "0.6457744", "0.6446938", "0.6418423", "0.6410596", "0.6393762", "0.638839", "0.6370189", "0.6311257", "0.6306076", "0.6283961", "0.6262176", "0.6238809", "0.6227114", "0.6223076", "0.6222799", "0.6210294", "0.62060446", "0.61963946", "0.61900514", "0.6177995", "0.6173246", "0.6167538", "0.6151014", "0.6127745", "0.6123603", "0.6123603", "0.6123603", "0.6121285", "0.61119235", "0.6110881", "0.6108042", "0.6097526", "0.6094359", "0.6066328", "0.6065099", "0.6064926", "0.6032912", "0.6022852", "0.60190654", "0.6011496", "0.6009918", "0.6008235", "0.6008235", "0.6008235", "0.6008235", "0.5999545", "0.59888995", "0.5976422", "0.5975795", "0.59710425", "0.5956697", "0.59531486", "0.5950067", "0.5934823", "0.5922681", "0.59146124", "0.5914152", "0.5914139", "0.59104776", "0.59088916", "0.5907126", "0.59052396", "0.5888133", "0.588449", "0.58829534", "0.58769757", "0.5876287", "0.5861395", "0.58606374", "0.585678", "0.58553106", "0.58380556", "0.5823382", "0.58226866", "0.58226866", "0.58226866", "0.58226866" ]
0.61918545
41
Checks if the job (self) matches the given properties, if any.
def matches_filter(
    self,
    name_match: str = None,
    status: Optional[JobStatus] = None,
    created_after: Optional[datetime] = None
) -> bool:
    if name_match is not None and re.search(name_match, self.details.name) is None:
        return False
    if status is not None and self.details.status != status.value:
        return False
    if created_after is not None:
        # if supplied date is date we must convert to datetime first
        if type(created_after) is date:
            created_after = datetime(created_after.year, created_after.month, created_after.day)
        # if supplied date is naive, assume local and convert to timezone aware object
        if created_after.tzinfo is None:
            created_after = created_after.astimezone()
        if self.details.creation_time.replace(tzinfo=timezone.utc) < created_after:
            return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matchesProperties(self, *args):\n return _libsbml.SBMLRuleConverter_matchesProperties(self, *args)", "def matchesProperties(self, *args):\n return _libsbml.SBMLConverter_matchesProperties(self, *args)", "def _matchcondition_holds(self, matchconditions, src_sheet):\n matches=True\n if matchconditions is None:\n return False\n\n for incoming_key, incoming_value in matchconditions.items():\n if (incoming_key in src_sheet.properties and \\\n str(src_sheet.properties[incoming_key]) != str(incoming_value)) \\\n or (incoming_key not in src_sheet.properties and incoming_value is not None):\n matches=False\n break\n\n return matches", "def _match_all(self, obj, criteria):\n return all(getattr(obj, key_, None) == value_ for\n key_, value_ in criteria.items())", "def matchesProperties(self, *args):\n return _libsbml.SBMLIdConverter_matchesProperties(self, *args)", "def has_rejected_jobs(self, **kwargs):\n if Job.objects.filter(house=self.house, rejected=True, **kwargs).exists():\n return True\n\n return False", "def has_active_jobs(self, **kwargs):\n if Job.objects.add_balance().filter(house=self.house, balance1__gt=0, approved=True, **kwargs).exists():\n return True\n\n return False", "def test_matching_jobs_existing(self):\n self.assertEquals(\n self.query_api.get_matching_jobs(\n \"try\", \"146071751b1e\",\n 'Linux x86-64 try build'), json.loads(JOBS_SCHEDULE))", "def matchesProperties(self, *args):\n return _libsbml.CobraToFbcConverter_matchesProperties(self, *args)", "def matchesProperties(self, *args):\n return _libsbml.SBMLUnitsConverter_matchesProperties(self, *args)", "def checkSiteProperties(self, siteProperties, *propertyNames):\n if siteProperties is None:\n return False\n for name in propertyNames:\n if not (name in siteProperties):\n return False\n return True", "def _match(self, rule, obj):\n\n for key in rule:\n if key == '$and':\n if not self.handle_and(key, rule[key], obj):\n return False\n\n elif key == '$or':\n if not self.handle_or(key, rule[key], obj):\n return False\n\n elif key == '$nor':\n if not self.handle_nor(key, rule[key], obj):\n return False\n\n elif not self.handle_field(key, rule[key], obj):\n return False\n\n return True", "def _check_kwargs(cls, kwargs: Mapping[str, Any]):\n for name, prop in cls._props_by_name.items():\n if prop.required and name not in kwargs:\n raise ValueError(f'Required property {name} is missing.')\n for name, value in kwargs.items():\n if name not in cls._props_by_name:\n raise KeyError(f'Unknown property {name}.')\n prop = cls._props_by_name[name]\n prop.validate(value)", "def matchesProperties(self, *args):\n return _libsbml.SBMLLocalParameterConverter_matchesProperties(self, *args)", "def matchesProperties(self, *args):\n return _libsbml.FbcToCobraConverter_matchesProperties(self, *args)", "def matchesProperties(self, *args):\n return _libsbml.SBMLReactionConverter_matchesProperties(self, *args)", "def check_job_exists( job_list, analysis_group_id, reprocess_config_id):\n for job in job_list:\n struct = JSONMessage.unserialize(job.input_message)\n\n if( int( struct.analysis_group_id ) == int( analysis_group_id ) and \\\n int( struct.reprocess_config_id ) == int( reprocess_config_id ) ):\n return 1\n return 0", "def _validate(self):\n for name, prop in self._properties.iteritems():\n value = getattr(self, name, None)\n prop._do_validate(value)", "def _propertyFilter(self, entity, params):\n\n if 'property_conditions' not in params:\n raise ProtocolError()\n\n conditions = params['property_conditions']\n\n for field, allowed_values 
in conditions.iteritems():\n if entity.__getattribute__(field) not in allowed_values:\n return False\n\n return True", "def matchesProperties(self, *args):\n return _libsbml.SBMLInferUnitsConverter_matchesProperties(self, *args)", "def test_is_valid(self):\n job = ModelJob()\n self.assertFalse(job.is_valid())\n\n # If all of the required arguments are supplied, this should result in a valid job\n ts_complete_set = {tsk: TimeSignal.from_values(tsk, [0., 0.1], [1., 999.])\n for tsk in time_signal_names}\n\n valid_args = {\n 'time_start': 0,\n 'duration': 0.2,\n 'ncpus': 1,\n 'nnodes': 1,\n 'timesignals': ts_complete_set\n }\n\n self.assertTrue(ModelJob(**valid_args).is_valid())\n\n # If any of the supplied arguments are missing, this should invalidate things\n for k in valid_args.keys():\n invalid_args = valid_args.copy()\n del invalid_args[k]\n self.assertTrue(ModelJob(**valid_args).is_valid())", "def testJobProperties(databases):\n\n def checkProperties(config):\n jobId = 'job0'\n assert config.targets == {'target1', 'target2'}\n assert config.getId() == jobId\n assert config['name'] == jobId\n assert config.owner == gen.owner\n assert config['owner'] == gen.owner\n assert config.comment == gen.comment\n #assert config.getDescription() == config['description']\n\n gen = DataGenerator(databases)\n config = gen.createConfiguration(\n targets=('target1', 'target2')\n )\n runWithReload(databases, config, checkProperties)", "def matchesProperties(self, *args):\n return _libsbml.SBMLLevel1Version1Converter_matchesProperties(self, *args)", "def mutexPropositions(prop1, prop2, mutexActions):\n for a1 in prop1.getProducers():\n for a2 in prop2.getProducers():\n if Pair(a1, a2) not in mutexActions:\n return False\n return True", "def _match_one(self, rec, tests):\n for key, test in tests.items():\n if not test(rec.get(key, None)):\n return False\n return True", "def validateProp(filename):\n\n # does the file exists\n if (not os.path.exists(filename)):\n LOG.warning('Prop file (%s) does not exist' % (filename))\n return False\n\n # can I read it\n try:\n propFile = open(filename, 'r')\n prop = json.load(propFile)\n propFile.close()\n except (ValueError, OSError):\n LOG.warning('Prop file (%s) unable to read or did not parse' % (filename))\n return False\n\n # does the prop have the correct value\n for key in ('name', 'md5', 'description', 'size', 'contact'):\n if (key not in prop):\n LOG.warning('Prop file (%s) missing key (%s)' % (filename, key))\n return False\n\n return True", "def matchesProperties(self, *args):\n return _libsbml.SBMLInitialAssignmentConverter_matchesProperties(self, *args)", "def verify_job(cls, auth_key, job_id):\n key = ObjectId(job_id)\n user_id = ObjectId(auth_key)\n db = cls.mongo_cli.get_database(collection=Job.collection_name)\n if db.count({\"_id\": key, \"user_id\": user_id}) > 0:\n return True\n return False", "def job_has_params(job_url):\n name = job_url.rstrip(\"/\").rsplit(\"/\")[-1]\n if name in (\n \"pr-docs\",\n \"pr-lint\",\n \"pr-pre-commit\",\n ):\n return False\n else:\n return True", "def matchesProperties(self, *args):\n return _libsbml.SBMLFunctionDefinitionConverter_matchesProperties(self, *args)", "def is_exist(self, trigger):\n for moira_trigger in self.fetch_all():\n if trigger.name == moira_trigger.name and \\\n set(trigger.targets) == set(moira_trigger.targets) and \\\n set(trigger.tags) == set(moira_trigger.tags):\n return True\n return False", "def test_tap_config_valid_if_properties_is_none(self):\n 
self._assert_tap_config(config=self.valid_json_file, properties=None, state=self.valid_json_file)", "def is_satisfied(self, item: Any) -> bool:", "def _does_product_contains_given_attributes(self, product, *attrs):\n\n for attribute in list(attrs[0]):\n if not product.get(attribute):\n return False\n\n return True", "def job_lang_check(lang):\n lang = lang\n def job_check(form, field):\n \"\"\"\n this is to check if job properties are well edited:\n job title and content should not be empty if it is published\n\n Arguments:\n - `from`:\n - `field`:\n \"\"\"\n data = field.data\n published = getattr(form, 'publish_'+lang)\n if published.data:\n if len(data) == 0:\n raise validators.ValidationError('field should not be empty if you choose to publish it')\n return job_check", "def _has_valid_mandatory_properties(self):\n for prop in self.mandatory_properties:\n if not hasattr(self, prop):\n logger.error(\n \"Skipping %s: could not find information about '%s'\",\n self, prop)\n return False\n return True", "def find_event_property(properties, value, events):\n for event in events:\n if properties in event and event[properties] == value:\n return True\n return False", "def matchesProperties(self, *args):\n return _libsbml.CompFlatteningConverter_matchesProperties(self, *args)", "def lock_JobProperties(self):\n for j in self.__dict__.keys():\n j_obj=self.__dict__.get(j)\n if hasattr(j_obj,'lock_JobProperties'):\n j_obj.lock_JobProperties()\n j_obj._locked=True\n elif hasattr(j_obj,'_locked'):\n j_obj._locked=True\n self._log.info('The JobProperty Container %s is locked',\n self.__name__)", "def _perform_check(data, check):\n\n return not check or all(data.get(k) == v for k, v in check.items())", "def check_properties(self):\r\n for prop in self.mandatory_properties:\r\n if not hasattr(self, prop):\r\n raise NameError(prop)", "def has_pending_jobs(instance_properties, max_size):\n try:\n max_cluster_slots = max_size * instance_properties.get(\"slots\")\n pending_jobs = get_pending_jobs_info(max_slots_filter=max_cluster_slots, skip_if_state=SGE_HOLD_STATE)\n logging.info(\"Found the following pending jobs:\\n%s\", pending_jobs)\n return len(pending_jobs) > 0, False\n except Exception as e:\n log.error(\"Failed when checking for pending jobs with exception %s. 
Reporting no pending jobs.\", e)\n return False, True", "def _check_queryinfo_existence(self, hostname: str, job: str) -> bool:\n with self.lock:\n hosts = self.host_query_info.all()\n for host in hosts:\n if host['hostname'] == hostname and host['job'] == job:\n return True\n return False", "def testAddingPropertyFields(self):\n map_sheet = self.properties[PROPERTY_SHEET]\n for key, value in PROPS.items():\n self.failUnless(map_sheet.hasProperty(key) and list(map_sheet.getProperty(key)) == value)", "def athlete_match(athlete, athlete_criteria):\n for key, value in athlete_criteria.items():\n if key not in athlete or athlete[key] not in value:\n return False\n return True", "def __contains__(self, obj):\n if isinstance(obj, self):\n query = self.where(**obj.data).select()\n result = query.execute()\n if result.count:\n return True\n return False", "def has_role(self, *args, **kwargs):\n\n # Super can do anything\n if 'super' in (r.name for r in self.roles):\n return True\n\n return any([all([getattr(r, k) == v\n for k, v in kwargs.items()] +\n [r.name == name for name in args])\n for r in self.roles])", "def is_satisfied(self, item):\n return all(map(\n # we go through every single item in self.args and check if the specks are satisfied.\n lambda spec: spec.is_satisfied(item), self.args\n ))", "def check_args(args):\n for arg in vars(args):\n if getattr(args, arg):\n return True\n return False", "def is_job_running(self, condor_id):\n\n classads = self.get_classads(\"OSGRSVUniqueName==\\\"%s\\\"\" % condor_id)\n\n if classads is None:\n self.rsv.log(\"ERROR\", \"Could not determine if job is running\")\n return False\n\n for classad in classads:\n # We put the attribute into the classad in quotes, so search for it accordingly\n if classad[\"OSGRSVUniqueName\"] == '\"' + condor_id + '\"':\n return True\n\n return False", "def _check_property_arguments(args, type_):\n assert args.command in ('add', 'edit')\n\n # Determine the entry class. 
It can be either specified via its name or\n # directly.\n if isinstance(type_, str):\n entry_cls = _NAME_TO_ENTRY_TYPE_MAP[type_]\n else:\n assert issubclass(type_, storepass.model.Entry)\n entry_cls = type_\n\n res = 0\n for field in storepass.model.ENTRY_FIELDS:\n if field in args.properties and field not in entry_cls.entry_fields:\n print(\n f\"Property '{field.name}' is not valid for entry type \"\n f\"'{entry_cls.entry_type_name}'\",\n file=sys.stderr)\n res = 1\n return res", "def should_notify(job):\n if job.result not in (Result.failed, Result.passed):\n return\n\n parent = Job.query.join(\n Source, Source.id == Job.source_id,\n ).filter(\n Source.patch_id == None, # NOQA\n Source.revision_sha != job.build.source.revision_sha,\n Job.project == job.project,\n Job.date_created < job.date_created,\n Job.status == Status.finished,\n Job.result.in_([Result.passed, Result.failed]),\n ).order_by(Job.date_created.desc()).first()\n\n # if theres no parent, this job must be at fault\n if parent is None:\n return job.result == Result.failed\n\n if job.result == Result.passed == parent.result:\n return False\n\n current_failures = get_test_failures(job)\n # if we dont have any testgroup failures, then we cannot identify the cause\n # so we must notify the individual\n if not current_failures:\n return True\n\n parent_failures = get_test_failures(parent)\n if parent_failures != current_failures:\n return True\n\n return False", "def test_check_opt_complete(self):\n self.assertEqual(check_opt(self.jobset3.job), 'completed')", "def job_posting_matches(self, job_title_posting, html_posting):\n regex_keyword_title = re.compile(r'\\b(data|machine learning)\\b', flags=re.IGNORECASE)\n regex_bad_position_title = re.compile(r'\\b(manager|principal|professor|director|lead)\\b', flags=re.IGNORECASE)\n\n job_posting = BeautifulSoup(html_posting, 'html.parser').get_text()\n regex_language_posting = re.compile(r'python', flags=re.IGNORECASE)\n\n return regex_keyword_title.search(job_title_posting) and \\\n not regex_bad_position_title.search(job_title_posting) and \\\n regex_language_posting.search(job_posting)", "def validate_present(self, obj):\n for k, v in obj.items():\n func = self.validation.get(k)\n if func:\n func(k, v)", "def check_rule_complete(predicate, objects_dic, predicates_rules):\n\n pname = predicate[\"name\"]\n predicate_rule = predicates_rules[pname]\n objects_list_ref = predicate_rule[\"objects\"]\n objects = predicate[\"objectNames\"]\n if \"custom_obj\" in predicate_rule:\n # addtional custom object not in the real pddl file\n custom_obj = predicate_rule[\"custom_obj\"]\n # complete object list\n object_list = objects + custom_obj\n objects_list_ref = objects_list_ref + custom_obj\n else:\n object_list = objects\n obj_ref_dic = dict(zip(objects_list_ref, object_list))\n if \"require\" in predicate_rule:\n for obj_index in predicate_rule[\"require\"]:\n for property in predicate_rule[\"require\"][obj_index]:\n objectname = obj_ref_dic[obj_index]\n if objects_dic[objectname][property] is False:\n return False\n return True", "def check_skill_prerequisites(self, skill, header):\n try: \n skill_type = ContentType.objects.get_for_model(Skill)\n skill_prerequisites = Prerequisite.objects.filter(\n content_type__pk=skill_type.id,\n object_id=skill.id\n )\n return self.check_prerequisites(skill_prerequisites)\n except Prerequisite.DoesNotExist:\n return True\n return True", "def check(args, session: Session = NEW_SESSION) -> None:\n if args.allow_multiple and not args.limit > 1:\n raise 
SystemExit(\"To use option --allow-multiple, you must set the limit to a value greater than 1.\")\n if args.hostname and args.local:\n raise SystemExit(\"You can't use --hostname and --local at the same time\")\n\n query = select(Job).where(Job.state == JobState.RUNNING).order_by(Job.latest_heartbeat.desc())\n if args.job_type:\n query = query.where(Job.job_type == args.job_type)\n if args.hostname:\n query = query.where(Job.hostname == args.hostname)\n if args.local:\n query = query.where(Job.hostname == get_hostname())\n if args.limit > 0:\n query = query.limit(args.limit)\n\n alive_jobs: list[Job] = [job for job in session.scalars(query) if job.is_alive()]\n\n count_alive_jobs = len(alive_jobs)\n if count_alive_jobs == 0:\n raise SystemExit(\"No alive jobs found.\")\n if count_alive_jobs > 1 and not args.allow_multiple:\n raise SystemExit(f\"Found {count_alive_jobs} alive jobs. Expected only one.\")\n if count_alive_jobs == 1:\n print(\"Found one alive job.\")\n else:\n print(f\"Found {count_alive_jobs} alive jobs.\")", "def has_received_json_partially_matching(\n self, obj, timeout=60, poll=0.1):\n assert_message_eventually(self.jsonMessages.getObjects,\n MatchesJSONPartially(obj), 'Failed to match json messages', timeout, poll)", "def is_property_available(self, name):\n if name in self.properties and not (isinstance(self.properties[name], dict)\n and '__deferred' in self.properties[name]):\n return True\n return False", "def test_get_property_matches(self):\n\n results = GenomePropertiesResultsWithMatches(*self.test_genome_property_results, properties_tree=self.test_tree)\n\n self.assertEqual(results.get_property_matches('GenProp0236'), None)\n self.assertEqual(len(results.get_property_matches('GenProp0232')), 9)\n self.assertEqual(len(results.get_property_matches('GenProp0232', top=True)), 2)\n self.assertEqual(len(results.get_property_matches('GenProp0232', sample='C_luteolum_DSM_273')), 4)", "def matches(self, name):\n return name is not None and name in (self.leader, self.sync_standby)", "def __eq__(self, other):\n if not isinstance(other, JobRun):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return isinstance(other, Job) and self.__uuid == other.uuid", "def isThisJobFinished(self, identifier):\n identifier = identifier.strip()\n with self.__queueLock:\n # Look through the finished jobs and attempt to find a matching\n # identifier. 
If the job exists here, it is finished\n for run in self.__finished:\n if run.identifier == identifier:\n return True\n\n # Look through the pending jobs and attempt to find a matching identifier\n # If the job exists here, it is not finished\n for queue in [self.__queue, self.__clientQueue]:\n for run in queue:\n if run.identifier == identifier:\n return False\n\n # Look through the running jobs and attempt to find a matching identifier\n # If the job exists here, it is not finished\n for run in self.__running+self.__clientRunning:\n if run is not None and run.identifier == identifier:\n return False\n\n # If you made it here and we still have not found anything, we have got\n # problems.\n self.raiseAnError(RuntimeError,\"Job \"+identifier+\" is unknown!\")", "def matchesProperties(self, *args):\n return _libsbml.FbcV2ToV1Converter_matchesProperties(self, *args)", "def check_claim(item, prop, target):\n item_dict = item.get()\n try:\n claims = item_dict['claims'][prop]\n except KeyError:\n return None\n\n for claim in claims:\n if claim.target_equals(target):\n return claim\n return None", "def filter_by_property(self, properties=None, **kwargs):\r\n\t\tif properties is None:\r\n\t\t\tproperties = {}\r\n\t\tproperties.update(kwargs)\r\n\t\tresult_list = ElementList()\r\n\t\tfor element in self:\r\n\t\t\tif all(k in element.properties and element.properties[k] == v\r\n\t\t\t\t\tfor k, v in properties.items()):\r\n\t\t\t\tresult_list.append(element)\r\n\t\treturn result_list", "def check_condition(self, query_dict):\n return all(key in self.__data and self.__data[key] == value\n for key, value in query_dict.items())", "def _check_analyzed_job(self, job, container):\n self.log(u\"Checking the Job object generated from container\")\n\n self.log(u\"Checking that the Job is not None\")\n if job is None:\n self._failed(u\"Unable to create a Job from the container.\")\n return\n\n self.log(u\"Checking that the Job has at least one Task\")\n if len(job) == 0:\n self._failed(u\"Unable to create at least one Task from the container.\")\n return\n\n if self.rconf[RuntimeConfiguration.JOB_MAX_TASKS] > 0:\n self.log(u\"Checking that the Job does not have too many Tasks\")\n if len(job) > self.rconf[RuntimeConfiguration.JOB_MAX_TASKS]:\n self._failed(u\"The Job has %d Tasks, more than the maximum allowed (%d).\" % (\n len(job),\n self.rconf[RuntimeConfiguration.JOB_MAX_TASKS]\n ))\n return\n\n self.log(u\"Checking that each Task text file is well formed\")\n for task in job.tasks:\n self.log([u\"Checking Task text file '%s'\", task.text_file_path])\n text_file_bstring = container.read_entry(task.text_file_path)\n if (text_file_bstring is None) or (len(text_file_bstring) == 0):\n self._failed(u\"Text file '%s' is empty\" % task.text_file_path)\n return\n self._check_utf8_encoding(text_file_bstring)\n if not self.result.passed:\n self._failed(u\"Text file '%s' is not encoded in UTF-8\" % task.text_file_path)\n return\n self._check_not_empty(text_file_bstring)\n if not self.result.passed:\n self._failed(u\"Text file '%s' is empty\" % task.text_file_path)\n return\n self.log([u\"Checking Task text file '%s': passed\", task.text_file_path])\n self.log(u\"Checking each Task text file is well formed: passed\")", "def matchesProperties(self, *args):\n return _libsbml.SBMLLevelVersionConverter_matchesProperties(self, *args)", "def job_exists(self, job_id):\n\n return True if self.get_status(job_id) else False", "def matchesProperties(self, *args):\n return _libsbml.FbcV1ToV2Converter_matchesProperties(self, 
*args)", "def is_satisfied(self, item: Product) -> bool:\n return item.colour == self.colour", "def _hasValuesCheckerWrapper(self, args):\n \n constraints = args['constraints']\n \n def _hasValuesChecker(entity, params):\n \"\"\"Checks if values of specified properties of an entity are in\n given sets. \n \"\"\"\n \n for key, values in constraints.iteritems():\n if entity.__getattribute__(key) not in values:\n return False\n\n return True\n\n return _hasValuesChecker", "def __eq__(self, other):\n if not isinstance(other, JobTaskSpec):\n return False\n\n return self.__dict__ == other.__dict__", "def match(self, query, annotations):\n # XXX Note that we are not inspecting 'action' \n\n # TMP CACHE DEBUG\n #import pdb\n #pdb.set_trace()\n\n # The object without namespace to compare with the rule\n if ':' in query.object:\n obj = query.object.split(':')[-1] \n else:\n obj = query.object\n\n # Test if the object of the Query matches the object of the Rule\n if self.object != '*' and not str(self.object) == str(obj):\n return False\n\n #print \"rule.match between these objects: self.object = %s - query.object %s\" % (self.object,obj)\n query_fields_R = set()\n query_fields_R |= query.get_select()\n query_fields_R |= query.get_where().get_field_names()\n\n query_fields_W = set()\n query_fields_W |= set(query.get_params().keys())\n\n query_fields_RW = set()\n query_fields_RW |= query_fields_R\n query_fields_RW |= query_fields_W\n\n if self.access == 'R':\n return ('*' in self.fields and query_fields_R) or query_fields_R.intersection(self.fields)\n elif self.access == 'W':\n return ('*' in self.fields and query_fields_W) or query_fields_W.intersection(self.fields)\n elif self.access == 'RW':\n return ('*' in self.fields and query_fields_RW) or query_fields_RW.intersection(self.fields)", "def dependencies_are_met(\n self,\n parent_job: Optional['Job'] = None,\n pipeline: Optional['Pipeline'] = None,\n exclude_job_id: Optional[str] = None,\n ) -> bool:\n connection = pipeline if pipeline is not None else self.connection\n\n if pipeline is not None:\n connection.watch(*[self.key_for(dependency_id) for dependency_id in self._dependency_ids])\n\n dependencies_ids = {_id.decode() for _id in connection.smembers(self.dependencies_key)}\n\n if exclude_job_id:\n dependencies_ids.discard(exclude_job_id)\n if parent_job and parent_job.id == exclude_job_id:\n parent_job = None\n\n if parent_job:\n # If parent job is canceled, treat dependency as failed\n # If parent job is not finished, we should only continue\n # if this job allows parent job to fail\n dependencies_ids.discard(parent_job.id)\n if parent_job.get_status() == JobStatus.CANCELED:\n return False\n elif parent_job._status == JobStatus.FAILED and not self.allow_dependency_failures:\n return False\n\n # If the only dependency is parent job, dependency has been met\n if not dependencies_ids:\n return True\n\n with connection.pipeline() as pipeline:\n for key in dependencies_ids:\n pipeline.hget(self.key_for(key), 'status')\n\n dependencies_statuses = pipeline.execute()\n\n allowed_statuses = [JobStatus.FINISHED]\n if self.allow_dependency_failures:\n allowed_statuses.append(JobStatus.FAILED)\n\n return all(status.decode() in allowed_statuses for status in dependencies_statuses if status)", "def validate_required(self, claims, required, *args, **kwargs):\n return all(claims.get(claim) for claim in required)", "def meets_requirements(self, requirements):\n return len(self.completed_requirements(requirements)) == len(requirements)", "def schedule(self, 
job: Job) -> bool:\n if self.num_avail_cores < job.num_cores:\n return False\n\n # Find the available cores\n num_cores_found = 0\n\n for i in range(self.num_cores):\n if self.core_status[i] == 0:\n # available\n\n self.core_status[i] = job.num_timesteps\n self.core_job_id[i] = job.id\n \n self.num_avail_cores -= 1\n num_cores_found += 1\n if num_cores_found >= job.num_cores:\n # found all the cores needed, we're done\n break\n \n return True", "def check_property(prop, name, **kwargs):\n\n checkers = {\n 'color': check_color,\n 'alpha': check_alpha,\n 'size': check_size,\n 'thickness': check_thickness,\n 'index': check_index,\n 'coordinates': check_coordinates,\n 'colormap': check_colormap,\n 'bins': check_bins,\n 'spec': check_spec\n }\n\n if name in checkers:\n return checkers[name](prop, **kwargs)\n elif isinstance(prop, list) or isinstance(prop, ndarray) or isscalar(prop):\n return check_1d(prop, name)\n else:\n return prop", "def is_expected_for_this_test(obj):\n if obj['test-name'] != test_name:\n return False\n if not fnmatch.fnmatch(config_filename, obj['configuration-filename']):\n return False\n expected_variant = obj.get('variant', None)\n if expected_variant == \"*\":\n return True\n for k in expected_variant:\n if not k in variant:\n return False\n if expected_variant[k] != variant[k]:\n return False\n return True", "def __eq__(self, other) -> bool:\r\n return self.job_id == other.job_id", "def validate_params(self, params: Dict[str, Any]) -> bool:\n dict_set_defaults(params, self.DEFAULT_PARAMS)\n\n for k in self.params:\n if k in {\"name\", \"descr\", \"cache_file\"}:\n continue\n\n if self.params[k] != params.get(k):\n return False\n\n return True", "def check(self):\n\n if not self.target.ok():\n return False\n\n if not self.progid.ok():\n return False\n\n if not self.prinapp.ok():\n return False\n\n if not self.observers.ok():\n return False\n\n return True", "def specifies(self, key, value=None, path=None):\n try:\n if path != None and isDict(multiIndex(self.current_state, path)):\n target = multiIndex(self.current_state, path)\n logging.debug(\"Specification found: \")\n logging.debug(\"Key : \" + key)\n logging.debug(\"path : \" + str(path))\n logging.debug(\"value: \" + str(target))\n return key in target.keys()\n else:\n target = self.current_state\n logging.debug(\"Specification found: \")\n logging.debug(\"Key : \" + key)\n logging.debug(\"equals value: \" + str(value))\n logging.debug(\"at spec path: \" + str(path))\n return key in target.keys() and (\n target[key] == value if value != None else True)\n except KeyError:\n logging.debug(\"WARNING: Key error when requesting path \" + \\\n str(path) + \" for widget \" + self.name)", "def __eq__(self, obj: \"Property\") -> bool:\n return self.name == obj.name and self.property_type == obj.property_type", "def validate(self, name):\n return name in self.dict", "def __is_hard_match(self, obj):\n for attr in self.list:\n try:\n if getattr(obj, attr) != getattr(self, attr):\n return False\n except AttributeError:\n pass\n return True", "def set_JobProperties(self,data):\n tp=type(data)\n if tp.__name__=='dict':\n list_context=list(JobProperty._nInstancesContextDict.keys())\n for i in data.keys():\n for j in data[i].keys():\n if list_context.count(i+'.'+j)==1:\n jp=JobProperty._nInstancesContextDict[i+'.'+j]\n jp.set_Value(data[i][j])\n self._log.info(\"The JobProperty %s has been set to %s\",\n i+'.'+j,data[i][j])\n else:\n self._log.warning(\"The JobProperty %s does not exist\",\n i+'.'+j)\n else:\n raise 
ValueError('The received data is has not the expected'\n 'type/format')", "def has_sclass(self, w: Wrapper, prop: Any) -> bool:\n if not prop:\n return None\n props = self.sclasses(w)\n if isinstance(prop, str):\n ans = [prop in props]\n else:\n ans = [i in props for i in prop]\n return all(ans)", "def check_required_props(self,\n df,\n node,\n dd,\n exclude_props = [ # submitters don't provide these properties, so remove them from QC check\n # case props not provided by submitters\n \"datasets.submitter_id\",\n \"token_record_id\",\n \"linked_external_data\",\n #series_file props not provided by submitters\n \"file_name\",\n \"md5sum\",\n \"file_size\",\n \"object_id\",\n \"storage_urls\",\n \"core_metadata_collections.submitter_id\",\n \"core_metadata_collections\",\n \"associated_ids\",\n #imaging_study props not provided by submitters\n \"loinc_code\",\n \"loinc_system\",\n \"loinc_contrast\",\n \"loinc_long_common_name\",\n \"loinc_method\",\n \"days_from_study_to_neg_covid_test\",\n \"days_from_study_to_pos_covid_test\"\n ]\n ):\n errors = []\n links = self.list_links(node, dd)\n any_na = df.columns[df.isna().any()].tolist()\n required_props = list(set(dd[node]['required']).difference(links).difference(exclude_props))\n for prop in required_props:\n if prop not in df:\n error = \"{} TSV does not have required property header '{}'!\".format(node,prop)\n print(error)\n errors.append(error)\n elif prop in any_na:\n error = \"{} TSV does not have complete data for required property '{}'!\".format(node,prop)\n print(error)\n errors.append(error)\n return errors", "def check_data(self):\n\n missing_params = {}\n flag = False\n\n missing_params['general'] = {}\n for name, param in self.params.items():\n if not param.check():\n missing_params['general'][name] = param.get_description()\n flag = True\n\n for component, comp_obj in self.components.items():\n missing_params[component], flag_comp = comp_obj.check_data()\n\n # Assign empty component parameters that have a general version:\n empty_general_params = set(missing_params[component]).intersection(\n set(self.params))\n for param in empty_general_params:\n comp_obj.change_param_object(param, self.params[param])\n del missing_params[component][param]\n\n if missing_params[component]:\n flag = True\n\n if flag:\n raise Exception('Following parameters are missing:\\n{}'\n .format(\n self._print_params(missing_params, disp=False)))\n\n return True", "def matches_config(cls, config):\n return (not config.measures) or all(me in cls.available_measures for me in config.measures)", "def in_queue(self):\n if self.get_db('jobid') is None:\n log.debug('jobid not found for calculation.')\n return False\n else:\n # get the jobid\n jobid = self.get_db('jobid')\n # see if jobid is in queue\n _, jobids_in_queue, _ = getstatusoutput('qselect',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n if str(jobid) in jobids_in_queue.split('\\n'):\n # get details on specific jobid in case it is complete\n status, output, err = getstatusoutput(['qstat', jobid],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if status == 0:\n lines = output.split('\\n')\n fields = lines[2].split()\n job_status = fields[4]\n if job_status == 'C':\n return False\n else:\n return True\n else:\n return False", "def exists(cls, job_id: str, connection: Optional['Redis'] = None) -> bool:\n if not connection:\n connection = resolve_connection()\n job_key = cls.key_for(job_id)\n job_exists = connection.exists(job_key)\n return bool(job_exists)", "def is_valid(self):\n return (\n 
self.data['queueType'] in self.VALID_QUEUES\n and self.data['matchMode'] in self.VALID_MODES\n and self.data['matchDuration'] > 800\n )", "def _update_versions(self, job):\n current_versions = self._current_versions(job)\n if all(getattr(job, f) == v for f, v in current_versions.items()):\n # No updates required\n return True\n\n if job.completions > 0:\n logger.warning('%r is outdated but has previously completed, skipping...', job)\n return False\n\n try:\n with transaction.atomic():\n for f, v in current_versions.items():\n setattr(job, f, v)\n job.save()\n logger.warning('%r has been updated to the versions: %s', job, current_versions)\n return True\n except IntegrityError:\n logger.warning('A newer version of %r already exists, skipping...', job)\n return False", "def validate(self):\n validated = True \n # Check that all parameters exist in the self.parameters dictionary\n for param_name in self._SCALAR_PARAMETERS:\n if param_name not in self.parameters:\n LOG.critical('%s not found in %s', param_name, self.filename)\n validated = False \n \n for param_name in self._TABLE_PARAMETERS:\n if not all([elem for elem in self.parameters[param_name]]):\n LOG.critical('%s not found in %s', param_name, self.filename)\n validated = False\n \n return validated" ]
[ "0.5983222", "0.5968996", "0.5967254", "0.5961163", "0.5783931", "0.5742514", "0.56300825", "0.5572125", "0.55432945", "0.55259526", "0.55190766", "0.5497851", "0.5486585", "0.5448529", "0.5427197", "0.5398042", "0.53733146", "0.5355467", "0.5347236", "0.5345044", "0.53402734", "0.5335837", "0.53346914", "0.5325825", "0.5312826", "0.52839273", "0.52719116", "0.52337724", "0.5224326", "0.5211353", "0.514922", "0.5148015", "0.5142896", "0.514018", "0.5139033", "0.51015854", "0.50956815", "0.50938106", "0.50910217", "0.5087889", "0.50816584", "0.5079813", "0.50778544", "0.5072427", "0.5069293", "0.50519747", "0.5046847", "0.50328195", "0.50149405", "0.50134", "0.50106573", "0.50062394", "0.5001954", "0.4996042", "0.49908257", "0.49809548", "0.4977522", "0.49719295", "0.49405158", "0.4931941", "0.49305472", "0.49262455", "0.49256137", "0.49165887", "0.49163657", "0.4903143", "0.4900701", "0.4892577", "0.48903033", "0.48900652", "0.48821825", "0.48673353", "0.48667496", "0.48654333", "0.48586306", "0.4853088", "0.48522663", "0.48502812", "0.48489958", "0.48480776", "0.4845834", "0.4841829", "0.48400283", "0.4838064", "0.48333678", "0.48254728", "0.4819954", "0.48118937", "0.47958517", "0.47919747", "0.47898293", "0.47876942", "0.47863874", "0.4782334", "0.47809887", "0.4772455", "0.4764873", "0.47547215", "0.47517338", "0.47488248" ]
0.47501066
99
Create a unique id for a new job.
def create_job_id() -> str: return str(uuid.uuid1())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_job_id():\n # CAIP job id can contains only numbers, letters and underscores.\n unique_tag = str(uuid.uuid4()).replace(\"-\", \"_\")\n return \"tf_cloud_train_{}\".format(unique_tag)", "def _get_job_id(self):\n return uuid.uuid4().hex", "def create_task_id():\n return str(int(round(time.time() * 10**9)))", "def create_new_job(self, search_id: Hashable) -> Hashable:\n partial_id = (\n self._redis.incr(f\"search:{search_id}.job_id_counter\", amount=1) - 1\n )\n partial_id = f\"{partial_id}\" # converting to str\n job_id = f\"{search_id}.{partial_id}\"\n self._redis.rpush(f\"search:{search_id}.job_id_list\", job_id)\n self._redis.json().set(\n f\"job:{job_id}\", \".\", {\"in\": None, \"metadata\": {}, \"out\": None}\n )\n return job_id", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def get_id(self):\n\n self.redis.setnx('job_id', '-1')\n return self.redis.incr('job_id')", "def job_create(self, sender, name=None):\n self._require_running()\n name = name or self.DEFAULT_JOB_NAME\n job_id = uuid.uuid4().hex\n assert job_id not in self._jobs\n assert sender is not None\n assert sender.connection\n job = Job(\n job_id,\n name,\n self._session_root.joinpath(job_id),\n sender,\n self._loop\n )\n self._jobs[job_id] = job\n self._jobs_by_connection[sender.connection][job_id] = job\n self._log.debug('Created job %s', job)\n return job_id", "def generate_job_id(*args):\n md5 = hashlib.md5()\n for arg in args:\n md5.update(arg.encode(\"utf-8\"))\n return md5.hexdigest()", "async def create_job(response: Response,\n request: Request,\n job: Job = Body(\n ...,\n example={\n \"id_video\": \"bbb_0.mp4\",\n \"bitrate\": 7000,\n \"speed\": \"ultrafast\",\n },\n )\n ): \n \n\n # get an ID and return to client\n id_job = mngr.getID()\n logger.debug(\"got id_job %s\" %id_job)\n resp = [\"http:/\"]\n resp.append(request.headers['host'])\n resp.append(id_job)\n response.headers[\"Location\"] = \"/\".join(resp)\n\n # create the task\n mngr.newJob(id_job, \n job.id_video, \n job.bitrate, \n job.speed)\n\n return id_job", "def _job_id(resource_uuid: str) -> str:\n return resource_uuid if \".\" in resource_uuid else f\"{resource_uuid}.0\"", "def id(self):\n return self.job_proto.id", "def _get_job_id(self) -> str:\n return self.split_name[2][3:]", "def create_job(project, description):\n randomnames = open(os.path.join(\"Anemone\", \"templates\", \"namegen.html\")).readlines()\n jobname = (\"Quick.\" +\n random.choice(randomnames)[:-1] + # for some reason choice gives extra space\n random.choice(randomnames)[:-1]) # for some reason choice gives extra space\n\n newjob = Job.create(project=project, name=jobname, description=description)\n newjob.name = newjob.name + \".{0:0=3d}\".format(newjob.id)\n newjob.save()\n return newjob", "def makeid(cls):\n return str(uuid.uuid4().hex)", "def created_job(new_job, bulk_request):\n bulk_request.return_value = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>THEJOBID</id>\n <operation>update</operation>\n <object>Lead</object>\n </jobInfo>\n '''\n new_job.create()\n return new_job", "def create_tag_id():\n return uuid.uuid1().int", "def make_id(self, name: str) -> str:\n # id_cache is intentionally mutable\n id = self.id_cache.get(name)\n if not id:\n id = 'epub-%d' % self.env.new_serialno('epub')\n self.id_cache[name] = id\n return id", "def 
job_id(self) -> JobId:\r\n return self._job_id", "def _make_task_id(self, task):\n index = self._tasks.add(task)\n task_id = '{name}-{idx}'.format(name=task.name, idx=index)\n\n return task_id", "def _job_id(files: list, extra: str):\n files_str = \"\"\n for file in files:\n files_str += file\n job_id = hashlib.sha1(files_str.encode() + extra.encode()).hexdigest()\n return job_id", "def _CreateRecordId(self):\n self._record_count += 1\n return '%s_%s' % (self._unique_id, self._record_count)", "def job_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"job_id\")", "def _get_unique_id(self):\n now = datetime.now()\n\n u_id = now.second + 60*(now.minute + 60*(now.hour + 24*(now.day + 31*(now.month + 366*(now.year)))))\n return \"instance\" + str(u_id)", "def unique_id() -> str:", "def get_or_create_unique_id(self):\n if not self.unique_id:\n self.unique_id = uuid.uuid4().hex\n return self.unique_id", "def createRunId(self):\n # runid is in the form of <login>_YYYY_MMDD_HHMMSS\n now = datetime.now()\n username = pwd.getpwuid(os.geteuid()).pw_name\n runid = \"%s_%02d_%02d%02d_%02d%02d%02d\" % (username, now.year, now.month,\n now.day, now.hour, now.minute, now.second)\n self.runid = runid\n return runid", "def get_job_id(self):\n return {'job_id': self._job_id}", "def get_job_id():\n # Get yarn application or K8s experiment ID when running distributed training\n if env.get_env(_JOB_ID_ENV_VAR) is not None:\n return env.get_env(_JOB_ID_ENV_VAR)\n else: # set Random ID when running local training\n job_id = uuid.uuid4().hex\n os.environ[_JOB_ID_ENV_VAR] = job_id\n return job_id", "def rule_01_set_job_id(session):\n\n my_id = \"\".join(\"%02x\" % random.randint(0,255) for _ in xrange(4))\n\n session[\"config\"][\"tags\"][\"instavpn\"] = my_id\n show.output(\"Instavpn Task ID\", \"is %s\" % my_id)\n\n return True", "def make_id():\n global _simple_id\n\n import uuid\n from ..settings import settings\n\n if settings.simple_ids(False):\n _simple_id += 1\n new_id = _simple_id\n else:\n new_id = uuid.uuid4()\n return str(new_id)", "def newId():\n global lastId\n lastId += 1\n return 'id%d' % lastId", "def build_id():\n return \"test123\"", "def add_job(self, data):\n job_id = str(uuid.uuid4()).replace('-', '')\n try:\n self._session.add(JobEntity(\n id=job_id,\n workflow_id=data['workflow_id'],\n name=data['name'],\n username=data['username'],\n work_uri=data['work_uri'],\n no_output_hash=data['no_output_hash'],\n inputs=data['inputs'],\n parameters=data['parameters'],\n output_uri=data['output_uri'],\n final_output=data['final_output'],\n exec_context=data['exec_context'],\n exec_method=data['exec_method'],\n exec_parameters=data['exec_parameters'],\n notifications=data['notifications']\n ))\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return job_id", "def _register_job(self, job, client):\n new_job = copy.deepcopy(job)\n new_job['client_id'] = client['id']\n\n return int(self.create_object(self.JOB_ENDPOINT, new_job))", "def test_gen_job_id_new(self, mock_reattach_job):\n\n job_id = 'brand-new'\n\n mock_reattach_job.side_effect = [\n GenieJobNotFoundError\n ]\n\n assert_equals(job_id, generate_job_id(job_id))", "def job_id(self):\n return self._job_id", "def gen_id(self) -> str:\n self._id += 1\n return str(self._id)", "def sfdcCreateJob(**kwargs):\n api_ver = kwargs.get('api_ver', '')\n session_id = kwargs.get('session_id', '')\n instance = kwargs.get('instance', '')\n job_id = kwargs.get('job_id', '')\n sfdcXml = kwargs.get('sfdcXml', 
{})\n\n bodyXml = sfdcXml.get('job', {}).get('body')\n url = sfdcXml.get('job', {}).get('url')\n headers = sfdcXml.get('job', {}).get('headers')\n\n bodyXml = unicode(bodyXml, \"utf-8\")\n url = url.format(instance=instance, api_ver=api_ver)\n headers['X-SFDC-Session'] = self.session_id\n\n resp = requests.post(url=url, headers=headers, data=bodyXml)\n dictResp = xmltodict.parse(resp.text)\n job_id = str(dictResp['jobInfo']['id'])\n\n self.job_id = job_id\n return job_id", "def generate_unique_job_name(self, name='no_name_job'):\n # TODO: Make it more suitable for disk paths. (no *, -)\n from base64 import urlsafe_b64encode\n name = os.path.basename(name)\n return \"_\".join([os.path.split(name)[1], urlsafe_b64encode(os.urandom(3))])", "def add_default_job():\n new_job = Job(name='job50')\n new_job.insert() \n return ('', 204)", "def jobid(self):\n return self.get_db('jobid')", "def get_job_id(self, job_specifier):\n return self._project.get_job_id(job_specifier=job_specifier)", "def create_job(self, employer_id, compensation, location, description, category_id, group_id):\n\n job = Job(employer_id=employer_id, group_id=group_id, compensation=compensation, location=location, category_id=category_id, description=description) \n db.session.add(job)\n db.session.commit()", "def get_ticket_id():\n return str(time.time()) + str(uuid.uuid4())", "def create_job(api_instance, job):\n api_response = api_instance.create_namespaced_job(\n body=job, namespace=\"default\", pretty=True\n )\n logger.info(\"Job created with status='%s'\" % str(api_response.status))\n return api_response", "def format_job_id(\n service: str,\n instance: str,\n git_hash: Optional[str] = None,\n config_hash: Optional[str] = None,\n) -> str:\n service = str(service).replace(\"_\", \"--\")\n instance = str(instance).replace(\"_\", \"--\")\n if git_hash:\n git_hash = str(git_hash).replace(\"_\", \"--\")\n if config_hash:\n config_hash = str(config_hash).replace(\"_\", \"--\")\n formatted = compose_job_id(service, instance, git_hash, config_hash)\n return formatted", "def _create_id(self):\r\n buildfile_relpath = os.path.dirname(self.address.buildfile.relpath)\r\n if buildfile_relpath in ('.', ''):\r\n return self.name\r\n else:\r\n return \"%s.%s\" % (buildfile_relpath.replace(os.sep, '.'), self.name)", "def create_job(jobtype, server):\n name = generate_job_name(jobtype)\n job = Job.objects.create(jobtype=jobtype, server=server, name=name)\n return job", "def _newClusterId(self):\n return self.guidGenerator.new_id()", "def generate_request_id():\n return 'req-%s' % uuid.uuid4()", "def generate_request_id():\n return 'req-%s' % uuid.uuid4()", "def process_id(job_id):\n pass # Not implemented yet", "def generateID(self):\n\n return str(uuid.uuid1())", "def unique_id(self) -> str:\n return \"{}-{}-{}\".format(*self._id)", "def generate_id():\n return uuid4().get_hex()", "def getJobID(self):\n return self.__nupicJobID", "def add_new_job():\n ClientID = request.form['ClientID']\n job_name = request.form['job_name']\n rate = int(float(request.form['rate']) * 100)\n\n job = Job(Name=job_name, ClientID=ClientID, DefaultRate=rate, Active=True)\n\n get_module_logger().info(\"Created job %s\", job)\n\n job.insert()\n\n return redirect(url_for('all_jobs_for_client', ClientID=ClientID))", "def generateUID(self):\n global previous_id\n \n id = previous_id\n previous_id += 1\n \n return id", "def get_job_id(self, filename):\n return Jobs.get_job_id(filename)", "def get_unique_id():\n global unique_id_increment\n if unique_id_increment is 
None:\n unique_id_increment = 0\n unique_id_increment += 1\n return '%d%d' % (int(time.time()), unique_id_increment)", "def generate_transaction_id():\r\n return str(int(time.time() * 1000))", "def get_unique_id():\n global unique_id_increment\n if unique_id_increment is None:\n unique_id_increment = 0\n unique_id_increment += 1\n return \"%d%d\" % (int(time.time()), unique_id_increment)", "def generate_id(cls):\n cls._index += 1\n return 'fp_%s' % cls._index", "def new_case_id():\n return uuid.uuid4().hex", "def new_case_id():\n return uuid.uuid4().hex", "def generate_message_id():\n return str(uuid.uuid1())", "def id(self):\n return str(self.jid)", "def unique_id(self):\n return (\n \"a80f3d5b-df3d-4e38-bbb7-1025276830cd\"\n )", "def spawn_update_job(ip_address, headers, job_payload):\n job_id = -1\n job_url = 'https://%s/api/JobService/Jobs' % ip_address\n job_resp = requests.post(job_url, headers=headers,\n json=job_payload,\n verify=False)\n if job_resp.status_code == 201:\n job_id = (job_resp.json())['Id']\n print(\"Successfully spawned update job\", job_id)\n else:\n print(\"Unable to spawn update job .. Exiting\")\n return job_id", "def create(cls, job_id: str) -> \"JobManifest\":\n now = datetime.datetime.now(datetime.timezone.utc)\n return JobManifest(creation_time=now, job_id=job_id, orbit_ids=[], task_ids=[])", "def genPID(toHash):\n sha256 = getSHA256Hex(toHash)\n pid = sha256[0:32]\n return pid", "def _create_job(self,\n name,\n environment_string,\n description='',\n platform='LINUX'):\n job = data_types.Job()\n job.name = name\n if environment_string.strip():\n job.environment_string = environment_string\n job.platform = platform\n job.descripton = description\n job.put()\n\n return job", "def job_id(self):\n return self._properties.get(\"jobReference\", {}).get(\"jobId\")", "def prep_jid(nocache): # pylint: disable=unused-argument\n #return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid()\n return salt.utils.gen_jid()", "def get_jobs_id(self, ti) -> None:\n return self.get_hook().get_jobs_id(ti)", "def req_id_generator() -> str:\n # 8 chars long should be long enough, add the 'Generated' prefix to know not to search for this id in the elb logs\n return f'Generated-{str(uuid.uuid4())[:8]}'", "def id_generator():\r\n new_id = uuid.uuid4()\r\n return new_id.hex", "def insert_job(sess, filetype, status, type_id, submission, job_id=None, filename=None,\n file_size=None, num_rows=None):\n job = Job(\n file_type_id=filetype,\n job_status_id=status,\n job_type_id=type_id,\n submission_id=submission,\n original_filename=filename,\n file_size=file_size,\n number_of_rows=num_rows\n )\n if job_id:\n job.job_id = job_id\n sess.add(job)\n sess.commit()\n return job", "def create_default_identifier():\n return random.randint(0, constants.UINT64_MAX)", "def get_unique_id(self):\n if not self.unique_id:\n self.unique_id = uuid.uuid4().hex\n return self.unique_id", "def replace_job_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"replace_job_id\")", "def generate_id():\n\treturn \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())", "def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")", "def get_new_id(self) -> str:\n user = self.get_template(list_entries=False)\n return user.id", "def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument\n return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)", "def __init__(self, job_id):\n self._job_id = job_id", "def __create_periodic_id() -> 
str:\n now = datetime.now()\n return now.strftime(\"%m%d%Y%H%M%S%f\")", "def job_id(self, job_id: JobId):\r\n self._job_id = job_id", "def __assign_name_id(self):\n if not self.name_id:\n self.name_id = str(BaseTicketing.objects.create())", "def _build_id():\n # 1: Timestamp\n current_id = FindanceIdField.date_to_int(datetime.utcnow().replace(tzinfo=timezone.utc)) << 23\n\n # 2: Shard ID (For now, always one)\n current_id |= SHARD_ID << 10\n\n # 3: Auto-incr with the last 10 bits\n current_id |= next(BASIC_TICK) % 1024\n\n return current_id", "def get_id(self) -> str: # noqa\n if self._id is None:\n self._id = str(uuid4())\n return self._id", "def create_id(uid, begintime, endtime):\n allowed_chars = string.ascii_lowercase[:22] + string.digits\n temp = re.sub('[^{}]'.format(allowed_chars), '', uid.lower())\n return re.sub('[^{}]'.format(allowed_chars), '', uid.lower()) + str(arrow.get(begintime).timestamp) + str(arrow.get(endtime).timestamp)", "def createUniqueRatingId():\n #connector = appEngine.connect()\n ratingID = 'r' + str(ceil(time.time()))\n return ratingID", "def _create_job_spec(\n self,\n job_id: Text,\n training_input: Dict[Text, Any],\n job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:\n pass", "def get_custom_object_id():\n worker = ray.worker.global_worker\n object_id = ray._raylet.compute_put_id(worker.current_task_id,\n worker.task_context.put_index)\n worker.task_context.put_index += 1\n return object_id", "def generate_id():\n return \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())", "def unique_id(self):\n return f\"bhyve:program:{self._program_id}\"", "def new_task(\n self,\n task_name: str,\n task_type: str,\n project_id: Optional[str] = None,\n parent_task_id: Optional[str] = None,\n ) -> str:\n if task_name in [\"\"]:\n raise MephistoDBException(f'Invalid task name \"{task_name}')\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n try:\n c.execute(\n \"\"\"INSERT INTO tasks(\n task_name,\n task_type,\n project_id,\n parent_task_id\n ) VALUES (?, ?, ?, ?);\"\"\",\n (\n task_name,\n task_type,\n nonesafe_int(project_id),\n nonesafe_int(parent_task_id),\n ),\n )\n task_id = str(c.lastrowid)\n return task_id\n except sqlite3.IntegrityError as e:\n if is_key_failure(e):\n raise EntryDoesNotExistException(e)\n elif is_unique_failure(e):\n raise EntryAlreadyExistsException(e)\n raise MephistoDBException(e)", "def _create_job_spec(\n self,\n job_id: Text,\n training_input: Dict[Text, Any],\n job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:\n\n job_spec = {\n 'display_name': job_id,\n 'job_spec': training_input,\n 'labels': job_labels,\n }\n return job_spec" ]
[ "0.7751475", "0.7743574", "0.74370795", "0.73037905", "0.7232655", "0.7232655", "0.6982425", "0.6982323", "0.6908271", "0.6889731", "0.68492854", "0.67642814", "0.6763294", "0.6702419", "0.6602108", "0.65147835", "0.65133655", "0.65115803", "0.6507985", "0.64827", "0.6455972", "0.64288855", "0.6410233", "0.64082557", "0.6406856", "0.6403862", "0.638045", "0.6375566", "0.63718086", "0.6368601", "0.6356266", "0.63394564", "0.6333098", "0.6316652", "0.62851304", "0.6264216", "0.6254518", "0.6236965", "0.6223924", "0.62193805", "0.6210113", "0.61915", "0.6189886", "0.6159491", "0.6150623", "0.6149435", "0.61399704", "0.61259985", "0.612044", "0.61144996", "0.6109243", "0.6109243", "0.6079908", "0.60688406", "0.60455245", "0.60374725", "0.6031759", "0.6029491", "0.60212964", "0.6013509", "0.5989577", "0.5982456", "0.59760696", "0.59612936", "0.59609306", "0.59609306", "0.5950612", "0.5949643", "0.5945889", "0.5942933", "0.59279776", "0.5927589", "0.59222484", "0.59217834", "0.5902209", "0.58998567", "0.5897612", "0.5888695", "0.58748114", "0.5867959", "0.58530533", "0.58521503", "0.5850182", "0.5843035", "0.58329", "0.58323634", "0.582662", "0.5824751", "0.5824655", "0.5824597", "0.58234584", "0.5786501", "0.57848316", "0.57738966", "0.5758092", "0.5757019", "0.57567406", "0.57560945", "0.5745925", "0.5735833" ]
0.88138694
0
Runs the supplied runners sequentially.
def run( runners, data_loader=None, warm_up=None, use_subprocess=None, subprocess_timeout=None, subprocess_polling_interval=None, save_inputs_path=None, ): warm_up = util.default(warm_up, 0) data_loader = util.default(data_loader, DataLoader()) use_subprocess = util.default(use_subprocess, False) subprocess_polling_interval = util.default(subprocess_polling_interval, 30) loader_cache = DataLoaderCache(data_loader, save_inputs_path=save_inputs_path) def execute_runner(runner, loader_cache): with runner as active_runner: # DataLoaderCache will ensure that the feed_dict does not contain any extra entries # based on the provided input_metadata. loader_cache.set_input_metadata(active_runner.get_input_metadata()) if warm_up: G_LOGGER.start(f"{active_runner.name:35} | Running {warm_up} warm-up run(s)") try: feed_dict = loader_cache[0] except IndexError: G_LOGGER.warning( f"{warm_up} warm-up run(s) were requested, but data loader did not supply any data. Skipping warm-up run(s)" ) else: G_LOGGER.ultra_verbose(f"Warm-up Input Buffers:\n{util.indent_block(feed_dict)}") # First do a few warm-up runs, and don't time them. for _ in range(warm_up): active_runner.infer(feed_dict=feed_dict) G_LOGGER.finish(f"{active_runner.name:35} | Finished {warm_up} warm-up run(s)") # Then, actual iterations. index = 0 iteration_results = [] total_runtime = 0 for index, feed_dict in enumerate(loader_cache): G_LOGGER.info( f"{active_runner.name:35}\n---- Inference Input(s) ----\n{TensorMetadata().from_feed_dict(feed_dict)}", mode=LogMode.ONCE, ) G_LOGGER.extra_verbose( lambda: f"{active_runner.name:35} | Feeding inputs:\n{util.indent_block(dict(feed_dict))}" ) outputs = active_runner.infer(feed_dict=feed_dict) runtime = active_runner.last_inference_time() total_runtime += runtime # Without a deep copy here, outputs will always reference the output of the last run iteration_results.append( IterationResult(outputs=copy.deepcopy(outputs), runtime=runtime, runner_name=active_runner.name) ) G_LOGGER.info( f"{active_runner.name:35}\n---- Inference Output(s) ----\n{TensorMetadata().from_feed_dict(outputs)}", mode=LogMode.ONCE, ) G_LOGGER.extra_verbose( lambda: f"{active_runner.name:35} | Inference Time: {runtime * 1000.0:.3f} ms | Received outputs:\n{util.indent_block(dict(outputs))}" ) total_runtime_ms = total_runtime * 1000.0 G_LOGGER.finish( f"{active_runner.name:35} | Completed {index + 1} iteration(s) in {total_runtime_ms:.4g} ms | Average inference time: {total_runtime_ms / float(index + 1):.4g} ms." ) return iteration_results # Wraps execute_runner to use a queue. def execute_runner_with_queue(runner_queue, runner, loader_cache): iteration_results = None try: iteration_results = execute_runner(runner, loader_cache) except: # Cannot necessarily send the exception back over the queue. G_LOGGER.backrace() util.try_send_on_queue(runner_queue, iteration_results) # After finishing, send the updated loader_cache back. util.try_send_on_queue(runner_queue, loader_cache) # Do all inferences in one loop, then comparisons at a later stage. # We run each runner in a separate process so that we can provide exclusive GPU access for each runner. run_results = RunResults() if not runners: G_LOGGER.warning( "No runners were provided to Comparator.run(). Inference will not be run, and run results will be empty." 
) for runner in runners: G_LOGGER.start(f"{runner.name:35} | Activating and starting inference") if use_subprocess: runner_queue = Queue() process = Process(target=execute_runner_with_queue, args=(runner_queue, runner, loader_cache)) process.start() # If a subprocess hangs in a certain way, then process.join could block forever. Hence, # we need to keep polling the process to make sure it really is alive. iteration_results = None while process.is_alive() and iteration_results is None: try: iteration_results = util.try_receive_on_queue( runner_queue, timeout=subprocess_polling_interval / 2 ) # Receive updated loader cache, or fall back if it could not be sent. loader_cache = util.try_receive_on_queue(runner_queue, timeout=subprocess_polling_interval / 2) except queue.Empty: G_LOGGER.extra_verbose("Polled subprocess - still running") try: assert iteration_results is not None run_results.append((runner.name, iteration_results)) process.join(subprocess_timeout) except: G_LOGGER.critical( f"{runner.name:35} | Terminated prematurely. Check the exception logged above. If there is no exception logged above, make sure not to use the --use-subprocess flag or set use_subprocess=False in Comparator.run()." ) finally: process.terminate() if loader_cache is None: G_LOGGER.critical( "Could not send data loader cache to runner subprocess. Please try disabling subprocesses " "by removing the --use-subprocess flag, or setting use_subprocess=False in Comparator.run()" ) else: run_results.append((runner.name, execute_runner(runner, loader_cache))) G_LOGGER.verbose(f"Successfully ran: {[r.name for r in runners]}") return run_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_test_suites(self, suites):\n for suite_class in suites:\n test_suite = suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def test_serial_runs(make_runner: Callable[..., TargetFunctionRunner]) -> None:\n runner = make_runner(target_delayed, use_instances=True)\n\n run_info = TrialInfo(config=2, instance=\"test2\", seed=0, budget=0.0)\n runner.submit_trial(run_info)\n\n run_info = TrialInfo(config=3, instance=\"test3\", seed=0, budget=0.0)\n runner.submit_trial(run_info)\n\n results = runner.iter_results()\n\n first = next(results, None)\n assert first is not None\n\n second = next(results, None)\n assert second is not None\n\n # To make sure runs launched serially, we just make sure that the end time of a run\n # is later than the other # Results are returned in left to right\n _, first_run_value = first\n _, second_run_value = second\n assert int(first_run_value.endtime) <= int(second_run_value.starttime)", "def start_runners(self, runner_names=None):\n if not runner_names:\n runner_names = []\n for runner_config in config.runner_configs:\n # Strip off the 'runner.' prefix.\n assert runner_config.name.startswith('runner.'), (\n 'Unexpected runner configuration section name: {}'.format(\n runner_config.name))\n runner_names.append(runner_config.name[7:])\n # For each runner we want to start, find their config section, which\n # will tell us the name of the class to instantiate, along with the\n # number of hash space slices to manage.\n for name in runner_names:\n section_name = 'runner.' + name\n # Let AttributeError propagate.\n runner_config = getattr(config, section_name)\n if not as_boolean(runner_config.start):\n continue\n # Find out how many runners to instantiate. This must be a power\n # of 2.\n count = int(runner_config.instances)\n assert (count & (count - 1)) == 0, (\n 'Runner \"{0}\", not a power of 2: {1}'.format(name, count))\n for slice_number in range(count):\n # runner name, slice #, # of slices, restart count\n info = (name, slice_number, count, 0)\n spec = '{0}:{1:d}:{2:d}'.format(name, slice_number, count)\n pid = self._start_runner(spec)\n log = logging.getLogger('mailman.runner')\n log.debug('[{0:d}] {1}'.format(pid, spec))\n self._kids.add(pid, info)", "def run_all_tests(self):\n for index in range(len(self.__test_set_list)):\n self.run_test(index)", "def start(self):\n for trial in self._trials:\n self._run(trial)", "def run(self, *args, **kwargs) -> None:\n loop = tqdm(self.configs, desc='Configurations')\n for cfg in loop:\n loop.set_postfix_str(cfg.experiment_cfg['name'])\n for i in range(cfg.num_models):\n filename = None\n run_id = None\n if cfg.filenames is not None:\n if isinstance(cfg.filenames, str):\n filename = cfg.filenames\n else:\n filename = cfg.filenames[i]\n elif cfg.run_ids is not None:\n run_id = cfg.run_ids[i]\n\n run_cfg = modelgen_cfg_to_runner_cfg(cfg, run_id=run_id, filename=filename)\n runner = Runner(run_cfg, persist_metadata=cfg.experiment_cfg)\n runner.run()\n\n # clear up memory between runs\n torch.cuda.empty_cache()", "def run_dataset(dataset, trackers, debug=False, threads=0, num_gpus=8):\n multiprocessing.set_start_method('spawn', force=True)\n\n print('Evaluating {:4d} trackers on {:5d} sequences'.format(len(trackers), len(dataset)))\n\n multiprocessing.set_start_method('spawn', force=True)\n\n if threads == 0:\n mode = 'sequential'\n else:\n mode = 'parallel'\n\n if mode == 'sequential':\n for seq in dataset:\n for tracker_info in trackers:\n run_sequence(seq, tracker_info, debug=debug)\n 
elif mode == 'parallel':\n param_list = [(seq, tracker_info, debug, num_gpus) for seq, tracker_info in product(dataset, trackers)]\n with multiprocessing.Pool(processes=threads) as pool:\n pool.starmap(run_sequence, param_list)\n print('Done')", "def run_all(self):\n failures, errors = [], []\n\n # Run each test case registered with us and agglomerate the results.\n for case_ in self.cases:\n case_.run()\n update_results(failures, errors, case_)\n\n # Display our results.\n print_errors(errors)\n print_failures(failures)\n print_overview(errors, failures)\n\n # Exit with 0 if all tests passed, >0 otherwise.\n sys.exit(len(failures) + len(errors))", "def runner_setup():\n runner = ClassicRunner()\n yield runner", "def _run_tests(self):\n for pyunit_testcase in self.cfg.testcases:\n yield self._run_testsuite(pyunit_testcase)", "def runner_setup():\n concurrent_sessions = 5\n runner = VisualGridRunner(concurrent_sessions)\n yield runner", "def run(self):\n for worker in self.simulation_workers:\n worker.start()", "def run(self):\n results = []\n for task in self.tasks:\n results.append(task.run())\n self.tasks = []\n return results", "def run(self):\n\n self.create_trials() # create them *before* running!\n self.start_experiment()\n\n for trail in self.trials:\n trial.run()\n\n self.close()", "def launch(cls, recorder_count):\n for i in xrange(recorder_count):\n recorder = Recorder()\n cls.recorders.append(recorder)\n\n run = recorder._run_bulk if config.options.use_bulk_tracking else recorder._run_single\n t = threading.Thread(target=run)\n\n t.daemon = True\n t.start()\n logging.debug('Launched recorder')", "def run_multiple_test_cycles(self):\n # Perform as many cycles as required\n while self.args.repetitions >= 0:\n self.run_one_test_cycle()\n self.args.repetitions -= 1", "def run_tests(tests):\n return [test(t) for t in tests]", "def testJobMultiTaskRunner(databases):\n\n class CustomGenerator(DataGenerator):\n chanceChainProduct = 0.4\n numTaskRunners = 5\n chanceTRFramework = 0.7\n\n def frameworksForTaskRunner(self):\n return [\n framework for framework in self.frameworks\n if self.rnd.random() < self.chanceTRFramework\n ]\n\n seed = 123456789\n rnd = random.Random(seed)\n runs = 10\n randomRuns(databases, runs, rnd, CustomGenerator)", "def run_dataset(dataset, trackers, debug=False, threads=0, visdom_info=None):\n print('Evaluating {:4d} trackers on {:5d} sequences'.format(len(trackers), len(dataset)))\n\n visdom_info = {} if visdom_info is None else visdom_info\n\n if threads == 0:\n mode = 'sequential'\n else:\n mode = 'parallel'\n\n if mode == 'sequential':\n for seq in dataset:\n for tracker_info in trackers:\n run_sequence(seq, tracker_info, debug=debug, visdom_info=visdom_info)\n elif mode == 'parallel':\n param_list = [(seq, tracker_info, debug, visdom_info) for seq, tracker_info in product(dataset, trackers)]\n with multiprocessing.Pool(processes=threads) as pool:\n pool.starmap(run_sequence, param_list)\n print('Done')", "def run_stage_loop(cls, _opts, tests_results, put_next_stage):\n for _, result in tests_results:\n put_next_stage(result)", "def run(self):\n for tool in self.tools:\n tool.run()\n return", "def run(self) -> None:\n if len(self._waiting) == 0:\n raise ValueError(\"Nothing is waiting\")\n waiters = self._waiting\n self._waiting = []\n for d in waiters:\n d.callback(None)", "def start_stats_runners(self):\n\n def stats_runner_run():\n while(True):\n with u.timeit(\"kfac_update\"):\n self.model.advance_batch()\n self.update_stats()\n\n runner_threads = 
[threading.Thread(target=stats_runner_run,\n args=(), daemon=True)]\n for t in runner_threads:\n t.start()\n return runner_threads", "def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def multi_radexcee(observations, gridpath, **kwargs):\n print 'Running %d instances.' % len(observations)\n return [run_radexcee(obs, gridpath, **kwargs) for obs in observations]", "def run_trials(self, num=0):\n if num == 'all':\n self.trials_to_run = len(self.trials)\n else:\n self.trials_to_run = num\n self.vision_egg.go()", "def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)", "def __run_class_fixtures(self, stage, fixtures, callback_on_run_event, callback_on_complete_event):\n self._stage = stage\n\n for fixture_method in fixtures:\n result = TestResult(fixture_method)\n\n try:\n for callback in self.__callbacks[callback_on_run_event]:\n callback(result.to_dict())\n\n result.start()\n\n if self.__execute_block_recording_exceptions(fixture_method, result, is_class_level=True):\n result.end_in_success()\n except (KeyboardInterrupt, SystemExit):\n result.end_in_interruption(sys.exc_info())\n raise\n finally:\n for callback in self.__callbacks[callback_on_complete_event]:\n callback(result.to_dict())", "def execute(self):\n\t\tfor callback in self:\n\t\t\tcallback()", "def _run(self):\n sequence = list(range(len(self.sequence)))\n self._send_sequence() # Share the initial sequence\n while True:\n if self.shuffle:\n random.shuffle(sequence)\n\n with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:\n for i in sequence:\n if self.stop_signal.is_set():\n return\n\n self.queue.put(\n 
executor.apply_async(get_index, (self.uid, i)), block=True)\n\n # Done with the current epoch, waiting for the final batches\n self._wait_queue()\n\n if self.stop_signal.is_set():\n # We're done\n return\n\n # Call the internal on epoch end.\n self.sequence.on_epoch_end()\n self._send_sequence() # Update the pool", "def instantiate_runners(self):\n for _, a in self.wf['action'].items():\n if 'docker://' in a['uses']:\n a['runner'] = DockerRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n continue\n\n if 'shub://' in a['uses']:\n a['runner'] = SingularityRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n continue\n\n if './' in a['uses']:\n if os.path.exists(os.path.join(a['uses'], 'Dockerfile')):\n a['runner'] = DockerRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n elif os.path.exists(os.path.join(a['uses'],\n 'singularity.def')):\n a['runner'] = SingularityRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n else:\n a['runner'] = HostRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n continue\n\n dockerfile_path = os.path.join(a['repo_dir'], a['action_dir'],\n 'Dockerfile')\n singularityfile_path = os.path.join(a['repo_dir'], a['action_dir'],\n 'singularity.def')\n\n if os.path.exists(dockerfile_path):\n a['runner'] = DockerRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n elif os.path.exists(singularityfile_path):\n a['runner'] = SingularityRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n else:\n a['runner'] = HostRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)", "def execute_parallel_runs(self, runs, instances=None):\n if not instances:\n instances = self.threads\n\n # TODO(mbarbella): Hack for Android. If we are running single-threaded, it\n # is safe to call a cleanup function on each thread. Ideally, the minimizer\n # would like to assume that when it finishes running a process it cleans\n # itself up properly.\n cleanup_function = None\n if self.threads == 1:\n cleanup_function = process_handler.cleanup_stale_processes\n\n run_queue = minimizer.TestQueue(\n instances, per_thread_cleanup_function=cleanup_function)\n for _ in range(runs):\n run_queue.push(self.file_path, self.run, self.store_result_from_run)\n\n run_queue.process()\n\n # At timeout, we send SIGTERM. 
Wait for 2 seconds before sending SIGKILL.\n time.sleep(2)\n process_handler.cleanup_stale_processes()\n\n with self._result_lock:\n results = self._results\n self._results = []\n\n return results", "def run(race_types, odds_only, pred_only):\n for race_type in race_types or ['R', 'G', 'H']:\n logger.info('Running race type {}'.format(race_type))\n\n races = load_races(race_type)\n logger.info('loaded {} races...'.format(len(races)))\n\n for i, race in enumerate(races):\n logger.debug('Running race {} {}'.format(race.meeting_name, race.meeting_date))\n runners = race.get_runners()\n\n try:\n # shared with watching\n if not pred_only:\n add_odds(runners)\n if not odds_only:\n add_predictions(runners, race_type)\n add_probabilities(runners)\n except OddsError as e:\n logger.warning(e)\n delete_race(race.id)\n db_session.commit()\n except (Exception, ProbabilityError):\n print(json.dumps(race, indent=4, default=str, sort_keys=True))\n print(json.dumps(runners, indent=4, default=str, sort_keys=True))\n delete_race(race.id)\n db_session.commit()\n raise\n else:\n race.num_runners = len([r for r in runners if r['has_odds']])\n race.set_runners(runners)\n logger.info('{:.0f}% completed'.format(i / len(races) * 100))\n\n logger.info('saving...')\n db_session.commit()", "def call_subscribers(self, *args, **kwargs) -> None:\n for subscriber in self.get_subscribers():\n subscriber(*args, **kwargs)", "def run(self, **kwargs):\n for repl in self.replicas:\n self.log.info('-'*50)\n self.log.info(\"Running %s analysis...\"%repl.name)\n self.__submitReplica(repl, **kwargs)\n self.log.info('-'*50)", "def run_suite(*test_classes):\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n for test_class in test_classes:\n tests = loader.loadTestsFromTestCase(test_class)\n suite.addTests(tests)\n if suite is not None:\n unittest.TextTestRunner(verbosity=2).run(suite)\n return", "def iter_runs(self, idxs=False, run_sel=None):\n\n if run_sel is None:\n run_sel = self.run_idxs\n\n for run_idx in self.run_idxs:\n if run_idx in run_sel:\n run = self.run(run_idx)\n if idxs:\n yield run_idx, run\n else:\n yield run", "def run_all(self):\n runs = []\n for run in self.benchmarks:\n run.start()\n run.wait()\n runs.append(run.metadata)\n return runs", "def _run_tasks(self):\n next_tasks = self._job_queue.get_next_tasks()\n for task in next_tasks:\n sid = self._docker.start_task(task.identifier, task.image, task.name, task.args)\n self._job_queue.mark_task_started(task.identifier, task.name, sid)", "def run(self):\n tasks = []\n for stream in self.streams:\n task = mp.Process(target=self.record, args=[stream])\n task.start()\n tasks.append(task)\n for t in tasks:\n t.join()", "def run_providers(self, argv):\n\n for name, provider in self.providermanager:\n provider = provider(self)\n self.produce_output(provider.title,\n provider.location,\n provider.run(argv))", "def execute_series(self):\n for n in xrange(self.conf[\"n_runs\"]):\n self.runs[n].execute()", "def run_callbacks(self, **kwargs):\n for callback in self.CALLBACKS:\n getattr(self, callback)(**kwargs)", "def run(self, steps):\n self.sim.run(steps)", "def run_in_parallel(self):\n\t\tfor p in self.parallel_threads:\n\t\t\tp.start()\n\t\tfor p in self.parallel_threads:\n\t\t\tp.join()", "def gen_stage_loop(cls, _opts, tests, put_next_stage, _put_result_stage):\n for test in tests:\n put_next_stage(test)", "def execute(self):\n for test in self.tests:\n test.execute()\n self.logger.dump()\n print(\"Finished!\")", "def run(self):\n\n 
self.__run_class_setup_fixtures()\n self.__enter_context_managers(self.class_setup_teardown_fixtures, self.__run_test_methods)\n self.__run_class_teardown_fixtures()", "def run(ctx, report_ids):\n client = ctx.obj[\"client\"]\n for report_id in report_ids:\n report = client.run_report(report_id)\n click.secho(f\"Running {report}\")", "def run_all_tasks(data_dir):\n print(\"Training and testing for all tasks ...\")\n for t in range(20):\n run_task(data_dir, task_id=t + 1)", "def run(self, *pipeline_factories, exceptions=None, wait=True):\n async def run_and_wait(*jobs, exceptions=None, wait=True):\n run_tasks = []\n for job in jobs:\n run_tasks.append(\n self.event_loop.create_task(\n job.run_async(\n stdin_factory=ManualStreamFactory(fileobj_r=self.stdin),\n stdout_factory=ManualStreamFactory(fileobj_w=self.stdout),\n stderr_factory=ManualStreamFactory(fileobj_w=self.stderr)\n )\n )\n )\n\n for run_task in run_tasks:\n await run_task\n\n if wait:\n for job in jobs:\n await job.wait_async(exceptions=exceptions)\n\n if exceptions is None:\n exceptions = self.exceptions\n\n ret = []\n for pipeline_factory in pipeline_factories:\n job = Job(\n pipeline_factory,\n env=self.environment,\n cwd=self.cwd,\n event_loop=self.event_loop\n )\n self.event_loop.run_until_complete(\n run_and_wait(job, exceptions=exceptions, wait=wait)\n )\n ret.append(job)\n\n return ret", "def run(self, steps = 1000):\n for step in range(steps):\n if self.is_done():\n return\n self.step()", "def run(self, steps):\n if not self.skip_reference:\n self.reference_sim.run(steps)\n self.compare_sim.run(steps)", "def run_tests(self):\n\n self.endurance_results = []\n self._mozmill.add_listener(self.endurance_event, eventType='mozmill.enduranceResults')\n self._mozmill.persisted['endurance'] = {'delay': self.delay,\n 'iterations': self.options.iterations,\n 'entities': self.options.entities,\n 'restart': self.options.restart}\n\n self.manifest_path = os.path.join('tests', 'endurance')\n if not self.options.reserved:\n self.manifest_path = os.path.join(self.manifest_path,\n \"manifest.ini\")\n else:\n self.manifest_path = os.path.join(self.manifest_path,\n 'reserved',\n self.options.reserved + \".ini\")\n TestRun.run_tests(self)", "def batch_test_run():\n WebDriverWait(browser, 15).until(EC.visibility_of_element_located((By.XPATH, '//button[contains(text(), ''\"Run\")]')))\n batch_run_button = browser.find_elements_by_xpath('//button[contains(text(), \"Run\")]')\n for test in batch_run_button:\n test.click()\n time.sleep(4)", "def execute(self):\n results = []\n \n for callback in self.callback:\n results.append(callback(*self.args))\n \n return results", "def run(self, steps=1000):\n for step in range(steps):\n if self.is_done():\n return\n self.step()", "def run(self, steps=1000):\n for step in range(steps):\n if self.is_done():\n return\n self.step()", "def _threaded(self, *args, **kwargs):\n\n for target in self.targets:\n result = target(*args, **kwargs)\n self.queue.put(result)", "def run_tests(remit, sourcelist):\n for source in sourcelist:\n # - move into source's directory\n os.chdir(source)\n # - build worklist of commands\n commands = list()\n commands += test_matrix(remit, source)\n commands += extra_tests(remit, source)\n commands = remove_blacklist(remit, source, commands)\n # - run the commands\n for i, command in enumerate(commands):\n print('[test %s: %s of %d] %s'\n % (source,\n str(i+1).rjust(len(str(len(commands)))),\n len(commands),\n ' '.join(command)))\n subprocess.call(command)\n # - move out of source's 
directory\n os.chdir('..')", "def run_async(self, examples, pool):\n return pool.imap(self, examples)", "def run(self):\n count = self.neuron_count\n for i in range(0, count):\n self.run(i)", "def execute(\n cls, datasets: list[DatasetBase], runner: Callable, nprocs: int | None = None\n ) -> Self:\n if nprocs is None:\n nprocs = max(os.cpu_count() - 1, 1)\n\n results: list[ExecutionResponse] = []\n if nprocs == 1:\n # run without a ProcessPoolExecutor; useful for debugging\n for dataset in tqdm(datasets, desc=\"Executing...\"):\n results.append(runner(dataset))\n else:\n # adapted from https://gist.github.com/alexeygrigorev/79c97c1e9dd854562df9bbeea76fc5de\n with ProcessPoolExecutor(max_workers=nprocs) as executor:\n with tqdm(total=len(datasets), desc=\"Executing...\") as progress:\n futures = []\n for dataset in datasets:\n future = executor.submit(runner, dataset)\n future.add_done_callback(lambda p: progress.update())\n futures.append(future)\n\n for future in futures:\n results.append(future.result())\n\n batch = cls()\n for result in tqdm(results, desc=\"Building batch...\"):\n if result.success:\n if isinstance(result.content, list):\n for item in result.content:\n batch.sessions.append(BmdsSession.from_serialized(item))\n else:\n batch.sessions.append(BmdsSession.from_serialized(result.content))\n else:\n batch.errors.append(result.content)\n\n return batch", "async def run_all_clients():\n completed_clients = 0\n for client_result in asyncio.as_completed(clients):\n completed_clients += await client_result\n return completed_clients", "def Run(self, vms, workloads=None, run_kwargs=None) -> list[sample.Sample]:\n if FLAGS.ycsb_skip_run_stage:\n return []\n workloads = workloads or GetWorkloadFileList()\n assert workloads, 'no workloads'\n if not run_kwargs:\n run_kwargs = {}\n if _BURST_LOAD_MULTIPLIER.value:\n samples = self._RunBurstMode(vms, workloads, run_kwargs)\n elif _INCREMENTAL_TARGET_QPS.value:\n samples = self._RunIncrementalMode(vms, workloads, run_kwargs)\n else:\n samples = list(self.RunStaircaseLoads(vms, workloads, **run_kwargs))\n if (\n FLAGS.ycsb_sleep_after_load_in_sec > 0\n and not FLAGS.ycsb_skip_load_stage\n ):\n for s in samples:\n s.metadata['sleep_after_load_in_sec'] = (\n FLAGS.ycsb_sleep_after_load_in_sec\n )\n return samples", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def __call__(self, *pipeline_factories, exceptions=None, wait=True):\n return self.run(*pipeline_factories, exceptions=exceptions, wait=wait)", "def run(self, exec_type='sequential'):\n for environment, agents in self.experiment_structure.items():\n for agent, exp in agents.items():\n self.logger.info('Starting Experiment for {} on {}'.format(agent, environment))\n exp.run(exec_type=exec_type, **self.run_params)", "def start(self):\n logger.debug(\"Starting {0} downloaders\".format(self.num_downloaders))\n for p in self._downloaders:\n # p.daemon = True\n p.start()\n logger.debug(\"Starting {0} checkers\".format(self.num_checkers))\n for p in self._checkers:\n # p.daemon = True\n p.start()", "def _initialize_runners_startup(self):\n if self.command_group.is_cmd0_runner():\n self._initialize_runner(self.command_group.cmd0)\n if self.command_group.is_cmd1_runner():\n self._initialize_runner(self.command_group.cmd1)\n if self.command_group.is_cmd2_runner():\n self._initialize_runner(self.command_group.cmd2)", "def run_all(operations=ops):\n for operation in operations:\n 
run(operation)", "def run(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)\n tasks = []\n self.threads[0] = Worker(self, 0, self.options, self.logger,\n self.queue, self.storage, self.parser, self.addToQueue, role=1)\n tasks.append(self.threads[0].begin())\n for thread in range(1, self.max_threads):\n # Spawn and start the threads\n self.threads[thread] = Worker(self, thread, self.options, self.logger,\n self.queue, self.storage, self.parser, self.addToQueue)\n tasks.append(self.threads[thread].begin())\n self.loop.run_until_complete(asyncio.gather(*tasks))", "async def _run(self, remotes: List[PodData]) -> List[Optional[Exception]]:\n self.log.info(\"Replicating from remotes: {}\".format([r.env_name for r in remotes]))\n jobs_queue = asyncio.Queue(maxsize=len(remotes))\n\n replicators = [\n asyncio.ensure_future(self.replicate(remote_config=remote, jobs_queue=jobs_queue))\n for remote in remotes\n ]\n reconstructors = [\n asyncio.ensure_future(self.reconstruct(jobs_queue=jobs_queue))\n for _ in remotes\n ]\n replicators_results = await asyncio.gather(*replicators, return_exceptions=True)\n\n await jobs_queue.join()\n for c in reconstructors:\n c.cancel()\n return replicators_results", "def run_seeds(self, nbrun):\n self._check_status(STATUS_IDLE)\n self._set_status(STATUS_RUNNING_SEEDS)\n self._notify_listeners_start_operation(listener.OPERATION_RUN_SEEDS)\n rsol = self.agent.run_seeds(nbrun)\n self._set_status(STATUS_IDLE)\n self._notify_listeners_end_operation()\n return rsol", "def run_multiple(self, num_episodes=5, base_output_name=\"logs/output_command\"):\n for i in range(num_episodes):\n client.reset()\n client.confirmConnection()\n client.enableApiControl(True)\n client.armDisarm(True)\n airsim.time.sleep(1)\n client.takeoffAsync().join()\n output_filename = base_output_name + \"{:02d}\".format(i) + \".txt\"\n self.move(output_filename, self.input_file_name)\n self.clear_logging_arr()", "def train(self, iters, n_episodes):\n for i in range(iters):\n self.self_play(n_episodes)\n self.learn()", "def run(self):\n self.log.overall('Starting run')\n run_start = time()\n for epoch in xrange(self.n_epochs):\n self.agent.reset()\n self.n_epoch = epoch\n self._run_epoch()\n self.log.overall('End of run ({:.2f} s)'.format(time() - run_start))", "def setup_crawler(self, crawlers: List[BaseCrawler]) -> None:\n self.tasks.extend(crawlers)", "def _run_tests(\n self, test_prod_class_pairs: ResultPair\n ) -> _output.TestResult:\n results = []\n classpath = self._generate_classpath()\n with _junit4_runner.security_policy(\n classpath, active=not self.junit4_disable_security\n ) as security_policy:\n for test_class, prod_class in test_prod_class_pairs:\n test_result = _junit4_runner.run_test_class(\n test_class,\n prod_class,\n classpath=classpath,\n security_policy=security_policy,\n timeout=self.junit4_timeout,\n )\n results.append(test_result)\n return results", "def _run_commands(self, command_list):\n for cmd in command_list:\n print(cmd)\n if not self.dry_run:\n run(cmd)", "def report(self, *reporters):\n if len(reporters) == 0:\n reporters = [c() for c in dexy.reporter.Reporter.plugins if c.ALLREPORTS]\n\n for reporter in reporters:\n self.log.debug(\"Running reporter %s\" % reporter.ALIASES[0])\n reporter.run(self)", "def run_run(self, cmds):\n pass", "def run(self, *args, **kwargs):\n if kwargs.pop('lazy', False):\n self._lazy_run = args, kwargs\n else:\n if len(args) == 0 and len(kwargs) == 0:\n args, kwargs = self._lazy_run\n for _ in 
self.gen_batch(*args, **kwargs):\n pass\n return self", "def run(commands):\n\n # Work out total steps to take\n max_steps = sum([c.steps for c in commands])\n\n count = 0\n while count != max_steps:\n for command in commands:\n # we want to interleave the commands\n if command.steps > 0:\n command.stepper.step(1, command.direction)\n command.steps -= 1\n count += 1", "def main(args: argparse) -> None:\n threads = args.threads\n\n for thread in range(threads):\n canvas = canvas_init(args, thread)\n seeds = tools.load_seeds_from_file(args.seed, args.manual_seed)\n\n # Main process\n canvas_thread = threading.Thread(target=run,\n args=(args, canvas, seeds, thread,\n threads))\n canvas_thread.start()", "def run(self, args):\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(\n args)\n except IOError:\n # This is raised if --test-list doesn't exist\n return test_run_results.RunDetails(\n exit_code=exit_codes.NO_TESTS_EXIT_STATUS)\n\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n # Restore the test order to user specified order.\n # base.tests() may change the order as it returns tests in the\n # real, external/wpt, virtual order.\n if paths:\n test_names = self._restore_order(paths, test_names)\n\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = test_expectations.TestExpectations(self._port)\n\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n\n self._printer.print_found(\n len(all_test_names), len(test_names), len(tests_to_run),\n self._options.repeat_each, self._options.iterations)\n\n # Check to make sure we're not skipping every test.\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n # Keep executing to produce valid (but empty) results.\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n\n if self._options.num_retries is None:\n # If --test-list is passed, or if no test narrowing is specified,\n # default to 3 retries. Otherwise [e.g. 
if tests are being passed by\n # name], default to 0 retries.\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n\n should_retry_failures = self._options.num_retries > 0\n\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run, tests_to_skip,\n should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info(\"Finally stop servers and clean up\")\n self._stop_servers()\n self._clean_up_run()\n\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n\n # Some crash logs can take a long time to be written out so look\n # for new logs after the test run finishes.\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n self._look_for_new_crash_logs(retry_attempt_results, start_time)\n\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(\n self._port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(\n self._port,\n self._options,\n self._expectations,\n initial_results,\n all_retry_results,\n only_include_failing=True)\n run_histories = test_run_results.test_run_histories(\n self._options, self._expectations, initial_results,\n all_retry_results)\n\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n\n if not self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is\n test_run_results.InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if (self._options.show_results\n and (exit_code or initial_results.total_failures)):\n self._port.show_results_html_file(\n self._filesystem.join(self._artifacts_directory,\n 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n\n return test_run_results.RunDetails(exit_code, summarized_full_results,\n summarized_failing_results,\n initial_results, all_retry_results)", "async def run_requests(self):\n loop = asyncio.get_event_loop()\n tasks = []\n async with aiohttp.ClientSession(connector=self.connector) as session:\n\n for index, id in enumerate(self.ids):\n if id not in self.processed_ids:\n url = self.base_url + id\n auth_token = base64.b64encode(id.encode('ascii'))\n header = {\"Authorization\": auth_token.decode('UTF-8')}\n tasks.append(asyncio.ensure_future(self._request_one(url=url, header=header, id=id, index = index, session = session)))\n\n _ = await asyncio.gather(*tasks)", "def main():\n print(\"runner\")\n runner = Runner()\n stop_on_idle = True\n probes = []\n for url in urls:\n probe_cls = random.choice((HttpProbe, ThreadProbe, 
ShellProbe))\n runner.probes.append(probe_cls(url))\n\n runner.run()", "def run_tests(self):\n with self.report.timer.record(\"run\"):\n self.result.report.extend(self._run_tests())", "def run_tests(self):\n\n # log\n self.logger.debug('\\n\\nExecute test methods:\\n-----------------------------')\n\n # test methods start here\n # ------------------------------------------------------------------\n # ------------------------------------------------------------------\n\n # dummy_method\n self.dummy_method()\n\n # ------------------------------------------------------------------\n # ------------------------------------------------------------------\n # test methods end here\n\n # log\n self.logger.debug('\\n\\n-----------------------------\\nFinished test methods.')", "def swait_multiple(cos):\n asyncio.get_event_loop().run_until_complete(asyncio.wait(cos))", "def multiple_qc_test_run(pg_driver):\n # add\n with pg_driver.session_scope() as sxn:\n tr = models.qcreport.TestRun(project_id=\"CGCI-BLGSP\")\n tr.entity_id = str(uuid.uuid4())\n tr.test_type = \"aliquots\"\n tr.status = \"SUCCESS\"\n tr.is_stale = False\n sxn.add(tr)\n\n tr_2 = models.qcreport.TestRun(project_id=\"CGCI-BLGSP\")\n tr_2.entity_id = tr.entity_id\n tr_2.test_type = \"aliquots\"\n tr_2.status = \"ERROR\"\n tr_2.is_stale = False\n sxn.add(tr_2)\n\n yield\n\n # clean up\n cleanup_records(pg_driver, models.qcreport.TestRun, [tr.id, tr_2.id])", "def run (self, bioseqs, *clargs):\t\t\n\t\t## Preconditions:\n\t\tassert (2 <= len (bioseqs))\n\t\t## Main:\n\t\tself._inseqs = bioseqs\n\t\tself.call_cmdline (*clargs)", "def test_multiple_games(self, iteration=10):\n # TODO: multithread?\n for i in range(iteration):\n self.test_one_game()", "def in_parallel(*args):\n \n # Execute each in a thread and return them all.\n return ThreadPool(len(args)).map(lambda x: x(), args)", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def run_sagemaker_tests(images):\n if not images:\n return\n pool_number = len(images)\n with Pool(pool_number) as p:\n p.map(run_sagemaker_pytest_cmd, images)", "def run_combined(self):\n self.runtest_autokey()\n self.runtest_mediaresource()\n self.runtest_composite_slug()\n self.runtest_all_types()\n self.runtest_complex_types()\n self.runtest_only_key()\n self.runtest_compound_key()\n self.runtest_simple_select()\n self.runtest_paging()\n self.runtest_nav_o2o()\n self.runtest_nav_o2o_1()\n self.runtest_nav_zo2o()\n self.runtest_nav_zo2o_f()\n self.runtest_nav_zo2o_b()\n self.runtest_nav_many2o()\n self.runtest_nav_many2o_f()\n self.runtest_nav_many2o_b()\n self.runtest_nav_many2zo()\n self.runtest_nav_many2zo_f()\n self.runtest_nav_many2zo_b()\n self.runtest_nav_many2zo_r()\n self.runtest_nav_many2zo_rf()\n self.runtest_nav_many2zo_rb()\n self.runtest_nav_many2many()\n self.runtest_nav_many2many_1()\n self.runtest_nav_many2many_r()\n self.runtest_nav_many2many_r1()", "def RunStages(self):\n self._RunStage(build_stages.InitSDKStage)\n self.RunSetupBoard()\n self._RunStage(report_stages.RefreshPackageStatusStage)" ]
[ "0.6145495", "0.60319495", "0.59287375", "0.5832656", "0.5831884", "0.5669822", "0.5645586", "0.56302845", "0.5623436", "0.5600219", "0.5553108", "0.55371404", "0.55194783", "0.55185175", "0.55181295", "0.55050325", "0.5497241", "0.54962456", "0.54926574", "0.5487635", "0.54862726", "0.5471837", "0.54647636", "0.54445213", "0.5441109", "0.54358506", "0.5423685", "0.5411242", "0.54008925", "0.5395814", "0.5393839", "0.5380825", "0.5371908", "0.53708667", "0.53595454", "0.5352173", "0.5335893", "0.53313506", "0.5322408", "0.531242", "0.5292121", "0.52699065", "0.5267155", "0.52369654", "0.52253777", "0.5202755", "0.5202184", "0.5190896", "0.5185695", "0.51490927", "0.5147363", "0.5143807", "0.51397085", "0.51273644", "0.5117103", "0.5115019", "0.5111611", "0.5111611", "0.5108152", "0.5097995", "0.5090849", "0.50844055", "0.50576556", "0.50530267", "0.5053025", "0.50523984", "0.50523984", "0.5041532", "0.50400066", "0.50398844", "0.5039002", "0.502529", "0.5023836", "0.5022762", "0.5016552", "0.5013958", "0.50127465", "0.5009267", "0.5007956", "0.5007151", "0.5000561", "0.49990848", "0.4997308", "0.49967706", "0.4989397", "0.49888253", "0.4984735", "0.4981664", "0.49806243", "0.49670485", "0.4966564", "0.4963097", "0.49524587", "0.49441797", "0.4944048", "0.49405372", "0.493707", "0.49320123", "0.49316248", "0.4930983" ]
0.56467295
6
Applies post processing to all the outputs in the provided run results. This is a convenience function to avoid the need for manual iteration over the run_results dictionary.
def postprocess(run_results, postprocess_func): G_LOGGER.start(f"Applying post-processing to outputs: {postprocess_func.__name__}") for _, iteration_results in run_results: for index, iter_res in enumerate(iteration_results): iteration_results[index] = postprocess_func(iter_res) G_LOGGER.finish("Finished applying post-processing") return run_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, results):\n raise NotImplementedError", "def process_results(self, results=None, **value): # pragma: no cover\n return default_result_processor(results=results, **value)", "def _post_process_result(result: Any) -> Any:\n return result", "def process_results(self, response, results):\n return results", "def process_results(self, response, results):\n return results", "def decode_results(self, outputs):\n ...", "def _postprocess(self, responses):\n for idx, response in enumerate(responses):\n responses[idx] = {'id': response[0],\n 'text': self.target_test[response[0]]}\n\n for jdx, score in enumerate(response[1:]):\n responses[idx]['score_' + str(jdx)] = response[1:][jdx]\n\n return responses", "def postprocess_result(self):\n output_file = self.analyzer_result_file\n LOG.debug_analyzer(self.analyzer_stdout)\n tidy_stdout = self.analyzer_stdout.splitlines()\n generate_plist_from_tidy_result(output_file, tidy_stdout)\n\n if self.report_hash_type == 'context-free':\n report.use_context_free_hashes(output_file)", "def postprocess(\n self,\n preds: Any,\n visualization: List[np.ndarray],\n return_datasample=False,\n **kwargs,\n ) -> dict:", "def __call__(self, results):\n\n results = self._mixup_transform(results)\n return results", "def post_postprocessor(result=None, **kw):\n logger.info(\"start post_postprocessor\")\n logger.info(result)\n logger.info(\"end post_postprocessor\")\n pass", "def __call__(self, results):\n for key in results.get('img_fields', ['img']):\n results[key] = general_ocr.imnormalize(results[key], self.mean, self.std,\n self.to_rgb)\n results['img_norm_cfg'] = dict(\n mean=self.mean, std=self.std, to_rgb=self.to_rgb)\n return results", "def postprocess(self, predicted_output, original_input=None, stats=None,\n **kwargs):\n pass", "def post_process(self, res):\n # some lists are better converted to numpy arrays\n convert_to_arr = (\n 'prediction_rank',\n 'cumulative_area',\n 'prediction_values',\n 'cumulative_crime',\n 'cumulative_crime_count',\n 'cumulative_crime_max',\n 'pai'\n )\n for k in convert_to_arr:\n if k in res:\n # this allows for optional components such as prediction values\n res[k] = np.array(res[k])", "def process_pr_results(self, results_files, custom_report):\n \n\n \n output_file = open(os.path.join(self.path, 'raw_results.txt'), 'w')\n \n #Keep track of the last read line before a newline; this will be the best value from an optimization run\n last_line = ''\n #Match a string of the format ( 0.0995749 0.101685 0.108192 0.091224 ) 0.091224 0 100\n #Contains parameter values, the best optimization value, the cpu time, and some other values, e.g. particle numbers that Copasi likes to add. 
These could be removed, but they seem useful.\n output_string = r'.*\\(\\s(?P<params>.+)\\s\\)\\s+(?P<best_value>\\S+)\\s+(?P<cpu_time>\\S+)\\s+(?P<function_evals>\\S+)\\.*'\n output_re = re.compile(output_string)\n \n best_value = None\n best_line = None\n \n #Copy the contents of the first file to results.txt\n for line in open(os.path.join(self.path, results_files[0]), 'r'):\n output_file.write(line)\n try:\n if line != '\\n':\n if output_re.match(line):\n current_value = float(output_re.match(line).groupdict()['best_value'])\n if best_value != None:\n if current_value < best_value:\n best_value = current_value\n best_line = line\n elif best_value == None:\n best_value = current_value\n best_line = line\n else:\n pass\n except Exception as e:\n if custom_report:\n pass\n else:\n raise e\n \n #And for all other files, copy everything but the last line\n for filename in results_files[1:]:\n firstLine = True\n for line in open(os.path.join(self.path, filename), 'r'):\n if not firstLine:\n output_file.write(line)\n try:\n if line != '\\n':\n if output_re.match(line):\n current_value = float(output_re.match(line).groupdict()['best_value'])\n if current_value < best_value:\n best_value = current_value\n best_line = line\n else:\n pass\n except Exception as e:\n if custom_report:\n pass\n else:\n raise e\n firstLine = False\n \n \n output_file.close()\n \n #Write the best value to results.txt\n output_file = open(os.path.join(self.path, 'results.txt'), 'w')\n \n output_file.write('Best value\\tCPU time\\tFunction evals\\t')\n \n for parameter in self.get_parameter_estimation_parameters():\n\n output_file.write(parameter[0].encode('utf8'))\n output_file.write('\\t')\n output_file.write('\\n')\n\n best_line_dict = output_re.match(best_line).groupdict()\n\n output_file.write(best_line_dict['best_value'])\n output_file.write('\\t')\n output_file.write(best_line_dict['cpu_time'])\n output_file.write('\\t')\n output_file.write(best_line_dict['function_evals'])\n output_file.write('\\t')\n \n for parameter in best_line_dict['params'].split('\\t'):\n output_file.write(parameter)\n output_file.write('\\t')\n output_file.close()\n \n if best_value != None:\n return True\n else:\n return False", "def update_results(self, results):\n pass", "def transform(self, results: Dict) -> Dict:\n\n # Apply mapping\n inputs = self._map_input(results, self.mapping)\n # Apply wrapped transforms\n outputs = self._apply_transforms(inputs)\n # Apply remapping\n outputs = self._map_output(outputs, self.remapping)\n\n results.update(outputs) # type: ignore\n return results", "def _process_results(self, timestamp, results):\n\n topic_value = self.create_topic_values(results)\n\n _log.debug('Processing Results!')\n if mode:\n _log.debug(\"ACTUATE ON DEVICE.\")\n actuator_error = False\n if make_reservations and results.devices:\n results, actuator_error = self.actuator_request(results)\n if not actuator_error:\n self.actuator_set(topic_value)\n if make_reservations and results.devices and not actuator_error:\n self.actuator_cancel()\n\n for value in results.log_messages:\n _log.debug(\"LOG: {}\".format(value))\n for key, value in results.table_output.items():\n _log.debug(\"TABLE: {}->{}\".format(key, value))\n if output_file_prefix is not None:\n results = self.create_file_output(results)\n if command_output_file is not None:\n self.create_command_file_output(timestamp, topic_value)\n # if len(results.table_output.keys()):\n # results = self.publish_analysis_results(results)\n return results", "def post_task_run(self, 
results, extra_events: Optional[dict] = None):\n\n if extra_events is None:\n extra_events = {}\n\n # No need to expose the RETURN_KEYS_KEY\n try:\n del results[RETURN_KEYS_KEY]\n except (TypeError, KeyError):\n pass\n\n # Print the post-call header\n self.print_postcall_header(results)\n\n # Send a custom task-succeeded event with the results\n if not self.request.called_directly:\n self.send_event('task-results', firex_result=convert_to_serializable(results), **extra_events)\n self.send_firex_data(self.abog)", "def _map_output_parameters(self, results, algorithm):\n if results is not None:\n\n # update python data objects\n for result_name in results:\n result_type = algorithm.get_type_from_output_name(result_name)\n if result_type is None:\n raise exceptions.PacmanTypeError(\n \"Unrecognised result name {} for algorithm {} with \"\n \"outputs {}\".format(\n result_name, algorithm.algorithm_id,\n algorithm.outputs))\n self._internal_type_mapping[result_type] = results[result_name]\n elif len(algorithm.outputs) != 0:\n raise exceptions.PacmanAlgorithmFailedToGenerateOutputsException(\n \"Algorithm {} did not generate any outputs\".format(\n algorithm.algorithm_id))", "def process_results(self):\n return self._do_action_under_lock(self._process_results)", "def transform(self, results: Dict) -> Optional[Dict]:\n if self.random_apply():\n return self.transforms(results) # type: ignore\n else:\n return results", "def postProcessOutput(self):\n\n logging.info(\" ========> Analysis %20s called postProcessOutput:\"%(self.name))\n\n if self.checkExpectedOutputFiles() == False:\n raise Exception(\"Missing expected output files. Number missing are [%d]\"%(len(self.missing_output_files)))\n\n FileUtils.checkDirExists(self.output_dir)\n\n tmpfiles = []\n\n logging.info(\" ========> Analysis %20s called postProcessOutput: Moving files from %s to %s \"%(self.name,self.working_dir,self.output_dir))\n try:\n for srcfile in self.expected_output_files:\n\n fullsrcfile = os.path.join(self.working_dir,srcfile)\n destfile = os.path.join(self.output_dir,srcfile)\n\n FileUtils.checkDirExistsForFile(destfile)\n\n res = shutil.move(fullsrcfile,destfile)\n\n if res == None:\n res = \"OK\"\n else:\n res = \"FAILED\"\n\n print \"Checking %s\"%destfile\n tmpfiles.append(destfile)\n \n logging.info(\" ========> Analysis %20s called postProcessOutput: Result of file move for %s = %s\" % (self.name,srcfile,res))\n\n except Exception as e:\n logging.info(\" ========> Analysis %20s file move failed %s\"%(self.name,e))\n raise\n\n self.output_files = tmpfiles\n\n for f in self.temp_output_files:\n logging.info(\" ========> Analysis %20s removing temp file %s \"%(self.name,f))\n\t res = os.remove(f)", "def __call__(self, results):\n\n for key in results.get('seg_fields', []):\n if self.scale_factor != 1:\n results[key] = general_ocr.imrescale(\n results[key],\n self.scale_factor,\n interpolation='nearest',\n backend=self.backend)\n return results", "def postprocess_model_outputs(self, predictions, expected):\n\n predictions = {k: t.numpy() for k, t in predictions.items()}\n\n return predictions, expected", "def postprocess(self, inference_output):\n ret = []\n quantiles = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n # for each request\n for inference_output_request in inference_output:\n ret_request = []\n # for each time series\n for i in inference_output_request:\n l = {}\n l[\"item_id\"] = i.item_id\n l[\"quantiles\"] = {}\n for q in quantiles:\n l[\"quantiles\"][str(q)] = i.quantile(q).tolist()\n l[\"mean\"] = 
i.mean.tolist()\n ret_request.append(json.dumps(l))\n ret.append('\\n'.join(ret_request) + '\\n')\n return ret", "def transform(self, results: Dict) -> Optional[Dict]:\n for t in self.transforms:\n results = t(results) # type: ignore\n if results is None:\n return None\n return results", "def compute_metrics(self, results: list) -> dict:\n dump(results, self.out_file_path)\n print_log(\n f'Results has been saved to {self.out_file_path}.',\n logger='current')\n return {}", "def postprocess_model_outputs(self, predictions, expected):\n\n for key, val in predictions.items():\n predictions[key] = val.numpy()\n\n for key, val in expected.items():\n expected[key] = val.numpy()\n\n return predictions, expected", "def update_running_totals_from_load_step_results(self, results: dict) -> None:\n for result in results[\"step_results\"].values():\n sobject_name = result[\"sobject\"]\n totals = self.sobject_counts[sobject_name]\n totals.errors += result[\"total_row_errors\"]\n totals.successes += result[\"records_processed\"] - result[\"total_row_errors\"]", "def _postprocess(self, output: Dict[str, np.ndarray]):\n # Slice to remove padding, omitting initial [CLS] and final [SEP]\n slicer = slice(1, output.pop(\"ntok\") - 1)\n output[\"tokens\"] = self.tokenizer.convert_ids_to_tokens(\n output.pop(\"input_ids\")[slicer])\n probas = output.pop(\"probas\")\n\n # Predictions at every position, regardless of masking.\n output[\"pred_tokens\"] = self._get_topk_tokens(probas[slicer]) # pytype: disable=container-type-mismatch\n\n return output", "def process_results(self, results):\n issues = {}\n for service in SERVICE_RESOURCES:\n for agent in SERVICE_RESOURCES[service]['daemons']:\n _results = results.find_by_tag(agent)\n ret = self.get_exceptions_results(_results)\n if ret:\n if service not in issues:\n issues[service] = {}\n\n issues[service][agent] = ret\n\n if issues:\n self._output['agent-exceptions'] = issues", "def results(self, results):\n self._results = results", "def results(self, results):\n self._results = results", "def post_process(self, xout, params_out):\n # Should be used by all methods matching \"solve_*\"\n for post_processor in self.post_processors:\n xout, params_out = post_processor(xout, params_out)\n return xout, params_out", "def after_test(self, test_results):\n pass", "def postprocess(self, inference_output):\n logger.info(inference_output)\n return inference_output", "def post_result(self, halt_on_failure=False):\n self.api.m.perf_dashboard.set_default_config()\n self.api.m.perf_dashboard.post_bisect_results(\n self.get_result(), halt_on_failure)", "def __call__(self, results):\n self._pad_img(results)\n self._pad_masks(results)\n self._pad_seg(results)\n return results", "def transform(self, results: Dict) -> Optional[Dict]:\n idx = self.random_pipeline_index()\n return self.transforms[idx](results)", "def add_results(self, results):\n if self.replication_counter < self.replication_num:\n for metric in self.metrics:\n self.metric_final_results[metric].append(results[metric])\n\n self.replication_counter += 1\n else:\n raise Exception(\"The requested metric collection call of {}/{} exceeds the number of pre-defined replication\".format(self.replication_counter, self.replication_num))", "def results(self, results):\n\n self._results = results", "def results(self, results):\n\n self._results = results", "def results(self, results):\n\n self._results = results", "def results(self, results):\n\n self._results = results", "def results(self, results):\n\n self._results = results", 
"def prepare_results(self) -> dict:\n if not hasattr(self, \"results\"):\n raise AttributeError(\n \"Results have not been finalized. Please call \"\n \"finalize_results() before saving output.\"\n )\n\n output = {\n \"armory_version\": armory.__version__,\n \"config\": self.config,\n \"results\": self.results,\n \"timestamp\": int(self.time_stamp),\n }\n return output", "def format_results(self, results_dict, data, cached_data):\n new_results = {}\n status = self.STATUS_MAP[results_dict['info']['exitFlag']]\n new_results[s.STATUS] = status\n\n # Timing data\n new_results[s.SOLVE_TIME] = results_dict[\"info\"][\"timing\"][\"tsolve\"]\n new_results[s.SETUP_TIME] = results_dict[\"info\"][\"timing\"][\"tsetup\"]\n new_results[s.NUM_ITERS] = results_dict[\"info\"][\"iter\"]\n\n if new_results[s.STATUS] in s.SOLUTION_PRESENT:\n primal_val = results_dict['info']['pcost']\n new_results[s.VALUE] = primal_val + data[s.OFFSET]\n new_results[s.PRIMAL] = results_dict['x']\n new_results[s.EQ_DUAL] = results_dict['y']\n new_results[s.INEQ_DUAL] = results_dict['z']\n\n return new_results", "def _update_result(self, results, clf_numbers):\n # ToDo make results of scoring values dynamic\n names_results = ['Accuracy']\n for number in clf_numbers:\n for name in names_results:\n if name not in self.results:\n self.results[name] = [results[number][name + \"_test_score_\" + str(number)]]\n else:\n self.results[name].append(results[number][name + \"_test_score_\" + str(number)])", "def addMultiResults(self, results, index):\n # if no return from site, seed the results with an empty list\n if results is None or len(results) == 0:\n self._results[index] = None\n else:\n self._results[index] = results", "def chainercv_postprocess_pack_each_item(results):\n bboxes, labels, scores = results\n\n # loop over the results and add them to the list of\n # returned predictions\n predictions = []\n for index, bbox in enumerate(bboxes[0]):\n r = {\"class\": str(voc_bbox_label_names[int(labels[0][index])]),\n \"bbox\": {\n \"ymin\": str(bbox[0]),\n \"xmin\": str(bbox[1]),\n \"ymax\": str(bbox[2]),\n \"xmax\": str(bbox[3])\n },\n \"probability\": str(scores[0][index])\n }\n predictions.append(r)\n\n return predictions", "def process_results(self):\n processes = {\"*.csv\": _process_csv}\n custom_processes = self.custom_processes\n if custom_processes:\n processes.update(custom_processes)\n\n try:\n results = []\n for glob, process in processes.items():\n results.extend(\n [\n (\n file.basename(),\n process(\n file,\n working_dir=os.getcwd(),\n simulname=self.output_prefix,\n ),\n )\n for file in self.simulation_dir.files(glob)\n ]\n )\n except FileNotFoundError:\n raise ValueError(\"No results to process. 
Have you called IDF.simulate()?\")\n else:\n return results", "def add_results_to_results_dict(self, config_no, results):\n assert len(results) == len(self.results_metadata[\"names\"])\n for name, result in zip(self.results_metadata[\"names\"], results):\n self.results_dict[config_no][name].append(result)", "def handle_result(self, results: List[Dict], **info):\n pass", "def reconcile(self, batch_results, patch_centers, patch_sizes):\n final_results = {}\n if len(batch_results) == 0: # Empty batch\n return final_results\n\n # UResNet predictions\n if 'predictions' and 'scores' and 'softmax' in batch_results[0]:\n final_voxels = np.array([], dtype=np.int32).reshape(0, 3) # Shape N_voxels x dim\n final_scores = np.array([], dtype=np.float32).reshape(0, self.cfg.NUM_CLASSES) # Shape N_voxels x num_classes\n final_counts = np.array([], dtype=np.int32).reshape(0,) # Shape N_voxels x 1\n for i, result in enumerate(batch_results):\n # Extract voxel and voxel values\n # Shape N_voxels x dim\n v, values = extract_voxels(result['predictions'])\n # Extract corresponding softmax scores\n # Shape N_voxels x num_classes\n scores = result['softmax'][v[:, 0], v[:, 1], v[:, 2], :]\n # Restore original blob coordinates\n v = (v + np.flipud(patch_centers[i]) - patch_sizes[i] / 2.0).astype(np.int64)\n v = np.clip(v, 0, self.cfg.IMAGE_SIZE-1)\n # indices are indices of the *first* occurrences of the unique values\n # hence for doublons they are indices in final_voxels\n # We assume the only overlap that can occur is between\n # final_voxels and v, not inside these arrays themselves\n n = final_voxels.shape[0]\n final_voxels, indices, counts = np.unique(np.concatenate([final_voxels, v], axis=0), axis=0, return_index=True, return_counts=True)\n final_scores = np.concatenate([final_scores, scores], axis=0)[indices]\n lower_indices = indices[indices < n]\n upper_indices = indices[indices >= n]\n final_counts[lower_indices] += counts[lower_indices] - 1\n final_counts = np.concatenate([final_counts, np.ones((upper_indices.shape[0],))], axis=0)\n\n final_scores = final_scores / final_counts[:, np.newaxis] # Compute average\n final_predictions = np.argmax(final_scores, axis=1)\n final_results['predictions'] = np.zeros((self.cfg.IMAGE_SIZE,) * 3)\n final_results['predictions'][final_voxels.T[0], final_voxels.T[1], final_voxels.T[2]] = final_predictions\n final_results['scores'] = np.zeros((self.cfg.IMAGE_SIZE,) * 3)\n final_results['scores'][final_voxels.T[0], final_voxels.T[1], final_voxels.T[2]] = final_scores[np.arange(final_scores.shape[0]), final_predictions]\n final_results['softmax'] = np.zeros((self.cfg.IMAGE_SIZE,) * 3 + (self.cfg.NUM_CLASSES,))\n final_results['softmax'][final_voxels.T[0], final_voxels.T[1], final_voxels.T[2], :] = final_scores\n final_results['predictions'] = final_results['predictions'][np.newaxis, ...]\n\n # PPN\n if 'im_proposals' and 'im_scores' and 'im_labels' and 'rois' in batch_results[0]:\n # print(batch_results[0]['im_proposals'].shape, batch_results[0]['im_scores'].shape, batch_results[0]['im_labels'].shape, batch_results[0]['rois'].shape)\n final_im_proposals = np.array([], dtype=np.float32).reshape(0, 3)\n final_im_scores = np.array([], dtype=np.float32).reshape(0,)\n final_im_labels = np.array([], dtype=np.int32).reshape(0,)\n final_rois = np.array([], dtype=np.float32).reshape(0, 3)\n for i, result in enumerate(batch_results):\n im_proposals = result['im_proposals'] + np.flipud(patch_centers[i]) - patch_sizes[i] / 2.0\n im_proposals = np.clip(im_proposals, 0, 
self.cfg.IMAGE_SIZE-1)\n # print(final_im_proposals, im_proposals)\n final_im_proposals = np.concatenate([final_im_proposals, im_proposals], axis=0)\n final_im_scores = np.concatenate([final_im_scores, result['im_scores']], axis=0)\n final_im_labels = np.concatenate([final_im_labels, result['im_labels']], axis=0)\n rois = result['rois'] + (np.flipud(patch_centers[i]) - patch_sizes[i] / 2.0) / (self.cfg.dim1 * self.cfg.dim2)\n rois = np.clip(rois, 0, self.cfg.IMAGE_SIZE-1)\n final_rois = np.concatenate([final_rois, rois], axis=0)\n final_results['im_proposals'] = np.array(final_im_proposals)\n final_results['im_scores'] = np.array(final_im_scores)\n final_results['im_labels'] = np.array(final_im_labels)\n final_results['rois'] = np.array(final_rois)\n\n # Try thresholding\n # index = np.where(final_results['im_scores'] > 1e-3)\n # final_results['im_proposals'] = final_results['im_proposals'][index, :]\n # final_results['im_scores'] = final_results['im_scores'][index]\n # final_results['im_labels'] = final_results['im_labels'][index]\n\n return final_results", "def combine_eval_results_trials(\n self,\n result_dirs: List[Path],\n output_result_dir: Path,\n ) -> NoReturn:\n pass", "def set_results(self, results, unique_keys):\n self._results = results\n self._compute_logic()\n\n for _, query in enumerate(self._results):\n\n flat = query.flatten_results(unique_keys)\n filename = 'flattened_{0}.csv'.format('_'.join(sorted(query.in_sets)))\n flat.to_csv(\n os.path.join(\n Configuration().csv.output_directory,\n '{0}'.format(filename)\n ),\n sep='\\t'\n )", "def _process_results(self):\n self.portfolio.create_backtest_result_dataframe()\n stats = self._show_stats()\n return stats", "def _reformat_results(self, results, strategy='BE_LOOP'):\n if self._verbose:\n print('Strategy to use: {}'.format(strategy))\n # Create an empty array to store the guess parameters\n if self._verbose:\n print('Raw results and compound Loop vector of shape {}'.format(len(results)))\n\n if strategy in ['BE_LOOP']:\n temp = np.array([np.hstack([result.x, result.fun]) for result in results])\n temp = stack_real_to_compound(temp, loop_fit32)\n return temp", "def collectOutput(self, finishedJob, output):\n evaluation = finishedJob.getEvaluation()\n realizations = evaluation[1]\n for rlz in realizations:\n output.addRealization(rlz)", "def post_process(self, relevant_targets):\r\n pass", "def compute_metrics(self, results: list) -> dict:", "def process_query_results(self, query_results: dict or list, analysis, observable: Observable) -> None:\n pass", "def handle_regression_results(self, results):\n # Make sure the regression results are a named map\n if not isinstance(results, dict) or not all(isinstance(key, str) for key in results):\n self.fail(\"Regression test '\" +\n self.get_test_method_name() +\n \"' didn't return a named map of regression results\")\n\n # Handle each result individually\n for name, result in results.items():\n self.handle_regression_result(name, result)", "def process_results(self, episode, eval):\n if episode % 10 == 9:\n ave = np.mean(self.scores[episode - 9:episode])\n print('Episodes: {}, AveScores: {}, Alpha: {}, Steps: {}'.format(\n episode + 1, ave, self.alpha.item(), self.step_count))\n if eval:\n if episode % 100 == 99:\n s1 = './' + self.game_name + '/'\n np.save(s1 + 'scores_eval{}.npy'.format(episode + 1), self.scores)\n print('Evaluation results saved!')\n else:\n if episode % 200 == 199:\n self.save_episode_models(episode)\n self.plot_array(episode)\n print('Model salved!')\n 
print('Total {} frames!'.format(self.frames_count))", "def process_sceneset_results(self, training_results, validation_results,\n tmp_dir):\n pass", "def __save_relevants_in_results(self, exec_result, total: bool = False) -> None:\n current_idx = self.num_res\n # print(\"Current index: {}\".format(current_idx))\n self.num_res += len(exec_result['search-results']['entry'])\n # print(\"[Before saving in results dict] Number of current results: {}\".format(self.num_res))\n if total:\n self.results[\"total_results\"] = int(exec_result['search-results']['opensearch:totalResults'])\n for i, doc in enumerate(exec_result['search-results']['entry']):\n date_parts = self.__split_date(doc['prism:coverDate'][0]['$'])\n if \"authors\" in doc.keys():\n authors = self.__convert_authors(doc[\"authors\"])\n else:\n authors = \"\"\n self.results[\"documents\"].append(dict())\n self.results[\"documents\"][current_idx+i][\"eid\"] = doc['eid']\n self.results[\"documents\"][current_idx+i][\"title\"] = self.__prepare_title(doc[\"dc:title\"])\n self.results[\"documents\"][current_idx+i][\"authors\"] = authors\n self.results[\"documents\"][current_idx+i][\"date\"] = doc['prism:coverDate'][0]['$']\n self.results[\"documents\"][current_idx+i][\"year\"] = date_parts[0]\n self.results[\"documents\"][current_idx+i][\"month\"] = date_parts[1]\n self.results[\"documents\"][current_idx+i][\"day\"] = date_parts[2]", "def reduce(model_results, out, params):\n import numpy as np\n for n, (ps, activation_results) in model_results:\n\n if not activation_results:\n out.add(n, (ps, None))\n else:\n out.add(n, (ps, activation_results))", "def __call__(self, command_result):\n if not self.enabled:\n return command_result\n\n changes = 0\n for output_name in self.output_parts:\n output = getattr(command_result, output_name)\n if output and self.matches_output(output):\n changed, new_output = self.process_output(output)\n if changed:\n changes += 1\n setattr(command_result, output_name, new_output)\n\n if changes:\n # -- RESET: Composite output\n # pylint: disable=protected-access\n command_result._output = None\n return command_result", "def postprocess(self, prediction_dict, **params):\r\n pass", "def addResults(self, results):\n if results is None or len(results) == 0:\n self._results = None\n else:\n self._results = results", "def _process_output(self, driver_output):\n fs = self._port._filesystem\n failures = self._handle_error(driver_output)\n expected_driver_output = self._expected_driver_output()\n\n # Check the output and save the results.\n start_time = time.time()\n time_for_diffs = {}\n for test_type in self._test_types:\n start_diff_time = time.time()\n new_failures = test_type.compare_output(\n self._port, self._filename, self._options, driver_output,\n expected_driver_output)\n # Don't add any more failures if we already have a crash, so we don't\n # double-report those tests. 
We do double-report for timeouts since\n # we still want to see the text and image output.\n if not driver_output.crash:\n failures.extend(new_failures)\n test_result_writer.write_test_result(\n self._port, self._options.results_directory, self._filename,\n driver_output, expected_driver_output, new_failures)\n time_for_diffs[test_type.__class__.__name__] = (\n time.time() - start_diff_time)\n\n total_time_for_all_diffs = time.time() - start_diff_time\n return TestResult(self._filename, failures, driver_output.test_time,\n total_time_for_all_diffs, time_for_diffs)", "def __call__(self, results):\n\n results = self._mosaic_transform(results)\n return results", "def process_output_reports(results, analysis, date_now):\n #PLUG_INS[analysis.plug_in].set_data(analysis.title, file_path, results)\n output = PLUG_INS[analysis.plug_in]()\n file_path = settings.REPORT_PATH+\"/analysis%s_%s_%s_%s_%s_%s_%s\" % (analysis.id, date_now.year, date_now.month, date_now.day, date_now.hour, date_now.minute, date_now.second)\n output.set_data(analysis.title, file_path, results)\n\n result = AnalysisResult(analysis=analysis, output=string.split(output.get_output_file(), \"/\")[-1], run_date=date_now)\n result.save() \n analysis.last_report = date_now\n analysis.save()\n return True", "def run(self):\n\n self.preprocess()\n self.restore_ratings()\n self.prepare_UI()\n self.loop_through_units()\n self.cleanup()\n\n print('\\nAll Done - results are available in:\\n\\t{}'.format(self.out_dir))", "def _append_results(self) -> None:\n self._t_mps.compute_traces(self._step, self._process_tensors)\n time = self.time(self._step)\n norm = self._t_mps.get_norm()\n bond_dimensions = self._t_mps.get_bond_dimensions()\n self._results['time'].append(time)\n self._results['norm'].append(norm)\n self._results['bond_dimensions'].append(bond_dimensions)\n for sites, dynamics in self._results['dynamics'].items():\n if isinstance(sites, int):\n sites_list = [sites]\n else:\n sites_list = list(sites)\n dynamics.add(\n time,\n self._t_mps.get_density_matrix(sites_list))\n self._t_mps.clear_traces()", "def postprocess( # type: ignore[override]\n self,\n result: Mapping[str, Optional[torch.Tensor]],\n *,\n img_size: Tuple[int, int],\n output_height: int,\n output_width: int,\n **kwargs: Any,\n ) -> Dict[str, Optional[torch.Tensor]]:\n r: Optional[torch.Tensor] = result.get(self.output_key, None) # image\n if r is None:\n return {self.output_key: None}\n r = r[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)\n r = torch.nn.functional.interpolate(\n r, size=(output_height, output_width), mode=\"bilinear\", align_corners=False,\n )[0]\n return {self.output_key: r}", "def postprocess( # type: ignore[override]\n self,\n result: Mapping[str, Optional[torch.Tensor]],\n *,\n img_size: Tuple[int, int],\n output_height: int,\n output_width: int,\n **kwargs: Any,\n ) -> Dict[str, Optional[torch.Tensor]]:\n r: Optional[torch.Tensor] = result.get(self.output_key, None) # image\n if r is None:\n return {self.output_key: None}\n r = r[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)\n r = torch.nn.functional.interpolate(\n r, size=(output_height, output_width), mode=\"bilinear\", align_corners=False,\n )[0]\n return {self.output_key: r}", "def postprocessing(self, postprocessing):\n\n self._postprocessing = postprocessing", "def _extract_results_from_run_history(self, run_history: RunHistory) -> None:\n\n for run_key, run_value in run_history.data.items():\n config = run_history.ids_config[run_key.config_id]\n self._update(config=config, 
run_key=run_key, run_value=run_value)\n\n self._check_null_in_optional_inference_choices()\n\n self.rank_opt_scores = scipy.stats.rankdata(\n -1 * self._metric._sign * self.opt_scores, # rank order\n method='min'\n )", "def post_apply(self): #pragma no cover\n for e in self.obs_queue:\n\n # translate operation wire labels to the device's wire labels\n device_wires = self.map_wires(e.wires)\n\n self.measure += \"set resultArray w/= {wires[0]} <- \".format(wires=device_wires.tolist())\n self.measure += self._observable_map[e.name].format(wires=device_wires.tolist())\n self.measure += \" \"\n\n self._source_code = PROGRAM.format(wires=self.num_wires, operations=self.prog, measurements=self.measure)\n self.qs = qsharp.compile(self._source_code)", "def process_results(self, results):\n for service in SERVICE_RESOURCES:\n for agent in SERVICE_RESOURCES[service][\"daemons\"]:\n self._process_agent_results(results, service, agent)\n\n return self._agent_log_issues", "def iterate_results(results, extract_fn):\n outputs = {}\n for environment, environment_results in results.items():\n if environment not in outputs:\n outputs[environment] = {}\n for experimental_setting, setting_results in environment_results.items():\n outputs[environment][experimental_setting] = []\n for config, seeds_results in setting_results.items():\n for seed, actual_results in seeds_results.items():\n output = extract_fn(actual_results)\n outputs[environment][experimental_setting].append(output)\n outputs[environment][experimental_setting] = np.array(outputs[environment][experimental_setting])\n return outputs", "def eval_results(self, passed_custom, override):\n errors = self.results.linter.stats.get('error', False)\n fatal = self.results.linter.stats.get('fatal', False)\n score = self.check_score()\n file_passed = True\n self.logging.info('\\n------------------------------------------------------------------\\n')\n self.logging.info('Your code has been rated at {0:.2f}/10\\n'.format(score))\n self.logging.info('\\n')\n self.logging.info('------------------------------------------------------------------')\n if fatal:\n self.logging.warning(\"FATAL ERROR(S) DETECTED IN {}.\".format(self.fname))\n file_passed = False\n if errors and not self.allow_errors:\n self.logging.warning(\"ERROR(S) DETECTED IN {}.\".format(self.fname))\n file_passed = False\n if score:\n file_passed = file_passed and self.check_threshold(score)\n if self.custom_rules and passed_custom != file_passed and override:\n self.logging.info(\"OVERRIDING STANDARD RESULT WITH CUSTOM FROM {} TO {}.\".format(file_passed,\n passed_custom))\n file_passed = passed_custom\n if not file_passed and self.ignore_tests and (\"test_\" in self.fname.split(\"/\")[-1] or \\\n \"tests.py\" in self.fname):\n self.logging.info(\"ASSUMING {} IS TEST FILE. 
ALLOWING.\".format(self.fname))\n self.logging.info('------------------------------------------------------------------\\n')\n elif file_passed:\n self.logging.info('FILE {} PASSED PYLINT, THRESHOLD {}'.format(self.fname, self.thresh))\n else:\n self.failed_files.append(self.fname)\n self.logging.warning('------------------------------------------------------------------')", "def post_process_result(self, result: np.ndarray) -> np.ndarray:\n to_cut = len(\"_tag\")\n return np.asarray([[tag[:-to_cut] for tag in list_of_tags] for list_of_tags in result])", "def _do_subject_normalize(output_dir,\n segment_result=None,\n do_report=True,\n results_gallery=None,\n progress_logger=None,\n brain=\"EPI\",\n cmap=None,\n fwhm=0,\n **spm_normalize_kwargs):\n\n nipype_report_filenames = []\n\n # sanity\n def get_norm_apply_to_files(files):\n if isinstance(files, basestring):\n norm_apply_to_files = files\n file_types = 'string'\n else:\n file_types = []\n norm_apply_to_files = []\n for x in files:\n if isinstance(x, basestring):\n norm_apply_to_files.append(x)\n file_types.append('string')\n else:\n norm_apply_to_files += x\n file_types.append(('list', len(x)))\n\n return norm_apply_to_files, file_types\n\n if 'apply_to_files' in spm_normalize_kwargs:\n spm_normalize_kwargs['apply_to_files'], file_types = \\\n get_norm_apply_to_files(spm_normalize_kwargs['apply_to_files'])\n\n output = {}\n\n # prepare for smart caching\n cache_dir = os.path.join(output_dir, 'cache_dir')\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n mem = Memory(base_dir=cache_dir)\n\n # run workflow\n normalize = mem.cache(spm.Normalize)\n norm_result = normalize(**spm_normalize_kwargs)\n\n # collect ouput\n output['result'] = norm_result\n if not norm_result.outputs is None:\n normalized_files = norm_result.outputs.normalized_files\n\n # define execution log html output filename\n execution_log_html_filename = os.path.join(\n output_dir,\n 'normalization_of_%s_execution_log.html' % brain,\n )\n\n # grab execution log\n execution_log = preproc_reporter.get_nipype_report(\n preproc_reporter.get_nipype_report_filename(normalized_files))\n\n # write execution log\n open(execution_log_html_filename, 'w').write(\n execution_log)\n\n # update progress bar\n if progress_logger:\n progress_logger.log(\n '<b>Normalization of %s</b><br/><br/>' % brain)\n progress_logger.log(execution_log)\n progress_logger.log('<hr/>')\n\n # do smoothing\n if np.sum(fwhm):\n smooth = mem.cache(spm.Smooth)\n smooth_result = smooth(\n in_files=output['result'].outputs.normalized_files,\n fwhm=fwhm)\n\n # collect ouput\n output['result'] = smooth_result\n if not smooth_result.outputs is None:\n normalized_files = smooth_result.outputs.smoothed_files\n output['normalized_files'] = normalized_files\n\n # grab execution log\n execution_log = preproc_reporter.get_nipype_report(\n preproc_reporter.get_nipype_report_filename(\n normalized_files))\n\n # write execution log\n open(execution_log_html_filename, 'w').write(\n execution_log)\n\n # update progress bar\n if progress_logger:\n progress_logger.log(\n '<b>Smoothening of %s</b><br/><br/>' % brain)\n progress_logger.log(execution_log)\n progress_logger.log('<hr/>')\n\n if 'apply_to_files' in spm_normalize_kwargs:\n if not isinstance(file_types, basestring):\n _tmp = []\n s = 0\n for x in file_types:\n if x == 'string':\n if isinstance(normalized_files, basestring):\n _tmp = normalized_files\n break\n else:\n _tmp.append(\n normalized_files[s])\n s += 1\n else:\n _tmp.append(\n normalized_files[s: s 
+ x[1]])\n s += x[1]\n\n normalized_files = _tmp\n\n output['normalized_files'] = normalized_files\n\n # generate gallery for HTML report\n if do_report:\n if normalized_files:\n if segment_result:\n subject_gm_file = segment_result.outputs.modulated_gm_image\n subject_wm_file = segment_result.outputs.modulated_wm_image\n subject_csf_file = segment_result.outputs.modulated_csf_image\n else:\n subject_gm_file = None\n subject_wm_file = None\n subject_csf_file = None\n\n nipype_report_filenames = [\n preproc_reporter.get_nipype_report_filename(\n subject_gm_file)] + nipype_report_filenames\n\n # generate normalization thumbs\n output.update(preproc_reporter.generate_normalization_thumbnails(\n normalized_files,\n output_dir,\n brain=brain,\n execution_log_html_filename=execution_log_html_filename,\n results_gallery=results_gallery,\n ))\n\n # generate segmentation thumbs\n output.update(preproc_reporter.generate_segmentation_thumbnails(\n normalized_files,\n output_dir,\n subject_gm_file=subject_gm_file,\n subject_wm_file=subject_wm_file,\n subject_csf_file=subject_csf_file,\n brain=brain,\n execution_log_html_filename=execution_log_html_filename,\n cmap=cmap,\n results_gallery=results_gallery))\n\n # collect ouput\n output['result'] = norm_result\n\n return output", "def extract_results(self, results: Any) -> dict:\n for nested_attribute in self.nested_results_parts:\n results = getattr(results, nested_attribute)\n return results if isinstance(results, dict) else results()", "def postprocess(self, data):\n if self.error is not None:\n return [self.error]\n\n # Iterating over inference results to render the normalized probabilities\n response = []\n for inference_result in data:\n softmax_result = inference_result.softmax().asnumpy()\n for idx, label in enumerate(self.labels):\n response.append({label: float(softmax_result[0][idx])})\n return [response]", "def emit_results(self, betas, rescaled_betas, gold_standard, priors):\n raise NotImplementedError # implement in subclass", "def update_results(failures, errors, case_):\n for check in case_.checks:\n if check.result == FAILURE:\n failures.append(check)\n elif check.result == ERROR:\n errors.append(check)", "def _compile_results(self):\n self.statements = stmts_from_json(self.__statement_jsons.values())\n if self.use_obtained_counts:\n self.__source_counts = get_available_source_counts(self.statements)\n self.__evidence_counts = get_available_ev_counts(self.statements)", "def poison_all_outputs(self) -> None:\n assert self.step is not None\n\n for pattern in sorted(self.step.output):\n formatted_pattern = fmt_capture(self.kwargs, optional(pattern))\n if is_phony(formatted_pattern):\n Invocation.poisoned.add(formatted_pattern)\n continue\n for path in glob_paths(optional(formatted_pattern)):\n Invocation.poisoned.add(path)\n global remove_failed_outputs # pylint: disable=invalid-name\n if remove_failed_outputs.value and not is_precious(path):\n Logger.file(f\"Remove the failed output: {path}\")\n Invocation.remove_output(path)", "def register_results(self, regressor, results):\n\n self.prediction_results[regressor] = results", "def _extract_results(self) -> None:\n metric_name = self.metric.name\n for inference_name in ['train', 'test', 'opt']:\n # TODO: Extract information from self.search_results\n data = getattr(self.search_results, f'{inference_name}_metric_dict')[metric_name]\n if all([d is None for d in data]):\n if inference_name not in OPTIONAL_INFERENCE_CHOICES:\n raise ValueError(f\"Expected {metric_name} score for 
{inference_name} set\"\n f\" to not be None, but got {data}\")\n else:\n continue\n self.data[f'single::{inference_name}::{metric_name}'] = np.array(data)\n\n if self.ensemble_results.empty() or inference_name == 'opt':\n continue\n\n data = getattr(self.ensemble_results, f'{inference_name}_scores')\n if all([d is None for d in data]):\n if inference_name not in OPTIONAL_INFERENCE_CHOICES:\n raise ValueError(f\"Expected {metric_name} score for {inference_name} set\"\n f\" to not be None, but got {data}\")\n else:\n continue\n self.data[f'ensemble::{inference_name}::{metric_name}'] = np.array(data)", "def postprocess_result(self, job: Job, result: Any, pre_call_hash: str) -> Any:\n postprocess_args = {\n \"pre_call_hash\": pre_call_hash,\n }\n\n def postprocess_value(value):\n value2 = self.type_registry.postprocess(value, postprocess_args)\n\n if isinstance(value, Handle) and not self.dryrun:\n # Handles accumulate state change from jobs that emit them.\n assert value2 != value\n self.backend.advance_handle([value], value2)\n\n return value2\n\n return map_nested_value(postprocess_value, result)", "def applyRunInfo(self, runInfo):\n self.runInfoDict = runInfo", "def get_results(self):\n self.report('Checking finished evaluations.')\n outputs = {}\n while self.indices_to_retrieve:\n idx = self.indices_to_retrieve.pop(0)\n key = self.eval_key(idx)\n self.report('Retrieving output for evaluation {}'.format(idx))\n eval_proc = self.ctx[key]\n if not eval_proc.is_finished_ok:\n return self.exit_codes.ERROR_EVALUATE_PROCESS_FAILED\n outputs[idx] = get_outputs_dict(eval_proc)\n\n with self.optimizer() as opt:\n opt.update(outputs)", "def postprocess(self, res, op_item, addon):\n result = res\n\n if \"postprocess\" in op_item:\n for post in [i.strip() for i in op_item[\"postprocess\"].split(\" --> \")]:\n func_name = sub(\"^([^\\(\\)]+)\\(.+$\", \"\\\\1\", post).strip()\n param_str = sub(\"^.+\\(([^\\(\\)]*)\\).*\", \"\\\\1\", post)\n if param_str == \"\":\n params_values = ()\n else:\n params_values = next(\n reader(param_str.splitlines(), skipinitialspace=True)\n )\n\n func = getattr(addon, func_name)\n func_params = (result,) + tuple(params_values)\n result, do_type_fields = func(*func_params)\n if do_type_fields:\n result = self.type_fields(result, op_item)\n\n return result", "def transform(self, results: Dict):\n\n # Apply input remapping\n inputs = self._map_input(results, self.mapping)\n\n # Scatter sequential inputs into a list\n input_scatters = self.scatter_sequence(inputs)\n\n # Control random parameter sharing with a context manager\n if self.share_random_params:\n # The context manager :func`:cache_random_params` will let\n # cacheable method of the transforms cache their outputs. Thus\n # the random parameters will only generated once and shared\n # by all data items.\n ctx = cache_random_params # type: ignore\n else:\n ctx = nullcontext # type: ignore\n\n with ctx(self.transforms):\n output_scatters = [\n self._apply_transforms(_input) for _input in input_scatters\n ]\n\n # Collate output scatters (list of dict to dict of list)\n outputs = {\n key: [_output[key] for _output in output_scatters]\n for key in output_scatters[0]\n }\n\n # Apply remapping\n outputs = self._map_output(outputs, self.remapping)\n\n results.update(outputs)\n return results", "def clear_unsuccessful_results(self):\n results = [x for x in self.get_results() if x.return_code == 0]\n self._write_results(results)\n logger.info(\"Cleared failed results from %s\", self._filename)" ]
[ "0.65581095", "0.64528775", "0.62099826", "0.61681396", "0.61681396", "0.6150747", "0.61191654", "0.6098182", "0.6078971", "0.60327035", "0.6017863", "0.60096914", "0.59998536", "0.59641546", "0.5962654", "0.593701", "0.5910838", "0.5900604", "0.5772284", "0.5731572", "0.5715091", "0.5704457", "0.5674019", "0.56517893", "0.56443274", "0.5637599", "0.5632728", "0.5623844", "0.56205946", "0.56177664", "0.56166595", "0.56129307", "0.55983657", "0.55983657", "0.55966425", "0.55894643", "0.5579402", "0.553614", "0.54864585", "0.54852396", "0.5466931", "0.54636705", "0.54636705", "0.54636705", "0.54636705", "0.54636705", "0.54618096", "0.5458626", "0.54415673", "0.54310644", "0.5427897", "0.54198456", "0.5418581", "0.5414279", "0.54030555", "0.53823", "0.5381603", "0.5376189", "0.5375476", "0.53738284", "0.5368087", "0.5365219", "0.53621984", "0.53149617", "0.53127503", "0.5303484", "0.5295635", "0.52950937", "0.5294467", "0.52937853", "0.52896076", "0.52671355", "0.5266207", "0.52624506", "0.52590656", "0.5257004", "0.525446", "0.525446", "0.5236182", "0.5232716", "0.5224414", "0.52023673", "0.5191861", "0.51825607", "0.51776296", "0.51680875", "0.5165439", "0.5161191", "0.5152957", "0.5149433", "0.51470524", "0.514361", "0.5133997", "0.5129986", "0.512551", "0.512446", "0.5114957", "0.5107681", "0.5106378", "0.50962585" ]
0.8450894
0
Turns Freshbooks tickets from the past x days into Toggl projects.
def sync(self, no_of_days=1): zd = Zendesk() tg = Toggl() try: self.print("Syncing...") self.print_divider(30) tickets = zd.get_tickets(no_of_days) for ticket in tickets: project_title = self.format_title(ticket.id, ticket.subject) if ticket.organization: client_id = tg.get_client_id(name=ticket.organization.name) if not client_id: new_client = tg.create_client(ticket.organization.name) client_id = new_client['id'] else: client_id = False self.print("Ticket '%s' has no associated organization!" % (project_title)) all_projects = tg.get_projects() if not self.already_created(ticket.id, all_projects): self.print("Creating project '%s'..." % (project_title)) result = tg.create_project(project_title, client_id, is_private=False) self.print("Toggl response:") self.log(result, silent=False) else: self.print("There is already a Toggl project for Zendesk ticket #%s!" % ticket.id) pass # TODO: edit Toggl project # tg.edit_project(project_id, name=ticket.subject) self.print_divider(30) self.print("Done!") except: self.log(traceback.format_exc(), silent=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resume():\n # We now retrieve all entries in the previous month.\n # Getting the current date and the date from a month before.\n time_year = time.localtime()[0] \n time_month = time.localtime()[1]\n time_day = time.localtime()[2]\n if time_month == 1:\n prev_time_month = 12\n prev_time_year = time_year - 1\n else:\n prev_time_month = time_month - 1\n prev_time_year = time_year\n cur_date = str(time_year) + '-' + ('%02d' % time_month) + '-' + ('%02d' % time_day)\n prev_date = str(prev_time_year) + '-' + ('%02d' % prev_time_month) + '-' + ('%02d' % time_day)\n\n entries = toggl.entries_between(prev_date, cur_date)\n entry_list = []\n \n for entry in entries:\n if is_entry_in_list(entry, entry_list) == False:\n entry_list.append(entry)\n\n print(\">>> You can resume the following entries:\")\n n = 1\n for entry in entry_list:\n tags = []\n if 'tags' in entry:\n [tags.append(i) for i in entry['tags']]\n print('> {} - {} [{}]'.format(str(n),\n entry['description'],\n \",\".join(tags)))\n n += 1\n choice = int(input(\">>> Type an entry number: \"))\n\n if choice >= 1 and choice <= len(entry_list):\n res_entry = entry_list[choice-1]\n start_toggl(res_entry['description'], res_entry['tags'])\n else:\n print(\"You typed an unavailable number.\")\n\n \"\"\"\n >>> You can resume the following entries:\n > 1 - test [project]\n > 2 - another [other project]\n >>> Type an entry number: \n \"\"\"", "def push_historic_data(project):\n defects = []\n\n logger.info(\"Starting {}...\".format(project))\n jira_issues = get_jira_defects(project)\n last_upload = datetime.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0) + relativedelta(weekday=SA(-1))\n logger.debug(\"Fetched {} issues successfully for {}\".format(len(jira_issues), project))\n for issue in jira_issues:\n try:\n created = datetime.datetime.strptime(issue.fields.created, DATE_FORMAT)\n jira_dict = jira_obj_to_dict(issue, datetime.datetime.utcnow().strftime(DATE_FORMAT))\n\n historic_data = []\n # Last Friday of the report ran\n report_date = last_upload\n while(report_date > created):\n jira_dict = jira_for_date(jira_dict, issue.changelog, report_date)\n historic_data.insert(0, create_defect(jira_dict, issue))\n report_date -= datetime.timedelta(weeks=1)\n defects.append(historic_data)\n except Exception as e:\n logger.debug(\"Exception processing {} {}\".format(jira_dict[\"key\"], e))\n logger.exception(\"Exception\")\n logger.debug(\"Missing values {}\".format(str(jira_dict)))\n pass\n if len(defects) < len(jira_issues):\n logger.debug(\"{delta} defects not added in the {} report\".format(project, delta=len(jira_issues) - len(defects)))\n defects_as_list = []\n for defect in defects:\n defects_as_list.extend(defect)\n return post_defects(project, jira_issues, defects_as_list)", "def time_tracking(self):\n fb = FreshBooks()\n tg = Toggl()\n self.print_splash()\n self.print(\"Tip: You can always enter 'skip' when you want to skip a time entry.\", format='warn')\n days = self.get_interactive_days() # number of days to go back\n self.print(\"OK, I'll run you through the Toggl time entries of the past %i day(s).\" % (days))\n timestamp = self.get_timestamp(days) # unix timestamp including tz\n time_entries = tg.get_time_entries(timestamp)\n if len(time_entries) == 0:\n self.print(\"No Toggl entries in this time span!\", 'warn')\n return False\n time_entries = self.merge_toggl_time_entries(time_entries) # merge Toggl entries\n fb_projects = fb.get_projects()\n # Loop through merged Toggl time entries:\n for entry in 
time_entries:\n # Get and convert all necessary info:\n client_id = tg.get_client_id(project_id=entry.get('pid'))\n client_name = tg.get_client_name(client_id)\n project = tg.get_project(entry.get('pid'))\n duration = int(entry['duration']) / 60 / 60 # convert duration to hours\n duration = round(duration * 4 ) / 4 # round hours to nearest .25\n description = self.format_description(project['name'], entry['description'])\n date = str(parser.parse(entry['start']).date())\n # Print info in a nice way:\n self.print_divider(30)\n self.print(\"Description: \" + description)\n self.print(\"Date: \" + date)\n self.print(\"Hours spent: \" + str(duration))\n # Skip if Toggl entry is already booked:\n if entry.get('tags') and tg.BOOKED_TAG in entry['tags']:\n self.print(\"Skipping this entry because it is already in Freshbooks.\", 'cross')\n # Skip if duration is below 0.25:\n elif duration < 0.25:\n self.print(\"Skipping this entry because there are less than 0.25 hours spent.\", 'cross')\n # If billable, add to Freshbooks:\n elif entry['billable']:\n # Get FreshBooks project name through interactive search:\n try:\n self.print(\"Project: \\U0001F50D \")\n fb_project_name = self.interactive_search(fb_projects.keys(), client_name)\n # Handle KeyboardInterrupt\n except KeyboardInterrupt:\n answer = input(\"\\nKeyboardInterrupt! Skip current entry or quit time tracking? (S/q) \")\n if answer.lower() == 's' or answer == '':\n self.clear_lines(1)\n self.print(\"Skipping this entry.\", 'cross')\n continue\n else:\n self.clear_lines(1)\n self.print(\"Ok, stopping time tracking.\", 'cross')\n sys.exit()\n # If user requests so, skip this entry:\n self.clear_lines(1)\n if not fb_project_name:\n self.print(\"Skipping this entry.\", 'cross')\n continue\n # Otherwise, add entry to FreshBooks and tag Toggl entry/entries:\n self.print(\"Project: \" + fb_project_name)\n project_id = fb.get_project_id(fb_project_name)\n fb.add_entry(project_id, duration, description, date)\n tg.tag_projects(entry['merged_ids'], tg.BOOKED_TAG)\n # If not billable, skip entry:\n else:\n self.print(\"Skipping this entry because it is not billable.\", 'cross')\n self.print_divider(30)\n answer = input(\"All done! Open FreshBooks in browser to verify? 
(Y/n) \")\n if answer.lower() == 'y' or answer == '':\n webbrowser.open('https://%s.freshbooks.com/timesheet' % fb.fb_creds['subdomain'])", "def getPastProjects(self)->list:\n returnList=[]\n for i in range(0,randint(1, 10)):\n randumProjectId = randint(0, 109)\n if randumProjectId not in returnList:\n returnList.append(randumProjectId)\n\n return returnList", "def scrum(project):\r\n\r\n stories = project.in_progress_stories()\r\n stories_by_owner = group_stories_by_owner(stories)\r\n\r\n print bold(\"{} SCRUM -- {}\".format(project.name, pretty_date()))\r\n print\r\n\r\n for owner in stories_by_owner:\r\n print bold(owner)\r\n for story in stories_by_owner[owner]:\r\n print \" #{:12s}{:9s} {:7s} {}\".format(story.story_id,\r\n estimate_visual(story.estimate),\r\n story.story_type,\r\n story.name)\r\n\r\n print\r\n\r\n print bold(\"Bugs\")\r\n bugs = project.open_bugs()\r\n if len(bugs) == 0:\r\n print 'Not sure that I believe it, but there are no bugs'\r\n for bug in bugs:\r\n print \" #{:12s} {:4s} {}\".format(bug.story_id,\r\n initials(bug.owned_by),\r\n bug.name)", "def main():\n parser = argparse.ArgumentParser(description='Creates tickets for release certification')\n parser.add_argument('-u', '--username', help='jira username', default='admin')\n parser.add_argument('-p', '--password', help='jira password', default='admin')\n parser.add_argument('-c', '--config', help='path to config file', default='./options.ini')\n parser.add_argument('-j', '--jira', help='url of jira server', default='http://localhost:8080')\n\n args = parser.parse_args()\n\n jira_user = args.username\n jira_pass = args.password\n jira_server = args.jira\n config_file_path = args.config\n CONFIG.read(config_file_path)\n\n parent_ticket = config_map('JiraOptions')['parent_ticket']\n apprenda_version = config_map('VersionInfo')['to_version']\n jira_project = config_map('JiraOptions')['project']\n jira_issue_type = config_map('JiraOptions')['issue_type']\n jira = JIRA(jira_server, basic_auth=(jira_user, jira_pass))\n\n parent_issue = jira.issue(parent_ticket)\n ticket_list = []\n\n # create clean install tickets\n clean_strings = config_map('CleanInstallSection')\n for cloud in ['single', 'hybrid']:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(clean_strings['summary'], apprenda_version, cloud)\n ticket_to_add.format_description(clean_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # create upgrade tickets\n from_versions = json.loads(config_map('VersionInfo')['from_versions'])\n upgrade_strings = config_map('UpgradeSection')\n\n # single cloud\n for version in from_versions:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(upgrade_strings['summary'], apprenda_version, version,\n \"single\")\n ticket_to_add.format_description(upgrade_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # hybrid cloud\n for version in from_versions:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(upgrade_strings['summary'], apprenda_version, version,\n \"hybrid\")\n ticket_to_add.format_description(upgrade_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # create testing tickets for other tasks\n for section in CONFIG.sections():\n if 'Ticket' in section:\n strings = config_map(section)\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(strings['summary'], apprenda_version)\n 
ticket_to_add.format_description(strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n print 'Created {0} tickets, now sending them to Jira'.format(len(ticket_list))\n # send issues to jira and create tickets and links\n issues = jira.create_issues(field_list=ticket_list)\n\n for item in issues:\n jira.create_issue_link(\n type=\"Task of Story\",\n outwardIssue=item['issue'].key,\n inwardIssue=parent_issue.key,\n )\n\n print 'Finished linking issues, exiting.'", "def get_newhire_tickets(group_id):\n url = f\"{BASE_URL}/api/v2/tickets\"\n headers = {\"AUTHorization\": f\"Basic {AUTH}\"}\n r = requests.get(url, headers=headers)\n if r.ok:\n print(f\"Got list of all new hire tickets.\")\n else:\n logging.debug(f\"Error - {r.status_code} - {r.content}\")\n tickets = r.json()[\"tickets\"]\n ticket_ids = set()\n last_hour = datetime.now() - timedelta(hours=1)\n\n for ticket in tickets:\n update_time = datetime.strptime(ticket[\"updated_at\"], \"%Y-%m-%dT%H:%M:%SZ\")\n # Check for tickets modified in the last hour\n if update_time > last_hour:\n # Verify the subject and group are related to New Hire Onboarding\n if \"New Hire\" in ticket[\"subject\"] and ticket[\"group_id\"] == group_id:\n start_date = get_start_date(ticket[\"id\"])\n # Check to see if ticket due date was already updated\n if start_date == ticket[\"due_by\"][0:10]:\n print(f'Ticket {ticket[\"id\"]} already updated.')\n else:\n ticket_ids.add(ticket[\"id\"])\n add_ticket_note(ticket[\"id\"], ticket[\"due_by\"][0:10])\n\n return ticket_ids", "def get_jira_defects(project):\n return get_jira_issues('project = \"{}\" AND filter = 19589'.format(project))", "def push_current_data(project):\n defects = []\n\n logger.info(\"Starting {}...\".format(project))\n jira_issues = get_jira_defects(project)\n now = datetime.datetime.utcnow().strftime(DATE_FORMAT)\n logger.debug(\"Fetched {} issues successfully for {}\".format(len(jira_issues), project))\n\n # Each issue fetched is being generated with our schema.\n for issue in jira_issues:\n try:\n jira_dict = jira_obj_to_dict(issue, now)\n defect = create_defect(jira_dict, issue)\n defects.append(defect)\n except Exception as e:\n logger.debug(\"Exception processing {} {}\".format(issue.key, e))\n logger.debug(\"Missing values {}\".format(str(jira_dict)))\n pass\n if len(defects) < len(jira_issues):\n logger.debug(\"{delta} defects not added in the {} report\".format(project, delta=len(jira_issues) - len(defects)))\n\n return post_defects(project, jira_issues, defects)", "def nfldraft(self, irc, msg, args, optyear, optround):\n \n if optyear: # if optyear is there, test for valid and if after 2003.\n testdate = self._validate(optyear, '%Y')\n if not testdate:\n irc.reply(\"Invalid year. Must be YYYY.\")\n return\n if optyear < 1996:\n irc.reply(\"Year must be after 1996.\")\n return\n \n if optround:\n if 1 <= optround <= 7:\n irc.reply(\"Draft round must be 1 or 7.\")\n return\n \n url = self._b64decode('aHR0cDovL2luc2lkZXIuZXNwbi5nby5jb20vbmZsL2RyYWZ0L3JvdW5kcw==')\n\n if optyear: # add year if we have it.\n url += '?year=%s' % (optyear)\n\n if optround: # optional round.\n url += '&round=%s' % (optround)\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to fetch: %s\" % url)\n return\n\n soup = BeautifulSoup(html)\n\n # check and make sure we have a table, otherwise error.\n if not soup.find('table', attrs={'class':'tablehead draft-tracker'}): \n irc.reply(\"error: could not find any draft information. 
Bad year or round?\")\n return\n else:\n table = soup.find('table', attrs={'class':'tablehead draft-tracker'})\n \n h2 = soup.find('h2')\n rows = table.findAll('tr', attrs={'class': re.compile('^oddrow.*?|^evenrow.*?')})\n\n object_list = []\n \n for row in rows:\n pickNumber = row.find('p', attrs={'class':'round-number'})\n pickName = row.find('p', attrs={'class':'player-name'})\n pickPos = row.find('li', attrs={'class':'li-position'})\n pickTeam = row.find('p', attrs={'class':'team-name'})\n \n appendString = ircutils.bold(pickNumber.getText()) + \". \" + pickName.getText() + \" - \" + pickTeam.getText()\n \n if row.find('p', attrs={'class':'notes'}):\n appendString += \" (\" + row.find('p', attrs={'class':'notes'}).getText() + \")\"\n \n object_list.append(appendString) \n \n irc.reply(ircutils.mircColor(h2.getText().strip(), 'red') + \": \") # print header.\n \n for N in self._batch(object_list, 6):\n irc.reply(' | '.join(str(n) for n in N))", "def tickets(self):\n if self._tickets:\n return self._tickets\n else:\n # Parse out tickets by splitting on the fixed format -- will break if format changes\n tickets = self.text.split(FIXED_FORMAT + '\\n')\n tickets = tickets[1:-2] # Exclude extra line that are not tickets\n tickets = [Ticket(text) for text in tickets]\n\n for ticket in tickets:\n for line in ticket.text.splitlines():\n line = line.strip('\\n')\n\n # Use the Easier to Ask for Forgiveness idiom\n # If we recognize an entity, we parse it, if not, we do nothing\n try:\n ticket.outages.append(Outage(line))\n except ParsingException:\n pass\n\n try:\n ticket.causes.append(Cause(line))\n except ParsingException:\n pass\n\n try:\n ticket.date_log.append(DateEntry(line))\n except ParsingException:\n pass\n\n try:\n ticket.history_log.append(HistoryEntry(line))\n except ParsingException:\n pass\n\n return tickets", "def do_projects(self, arg):\n args = shlex.split(arg)\n limit = 10\n from_date = to_date = ''\n if args:\n limit = 0\n try:\n from_date, to_date = helpers.parse_date_parameters(args)\n except ValueError, msg:\n print(msg)\n return\n projects = self.db.get_projects_with_activity_field(\n from_date, to_date, limit=limit)\n refined = map(lambda x: [\n x['pid'], x['name'],\n '[Active]' if x['active'] else '[closed]',\n datetime.datetime.strftime(x['created'], '%c').decode('utf8'),\n x['description']], projects)\n print(tabulate(refined, ['ID', 'Project', 'Activity', 'Created',\n 'Description']))", "def ConvertProjectToCrpx(basket):\n\n sCrpxName = \"\"\n sCrpxContent = \"\"\n template_crp = \"seeker/crp.xml\"\n oErr = utils.ErrHandle()\n standard_features = ['searchWord', 'searchPOS']\n iQCid = 1\n\n try:\n # Access the research project and the gateway\n research = basket.research\n gateway = research.gateway\n\n # Get the name of the project\n sCrpxName = research.name\n\n # The format of what we process\n # Options: Xquery-Psdx, Folia-Xml, Negra-Tig, Alpino-Xml, Dbase\n format = basket.format\n if format == \"psdx\":\n extension = \".psdx\"\n project_type = \"Xquery-psdx\"\n elif format == \"folia\":\n extension = \".folia.xml\"\n project_type = \"Folia-Xml\"\n elif format == \"negra\":\n extension = \".xml\"\n project_type = \"Negra-Tig\"\n elif format == \"alpino\":\n extension = \".xml\"\n project_type = \"Alpino-Xml\"\n else:\n extension = \"\"\n project_type = \"\"\n\n # The language and location of what we process\n lng = basket.part.corpus.get_lng_display()\n dir = basket.part.dir\n\n outfeat = \"\" # List of features separated by semicolon\n queryname = 
\"Cesar_query-main\"\n defname = \"Cesar_standard-def\"\n currentdate = timezone.now().strftime(\"%c\")\n outputname = \"standard\"\n # Make sure that the dbfeatlist contains all features in exactly the right ORDER!!!\n dbfeatlist = []\n # Add the standard features\n for idx in range(0, len(standard_features)):\n dbfeat = standard_features[idx]\n iNum =idx+1\n oDbFeat = {\"name\": dbfeat, \"QCid\": iQCid, \"FtNum\": iNum}\n dbfeatlist.append(oDbFeat)\n # Add the user-defined features\n iLastNum = len(standard_features)+1\n feature_list = gateway.get_feature_list()\n forbidden_names = [x.lower() for x in standard_features]\n for idx in range(0, len(feature_list)):\n iNum = iLastNum + idx\n ft = feature_list[idx]\n # Name check\n if ft.name.lower() in forbidden_names:\n sCrpxName = \"\"\n sCrpxContent = [\"Sorry, please don't use the feature name [{}]\".format(ft.name)]\n return sCrpxName, sCrpxContent\n # We are okay...\n oDbFeat = {\"name\": ft.name, \"QCid\": iQCid, \"FtNum\": iNum}\n dbfeatlist.append(oDbFeat)\n\n # Create a context for the template\n context = dict(gateway=gateway, \n research=research,\n extension=extension,\n lng=lng,\n dir=dir,\n projectdir=PROJECT_DIR,\n outfeat=outfeat,\n queryname=queryname,\n defname=defname,\n outputname=outputname,\n dbfeatlist=dbfeatlist,\n project_type=project_type,\n currentdate=currentdate,\n changed=get_crpp_date(timezone.now()),\n created=get_crpp_date(basket.created),\n codedef=basket.codedef,\n codeqry=basket.codeqry)\n # Convert template\n sCrpxContent = loader.get_template(template_crp).render(context)\n sCrpxContent = re.sub(r'\\n\\s*\\n', '\\n', sCrpxContent).strip()\n\n except:\n # Show error message\n oErr.DoError(\"ConvertProjectToCrpx error: \")\n sCrpxName = \"\"\n sCrpxContent = oErr.loc_errStack\n\n return sCrpxName, sCrpxContent", "def getFeaturedProject(current_timeline, program):\n # expiry time to fetch the new featured project entity\n # the current expiry time is 2 hours.\n expiry_time = datetime.timedelta(seconds=7200)\n\n def queryForProject():\n query = project_model.GSoCProject.all()\n query.filter('is_featured', True)\n query.filter('program', program)\n if current_timeline == 'coding_period':\n project_status = project_model.STATUS_ACCEPTED\n else:\n project_status = 'completed'\n query.filter('status', project_status)\n return query\n\n q = queryForProject()\n\n # the cache stores a 3-tuple in the order student_project entity,\n # cursor and the last time the cache was updated\n fsp_cache = memcache.get('featured_gsoc_project' + program.key().name())\n\n if fsp_cache:\n cached_project, cached_cursor, cache_expiry_time = fsp_cache\n if not datetime.datetime.now() > cache_expiry_time + expiry_time:\n return cached_project\n else:\n q.with_cursor(cached_cursor)\n if q.count() == 0:\n q = queryForProject()\n\n new_project = q.get()\n new_cursor = q.cursor()\n memcache.set(\n key='featured_gsoc_project',\n value=(new_project, new_cursor, datetime.datetime.now()))\n\n return new_project", "def get_project_issues(repo_slug, max_issues_per_project=None, max_date=None):\n # type: (str, int, str) -> pd.DataFrame\n logging.info(\"Processing %s\", repo_slug)\n all_issues = pd.DataFrame(\n json_imap({\n 'reporter': 'user__login',\n 'role': 'author_association',\n 'number': 'number',\n 'title': 'title',\n 'created_at': 'created_at',\n 'body': 'body',\n 'state': 'state',\n },\n api.repo_issues(repo_slug)),\n ).sort_values('created_at')\n if max_date:\n all_issues = all_issues[all_issues['created_at'] < max_date]\n last_reported 
= all_issues.groupby(\n 'reporter').last().iloc[:max_issues_per_project]\n first_reported = all_issues.groupby('reporter').first()['created_at']\n # int(timedelta) is ns, times 86400 seconds in a day\n last_reported['tenure'] = (\n pd.to_datetime(last_reported['created_at'])\n - pd.to_datetime(last_reported.index.map(first_reported))\n ).astype(int) // 86400000000000\n last_reported['project'] = repo_slug\n return last_reported.reset_index().sort_values('number')", "def get_upcoming_games(n=10):\n conn, cursor = connect_to_db()\n query = \"\"\"select kickoff_time, t2.team_id home_id, t2.team_name home_name, \n t3.team_id away_id, t3.team_name away_name\n from fpl_fixtures t1 left join fpl_teams t2 on t1.team_h = t2.id left \n join fpl_teams t3 on t1.team_a = t3.id where started = 0 order by \n kickoff_time limit {}\"\"\".format(n)\n df = run_query(cursor, query)\n return df", "def createTasks():\n tickets = jutdaapi.get_tickets(queues=[3]) # this works better (still not\n # perfect) if list results is set to 1000 in jutda user settings\n tasks = []\n for ticket in tickets:\n tasks.append(ticketToTask(ticket))\n return tasks", "def get_jira_issues(query):\n jira_issues = []\n defects = []\n count, maxlen = 0, 1\n while count < maxlen:\n issues = jira_client.search_issues(query, startAt=count, maxResults=50, expand='changelog')\n jira_issues.extend(issues)\n count = len(jira_issues)\n maxlen = issues.total\n\n return jira_issues", "def get_jira_tasks(start_date, end_date, pj_name=project_name):\n\n start_date=start_date.replace(\"-\",'/')\n end_date=end_date.replace(\"-\",'/')\n try:\n jira = JIRA(options=options, basic_auth=(usr, pas))\n except JIRAError as e:\n if e.status_code == 401:\n print (\"Login to JIRA failed.\")\n jq = \"\"\"project = {} \n and duedate >= \"{}\" \n and duedate <= \"{}\" \n order by created DESC\"\"\".format(pj_name, start_date,end_date )\n issues = jira.search_issues(jq)\n columns = ['year','month','day', 'name','timeoriginalestimate','timespent']\n data = pd.DataFrame([], columns=columns)\n for issue in issues:\n name = \"NoAssign\"\n if issue.fields.assignee:\n name = issue.fields.assignee.displayName\n (year, month, day) = issue.fields.duedate.split(\"-\")\n timeoriginalestimate = issue.fields.timeoriginalestimate if issue.fields.timeoriginalestimate is not None else 0\n timespent = issue.fields.timespent if issue.fields.timespent is not None else 0\n tmp_df = pd.DataFrame([[year, month, day, name, timeoriginalestimate/3600, timespent/3600]], columns=columns)\n data = data.append(tmp_df)\n\n data.reset_index(drop=True, inplace=True)\n return data", "def tickets(people: list) -> str:\n\tprint(people)\n\n\tvasya = list()\n\n\tfor p in people:\n\n\t\tif p == 25:\n\t\t\tvasya.append(p)\n\t\t\tcontinue\n\n\t\tif p == 50 and 25 in vasya:\n\t\t\tdel vasya[vasya.index(25)]\n\t\t\tvasya.append(p)\n\t\t\tcontinue\n\n\t\tif p == 100:\n\t\t\tif 25 in vasya and 50 in vasya:\n\t\t\t\tdel vasya[vasya.index(25)]\n\t\t\t\tdel vasya[vasya.index(50)]\n\t\t\t\tvasya.append(p)\n\t\t\t\tcontinue\n\n\t\t\tif vasya.count(25) >= 3:\n\t\t\t\ti = 3\n\t\t\t\twhile i > 0:\n\t\t\t\t\tdel vasya[vasya.index(25)]\n\t\t\t\t\ti -= 1\n\t\t\t\tvasya.append(p)\n\t\t\t\tcontinue\n\n\t\treturn 'NO'\n\n\treturn \"YES\"", "def main():\n # group_id = get_group_id() This would be used if I had\n # the appropriate privileges\n group_id = 15000022833\n setup_logger()\n ticket_ids = get_newhire_tickets(group_id)\n for ticket_id in ticket_ids:\n update_ticket_info(ticket_id)", "def 
get_november_historical_comments(subreddit, limit):\n all_submissions = []\n\n days = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n month = 11\n year = 2020\n\n for day in days:\n # generate random 4 hour time chunk\n start_hour = random.randint(0, 14)\n end_hour = start_hour + 4\n start_time = int(dt.datetime(year, month, day, start_hour, 0).timestamp())\n end_time = int(dt.datetime(year, month, day, end_hour, 0).timestamp())\n\n # gets submissions and adds submission dictionary to master list\n threads = list(get_submissions(subreddit, start_time, end_time, limit))\n\n for item in threads:\n all_submissions.append(item.d_)\n\n # gets submissions and adds submission dictionary to master list\n threads = list(get_submissions(subreddit, start_time + 5, end_time + 5, limit))\n\n for item in threads:\n all_submissions.append(item.d_)\n\n print('querying day:', day)\n print('total submissions:', len(all_submissions))\n\n return all_submissions", "def tickets(number, day, premium_seating):\n #fill in your code here. \n return 0.0", "def get_popular_tickets_solution(tickets):\n popular_tickets = []\n for ticket in tickets:\n num_watchers = len(ticket['people']['watchers'])\n if num_watchers >= 8:\n popular_tickets.append(ticket)\n return popular_tickets", "def run():\n\n full_ticket = Ticket()\n daily_ticket_a = Ticket()\n daily_ticket_b = Ticket()\n daily_ticket_c = Ticket()\n community_ticket = Ticket()\n\n full_ticket.ticket_type = 'full'\n daily_ticket_a.ticket_type = 'daily-13'\n daily_ticket_b.ticket_type = 'daily-14'\n daily_ticket_c.ticket_type = 'daily-15'\n community_ticket.ticket_type = 'community'\n\n full_ticket.price = 400000\n daily_ticket_a.price = 200000\n daily_ticket_b.price = 300000\n daily_ticket_c.price = 350000\n community_ticket.price = 0\n\n full_ticket.information = 'Ticket for full 3 days devsummit event.'\n daily_ticket_a.information = 'Ticket for 13th November at devsummit event.'\n daily_ticket_b.information = 'Ticket for 14th November at devsummit event.'\n daily_ticket_c.information = 'Ticket for 15th November at devsummit event.'\n community_ticket.information = 'Ticket for community, only given by admin.'\n db.session.add(full_ticket)\n db.session.add(daily_ticket_a)\n db.session.add(daily_ticket_b)\n db.session.add(daily_ticket_c)\n db.session.add(community_ticket)\n\n db.session.commit()", "def switch_project(project):\n # Get the data\n project = project.lower()\n lines, finished, last_project = parse_file(project=None)\n line1, i1, last1, _, times1 = parse_line(lines, last_project, finished)\n line2, i2, _, new2, times2 = parse_line(lines, project, True)\n now = datetime.now()\n\n # Format the data\n if not finished:\n punch1 = now - last1\n times1.append(punch1)\n punch1 = punch1.total_seconds()\n total1 = sum(t.total_seconds() for t in times1)\n total2 = sum(t.total_seconds() for t in times2)\n now = now.strftime(TIMEF)\n\n # Modifying the lines for the file\n lines[1] = HEADER1 + project\n if not finished:\n\n # Clock-Out\n line1[-1] += IN_OUT_SEP + now\n line1[1] = fnum(total1)\n line1 = PUNCH_SEP.join(line1)\n lines[i1] = line1\n\n # Clock-In\n line2.append(now)\n line2 = PUNCH_SEP.join(line2)\n if new2:\n lines.append(line2)\n else:\n lines[i2] = line2\n\n # Write to file\n with open(PUNCHES_PATH, 'w+') as f:\n f.write('\\n'.join(lines))\n\n # Report\n if new2:\n print(f\"Created Project: '{project}'\")\n if finished:\n print(f\"CURRENTLY CLOCKED OUT, Project Switched From: '{last_project}', To: '{project}'\")\n print(f\"NOW: {now}\")\n 
print(f\"'{last_project}' Total Hrs: {fnum(total1)}\")\n print(f\"'{project}' Total Hrs: {fnum(total2)}\")\n else:\n print(f\"CLOCK OUT, Project: '{last_project}'\")\n print(f\"CLOCK IN, Project: '{project}'\")\n print(f\"'{last_project}' IN: {last1.strftime(TIMEF)}, NOW: {now}\")\n print(f\"'{last_project}' Total Hrs: {fnum(total1)}, Current Punch: {fnum(punch1)}\")\n print(f\"'{project}' Total Hrs: {fnum(total2)}\")", "def twenty_seventeen():\n return 2017", "def tickets(people):\n people= [100, 50, 25]", "def get_created_projects(self):\n project_ouessant1 = Project.objects.get(name='Ouessant Tidal Power Phase I')\n project_ouessant2 = Project.objects.get(name='Ouessant Tidal Power Phase II')\n project_liaoning = Project.objects.get(\n name='Liaoning Linghai China Resource Power Wind Power Wind Farm'\n )\n return [project_ouessant1, project_ouessant2, project_liaoning]", "def _create_historic_forecasts(\n data, time_dt, frt_dt, standard_grid_metadata=\"uk_ens\", number_of_days=5, **kwargs\n):\n historic_forecasts = iris.cube.CubeList([])\n for day in range(number_of_days):\n new_frt_dt = frt_dt + datetime.timedelta(days=day)\n new_time_dt = time_dt + datetime.timedelta(days=day)\n historic_forecasts.append(\n set_up_variable_cube(\n data - 2 + 0.2 * day,\n time=new_time_dt,\n frt=new_frt_dt,\n standard_grid_metadata=standard_grid_metadata,\n **kwargs,\n )\n )\n return historic_forecasts", "def project():", "def project():", "def project():", "def get_projects(selection):\n project=[]\n\n # project with keyname\n\n if 'project' in selection.facets:\n project+=selection.facets['project']\n\n\n # project without keyname\n\n # WARNING\n #\n # The code below uses sdinference and query the database to retrieve ESGF parameters.\n # Doing those things here may raise circular dependencies\n # as well as making the whole thing very complex.\n #\n # We do this to make this syntax work (i.e. project value without key)\n # synda search GeoMIP\n #\n # Note that this syntax always works (i.e. 
load the project level default file), even without this code.\n # synda search project=GeoMIP\n #\n #\n pending_projects=sdearlystreamutils.get_facet_values_early([selection.facets],'project',extract_item=True) # project without keyname or project as part of an identifier.\n\n li=pending_projects+project\n\n li=list(set(li)) # remove duplicate\n\n return li", "def test_get_past(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_past()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"slavic\"] in qs\n assert projects[\"derrida\"] not in qs\n assert projects[\"pliny\"] not in qs\n assert projects[\"ocampo\"] not in qs", "def getPreviousNightlyPath( numDaysInPast=1 ):\n\n myPath= os.environ.get(\"NICOS_PROJECT_RELNAME_COPY\",\"\")\n #replace rel_x with rel_(x-1)\n for i in range(0,7):\n if (\"rel_%d\" % i) in myPath:\n myPath = myPath.replace( (\"rel_%d\" % i), (\"rel_%d\" % ( (i-numDaysInPast)%7 )) )\n break\n refFile = os.environ.get(\"NICOS_COPY_HOME\",\"\") + \"/\" + myPath + \"/NICOS_area/NICOS_atntest\" + os.environ.get(\"NICOS_SUFFIX\",\"\") + \"/\" + os.path.basename(os.environ.get(\"ATN_WORK_AREA\",\"\"))\n\n return refFile", "def tumpek(cls, gregorian_year):\n year = GregorianDate.year_range(gregorian_year)\n cap_Delta = cls.day_fromordinal(0)\n return cls.positions_in_range(14, 35, cap_Delta, year)", "def giftify(day):\n index = day - 13\n if day < 13:\n gift = GIFTS[day]\n elif day % 10 == 1:\n # Partridge in a pear tree\n gift = plural_nouns[index] + \" in \" + a(pears.pop() + \" \" + trees.pop())\n elif day % 10 == 5:\n # Five gold rings\n gift = adjectives.pop() + \" \" + plural_nouns[index]\n else:\n gift = plural_nouns[index] + \" \" + gerundify(verbs.pop())\n if day > 1:\n gift = p.number_to_words(day) + \" \" + gift + \",\"\n\n gift = capify(gift)\n\n if print_html:\n if day != 11 and day % 10 == 1:\n # Partridge in a pear tree\n gift = \"<i>\" + gift + \"</i>\"\n elif day % 10 == 5:\n # Five gold rings\n gift = \"<g>\" + gift + \"</g>\"\n\n return gift", "def list_tickets(db=Depends(get_db)):\n return db.query(Ticket)\n return [\n {\n \"ticket_id\": ticket_id,\n \"title\": title,\n \"status\": status,\n \"created_at\": created_at / 1000,\n }\n for ticket_id, title, status, created_at in db.fetchall()\n ]", "def due_soon(request):\n soon = timezone.now() + timedelta(days=1)\n return Task.objects.select_related('project').filter(user=request.user, due__lt=soon, done=False).exclude(folder='trash')", "def create_issue_objs(self):\n \n print \"Creating IssueClass objects\"\n \n # Create IssueClass objects, add to issue_objs dictionary\n for issue in self.issues: \n# print json.dumps(issue, indent=4)\n if issue['fields']['issuetype']['name'] == \"Sub-task\" and issue['fields']['parent']['fields']['issuetype']['name'] != \"New Feature\":\n continue # Skip sub-tasks whose parents are not New features\n ic = IssueClass() # Create IssueClass object for each issue, assign data from issue to object's variables\n ic.assignee = issue['fields']['assignee']['name']\n ic.assignee_email = issue['fields']['assignee']['emailAddress']\n ic.issue_id = issue['key']\n ic.issue_type = issue['fields']['issuetype']['name']\n ic.summary = issue['fields']['summary']\n ic.status = issue['fields']['status']['name']\n self.issue_objs[issue['key']] = ic # Add object to main object dictionary\n \n if ic.issue_type == \"Sub-task\":\n 
ic.issue_parent = issue['fields']['parent']['key'] # Get Sub-task parent\n \n try:\n ic.sprint = issue['fields']['customfield_10264'][0]['value'] # Get current sprint\n except TypeError:\n pass # Some issues have no sprint\n \n # Brand new issues less than change_period with no changes yet are considered a \"change of status\".\n ic.icdt = dt.strptime(issue['fields']['created'].split('.')[0], \"%Y-%m-%dT%H:%M:%S\") # Item create datetime\n if (issue['fields']['issuetype']['name'] == \"New Feature\") and \\\n ic.icdt.date() > date.today()-timedelta(days=int(self.config.get('default', 'change_period'))):\n ic.last_sprint = \"\" # Only objects with a last_sprint or last_status attribute will be checked for changes within change_period\n ic.last_status = \"\" # Set last_sprint and last_status to null for issues less than change_period old\n\n # Get time in status for the issues we're interested in, also updates sprint/last_sprint, status/last_status\n self.get_time_in_status(issue, ic.status)", "def find_new_contests(sport):\n\n # def get_pst_from_timestamp(timestamp_str):\n # timestamp = float(re.findall(\"[^\\d]*(\\d+)[^\\d]*\", timestamp_str)[0])\n # return datetime.datetime.fromtimestamp(\n # timestamp / 1000, timezone(\"America/Los_Angeles\")\n # )\n\n url = f\"https://www.draftkings.com/lobby/getcontests?sport={sport}\"\n\n # response = requests.get(url, headers=HEADERS, cookies=COOKIES).json()\n response_contests = get_contests(url)\n\n # create list of Contest objects\n contests = [Contest(c) for c in response_contests]\n # contests = [\n # get_largest_contest(response[\"Contests\"], 3),\n # get_largest_contest(response[\"Contests\"], 0.25),\n # get_largest_contest(response[\"Contests\"], 27),\n # ] + get_contests_by_entries(response[\"Contests\"], 3, 50000)\n target_contests = []\n entry_fees = []\n if sport == \"NFL\":\n entry_fees = [5, 10, 25, 50]\n else:\n entry_fees = [10, 25]\n\n for entry_fee in entry_fees:\n largest_contest = get_largest_contest(contests, entry_fee=entry_fee)\n # check if largest_contest is None\n if largest_contest is not None:\n logger.debug(\"Appending contest %s\", largest_contest)\n target_contests.append(largest_contest)\n\n for contest in target_contests:\n date_time = contest.start_dt\n # make naive datetime aware based on django settings\n aware_datetime = make_aware(date_time)\n dkcontest, created = DKContest.objects.update_or_create(\n dk_id=contest.id,\n defaults={\n \"date\": aware_datetime.date(),\n \"datetime\": aware_datetime,\n \"sport\": sport,\n \"name\": contest.name,\n \"draft_group_id\": contest.draft_group,\n \"total_prizes\": contest.total_prizes,\n \"entries\": contest.entries,\n \"entry_fee\": contest.entry_fee,\n },\n )\n if created:\n logger.info(\"Creating DKContest %s\", dkcontest)", "def mailboxes (number):\n\n\n low = number * 2 #minimum months removed\n\n high = number * 6 #maximum months removed\n\n print (\"Because you have run into\", number, \"Mailbox(es),\",\n \"your car's lifespan has been shortened anywhere\" ,\n \"from\", low, \"to\", high, \"months\")", "def get_todays_posts():\n \n return sorted(requests.get(TODAY_URL).json()['hunts'], \n key=lambda post: post['rank'])", "def get_jira_tasks(\n host: str, username: str, jira_password: str, max_results: int = 1000\n) -> List:\n # options = {'server': 'https://cog-jira.ipsoft.com', 'basic_auth': ('dengvall', pwd)}\n try:\n jira = JIRA(basic_auth=(username, jira_password), server=f\"https://{host}\")\n except j.exceptions.JIRAError:\n logger.error(\"Error connecting to 
server - please verify credentials\")\n raise\n\n # Get all projects\n # projects = jira.projects()\n\n logger.info(\"fetching jira tickets\")\n all_tickets = jira.search_issues(\n \"assignee = currentUser() order by priority desc\", maxResults=max_results\n )\n logger.info(f\"complete fetching {len(all_tickets)} tickets\")\n return all_tickets", "def tbr_no_lgtm(cc): # pragma: no cover\n cc.execute(\"\"\"SELECT DISTINCT DATE_FORMAT(git_commit.timestamp, '%Y-%m')\n FROM git_commit\"\"\")\n months = cc.fetchall()\n results = []\n for month in months:\n month = month[0]\n cc.execute(\"\"\"SELECT COUNT(*)\n FROM review\n INNER JOIN git_commit ON review.review_url = git_commit.review_url\n INNER JOIN commit_people\n ON commit_people.git_commit_hash = git_commit.hash\n LEFT JOIN (\n SELECT review_url, COUNT(*) AS c FROM review_people\n WHERE type = 'lgtm' GROUP BY review_url) lgtm_count\n ON review.review_url = lgtm_count.review_url\n WHERE lgtm_count.c = 0 OR lgtm_count.c IS NULL\n AND commit_people.type = 'tbr'\n AND YEAR(git_commit.timestamp) = %s\n AND MONTH(git_commit.timestamp) = %s\"\"\" % (month[:4], month[5:]))\n result = cc.fetchone()\n results.append([month, int(result[0])])\n return results", "def confirm_recent_new_issues(self, msg, hour):\n self._asset_bind(msg)\n yield (\"Processing....\")\n trans = self._translation_util(msg)\n client = self._github_operator(msg)\n cmd = \"repo:{} is:open type:issue\".format(\n task_repository_name())\n issue_list = client.search_issue(cmd, 10)\n end_time = datetime.datetime.now()\n cnt = 0\n for issue in issue_list:\n start_time = issue.created_at\n if end_time - start_time <= datetime.timedelta(hours=hour):\n trans.wait_for_limit(MAX_RESULT, MAX_RESULT)\n confirmed = False\n for label in issue.get_labels():\n if label.name == \"welcome\":\n issue.remove_from_labels(\"welcome\")\n else:\n if label.name in [\"pending\", \"translating\", \"pushed\", \"finished\"]:\n confirmed = True\n if not confirmed:\n issue.add_to_labels(\"pending\")\n cnt += 1\n yield \"{} issues confirmed.\".format(cnt)", "def use_reddit_api(days_past):\n reddit_json = 'https://www.reddit.com/r/SketchDaily.json'\n r = requests.get(reddit_json, headers = {'User-agent': 'drawingPromptsForAlexa'})\n json_data = json.loads(r.text)\n #Getting todays data\n theme = json_data['data']['children'][days_past]['data']['title'].split()\n theme = theme[3:] #Removing date\n theme = \" \".join(theme) #Joining theme into string\n\n selftext = json_data['data']['children'][days_past]['data']['selftext']\n #Removing reddit formatting:\n selftext = selftext.replace(\"\\r\", \"\")\n selftext = selftext.replace(\"\\n\", \"\")\n selftext = selftext.split()\n #print(selftext) #TESTING\n #Finding alternate theme\n altTheme, index, collecting = \"\", 0, False\n while (index < len(selftext) and selftext[index] != \"*****\"): #Parsing through selftext to find alternate theme\n if selftext[index].lower() == \"theme:\":\n collecting = True\n elif collecting:\n altTheme += selftext[index] + \" \"\n index += 1\n retString = \"\"\n if days_past == 0:\n retString = \"Today's drawing prompt is: \" + theme + \". The alternate theme is: \" + altTheme\n elif days_past == 1:\n retString = \"Yesterday's drawing prompt is: \" + theme + \". The alternate theme is: \" + altTheme\n else:\n retString = \"A random drawing prompt is: \" + theme + \". 
The alternate theme is: \" + altTheme\n return retString", "def get_popular_tickets(tickets):\n popular_tickets = []\n #\n # TODO - your code here\n # \n for ticket in tickets:\n str_len=len(ticket['people']['watchers'])\n if str_len>=8:\n popular_tickets.append(ticket)\n \n return popular_tickets", "def projects_count(args):\n session = GithubSession()\n\n print(f\"counting {args.name}\")\n\n board = session.get_project(args.name)\n\n tally = []\n\n columns = session.get_columns(board)\n for column in columns:\n print(column[\"name\"], file=sys.stderr)\n\n cards = list(session.get_cards(column))\n\n total = Decimal(0)\n unpointed = 0\n num_cards = 0\n num_walk_ins = 0\n issues = []\n walk_ins = []\n walk_in_points = 0\n\n for card_data in cards:\n issue_number = utils.get_issue_number_from_card_data(card_data)\n if not issue_number: # must be a note\n continue\n\n issue_data = session.get_issue(issue_number)\n labels = issue_data[\"labels\"]\n\n num_cards += 1\n\n points = get_points(labels)\n if points:\n total += points\n else:\n unpointed += 1\n\n issue_data = {\n \"issue_number\": issue_number,\n \"points\": str(points),\n \"unpointed\": points is None,\n \"walk_in\": False,\n }\n\n if is_walk_in(labels):\n num_walk_ins += 1\n if points:\n walk_in_points += points\n\n issue_data[\"walk_in\"] = True\n\n walk_ins.append(issue_data)\n\n issues.append(issue_data)\n\n tally.append(\n {\n \"column\": column[\"name\"],\n # 'issues': issues,\n \"num_cards\": num_cards,\n \"num_walk_ins\": num_walk_ins,\n \"walk_in_points\": str(walk_in_points),\n # 'walk_ins': walk_ins,\n \"total_points\": str(total),\n \"unpointed\": unpointed,\n }\n )\n\n print(json.dumps(tally, indent=4))", "def backlog_milestone():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"project\", help=\"name of the project\")\n parser.add_argument(\"milestone\", help=\"name of the milestone\")\n\n args = parser.parse_args()\n\n session = GithubSession()\n\n project_data = session.get_project(args.project)\n\n milestone_data = session.get_milestone(args.milestone)\n milestone_title = milestone_data[\"title\"]\n\n backlog_data = session.get_column(project_data, \"backlog\")\n icebox_data = session.get_column(project_data, \"icebox\")\n\n results = session.search(f'repo:openslate/openslate milestone:\"{milestone_title}\"')\n for search_data in results[\"items\"]:\n issue_data = get_issue(search_data[\"number\"]).issue\n issue_card = session.get_card(project_data, issue_data)\n\n if issue_card[\"column_url\"] == icebox_data[\"url\"]:\n session.move_card(issue_card, backlog_data)\n\n print(\".\", end=\"\")", "def makeRoastTabs(ID, numbers):\n newFromTemplate(ID, TODAY)\n continueFromLastPR(ID, 1)\n newFromTemplate(ID, TOMORROW)\n populateBatches(ID, 2, numbers)", "def projects_label(args):\n session = GithubSession()\n\n label_datas = list(session.get_labels())\n\n team = args.team\n team_label_data = None\n if team:\n team_label = utils.get_label(team, prefix=\"team\")\n team_label_data = [x for x in label_datas if x[\"name\"] == team_label][0]\n\n # get the project label\n project_label = utils.get_label(args.name, prefix=\"project\")\n project_label_data = [x for x in label_datas if x[\"name\"] == project_label][0]\n\n print(f\"label cards in project {args.name} column {args.column}\")\n\n project_board = session.get_project(args.name)\n project_backlog_grooming = session.get_column(project_board, \"backlog grooming\")\n\n cards = list(session.get_cards(project_backlog_grooming))\n for card_data in cards:\n 
issue_number = utils.get_issue_number_from_card_data(card_data)\n\n print(issue_number)\n\n # add the project label\n session.add_label(project_label_data, number=issue_number)\n\n if team_label_data:\n session.add_label(team_label_data, number=issue_number)", "def merge_toggl_time_entries(self, time_entries):\n tg = Toggl()\n d = {}\n for entry in time_entries:\n if entry.get('billable'):\n if entry.get('tags') and tg.BOOKED_TAG in entry['tags']:\n status = 'booked'\n else:\n status = 'not-booked'\n date = parser.parse(entry['start']).date()\n if not entry.get('pid'):\n self.log(\"Couldn't find associated project for entry: %s\" % (str(entry)))\n continue\n unique_id = str(entry['pid']) + str(date) + status\n if not entry.get('description'):\n entry['description'] = \"\"\n if d.get(unique_id):\n d[unique_id]['duration'] += entry['duration']\n d[unique_id]['merged_ids'].append(entry['id'])\n if d[unique_id].get('description'):\n if entry['description'].strip() not in d[unique_id]['description']:\n d[unique_id]['description'] += ' / ' + entry['description']\n else:\n d[unique_id]['description'] = entry['description']\n else:\n entry['merged_ids'] = [entry['id']]\n d[unique_id] = entry\n return d.values()", "def schedule_content(request):\r\n\r\n stories = Story.objects.filter(organization=request.user.organization).exclude(archived=True)\r\n\r\n # data = {}\r\n # data['success'] = 1\r\n # data['result'] = []\r\n data = []\r\n\r\n for story in stories:\r\n # Facet Schedules\r\n for facet in story.facetstory.all():\r\n credit = {}\r\n for user in facet.credit.all():\r\n credit['id'] = []\r\n credit['id'].append(user.credit_name)\r\n credit['id'].append(user.get_absolute_url())\r\n editor = {}\r\n for user in facet.editor.all():\r\n editor['id'] = []\r\n editor['id'].append(user.credit_name)\r\n editor['id'].append(user.get_absolute_url())\r\n print credit\r\n if facet.due_edit:\r\n edit_event_dict = {}\r\n edit_event_dict['id'] = facet.id\r\n edit_event_dict['title'] = facet.name.encode('utf-8')\r\n edit_event_dict['description'] = facet.description.encode('utf-8')\r\n edit_event_dict['due-edit'] = time.mktime(facet.due_edit.timetuple())\r\n edit_event_dict['editor'] = facet.editor.credit_name\r\n edit_event_dict['credit'] = credit\r\n edit_event_dict['url'] = facet.get_absolute_url()\r\n edit_event_dict['start'] = time.mktime(facet.due_edit.timetuple()) * 1000\r\n edit_event_dict['end'] = (time.mktime(facet.due_edit.timetuple()) * 1000) + 60\r\n edit_event_dict['overlap'] = True\r\n edit_event_dict['allDay'] = False\r\n edit_event_dict['backgroundColor'] = '#00aced'\r\n edit_event_dict['textColor'] = '#fff'\r\n data.append(edit_event_dict)\r\n if facet.run_date:\r\n run_event_dict = {}\r\n run_event_dict['id'] = facet.id\r\n run_event_dict['title'] = facet.name.encode('utf-8')\r\n run_event_dict['description'] = facet.description.encode('utf-8')\r\n run_event_dict['due-edit'] = time.mktime(facet.due_edit.timetuple())\r\n run_event_dict['editor'] = facet.editor.credit_name\r\n run_event_dict['credit'] = credit\r\n run_event_dict['url'] = facet.get_absolute_url()\r\n run_event_dict['class'] = 'event_run'\r\n run_event_dict['start'] = time.mktime(facet.run_date.timetuple()) * 1000\r\n run_event_dict['end'] = (time.mktime(facet.run_date.timetuple()) * 1000) + 60\r\n run_event_dict['overlap'] = True\r\n run_event_dict['backgroundColor'] = '#5cb85c'\r\n run_event_dict['textColor'] = '#fff'\r\n data.append(run_event_dict)\r\n\r\n # print \"DATA: \", data\r\n\r\n return 
HttpResponse(json.dumps(data), content_type='application/json')", "def datefixer(ds):\n\n\n\t# ========== create the new dates ==========\n\tyear = ds.Year\n\n\t# +++++ set up the list of dates +++++\n\tdates = OrderedDict()\n\ttm = [dt.datetime(int(year) , int(np.floor(tm)), int(tm%1*30+1)) for tm in ds.time]\n\tdates[\"time\"] = pd.to_datetime(tm)\n\n\tdates[\"calendar\"] = 'standard'\n\tdates[\"units\"] = 'days since 1900-01-01 00:00'\n\t\n\tdates[\"CFTime\"] = date2num(\n\t\ttm, calendar=dates[\"calendar\"], units=dates[\"units\"])\n\n\treturn dates", "def poker(project):\r\n total_stories = len(project.unestimated_stories())\r\n for idx, story in enumerate(project.unestimated_stories()):\r\n clear()\r\n rows, cols = _get_column_dimensions()\r\n print \"{} PLANNING POKER SESSION [{}]\".format(project.name.upper(), bold(\"{}/{} Stories Estimated\".format(idx+1, total_stories)))\r\n print \"-\" * cols\r\n pretty_print_story(story)\r\n prompt_estimation(project, story)\r\n else:\r\n print \"KaBoom!!! Nice Work Team\"", "def get_data(user, token, start, end, weekly):\n gh = Github(user, token)\n\n issues = []\n commits = []\n commit_set = set()\n\n for project in PROJECTS:\n if weekly and project in PRIVATE:\n continue\n issues_new, commits_new = get_repo_data(gh, user, project, start, end)\n # Include all issues\n issues.extend(issues_new)\n\n if project in PROJECT_EXTRA_COMMITS:\n # Only include commits not seen so far from this repo\n for commit in commits_new:\n sha = commit.sha[:7]\n if sha not in commit_set:\n commits.append(commit)\n commit_set.add(sha)\n else:\n # Include all commits\n commits.extend(commits_new)\n commit_set.update([commit.sha[:7] for commit in commits_new])\n\n return issues, commits", "def fp_from_eggs(daily_num_eggs):\n eggs = kg_to_tonnes(daily_to_annual(daily_num_eggs*300)/1000)\n return eggs", "def get_issues(self, project, weeks=12):\n issues = {\n 'summary': [],\n 'assignee': [],\n 'reporter': [],\n 'description': [],\n 'created': [],\n 'updated': [],\n 'labels': [],\n 'status': []\n }\n\n jql = \"project={0} AND updated >= -{1}w\".format(project.key, weeks)\n project_issues = self.jira.search_issues(jql, maxResults=False, fields=['summary', 'description', 'comment', 'labels'])\n\n for issue in project_issues:\n issues['summary'].append(issue.fields.summary or '')\n issues['description'].append(issue.fields.description or '')\n assignee = issue.fields.assignee\n issues['assignee'].append(assignee.displayName if assignee else '')\n reporter = issue.fields.reporter\n issues['reporter'].append(reporter.displayName if reporter else '')\n issues['created'].append(issue.fields.created)\n issues['updated'].append(issue.fields.updated)\n issues['labels'].append(','.join(issue.fields.labels))\n issues['status'].append(issue.fields.status.name)\n\n return issues", "def get_last_seven_days():\n logs = json.load(open(\"seven_log\", \"r\"))\n days = [day for day in logs]\n usage = [[logs[day][gpu] for gpu in logs[day]] for day in logs]\n return days, usage", "def generate_daily_matrix(full_df, feat_days):\n pred_ticker = full_df.ticker.unique()[0]\n feature_tickers = [i for i in full_df.ticker.unique() if i != pred_ticker]\n dfml = full_df[full_df.ticker == pred_ticker].drop('ticker', axis=1)\n dfml.rename({'percent_change_pred': f'{pred_ticker}_percent_change_{feat_days}'}, axis=1, inplace=True)\n for ticker in feature_tickers:\n help_df = full_df[full_df.ticker == ticker][['past_date', 'current_date', 'prediction_date', 'percent_change_pred']]\n 
help_df.rename({'percent_change_pred': f'{ticker}_percent_change_{feat_days}'}, axis=1, inplace=True)\n dfml = pd.merge(dfml, help_df,\n left_on=['past_date', 'current_date', 'prediction_date'],\n right_on=['past_date', 'current_date', 'prediction_date'],\n how='left')\n return dfml.drop('percent_change_feat', axis=1)", "def getTickets( self, bSerial ):\n\n\t\ttry:\n\t\t\t# Get most recent 5 tickets for this serial\n\t\t\tsQuery = \"SELECT B.bug_id, P.realname, B.bug_status, B.short_desc, UNIX_TIMESTAMP( B.lastdiffed ) \" + \\\n\t\t\t\t\"FROM bugs B, components C, profiles P \" + \\\n\t\t\t\t\"WHERE \" + \\\n\t\t\t\t\t\"( B.component_id = C.id AND B.assigned_to = P.userid ) AND \" + \\\n\t\t\t\t\t\"( C.name = 'Fort Worth' OR C.name = 'Odessa' OR C.name = 'Contractor' ) AND \" + \\\n\t\t\t\t\t\"B.version = \" + str( bSerial ) + \" \" + \\\n\t\t\t\t\"ORDER BY B.bug_id DESC LIMIT 5\"\n\n\t\t\trgoResult = self._libDBbug.query( sQuery )\n\n\t\t\tif rgoResult is None:\n\t\t\t\treturn []\n\n\t\t\treturn rgoResult\n\n\t\texcept Exception, e:\n\t\t\terrMsg( 'error while getting open tickets for serial [%d]' % bSerial )\n\t\t\terrMsg( e )\n\t\t\traise Exception, \"System error while getting tickets.\"", "def construct_timeline(self, timexList, docFeatList):\n if not docFeatList:\n return timexList \n \n \n self.timeReferences = self.create_time_references(docFeatList, timexList)\n \n self.timexImpactZones = self.create_timex_impact_zone(timexList)\n \n timexList = self.evaluate_all_relative_timexes(timexList, docFeatList)\n \n# (expDate, expConf) = self.estimate_exposure_date(self.timeReferences, timexList)\n ##: expDate is obtained based on the first time. \n ##: Update time reference and re-estimate exposure time\n \n \n ##: Update time references after some features obtain their time from time impact zones\n self.timeReferences = self.update_time_references_with_impact_zones(docFeatList, timexList)\n \n (expDate, expConf) = self.estimate_exposure_date(self.timeReferences, timexList)\n \n if expDate:\n self.exposureDate = expDate\n self.exposureDateConfidence = expConf \n self.timeReferences[('Vaccination', None, None, None, None, None, expConf)] = self.exposureDate\n self.timeReferences[('Injection', None, None, None, None, None, expConf)] = self.exposureDate\n \n (onsetDate, onsetConf) = self.estimate_onset_date(docFeatList)\n if onsetDate:\n self.onsetDate = onsetDate\n self.onsetDateConfidence = onsetConf \n \n ##: Final scan for all features without assigned date time \n for feat in docFeatList:\n if self.sentence_tags[feat.getSentNum()]!='NORMAL': continue\n if not feat.getTlink() or not feat.getTlink().getDateTime():\n ##: feautures in clause should not be assigned a time. They should have been given a time somewhere else\n if feat.inClause():\n feat = self.assign_feature_time_with_references(feat, self.timeReferences, feat.getStartPos())\n ##: TLink could still be None if no reference is found. 
Then use the time from time impact zones\n if feat.getTlink():\n continue\n \n if feat.getType()=='DRUG' and 'concomitant' in [tg[0] for tg in self.sentence_full_tags[feat.getSentNum()]]:\n feat = self.assign_time_to_concomitant_drug(feat, docFeatList)\n if feat.getTlink():\n continue\n \n if not self.timexImpactZones or feat.getStartPos() < self.timexImpactZones[0][0]: ##: feature locates before any time zones\n ##: Assignment on features in the begining for VAERS\n if self.reportType == 'vaers': \n feat = self.assign_feature_time_with_references(feat, self.timeReferences) \n continue\n \n feat = self.assign_feature_time_with_impact_zones(feat, self.timexImpactZones)\n \n return timexList", "def test_git_py2py3_expired_major_grace_period(self):\n obsolete_dep_info = dict(UP_TO_DATE_DEPS)\n obsolete_dep_info['google-auth'] = {\n 'current_time': datetime.datetime(2019, 3, 23, 0, 0, 0),\n 'installed_version': '0.9.9',\n 'installed_version_time': datetime.datetime(\n 2019, 2, 19, 21, 15, 56),\n 'is_latest': False,\n 'latest_version': '1.0.0',\n 'latest_version_time': datetime.datetime(2019, 2, 19, 21, 15, 56)\n }\n\n obsolete_dep_compat_results = list(RECENT_SUCCESS_DATA)\n obsolete_dep_compat_results.remove(\n GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2)\n obsolete_dep_compat_results.remove(\n GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3)\n obsolete_dep_compat_results.append(\n GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2.with_updated_dependency_info(\n obsolete_dep_info))\n obsolete_dep_compat_results.append(\n GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3.with_updated_dependency_info(\n obsolete_dep_info))\n\n self.fake_store.save_compatibility_statuses(\n obsolete_dep_compat_results)\n package_name = 'git+git://github.com/google/api-core.git'\n self.assertImageResponseGithub(package_name)\n self.assertTargetResponse(\n package_name, self.expired_major_grace_period_expected_details)", "def create_ticket(data):\n firebase_uid = data['session'].split('/')[-1]\n contexts = data['queryResult']['outputContexts']\n for i in contexts:\n if 'visit_data' in i['name']:\n context = i\n break\n\n date = datetime.datetime.now()\n date = date.strftime(\"%d-%m-%Y\")\n\n raw_params = context['parameters']\n ticket_params = {\n \"Agent\": \"None\",\n \"Product Type\": raw_params[\"product_type\"],\n \"Type\": \"House Call\",\n \"Issue Type\": raw_params[\"issue_type\"],\n \"Description\": raw_params[\"description\"],\n \"Model Number\": raw_params[\"model_number\"],\n \"Serial Number\": raw_params[\"serial_number\"],\n \"Status\": \"Open\",\n \"Date\": date,\n \"Time Slot Chosen\": \"0\",\n \"Time Slots\": {\"Slot 1\": {\"Time\": \"0\", \"Date\": \"0\"},\n \"Slot 2\": {\"Time\": \"0\", \"Date\": \"0\"},\n \"Slot 3\": {\"Time\": \"0\", \"Date\": \"0\"}},\n \"Progress\": \"Under Review\",\n \"Free Time\": {\n \"Date\": \"0\",\n \"Time\": \"0\",\n },\n \"Details of Call\": {\n \"Time\": \"0\",\n \"Date\": \"0\"}\n }\n ticket_id = str(uuid.uuid4())[:8]\n db = firebase.database()\n db.child(\n 'user_data').child(\n firebase_uid).child(\n 'Complaints').child(ticket_id).set(ticket_params)\n\n fulfillment_response = {\n \"fulfillmentText\":\n \"You ticket was successfully registered. The reference number is \" + ticket_id +\n \". Based on the availability of our agents, we will give you three time slots to choose from. 
You can \"\n \"either go to the \\\"Tickets\\\" section of the app and update your preference or do so by talking to me.\"}\n return fulfillment_response", "def post_defects(project, jira_issues, defects):\n payload = \"\"\n for defect in defects:\n #TODO: this is a hack which can be removed once, excel docs are done away with.\n if defect[\"assignee\"] == \"Unassigned\":\n defect[\"assignee\"] = None\n\n data = {\"host\": host,\n \"time\": int(datetime.datetime.strptime(defect[\"report_date\"], DATE_FORMAT).strftime(\"%s\")) * 1000,\n \"event\": defect,\n \"index\": INDEX,\n \"source\": \"defect\"}\n if config.splunk[config.environment].payload_limit and len(payload) + len(data) >= config.splunk[config.environment].payload_limit:\n logger.info(\"Reached length: {}, Restarting\".format(len(payload)))\n rsp = post_to_splunk(payload=payload)\n logger.info(\"Successfully posted batched data to Splunk {}\".format(project))\n payload = \"{}\".format(json.dumps(data))\n else:\n payload += \" {}\".format(json.dumps(data))\n\n rsp = post_to_splunk(payload=payload)\n logger.info(\"Successfully posted data to splunk for {}\".format(project))\n return {project: rsp.status_code, \"defects_require_fixing\": str(len(jira_issues) - len(defects))}", "def blank_tbr(cc): # pragma: no cover\n cc.execute(\"\"\"SELECT DISTINCT DATE_FORMAT(git_commit.timestamp, '%Y-%m')\n FROM git_commit\"\"\")\n months = cc.fetchall()\n results = []\n for month in months:\n month = month[0]\n cc.execute(\"\"\"SELECT COUNT(*)\n FROM commit_people\n INNER JOIN git_commit\n ON commit_people.git_commit_hash = git_commit.hash\n WHERE commit_people.people_email_address = 'NOBODY'\n AND YEAR(git_commit.timestamp) = %s\n AND MONTH(git_commit.timestamp) = %s\"\"\" % (month[:4], month[5:]))\n result = cc.fetchone()\n results.append([month, int(result[0])])\n return results", "def today(session_cookie: str) -> T:\n year = current_year()\n day = current_day()\n return Puzzle(year, day, session_cookie)", "def cinema_trip(persons, day, premium_seating, treat):\n #fill in your code here\n return tickets(persons, day, premium_seating) + refreshment(treat)", "async def recentchanges(self, ctx, limit=50):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Wiki.recentchanges: ' + str(limit), extra={'invoker': ctx.message.author.name})\r\n twenties, limit = divmod(limit, 20)\r\n async with ctx.channel.typing():\r\n result = ['']\r\n changes = []\r\n start = 'now'\r\n for i in [20 for j in range(twenties)] + [limit]:\r\n resp = await self.req({\r\n 'action': 'query',\r\n 'list': 'recentchanges',\r\n 'rcprop': 'user|timestamp|comment|title|sizes|flags',\r\n 'rctype': 'edit|new',\r\n 'rclimit': i,\r\n 'rcstart': start\r\n })\r\n changes.extend(resp['query']['recentchanges'])\r\n start = resp['query']['recentchanges'][-1]['timestamp']\r\n i = 0\r\n for ch in changes:\r\n change = '\\n'\r\n change += ch['timestamp']\r\n change += ': '\r\n change += ch['title']\r\n change += '; '\r\n sizechange = ch['newlen'] - ch['oldlen']\r\n if sizechange <= -500 or sizechange >= 500:\r\n change += '**'\r\n change += '('\r\n if sizechange <= 0:\r\n change += str(sizechange)\r\n if sizechange > 0:\r\n change += '+' + str(sizechange)\r\n change += ')'\r\n if sizechange <= -500 or sizechange >= 500:\r\n change += '**'\r\n change += ' . . 
'\r\n change += ch['user']\r\n change += ' _('\r\n change += ch['comment'].replace('*', '\\\\*').replace('_', '\\\\_').replace('`', '\\\\`')\r\n change += ')_'\r\n result[i] += change\r\n if len(result[i]) > 2000:\r\n result.append('')\r\n result[i], result[i+1] = result[i].rsplit('\\n', 1)\r\n i += 1\r\n for r in result:\r\n await ctx.send(r)", "def test_shotgun():\n events = [['Event', '2017-11-22T11:30:00-08:00', '2017-11-22T12:10:00-08:00'],\n ['Event', '2017-11-22T12:00:00-08:00', '2017-11-22T13:00:00-08:00'],\n ['Event', '2017-11-22T12:30:00-08:00', '2017-11-22T13:30:00-08:00'],\n ['Event', '2017-11-23T10:00:00-08:00', '2017-11-23T11:20:00-08:00'],\n ['Event', '2017-11-23T14:00:00-08:00', '2017-11-23T15:00:00-08:00'],\n ['Event', '2017-11-24T14:30:00-08:00', '2017-11-25T19:00:00-08:00'],\n ['Event', '2017-11-25T12:00:00-08:00', '2017-11-25T13:00:00-08:00'],\n ['Event', '2017-11-26T11:30:00-08:00', '2017-11-26T12:10:00-08:00'],\n ['Event', '2017-11-26T12:30:00-08:00', '2017-11-26T13:30:00-08:00'],\n ['Event', '2017-11-28T10:00:00-08:00', '2017-11-28T11:20:00-08:00'],\n ['Event', '2017-11-28T12:00:00-08:00', '2017-11-28T13:00:00-08:00'],\n ['Event', '2017-11-28T14:00:00-08:00', '2017-11-28T15:00:00-08:00']]\n\n freetimes, _ = free(events, 9, 0, 17, 0, day_range, 30)\n fmt_freetime = output_format(freetimes)\n print(fmt_freetime)\n for i in fmt_freetime:\n print(i)\n assert fmt_freetime == ['Tue, Nov 21, 9:00 am to Tue, Nov 21, 5:00 pm.',\n 'Wed, Nov 22, 9:00 am to Wed, Nov 22, 11:30 am.',\n 'Wed, Nov 22, 1:30 pm to Wed, Nov 22, 5:00 pm.',\n 'Thu, Nov 23, 9:00 am to Thu, Nov 23, 10:00 am.',\n 'Thu, Nov 23, 11:20 am to Thu, Nov 23, 2:00 pm.',\n 'Thu, Nov 23, 3:00 pm to Thu, Nov 23, 5:00 pm.',\n 'Fri, Nov 24, 9:00 am to Fri, Nov 24, 2:30 pm.',\n 'Sun, Nov 26, 9:00 am to Sun, Nov 26, 11:30 am.',\n 'Sun, Nov 26, 1:30 pm to Sun, Nov 26, 5:00 pm.',\n 'Mon, Nov 27, 9:00 am to Mon, Nov 27, 5:00 pm.',\n 'Tue, Nov 28, 9:00 am to Tue, Nov 28, 10:00 am.',\n 'Tue, Nov 28, 11:20 am to Tue, Nov 28, 12:00 pm.',\n 'Tue, Nov 28, 1:00 pm to Tue, Nov 28, 2:00 pm.']", "def create_defect(jira_dict, issue):\n defect = deepcopy(jira_dict)\n\n if jira_dict[\"sdlc_phase\"].lower() == \"closed\":\n created_dt = datetime.datetime.strptime(defect[\"created\"], DATE_FORMAT)\n resolved_dt = datetime.datetime.strptime(defect[\"resolved\"], DATE_FORMAT)\n\n if (resolved_dt - created_dt).days == 0:\n defect[\"age\"] = 0 if (resolved_dt.month == created_dt.month and\n resolved_dt.day == created_dt.day) else 1\n else:\n timedelta = resolved_dt - created_dt\n defect[\"age\"] = int(round(float((timedelta.days*86400 + timedelta.seconds)/(86400)), 0))\n else:\n timedelta = datetime.datetime.strptime(defect[\"report_date\"], DATE_FORMAT) - datetime.datetime.strptime(defect[\"created\"], DATE_FORMAT)\n defect[\"age\"] = int(round(float((timedelta.days*86400 + timedelta.seconds)/(86400)), 0))\n\n return defect", "def makeVideo():\n weekNumber = 11\n for _ in range(10):\n df = loadDbIntoDf2('trending')\n df_copy = df.copy()\n df_shorter = selectTop(df_copy,'week',weekNumber , 'trending')\n vid_dl = download(df_shorter,weekNumber)\n merge(vid_dl,weekNumber)\n weekNumber = weekNumber + 1", "async def tickets(ctx, user: discord.User=None):\n\n tickets_emb = discord.Embed(\n title=\"Active support tickets\",\n color=EMBED_COLOR\n )\n\n if user is not None:\n tickets_emb.description = \"All open tickets of the given user.\"\n tickets_emb.set_author(\n name=f\"{user.name}#{user.discriminator}\",\n icon_url=user.avatar_url\n )\n\n 
db_user = User.select(graph, user.id).first()\n\n ticket_list = list(db_user.tickets)\n\n else:\n tickets_emb.description = \"All open tickets of this guild.\"\n\n guild = Guild.select(graph, ctx.guild.id).first()\n\n ticket_list = list(guild.tickets)\n\n # TODO: check scopes\n ticket_list = list(filter(lambda t: t.state != 'closed', ticket_list))\n ticket_list.reverse()\n\n if len(ticket_list) == 0:\n await ctx.send(\"There are no active support tickets.\")\n return None\n\n for ticket in ticket_list:\n tickets_emb.add_field(\n name=f\"#{ticket.id} || {ticket.title}\",\n value=ticket.description,\n inline=False\n )\n\n tickets_emb.set_footer(\n text=\"To see all properties of a ticket use the 'ticket show' command.\"\n )\n\n await ctx.send(embed=tickets_emb)", "def get_create_projects(target, proposal_ref, proposal_code='lb'):\n\n # Note that in the loader this is based on information in the PROPOSALS and VISITS files\n # TODO Multiple Visits can be defined in a file apparently - future improvement.\n # TODO NB LIne above in delete_users - redundant if using ISPYB??.\n # For the online loader it comes from the proposal_ref\n\n projects = []\n # The first word is the ISPY proposal/visit name that is used as the title of the project.\n # It can be set to OPEN in which case there are no users.\n visit = proposal_ref.split()[0]\n # If the visit is not prefixed by the proposal code\n # (typically a 2-letter sequence like \"lb\") then prefix it.\n if visit[0].isdigit():\n visit = f\"{proposal_code}{visit}\"\n project = Project.objects.get_or_create(title=visit)[0]\n projects.append(project)\n\n # If not open then delete users for the project and re-add them based on supplied fed-ids.\n delete_users(project)\n\n # Update project_id on target.\n target.project_id.add(project)\n\n # Remaining words in proposal_ref (if any) must be fedid's which are used to find users information.\n num_users = 0\n for fedid in proposal_ref.split()[1:]:\n user = User.objects.get_or_create(username=fedid, password=\"\")[0]\n project.user_id.add(user)\n num_users += 1\n if num_users == 0:\n project.open_to_public = True\n\n target.upload_progess = 10.00\n target.save()\n\n return projects", "def getBuild(number):", "def getBuild(number):", "def generateTheWorkspace(self):\n \"\"\"\n ########################################################################################\n This line of code will obtain the name of the Workspace.////////////////////////////////\n ########################################################################################\n \"\"\"\n theWorkspaceName = self.readThe['WorkspaceName'].get_value(0)\n\n \"\"\"\n ########################################################################################\n This block of code will generate a custom iso formatted date./////////////////////////// \n ########################################################################################\n \"\"\"\n\n theCreatedDay = datetime.today().day\n theCreatedCurrentDay = int(theCreatedDay)\n theCreatedMonth = datetime.today().month\n theCreatedCurrentMonth = int(theCreatedMonth)\n theCreatedYear = datetime.today().year\n theCreatedCurrentYear = int(theCreatedYear)\n theCreatedHour = datetime.today().hour\n theCreatedCurrentHour = int(theCreatedHour)\n theCreatedMinute = datetime.today().minute\n theCreatedCurrentMinute = int(theCreatedMinute)\n theCreatedSecond = datetime.today().second\n theCreatedCurrentSecond = int(theCreatedSecond)\n theCreatedMicrosecond = datetime.today().microsecond\n 
theCreatedCurrentMicrosecond = int(theCreatedMicrosecond)\n theCurrentCreatedDate = datetime(theCreatedCurrentYear, theCreatedCurrentMonth, theCreatedCurrentDay,\n theCreatedCurrentHour, theCreatedCurrentMinute, theCreatedCurrentSecond,\n theCreatedCurrentMicrosecond)\n theCreatedDate = theUpdatedDate = theCurrentCreatedDate.isoformat() + 'Z'\n\n theIntentColumn = self.readThe['Intents']\n theIntentsArray = []\n theCounter = 0\n for each in theIntentColumn:\n theIntentExamplesArray = []\n theIntentName = self.readThe['Entity'].get(theCounter)\n\n example1 = {\n \"text\": \"¿\" + theIntentName + \"?\",\n \"created\": theCreatedDate,\n \"updated\": theCreatedDate\n }\n\n example2 = {\n \"text\": \"\" + theIntentName + \"?\",\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate\n }\n example3 = {\n \"text\": \"¿Qué es un \" + theIntentName + \"?\",\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate\n }\n example4 = {\n \"text\": \"¿Que es un \" + theIntentName + \"?\",\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate\n }\n example5 = {\n \"text\": \"Qué es un \" + theIntentName + \"?\",\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate\n }\n example6 = {\n \"text\": \"Que es un \" + theIntentName + \"?\",\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate\n }\n\n example7 = {\n \"text\": theIntentName + \", ¿qué es?\",\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate\n }\n\n theIntentExamplesArray.append(example1)\n theIntentExamplesArray.append(example2)\n theIntentExamplesArray.append(example3)\n theIntentExamplesArray.append(example4)\n theIntentExamplesArray.append(example5)\n theIntentExamplesArray.append(example6)\n theIntentExamplesArray.append(example7)\n\n theClientExamples = self.readThe['Examples']\n if theClientExamples.count() > 0:\n theCustomExamples = theClientExamples.get_value(theCounter)\n each_custom_intent = str(theCustomExamples)\n if not each_custom_intent == \"nan\":\n theQuestionsArray = each_custom_intent.split(\";\")\n for each_example in theQuestionsArray:\n theCustomExampleIntent = {\n \"text\": each_example,\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate\n }\n theIntentExamplesArray.append(theCustomExampleIntent)\n else:\n print(\"There are NO client custom examples for this intent {}.\".format(theIntentName))\n else:\n print(\"Well, there are some that have, others don't.\")\n theIntents = {\n \"intent\": each,\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate,\n \"examples\": theIntentExamplesArray,\n \"description\": None\n }\n\n theIntentsArray.append(theIntents)\n theCounter += 1\n\n theEntityColumn = self.readThe['Entity']\n theEntitiesArray = []\n\n for each in theEntityColumn:\n theValuesArray = []\n each = str(each)\n theValues = {\n \"type\": \"synonyms\",\n \"value\": each,\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate,\n \"metadata\": None,\n \"synonyms\": getSynonyms(each)\n }\n theValuesArray.append(theValues)\n\n theEntities = {\n \"entity\": each,\n \"values\": theValuesArray,\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate,\n \"metadata\": None,\n \"description\": None\n }\n theEntitiesArray.append(theEntities)\n\n theLanguage = self.readThe['Language'].get_value(0)\n\n theFormattedYear = datetime.today().year\n theYearAsNumber = str(theFormattedYear)\n theFormattedMonth = datetime.today().month\n theMonthAsNumber = str(theFormattedMonth)\n theFormattedDay = datetime.today().day\n theDayAsNumber = str(theFormattedDay)\n 
theCreatedDateFormatted = \"{}-{}-{}\".format(theYearAsNumber, theMonthAsNumber, theDayAsNumber)\n\n theMetaDataMajorVersion = 'v1'\n theMetaDataMinorVersion = theCreatedDateFormatted\n theWorkspaceMetaDataAPI_VERSION = {\n \"major_version\": theMetaDataMajorVersion,\n \"minor_version\": theMetaDataMinorVersion\n }\n theWorkspaceMetaData = {\n \"api_version\": theWorkspaceMetaDataAPI_VERSION\n }\n\n theWorkspaceDescription = self.readThe['Description'].get_value(0)\n\n theDialogNodesArray = []\n\n theWorkspaceID = '1234'\n\n theWorkspaceCounterExamples = None\n\n theWorkspaceLearningOptOut = False\n\n theFinalWorkspace = {\n \"name\": theWorkspaceName,\n \"created\": theCreatedDate,\n \"intents\": theIntentsArray,\n \"updated\": theUpdatedDate,\n \"entities\": theEntitiesArray,\n \"language\": theLanguage,\n \"metadata\": theWorkspaceMetaData,\n \"description\": theWorkspaceDescription,\n \"dialog_nodes\": theDialogNodesArray,\n \"workspace_id\": theWorkspaceID,\n \"counterexamples\": theWorkspaceCounterExamples,\n \"learning_opt_out\": theWorkspaceLearningOptOut\n }\n\n return str(dict(theFinalWorkspace))", "def year_tracker(words):\n new_words = []\n for w in words:\n new_word = re.sub(r\"^[1][789][0-9]{2}$\", \"jahreszahl\", w) # for 1700-1999\n new_word = re.sub(r\"^[2][01][0-9]{2}$\", \"jahreszahl\", new_word) # for 2000-2199\n new_words += [new_word]\n return new_words", "def latestGamePack(team):\n lgr= get('schedule', {'ver':'v1', 'sportId':1, 'date':today, 'teamId':team, 'fields':['dates','games','gamePk'] })\n return lgr['dates'][0]['games'][0]['gamePk']", "def feature():\n mesosite = iemdb.connect('mesosite', bypass=True)\n mcursor = mesosite.cursor(cursor_factory=psycopg2.extras.DictCursor)\n lastts = mx.DateTime.now() + mx.DateTime.RelativeDateTime(days=-1)\n # Query\n mcursor.execute(\"\"\"\n SELECT *, to_char(valid, 'DD Mon HH:MI AM') as nicedate \n from feature WHERE date(valid) = 'YESTERDAY'\"\"\")\n textfmt = \"\"\"\n +----------------------------------------------\n%(link)s\n | Title : %(title)s\n | Date : %(nicedate)s\n | Votes : Good: %(good)s Bad: %(bad)s\n +----------------------------------------------\n\n%(story)s\n\n\"\"\"\n htmlfmt = \"\"\"\n<p><a href=\"%(link)s\">%(title)s</a>\n<br /><strong>Date:</strong> %(nicedate)s\n<br /><strong>Votes:</strong> Good: %(good)s &nbsp; Bad: %(bad)s\n\n<p>%(story)s\n\n\"\"\"\n txt = \"> Daily Feature\\n\"\n html = \"<h3>Daily Feature</h3>\"\n\n for row in mcursor:\n row2 = row.copy()\n row2['link'] = \"http://mesonet.agron.iastate.edu/onsite/features/cat.php?day=%s\" % (lastts.strftime(\"%Y-%m-%d\"),)\n txt += textfmt % row2\n html += htmlfmt % row2\n if mcursor.rowcount == 0:\n txt += \"\\n No feature posted\\n\\n\"\n html += \"<strong>No feature posted</strong>\"\n\n return txt, html", "def queryFlywheel(project):\n\n # Create info dict with entries for each subject.\n info = dict()\n\n # Loop through subjects in project\n #for sub in subjects:\n for sub in project.subjects():\n\n # Loop through sessions in subject\n for ses in sub.sessions():\n ses = ses.reload()\n\n # Loop through acquisitions in session\n for acq in ses.acquisitions():\n acq = acq.reload()\n\n # Loop through files in acquisition\n for f in acq.files:\n \n # Skip over non-nifti files\n if f.type != 'nifti':\n next\n\n # Get Flywheel fileId to use as unique identifier\n fileId = f.id\n\n # Try to get timestamp (sometimes DateTime field isn't present.) 
\n try:\n timestamp = f.info['AcquisitionDateTime']\n except KeyError:\n try:\n timestamp = f.info['AcquisitionDate']\n # Set to None if field isn't present\n except:\n timestamp = pd.NaT\n \n # Try to get series number (sometimes field isn't present.) \n try:\n seriesNum = f.info['SeriesNumber']\n # Set to None if field isn't present\n except:\n np.NaN \n # Add the folowing metadata to study info dict:\n # fileID: [subId, sesId, acqLabel, fileName, seriesNum, timestamp]\n info[fileId] = [sub.label, ses.label, acq.label, f.name, seriesNum, timestamp]\n \n # Return project info dict\n return info", "def _get_open_projects_info():\n projects = Project.objects.filter(project_open=True).order_by(\"created_at\")\n projects_sum_hours = []\n for project in projects:\n time_entries_pro_project = TimeEntry.objects.filter(project=project)\n used_hours = _sum_hours(time_entries_pro_project)\n hours_percent = _calculate_hours_percent(used_hours, project.stimated_hours)\n projects_sum_hours.append(\n {\n \"hours_percent_number\": hours_percent,\n \"hours_percent\": f\"{hours_percent}%\",\n \"worked_hours\": used_hours,\n \"project\": project,\n }\n )\n return projects_sum_hours", "def weekly():", "def create_time_references(self, docFeatList, timexList): \n timeReferences = {} \n \n confidence = 1 \n ##: confidence = 1: input exposure and onset date; \n ##: = 0.9: with tags of interest;\n ##: = 0.8: obtained from extracted vaccines\n ##: = 0.7: obtained from extracted drugs\n ##: = 0.6: date of drug or vaccine is obtained from time impact zone\n if self.exposureDate: ##: input exposure date is available\n self.exposureDateConfidence = 1 \n timeReferences[('Vaccination', None, None, 0, None, None, confidence)] = self.exposureDate\n timeReferences[('Injection', None, None, 0, None, None, confidence)] = self.exposureDate\n if self.onsetDate: ##: input onset date is available\n self.onsetDateConfidence = 1\n timeReferences[('Onset', None, None, 0, None, None, confidence)] = self.exposureDate\n \n if self.receiveDate:\n timeReferences[('Administration', None, None, None, None, None, 1)] = self.receiveDate\n \n coordFeatTypes = set(['VACCINE', 'DRUG']) \n ##: add tags in features into coordinates\n for feature in docFeatList:\n \n if not feature.getType() in coordFeatTypes:\n continue\n \n if feature.inClause(): continue\n \n sentnumber = feature.getSentNum()\n \n if feature.getType()=='VACCINE':\n coordType = 'Vaccine'\n confidence = 0.8\n else: # DRUG\n coordType = 'Drug'\n confidence = 0.7\n \n tlink = feature.getTlink()\n if tlink:\n ##: Handle features with does number\n counts = []\n if 'DoseIndicator' in [tg[1] for tg in self.sentence_full_tags[sentnumber]]:\n counts = [tg[0] for tg in self.sentence_full_tags[sentnumber] if tg[1]=='Count']\n \n timexes = [t for t in tlink.getTimexes() if t.getDateTime() and t.getRole()!='IGNORE']\n \n if self.get_drug_dose_number(feature) and len(counts) == len(timexes):\n for i, t in enumerate(timexes):\n val = util.text2num.convertOrdinal(counts[i])\n timeReferences[(coordType, feature.getString(), val, sentnumber, feature.getStartPos(), t.getStartPos(), confidence)] = t.getDateTime()\n else:\n for t in timexes:\n timeReferences[(coordType, feature.getString(), 0, sentnumber, feature.getStartPos(), t.getStartPos(), confidence)] = t.getDateTime() \n \n exposureSet = ['Vaccination', 'Injection']\n anchorSet = ['Hospitalization', 'Administration']\n for sentnum, sentence in enumerate(self.sentences):\n tags = set([tg[1] for tg in self.taggedSentences[sentnum]])\n 
timexes = [t for t in timexList if t.getDateTime() and t.getSentNum()==sentnum and t.getRole()!='IGNORE']\n if timexes:\n sent_start = self.sentence_startPos[sentnum]\n intersect = tags.intersection(anchorSet)\n for st in intersect:\n words = [tg[0] for tg in self.taggedSentences[sentnum] if tg[1]==st]\n wordPos = [sentence.lower().find(word) for word in words]\n validWords = [pos for pos in wordPos if not self.is_in_clause(pos+sent_start, sentnum)]\n if not validWords:\n continue \n coord = (st, '', None, sentnum, None, None, 0.9)\n if not coord in timeReferences:\n timeReferences[coord] = timexes[0].getDateTime()\n \n ref =[]\n if tags.intersection(exposureSet):\n tgs = [tg for tg in self.taggedSentences[sentnum] if tg[1] in exposureSet]\n ref = tgs[0]\n \n if tags.intersection(['Treatment']):\n tokens = set([tg[0].lower() for tg in self.sentence_full_tags[sentnum]])\n intst = tokens.intersection(['started', 'starts', 'begins', 'began'])\n if intst:\n ref = (list(intst)[0], 'Injection') \n \n if ref:\n word = ref[0].lower()\n wpos = sentence.lower().find(word) + sent_start\n if self.is_in_clause(wpos, sentnum):\n continue\n leftTimexes = [t for t in timexes if t.getStartPos() <= wpos]\n rightTimexes = [t for t in timexes if t.getStartPos() >= wpos]\n if not leftTimexes:\n dt = rightTimexes[0].getDateTime()\n elif not rightTimexes:\n dt = leftTimexes[-1].getDateTime()\n else:\n leftSeg = self.text[leftTimexes[-1].getEndPos():wpos]\n rightSeg = self.text[wpos+len(word):rightTimexes[0].getStartPos()]\n \n if self.is_next_separated(leftSeg, rightSeg):\n dt = leftTimexes[-1].getDateTime()\n else:\n dt = rightTimexes[0].getDateTime() \n timeReferences[(ref[1], word, None, sentnum, wpos, wpos+len(word), 0.9)] = dt\n \n return timeReferences", "def index(http_request, year=datetime.datetime.now().strftime(\"%Y\"), month=datetime.datetime.now().strftime(\"%m\")):\n\t# make sure the year number and month number are ints\n\tyear = int(year)\n\tmonth = int(month)\n\ttimestamp = datetime.datetime(year, month, 1)\n\t\n\t#initialize container for dates to be stored\n\tdate_list = []\n\t\n\tevents = Event.objects.filter(edate__year=year).filter(edate__month=month)\n\tfor event in events:\n\t\tdate_list.append({'id':event.id, 'day':datetime.date(event.edate.year, event.edate.month, event.edate.day), 'title':event.title, 'class':'event'})\n\n\tprojects = Project.objects.filter(due__year=year).filter(due__month=month)\n\tfor project in projects:\n\t\tdate_list.append({'id':project.id, 'day':datetime.date(project.due.year, project.due.month, project.due.day), 'title':project.name, 'class':'projects'})\n\t\t\t\n\t# next month's timestamp\n\tif month == 12:\n\t\tnext_month = datetime.datetime(year+1, 1, 1)\n\telif month < 12:\n\t\tnext_month = datetime.datetime(year, month+1, 1)\n\t\n\tupcoming_projects = Project.objects.filter(due__year=next_month.year).filter(due__month=next_month.month)\n\t\n\t\n\treturn render_to_response('schedule_cal.html', \n\t\t\t\t {'date_list':date_list, \n\t\t\t\t 'date':timestamp, \n 'urlprefix': urlprefix (),\n\t\t\t\t 'upcoming_projects':upcoming_projects}, \n\t\t\t\t )", "def main():\n now = datetime.now()\n if now <= datetime(month=7, day=4, year=now.year):\n year = now.year\n else:\n year = now.year + 1\n then = datetime(month=7, day=4, year=year)\n diff = then - now\n num_days = diff.days\n print(f\"There are {num_days} days until July 4th\")\n client = Client(TWILIO_ACCT_SID, TWILIO_AUTH_TOKEN)\n for number in OUTGOING_LIST:\n print(f\"Sending countdown message to 
{number}\")\n try:\n message = client.messages.create(body=num_days,\n from_=SENDING_NUMBER,\n to=str(number))\n print(f\"Message has been sent :: {message.sid}\")\n except TwilioRestException as e:\n print(e)\n print(f\"ERROR: Could not send quote to phone number {number}\")", "def get_featured_projects(self):\n featured_projs = FeaturedProject.objects.order_by('id')[0:3]\n highlighted = []\n activities = personalize_activities_dict(self.request.user)\n try:\n for featured in featured_projs:\n try:\n activity = activities[featured.project.id_label]\n if featured.description:\n activity['commentary'] = featured.description\n highlighted.append(activity)\n except KeyError:\n pass\n return highlighted\n except (ValueError, TypeError):\n return []", "def fp_from_cheese(daily_g_cheese):\n cheese = kg_to_tonnes(daily_to_annual(daily_g_cheese*12)/1000)\n return cheese", "def generate_father_day_planning(days_to_countries=None):\n if days_to_countries is None:\n days_to_countries = get_father_days()\n\n dates = list(days_to_countries.keys())\n\n\n for i,date in enumerate(dates):\n dates[i] = parse(date,default=datetime.datetime(2020,1,1))\n #days_to_countries[date].sort()\n\n\n\n dates.sort()\n\n\n for date in dates:\n date = date.strftime('%B %d')\n date = re.sub(r'0(\\d)',r'\\1',date)\n\n countries = days_to_countries[date]\n\n print(date)\n\n for country in countries:\n print(f'- {country}')\n\n print()\n\n\n # you code", "def _create_dummy_project(self,projectname=\"testproject\"):\n # Create three types of users that exist: Root, can do anything, \n # projectadmin, cam do things to a project he or she owns. And logged in\n # user \n \n #created in _create_main_project_and_root.\n root = self.root\n # non-root users are created as if they signed up through the project, \n # to maximize test coverage. \n \n # A user who has created a project\n projectadmin = self._create_random_user(\"projectadmin_\")\n \n testproject = self._create_comicsite_in_admin(projectadmin,projectname)\n create_page_in_admin(testproject,\"testpage1\")\n create_page_in_admin(testproject,\"testpage2\")\n \n # a user who explicitly signed up to testproject\n participant = self._create_random_user(\"participant_\")\n self._register(participant,testproject)\n \n # a user who only signed up but did not register to any project\n registered_user = self._create_random_user(\"comicregistered_\")\n \n #TODO: How to do this gracefully? \n return [testproject,root,projectadmin,participant,registered_user]", "def updateG(self, dt):\n\t\tself.tissue.G.project( (self.initial * dt + Identity(3)) * self.tissue.G )", "def getPendingBuilds():", "def getPendingBuilds():", "def list(projectname):\n backedamount=0\n con = lite.connect(databasefile)\n with con:\n cur = con.cursor() \n cur.execute(\"SELECT Id FROM projects where name=?\", (projectname,))\n exists = cur.fetchone()\n if exists:\n cur.execute(\"SELECT * FROM backers where Projectname=?\", (projectname,))\n rows = cur.fetchall()\n numbackers=len(rows)\n for row in rows:\n backedamount+=row[4]\n click.echo(\"-- %s backed for $%-.2f\" % (row[1],row[4]))\n else:\n click.echo(\"That project doesn't exist!\")\n sys.exit()\n\n cur.execute(\"SELECT Tamount FROM projects where name=?\", (projectname,))\n tamount = cur.fetchone()\n if tamount[0] > backedamount:\n amountneeds = tamount[0] - backedamount\n click.echo(\"%s needs $%-.2f more dollars to be successful. It has %d backers\" % (projectname,amountneeds,numbackers))\n else:\n click.echo(\"%s is successful! 
It has %d backers\" % (projectname,numbackers))", "def show_weeks_tasks(self):\n for day in [datetime.today() + timedelta(days=i) for i in range(7)]:\n tasks = self.session.query(self.Table).filter(self.Table.deadline == day.strftime('%Y-%m-%d')).\\\n order_by(self.Table.deadline).all()\n print(f'{day.strftime(\"%A\")} {day.strftime(\"%d %b\")}:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()", "def get_calendar_items(self, take=5):\n url = 'https://www.rova.nl/api/waste-calendar/upcoming'\n # request data from rova API and save response first 5 items (default)\n response = requests.get(url, params={\n 'postalcode': self.zip_code,\n 'houseNumber': self.house_number,\n 'addition': self.house_addition,\n 'take': take,\n })\n\n response.raise_for_status()\n\n rova_response = response.json()\n\n items = []\n types = []\n # add next pickup date for each garbage type\n for item in rova_response:\n date = datetime.strptime(item[\"date\"], \"%Y-%m-%dT%H:%M:%SZ\")\n date = date.strftime(\"%Y-%m-%dT%H:%M:%S\")\n garbage_type = item[\"garbageTypeCode\"].upper()\n\n items.append({\n 'GarbageTypeCode': garbage_type,\n 'Date': date\n })\n types.append(garbage_type)\n return items", "def Tobs_past_year(): \n results = pd.DataFrame(session.query(Measurement.date,Measurement.tobs).\\\nfilter(Measurement.date.between(One_yrs_ago,current_time)).all());\n\n dates_of_last_year=list(results.sort_values(by='date')['date'].unique()) \n aa1=results.sort_values(by='date').groupby('date')\n last_year_tobs={dates_of_last_year[i]:list(aa1.get_group(dates_of_last_year[i])['tobs'])\\\n for i in range(len(aa1))}\n print(f\"Route /api/v1.0/tobs/past_year is being visited\")\n return jsonify(last_year_tobs)", "def recent_comic_titles():\r\n\treturn [comic.title for comic in Comic.objects.all().order_by('-created_on')[0:10]]" ]
[ "0.5335107", "0.52704185", "0.5256979", "0.5130906", "0.50964624", "0.5023923", "0.5006047", "0.49849492", "0.4897661", "0.4888886", "0.4882372", "0.486753", "0.4858375", "0.48496896", "0.4849528", "0.48321384", "0.48075008", "0.47820213", "0.47624293", "0.4758167", "0.47529867", "0.47324315", "0.47247747", "0.47106138", "0.46973166", "0.46779072", "0.46518856", "0.46518558", "0.46459478", "0.46433154", "0.46396977", "0.46396977", "0.46396977", "0.46149406", "0.4604744", "0.4599002", "0.4592913", "0.4576119", "0.45698765", "0.45690224", "0.45682245", "0.4563813", "0.45557287", "0.4550087", "0.4530039", "0.45232427", "0.451487", "0.4514611", "0.4513778", "0.4513705", "0.45005706", "0.4486073", "0.44733703", "0.44657487", "0.44570655", "0.44468766", "0.44425336", "0.44407743", "0.4433166", "0.44329938", "0.44283533", "0.44209224", "0.44134384", "0.44131994", "0.44131863", "0.44075263", "0.44067147", "0.4399823", "0.43961456", "0.43940088", "0.4389051", "0.4385605", "0.43805462", "0.43797937", "0.43731052", "0.4370966", "0.43697435", "0.43697435", "0.43660814", "0.4365302", "0.4365272", "0.43515384", "0.4351084", "0.43501663", "0.43419674", "0.43397337", "0.43387634", "0.4333822", "0.43315995", "0.43315652", "0.43312874", "0.43287018", "0.4323978", "0.4320454", "0.4320454", "0.43192297", "0.43185455", "0.43152505", "0.43116304", "0.43110704" ]
0.58926123
0
Starts interactive time tracking session. Updates Freshbooks based on Toggl entries.
def time_tracking(self): fb = FreshBooks() tg = Toggl() self.print_splash() self.print("Tip: You can always enter 'skip' when you want to skip a time entry.", format='warn') days = self.get_interactive_days() # number of days to go back self.print("OK, I'll run you through the Toggl time entries of the past %i day(s)." % (days)) timestamp = self.get_timestamp(days) # unix timestamp including tz time_entries = tg.get_time_entries(timestamp) if len(time_entries) == 0: self.print("No Toggl entries in this time span!", 'warn') return False time_entries = self.merge_toggl_time_entries(time_entries) # merge Toggl entries fb_projects = fb.get_projects() # Loop through merged Toggl time entries: for entry in time_entries: # Get and convert all necessary info: client_id = tg.get_client_id(project_id=entry.get('pid')) client_name = tg.get_client_name(client_id) project = tg.get_project(entry.get('pid')) duration = int(entry['duration']) / 60 / 60 # convert duration to hours duration = round(duration * 4 ) / 4 # round hours to nearest .25 description = self.format_description(project['name'], entry['description']) date = str(parser.parse(entry['start']).date()) # Print info in a nice way: self.print_divider(30) self.print("Description: " + description) self.print("Date: " + date) self.print("Hours spent: " + str(duration)) # Skip if Toggl entry is already booked: if entry.get('tags') and tg.BOOKED_TAG in entry['tags']: self.print("Skipping this entry because it is already in Freshbooks.", 'cross') # Skip if duration is below 0.25: elif duration < 0.25: self.print("Skipping this entry because there are less than 0.25 hours spent.", 'cross') # If billable, add to Freshbooks: elif entry['billable']: # Get FreshBooks project name through interactive search: try: self.print("Project: \U0001F50D ") fb_project_name = self.interactive_search(fb_projects.keys(), client_name) # Handle KeyboardInterrupt except KeyboardInterrupt: answer = input("\nKeyboardInterrupt! Skip current entry or quit time tracking? (S/q) ") if answer.lower() == 's' or answer == '': self.clear_lines(1) self.print("Skipping this entry.", 'cross') continue else: self.clear_lines(1) self.print("Ok, stopping time tracking.", 'cross') sys.exit() # If user requests so, skip this entry: self.clear_lines(1) if not fb_project_name: self.print("Skipping this entry.", 'cross') continue # Otherwise, add entry to FreshBooks and tag Toggl entry/entries: self.print("Project: " + fb_project_name) project_id = fb.get_project_id(fb_project_name) fb.add_entry(project_id, duration, description, date) tg.tag_projects(entry['merged_ids'], tg.BOOKED_TAG) # If not billable, skip entry: else: self.print("Skipping this entry because it is not billable.", 'cross') self.print_divider(30) answer = input("All done! Open FreshBooks in browser to verify? (Y/n) ") if answer.lower() == 'y' or answer == '': webbrowser.open('https://%s.freshbooks.com/timesheet' % fb.fb_creds['subdomain'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n # check database for tracking options\n # if empty prompt to add subject\n\n # present tracking options\n\n # calculate timedelta\n\n # printing/updating the time", "def setTrackStartTime() :\n s.startTrack()", "def time_automation_listener(now):\n action()", "def main():\n \n ## Determine whether to query for the sunset or sunrise\n if datetime.now().hour >= 20:\n ## Run sunrise tweets after 8PM\n type = 'sunrise'\n else:\n ## Any earlier, run sunset tweets (by default run at 12PM)\n type = 'sunset'\n \n ## Iterate through the time series and states\n log_df = TWEET_HISTORY_DF.copy()\n for loc in c.LOCATIONS.keys():\n \n ## Instantiate a class to do the tweetin'\n MySunTweeter = SunTweeter(loc, type, log_df)\n MySunTweeter.send_tweet()\n \n ## Save the log to use in the next iteration of the loop\n log_df = MySunTweeter.log_df\n \n ## Overwrite the log with the updated records\n log_df.to_csv(\"log/SunsetWx_full_tweet_log.csv\",\n index = False)", "def start_live_observation(self):\n\n logging.debug(\"start live observation, self.liveObservationStarted: {}\".format(self.liveObservationStarted))\n\n if \"scan sampling\" in self.textButton.text():\n self.textButton.setText(\"Stop live observation\")\n self.liveTimer.start(100)\n return\n\n if not self.liveObservationStarted:\n\n if self.twEvents.rowCount():\n if dialog.MessageDialog(programName, \"Delete the current events?\", [YES, NO]) == YES:\n self.twEvents.setRowCount(0)\n self.pj[OBSERVATIONS][self.observationId][EVENTS] = []\n self.projectChanged = True\n\n self.textButton.setText(\"Stop live observation\")\n\n self.liveStartTime = QTime()\n # set to now\n self.liveStartTime.start()\n # start timer\n self.liveTimer.start(100)\n else:\n\n self.textButton.setText(\"Start live observation\")\n self.liveStartTime = None\n self.liveTimer.stop()\n\n if self.timeFormat == HHMMSS:\n self.lbTimeLive.setText(\"00:00:00.000\")\n if self.timeFormat == S:\n self.lbTimeLive.setText(\"0.000\")\n\n self.liveObservationStarted = not self.liveObservationStarted", "def start_station(self):\n if Config.LOG_TO_CONSOLE and Config.LOG_INTERVAL:\n self._log_results(first_time=True)\n\n if Config.WEATHER_UPLOAD and Config.UPLOAD_INTERVAL:\n self._upload_results(first_time=True)\n\n if Config.UPDATE_DISPLAY and Config.UPDATE_INTERVAL:\n self._update_display()", "def initialize_new_live_observation(self):\n\n self.playerType = LIVE\n self.playMode = LIVE\n\n self.create_live_tab()\n\n self.toolBox.setVisible(True)\n\n self.dwObservations.setVisible(True)\n self.dwObservationsGroup.setVisible(True)\n\n self.simultaneousMedia = False\n\n self.lbFocalSubject.setVisible(True)\n self.lbCurrentStates.setVisible(True)\n\n self.liveTab.setEnabled(True)\n self.toolBox.setItemEnabled(0, True) # enable tab\n self.toolBox.setCurrentIndex(0) # show tab\n\n self.menu_options()\n\n self.liveObservationStarted = False\n self.textButton.setText(\"Start live observation\")\n if self.timeFormat == HHMMSS:\n self.lbTimeLive.setText(\"00:00:00.000\")\n if self.timeFormat == S:\n self.lbTimeLive.setText(\"0.000\")\n\n self.liveStartTime = None\n self.liveTimer.stop()", "def open(self):\n\n self.st_time = time.strftime('%H:%M %A %d %B')\n self.is_active = True", "def main(stdscr, starting_portfolios):\n\n # Generally don't need a cursor.\n curses.curs_set(0)\n\n # Clear the screen\n stdscr.clear()\n\n # Fire up the Stock Tracker.\n st = ST(stdscr);\n st.run(starting_portfolios)", "def TeleopPeriodic(self):\n Scheduler.GetInstance().Run()\n LiveWindow.Run()", "def 
do_upt(self, arg):\n self.do_timesheet('update today')", "def start(self):\n self.login(not self.quiet)\n self.start_time = time.time()\n while True:\n self.print_time()\n try:\n self.tick()\n except Exception as e:\n print(e)", "def start(self):\n self.start_time = time.time()", "def start(self):\r\n self.start_time = time.time()", "def time_automation_listener(now):\n hass.async_add_job(action, {\n 'trigger': {\n 'platform': 'time',\n 'now': now,\n },\n })", "def start_game(self) -> None:\n self.check_edgework_view_attached()\n self.timer.start_timing()\n self._edgework_view.start_timing()", "def FreshStart(self):\n # Create a vector holding historical data for the purpose of plotting.\n # The length may vary because the sampling speed of different are\n # sensors may vary.\n\n self.history = {'time': collections.deque( [], self.history_length ),\\\n 'data': collections.deque( [], self.history_length )\n }", "def start():\n print('Running...')\n with Feed(Config.database) as feed:\n feed.refresh()", "def refresh():\r\n db.drop_all()\r\n db.create_all()\r\n for time_value in get_datetime_values('Los Angeles', 'pm25'):\r\n record = Record(datetime=str(time_value[0]), value=time_value[1])\r\n db.session.add(record)\r\n db.session.commit()\r\n return render_template('refresh.html')", "def main():\n # group_id = get_group_id() This would be used if I had\n # the appropriate privileges\n group_id = 15000022833\n setup_logger()\n ticket_ids = get_newhire_tickets(group_id)\n for ticket_id in ticket_ids:\n update_ticket_info(ticket_id)", "def start(cls):\n\n cls._set_mode_running()\n TimeDisplay.start_time()\n for callback in cls.start_callback:\n callback()", "def run(self):\n self.timestamp['start'] = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')\n\n for point in self._prepare_grid():\n graph = self._prepare_graph(**point)\n env = self._prepare_env(graph, **point)\n log = self._prepare_logger(graph, env, **point)\n\n try:\n env.run(until=self.runtime)\n except Exception as e:\n print(e)\n log.close()\n\n # self.timestamp[grid.hash_grid_point(point)].append(datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))\n\n self.timestamp['end'] = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')", "def insert_time(self):\n if self.controller.shared_data.obj_track.size == 0:\n message = 'There is no loaded track to insert timestamp'\n messagebox.showwarning(title='Insert Time Assistant',\n message=message)\n return\n\n self.timestamp = dt.datetime(2000, 1, 1, 0, 0, 0)\n self.speed = 0\n\n spinbox_options = {'year': [1990, 2030, 2000],\n 'month': [1, 12, 1],\n 'day': [1, 31, 1],\n 'hour': [0, 23, 0],\n 'minute': [0, 59, 0],\n 'second': [0, 59, 0]}\n\n top = tk.Toplevel()\n top.title('Insert Time Assistant')\n\n # Insert data frame\n frm_form = tk.Frame(top, relief=tk.FLAT, borderwidth=3)\n frm_form.pack() # insert frame to use grid on it\n spn_time = collections.defaultdict()\n\n for i, entry in enumerate(spinbox_options):\n # This allow resize the window\n top.columnconfigure(i, weight=1, minsize=75)\n top.rowconfigure(i, weight=1, minsize=50)\n\n # Create widgets\n var = tk.StringVar(top)\n var.set(spinbox_options[entry][2])\n\n spn_time[entry] = tk.Spinbox(from_=spinbox_options[entry][0],\n to=spinbox_options[entry][1],\n master=frm_form,\n width=8,\n textvariable=var,\n justify=tk.RIGHT,\n relief=tk.FLAT)\n\n lbl_label = tk.Label(master=frm_form, text=f'{entry}', anchor='w')\n\n # Grid\n lbl_label.grid(row=i, column=0) # grid attached to frame\n spn_time[entry].grid(row=i, column=1)\n\n # Insert 
speed\n i = len(spn_time)\n top.columnconfigure(i, weight=1, minsize=75)\n top.rowconfigure(i, weight=1, minsize=50)\n spn_speed = tk.Spinbox(from_=0, to=2000,\n master=frm_form,\n width=8,\n justify=tk.RIGHT,\n relief=tk.FLAT)\n lbl_label = tk.Label(master=frm_form, text='speed (km/h)', anchor='w')\n lbl_label.grid(row=i, column=0, pady=10)\n spn_speed.grid(row=i, column=1)\n\n def _insert_timestamp():\n # Check input data and insert timestamp\n try:\n self.timestamp = dt.datetime(int(spn_time['year'].get()),\n int(spn_time['month'].get()),\n int(spn_time['day'].get()),\n int(spn_time['hour'].get()),\n int(spn_time['minute'].get()),\n int(spn_time['second'].get()))\n self.speed = float(spn_speed.get())\n if self.speed <= 0:\n raise ValueError('Speed must be a positive number.')\n\n # Insert timestamp\n self.controller.shared_data.obj_track.\\\n insert_timestamp(self.timestamp, self.speed)\n top.destroy()\n\n except (ValueError, OverflowError) as e:\n messagebox.showerror('Input Error', e)\n\n def _clear_box():\n for s in spn_time:\n spn_time[s].delete(0, 8)\n spn_time[s].insert(0, spinbox_options[s][2])\n spn_speed.delete(0, 8)\n spn_speed.insert(0, 0)\n\n # Button frame\n frm_button = tk.Frame(top)\n frm_button.pack(fill=tk.X, padx=5,\n pady=5) # fill in horizontal direction\n\n btn_clear = tk.Button(master=frm_button, text='Clear',\n command=_clear_box)\n btn_submit = tk.Button(master=frm_button, text='Submit',\n command=_insert_timestamp)\n btn_clear.pack(side=tk.RIGHT, padx=10)\n btn_submit.pack(side=tk.RIGHT, padx=10)", "def tic():\n import time\n global startTime_for_tictoc\n startTime_for_tictoc = time.time()", "def _open(self):\n \n # Set initial time\n t = datetime.datetime.today()\n timestamp = t.microsecond/1000 + t.second*1000 + \\\n t.minute*60*1000 + t.hour*60*60*1000 + \\\n t.day*24*60*60*1000\n while timestamp > 4294967295: timestamp -= 4294967295\n self._last = timestamp\n \n # Nothing else to do... 
already open", "def start(self):\n self._state = STATE_INACTIVE\n self._game = None\n self._last_key_press = False\n self._last_n_press = False\n self._last_lose_life = False\n self._mssg = (GLabel(text=START_MSSG, x=GAME_WIDTH/2, y=GAME_HEIGHT/2, font_size=24))\n self.time = None\n self._points_mssg = None\n self._falling_points = []\n self._FP_mssg = None", "def start_timer(self):\n self.start_time = datetime.now()", "def start(self):\r\n self.debug(\"### starting gox streaming API, trading %s%s\" %\r\n (self.curr_base, self.curr_quote))\r\n self.client.start()", "def onCreate(self):\n\t\tself.enableTick()\n\t\tself.lastFpsUpdate = 0.0\n\t\tself.fpsInfo = \"\"\n\t\tself.createTime = time.time()", "def time(lancet, issue):\n issue = get_issue(lancet, issue)\n\n with taskstatus(\"Starting harvest timer\") as ts:\n lancet.timer.start(issue)\n ts.ok(\"Started harvest timer\")", "def update_timesheet(self, args):\n if len(args) == 1:\n print(self.error_wrong_parameters)\n return\n try:\n started, finished = helpers.parse_date_parameters(args[1:])\n except ValueError as error:\n print(error)\n return\n if started == datetime.date.fromtimestamp(0):\n track = self.db.get_minimal_started_track()\n if track:\n started = track['started']\n else:\n started = finished\n # Get timesheet records\n tracks = self.db.get_tracks_by_date(started, finished,\n also_unfinished=False)\n # Exposure tracks to the table\n tracks_contents = self.create_tracks_contents(tracks)\n lnum = 0\n header = self.get_timesheet_header(started, finished)\n header_length = len(header.split(os.linesep))\n while(True):\n try:\n # Create the editor's contents\n contents = self.create_timesheet_contents(header, tracks_contents)\n timesheet = self.open_external_editor(contents, lnum)\n # we must get the table header here due to the length of the columns\n table_header = timesheet[header_length-1:header_length+1]\n tracks = timesheet[header_length+1:]\n except OSError, message:\n print(\"*** Error: %s\", message)\n return\n # Parse the input\n try:\n data = self.parse_timesheet(tracks, header_length)\n except errors.ParsingError as error:\n print(error.msg)\n print(\"Would you like to update the timesheet again? 
[Y/n] \")\n if not helpers.get_yes_no(default='y'):\n return\n table_header.extend(tracks)\n tracks_contents = \"\".join(table_header)\n lnum = error.lnum\n continue\n break\n # Update the DB\n # TODO: get rid the danger operation\n self.db.delete_tracks_by_date(started=started, finished=finished)\n data.sort(key=operator.itemgetter('started'))\n for track in data:\n self.db.create_track(track['tid'],\n track['started'], track['finished'],\n int(not bool(track['is_billed'])))\n print('The timesheet has been updated.')", "def main():\n populate_satellites_array()\n latitude = float(os.environ['LATITUDE'])\n longitude = float(os.environ['LONGITUDE'])\n radius = int(os.environ['RADIUS'])\n timeout = 1\n previous_satellites = []\n while True:\n if (last_updated[0] + 86400) < int(time.time()):\n print('Expired data, updating from spacetrack')\n cron_refresh_spacetrack_cache()\n populate_satellites_array()\n print('Checking {}, {}'.format(latitude, longitude))\n currently_overhead = get_overhead_satellites_dicts(latitude, longitude, radius)\n for sat in currently_overhead:\n if not sat['name'] in previous_satellites:\n announce_satellite(sat)\n previous_satellites = [x['name'] for x in currently_overhead]\n time.sleep(timeout)", "def main():\n credentials = get_credentials()\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n max = 7\n events = getEvents(credentials, now, max)\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])\n #addEvent(credentials)", "def start_engine():\r\n traffic = TrafficCollector()\r\n weather = WeatherController()\r\n client = MongoClient()\r\n db = client.jam_forecaster\r\n\r\n scheduler = BlockingScheduler()\r\n scheduler.add_job(get_data, trigger='cron', hour='6-22', minute='*/5', second='0', max_instances=10, args=[traffic, weather, db])\r\n scheduler.start()", "def update_time(self):\n pass # Do nothing", "def refresh():\r\n DB.drop_all()\r\n DB.create_all()\r\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\r\n for i in time_x_values():\r\n DB.session.add(Record(datetime=i[0], value=i[1]))\r\n DB.session.commit()\r\n return 'Data refreshed!'", "def TestPeriodic(self):\n LiveWindow.Run()", "def run():\n\n full_ticket = Ticket()\n daily_ticket_a = Ticket()\n daily_ticket_b = Ticket()\n daily_ticket_c = Ticket()\n community_ticket = Ticket()\n\n full_ticket.ticket_type = 'full'\n daily_ticket_a.ticket_type = 'daily-13'\n daily_ticket_b.ticket_type = 'daily-14'\n daily_ticket_c.ticket_type = 'daily-15'\n community_ticket.ticket_type = 'community'\n\n full_ticket.price = 400000\n daily_ticket_a.price = 200000\n daily_ticket_b.price = 300000\n daily_ticket_c.price = 350000\n community_ticket.price = 0\n\n full_ticket.information = 'Ticket for full 3 days devsummit event.'\n daily_ticket_a.information = 'Ticket for 13th November at devsummit event.'\n daily_ticket_b.information = 'Ticket for 14th November at devsummit event.'\n daily_ticket_c.information = 'Ticket for 15th November at devsummit event.'\n community_ticket.information = 'Ticket for community, only given by admin.'\n db.session.add(full_ticket)\n db.session.add(daily_ticket_a)\n db.session.add(daily_ticket_b)\n db.session.add(daily_ticket_c)\n db.session.add(community_ticket)\n\n db.session.commit()", "def refresh():\n DB.drop_all()\n DB.create_all()\n df_meas = open_api.measurements(city='Los Angeles', parameter='pm25', 
df=True)\n df_meas['date.utc'] = df_meas['date.utc'].astype(str)\n create_DB_records(df_meas)\n DB.session.commit()\n message = 'Data refreshed on: ' + str(datetime.datetime.now())\n over9s = Record.query.filter(Record.value > 9)\n recs = Record.query.filter(Record.id < 20)\n over5s = Record.query.filter(Record.value > 5)\n return render_template('base.html', message=message, over9s=over9s, over5s=over5s, recs=recs)", "async def start_periodically_refresh_appointments(): # pylint: disable=invalid-name\n await asyncio.sleep(60)\n await app[\"snct_scrapper\"].refresh_appointments_every_minutes()", "def start_timer(self):\n print \"Timer Object Started. Will update ADC Information every %s seconds\" % self.refreshTime\n self.timer=Timer(float(self.refreshTime)*1000, self._refresh_Visible_channels)", "def raging_fire():\n\n if editspin1.get() != '0': # season\n select_values[0] = int(editspin1.get())\n select_values[3] = \"{}\".format(datetime.datetime.now()) # update the modify date\n with open('series_table.json', 'w') as f:\n json.dump(series_dict, f, indent=2)\n\n if editspin2.get() != '0': # episode\n select_values[1] = int(editspin2.get())\n select_values[3] = \"{}\".format(datetime.datetime.now()) # update the modify date\n with open('series_table.json', 'w') as f:\n json.dump(series_dict, f, indent=2)\n\n if editentvar.get() != curitem: # name\n series_dict[editentvar.get().title()] = series_dict.pop(curitem) # update the modify date\n select_values[3] = \"{}\".format(datetime.datetime.now())\n with open('series_table.json', 'w') as f:\n json.dump(series_dict, f, indent=2)\n\n if editent2var.get() != select_values[2]: # pic\n select_values[3] = \"{}\".format(datetime.datetime.now()) # update the modify date\n select_values[2] = editent2var.get()\n with open('series_table.json', 'w') as f:\n json.dump(series_dict, f, indent=2)\n\n edittop.destroy()", "def _trigger(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()", "def main(t0, t1, save_plot):\n\n last_day = \"\"\n stocks, plot_names = [], []\n\n # Read all stocks by symbol into list\n stocks = open(\"stocks.txt\",\"r\").read().splitlines()\n\n # Wait until specific time is reached\n while True:\n # Get current time in \"hour:min\"-format\n t = dt.datetime.now().strftime(\"%H:%M\")\n # Check if it\"s later than 20:00 o\"clock (24h time format)\n if t >= \"19:00\" and last_day != dt.date.today():\n print(\"Starting with creating candlestick graphs!\")\n # Create candlestick plot for every specified stock\n for stock in stocks:\n td = fetch_data(t0, t1, stock)\n pn = calculcate_candlestick(td, stock, save_plot)\n # Add stock-image path to list\n plot_names.append(pn)\n # Send plots via e-mail\n send_mail(plot_names)\n last_day = dt.date.today()\n\n # Sleep 59 seconds before checking again\n time.sleep(59)", "async def track_start(self):\n await self.wait_until_ready()\n self.start_time = datetime.datetime.utcnow()", "def start_program():\n\n today = date.today()\n current_date = today.strftime(\"%d/%m/%Y\")\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n print('Starting program : Speech_Name_Organization.py - at : ' + current_time + ' on : ' + current_date)", "def start(self):\n# if self._start_time is not None:\n self._start_time = time.perf_counter()", "def run_game(self):\n\t\twhile True:\n\t\t\tself._check_events()\n\t\t\t\n\t\t\tif 
self.stats.game_active:\n\t\t\t\tself.pigeon.update()\n\t\t\t\tself._update_droppings()\n\t\t\t\tself._update_autos()\n\n\t\t\tself._update_screen()", "def main():\n current_exit_code = 0\n # Launch NBA Stat Tracker main window\n app = QApplication(sys.argv)\n mw = MAIN_WINDOW.Tracker()\n mw.show()\n\n # Closure to ensure information window opens after the\n # event loop is started\n def on_start_cb():\n mw.on_start()\n QTimer.singleShot(0, on_start_cb)\n\n current_exit_code = app.exec_()\n app.deleteLater()\n if current_exit_code == mw.EXIT_CODE_RESTART:\n main()", "def refresh_screen(self):", "def startGame():\n #roundnumber\n eel.updateRoundNumber()\n # start page\n eel.updateStartPage([startPage.getTitle(), startPage.getUrl()])\n eel.updateStartPageDescription(startPage.getFirstSentence())\n # goal page\n eel.updateGoalPage([goalPage.getTitle(), goalPage.getUrl()])\n eel.updateGoalPageDescription(goalPage.getFirstSentence())\n # ui updates\n eel.updateCurrentPage(\n [wikiPageStackTrace[-1].getTitle(), wikiPageStackTrace[-1].getUrl()])\n eel.updateCurrentPageDescription(wikiPageStackTrace[-1].getFirstSentence())\n eel.printInPageList(wikiPageStackTrace[-1].getOnlyLinksListJS())\n # loader\n time.sleep(0.5)\n eel.hideLoader()", "def refresh(self):\n\n self._refreshed_on = time.time() * 1000", "def refreshStories(self):\n\t\tself.stories = self.h.getLatestStories(self.newestOrTop, self.alreadyReadList)\n\t\tself.lastRefreshed = time.localtime()\n\t\tif self.hnUserName != \"\":\n\t\t\tprint \"Getting \" + self.hnUserName + \"'s karma from HN...\"\n\t\t\tuser = HackerNewsUser(self.hnUserName)\n\t\t\tif self.karma != user.karma and self.karma != -1000:\n\t\t\t\tkarmaChange = 1\n\t\t\tself.karma = user.karma", "async def time(self, ctx):\r\n time = market_time()\r\n await ctx.send(f'It is currently {time.time().strftime(\"%H:%M:%S\")} EDT for the market.')", "def start_tracking(date=date):\n # requesting data to be used later\n print(\"\\n\\t\\t\\t\\tTracking Data\", end=\"\\n\\t\\t\\t\\t\")\n print(\"*\" * 13, end=\"\\n\\n\")\n start_hour = input(\"Enter start hour: \")\n start_min = input(\"Enter start minute(00 if you started exactly that hour): \")\n start_ampm = input(\"AM or PM: \")\n\n print(\"\\n\\n\")\n\n end_hour = input(\"Enter end hour: \")\n end_min = input(\"Enter end minute(00 if you ended exactly that hour): \")\n end_ampm = input(\"AM or PM: \")\n\n\n # accounting for every possible bug that may affect dataframe structure\n try:\n if (int(start_hour) < 10) and (int(end_hour) < 10):\n try:\n if (int(start_min) < 10) and (int(end_min) < 10):\n start_time = \" \" + start_hour + \":0\" + start_min\n end_time = \" \" + end_hour + \":0\" + end_min\n elif int(start_min) < 10:\n start_time = \" \" + start_hour + \":0\" + start_min\n end_time = \" \" + end_hour + \":\" + end_min\n elif int(end_min) < 10:\n end_time = \" \" + end_hour + \":0\" + end_min\n start_time = \" \" + start_hour + \":\" + start_min\n else:\n start_time = \" \" + start_hour + \":\" + start_min\n end_time = \" \" + end_hour + \":\" + end_min\n except ValueError:\n print(\"\\nSorry you may have entered incorrect data. 
Please restart the program.\")\n elif int(start_hour) < 10:\n try:\n if (int(start_min) < 10) and (int(end_min) < 10):\n start_time = \" \" + start_hour + \":0\" + start_min\n end_time = end_hour + \":0\" + end_min\n elif int(start_min) < 10:\n start_time = \" \" + start_hour + \":0\" + start_min\n end_time = end_hour + \":\" + end_min\n elif int(end_min) < 10:\n end_time = end_hour + \":0\" + end_min\n start_time = \" \" + start_hour + \":\" + start_min\n else:\n start_time = \" \" + start_hour + \":\" + start_min\n end_time = end_hour + \":\" + end_min\n except ValueError:\n print(\"\\nSorry you may have entered incorrect data. Please retart the program.\")\n elif int(end_hour) < 10:\n try:\n if (int(start_min) < 10) and (int(end_min) < 10):\n start_time = start_hour + \":0\" + start_min\n end_time = \" \" + end_hour + \":0\" + end_min\n elif int(start_min) < 10:\n start_time = start_hour + \":0\" + start_min\n end_time = \" \" + end_hour + \":\" + end_min\n elif int(end_min) < 10:\n end_time = \" \" + end_hour + \":0\" + end_min\n start_time = start_hour + \":\" + start_min\n else:\n start_time = start_hour + \":\" + start_min\n end_time = \" \" + end_hour + \":\" + end_min\n except ValueError:\n print(\"\\nSorry you may have entered incorrect data. Please restart the program.\")\n else:\n try:\n if (int(start_min) < 10) and (int(end_min) < 10):\n start_time = start_hour + \":0\" + start_min\n end_time = end_hour + \":0\" + end_min\n elif int(start_min) < 10:\n start_time = start_hour + \":0\" + start_min\n end_time = end_hour + \":\" + end_min\n elif int(end_min) < 10:\n end_time = end_hour + \":0\" + end_min\n start_time = start_hour + \":\" + start_min\n else:\n start_time = start_hour + \":\" + start_min\n end_time = end_hour + \":\" + end_min\n except ValueError:\n print(\"\\nSorry you may have entered incorrect data. Please retart the program.\")\n except ValueError:\n print(\"\\nSorry you may have entered incorrect data. 
Please restart the program.\")\n\n\n # accepting data and writing to file\n if (start_ampm.lower() == \"am\" and end_ampm.lower() == \"am\") or (start_ampm.lower() == \"pm\" and end_ampm.lower() == \"pm\"):\n\n # checking for value errors\n try:\n # if both starting and ending hours are \"AM\"\n total_hours = (int(end_hour) - int(start_hour))\n total_min = diff_mins(start_min, end_min)\n converted_total_hours = (total_hours * 60)\n total_mins = (total_min + converted_total_hours)\n amount_earned = round((total_mins / 60) * 5, 2)\n print(\"\\nYou worked for a total of {}hour(s) {}min(s) and earned ${:.2f}!\"\n .format(total_hours, total_min, amount_earned))\n print(\"Your data has been saved!\\n\")\n\n date = date.today()\n with open(\"timetracker.csv\", \"a\", newline='') as file:\n writer = csv.writer(file)\n writer.writerow([date, start_time, start_ampm.upper(), \n end_time, end_ampm.upper(), total_hours, total_min, amount_earned])\n except ValueError:\n pass\n elif (start_ampm.lower() == \"am\" and end_ampm.lower() == \"pm\") or ((start_ampm.lower() == \"pm\" and end_ampm.lower() == \"am\")):\n \n try:\n # if starting hour is \"AM\" but ending hour is \"PM\" or vice versa\n if int(end_hour) == 12:\n total_hours = ((int(end_hour)) - int(start_hour))\n else:\n total_hours = ((int(end_hour) + 12) - int(start_hour))\n total_min = diff_mins(start_min, end_min)\n converted_total_hours = (total_hours * 60)\n total_mins = (total_min + converted_total_hours)\n amount_earned = round((total_mins / 60) * 5, 2)\n print(\"\\nYou worked for a total of {}hour(s) {}min(s) and earned ${:.2f}!\"\n .format(total_hours, total_min, amount_earned))\n print(\"Your data has been saved!\\n\")\n\n date = date.today()\n with open(\"timetracker.csv\", \"a\", newline='') as file:\n writer = csv.writer(file)\n writer.writerow([date, start_time, start_ampm.upper(), end_time, \n end_ampm.upper(), total_hours, total_min, amount_earned])\n except ValueError:\n pass\n else:\n print(\"\\nSorry you may have entered incorrect data. 
Please restart the program.\")", "def timer_setup(self):\n pass", "def start_clock(self):\n pass", "def startUpdates(self):\r\n # Analytics stream\r\n self.blptsAnalytics = blpapiwrapper.BLPTS()\r\n self.streamWatcherAnalytics = StreamWatcher(self, BloombergQuery.ANALYTICS)\r\n self.blptsAnalytics.register(self.streamWatcherAnalytics)\r\n # Price only stream\r\n self.blptsPriceOnly = blpapiwrapper.BLPTS()\r\n self.streamWatcherPriceOnly = StreamWatcher(self, BloombergQuery.PRICEONLY)\r\n self.blptsPriceOnly.register(self.streamWatcherPriceOnly)\r\n # Price change subscription\r\n self.streamWatcherBID = StreamWatcher(self,BloombergQuery.BID)\r\n self.bbgstreamBIDEM = blpapiwrapper.BLPStream(list((self.embondsisins + BBGHand + ' Corp').astype(str)), 'BID', 0)\r\n self.bbgstreamBIDEM.register(self.streamWatcherBID)\r\n self.bbgstreamBIDEM.start()\r\n # Risk free bonds: no streaming as too many updates - poll every 15 minutes\r\n rfRequest = blpapiwrapper.BLPTS(list((self.rfbondsisins + '@CBBT' + ' Corp').astype(str)), self.bbgPriceRFQuery)\r\n self.RFtimer = RFdata(900, rfRequest, self)\r\n self.BDMdata = BDMdata(900, self) #15 MINUTES\r\n self.BDMEODsave = BDMEODsave(self)", "def start(update, context) -> None:\n global ANSWERS, id_question\n ANSWERS = []\n\n id_question = 0\n update.message.reply_text(\n 'Please select /question to get the poll'\n )", "def start_timer(self):\n self.start_time = time.time()", "def start(update: Update, context: CallbackContext):\n first_set(update, type=\"initial\")", "def show(self):\n if not self.init_run and self.flow_auto_update:\n self.run_all()\n self.init_run = True\n self.flow_view.show()", "def _start_clock(self):\n self._start = time.time()", "def start(self):\n\t\tself.init_trajectory_gripper()\n\t\tself.gripperserver.start()\n\t\tprint(\"The action server for this driver has been started\")", "def main():\n\n while True:\n print(\"Let's explore some US bikeshare data!\")\n city, month, day = get_filters()\n df = load_data(city, month, day)\n # printing filter\n print(f\"Month: {month}, Day: {day}\")\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n display_records(df)\n restart = prompts.yes_no_prompt(\"\\nWould you like to restart?\\n\").launch()\n if not restart:\n break\n system(\"clear\")", "def run_at_time(self, input_dict):\n lead_seq = util.get_lead_sequence(self.config, input_dict)\n for lead in lead_seq:\n self.clear()\n input_dict['lead_hours'] = lead\n self.config.set('config', 'CURRENT_LEAD_TIME', lead)\n os.environ['METPLUS_CURRENT_LEAD_TIME'] = str(lead)\n time_info = time_util.ti_calculate(input_dict)\n self.run_at_time_once(time_info)", "def _start_live(self):\n StartLiveData(FromNow=False, FromTime=False, FromStartOfRun=True, UpdateEvery=1.0,\n Instrument=\"ISIS_Kafka_Event\", RunTransitionBehavior=\"Stop\", OutputWorkspace=self.outputWs,\n Address=self.listenerHost, PreserveEvents=True, AccumulationMethod=\"Add\",\n InstrumentName=self.instrumentName)\n\n # Grab most recent live data algorithm handle\n self._monitorLiveDataHandle = api.AlgorithmManagerImpl.Instance().newestInstanceOf(\"MonitorLiveData\")", "def run(self):\n\t\tinterval_in_ticks = self.session.timer.get_ticks(GAME.INGAME_TICK_INTERVAL)\n\t\tScheduler().add_new_object(self._tick, self, runin=interval_in_ticks, loops=-1)", "def start(self):\n gevent.spawn_later(self._period, self._run)", "def at_repeat(self):\n global _SESSIONS\n if not _SESSIONS:\n from evennia.server.sessionhandler import SESSIONS as _SESSIONS\n for session in 
_SESSIONS.sessions_from_player(self.player):\n session.update_session_counters(idle=True)", "def start(self):\n self.timeStart = pygame.time.get_ticks()", "def resume():\n # We now retrieve all entries in the previous month.\n # Getting the current date and the date from a month before.\n time_year = time.localtime()[0] \n time_month = time.localtime()[1]\n time_day = time.localtime()[2]\n if time_month == 1:\n prev_time_month = 12\n prev_time_year = time_year - 1\n else:\n prev_time_month = time_month - 1\n prev_time_year = time_year\n cur_date = str(time_year) + '-' + ('%02d' % time_month) + '-' + ('%02d' % time_day)\n prev_date = str(prev_time_year) + '-' + ('%02d' % prev_time_month) + '-' + ('%02d' % time_day)\n\n entries = toggl.entries_between(prev_date, cur_date)\n entry_list = []\n \n for entry in entries:\n if is_entry_in_list(entry, entry_list) == False:\n entry_list.append(entry)\n\n print(\">>> You can resume the following entries:\")\n n = 1\n for entry in entry_list:\n tags = []\n if 'tags' in entry:\n [tags.append(i) for i in entry['tags']]\n print('> {} - {} [{}]'.format(str(n),\n entry['description'],\n \",\".join(tags)))\n n += 1\n choice = int(input(\">>> Type an entry number: \"))\n\n if choice >= 1 and choice <= len(entry_list):\n res_entry = entry_list[choice-1]\n start_toggl(res_entry['description'], res_entry['tags'])\n else:\n print(\"You typed an unavailable number.\")\n\n \"\"\"\n >>> You can resume the following entries:\n > 1 - test [project]\n > 2 - another [other project]\n >>> Type an entry number: \n \"\"\"", "def __interact(self):\n while True:\n query_screen = self.issue_query()\n\n if query_screen is None:\n print \"NO QUERIES, REMAINING, STAHP\"\n break\n else:\n print \" > Examine SERP\"\n\n while True:\n doc_screen = self.examine_document(query_screen.get_data())\n\n if doc_screen is None:\n break\n else:\n if random.randint(1, 10) > 5: # Is the document relevant?\n print \" > DOC JUDGED RELEVANT\"\n print doc_screen.get_data()", "def run_interactive():\n from cherrypy import engine\n \n # This is what quickstart does but we don't block\n engine.signals.subscribe()\n engine.start()\n #engine.block()", "def run_hourly_hygienist(self):\n self.ensure_timebox_trackers_accurate()\n self.copy_tasks_with_schedule_string()", "def run():\n\n window = get_window()\n\n # Used in some unit test\n if os.environ.get('ARCADE_TEST'):\n window.on_update(window._update_rate)\n window.on_draw()\n elif window.headless:\n # We are entering headless more an will emulate an event loop\n import time\n\n # Ensure the initial delta time is not 0 to be\n # more in line with how a normal window works.\n delta_time = window._draw_rate\n last_time = time.perf_counter()\n\n # As long as we have a context --\n while window.context:\n # Select active view or window\n active = window.current_view or window\n\n active.on_update(delta_time)\n if window.context:\n active.on_draw()\n\n # windwow could be closed in on_draw\n if window.context:\n window.flip()\n\n now = time.perf_counter()\n delta_time, last_time = now - last_time, now\n else:\n import sys\n if sys.platform != 'win32':\n # For non windows platforms, just do pyglet run\n pyglet.app.run(window._draw_rate)\n else:\n # Ok, some Windows platforms have a timer resolution > 15 ms. That can\n # drop our FPS to 32 FPS or so. 
This reduces resolution so we can keep\n # FPS up.\n import contextlib\n import ctypes\n from ctypes import wintypes\n\n winmm = ctypes.WinDLL('winmm')\n\n class TIMECAPS(ctypes.Structure):\n _fields_ = (('wPeriodMin', wintypes.UINT),\n ('wPeriodMax', wintypes.UINT))\n\n def _check_time_err(err, func, args):\n if err:\n raise WindowsError('%s error %d' % (func.__name__, err))\n return args\n\n winmm.timeGetDevCaps.errcheck = _check_time_err\n winmm.timeBeginPeriod.errcheck = _check_time_err\n winmm.timeEndPeriod.errcheck = _check_time_err\n\n @contextlib.contextmanager\n def timer_resolution(msecs=0):\n caps = TIMECAPS()\n winmm.timeGetDevCaps(ctypes.byref(caps), ctypes.sizeof(caps))\n msecs = min(max(msecs, caps.wPeriodMin), caps.wPeriodMax)\n winmm.timeBeginPeriod(msecs)\n yield\n winmm.timeEndPeriod(msecs)\n\n with timer_resolution(msecs=10):\n pyglet.app.run(window._draw_rate)", "def refresh():\n now = datetime.now()\n time_now = datetime.time(now)\n curr_timelabel = Label(root, text=\"Current Time: \" + str(time_now)[:8])\n curr_timelabel.config(font=(\"Calibri 16\"))\n curr_timelabel.place(relx=0.5, rely=0.065, anchor=CENTER)\n\n if time_now > times[0] and time_now < times[1]:\n curr_salahlabel = Label(root, text=\"Current Salah: Fajr\")\n curr_salahlabel.config(font=(\"Calibri, 18\"))\n curr_salahlabel.place(relx=0.1, rely=0.29, anchor=CENTER)\n\n next_salahlabel = Label(root, text=\"Sunrise: \" + times[1])\n next_salahlabel.config(font=(\"Calibri, 18\"))\n next_salahlabel.place(relx=0.3, rely=0.29, anchor=CENTER)\n\n elif time_now > times[1] and time_now < times[2]:\n curr_salahlabel = Label(root, text=\"Sunrise - No Salah\")\n curr_salahlabel.config(font=(\"Calibri, 18\"))\n curr_salahlabel.place(relx=0.1, rely=0.29, anchor=CENTER)\n\n next_salahlabel = Label(\n root, text=\"Next Salah: Zuhr \" + \"(\" + str(times[2]) + \")\"\n )\n next_salahlabel.config(font=(\"Calibri, 18\"))\n next_salahlabel.place(relx=0.3, rely=0.29, anchor=CENTER)\n\n elif time_now > times[2] and time_now < times[3]:\n curr_salahlabel = Label(root, text=\"Current Salah: Zuhr\")\n curr_salahlabel.config(font=(\"Calibri, 18\"))\n curr_salahlabel.place(relx=0.1, rely=0.29, anchor=CENTER)\n\n next_salahlabel = Label(\n root, text=\"Next Salah: Asr \" + \"(\" + str(times[3]) + \")\"\n )\n next_salahlabel.config(font=(\"Calibri, 18\"))\n next_salahlabel.place(relx=0.3, rely=0.29, anchor=CENTER)\n\n elif time_now > times[3] and time_now < times[4]:\n curr_salahlabel = Label(root, text=\"Current Salah: Asr\")\n curr_salahlabel.config(font=(\"Calibri, 18\"))\n curr_salahlabel.place(relx=0.1, rely=0.29, anchor=CENTER)\n\n next_salahlabel = Label(\n root, text=\"Next Salah: Maghrib \" + \"(\" + str(times[4]) + \")\"\n )\n next_salahlabel.config(font=(\"Calibri, 18\"))\n next_salahlabel.place(relx=0.3, rely=0.29, anchor=CENTER)\n\n elif time_now > times[4] and time_now < times[5]:\n curr_salahlabel = Label(root, text=\"Current Salah: Maghrib\")\n curr_salahlabel.config(font=(\"Calibri, 18\"))\n curr_salahlabel.place(relx=0.1, rely=0.29, anchor=CENTER)\n\n next_salahlabel = Label(\n root, text=\"Next Salah: Isha \" + \"(\" + str(times[5]) + \")\"\n )\n next_salahlabel.config(font=(\"Calibri, 18\"))\n next_salahlabel.place(relx=0.3, rely=0.29, anchor=CENTER)\n\n elif time_now > times[5]:\n curr_salahlabel = Label(root, text=\"Current Salah: Isha\")\n curr_salahlabel.config(font=(\"Calibri, 18\"))\n curr_salahlabel.place(relx=0.1, rely=0.29, anchor=CENTER)\n\n next_salahlabel = Label(\n root, text=\"Next Salah: Fajr \" + 
\"(\" + str(times[0]) + \"~)\"\n )\n next_salahlabel.config(font=(\"Calibri, 18\"))\n next_salahlabel.place(relx=0.3, rely=0.29, anchor=CENTER)\n\n root.after(1000, refresh)\n root.after(1000, curr_salahlabel.pack_forget)\n root.after(1000, next_salahlabel.pack_forget)\n root.after(1000, curr_timelabel.pack_forget)", "def __liveActually(self, stdscr):\n global screenH, screenW\n self.__stdscr = stdscr\n (screenH, screenW) = self.__stdscr.getmaxyx()\n self.__stdscr.addstr(0, 0, \"Custom Burner \" + common.version)\n self.__stdscr.addstr(screenH - 1, 0, \"a: add ISO q: Quit\")\n self.__stdscr.noutrefresh()\n isoWindowHeight = ((screenH - 2) * 2)/ 3\n self.__isoWindow = IsoWindow(isoWindowHeight, screenW, 1, 0)\n self.__isoWindow.timeout(1000) # msec\n self.__logWindow = LogWindow(screenH - 2 - isoWindowHeight, screenW,\n isoWindowHeight + 1, 0)\n self.__focus = 0\n self.__focusedWindow = self.__isoWindow\n self.__isoWindow.focus()\n quitting = False\n while not quitting:\n self.__updateLog()\n curses.panel.update_panels()\n curses.doupdate()\n c = self.__focusedWindow.getch()\n if c == curses.ascii.TAB:\n self.__switchFocus()\n elif c == ord('a'):\n self.__askForIso()\n elif c == ord('q'):\n quitting = True", "def update_time(self):\n time_metrics = self._fetch_time_metrics_and_clear()\n self._logger.info('update_time. time_metrics = %s', build_metrics_times_data(time_metrics))", "def start_session(self):\r\n ee.Initialize()", "def main():\n data_file = 'shrec_timer.json'\n\n if len(sys.argv) == 2:\n generate_data(data_file, sys.argv[1])\n\n plot_data(data_file)", "def run_trackings():\n return analytics.select_rows(\n analytics.trackings_table(),\n 0,\n 2)", "def demonstrate(self):\n self.env.render()\n self.action = 0\n\n # Control actions with keys\n @self.env.viewer.window.event\n def on_key_press(symbol, modifiers):\n self.action = Task.act(symbol)\n\n # Default to no action\n @self.env.viewer.window.event\n def on_key_release(symbol, modifiers):\n self.action = 0\n\n # Perform actions on a timer\n def tick(dt):\n if not self.ended():\n self.perform(self.action, render=True, delay=SLOW)\n self.update()\n pyglet.clock.schedule_once(tick, 0)\n\n tick(0)\n pyglet.app.run()", "def initializeQuestProcedure(win):\n \n questClock = core.Clock()\n return(questClock)", "def start(update: Update, _: CallbackContext) -> None:\n latest_day, previous_day = get_latest_stats_from_db()\n logger.info(latest_day['date'])\n _.bot_data.update({'date': str(latest_day['date'])})\n update.message.reply_markdown \\\n (\n \"*💉I'm the Irish Vaccine Data bot!💉* \\n\\n \"\n \"Try these commands\\n\\n\"\n \"✅ /daily - Subscribe for daily updates.\\n\\n\"\n \"📅 /latest - Get the latest vaccination stats.\\n\\n\"\n \"🗓 /week - Get the stats for the last 7 days.\\n\\n\"\n \"📈 /overall - Overall rollout statistics.\\n\\n\"\n \"📈 /supply - See the latest supply updates from the HSE.\\n\\n\"\n \"❎ /unsubscribe - Unsubscribe from daily updates.\\n\\n\"\n )\n\n logger.info(\"Start by \" + str(update.message.chat_id))", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 10 events')\n eventsResult = service.events().list(\n calendarId='[email protected]', timeMin=now, maxResults=10, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n\n\n # TODO noitem found\n 
print(datetime.datetime.strptime(events[0]['start']['dateTime'], '%Y-%m-%dT%H:%M:%S+09:00'))\n\n nextStartTime = datetime.datetime.strptime(events[0]['start']['dateTime'], '%Y-%m-%dT%H:%M:%S+09:00')\n delta = (nextStartTime - datetime.datetime.now()).total_seconds()\n\n if delta < 0:\n print(\"capture next\")\n nextStartTime = datetime.datetime.strptime(events[1]['start']['dateTime'], '%Y-%m-%dT%H:%M:%S+09:00')\n delta = (nextStartTime - datetime.datetime.now()).total_seconds()\n\n print(delta)\n\n if NOTIFY_THRESHOLD_SECOND > delta:\n alert_time_limit()\n else:\n set_normal()\n\n\n\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])", "def isFresh(self, timestamp):\n pass;", "def setSubmitTime(t):", "def main(db):\n tracks = list_tracks(db)\n tracks_amount = len(tracks)\n for i, track in enumerate(tracks):\n try:\n fill_info(track)\n persist_track_info(db, track)\n except Exception as e:\n print(e)\n\n print(\"Track filled: \", track[\"title\"], f\"{i+1}/{tracks_amount}\")\n time.sleep(randint(5, 20) / 10)", "def main():\n while True:\n city, month, day = get_filters()\n\n df = load_data(city, month, day)\n\n time_stats(df, city, month, day)\n station_stats(df, city)\n trip_duration_stats(df, city)\n # The city of washington does not provide user statistics\n if city != \"washington\":\n user_stats(df, city)\n\n sample = input(\n \"\\nIf you would like a sample of the raw date, enter 'yes' ===> \"\n )\n if sample.lower() == \"yes\":\n review_data(df)\n\n restart = input(\"\\nEnter 'yes' if you would like to restart ===> \")\n if restart.lower() != \"yes\":\n break", "def on_tick(self, _):\n now = datetime.datetime.now()\n is_weekday = (now.weekday() <= 5)\n is_workhour = (now.hour >= 7 and now.hour <= 16)\n is_top_of_the_hour = (now.minute >= 25 and now.minute <= 29)\n is_bottom_of_the_hour = (now.minute >= 55 and now.minute <= 59)\n is_break = is_top_of_the_hour or is_bottom_of_the_hour\n if is_weekday and is_workhour and not self.pause:\n if is_break:\n if self.app.title == 'work':\n rumps.notification(\"Break\", \"Time to take a break\", \"ok\")\n self.app.title = 'break'\n else:\n if self.app.title == 'break':\n rumps.notification(\"Work\", \"Time to work\", \"\")\n self.app.title = 'work'", "def increment_time(self, **kwargs):\n \n #Pull all optional keyword arguements\n if 'timerange' in kwargs:\n timerange = kwargs.pop('timerange')\n else:\n timerange = 7\n \n if 'display' in kwargs:\n displayflag = kwargs.pop('display')\n else:\n displayflag = 1\n \n if 'auto' in kwargs:\n autoflag = kwargs.pop('auto')\n else:\n autoflag = 0\n \n if 'triggered' in kwargs:\n triggered_rules = kwargs.pop('triggered')\n else:\n triggered_rules = []\n \n #Run simulation one day at a time until specified end point is reached\n count = range(0,timerange)\n for i in count:\n \n \n #Increment one day if at least one infected person remains. 
If not, end the simulation\n if self.SD_Map.IPop.value() > 1:\n time = self.timeSeries[-1]\n self.timeSeries.append(time+1)\n self.SD_Map.update_all(self.timestep(), len(self.timeSeries)-2)\n else:\n print('Done!')\n \n #Update the time display\n self.timev.set(self.timeSeries[-1])\n \n #Add any triggered rules to the rule log display\n if triggered_rules != []:\n day_text = self.translate('Day')+' ' + str(self.timeSeries[-1]) \n rule_text = '; ' + self.translate('Rules') + ': ' + str(triggered_rules)[1:-1]\n log_text = day_text + rule_text\n self.list_info_boxes['Log'].insert(tk.END, log_text)\n \n #If appropriate, update all of the graphs\n if displayflag == 1:\n if self.arrangment == ['Map', 'Graph']:\n index = 2\n invertflag = 1\n else:\n index = 0\n invertflag = 0\n \n #Select all of the graphs\n canvaslist = []\n for entrylist in self.graph_canvas_list:\n for entry in entrylist:\n canvaslist.append(entry)\n\n #For each graph, delete it and replace it with an update graph\n for canvas in canvaslist:\n if index < 2:\n col = 0\n inputindex = index\n self.figures[index].clear()\n plt.close(self.figures[index])\n else:\n col = 1\n inputindex = index - 2\n if invertflag:\n self.figures[inputindex].clear()\n plt.close(self.figures[inputindex])\n else:\n self.figures[index].clear()\n plt.close(self.figures[index])\n \n #Make new graph\n framename = canvas.get_tk_widget().master\n canvas.get_tk_widget().destroy()\n graph = self.translate(self.graph_setting_list[col][inputindex].get(),\n input_language=self.language,\n output_language='english')\n canvas,fig = self.make_graph(framename, graph,\n gridpos = inputindex*2+1)\n self.graph_canvas_list[col][inputindex]=canvas\n \n #Update figures list\n if invertflag:\n self.figures[inputindex] = fig\n else:\n self.figures[index] = fig\n index += 1", "def create_timeslots_at_interval():\n LOGGER.info(\"refreshing the timeslots\")\n timeslot_engine.generate_time_slots_from_range('9:00', '18:00')\n LOGGER.info(\"done refreshing the timeslots...\")", "def ttint(timelist,venue):\n #setup\n showturtle()\n #make python turtle graphics window 1260 pixels wide and 800 pixels tall\n setup(width = 1260, height = 800, startx = None, starty = None)\n reset()\n #text at top\n pen(pencolor=\"black\")\n pu()\n setpos(0,380)\n write(\"Welcome to your schedule. Use the arrow keys to toggle the day of the week\",move=False,align=\"center\",font=(\"Courier New\",10,\"normal\"))\n setpos(0,360)\n write(\"In Idle, type 'quit()' to exit turtle.\",move=False,align=\"center\",font=(\"Courier New\",10,\"normal\"))\n dayl = [\"Mon\",\"Tue\",\"Wed\",\"Thu\",\"Fri\",\"Sat\",\"Sun\"]\n setpos(0,-350)\n #writes venue at bottom of GUI\n write(venue,move=False,align=\"center\",font=(\"Courier New\",20,\"normal\"))\n #drawing the lines and timing\n #baseY = 300 because y = 300 is the height of the line for monday\n baseY = 300\n for ch in range(7):\n pu()\n #goes to relevant y position for respective day code\n setpos(-570,(baseY-(100*ch)))\n #writes day name at side\n write(dayl[ch],move=False,align=\"center\",font=(\"Courier New\",20,\"normal\"))\n pen(pencolor=\"black\",pensize=\"3\")\n #draws lines\n #for each hour\n for dh in range(19):\n #move right 60 steps\n setx(xcor()+60)\n pd()\n #move up 20 steps\n sety(ycor()+20)\n pu()\n #stop drawing. 
move up 10 steps and write hour\n sety(ycor()+10)\n write(str((600+(dh*100))),move=False,align=\"center\",font=(\"Courier New\",10,\"normal\"))\n #go back down 30 steps to main line\n sety(ycor()-30)\n #continue drawing\n pd()\n pu()\n #goes to each relevant timing to write module code\n #for every time range in timelist. dp stands for day parse\n for dp in range(len(timelist)):\n #if week day in timelist is not empty\n if len(timelist[dp]) >= 1:\n #for each timing in the week day. hp stands for hour parse\n for hp in range(1,len(timelist[dp])):\n #for each hour in the time range. pr is an arbitrary variable which helps to direct the turtle to the timings in between the start and end time to write the module code at the relevant location\n for pr in range(int((timelist[dp][hp][1]-timelist[dp][hp][0])/100)):\n #go to the relevant time and write the module code in between\n setpos((-840+(int(timelist[dp][hp][0]/100)+pr)*60),(410-timelist[dp][0]*100))\n write(timelist[dp][hp][2],move=False,align=\"center\",font=(\"Courier New\",8,\"normal\"))", "def main():\n global repeat\n regime = collect()\n start = int(raw_input(\"Which line of the exercise script would you like to begin with? \")) - 1\n regime = regime[start:]\n say(\"Ready?\")\n time.sleep(1)\n for exercise in regime:\n coach(exercise[:-1])\n while repeat:\n repeat = False\n coach(exercise[:-1])\n say(\"Session complete.\")", "def run(self):\n last_time = time.time()\n while self.running:\n now_time = time.time()\n interval = now_time - last_time\n last_time = now_time\n self.update(interval)\n time.sleep(Options['update interval'])", "def StartTimer(self):\n self._start_time = time.time()", "def start(self):\n self.log.setLevel(logging.INFO)\n super().start()\n \n self._dts = rift.tasklets.DTS(self.tasklet_info,\n UtCompositeYang.get_schema(),\n self._loop,\n self.on_dts_state_change) \n\n # Set the instance id\n self.instance_name = self.tasklet_info.instance_name\n self.instance_id = int(self.instance_name.rsplit('-', 1)[1])\n self.log.debug(\"Starting TestDriverTasklet Name: {}, Id: {}\".format(\n self.instance_name,\n self.instance_id))\n\n self.state = TaskletState.STARTING", "def run_game(self):\n #create ufos\n self.creat_fleet_ufos()\n \n while True:\n \n self._check_events()\n self.ship.update()\n #self.ufos.update()\n self.missiles.update()\n self._update_screen()\n self.delete_missiles()", "def on_start(self):\r\n # This adjust the recipe tiles to the correct starting width:\r\n self.update_tile_width()\r\n # This searches the database in order to find all recipes and generate Tiles:\r\n self.update_tile_menu()\r\n toast('Welcome!', 3)" ]
[ "0.6347523", "0.576873", "0.5634519", "0.56242204", "0.56151354", "0.5609387", "0.56086725", "0.55940264", "0.5576316", "0.5486407", "0.54707676", "0.5469594", "0.5462939", "0.54449016", "0.5444178", "0.5436368", "0.54061335", "0.53546655", "0.53450114", "0.5342797", "0.53191763", "0.53158724", "0.5309369", "0.52914613", "0.52407014", "0.52161396", "0.5204799", "0.5183675", "0.51699257", "0.516171", "0.5159982", "0.5150497", "0.51500803", "0.5149131", "0.5146019", "0.51301", "0.5116246", "0.51070166", "0.5091785", "0.50909406", "0.5085462", "0.5081983", "0.5075694", "0.50719935", "0.5066659", "0.50649375", "0.5057975", "0.5054996", "0.50498414", "0.50446886", "0.5044226", "0.5042055", "0.5035714", "0.5025315", "0.50245374", "0.50240076", "0.5023296", "0.5019952", "0.5010318", "0.5003153", "0.4998259", "0.4995756", "0.49807003", "0.49778724", "0.49707127", "0.49635142", "0.49605", "0.49604973", "0.4959858", "0.4956418", "0.49542442", "0.49507546", "0.4949555", "0.49418592", "0.4940541", "0.49331692", "0.49291825", "0.4909009", "0.4906558", "0.49060813", "0.49037692", "0.49020535", "0.4895071", "0.4893443", "0.48811272", "0.4868921", "0.4868374", "0.48644963", "0.48628283", "0.48625833", "0.48571065", "0.4848369", "0.4845938", "0.48457012", "0.48425922", "0.48271155", "0.48267934", "0.48167446", "0.48156765", "0.48108837" ]
0.69116616
0
Starts an interactive search and allows the user to make a selection. Accepts an array of strings and an optional (user) query. Returns the string chosen by the user.
def interactive_search(self, choices, query=None):
    if query:
        match = self.get_interactive_match(choices, query)
        if match:
            self.print("Matched query to '%s'." % (match))
            answer = input("Is that correct? (Y/n) ")
            self.clear_lines(1)
            if answer.lower() == 'y' or answer == '':
                self.clear_lines(1)
                return match
            else:
                self.clear_lines(1)
                return self.interactive_search(choices)
        else:
            return None
    else:
        query = input("Please type a query: ")
        self.clear_lines(1)
        return self.interactive_search(choices, query)
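A minimal standalone sketch of the same confirm-or-retry selection flow, for illustration only: the class helpers (get_interactive_match, clear_lines, the custom print) are not reproduced here, so the standard-library difflib matcher stands in for the fuzzy scorer, and the function name, cutoff value, and sample call are assumptions rather than part of the original API.

from difflib import get_close_matches

def pick_interactively(choices, query=None):
    # Prompt for a query if one was not supplied, mirroring the flow above.
    if query is None:
        query = input("Please type a query: ")
    # Fuzzy-match the query against the available choices (best single hit).
    matches = get_close_matches(query, choices, n=1, cutoff=0.3)
    if not matches:
        return None
    match = matches[0]
    # Confirm with the user; Enter or 'y' accepts the match.
    answer = input("Matched query to '%s'. Is that correct? (Y/n) " % match)
    if answer.lower() in ('y', ''):
        return match
    # Wrong guess: recurse and ask for a fresh query.
    return pick_interactively(choices)

# e.g. pick_interactively(['alpha', 'beta', 'release candidate'], 'beta')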
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ask_query(self):\n self.__output = list()\n return input(form('What do you want to search?\\n> '))", "def search():\n query = input('Please enter your search query\\n')\n # For now, we will just print the whole database\n #db_actions.display()\n db_actions.search(query)", "def invoke(self):\n # set menu handlers\n menu_handlers = [\n SearchByAuthor(self.db, self),\n SearchByName(self.db, self),\n SearchByPublishedDate(self.db, self)\n ]\n\n # display menu, get selection, and run\n is_exit = False\n while not is_exit:\n menu = ConsoleMenu(\n menu_handlers,\n \"Search Book by text:\"\n )\n menu.display_menu()\n is_exit = menu.prompt_and_invoke_option()", "def search():\n try:\n query = request.args.get(\"q\").lower()\n except AttributeError:\n query = request.args.get(\"q\")\n\n # Adding browse functionality\n browse = request.args.get(\"browse\")\n\n if browse is None:\n # Select all rows with a column value that includes query\n results = db.execute(\"SELECT * FROM books \"\n \"WHERE LOWER(isbn) LIKE CONCAT('%', :q, '%')\"\n \"OR LOWER(title) LIKE CONCAT('%', :q, '%') \"\n \"OR LOWER(author) LIKE CONCAT('%', :q, '%') \"\n \"ORDER BY title LIMIT 100\", {'q': query}).fetchall()\n else:\n # Select titles starting with letter\n results = db.execute(\n \"SELECT * FROM books \"\n \"WHERE LOWER(title) LIKE CONCAT(:q, '%') \"\n \"ORDER BY title\", {'q': query}).fetchall()\n\n return render_template(\"search.html\", browse=browse, query=query, results=results)", "def search():\n import booksearch as bs\n\n opt = var.get()\n term = searchBox.get()\n term2 = dateBox.get()\n\n # Case statement (substitute) for different search areas\n # Each key is an option in the OptionMenu\n searchBy = {\n \"Title & Author\" : bs.search(term),\n \"ID\" : bs.bookID(term),\n \"Date\" : bs.dateRange(term, term2),\n }\n query = searchBy[opt] # Make & stores a query (2D list)\n\n # Repopulates table\n if term != \"\":\n populate(query)", "def search_menu():\n clear_screen()\n print(\"What would you like to search by?\")\n print(\" d: Date (Default)\")\n print(\" t: Time spent\")\n print(\" e: Exact\")\n print(\" p: Pattern (Regex)\")\n user_input = input(\"> \").lower()\n if user_input == 't':\n search_by_time_spent()\n elif user_input == 'e':\n search_by_string()\n elif user_input == 'p':\n search_by_pattern()\n else:\n search_by_date()", "def book_search(library: list) -> None:\n options = ['Author', 'Title', 'Publisher', 'Shelf', 'Category', 'Subject']\n prompt = '\\nWhat option would you like to search by?'\n choice = get_user_choice(options, prompt)\n if choice == '1':\n search_by_chosen_option(library, options[0])\n elif choice == '2':\n search_by_chosen_option(library, options[1])\n elif choice == '3':\n search_by_chosen_option(library, options[2])\n elif choice == '4':\n search_by_shelf(library)\n elif choice == '5':\n search_by_chosen_option(library, options[4])\n elif choice == '6':\n search_by_chosen_option(library, options[5])", "def search_term():\n search = input(\"Enter term or string: \")\n entries = select_entries()\n entries = entries.where(\n (Entry.task_name.contains(search)) |\n (Entry.notes.contains(search)))\n view_entries(entries)\n return entries", "def _search(client, search_string):\n if search_string is None:\n logger.info(uxstring.UxString.list_all, fg=\"green\")\n\n current_page = 0\n total_pages = get_search_results(client, search_string, current_page)\n if total_pages < 1:\n return\n\n while 0 <= current_page < total_pages:\n try:\n prompt_resp = 
click.prompt(uxstring.UxString.pagination,\n type=str)\n next_page = get_next_page(prompt_resp, current_page)\n if next_page == -1:\n model_id = prompt_resp\n display_search_info(client, model_id)\n elif next_page >= total_pages or next_page < 0:\n continue\n elif next_page != current_page:\n get_search_results(client, search_string, next_page)\n current_page = next_page\n\n except click.exceptions.Abort:\n return", "def other_search(self):\n test = self.ask_zoekarg.text()\n if test:\n self.parent().search_arg = test\n self.parent().do_select()", "def searchUser(database):\n print(\"How do you want to search for a user\\n1.name\\n2.field\\n3.year of study\\n4.areas of interest\\n5.Quit\")\n choice=int(input(\"Your choice :\"))\n if choice==1:\n searchByName(database)\n elif choice==2:\n searchByField(database)\n elif choice==3: \n searchByYear(database)\n elif choice==4:\n searchByInterest(database)\n elif choice==5:\n return", "def __ui_choose_search_criteria_for_persons(self):\n print(\"By which criteria do you want to search persons?\\n\"\n \" 1. By name\\n\"\n \" 2. By phone number\\n\")\n user_choice = input(\"Type your option: \").strip()\n if user_choice == \"1\":\n self.__ui_search_persons_by_name()\n elif user_choice == \"2\":\n self.__ui_search_persons_by_phone_number()\n else:\n print(\"Invalid option!\\n\")\n return", "def search(query_string):", "def search():\n pass", "def search():\r\n ch = input('You are about to SEARCH for an entry. If NO, you may choose another option.\\n').lower()\r\n\r\n if y_n(ch):\r\n print('Enter your desired subject to search in...\\n')\r\n chs2 = ['last name', 'l', 'first name', 'f', 'grade', 'g', 'stream', 's', 'role', 'r']\r\n ch2 = input('Search by LAST NAME, FIRST NAME, GRADE, STREAM, or ROLE?\\n').lower()\r\n ch2 = check(ch2, chs2)\r\n\r\n if ch2 == 'last name' or ch2 == 'l':\r\n query(ln_s(re.sub(r'\\s', '', str(input('Desired last name?\\n')))))\r\n elif ch2 == 'first name' or ch2 == 'f':\r\n query(fn_s(re.sub(r'\\s', '', str(input('Desired first name?\\n')))))\r\n elif ch2 == 'grade' or ch2 == 'g':\r\n try:\r\n xgr = int(input('Desired grade?\\n'))\r\n xgrs = [8, 9, 10, 11, 12, 13]\r\n\r\n xgr = check_int(xgr, xgrs)\r\n query(gr_s(xgr))\r\n except ValueError:\r\n print('You did not enter an applicable grade. Please enter another value.')\r\n search()\r\n elif ch2 == 'stream' or ch2 == 's':\r\n query(sr_s(str(input('Desired stream?\\n'))))\r\n else:\r\n query(rl_s(str(input('Desired role?\\n'))))\r\n else:\r\n start()", "def search(self, query):\n launch_gs_app('search',\n self.browser,\n GoogleSuite.SEARCH_URL.format(_urlencode([('q', query)])))", "def __search_student(self):\n menu_string = \"Search for a student:\\n\"\n menu_string += \"\\t1. by ID\\n\"\n menu_string += \"\\t2. by discipline_name\\n\"\n menu_string += \"\\t0. 
Exit\\n\"\n\n stop = False\n while not stop:\n command_list = \\\n {\n '1': self.__ui_search_student_by_id,\n '2': self.__ui_search_student_by_name,\n '0': self.__no_command\n }\n command = self.__ui_read_command(menu_string)\n\n if command == '0':\n return\n\n search = input(\"Enter search_substring string: \")\n if len(search) == 0:\n print(\"Search string cannot be empty!\")\n return\n\n if command in command_list.keys():\n command_list[command](search)\n else:\n print(\"Invalid command!\")", "def get_interactive_match(self, choices, query):\n if query in self.SKIP_KEYWORDS:\n return None\n results = process.extract(query, choices, limit=10) # fuzzy string matching\n best_match = results[0]\n second_best_match = results[1]\n if best_match[1] == second_best_match[1] or best_match[1] < 50: # if inconclusive or low score\n self.print(\"Couldn't find a conclusive match for '%s'. Best matches:\" % (query))\n i = 0\n for result in results:\n i += 1\n print(\" [%i] %s\" % (i, result[0]))\n answer = input(\"Choose one or specify a less ambiguous query: \")\n self.clear_lines(2 + len(results))\n if answer.isdigit() and int(answer) <= len(results):\n return results[int(answer) - 1][0]\n else:\n return self.get_interactive_match(choices, answer)\n else:\n return best_match[0]", "def interactive_select(space, current):\n print \"Type an element name, an element index, or an unambiguous prefix to add to your selection.\"\n print \"Type '\" + color_code(MAGENTA) + \"list\" + CLEAR_COLOR +\"' to see the list of valid selections/indices.\"\n print \"Type '\" + color_code(MAGENTA) + \"clear\" + CLEAR_COLOR +\"' to clear selection.\"\n print \"Enter an empty line when done.\\n\"\n \n done = False\n while not done:\n print color_code(BLACK, bold=True), \"\\nCurrent selection\" + CLEAR_COLOR + \":\", (current if current else \"None\")\n tentative = raw_input(color_code(YELLOW) + \"Selection or Command\" + CLEAR_COLOR + \": \")\n matches = [el for el in space if el.startswith(tentative)]\n try: index = int(tentative)\n except ValueError: index = None\n if tentative == 'list':\n for i,el in enumerate(space):\n print \"\\t\", color_code(BLUE, bold=True), i, CLEAR_COLOR, el\n print \"\\n\"\n elif tentative == 'clear':\n current = []\n elif tentative == '':\n if current:\n print color_code(GREEN), \"\\nFinal selection\" + CLEAR_COLOR + \":\", current, \"\\n\\n\"\n done = True\n else:\n print_error(\"Must select at least one\")\n elif len(matches) > 1:\n print_error(\"Multiple matches found for `{}' ({})\".format(tentative, matches))\n elif len(matches):\n if matches[0] in current:\n print_warning(\"{} was already selected\".format(matches[0]))\n else:\n current.append(matches[0])\n elif index is not None:\n if index < 0 or index >= len(space):\n print_error(\"Invalid index {}\".format(index))\n elif space[index] in current:\n print_warning(\"{} was already selected\".format(space[index]))\n else:\n current.append(space[index])\n else:\n print_error(\"Unknown token: {}\".format(tentative))\n \n return current", "def choose_from_list(query_category, query_list):\n print('Choose the {cat} you want from the below list:'.format(\n cat=query_category))\n for counter, value in enumerate(query_list):\n print('{counter}: {value}'.format(counter=counter, value=value))\n selection = input('Choice: ')\n return query_list[int(selection)]", "def lookup_search_term():\n while True:\n search_query = input('Show entries containing (in name or notes): ')\n if validate_lookup_search_term_format(search_query):\n break\n print('** 
Please enter search term **')\n return (Entry.select().where(Entry.employee_name.contains(search_query)) |\n Entry.select().where(Entry.task_notes.contains(search_query)))", "def __ui_choose_search_criteria_for_activities(self):\n print(\"By which criteria do you want to search activities?\\n\"\n \" 1. By date\\n\"\n \" 2. By description\\n\")\n user_choice = input(\"Type your option: \").strip()\n if user_choice == \"1\":\n self.__ui_search_activities_by_date()\n elif user_choice == \"2\":\n self.__ui_search_activities_by_description()\n else:\n print(\"Invalid option!\\n\")\n return", "def search_by_string(self):\n print(\"*** String Search ***\\n\")\n print(\"Enter a search string.\\n\")\n print(\"- NAME and NOTE will be searched for all tasks -\")\n print(\"- Searching IS case-sensitive, but partial matches will be returned -\\n\")\n while True:\n try:\n search_string = input(\">>> \")\n results = self.regex_entry_search(search_string)\n except re.error:\n print(\"Couldn't parse search query. Please try again.\")\n else:\n clear_screen()\n print(f\"Found {len(results)} matches for string \\\"{search_string}\\\"...\\n\")\n self.print_selected_entries(results)\n break", "def ask_search():\n\n print(\n\"\"\"\nPlease enter your desired keywords for the lexical dispersion analysis. For quick templates, enter the following keys:\n\ntemplate_insurance: insurance identifier terms\ntemplate_contract: contract identifier terms\ntemplate_privacy: privacy contract identifier terms\n\nTo stop entering keywords, simply enter an empty input.\n\"\"\"\n )\n\n #asking user for search terms\n ask = True\n search = []\n\n while ask == True:\n temp = input(\"Enter a keyword: \")\n if temp == \"\":\n break\n elif temp == \"template_insurance\":\n search = [\"treatment\", \"premium\", \"claim\", \"benefit\", \"exclusions\", \"charges\", \"payment\", \"occupation\"]\n break\n elif temp == \"template_contract\":\n search = [\"defined\",\"liability\",\"service\",\"confidential\",\"terminate\",\"law\", \"breach\"]\n break\n elif temp == \"template_privacy\":\n search = [\"purpose\",\"personal\",\"data\",\"collect\",\"transfer\",\"services\",\"contact\",\"provide\",\"authority\",\"marketing\",\"retention\",\"consent\",\"analysis\",\"analytics\"]\n break\n else:\n search.append(temp)\n\n return search", "def search(self, query=None):\n\n self.visual.log(\"Starting search\")\n if self.search_invoke_counter > 0:\n # step to the starting history to search everything\n self.reset_history()\n search_done = False\n just_began_search = True\n query_supplied = bool(query)\n\n ttr = TimedThreadRunner(self.search_for_entry, \"\")\n # ttr.set_delay(1, self.visual.log, \"delaying search execution...\")\n\n while True:\n # get new search object, if it's a continued search OR no pre-given query\n if not just_began_search or (just_began_search and not query_supplied):\n search_done, new_query = self.visual.receive_search()\n self.visual.log(\"Got: [{}] [{}]\".format(search_done, new_query))\n if search_done is None:\n # pressed ESC\n self.visual.message(\"Aborting search\")\n return\n if new_query == \"\" and search_done:\n # pressed enter\n self.visual.message(\"Concluded search\")\n break\n # got an actual query item\n # if query content is updated, reset the timer\n query = new_query\n\n query = query.lower().strip()\n # ttr.reset_time(query)\n # self.visual.log(\"Got query: {}\".format(query))\n # ttr.update_args(query)\n # ttr.start()\n # ttr.stop()\n # results_ids = ttr.get_result()\n results_ids = 
self.search_for_entry(query)\n # results_ids = []\n just_began_search = False\n self.search_invoke_counter += 1\n if not self.visual.does_incremental_search:\n break\n\n if not query:\n # no search was performed\n return\n # push the reflist modification to history\n self.change_history(results_ids, \"search:\\\"{}\\\"\".format(query))", "def search_by_chosen_option(library: list, chosen_option: str) -> None:\n user_input = input(f'What is the name of the {chosen_option} you want to search for?')\n found_books = []\n for book in library:\n if user_input.lower() in str(getattr(book, chosen_option.lower())).lower():\n found_books.append(book)\n print(f'We found {len(found_books)} book(s) that matched this search in your library.\\n')\n for num, book in enumerate(found_books, 1):\n print(f'{num} - {book.__repr__()}')\n if len(found_books) > 0 and not return_to_main_menu():\n move_book(library, found_books)", "def search(self, *args, **kwargs):", "def search(self, query):\n logger.debug('Performing search for: '+query)\n write_textfield('queryString', query+\"\\n\", check=False)\n self.waitForLoaderToDisappear()", "def search():\n # Check for database tables\n check_db()\n # Check for GET data\n search_query = request.args.get(\"q\", None)\n # Format search results as HTML\n search_results = get_search_results_html(search_query)\n # Format recent searches as HTML\n recent_searches = get_recent_searches_html()\n\n return html_wrapper('<h1>' + SITE_NAME + '''</h1>\n <form action=\"/\" method=\"GET\">\n <input type=\"text\" name=\"q\">\n <input type=\"submit\" value=\"search\">\n </form>''' + search_results + recent_searches)", "def search(search, candidates):\n choicer = choices.Choice()\n for candidate in candidates:\n choicer.add(candidate)\n return choicer.search(search)", "def search(self, query):", "def lookup(self, query_text):\n query = query_text.lower()\n if query in self.mesh.keys():\n return self.mesh[query]\n else:\n closest = difflib.get_close_matches(query, self.mesh.keys())\n print('Did you mean?')\n for ind, match in enumerate(closest):\n print('%i) %s' % (ind + 1, self.mesh[match]['name']))\n selection = input()\n try:\n if int(selection) <= len(closest):\n return self.mesh[closest[0]]\n else:\n sys.stdout.write('Not a known selection, exiting...\\n')\n sys.exit(0)\n except Exception:\n sys.stdout.write('Unknown exception, exiting...\\n')\n sys.exit(0)", "def selection_input(\n self,\n prompt,\n choices,\n default=None,\n error_message=\"Invalid Selection\",\n transform=None\n ):\n while True:\n result = self.text_input(prompt, default)\n\n if transform is not None and result is not None:\n result = transform(result)\n\n if result in choices:\n return result\n\n print()\n print(error_message)", "async def choose(self, ctx, *args):\n query = \" \".join(args)\n choices = query.split(\" or \")\n if len(choices) < 2:\n await ctx.send(\"Give me at least 2 options to choose from! 
(separate options with `or`)\")\n self.logger.warning(misolog.format_log(ctx, f\"1 option\"))\n return\n choice = rd.choice(choices).strip()\n await ctx.send(f\"I choose **{choice}**\")\n self.logger.info(misolog.format_log(ctx, f\"{choice}\"))", "def get_search_string():\n return input(\"Enter search name, or phone number: \")", "def search(self, query_id, query_str):\n pass", "def search_results(request):\n #key\n\n user_input = request.GET['q']\n\n people_objs = Person.objects.filter(Q(last__contains=user_input) | Q(\n first__contains=user_input))\n document_objs = Document.objects.filter(title__contains=user_input)\n folder_objs = Folder.objects.filter(full__contains=user_input)\n organization_objs = Organization.objects.filter(Q(name__contains=user_input)|Q(\n location__contains=user_input))\n obj_dict = {\n 'people_objs': people_objs,\n 'document_objs': document_objs,\n 'folder_objs': folder_objs,\n 'organization_objs': organization_objs,\n 'query': user_input,\n }\n response = render(request, 'search_results.jinja2', obj_dict)\n return response", "def select_user_search_collection(args):\n is_parameter_exists([\n constants.TEXT, constants.COLLECTION_ID\n ], args)\n\n # Collection ID\n collection_id = args[constants.COLLECTION_ID]\n\n # Request User\n request_user = args[constants.USER]\n\n # Search Keyword\n keyword = args[constants.TEXT]\n\n # Page Number\n page_number = 1 if constants.PAGE_NUMBER not in args else int(args[constants.PAGE_NUMBER])\n\n # User Queryset\n queryset = User.objects.annotate(\n is_in_collection=__is_in_collection('id', collection_id)\n ).filter(\n username__icontains=keyword,\n is_in_collection=False\n ).values_list('id', flat=True)\n\n # User Ids\n user_ids = get_results_from_queryset(queryset, 10, page_number)\n\n # is_finished\n is_finished = not user_ids.has_next()\n\n # Users\n users, _, _ = __get_users(Q(id__in=user_ids), request_user, 10)\n\n return users, page_number, is_finished", "def search():\n while True:\n clear()\n print(dedent(\"\"\"\n What do you want to search by? Enter a through e.\n a) Employee\n b) Date\n c) Time Spent\n d) Search Term\n e) Return to main menu\n \"\"\"))\n choice = input(\"> \")\n\n if choice == \"a\":\n search_employee()\n elif choice == \"b\":\n search_date()\n elif choice == \"c\":\n work_log.search_duration()\n elif choice == \"d\":\n work_log.search_exact()\n elif choice == \"e\":\n break\n else:\n print(\"Please enter a valid choice\")\n time.sleep(3)", "def search(self, term):", "def search(self, query, limit=10):\n word_ids, url_ids = self.query(query, limit)\n selected_url = random.choice(url_ids)\n print(\"User selected url \\\"{}\\\"\".format(self.get_url_name(selected_url)))\n return SearchNet().train_query(word_ids, url_ids, selected_url)", "def getSelected(*args):", "def search_request():\n my_dropdown_items = [] # create the dropdowns list\n query_data = request.get_json() # get the query data, which simple-json puts in \"target\"\n\n # example 1: in this case, Grafana template variable had \"server_list\" as the query\n if query_data.get('target') == \"server_list\":\n my_dropdown_items += ['here', 'are', 'some', 'things', 'for', 'the', 'dropdowns']\n \n # example 2: the query in Grafana template var is \"server_values\"\n elif query_data.get('target') == \"server_values\":\n \"\"\"For this example, we are using the text/values. 
The difference is that when a user selects one of the dropdowns\n on Grafana, the value will be sent in any queries using that template var instead of the name like the first example\n above. This is useful if you want to provide an id in queries, rather than parsing the human-readable name.\"\"\"\n my_dropdown_items += [{'text':'here', 'value':'key1'}, {'text':'another', 'value':'key2'}]\n\n return make_response(jsonify(my_dropdown_items))", "def search_command():\n listing.delete(0, END)\n for row in backend.search(title_text.get(), \n author_text.get(), \n year_text.get(), \n isbn_text.get()):\n listing.insert(END, row)", "def _search(progtext, qs=None, splash=True, pre_load=True):\n g.message = \"Searching for '%s%s%s'\" % (c.y, progtext, c.w)\n\n # show splash screen during fetch\n if splash:\n g.content = logo(c.b) + \"\\n\\n\"\n screen_update()\n\n # perform fetch\n wdata = call_gdata('search', qs)\n songs = get_tracks_from_json(wdata)\n\n if songs and pre_load:\n # preload first result url\n kwa = {\"song\": songs[0], \"delay\": 0}\n t = threading.Thread(target=preload, kwargs=kwa)\n t.start()\n\n if songs:\n g.model.songs = songs\n return True\n\n return False", "def search_for_books(main_page): # Add information to the printout if the book is rented\n\n type_of_search = 0\n\n header = \"\"\"\n Do you want to search for books by the first letter of the title\n or by the type?\n \"\"\"\n search_choices= (\n (\"To search by letter\", search_by_letter),\n (\"To search by type\", search_by_type),\n (\"To exit\",exit.exit_to_main)\n )\n\n book_search = Screen(header,search_choices,\n main_page.login, main_page.password)\n book_search.activate()", "def run_interactive(query, editor=None, just_count=False, default_no=False):\n global yes_to_all\n\n # Load start from bookmark, if appropriate.\n bookmark = _load_bookmark()\n if bookmark and not yes_to_all:\n print('Resume where you left off, at %s (y/n)? 
'\n % str(bookmark), end=' ')\n sys.stdout.flush()\n if (_prompt(default='y') == 'y'):\n query.start_position = bookmark\n\n # Okay, enough of this foolishness of computing start and end.\n # Let's ask the user about some one line diffs!\n print('Searching for first instance...')\n suggestions = query.generate_patches()\n\n if just_count:\n for count, _ in enumerate(suggestions):\n terminal.terminal_move_to_beginning_of_line()\n print(count, end=\" \")\n sys.stdout.flush() # since print statement ends in comma\n print()\n return\n\n for patch in suggestions:\n _save_bookmark(patch.start_position)\n _ask_about_patch(patch, editor, default_no)\n print('Searching...')\n _delete_bookmark()", "def __search(self):\n self.resultList.clear()\n self.infoLabel.clear()\n \n self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)\n self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(True)\n self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)\n self.searchButton.setEnabled(False)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n QApplication.setOverrideCursor(Qt.WaitCursor)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n self.__canceled = False\n \n self.__query = [term for term in self.searchEdit.text().strip().split()\n if term not in PipSearchDialog.Stopwords]\n self.__client.call(\n \"search\",\n ({\"name\": self.__query, \"summary\": self.__query}, \"or\"),\n self.__processSearchResult,\n self.__searchError\n )", "def search():\r\n\r\n # Ensure parameter is present\r\n if not request.args.get(\"q\"):\r\n raise RuntimeError(\"missing search string\")\r\n\r\n # Query db with the string LIMIT result to 10\r\n # TODO\r\n\r\n # send back as json\r\n # TODO\r\n\r\n return jsonify({})", "def searchWikidata(input, type):\n # Whenever the user types something in the searchbar open a session\n if len(input) >= 1:\n # The string with API wbsearchentities to suggestions to the user input\n URL = \"https://www.wikidata.org/w/api.php?action=wbsearchentities&search=%s\" \\\n \"&format=json&limit=5&formatversion=2&language=en&type=%s\" % (input, type)\n with requests.Session() as S:\n DATA = S.post(url=URL, headers={\"user-agent\": \"magic browser\", \"Content-Type\": \"application/json\"}).json()\n\n # Whenever a search entity is returned, do something\n if len(DATA[\"search\"]) >= 1:\n # Go through the DATA.json and append an entity label, id and description to a option list\n option_list = []\n for option in DATA[\"search\"]:\n temp_str = \"\"\n\n try:\n temp_str += option[\"label\"] + \" (\"\n except Exception:\n temp_str += \"|\"\n\n try:\n temp_str += option[\"id\"] + \") | \"\n except Exception:\n temp_str += \"|\"\n\n try:\n temp_str += option[\"description\"]\n except Exception:\n \"\"\n\n option_list.append(temp_str)\n\n # Creates a list with the suggested entities\n return html.Ul([html.Li(temp_str) for temp_str in option_list])\n\n # If no results is returned do something\n else:\n return \"No results could be found\"\n\n # Do nothing when no input\n else:\n return \"\"", "def search(request):\n\n # get form data \n searchItem = request.GET.get(\"q\")\n # if searchItem is an exact match redirect to that page\n if (util.get_entry(searchItem) is not None):\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": searchItem\n }))\n # add any pages with the string in it to results list \n else: \n results = []\n substring = False\n for title in util.list_entries():\n if searchItem.upper() in title.upper():\n 
results.append(title)\n if results:\n substring = True\n # return results\n return render(request, \"encyclopedia/search.html\", {\n \"searchItem\": searchItem,\n \"substring\": substring,\n \"results\": results\n })", "def question():\n print('Enter 1 to search database by habitat with detailed information\\nEnter 2 to search database by coordinates \\nEnter 3 to search by habitat in csv file for a quick overview without detail')\n print('habitat search options so far:\\n Alpenvorland, Niederrheinisches Tiefland, Oberrheinisches Tiefland')\n src = int(input('Enter here:'))\n\n if src == 1:\n habitat = input('Enter name of habitat\\n')\n query = \"habitat = '\" + habitat + \"'\"\n search_db_via_query(query)\n elif src == 2:\n search_by_coordinates()\n elif src == 3:\n search_by_habitat()\n else:\n print('no data')", "def on_searchButton_clicked(self):\n self.__search()", "def start():\n while True:\n clear_screen()\n print(\"Select an option:\")\n print(\" e: Enter new entry (Default)\")\n print(\" s: Search\")\n print(\" q: Quit\")\n user_input = input(\"> \").lower()\n if user_input == 'q':\n sys.exit()\n if user_input == 's':\n search_menu()\n else:\n new_entry()", "def run_search(self, evt):\n search_input = self.search_input_txtctrl.GetValue()\n self.execute_google_search(str(search_input))\n self.set_result_to_dict_for_page_scroller()\n self.clear_result_screen()\n self.trigger_scroller_event()", "def QueryStrGeneral(cls, queryString: str, errorPrompt: str, conditionList: list) -> str:\n\n global userInput\n\n try:\n userInput = input(queryString).upper()\n\n # Check if userInput points to either of the options, and recursively call\n # the function until userInput has an actionable value.\n if userInput not in conditionList:\n raise ValueError\n\n except ValueError:\n # Reprompt user for valid entry.\n print(errorPrompt)\n cls.QueryStrGeneral(queryString, errorPrompt, conditionList)\n\n except Exception:\n print(\"\\nOops something is buggy\")\n\n return userInput", "def option_search(args):\n print(\"= SEARCH =\")\n print()\n print(\"Index file:\\t\\t{}\".format(args.indexfile))\n print(\"QE enabled:\\t\\t{}\".format(args.queryexpansion))\n if not os.path.exists(args.indexfile):\n raise OSError(\"No such file!\")\n print(\"Query:\\t\\t\\t'{}'\".format(args.query))\n print(\"\\n\")\n\n search.search_index(args.indexfile, args.query,\n top=args.top,\n default_field=args.defaultfield,\n display_fields=args.resultfields,\n qe=args.queryexpansion)", "def showSelectionInTitle(*args, **kwargs)->None:\n pass", "def search():\n query = request.args['query']\n # find instances of the entered word in title, tags or ingredients\n results = mongo.db.places.find({\n '$or': [\n {'name': {'$regex': query, '$options': 'i'}},\n {'tags': {'$regex': query, '$options': 'i'}},\n {'city': {'$regex': query, '$options': 'i'}},\n ]\n })\n return render_template('search.html', query=query, results=results)", "def search():\n if request.method == \"GET\":\n mongo_collection = mongo_database[\"questions\"]\n query = request.args.get(\"keyword\")\n result = mongo_collection.find({\"$text\": {\"$search\": query}})\n objects = []\n for object in result:\n objects.append(object)\n return render_template(\"search.html\", cards=objects)\n else:\n return start()", "def search(self):\n premium = self.config.get('premium', False)\n\n self.params[self.opts['keyword']['query_key']] = self.config[self.opts['keyword']['config_key']] # keyword\n # Selection params\n self.append_param('tag_mode', 'selection')\n if premium:\n 
self.append_param('order_premium', 'selection')\n else:\n self.append_param('order_not_premium', 'selection')\n\n self.append_param('type', 'selection')\n self.append_param('tool', 'selection')\n self.append_param('ratio', 'selection')\n self.append_param('mode', 'selection')\n\n # Number params\n self.append_param('min_width', 'number')\n self.append_param('max_width', 'number')\n self.append_param('min_height', 'number')\n self.append_param('max_height', 'number')\n if premium:\n self.append_param('min_bookmark', 'number')\n self.append_param('max_bookmark', 'number')\n else:\n self.set_bookmark_filter()\n\n # Date params\n self.append_param('start_time', 'date')\n self.append_param('end_time', 'date')\n\n # multi work filter\n self.filters['multi'] = self.config.get('download_multi', False)\n\n for i in range(self.config['start_page'], self.config['end_page'] + 1):\n self.params['p'] = i\n self.headers['Referer'] = 'https://www.pixiv.net/'\n url ='https://www.pixiv.net/search.php'\n html = self.session.get(url, headers = self.headers, params = self.params, timeout = 10, proxies = self.proxies)\n\n soup = BeautifulSoup(html.text, 'lxml')\n data_items = json.loads(soup.find('input', id = 'js-mount-point-search-result-list')['data-items'])\n\n return self.extract_work_info(data_items)", "def autocomplete():\n query = '' if request.args.get('query') is None else request.args.get('query')\n\n prefixed_words = []\n close_words = []\n for f in app.preprocessed.words:\n lowered = f.lower()\n if lowered.startswith(query) and lowered != query:\n prefixed_words.append(f)\n elif levenshtein(query, lowered) <= 1:\n close_words.append(f)\n\n result = {\n 'success': True,\n 'data': {\n 'suggestions': prefixed_words + close_words\n }\n }\n return jsonify(result)", "def do_search(request):\n products = Product.objects.filter(name__icontains=request.GET['q'])\n return render(request, \"search_results.html\", {\"products\": products})", "def search(request, is_my_list=\"False\"):\n\n search_type = request.GET.get(\"submit\")\n if search_type:\n\n # get query field\n query = ''\n if request.GET.get(search_type):\n query = request.GET.get(search_type)\n\n proj_ids = []\n cod_ids = []\n\n valid_searches = [constants.STRING_TITLE, constants.STRING_DESCRIPTION, constants.STRING_PROTOCOL,\n constants.STRING_CODER, constants.STRING_AREA, constants.STRING_WORKINGGROUP]\n\n search_in_all = True\n for v in valid_searches:\n if v in request.GET:\n search_in_all = False\n break\n\n if search_in_all or request.GET.get(constants.STRING_TITLE):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.title.lower():\n cod_ids.append(cod.id)\n\n if search_in_all or request.GET.get(constants.STRING_DESCRIPTION):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.additional_information.lower():\n cod_ids.append(cod.id)\n\n if request.GET.get(constants.STRING_PROTOCOL):\n proj_ids += ProjectContainer.objects.filter(protocol__icontains=query).values_list('id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_CODER):\n for pr in ProjectContainer.objects.all():\n for cd in pr.codings.all():\n user = Person.objects.using('datatracker').get(id=cd.coder)\n if query.lower() in user.name.lower():\n proj_ids.append(pr.id)\n break\n\n if search_in_all or request.GET.get(constants.STRING_AREA):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = 
filter(None, project_container.docs.split(';'))\n docs.extend(list(DocAlias.objects.using('datatracker').filter(name__in=keys).values_list(\n 'document__group__parent__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n # ids += ProjectContainer.objects.filter(docs__document__group__parent__name__icontains=query).values_list(\n # 'id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_WORKINGGROUP):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(\n DocAlias.objects.using('datatracker').filter(name__in=keys).values_list('document__group__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n \n if cod_ids:\n cod_ids = list(set(cod_ids))\n proj_ids += ProjectContainer.objects.filter(codings__id__in=cod_ids).values_list('id', flat=True)\n project_containers = ProjectContainer.objects.filter(id__in=list(set(proj_ids)))\n \n request.session[constants.ALL_CODINGS] = cod_ids\n request.session[constants.ALL_PROJECTS] = project_containers\n\n request.session[constants.MAINTAIN_STATE] = True\n\n return HttpResponseRedirect(\n settings.CODESTAND_PREFIX + '/codestand/matches/show_list/' + \n is_my_list + '/{0}/'.format(constants.ATT_CREATION_DATE) + 'True')\n\n else:\n return render_page(request, constants.TEMPLATE_MATCHES_SEARCH, {\n \"form\": SearchForm()\n })", "def perform_query(tweets_dict, index, tf, idf, rt, likes, score, get_input=True, query=None):\n print(\"Insert your query:\\n\")\n if get_input:\n query = input()\n ranked_docs = search(query, index, idf, tf, rt, likes, score) \n return query, ranked_docs", "def invoke(self):\n print(\"\\nEnter Book Name: \", end=\"\")\n # get option from user, and strip whitespace\n str_option = input().strip()\n if not str_option:\n print(\"Invalid Input!\")\n return\n self.sbh.display_books(\n self.db.query_book_by_title(str_option)\n )", "def search():\n\n # TO DO: refine with wildcard to curb superfluous results\n \n # logged in users can search for books\n # via 'isbn', 'author', or 'title'\n query = request.form.get(\"search\")\n if not query:\n return render_template(\"home.html\", result=0, name=session[\"name\"],result_head=\"Results\")\n \n # query 'isbn'\n if query.isdigit():\n res = db.execute(\"SELECT * FROM books WHERE isbn LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n else:\n # query 'author'\n res = db.execute(\"SELECT * FROM books WHERE author LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n # If no result from author, query 'title'\n if len(res) == 0:\n res = db.execute(\"SELECT * FROM books WHERE title LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n if len(res) == 0:\n res = 0\n return render_template(\"home.html\", result=res, name=session[\"name\"], result_head=\"Results\")", "def select_search_method():\n st.sidebar.markdown('### Search method:')\n search_method = st.sidebar.selectbox('', ['Individual', 'Department'], index=0)\n return search_method", "def prompt_user_selection(self):\n self.print_users()\n user_input = self.input_for_user_selection()\n currently_selected = self.users[int(user_input)]\n return currently_selected", "def start():\r\n print(\"Please select an option:\")\r\n print(\"1) Query by movies\")\r\n print(\"2) Query by actor\")\r\n print(\"3) Insert a new movie\")\r\n 
print(\"4) Save and Exit\")\r\n print(\"5) Exit\")\r\n option = input()\r\n return option", "def autocomplete():\n value = str(request.args.get('q'))\n result = s.query(Genes).filter(Genes.name.like(\"%\" + value + \"%\")).all()\n data = [i.name for i in result]\n return jsonify(matching_results=data)", "def main(args: Namespace):\n # Perform the search using an authenticated instance of the Intel Service Class\n ret = perform_search(Intel(client_id=args.client_id, client_secret=args.client_secret),\n args.find, # Search string\n args.types, # Types to display\n args.table_format, # Table format\n args.reverse, # Reverse sort boolean,\n args.output_prefix # Output file prefix\n )\n return *ret, args.types", "def search():\n query = request.form.get(\"query\")\n recipes = list(mongo.db.recipes.find({\"$text\": {\"$search\": query}}))\n return render_template(\"recipes.html\", recipes=recipes)", "def _(event):\n input_buffer = event.cli.buffers.previous(event.cli)\n search_buffer = event.cli.buffers[SEARCH_BUFFER]\n\n # Update search state.\n if search_buffer.text:\n get_search_state(event.cli).text = search_buffer.text\n\n # Apply search.\n input_buffer.apply_search(get_search_state(event.cli), include_current_position=True)\n\n # Add query to history of search line.\n search_buffer.append_to_history()\n search_buffer.reset()\n\n # Focus previous document again.\n event.cli.pop_focus()", "def search(request):\n if 'q' in request.GET:\n term = request.GET['q'].lower()\n thispushqueryset = pushitem.objects.filter(Q(searchfield__contains= term) )\n message = _('Searching for %s')%str(term)\n else:\n thispushqueryset = pushitem.objects.none()\n message = _('No search query specified')\n r = makepage(request,thispushqueryset,{'search_query':request.GET['q'].lower(), 'showall': 1,'message':message,}, template='search.html')\n return r", "def search(self, *args, **kwargs): # real signature unknown\n pass", "def start_search(self):\n self._raise_not_supported()", "def search():\n\n # Store the 'q' part of the URL as a string called 'q'. Check 'q' loaded, and produce runtime error if not.\n # e.g. '12589'\n q = request.args.get(\"q\")\n if not q:\n raise RuntimeError(\"missing location\")\n\n # Rewrites user input as lowercase\n q = str.lower(q)\n\n # Select the entire row from database 'places' that at least contains the value of 'q' in one of the 'postal_code', 'place_name', or 'admin_name1' fields.\n # e.g. 
[{'country_code':'US','postal_code':'12589'}]\n q_info = db.execute(\"SELECT * FROM places WHERE postal_code LIKE :q OR LOWER(place_name) LIKE :q OR LOWER(admin_name1) LIKE :q LIMIT 10\", q='%'+q+'%')\n\n # Run 'q_info' dict through 'jsonify()' function to convert some elements to JSON compatible(?)\n return jsonify(q_info)", "def __find(self):\n txt = self.textCursor().selectedText()\n self.__mainWindow.showFind(txt)", "def search_autocomplete(request):\n response = HttpResponse(content_type='application/json')\n query = request.GET.get('query', None)\n if query:\n try:\n suggestions = []\n for node in nc.get_indexed_node(nc.graphdb.manager, 'name', query):\n suggestions.append(node['name'])\n d = {'query': query, 'suggestions': suggestions, 'data': []}\n json.dump(d, response)\n except Exception:\n pass\n return response\n return False", "def search_page():\n return render_template('page_query.html', search_label=g_search_type)", "def _get_user_input(query, valid, default):\n\n # Wait for valid user input and return choice upon receipt\n while True:\n choice = input(query)\n if default is not None and choice == \"\":\n return default\n elif choice in valid:\n return choice\n else:\n print(\"Please respond with '\" + \\\n \"or '\".join(opt + \"' \" for opt in valid) + \"\\n\")", "def input_word():\n\n search_user= raw_input(\"\\nEnter word(s) to search: \")\n return search_user.lower()", "def search(self):\n\n if not(self.checkBox_coder1.isChecked()) and not(self.checkBox_coder2.isChecked()):\n QtGui.QMessageBox.warning(None, \"No coder\",\"No coder has been selected.\")\n return\n\n self.htmlResults = \"\"\n self.plainTextResults = \"\"\n\n # get search text\n searchText = self.lineEdit.text()\n unic_err = False\n try:\n searchText = str(searchText)\n except UnicodeEncodeError as e:\n unic_err = True\n QtGui.QMessageBox.warning(None, \"Unicode encode error\", str(e) +\"\\nPlease use different search text.\" \\\n \"\\nThe problem character(s) have been replaced with Wildcards for this search.\")\n if unic_err is True:\n # use sql wildcards\n newText = \"\"\n for c in searchText:\n try:\n newText += str(c)\n except UnicodeEncodeError as e:\n newText += \"_\"\n searchText = newText\n\n # get selected codes\n codeIDs = \"\"\n for itemWidget in self.tableWidget.selectedItems():\n codeIDs += \",\" + self.tableWidget.item(itemWidget.row(), self.ID_COLUMN).text()\n if len(codeIDs) == 0:\n QtGui.QMessageBox.warning(None, \"No codes\",\"No codes have been selected.\")\n return\n codeIDs = codeIDs[1:]\n\n # get file ids\n if self.fileIDs == \"\": # unless already selected via selectFiles method\n filenames = []\n cur = self.settings['conn'].cursor()\n cur.execute(\"select id, name, status from source\")\n result = cur.fetchall()\n for row in result:\n filenames.append({'id': row[0], 'name': row[1], 'status': row[2]})\n self.fileIDs += \",\" + str(row[0])\n if len(self.fileIDs) > 0:\n self.fileIDs = self.fileIDs[1:]\n\n searchResults = []\n searchString = \"\"\n cur = self.settings['conn'].cursor()\n if self.caseIDs == \"\": # no selected case ids\n sql = \"select freecode.name, color, source.name, selfirst, selend, seltext from coding \"\n sql += \" join freecode on cid = freecode.id join source on fid = source.id \"\n sql += \" where freecode.id in (\" + str(codeIDs) + \") \"\n sql += \" and source.id in (\" + str(self.fileIDs) + \") \"\n #print(sql)\n if self.checkBox_coder1.isChecked():\n if searchText == \"\":\n cur.execute(sql)\n else:\n sql = sql + \"and seltext like ?\"\n #print(sql)\n 
cur.execute(sql,[\"%\"+str(searchText)+\"%\"])\n result = cur.fetchall()\n for row in result:\n searchResults.append(row)\n\n if sql.find(\"seltext like ?\") > 0:\n sql = sql.replace(\"seltext like ?\", \"seltext like \\\"%\" + searchText + \"%\\\"\")\n searchString = sql\n\n if self.checkBox_coder2.isChecked():\n sql = \"select freecode.name, color, source.name, selfirst, selend, seltext from coding2 \"\n sql += \" join freecode on cid = freecode.id join source on fid = source.id \"\n sql += \" where freecode.id in (\" + str(codeIDs) + \") \"\n sql += \" and source.id in (\" + str(self.fileIDs) + \") \"\n #print(sql)\n if searchText == \"\":\n cur.execute(sql)\n else:\n sql = sql + \" and seltext like ?\"\n #print(sql)\n cur.execute(sql,[\"%\"+str(searchText)+\"%\"])\n result = cur.fetchall()\n for row in result:\n searchResults.append(row)\n\n if sql.find(\"seltext like ?\") > 0:\n sql = sql.replace(\"seltext like ?\", \"seltext like \\\"%\" + searchText + \"%\\\"\")\n searchString += \"\\n\" + sql\n\n else: # cases have been selected via selectCases method, file selection is ignored\n if self.checkBox_coder1.isChecked():\n sql = \"select freecode.name, color, cases.name, coding.selfirst, coding.selend, seltext from coding \"\n sql += \" join freecode on cid = freecode.id \"\n sql += \" join (caselinkage join cases on cases.id = caselinkage.caseid) on coding.fid = caselinkage.fid \"\n sql += \" where freecode.id in (\" + str(codeIDs) + \") \"\n sql += \" and caselinkage.caseid in (\" + str(self.caseIDs) + \") \"\n if searchText != \"\":\n sql += \"and seltext like ?\"\n sql += \" group by cases.name, coding.selfirst, coding.selend\" # need to group by or can get multiple results\n #print(sql)\n if searchText == \"\":\n cur.execute(sql)\n else:\n cur.execute(sql,[\"%\"+str(searchText)+\"%\"])\n result = cur.fetchall()\n for row in result:\n searchResults.append(row)\n\n if sql.find(\"seltext like ?\") > 0:\n sql = sql.replace(\"seltext like ?\", \"seltext like \\\"%\" + searchText + \"%\\\"\")\n searchString = sql\n\n if self.checkBox_coder2.isChecked():\n sql = \"select freecode.name, color, cases.name, coding2.selfirst, coding2.selend, seltext from coding2 \"\n sql += \" join freecode on cid = freecode.id \"\n sql += \" join (caselinkage join cases on cases.id = caselinkage.caseid) on coding2.fid = caselinkage.fid \"\n sql += \" where freecode.id in (\" + str(codeIDs) + \") \"\n sql += \" and caselinkage.caseid in (\" + str(self.caseIDs) + \") \"\n if searchText != \"\":\n sql += \"and seltext like ?\"\n sql += \" group by cases.name, coding2.selfirst, coding2.selend\" # need to group by or can get multiple results\n #print(sql)\n if searchText == \"\":\n cur.execute(sql)\n else:\n cur.execute(sql,[\"%\"+str(searchText)+\"%\"])\n result = cur.fetchall()\n for row in result:\n searchResults.append(row)\n\n if sql.find(\"seltext like ?\") > 0:\n sql = sql.replace(\"seltext like ?\", \"seltext like \\\"%\" + searchText + \"%\\\"\")\n searchString += \"\\n\" + sql\n\n # add to text edit with some formatting\n self.textEdit.clear()\n fileOrCase = \"File\"\n if self.caseIDs != \"\":\n fileOrCase = \"Case\"\n CODENAME = 0\n COLOR = 1\n FILEORCASENAME = 2\n #SELFIRST = 3\n #SELEND = 4\n SELTEXT = 5\n self.plainTextResults += \"Search queries:\\n\" + searchString + \"\\n\\n\"\n searchString = searchString.replace(\"&\",\"&amp;\")\n searchString = searchString.replace(\"<\",\"&lt;\")\n searchString = searchString.replace(\">\",\"&gt;\")\n searchString = searchString.replace(\"\\\"\",\"&quot;\")\n 
self.htmlResults += \"<h1>Search queries</h1>\\n\"\n self.htmlResults += \"<p>\" + searchString + \"</p>\"\n self.htmlResults += \"<h2>Results</h2>\"\n\n for row in searchResults:\n colorhex = self.codeColors.getHexFromName(row[COLOR])\n if colorhex == \"\":\n colorhex = \"#CCCCCC\"\n title = \"<em><span style=\\\"background-color:\" + colorhex + \"\\\">\"+row[CODENAME] + \"</span>, \"\n title +=\" \"+ fileOrCase + \": \" + row[FILEORCASENAME] + \"</em>\"\n self.textEdit.appendHtml(title)\n self.textEdit.appendPlainText(row[SELTEXT] + \"\\n\")\n\n self.htmlResults += \"<p>\" + title + \"<br />\"\n tmpHtml = row[SELTEXT].replace(\"&\",\"&amp;\")\n tmpHtml = tmpHtml.replace(\"<\",\"&lt;\")\n tmpHtml = tmpHtml.replace(\">\",\"&gt;\")\n #self.htmlResults += row[SELTEXT] + \"</p>\\n\"\n self.htmlResults += tmpHtml + \"</p>\\n\"\n self.plainTextResults += row[CODENAME] +\", \" + fileOrCase +\": \" + row[FILEORCASENAME] +\"\\n\"\n self.plainTextResults += row[SELTEXT] + \"\\n\\n\"", "def search(request):\n\n term = \"\"\n organizations = None\n memberships = None\n events = None\n persons = None\n airports = None\n training_requests = None\n comments = None\n only_result = None\n\n if request.method == \"GET\" and \"term\" in request.GET:\n form = SearchForm(request.GET)\n if form.is_valid():\n term = form.cleaned_data.get(\"term\", \"\")\n tokens = re.split(r\"\\s+\", term)\n\n organizations = Organization.objects.filter(\n Q(domain__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"fullname\")\n if len(organizations) == 1 and not only_result:\n only_result = organizations[0]\n\n memberships = Membership.objects.filter(\n registration_code__icontains=term\n ).order_by(\"-agreement_start\")\n if len(memberships) == 1 and not only_result:\n only_result = memberships[0]\n\n events = Event.objects.filter(\n Q(slug__icontains=term)\n | Q(host__domain__icontains=term)\n | Q(host__fullname__icontains=term)\n | Q(url__icontains=term)\n | Q(contact__icontains=term)\n | Q(venue__icontains=term)\n | Q(address__icontains=term)\n ).order_by(\"-slug\")\n if len(events) == 1 and not only_result:\n only_result = events[0]\n\n # if user searches for two words, assume they mean a person\n # name\n if len(tokens) == 2:\n name1, name2 = tokens\n complex_q = (\n (Q(personal__icontains=name1) & Q(family__icontains=name2))\n | (Q(personal__icontains=name2) & Q(family__icontains=name1))\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n )\n persons = Person.objects.filter(complex_q)\n else:\n persons = Person.objects.filter(\n Q(personal__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n ).order_by(\"family\")\n\n if len(persons) == 1 and not only_result:\n only_result = persons[0]\n\n airports = Airport.objects.filter(\n Q(iata__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"iata\")\n if len(airports) == 1 and not only_result:\n only_result = airports[0]\n\n training_requests = TrainingRequest.objects.filter(\n Q(group_name__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(github__icontains=term)\n | Q(affiliation__icontains=term)\n | Q(location__icontains=term)\n | Q(user_notes__icontains=term)\n )\n if len(training_requests) == 1 and not only_result:\n only_result = training_requests[0]\n\n comments = Comment.objects.filter(\n Q(comment__icontains=term)\n | Q(user_name__icontains=term)\n | 
Q(user_email__icontains=term)\n | Q(user__personal__icontains=term)\n | Q(user__family__icontains=term)\n | Q(user__email__icontains=term)\n | Q(user__github__icontains=term)\n ).prefetch_related(\"content_object\")\n if len(comments) == 1 and not only_result:\n only_result = comments[0]\n\n # only 1 record found? Let's move to it immediately\n if only_result and not form.cleaned_data[\"no_redirect\"]:\n msg = format_html(\n \"You were moved to this page, because your search <i>{}</i> \"\n \"yields only this result.\",\n term,\n )\n if isinstance(only_result, Comment):\n messages.success(request, msg)\n return redirect(\n only_result.content_object.get_absolute_url()\n + \"#c{}\".format(only_result.id)\n )\n elif hasattr(only_result, \"get_absolute_url\"):\n messages.success(request, msg)\n return redirect(only_result.get_absolute_url())\n\n else:\n messages.error(request, \"Fix errors below.\")\n\n # if empty GET, we'll create a blank form\n else:\n form = SearchForm()\n\n context = {\n \"title\": \"Search\",\n \"form\": form,\n \"term\": term,\n \"organisations\": organizations,\n \"memberships\": memberships,\n \"events\": events,\n \"persons\": persons,\n \"airports\": airports,\n \"comments\": comments,\n \"training_requests\": training_requests,\n }\n return render(request, \"dashboard/search.html\", context)", "def test_selection_name(self):\n skill = create_skill()\n skill.speak = mock.Mock()\n skill.get_response = mock.Mock()\n\n skill.get_response.return_value = 'octopus'\n\n options = ['a balloon', 'an octopus', 'a piano']\n response = skill.ask_selection(options, 'which is better')\n self.assertEqual(options[1], response)\n\n # Assert that the spoken sentence contains all options.\n spoken_sentence = skill.speak.call_args[0][0]\n for opt in options:\n self.assertTrue(opt in spoken_sentence)", "def ask_for_query():\n print('Enter query, empty to quit:')\n try:\n query = input('? ')\n except EOFError:\n # User has cancelled\n return False\n\n return query", "def lookup_entries(self, selection):\n if selection == \"M\":\n return None\n else:\n clear_screen()\n if selection == \"D\":\n self.search_by_date()\n elif selection == \"T\":\n self.search_by_time()\n elif selection == \"S\":\n self.search_by_string()\n else:\n self.search_by_regex()\n # No matter the path, prompt to hit ENTER for main menu return\n continue_prompt()", "def search(request):\n context = {}\n q = \"\"\n try:\n if request.POST:\n q = request.POST['q']\n else:\n q = request.GET['q']\n except MultiValueDictKeyError:\n pass\n context['query'] = q\n context['search_entry_list'] = watson.search(q)\n return render(request, 'search.html', context)", "def run_it():\n initialize()\n parser = get_parser()\n args = None\n first_parse = True\n while(True):\n if first_parse is True:\n first_parse = False\n args = parser.parse_args()\n \n else:\n # print(textwrap.dedent(\n # '''\\\n # Search again like in the beginning.\n # -- You can either choose best rated or list mode.\n # -- This time, you can insert the search string without double quotes.\n # Remember the list mode options!\n # 0: torrent project.\n # 1: the pirate bay.\n # 2: 1337x.\n # 3: eztv.\n # 4: limetorrents.\n # 5: isohunt.\n # '''))\n sys.exit(0)\n print('Or.. 
if you want to exit just write \"' +\n Colors.LRED + 'Q' + Colors.ENDC + '\" or \"' +\n Colors.LRED + 'q' + Colors.ENDC + '\".')\n input_parse = input('>> ').replace(\"'\", \"\").replace('\"', '')\n if input_parse in ['Q', 'q']:\n sys.exit(1)\n\n args = parser.parse_args(input_parse.split(' ', 2))\n \n if args.str_search.strip() == \"\":\n print('Please insert an appropiate non-empty string.')\n else:\n args.str_search = args.str_search.replace('_',' ').replace(\"'\",'')\n\n movieName = args.str_search\n #print(args.str_search)\n auto = AutoPy(*insert(args))\n auto.movieName = movieName\n auto.get_content()\n auto.select_torrent()\n auto.download_torrent()", "def search(self, value):\n self.base_selenium.set_text(element='general:search', value=value)\n self.base_selenium.click(element='general:search')\n time.sleep(self.base_selenium.TIME_MEDIUM)\n return self.result_table()", "def choose_query(screening_log_path: str):\n while True:\n # Ask for what the user would like to do\n print(\"1. Basic Screening Log Stats\")\n print(\"2. Get basic stats between two dates\")\n print(\"3. Get basic stats by time of day\")\n choice = input(\"What actions would you like to take, q to quit \")\n\n choices = {'1': get_screening_log_basic_stats,\n '2': get_screening_log_stats_by_date,\n '3': get_screening_log_stats_by_time,\n }\n\n if choice is not None and choice.strip() and choices.get(choice) is not None:\n choices[choice](screening_log_path)\n\n elif choice is not None and choice.strip() and choice.lower() == 'q':\n break\n # Bad Entry\n else:\n print(\"Please enter a valid choice\")", "def search_method_menu(self):\n\n print()\n options = {'1': 'Employee Name', '2': 'Keyword', '3': 'Time Spent',\n '4': 'Date', '5': 'Date Range', '6': 'Exit to main menu'}\n\n while True:\n\n for k, v in options.items():\n print(k + \". \" + v)\n\n user_choice = input('\\nPlease enter the number of choice: ').lower().strip()\n\n if user_choice in options.keys():\n return options.get(user_choice)\n else:\n print('\\nInvalid choice! Please try again.\\n')", "def search():\n query = request.form.get(\"query\", None)\n recipes = mongo.db.recipes.find({\"$text\": {\"$search\": query}})\n return render_template(\"recipes/list.html\", recipes=recipes)", "def search():\n args = request.args.to_dict()\n query = QueryModel(args)\n result = repository.search_text(COLLECTION_NAME, query.value)\n return {\"texts\": result}", "def search_employee():\n while True:\n clear()\n print(dedent(\"\"\"\n What do you want to do? Enter a, b or c.\n a) Input a name to search\n b) See a list of employees\n c) Return to search menu\n \"\"\"))\n choice = input(\"> \")\n if choice == \"a\":\n work_log.search_employee_name()\n elif choice == \"b\":\n work_log.multiple_matches(type='employee')\n elif choice == \"c\":\n break\n else:\n print(\"Please enter a valid choice\")\n time.sleep(3)", "def search():\n student_to_find=request.args.get(\"student\", None)\n print(f\"A buscar: {student_to_find}\")\n student_list=search_student(student_to_find)\n return render_template(\"search.html\",student_list_result=student_list)", "def ask_user_for_relevance(query_results):\n for i, result in enumerate(query_results):\n hdr = 'Result #%d ' % (i+1)\n prompt_text = 'Is result #%d relevant? 
[y/n] ' % (i+1)\n print '\\n' + hdr + '-'*(70 - len(hdr))\n print result.to_formatted_string()\n print '-'*70\n while True:\n user_in = raw_input(prompt_text).strip().lower()\n if user_in == 'y' or user_in == 'n':\n break\n if user_in == 'y':\n result.is_relevant = True", "def search():\n query = request.form.get(\"query\")\n category = list(mongo.db.tips.find({\"$text\": {\"$search\": query}}))\n return render_template(\"tips.html\", category=category)", "def question_new_search():" ]
[ "0.6945267", "0.6146135", "0.60944784", "0.6077637", "0.6019907", "0.5902816", "0.58127916", "0.58041674", "0.5797001", "0.5792282", "0.5770086", "0.5716065", "0.56671447", "0.5661047", "0.5658922", "0.56546295", "0.56507295", "0.5648148", "0.56405544", "0.56253314", "0.5614291", "0.5591348", "0.55757976", "0.5567116", "0.5538281", "0.5532533", "0.55296844", "0.5517586", "0.5517161", "0.5510592", "0.5510478", "0.548861", "0.54816866", "0.54762626", "0.5465974", "0.54551053", "0.5452351", "0.5448728", "0.54425585", "0.5409637", "0.5403998", "0.5374103", "0.53732866", "0.53726584", "0.53665394", "0.5358237", "0.5346183", "0.5336218", "0.5329516", "0.53254", "0.53228736", "0.5319986", "0.5317494", "0.5315589", "0.5312162", "0.52991843", "0.52916294", "0.5286538", "0.5279614", "0.5270545", "0.52684855", "0.5267037", "0.526283", "0.52543205", "0.5245637", "0.52438426", "0.5238758", "0.52339506", "0.52323014", "0.5231704", "0.5228836", "0.52261", "0.5211979", "0.5201169", "0.5195152", "0.5194561", "0.5192646", "0.51905906", "0.5189161", "0.5165357", "0.5163041", "0.5161281", "0.51612204", "0.51603854", "0.5158266", "0.5157007", "0.5155199", "0.5151014", "0.5149277", "0.5148722", "0.5148376", "0.51430637", "0.514133", "0.51386356", "0.5134745", "0.5130294", "0.5128082", "0.51267713", "0.51252705", "0.51190734" ]
0.67623794
1
Returns string that best matches query out of a list of choices. Prompts user if unsure about best match.
def get_interactive_match(self, choices, query): if query in self.SKIP_KEYWORDS: return None results = process.extract(query, choices, limit=10) # fuzzy string matching best_match = results[0] second_best_match = results[1] if best_match[1] == second_best_match[1] or best_match[1] < 50: # if inconclusive or low score self.print("Couldn't find a conclusive match for '%s'. Best matches:" % (query)) i = 0 for result in results: i += 1 print(" [%i] %s" % (i, result[0])) answer = input("Choose one or specify a less ambiguous query: ") self.clear_lines(2 + len(results)) if answer.isdigit() and int(answer) <= len(results): return results[int(answer) - 1][0] else: return self.get_interactive_match(choices, answer) else: return best_match[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interactive_search(self, choices, query=None):\n if query:\n match = self.get_interactive_match(choices, query)\n if match:\n self.print(\"Matched query to '%s'.\" % (match))\n answer = input(\"Is that correct? (Y/n) \")\n self.clear_lines(1)\n if answer.lower() == 'y' or answer == '':\n self.clear_lines(1)\n return match\n else:\n self.clear_lines(1)\n return self.interactive_search(choices)\n else:\n return None\n else:\n query = input(\"Please type a query: \")\n self.clear_lines(1)\n return self.interactive_search(choices, query)", "async def cmd_choose(self, args: Args, **_):\n response = \"From what you gave me, I believe `{}` is the best choice\".format(\n args[randint(0, len(args) - 1)]\n )\n return response", "def get_choice(choices: list, choice: str):\n if choice == \"1\":\n return 0\n \n if choice == \"2\":\n return 1\n\n choices = list(map(str.lower, choices))\n words = list(map(str.split, choices))\n\n # Go through all words in the given message, and find any words unique to a choice\n for word in choice.lower().split():\n if word in words[0] and word not in words[1]:\n return 0\n elif word in words[1] and word not in words[0]:\n return 1\n\n # Invalid choice\n return None", "async def suggest(self, ctx, choice=None):\n\n if choice is None or choice.lower() in (\"online\", \"voice\"):\n suggestions = get_suggestions(get_users(ctx, choice))\n\n if suggestions:\n await self.bot.say(\"You can play these games: \\n\")\n message = pagify(\"\\n\".join(suggestions), ['\\n'])\n\n for page in message:\n await self.bot.say(box(page))\n else:\n await self.bot.say(\"You have exactly **zero** games in common, go buy a 4-pack!\")\n else:\n await self.bot.say(\"Please enter a valid filter -> either use `online` (default) for all online users or `voice` for all users in a voice channel\")", "def promptUser(choices, choiceStr, question=None, maxToShow=20):\n # Display choices to the user\n print \"\"\n validinput = ['']\n for i in range(len(choices)):\n validinput.append(str(i+1))\n try:\n try: print encode(\" %2s. %s\" % (i+1, choiceStr(choices[i])))\n except: print \" %2s. %s\" % (i+1, choiceStr(choices[i]))\n except:\n pass\n if (i == maxToShow-1): break\n # Get a response from the user\n response = \"<UNANSWERED>\"\n question = question or \" Please select the correct item\"\n question = \"%s (0 for None) [0]: \" % question\n while (response not in validinput):\n response = raw_input(\"\\n%s\" % question)\n if (response not in validinput):\n print \" Invalid input, please choose one of: %s\" % validinput\n # We have a response, return the correct choice\n if (response == ''):\n print \" You selected: None\"\n return None\n selection = choices[int(response)-1]\n print \" You selected: %s\" % choiceStr(selection)\n return selection", "def select_option(options, choice):\n choices = []\n txt = \"\"\n last = len(options) - 1\n for opt in options:\n if options.index(opt) == 0:\n txt += \"'\" + str(opt) + \"'\"\n elif options.index(opt) == last:\n txt += \" and '\" + str(opt) + \"'\"\n else:\n txt += \", '\" + str(opt) + \"'\"\n choices.append({'name': opt})\n\n question = [\n {\n 'type': 'list',\n 'message': 'The similarities between \\'' + choice + '\\' with ' + txt + ' are equal. 
Choose the one to consider.',\n 'name': 'option',\n 'choices': choices\n }\n ]\n\n answer = prompt(question, style=style)\n return answer.get(\"option\")", "async def choose(self, ctx, *args):\n query = \" \".join(args)\n choices = query.split(\" or \")\n if len(choices) < 2:\n await ctx.send(\"Give me at least 2 options to choose from! (separate options with `or`)\")\n self.logger.warning(misolog.format_log(ctx, f\"1 option\"))\n return\n choice = rd.choice(choices).strip()\n await ctx.send(f\"I choose **{choice}**\")\n self.logger.info(misolog.format_log(ctx, f\"{choice}\"))", "def choose_matching_model_for_style(model_style_name, model_choices):\n model_choices = set(model_choices)\n matching_models = []\n\n # Remove punctuation and capitalize both terms for easier comparison\n model_style_uc = model_style_name.replace(\"&\", \"And\").upper()\n model_style_alphanumeric = not_alphanumeric.sub(\"\", model_style_uc)\n model_choice_original_map = {}\n for model_choice in model_choices:\n model_choice_original_map[not_alphanumeric.sub(\"\", model_choice.upper())] = model_choice\n model_choices_alphanumeric = model_choice_original_map.keys()\n\n # First check if the model_style starts with the name of any of our models\n for model_choice in model_choices_alphanumeric:\n if model_style_alphanumeric.startswith(model_choice):\n matching_models.append(model_choice_original_map[model_choice])\n\n if len(matching_models) == 1:\n return matching_models[0]\n\n # If that fails, look for overlap between a model and the model_style\n for model_choice in model_choices_alphanumeric:\n if model_choice in model_style_alphanumeric:\n matching_models.append(model_choice_original_map[model_choice])\n\n if len(matching_models) == 1:\n return matching_models[0]\n\n if len(matching_models) > 1:\n # If there are multiple matching, choose the largest match first. 
This mostly seems to work.\n matching_models = sorted(matching_models, key=lambda x: len(x), reverse=True)\n return matching_models[0]\n\n return None", "def _choose_best_option(self):", "async def randomChoice(self, ctx: commands.Context, *choices: str):\n if not choices:\n await ctx.reply(f\"Command failed - no arguments given.\\nEnter a sequence of arguments to choose from (you can use quotes for grouping).\", mention_author=False)\n elif len(choices)==1:\n await ctx.reply(f\"After some extremely randomized choosing from the one singular option that was given to choose from, the surprising result is:\\n{choices[0]}\", mention_author=False)\n else:\n await ctx.reply(f\"Randomly chosen result:\\n{random.choice(choices)}\", mention_author=False)", "def prompt(text, choices):\n text += \" [\" + \"/\".join(choices) + \"] \"\n while True:\n inp = input(text)\n if inp in choices:\n return inp", "def next_choice(self, opponent: 'Player') -> str:\n\n if self.adaptive_ai:\n # this is an adaptive_ai player, so see if it has collected\n # enough stats about the current opponent yet:\n if sum(self.opponent_choices[opponent.name].values()) > 5:\n # has enough samples to start adapting to the opponent\n print(' {} is trying to guess the opponent\\'s choice...'.format(self.name))\n\n # AI algorithm 1:\n # simply find the most-frequent selection by the opponent and\n # choose its killer.\n\n guess = self.opponent_choices[opponent.name].most_common(1)[0][0]\n ai_choice = weapon_to_beat(guess)\n print(' ', opponent.name, 'most often chose', guess, 'so he/she chose', ai_choice)\n return ai_choice\n\n # use the standard tendency distribution to choose a weapon:\n n = randint(1, self.randmax)\n if n <= self.tendency[0]:\n return 'rock'\n elif n <= self.tendency[0] + self.tendency[1]:\n return 'paper'\n else:\n return 'scissors'", "def ask(question, options, default):\n assert default in options\n\n question += \" ({})? 
\".format(\"/\".join(o.upper() if o == default else o for o in options))\n selected = None\n while selected not in options:\n selected = input(question).strip().lower()\n if selected == \"\":\n selected = default\n else:\n if selected not in options:\n question = \"Please type '{}'{comma} or '{}': \".format(\n \"', '\".join(options[:-1]), options[-1],\n comma=',' if len(options) > 2 else '',\n )\n return selected", "def __choose_best_matching_candidate(candidates, artist):\n\n artist_names = set()\n for match in candidates:\n artist_names.add(match[1])\n\n # If there is more than 1 matched artist:\n if len(artist_names) > 1:\n \n best_distance = 10000\n best_artist = \"\"\n\n # Calculate the levenshtein edit distance between the searched artist name and the artist names in the search results.\n for matched_artist in artist_names:\n distance = editdistance.eval(matched_artist, artist)\n if distance < best_distance:\n best_distance = distance\n best_artist = matched_artist\n\n # Then exclude from candidates all matches that are NOT from the best artist\n candidates = [candidate for candidate in candidates if candidate[1] == best_artist]\n else:\n best_artist = artist_names.pop()\n best_distance = editdistance.eval(best_artist, artist)\n\n # Threshold candidate name to the artist name\n ratio = best_distance/len(artist)\n # Allow ~15% difference\n if ratio > 0.15:\n raise MatchNotFoundError(\"Closest artist is too far of the queried artist\")\n\n # Descending list\n sort_on_num_ratings = sorted(candidates, key=lambda cand: cand[2], reverse=True)\n\n # Take the one with the most votes\n selected = sort_on_num_ratings[0]\n\n # Unless it has a rating lower than 4.\n if selected[3] < 4:\n\n sort_on_rating = sorted(candidates, key=lambda cand: cand[3], reverse=True)\n\n # If there is one with a rating higher than 4, select that one. 
\n if sort_on_rating[0][3] > 4:\n selected = sort_on_rating[0]\n\n return selected", "def __str__(self):\n return gettext('One of %s') % self._get_choices_str()", "def selection_input(\n self,\n prompt,\n choices,\n default=None,\n error_message=\"Invalid Selection\",\n transform=None\n ):\n while True:\n result = self.text_input(prompt, default)\n\n if transform is not None and result is not None:\n result = transform(result)\n\n if result in choices:\n return result\n\n print()\n print(error_message)", "def QueryStrGeneral(cls, queryString: str, errorPrompt: str, conditionList: list) -> str:\n\n global userInput\n\n try:\n userInput = input(queryString).upper()\n\n # Check if userInput points to either of the options, and recursively call\n # the function until userInput has an actionable value.\n if userInput not in conditionList:\n raise ValueError\n\n except ValueError:\n # Reprompt user for valid entry.\n print(errorPrompt)\n cls.QueryStrGeneral(queryString, errorPrompt, conditionList)\n\n except Exception:\n print(\"\\nOops something is buggy\")\n\n return userInput", "def _choice_str(choices, max):\n return ''.join(map(str, [choice(choices) for _ in range(max)]))", "def get_choice(choices, conversation):\n user_choice = None\n while user_choice is None:\n try:\n user_input = input(\"\\nChoice: \")\n if user_input == 'debug' or user_input == 'd':\n print(\"\\n\", conversation.getDebugInfo())\n else:\n user_choice = int(user_input)\n if choices.__len__() < user_choice or user_choice < 1:\n print('Error, try again')\n user_choice = None\n else:\n print(\"\\nYou: \" + choices[user_choice - 1])\n except ValueError:\n print('Error, try again')\n return user_choice", "def find_best_candidate(s_array):\n best_string = ''\n max_val = 0\n for s in s_array:\n score = compare(s)\n if score > max_val:\n max_val = score\n best_string = s\n return best_string", "def select_query(\n items: Sequence,\n max_display: int = 10,\n fallback: Callable[[], T] = None,\n item_formatter: Callable[[T], str] = str,\n header: str = \"Available options:\",\n footer: str = \"Please enter the number of the option to use.\",\n) -> T:\n\n # Truncate if needed\n print(header)\n if max_display is not None and len(items) > max_display:\n items = items[:max_display]\n print(f\"(showing the latest {max_display})\")\n\n # Display list\n for i, exp in enumerate(items):\n print(\" \", i, \": \", item_formatter(exp))\n\n print(footer)\n\n # Repeat query on errors\n while True:\n sel = input()\n\n # Check if sel is a number, if so use it.\n if sel == \"\":\n # first item is default\n return items[0]\n elif sel.isdigit():\n # Parse index\n sel_idx = int(sel)\n if sel_idx < len(items):\n return items[sel_idx]\n # Error\n print(\"Please enter a number between 0 and \", len(items) - 1, \".\")\n elif fallback is not None:\n # Use fallback if any\n fres = fallback(sel)\n if fres is not None:\n return fres\n # The fallback should report it's own errors\n else:\n print(\"Please enter a number.\")", "async def choose(self, ctx, *, choices: str):\n await ctx.send(\n self.bot.bot_prefix + 'I choose: ``{}``'.format(random.choice(choices.split(\"|\"))))", "def choose_option():\n print(\"1. title of most played game\"\n \"\\n2. how many copies have been sold in total\"\n \"\\n3. average selling\"\n \"\\n4. how many characters long is the longest title\"\n \"\\n5. average of the release dates\"\n \"\\n6. properties of the game\"\n \"\\n7. how many games are grouped by genre\"\n \"\\n8. 
ordered titles of games by date and alphabet\"\n \"\\n9. Exit\")\n\n option = input(\"\\nDisplay: \")\n return option", "def get_best_match(self, list):\n raise NotImplementedError", "def pull_suggestion(self, callback, who, arg):\n\t\t\n random_sug = self.dong.db.get_random_row('suggest')\n res = self.google_suggest(callback, who, random_sug[2], False)\n\t\t\n w = res.split()\n if w[0].lower() in ('what', 'why', 'was', 'where', 'who', 'which', 'whom', 'when', 'how', 'is', 'are', 'did'):\n if w[-1:] != '?':\n res = res + '?'\n return res.capitalize()", "def question_with_suggested_answers(text, default, suggest):\n\n reply = question(text, default)\n while reply not in suggest:\n report(_(\"\"\"The value you have chosen is not among the suggested values.\nYou have chosen '%s'.\"\"\" % reply))\n report(_(\"The suggested values are \" + str(suggest)))\n correct = question(_(\"Do you want to correct your answer?\"), True)\n if correct:\n reply = question(text, default)\n else:\n return reply\n return reply", "def search(search, candidates):\n choicer = choices.Choice()\n for candidate in candidates:\n choicer.add(candidate)\n return choicer.search(search)", "def pick_place(choices_arg, question='Where to next?',inv=True):\r\n \r\n choices_alt = []\r\n \r\n if isinstance(choices_arg,list):\r\n choices = list(choices_arg)\r\n if inv:\r\n choices += ['inventory','map']\r\n \r\n elif isinstance(choices_arg,tuple):\r\n choices = choices_arg[0]\r\n choices_alt = choices_arg[1]\r\n if inv:\r\n choices += ['inventory','map']\r\n choices_alt += ['inventory','map']\r\n\r\n staying = True\r\n \r\n while staying:\r\n\r\n print question + '\\n'\r\n\r\n if choices_alt:\r\n for index in range(len(choices_alt)): #print alternate choices in menu form\r\n if str(choices[index]) == 'inventory':\r\n print\r\n print(str(index+1) + ': ' + str(choices_alt[index]))\r\n\r\n else:\r\n for index in range(len(choices)): #print choices in menu form\r\n if str(choices[index]) == 'inventory':\r\n print\r\n print(str(index+1) + ': ' + str(choices[index]))\r\n\r\n print('') #get some blank line in here yo\r\n chosen = raw_input('').lower()\r\n \r\n try:\r\n final = ''\r\n for index in range(len(choices)): #check if they typed a number\r\n item = choices[index]\r\n if index == int(chosen)-1:\r\n final = item\r\n staying = False\r\n if final == '':\r\n print 'Nice Try.\\n' #if they type a number not in range\r\n question = 'Try again, foo.'\r\n except:\r\n final = ''\r\n if choices_alt:\r\n for index in range(len(choices_alt)): #check if they typed letters\r\n item = choices_alt[index]\r\n if chosen == str(item).lower():\r\n final = choices[index]\r\n staying = False\r\n\r\n else:\r\n for index in range(len(choices)): #check if they typed letters\r\n item = choices[index]\r\n if chosen == str(item).lower():\r\n final = item\r\n staying = False\r\n if final == '':\r\n print 'Nice Try.\\n' #if they misspelled\r\n question = 'Try again, foo.'\r\n\r\n if final == 'map':\r\n inspect_map()\r\n question = 'Where to?'\r\n staying = True\r\n if final == 'inventory':\r\n inspect_inventory()\r\n question = 'Where to?'\r\n staying = True\r\n\r\n return final", "def report_matches(unknown, reference_langs, args):\n matches = language_match.best_matches(unknown, reference_langs, args.n_gram_max, args.matches)\n print(\"Best match{} for\".format(\"es\" if args.matches != 1 else \"\"), repr(unknown))\n pad = max([len(name) for (name, score) in matches])\n for (name, score) in matches:\n print(\"\\t\", name.ljust(pad), 
\"\\t{:>6.2%}\".format(score))", "def select_best_match(self, normalized_texts: List[str], transcript: str, verbose: bool = False):\n normalized_texts = calculate_cer(normalized_texts, transcript)\n normalized_texts = sorted(normalized_texts, key=lambda x: x[1])\n normalized_text, cer = normalized_texts[0]\n\n if verbose:\n print('-' * 30)\n for option in normalized_texts:\n print(option)\n print('-' * 30)\n return normalized_text, cer", "def get_choice(list_of_games, num_games, num_pages=None, current_page=None):\n\tif current_page == 0:\n\t\ttext = Fore.WHITE + 'Options: Display (' + Fore.GREEN + 'N' + Fore.WHITE + ')ext page, (' + Fore.MAGENTA + \\\n\t\t 'C' + Fore.WHITE + ')urrent page, (' + Fore.RED + 'Q' + Fore.WHITE + ')uit or enter the ' + Fore.CYAN + \\\n\t\t 'Number' + Fore.WHITE + ' of the game to play'\n\telse:\n\t\ttext = Fore.WHITE + 'Options: Display (' + Fore.BLUE + 'P' + Fore.WHITE + ')revious page, (' + Fore.GREEN + \\\n\t\t 'N' + Fore.WHITE + ')ext page, (' + Fore.MAGENTA + 'C' + Fore.WHITE + ')urrent page, (' + \\\n\t\t Fore.RED + 'Q' + Fore.WHITE + ')uit or enter the ' + Fore.CYAN + 'Number' + Fore.WHITE + ' of the game to play'\n\n\tprint '\\n' + text\n\tindex = raw_input(Fore.WHITE + Style.BRIGHT + 'What would you like to do?: ').lower()\n\twhile index != 'p' or index != 'n' or index != 'd' or index.isdigit():\n\t\tif index == 'c':\n\t\t\tos.system('clear')\n\t\t\tif num_pages:\n\t\t\t\tlist_columns(list_of_games)\n\t\t\t\tprint '\\nDisplaying page {} of {}'.format(current_page, num_pages)\n\t\t\telse:\n\t\t\t\tlist_columns(list_of_games)\n\t\t\tprint text\n\t\telif index == 'p':\n\t\t\tbreak\n\t\telif index == 'n':\n\t\t\tbreak\n\t\telif index == 'q':\n\t\t\tsys.exit()\n\t\telif index.isdigit():\n\t\t\tif 0 < int(index) < num_games:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint Fore.RED + '\\nSorry that is not a valid choice!'\n\t\t\tprint text\n\t\tindex = raw_input(Fore.WHITE + Style.BRIGHT + 'What would you like to do?: ')\n\n\treturn index", "def main():\n long = give_long()\n short = give_short()\n similarity1 = find_similarity(long, short)\n print('The best match is '+similarity1+'.')", "def attack_choice(self, user_choice):\n\n if 'A' in user_choice:\n return 'What is the name of your item?'\n\n elif 'B' in user_choice:\n # IDEA: Should there be limit on flee?\n if randint(1, 4) == 3:\n return False\n else:\n return \"Well looks like your escape attempt failed.\"\n else:\n return \"Please choose either 'A' or 'B'\"", "def _get_winner(computer_choice, player_choice):\n if player_choice not in choices:\n return 'Invalid choice'\n if computer_choice == player_choice:\n return tie\n if player_choice == defeated_by[computer_choice]:\n return win.format(player_choice, computer_choice)\n else:\n return lose.format(computer_choice,player_choice)", "async def poll(self, ctx, choice=None):\n\n if choice is None or choice.lower() in (\"online\", \"voice\"):\n suggestions = get_suggestions(get_users(ctx, choice))\n\n if suggestions:\n poll_id = create_strawpoll(\"What to play?\", suggestions)\n\n if poll_id:\n await self.bot.say(\"Here's your strawpoll link: https://www.strawpoll.me/{}\".format(poll_id))\n else:\n await self.bot.say(\"Phew! You have way too many games to create a poll. 
You should try `{}game suggest` instead.\".format(ctx.prefix))\n else:\n await self.bot.say(\"You have exactly **zero** games in common, go buy a 4-pack!\")\n else:\n await self.bot.say(\"Please enter a valid filter -> either use `online` (default) for all online users or `voice` for all users in a voice channel\")", "def choose_display():\n\n choices = [\"d\",\"theta\",\"both\"]\n\n temp_choice = \"false\"\n\n while temp_choice not in choices:\n temp_choice = input(\"Please choose the scale to display.\\nd, theta, both\\n\")\n if temp_choice not in choices:\n print(\"incorrect choice\\n\")\n\n return temp_choice", "def find_best_match(fpl_teams: List[str], team: str) -> Tuple[str, int]:\n best_ratio = 0.0\n best_match = None\n for t in fpl_teams:\n if fuzz.partial_ratio(t, team) > best_ratio:\n best_ratio = fuzz.partial_ratio(t, team)\n best_match = t\n print(f\"Best match {best_match}/{team}, score {best_ratio}\")\n return best_match, best_ratio", "def generate_answer(self):\n\n for model in Response.get_all_models():\n match = model.matches(self.request.question, self.request.element)\n if match: return model.solve(self.request.question, self.request.element)\n\n return \"I am unable to answer this question. If you think I should be able to answer\\n\" + \\\n \"this, please submit an issue or pull request at:\\n\" + \\\n \"https://github.com/jackcook/the-scientist/compare\"", "async def choose(self, ctx):\r\n if len(str(ctx.message.content)) < 9:\r\n await self.bot.say('{}, the usage is **!choose Option 1; Option 2; Option 3**, until you run out of options.'.format(ctx.message.author.mention))\r\n else:\r\n choices = str(ctx.message.content[8:])\r\n if '; ' not in choices:\r\n await self.bot.say('{}, the usage is **!choose Option 1; Option 2; Option 3**, ntil you run out of options.'.format(ctx.message.author.mention))\r\n else:\r\n options = choices.split('; ')\r\n await self.bot.say('{}, I choose: **{}**.'.format(ctx.message.author.mention,random.choice(options)))", "def get_fuzzy_match(object, answer, threshold=80):\n answer_phrase = generate_ngrams(answer)\n if answer_phrase:\n best_match = [fuzz.ratio(object, phr) for phr in answer_phrase]\n if np.max(best_match)>threshold:\n return np.max(best_match), answer_phrase[np.argmax(best_match)]\n else:\n return 0,''\n else:\n return 0, ''", "def suggest(suggestions):\n weight_sum = sum(suggestions.values())\n prob_ranges = []\n lower_bound = 0.0\n\n # generate probability ranges\n for task, weight in suggestions.iteritems():\n upper_bound = lower_bound + weight / weight_sum\n prob_ranges.append((task, (lower_bound, upper_bound)))\n\n # update lower bound\n lower_bound = upper_bound\n\n rand_number = random.random()\n\n for task, (low, high) in prob_ranges:\n if low <= rand_number < high:\n return task\n\n raise AssertionError('Should not be here. 
O_O');", "def _choice_a_filter(self, possibles):\n print(\"Avaliable Filters:\")\n for i, filter in enumerate(possibles):\n print(f\"{i+1} - {filter.name}\")\n\n option = 0\n while option <= 0 or option > len(possibles):\n try:\n option = int(input(\"\\nType the selected filter number: \"))\n except ValueError:\n continue\n\n return possibles[option - 1]", "def choose(multichoice, question, correct):\n counter = 1\n ncorrect = 0\n allowed = '12345'\n print(\"choose a synonym for \"+question)\n for option in multichoice.values():\n print(str(counter)+\")\"+option)\n if option == correct:\n ncorrect = counter\n counter = counter + 1\n res = raw_input(\">\")\n while (len(res) != 1 or res not in allowed):\n \tres = raw_input(\">\")\n #return res\n if int(res) == ncorrect:\n print(\"CORRECT!\")\n return 1\n else:\n print(\"\\n >>>>>> The answer is actually -- \" + correct)\n\tprint \n return 0", "def choose(multichoice, question, correct):\n counter = 1\n ncorrect = 0\n allowed = '12345'\n print(\"choose a synonym for \"+question)\n for option in multichoice.values():\n print(str(counter)+\")\"+option)\n if option == correct:\n ncorrect = counter\n counter = counter + 1\n res = raw_input(\">\")\n while (len(res) != 1 or res not in allowed):\n \tres = raw_input(\">\")\n #return res\n if int(res) == ncorrect:\n print(\"CORRECT!\")\n return 1\n else:\n print(\"\\n >>>>>> The answer is actually -- \" + correct)\n\tprint \n return 0", "def choose(multichoice, question, correct):\n counter = 1\n ncorrect = 0\n allowed = '12345'\n print(\"choose a synonym for \"+question)\n for option in multichoice.values():\n print(str(counter)+\")\"+option)\n if option == correct:\n ncorrect = counter\n counter = counter + 1\n res = raw_input(\">\")\n while (len(res) != 1 or res not in allowed):\n \tres = raw_input(\">\")\n #return res\n if int(res) == ncorrect:\n print(\"CORRECT!\")\n return 1\n else:\n print(\"\\n >>>>>> The answer is actually -- \" + correct)\n\tprint \n return 0", "def choose(multichoice, question, correct):\n counter = 1\n ncorrect = 0\n allowed = '12345'\n print(\"choose a synonym for \"+question)\n for option in multichoice.values():\n print(str(counter)+\")\"+option)\n if option == correct:\n ncorrect = counter\n counter = counter + 1\n res = raw_input(\">\")\n while (len(res) != 1 or res not in allowed):\n \tres = raw_input(\">\")\n #return res\n if int(res) == ncorrect:\n print(\"CORRECT!\")\n return 1\n else:\n print(\"\\n >>>>>> The answer is actually -- \" + correct)\n\tprint \n return 0", "def get_user_choice(options: list, prompt: str) -> str:\n choice = False\n while not validate_user_choice(choice, options):\n print(prompt)\n for num, option in enumerate(options, 1):\n print(f\"{num} - {option}\")\n choice = input('\\nPlease enter the number corresponding to your choice here: ')\n return choice", "def ask_matches(self, value, options):\n # Fast track dominant matches\n if len(options) == 1 or options[1][1] - options[0][1] > 0.5:\n match = options[0][0]\n if click.confirm(f'Confirm as {match}?', default=True):\n return [match]\n\n # Print menu\n for i, (m, pc) in enumerate(options, start=1):\n pc = int(100 * (1 - pc))\n click.echo(f' {i}) {m} ({pc}%)')\n click.echo(f'\\n {i + 1}) Add new')\n click.echo(f' {i + 2}) Ignore')\n\n # Process value\n while True:\n try:\n opts = self.ask_options('Choose option', len(options), -1)\n except ValueError:\n click.echo('Invalid option!')\n continue\n\n if opts == 'add-new':\n self.config.add_student(value)\n return [value]\n\n elif opts == 
'ignore':\n return []\n else:\n return opts", "def __ask_query(self):\n self.__output = list()\n return input(form('What do you want to search?\\n> '))", "def choose(inp):\n if not inp.text:\n return lex.input.missing\n options = [i.strip() for i in inp.text.split(',') if i.strip()]\n if not options:\n return lex.input.incorrect\n return random.choice(options)", "def choose_from_list(query_category, query_list):\n print('Choose the {cat} you want from the below list:'.format(\n cat=query_category))\n for counter, value in enumerate(query_list):\n print('{counter}: {value}'.format(counter=counter, value=value))\n selection = input('Choice: ')\n return query_list[int(selection)]", "def choice(prompt, choices=('y', 'n')):\n\n while True:\n choice = input(prompt).lower().strip()\n # terminate if end is pressed\n if choice == 'end':\n raise SystemExit\n # triggers if you enter only one name\n elif ',' not in choice:\n if choice in choices:\n break\n # triggers if you enter more than one name\n elif ',' in choice:\n choice = [i.strip().lower() for i in choice.split(',')]\n if list(filter(lambda x: x in choices, choice)) == choice:\n break\n\n prompt = (\"\\nPlease verify the format and be sure to enter a valid option:\\n>\")\n\n return choice", "def print_selection(self):\n #get the index of the selected item\n value = self.my_listbox.curselection()[0]\n\n #retrieve the corresponding value from dictionary using index\n s = self.my_listbox.get(value)\n\n #differentiate response based on score:\n #if score is around 2 ~ 3:\n if float(sorted_dict[s]) >= 2.853 and float(sorted_dict[s]) <= 3.836:\n result = \"\"\"Unfortunately, based on world happiness report(2019), \\nyour country scored very low.\\n\"\"\"\n prediction = \"\\nYour have a minimum chance at being happy.\"\n \n #if score is around 3 ~ 4:\n if float(sorted_dict[s]) > 3.835 and float(sorted_dict[s]) <= 4.819:\n result = \"\"\"Unfortunately, based on world happiness report(2019), \\nyour country scored relatively low. \\n\"\"\"\n prediction = \"\\nYou have a scarce chance at being happy.\"\n \n #if score is around 4 ~ 5:\n if float(sorted_dict[s]) > 4.819 and float(sorted_dict[s]) <= 5.802:\n result = \"\"\"Congratulation! Based on world happiness report(2019), \\nyour country scored in the middle.\\n\"\"\"\n prediction = \"\\nYou have a chance at being happy.\"\n \n #if score is around 5 ~ 6:\n if float(sorted_dict[s]) > 5.802 and float(sorted_dict[s]) <= 6.785:\n result = \"\"\"Congratulation! Based on world happiness report(2019), \\nyour country scored relatively high.\\n\"\"\"\n prediction = \"\\nYou have many chances at being happy.\"\n \n #if score is around 6 ~ 7:\n if float(sorted_dict[s]) > 6.785 and float(sorted_dict[s]) <= 7.769:\n result = \"\"\"Congratulation! 
Based on world happiness report(2019), \\nyour country scored very high.\\n\"\"\"\n prediction = \"\\nYou have an abundance of chances at being happy.\"\n\n #finalize response\n response = result + prediction+ \"\\n\" + s + \"\"\"'s score: \"\"\" + str(sorted_dict[s])\n\n self.var1.set(response)", "def multiple_choice(correct_choice, all_choices):\r\n # format for character is {'あ': 'ah'}\r\n # format for character is {'japanese character': 'english sound'}\r\n\r\n # get 3 different characters from all_choices, randomly\r\n # add all 3 'values', of the k:v pair, to the choices\r\n # if the input from the user != the 'key' of the correct character then it is wrong\r\n # if wrong, try again.\r", "def choice(prompt, choices=('y', 'n')):\n while True:\n choice = input(prompt).lower().strip()\n if choice == '0':\n raise SystemExit\n elif ',' not in choice:\n if choice in choices:\n break\n elif ',' in choice:\n choice = [i.strip().lower() for i in choice.split(',')]\n if list(filter(lambda x: x in choices, choice)) == choice:\n break\n prompt = (\"\\n!!!OOOPS !!!Please be sure to enter a valid option!!! /n\")\n return choice", "def select_best_match(\n self, normalized_texts: List[str], transcript: str, verbose: bool = False, remove_punct: bool = False\n ):\n normalized_texts = calculate_cer(normalized_texts, transcript, remove_punct)\n normalized_texts = sorted(normalized_texts, key=lambda x: x[1])\n normalized_text, cer = normalized_texts[0]\n\n if verbose:\n print('-' * 30)\n for option in normalized_texts:\n print(option)\n print('-' * 30)\n return normalized_text, cer", "def comp_choose_word(hand, word_list):\n maxscore = 0\n maxword = \"\" \n for n in range(calculate_handlen(hand)):\n perms = get_perms(hand, n)\n for word in perms:\n wordscore = get_word_score(word, HAND_SIZE)\n if wordscore > maxscore:\n if word not in word_list:\n continue\n else:\n maxscore = wordscore\n maxword = word\n return maxword\n # TO DO...", "def select(self, options, prompt='Your choice? '):\n local_opts = options\n if isinstance(options, string_types):\n local_opts = list(zip(options.split(), options.split()))\n fulloptions = []\n for opt in local_opts:\n if isinstance(opt, string_types):\n fulloptions.append((opt, opt))\n else:\n try:\n fulloptions.append((opt[0], opt[1]))\n except IndexError:\n fulloptions.append((opt[0], opt[0]))\n for (idx, (value, text)) in enumerate(fulloptions):\n self.poutput(' %2d. %s\\n' % (idx + 1, text))\n while True:\n response = sm.input(prompt)\n try:\n response = int(response)\n result = fulloptions[response - 1][0]\n break\n except (ValueError, IndexError):\n self.stdout.write(\"{!r} isn't a valid choice. Pick a number \"\n \"between 1 and {}:\\n\".format(\n response, len(fulloptions)))\n return result", "def input_with_validation(text, choices):\n choice_vals = set(map(str, choices))\n while True:\n val = input(f\"{text} | choices={choices}: \")\n if val in choice_vals:\n return val\n else:\n print(f\"{val} is not a valid value. Please choose from: {choices}\")", "def select(self, opts, prompt='Your choice? '):\n local_opts = opts\n if isinstance(opts, string_types):\n local_opts = list(zip(opts.split(), opts.split()))\n fulloptions = []\n for opt in local_opts:\n if isinstance(opt, string_types):\n fulloptions.append((opt, opt))\n else:\n try:\n fulloptions.append((opt[0], opt[1]))\n except IndexError:\n fulloptions.append((opt[0], opt[0]))\n for (idx, (value, text)) in enumerate(fulloptions):\n self.poutput(' %2d. 
%s\\n' % (idx + 1, text))\n while True:\n response = sm.input(prompt)\n try:\n response = int(response)\n result = fulloptions[response - 1][0]\n break\n except (ValueError, IndexError):\n self.poutput(\"{!r} isn't a valid choice. Pick a number between 1 and {}:\\n\".format(response,\n len(fulloptions)))\n return result", "def print_options(val, cur_matches):\n print val\n\n #skip one to print none at end\n for i,v in enumerate(cur_matches[1:]):\n print \"[%i] %s : %s \"%(i+1, v[0], v[1])\n print \"[%i] %s : %s \" % (0, cur_matches[0][0], cur_matches[0][1])\n\n print \n print 'Choice?'", "def __ui_choose_search_criteria_for_persons(self):\n print(\"By which criteria do you want to search persons?\\n\"\n \" 1. By name\\n\"\n \" 2. By phone number\\n\")\n user_choice = input(\"Type your option: \").strip()\n if user_choice == \"1\":\n self.__ui_search_persons_by_name()\n elif user_choice == \"2\":\n self.__ui_search_persons_by_phone_number()\n else:\n print(\"Invalid option!\\n\")\n return", "async def choose(*choices : str):\n await bot.say(random.choice(choices))", "def __ui_choose_search_criteria_for_activities(self):\n print(\"By which criteria do you want to search activities?\\n\"\n \" 1. By date\\n\"\n \" 2. By description\\n\")\n user_choice = input(\"Type your option: \").strip()\n if user_choice == \"1\":\n self.__ui_search_activities_by_date()\n elif user_choice == \"2\":\n self.__ui_search_activities_by_description()\n else:\n print(\"Invalid option!\\n\")\n return", "def choice(\n\t\toptions: Union[List[str], Mapping[str, str]],\n\t\ttext: str = '',\n\t\tdefault: Optional[str] = None,\n\t\tprompt_suffix: str = \": \",\n\t\tshow_default: bool = True,\n\t\terr: bool = False,\n\t\tstart_index: int = 0\n\t\t) -> Union[str, int]:\n\n\t# TODO: completer for numbers?\n\n\ttype_: click.ParamType\n\n\tif isinstance(options, Mapping):\n\t\t# (Y/I/N/O/D/Z) [default=N]\n\n\t\ttext = f\"{text} ({'/'.join(options.keys())})\"\n\t\ttype_ = click.STRING\n\n\t\tfor choice, descripton in options.items():\n\t\t\tclick.echo(f\" {choice} : {descripton}\")\n\n\telse:\n\t\ttype_ = click.IntRange(start_index, len(options) + 1 - start_index)\n\n\t\tfor idx, descripton in enumerate(options):\n\t\t\tidx += start_index\n\t\t\tclick.echo(f\" [{idx}] {descripton}\")\n\n\tif default is not None and show_default:\n\t\ttext += f\" [default={default}]\"\n\n\twhile True:\n\t\tselection = prompt(\n\t\t\t\ttext=text,\n\t\t\t\tdefault=default,\n\t\t\t\ttype=type_,\n\t\t\t\tprompt_suffix=prompt_suffix,\n\t\t\t\tshow_default=False,\n\t\t\t\terr=err,\n\t\t\t\t)\n\t\tif isinstance(options, Mapping):\n\t\t\tselection = selection.strip().upper()\n\t\t\tif selection not in options:\n\t\t\t\tclick.echo(f\"Please enter a valid option.\")\n\t\t\telse:\n\t\t\t\treturn selection\n\t\telse:\n\t\t\treturn selection - start_index", "def _multiple_choice_prompt(question: str,\n options: List[str],\n console: io.IO,\n default: Optional[int] = None) -> Optional[int]:\n assert '{}' in question\n assert len(options) > 0\n\n options_formatted = [\n '{}. 
{}'.format(str(i), opt) for i, opt in enumerate(options, 1)\n ]\n options = '\\n'.join(options_formatted)\n\n while True:\n answer = console.ask(question.format(options))\n\n if not answer and default:\n return default\n\n try:\n _multiple_choice_validate(answer, len(options))\n break\n except ValueError as e:\n console.error(e)\n\n return int(answer) - 1", "def test_pick_best_sentences(self): \n input_sentences = (\n \"first sentence\",\n \"second sentence\",\n \"third sentence\",\n \"fourth sentence\"\n )\n\n input_ratings = [0.01, 0.015, 0.02, 0.005]\n\n input_length = 2\n\n expected = [\"second sentence\", \"third sentence\"]\n\n result = self.summarizer.pick_best_sentences(input_sentences, input_ratings, input_length)\n self.assertListEqual(expected, result)", "def select_game_difficulty():\n prompt = \"Please select a game difficulty by typing it in!\\n\"\n prompt += \"Possible choices include easy, medium and hard.\\n\"\n equivalents_difficulty = {x: \"easy\" for x in (\"easy\", \"e\", \"1\", \"1.\")}\n equivalents_difficulty.update(\n {y: \"medium\" for y in (\"medium\", \"m\", \"2\", \"2.\")}\n )\n equivalents_difficulty.update(\n {z: \"hard\" for z in (\"hard\", \"h\", \"3\", \"3.\")}\n )\n chosen_difficulty = input(prompt).lower()\n while chosen_difficulty not in equivalents_difficulty:\n print(\"That's not an option!\")\n chosen_difficulty = input(prompt).lower()\n print(\n \"You've chosen \" +\n str(equivalents_difficulty[chosen_difficulty]) +\n \"!\\n\"\n )\n return equivalents_difficulty[chosen_difficulty]", "def new_extract_one(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):\n best_list = new_extract_without_order(\n query, choices, processor, scorer, score_cutoff)\n try:\n return max(best_list, key=lambda i: i[1])\n except ValueError:\n return None", "def get_most_probable_sentence(\n self,\n suggestions: List[List[str]]\n ) -> str:\n sent_word_count = len(suggestions)\n suggestions = [[tok] for tok in ContextModel.START_TOKENS] + suggestions + \\\n [[tok] for tok in ContextModel.END_TOKENS]\n memory = [[MemoryItem(score=0.0, decoded=tuple())], [MemoryItem(score=0.0, decoded=tuple())]]\n for t in range(2, len(suggestions)):\n memory.append([])\n for i, word in enumerate(suggestions[t]):\n mx_score, pick_1, pick_2 = 0, 0, 0\n for j, suggestion_1 in enumerate(suggestions[t - 1]):\n for k, suggestion_2 in enumerate(suggestions[t - 2]):\n curr_score = memory[-3][k].score \\\n + self.model_dict.get((suggestion_2, suggestion_1), self.default_prob) \\\n + self.model_dict.get((suggestion_1, word), self.default_prob) \\\n + self.model_dict.get((suggestion_2, word), self.default_prob)\n if curr_score > mx_score:\n mx_score, pick_1, pick_2 = curr_score, j, k\n memory_item = MemoryItem(score=mx_score, decoded=memory[-3][pick_2].decoded + (pick_2, pick_1,))\n memory[-1].append(memory_item)\n memory = memory[1:]\n\n decoded = ' '.join([suggestions[t][i] for t, i in enumerate(memory[-1][0].decoded[-sent_word_count:],\n start=2)])\n # score = memory[-1][0].score\n return decoded", "def suggest(ctx, request: str):\n replacer = Replacer(ctx.obj.get('GKG_API_KEY'))\n suggestion = replacer.suggest(request)\n if suggestion == request:\n logger.info(\n 'Result from Google Knowledge Graph equals input: \"{0}\"', request,\n )\n elif suggestion:\n logger.info('Result from Google Knowledge Graph: \"{0}\"', suggestion)\n else:\n logger.info(\n 'No results in the Google Knowledge Graph for: \"{0}\"', request,\n )", "def main():\n long_sequence = input(\"Please 
give ne a DNA sequence to search: \")\n short_sequence = input(\"What DNA sequence would you like to match? \")\n\n # converts characters to uppercase\n new_long_sequence = long_sequence.upper()\n new_short_sequence = short_sequence.upper()\n\n ans = homology(new_long_sequence, new_short_sequence)\n print(\"The best match is \" + ans)", "def compute_best_guess(self) -> str:\n entropy_all = self.compute_entropy_all()\n return entropy_all.idxmax()", "def get_choice():\n response = raw_input().rstrip(\"\\n\")\n\n if response == 'exit':\n #this doesn't work\n raise SystemExit()\n\n if not response.isdigit():\n get_choice()\n\n if not 0 <= int(response) < MATCH_LIMIT+2:\n get_choice()\n\n return int(response)", "def comp_choose_word(hand, word_list):\n perms_list = []\n for i in range(1, HAND_SIZE+1):\n perms_list.extend(get_perms(hand, i))\n perms_valid = []\n for j in range(0, len(perms_list)):\n word = perms_list[j]\n if is_valid_word(word, hand, word_list):\n perms_valid.append(word)\n\n \n\n\n\n\n\n valid_scores = []\n for k in range(0, len(perms_valid)):\n valid_scores.append(get_word_score(perms_valid[k], HAND_SIZE))\n valid_words = perms_valid[k]\n\n # Compare first two scores and put highest score and corresponding word in seprate variables\n # respectively.\n max_score = 0\n max_word = ''\n for j in range(0, len(valid_scores)):\n if max_score < valid_scores[j]:\n max_score = valid_scores[j]\n max_word = perms_valid[j]\n return max_word", "def unique_selection(prompt_text, option_list):\n while True:\n selection = clean_input(prompt_text)\n\n # Get list of options that match input string from the start.\n matched = match_start_string(option_list, selection)\n if(len(matched) == 0):\n print('\\nThere was no match for \"{}\". Please try again.'\n ' Here are the options: '.format(selection))\n [print('\\t{}'.format(option)) for option in option_list]\n else:\n if(len(matched) > 1):\n print('\\nThere was more than one match to \"{}\":'\n .format(selection))\n [print('\\t{}'.format(match)) for match in matched]\n print('Please be more specific or \"quit\".')\n else: # One clear option has been selected.\n break\n # Now there is only one item in the list, return it as a string.\n return str(matched).strip(\"'[]\")", "def question():\n print('Enter 1 to search database by habitat with detailed information\\nEnter 2 to search database by coordinates \\nEnter 3 to search by habitat in csv file for a quick overview without detail')\n print('habitat search options so far:\\n Alpenvorland, Niederrheinisches Tiefland, Oberrheinisches Tiefland')\n src = int(input('Enter here:'))\n\n if src == 1:\n habitat = input('Enter name of habitat\\n')\n query = \"habitat = '\" + habitat + \"'\"\n search_db_via_query(query)\n elif src == 2:\n search_by_coordinates()\n elif src == 3:\n search_by_habitat()\n else:\n print('no data')", "def most_probable_match(string, possible_strings, n_matches=1):\n if not _is_def_lcs:\n raise ImportError(\"`pylcs` not loaded try running 'pip install pylcs'\")\n def return_match(arg_most_prob, probable_match, i_match=1):\n print(\n f\"\\tMatch {i_match} of {n_matches} : \"\n + f\"'{possible_strings[arg_most_prob]}' == '{string}' \"\n + f\"with a match score of {probable_match[arg_most_prob]}/{4*len(string)}.\"\n )\n return possible_strings[arg_most_prob]\n\n possible_strings_l = [s.lower() for s in possible_strings]\n string_l = string.lower()\n lcsubsequence_list_lower = pylcs.lcs_of_list(string_l, possible_strings_l)\n probable_match = (np.add(np.add(\n 
pylcs.lcs_of_list(string, possible_strings), # longest subsequence \n pylcs.lcs2_of_list(string, possible_strings) # longest substring\n ), np.add(\n lcsubsequence_list_lower, # longest subsequence case insensitive\n pylcs.lcs2_of_list(string_l, possible_strings_l) # longest substring case insensitive\n )).astype(np.int) - (\n np.array([2*(len(t) - m) for t, m in zip(possible_strings_l, lcsubsequence_list_lower)]) # Penalise extra characters in the match\n ))\n if n_matches == 1:\n arg_most_prob = np.argmax(probable_match)\n return return_match(arg_most_prob, probable_match)\n elif n_matches > 1:\n args_most_prob = np.argpartition(probable_match, -n_matches)[-1:-n_matches-1:-1]\n return [return_match(a, probable_match, i+1) for i, a in enumerate(args_most_prob)]\n else: \n raise AttributeError(\"Invalid number of matches requested\")", "def best(score, names):\n top_score_name = \"\"\n for name in names:\n if score(top_score_name) < score(name):\n top_score_name = name\n\n return top_score_name + \" has the longest name.\"", "def selector(possibilities, names):\n # Print all possibilities\n for i in range(len(possibilities)):\n print(f'({i +1}) {possibilities[i]}')\n names.append(str(i + 1))\n skip_lines(1)\n # Ask a user for a choice\n choice = str(input(\"Select a category by using its index or by spelling it: \"))\n choice = choice.upper()\n # Verify that the choice is possible\n while choice not in names:\n choice = str(input(\"Select a category by using its index or by spelling it: \"))\n choice = choice.upper()\n return choice", "def ask_user_for_relevance(query_results):\n for i, result in enumerate(query_results):\n hdr = 'Result #%d ' % (i+1)\n prompt_text = 'Is result #%d relevant? [y/n] ' % (i+1)\n print '\\n' + hdr + '-'*(70 - len(hdr))\n print result.to_formatted_string()\n print '-'*70\n while True:\n user_in = raw_input(prompt_text).strip().lower()\n if user_in == 'y' or user_in == 'n':\n break\n if user_in == 'y':\n result.is_relevant = True", "def select_result(inference_results):\n if not inference_results:\n return 'unknown'\n for r in inference_results:\n assert r in possible_inference_results, '{0} not in {1}'.format(r, possible_inference_results)\n if r != 'unknown':\n return r\n return 'unknown'", "def weighted_choice(self, probabilities, key):\n\n try:\n choice = self.values[key].lower()\n except KeyError:\n # override not set.\n result = super(OverridableParameters, self)\\\n .weighted_choice(probabilities, key)\n if hasattr(result, \"__call__\"):\n self.results[key] = result.__name__\n else:\n self.results[key] = str(result)\n return result\n\n # Find the matching key (case insensitive)\n for probability, option in probabilities:\n if str(option).lower() == choice:\n self.results[key] = option\n return option\n\n # for function or class-type choices, also check __name__\n for probability, option in probabilities:\n if option.__name__.lower() == choice:\n self.results[key] = option.__name__\n return option\n\n assert False, \"Invalid value provided\"", "def solution(input_string):\n __check_validation(input_string)\n substrings = __get_all_possible_substrings(base_string=input_string)\n best_by_leftovers = __get_candidates_best_by_leftovers_count(substrings=substrings, base_string=input_string)\n best_by_quantity = __get_candidates_best_by_elements_count(substrings=best_by_leftovers)\n return best_by_quantity[0][1]", "def google_suggest(self, callback, who, arg, store=True):\n\t\t\n sugs = self.get_xml('http://google.com/complete/search', {'output':'toolbar', 'q': 
arg})\n\n if sugs is not None:\n try:\n sugs = [x[0].get('data') for x in sugs]\n except Exception, e:\n print \"XML error with Google Suggest: %s\" % e\n\t\t\t\n suggestions = self.remove_lyrics(sugs)\n random_sug = choice(suggestions)\n\t\t\t\n # Same string as we started with - roll again\n if random_sug == arg:\n try:\n suggestions.pop(suggestions.index(random_sug))\n except:\n pass\n random_sug = choice(suggestions)\n\t\t\t\t\n if random_sug is not None:\n if store:\n self.store_suggestion(who, arg)\n random_sug.strip('')\n random_sug.strip('\\r')\n w = random_sug.split()\n if w[0].lower() in ('what', 'why', 'was', 'where', 'who', 'which', 'whom', 'when', 'how', 'is', 'are', 'did'):\n if '?' not in w[-1:]:\n random_sug = random_sug + '?'\n return random_sug", "def print_choice_msg(self) -> None:\n pass", "def generate_results_string(player_list, singular_result, plural_result):\n string = \"\"\n plural = len(player_list) > 1\n player_number = 1\n if len(player_list) != 0:\n string += \"Player \"\n for player in player_list:\n string += player.get_name()\n if player_number < len(player_list) - 1:\n string += \", \"\n elif player_number < len(player_list):\n string += \" & \"\n player_number += 1\n if plural:\n string = string[:6] + \"s\" + string[6:] + plural_result\n else:\n string += singular_result\n return string", "def algo_selection(algos: tuple):\n print_header()\n print_list_algos(algos)\n print(\"Your choice: \", end='')\n return get_num_algo(algos)", "def choose_action(self, valid_list):\n \n action_str = input(\"Choose action: \").lower()\n print()\n \n if action_str in valid_list:\n return action_str\n \n else:\n print(\"Invalid action!\")\n return False", "def menu():\n print(\"Choose an option\")\n print(\"(L)ist Friends\")\n print(\"(A)dd Friend\")\n print(\"(C)lear List\")\n print(\"(Q)uit\")\n while True:\n choice = input(\"Now choose: \").lower().strip()\n if choice in 'lacq':\n return choice\n print(\"Invalid choice.\")", "def pickpattern():\n\n pattern = [\n ['Noun-common.list', 'Verbs-common.list', 'Adj-common.list', 'Nouns-common.list'],\n ['Noun-common.list', 'Verbs-common.list', 'Nouns-common.list', 'Adverb-common.list'],\n ['Adj-common.list', 'Nouns-common.list', 'Verb-common.list', 'Adverb-common.list'],\n ['Noun-common.list', 'Adverb-common.list', 'Verbs-common.list', 'Noun-common.list'],\n ['Noun-common.list', 'Adverb-common.list', 'Verbs-common.list', 'Nouns-common.list'],\n ['Noun-common.list', 'Verbs-common.list', 'Adverb-common.list', 'Adj-common.list']\n ]\n\n return choice(pattern)", "def comp_choose_word(hand, word_list):\n hand_length = 0\n for v in hand.values():\n hand_length += v\n\n word_choices = []\n\n for num in range(1, hand_length + 1):\n num_perms = get_perms(hand, num)\n for perm in num_perms:\n if perm in word_list:\n word_choices.append(perm)\n\n comp_word = ''\n\n for word in word_choices:\n score = get_word_score(word, hand_length)\n if score > get_word_score(comp_word, hand_length):\n comp_word = word\n\n return comp_word", "async def choose(ctx, *choices: str):\n await ctx.send(random.choice(choices))", "def build_alternatives(replacements):\n alternatives = []\n highest = 0\n prev_lev = 0\n\n for term in replacements:\n # print(\"\\nterm => \" + str(term))\n lev = term[1][0]\n hits = term[1][1]\n\n if lev < 2 or lev == prev_lev:\n if args.all:\n # print(\"\\nterm => \" + str(term))\n alternatives.append(term[0])\n prev_lev = lev\n else:\n if hits > highest:\n highest = hits\n # print(\"\\nterm => \" + str(term))\n 
alternatives.append(term[0])\n prev_lev = lev\n\n if args.all or args.jaccard:\n # print('!')\n return alternatives\n else:\n # print('?')\n return alternatives[-1]", "def _get_choices_str(self):\n return ', '.join(\n '\"%s\"' % choice\n for choice in self.choices\n )", "def get_suggestions():\n\n flash(\"The Recommendation feature is under construction! Please check back soon!\")\n return render_template('index.html')", "def dp_match(phrase, songs=None):\n scores = [score_match(phrase, song) for song in songs]\n i = scores.index(max(scores))\n return scores[i], [phrase], [songs[i]]", "def search_method_menu(self):\n\n print()\n options = {'1': 'Employee Name', '2': 'Keyword', '3': 'Time Spent',\n '4': 'Date', '5': 'Date Range', '6': 'Exit to main menu'}\n\n while True:\n\n for k, v in options.items():\n print(k + \". \" + v)\n\n user_choice = input('\\nPlease enter the number of choice: ').lower().strip()\n\n if user_choice in options.keys():\n return options.get(user_choice)\n else:\n print('\\nInvalid choice! Please try again.\\n')", "def generate_suggestion():\n age = age_input(\"Enter age: \")\n if age <= 5:\n age_5_younger = open_and_read_file(\"age_5_younger.txt\")\n print \"Why don't you {}?\".format(random.choice(age_5_younger))\n print\n\n elif (age > 5) and (age < 20):\n age_5_to_20 = open_and_read_file(\"age_5_to_20.txt\")\n print \"Why don't you {}?\".format(random.choice(age_5_to_20))\n print\n\n elif age >= 20:\n age_20_plus = open_and_read_file(\"age_20_plus.txt\")\n print \"Why don't you {}?\".format(random.choice(age_20_plus))\n print", "def userSuggestions(database):\n firstname=str(input(\"who do you want to have follow suggestions for :\"))\n usr,find=getByName(database,firstname)\n if not find:\n print(\"the User could not be found\")\n return\n else:\n following=[]\n followers=[]\n for folower in usr.folowed:\n followers.append(folower)\n for folowed in usr.folow:\n following.append(folowed)\n results=[]\n print(\"On what do you want your suggestions to be based on?\\n1. Mutual Interests\\n2. Mutual Connections\\n3. Both\")\n choice=int(input(\"Your choice :\"))\n for key ,usrs in database.items():\n if key not in following: \n correspondant=0\n if choice == 1 or choice == 3:\n for interest in usr.interest:\n if interest in usrs.interest:\n correspondant+=1\n if choice == 2 or choice == 3:\n for folower in followers:\n for folows in usrs.folowed:\n if key == folows:\n correspondant+=1\n results.append([key,correspondant])\n for i in range(len(results)):\n for j in range(0, len(results)-i-1):\n if results[j][1] > results[j+1][1] :\n results[j], results[j+1] = results[j+1], results[j]\n for k in range(5):\n print(results[k][0])" ]
[ "0.6642002", "0.6545624", "0.6269913", "0.623575", "0.6235035", "0.62169236", "0.6010412", "0.60037553", "0.6000231", "0.59865594", "0.5913682", "0.59093326", "0.5877843", "0.5836029", "0.5736314", "0.57017106", "0.5665372", "0.56378025", "0.5632232", "0.5620297", "0.55832994", "0.55733556", "0.5563177", "0.5549811", "0.5519867", "0.5513932", "0.5500545", "0.5495603", "0.5491926", "0.5485603", "0.5484042", "0.54789543", "0.54787666", "0.5478386", "0.54667133", "0.5418912", "0.54101396", "0.54018354", "0.5396005", "0.5379854", "0.5374933", "0.53748924", "0.5371001", "0.5371001", "0.5371001", "0.5371001", "0.5368381", "0.5368118", "0.5362261", "0.53587127", "0.5352045", "0.53505117", "0.53484565", "0.5304013", "0.52986896", "0.5292428", "0.5279313", "0.52789766", "0.5276834", "0.5269544", "0.52680045", "0.526674", "0.52599", "0.52580094", "0.52578825", "0.5256361", "0.5256171", "0.52421933", "0.52354825", "0.52348256", "0.5231197", "0.5226249", "0.52261364", "0.52255726", "0.5224022", "0.5220449", "0.5216554", "0.5214978", "0.5207824", "0.52006596", "0.5196596", "0.51948756", "0.5184187", "0.51800287", "0.51709956", "0.51661474", "0.5165669", "0.5158696", "0.51556396", "0.51502347", "0.5150001", "0.5142786", "0.51353025", "0.51325166", "0.5124087", "0.5104297", "0.510413", "0.5102656", "0.5097913", "0.5096392" ]
0.7756389
0
Asks a user how many days to go back. Returns int.
def get_interactive_days(self): answer = input("Press return to get entries of past day or input number of days to go back in time: ") if answer == '': days = 1 else: try: days = int(answer) except: print("You didn't enter a number, assuming 1 day.") days = 1 return days
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decreases_remaining(self):\n return 2 - self.decreases_today", "def remain():\r\n global total\r\n global user_pick\r\n total = int(total - user_pick)\r\n print(\"Remaining \" + str(total))", "def remaining_days_in_cycle(self) -> int:\n if not self.expiration:\n return 0\n delta = self.expiration - _today()\n return int(delta.days)", "def decays(self):\n return self._base.decays", "def last_days_results(self, days):\n return self.security['Date', 'Close', 'FinalDecision'][-days:]", "def Daysleftverification():\n pass", "def now_minus(days: int):\n return NOW - datetime.timedelta(days=days)", "def countdown(self, amt=1):\n pass", "def getBugsToday(myDay):\r\n #set bugs_today as neg one to accept zero as an input\r\n bugs_today = -1\r\n while bugs_today < 0 :\r\n myBugs_Validation = (input(u'Enter the number of bugs collected on day ' + str(myDay) + ' : '))\r\n #call my getValidation to check values entered\r\n bugs_today = getValidation(myBugs_Validation)\r\n #check if user entered a valid number\r\n if bugs_today == -1:\r\n print('\\nPlease enter the number of bugs collected. \\nEnter a whole integer number >= 0')\r\n \r\n return bugs_today", "def remaining_trial_days(self):\n try:\n return self.count_days_from_now(self.trial_ended_at)\n except AttributeError:\n return 0", "def days(input=None):\n return get(input).days", "def ex8() :\r\n print(\" - Date Calculator - \")\r\n import datetime\r\n today = datetime.date.today()\r\n print(today)\r\n try : #try catch method, in case user enters non-date, or 31st Feb etc.\r\n userDate = input(\"Please enter the date to check in a dd/mm/yy format: \") #userDate is string\r\n userDate = datetime.datetime.strptime(userDate, '%d/%m/%Y').date() #userDate is date_object\r\n if userDate < today : print(\"Invalid input, date is in the past\")\r\n elif userDate == today: print(\"That's today you dum-dum, answer is 0 days.\")\r\n else:\r\n delta = userDate - today #calculate difference\r\n delta = str(delta) #date_object don't work with split only str\r\n delta = delta.split(\",\") #unorthodox method to delete time (0:00:0) from the days\r\n print(\"The number of days between today (\",today,\") and entered date (\",userDate,\") are \",delta[0],\".\")\r\n except ValueError as e :\r\n print(\"Not a valid date.\")", "def days_since_last_checkin(self):\n # TODO use local timezone\n checkin_date = (self.last_checkin - datetime.timedelta(hours=5)).date()\n today = datetime.date.today()\n return (today - checkin_date).days", "def remaining_days(self):\n if self.trialing or self.trial_ended:\n return self.remaining_trial_days\n else:\n return self.remaining_days_in_current_period", "def countdown(n):\n if n < 0:\n print(\"ERROR! 
Invalid input\")\n elif n == 0:\n print(\"Done!\")\n else:\n print(n)\n return countdown(n-1)", "def get_number_days(self):\r\n return 1", "def remaining_days_in_current_period(self):\n try:\n return self.count_days_from_now(self.current_period_ends_at)\n except AttributeError:\n return 0", "def get_n_days_ago(self, startdate, n):\n return startdate - datetime.timedelta(days=n)", "def automatically_after_days(self) -> Optional[int]:\n return pulumi.get(self, \"automatically_after_days\")", "def automatically_after_days(self) -> Optional[int]:\n return pulumi.get(self, \"automatically_after_days\")", "def calc_remained_days(name: str, full_date: str, current: str):\n expiry_date = get_expiry_date(name, full_date)\n intervals = datetime.strptime(expiry_date, DATE_FORMAT) - datetime.strptime(current, DATE_FORMAT)\n days = intervals.days + 1\n if days <= 0:\n raise ValueError(f'remained days {expiry_date} - {current}, {days} out of range. ')\n return days", "def remaining_retention_days(self) -> int:\n return pulumi.get(self, \"remaining_retention_days\")", "def until_reset(self) -> int:\n return int((self.resets_at - datetime.now()).total_seconds())", "def decrement(val):\n return coerce_to_int(val) - 1", "def down(self):\n global curafl, maxafl\n curafl -= 1\n if (curafl == 0):\n curafl = maxafl \n try:\n subprocess.call(['/home/holiday/bin/afl', '%d' % curafl])\n except:\n print \"afl failed\"", "def countdown(n):\n while n > 0:\n n -= 1", "def numOfDays():\n\n print(\"Podaj rok, miesiac oraz dzien pierwszej daty: \")\n inputs = [input() for i in range(3)]\n\n print(\"Podaj rok, miesiac oraz dzien drugiej daty: \")\n inputs1 = [input() for i in range(3)]\n\n d0 = date(inputs[0], inputs[1], inputs[2])\n d1 = date(inputs1[0], inputs1[1], inputs1[2])\n delta = abs(d1 - d0)\n \n print(delta.days)\n return abs(delta.days)", "def GetDownLast(self, *args, **kwargs):\n pass", "def back( self ):\n super( ConfirmationScreen, self ).back()\n\n self._current_option = self._current_option - 1\n print( \"Current option is \" +str( self._current_option ) )\n \n if self._current_option < 0:\n self._current_option = len( self._options ) - 1", "async def rewards(ctx, username):\n history = get_history(username)\n await bot.say(history+\" in the past 7 days\")", "def thirty_days_ago():\n return date.today() - timedelta(days=30)", "def get_skipped_days(self) -> int:\n return self._skipped_days.get()", "def debit(self):\n debit = 0 #variable to track the remaining debit\n debit = self.total_purchase() - self.total_clearance()\n return debit", "def yesterday(self):\n if self.isLeapYear():\n fdays = 29\n else:\n fdays = 28\n\n DIM = [0, 31, fdays, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n currentDay = self.day\n firstMonth = 1\n firstDay = 1\n\n if currentDay == firstDay and self.month == firstMonth:\n self.year -= 1\n self.month = 12\n self.day = 31\n elif currentDay == firstDay:\n self.month -= 1\n self.day = DIM[self.month]\n else:\n self.day -= 1", "def count_down(count):\r\n \r\n global timer\r\n \r\n # convert passed in total seconds to be able to be displayed as 00:00 format\r\n count_min = math.floor(count / 60)\r\n count_sec = count % 60\r\n if count_sec < 10:\r\n count_sec = f\"0{count_sec}\" \r\n \r\n # updates display\r\n canvas.itemconfig(timer_text, text=f\"{count_min}:{count_sec}\")\r\n \r\n if count > 0:\r\n timer = window.after(1000, count_down, count - 1)\r\n else:\r\n start_timer()\r\n # adds check marks for each work session\r\n marks = \"\"\r\n work_sessions = math.floor(reps / 2)\r\n for _ 
in range(work_sessions):\r\n marks += CHECK_MARK\r\n \r\n check_marks.config(text=marks)", "def yesterday():\n return datetime.today() - timedelta(1)", "def get_days_old(days):\n days = int(days)\n current_time = datetime.datetime.today()\n days_after = datetime.timedelta(days)\n new_date = current_time - days_after\n new_date = new_date.strftime(\"%d-%b-%Y\")\n return new_date", "def OnBackView( self, event ):\n self.historyIndex -= 1\n try:\n self.RestoreHistory( self.history[ self.historyIndex ] )\n except IndexError, err:\n self.SetStatusText( _('No further history available'))", "def goBack(self):\r\n if self.currLoc > 0:\r\n self.currLoc -= 1\r\n return self.history[self.currLoc]", "def go_back(update: Update, context: CallbackContext):\n query = update.callback_query\n query.answer()\n\n choice = query.data.split(\"back_to:\")[1]\n\n if \"links\" in choice:\n get_links(update, context, editable_message_id=query.message.message_id)\n elif \"expand\" in choice:\n expand_link(update, context)", "def remaining(self):\n if self.goal:\n return self.goal - self.total_donated()\n else:\n return 0", "def third_down_attempts(self):\n return self._third_down_attempts", "def test_daysback_greater_than_max(self):\n req = MockRequest(self.env, args={'daysback': '100'})\n\n data = TimelineModule(self.env).process_request(req)[1]\n\n self.assertEqual(90, data['daysback'])", "def age(self):\n then = self.ship_date\n if self.status == 'delivered':\n now = self.event_time.date()\n else:\n now = datetime.datetime.now().date()\n delta = now - then\n return delta.days", "def get_user_check_interval(self):\n check_interval = None\n print(\"How many seconds between consequetive checks?:\")\n while not check_interval:\n try:\n check_interval = int(input())\n except ValueError:\n print(\"That doesn't look like a number. 
Try again please.\")\n continue\n return check_interval", "def elapsed_days(self) -> int:\n return (datetime.today() - self.release_datetime).days", "def last_seen_days(self):\n return self.last_seen.days", "def decrement(self):\r\n return self.add(-1)", "def _historyBackwardClickedSlot(self):\r\n\r\n steps, success = self._controller.backwardAction.data().toInt()\r\n if success:\r\n self._controller.model.relativeHistoryIndex = steps", "def find_change(now: int, history: List[int]) -> Union[int, None]:\n if now is None or history is None:\n return None\n index = 7\n if len(history) < 7:\n index = len(history)\n return history[-index] - now", "def max_days(username):\n path = users_folder_file_path + username\n with open(path + '/preferences.txt', 'r+') as json_file:\n data = json.load(json_file)\n\n data['training_level_increase'] = \\\n int(request.form['training_level_increase'])\n\n json_file.seek(0) # rewind\n json.dump(data, json_file)\n json_file.truncate()\n\n if data['runner_type'] == 0:\n return render_template('max_days.html', username=username)\n elif data['runner_type'] == 1:\n return render_template('max_days_int.html', username=username)", "def go_to(self, user_input: int) -> None:\n if self.last_item_index > 9:\n go_to_max = ord(\"9\")\n elif self.last_item_index < 0:\n return\n else:\n go_to_max = ord(str(self.last_item_index))\n # TODO: Make this use a buffer for multi-digit numbers\n # TODO: also use for letters\n if ord(\"1\") <= user_input <= go_to_max:\n self.current_option = user_input - ord(\"0\") - 1\n self.draw()", "def get_number_days(self):\r\n raise NotImplementedError", "def days(self) -> Optional[int]:\n return pulumi.get(self, \"days\")", "def todays_choice():\n while True: #Run until a suitable input is passed.\n question = input(\"Deposit(D) or Withdrawal(W) or History(H) or Balance(B) >>> \")\n if question == \"D\": #if savings account\n return \"deposit\"\n elif question == \"W\": #if current account\n return \"withdraw\"\n elif question == \"H\":\n return \"history\"\n elif question == \"B\":\n return \"balance\"", "def _max_days(self):\n # type: (...) -> Union[int, Tuple[int]]\n\n return self.value.max_days", "def user_numforms_prev(*args):\n return _ida_hexrays.user_numforms_prev(*args)", "def test_due_back_field_initial_value(self):\n login = self.client.login(\n username='testuser2',\n password='2HJ1vRV0Z&3iD')\n response = self.client.get(\n reverse('librarian-renew-book',\n kwargs={'pk': self.test_bookinstance1.pk}))\n self.assertEqual(response.status_code, 200)\n\n future_3_weeks = datetime.date.today() + datetime.timedelta(weeks=3)\n self.assertEqual(response.context['form'].initial['due_back'],\n future_3_weeks)", "def days_until(self, target_date_tensor):\n return target_date_tensor.ordinal() - self._ordinals", "def get_day():\n return handle_invalid_inputs(question_4, days)", "def click_back_button(driver):\n driver.back()\n return PASSED", "def goagain():\n while True:\n goagain = input(\"Would you like to pick again. 
(y/n)\")\n if goagain == 'y' or goagain == 'Y':\n x = 1\n break\n elif goagain == 'n' or goagain == 'N':\n x = 0\n break\n else:\n print('Invalid Input, Please retry')\n return x", "def max_age_in_days(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_age_in_days\")", "def get_remaining_count(self):\n return self.total_count - self.count", "def subtraction(self):\r\n global answer\r\n while True:\r\n try:\r\n easy_random1 = int(random.choice(string.digits))\r\n easy_random2 = int(random.choice(string.digits))\r\n easy_random3 = int(random.choice(string.digits))\r\n easy_random4 = int(random.choice(string.digits))\r\n print(f\"{easy_random1} - {easy_random2} - {easy_random3} - {easy_random4} = ?\")\r\n real_answer = easy_random1 - easy_random2 - easy_random3 - easy_random4\r\n answer = input(\"Enter answer: \")\r\n if answer.lower() == \"stop\":\r\n print(\"okay\")\r\n break\r\n if int(answer) == real_answer:\r\n print(\"CORRECT ANSWER\")\r\n else:\r\n print(\"WRONG ANSWER\")\r\n print(f\"the answer is {real_answer} sorry! try again\")\r\n except ValueError:\r\n return f'\"{answer}\" is not a valid number, only the string stop is allowed'", "def get_day(month_name, num_days):\n display_month(month_name, num_days)\n day = input(\"Enter Day: \")\n try:\n day = int(day)\n if day > num_days or day < 1:\n os.system('cls')\n print(\"Accepted Values: 1-\" + str(num_days))\n return get_day(month_name, num_days)\n else:\n return day\n except ValueError:\n os.system('cls')\n print(\"Accepted Values: 1-\" + str(num_days))\n return get_day(month_name, num_days)", "def go_down(self, _: int = 0) -> None:\n if self.current_option < self.last_item_index:\n self.current_option += 1\n else:\n self.current_option = 0\n self.draw()", "def check_day_advance(self):\n days_ago = datetime.now().toordinal() - self.start_time.toordinal()\n if days_ago:\n # New day. 
Save data for the old day.\n self.save(days_ago = days_ago)\n self.start_time = datetime.now()\n # Reset all counters back to 0:00:00.\n for rd in self.row_detail_list:\n rd.time = '0:00:00'\n self.refresh_display()", "def data_refresh_window_days(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"data_refresh_window_days\")", "def drawdown(returns):\n val = returns.cumsum()\n running_max = val.expanding().max()\n drawdown_series = val - running_max\n return drawdown_series", "def test_check_yesterday(self):\n \n # the service calendar has two weekdays, back to back\n sc = ServiceCalendar()\n sc.add_period( 0, 3600*24, [\"WKDY\"] )\n sc.add_period( 3600*24, 2*3600*24, [\"WKDY\"] )\n \n # the timezone lasts for two days and has no offset\n # this is just boilerplate\n tz = Timezone()\n tz.add_period( TimezonePeriod(0, 2*3600*24, 0) )\n \n # tripboard runs on weekdays for agency 0\n al = TripAlight( \"WKDY\", sc, tz, 0 )\n \n # one alighting - one second before midnight\n al.add_alighting( \"1\", 86400-1, 0 )\n \n # our starting state is midnight between the two days\n s0 = State(1, 86400)\n \n # it should be one second after the last alighting \n s1 = al.walk_back( s0, WalkOptions() )\n self.assertEquals( s1.time, 86399 )", "def calcDays(dateToCheck):\n today = datetime.date.today()\n # guard against *somehow* receiving an incorrect data type\n if type(dateToCheck) is not datetime.date:\n origBirthday = datetime.date.fromisoformat(str(dateToCheck))\n else:\n origBirthday = dateToCheck\n # determine the next birthday for this date of birth\n nextBirthday = datetime.date(today.year, origBirthday.month, origBirthday.day)\n # calculate days to next birthday\n if today<nextBirthday:\n daysLeft = (nextBirthday - today).days\n return daysLeft\n elif today == nextBirthday:\n daysLeft = 0\n return daysLeft\n else:\n newDate = datetime.date(nextBirthday.year + 1, nextBirthday.month, nextBirthday.day)\n daysLeft = (newDate - today).days\n return daysLeft", "def num_arbitrary_lookback(self, seconds):\n offset = self._get_offset(seconds)\n\n return len(self._search_history) - offset", "def data_refresh_window_days(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"data_refresh_window_days\")", "def calculate_days(time):\n return int(time / 86400)", "def retention_days(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"retention_days\")", "def determine_attempts():\r\n #Inputs: # of attempts requested by user\r\n #Outputs: game gives number of attempts user selected before ending \r\n how_many_tries = int(raw_input(\"How many attempts do you want to answer a blank correctly before the answer is provided to you? 
Please provide a number, such as 2.\\n\"))\r\n attempts = how_many_tries\r\n number_of_tries = 5\r\n while how_many_tries < 1:\r\n print \"Please try again.\"\r\n determine_attempts\r\n attempts = attempts + 1\r\n if attempts == number_of_tries:\r\n break \r\n else:\r\n print \"Please read the paragraph below and provide the answers to fill in the numbered blanks.\\nYou will be given \" + str(attempts) + \" chances to enter the correct answer before it is provided to you.\\n\"\r\n return how_many_tries", "def navigBack(self):\n cmdId = self.executeCommand(Command.GO_BACK)\n return cmdId", "def n_time_comeback(self, chosen_class):\n return self.customer_classes[chosen_class].n_times_comeback()", "def go_back(self):\n if not self._items:\n raise ValueError(\"Empty navigation history\")\n\n if self.can_go_back():\n self._pointer -= 1\n return self._items[self._pointer]", "def subNDays(self, N):\n print self\n for i in range(N):\n self.yesterday()\n print self", "def previous_date(self):\n yesterday = pendulum.yesterday('UTC')\n last_update = self.storage.last_update(self.feed)\n if not last_update or last_update < yesterday:\n last_update = yesterday\n return last_update", "def days(self):\n ends_at = created_at = datetime.datetime.now().replace(tzinfo=utc)\n if self.created_at:\n created_at = self.created_at\n if self.ends_at:\n ends_at = self.ends_at\n return (ends_at - created_at).days", "def enough_days(self, cur, username, start_date, end_date):\n cur.execute('SELECT days_free FROM users WHERE username = ?', (username,))\n days_free = cur.fetchone()[0]\n days_between = abs(self.days_difference(start_date, end_date))\n return days_free >= days_between", "def remaining(self) -> int:\n\n return self.window[1]", "def fourth_down_attempts(self):\n return self._fourth_down_attempts", "def check_answer(guess, answer, turns):\n if guess > answer:\n print(\"Too high.\")\n return turns - 1\n elif guess < answer:\n print(\"Too low.\")\n return turns - 1\n else:\n print(f\"You got it! The answer was {answer}.\")", "def downtime(self, down_time=0):\n self.down_time = down_time\n return down_time", "def advance(self):\n\n max_days = Calendar.months[self.__months - 1]\n if self.__months == 2 and Calendar.leapyear(self.__years):\n max_days += 1\n if self.__days == max_days:\n self.__days = 1\n if self.__months == 12:\n self.__months = 1\n self.__years += 1\n else:\n self.__months += 1\n else:\n self.__days += 1", "def GetDown(self, *args, **kwargs):\n pass", "def count_of_upgrades_after_downgrades(self) -> Optional[float]:\n return pulumi.get(self, \"count_of_upgrades_after_downgrades\")", "def get_prev_weekday(x: Optional[Date] = None) -> Date:\n ## Get the day:\n x = x or get_today()\n\n ## Define the offset:\n offset = max(1, (x.weekday() + 6) % 7 - 3)\n\n ## Compute the day and return:\n return x - TimeDelta(days=offset)", "async def countdown():\n await bot.say('Monster Hunter World will release on January 26, 2018')", "def card_info_attempts(entered, stored):\r\n attempts = 3\r\n # Starts the countdown of tries\r\n while entered != stored:\r\n if attempts != 0:\r\n attempts -= 1\r\n print(\"Invalid card information. 
\\nAttempts remaining: \", attempts)\r\n print(\"Please try again.\")\r\n entered = input(\"\")\r\n else:\r\n print(\"Attempt maximum exceeded\")\r\n quit()", "def daily_rolling_drawdown(cumulative_returns, rolling_max):\n\n return (cumulative_returns / rolling_max) - 1", "def _set_days_until_triage(self):\n if self.sla_triaged_at:\n btd = dates.businesstimedelta(self.created_at, self.sla_triaged_at)\n self.days_until_triage = btd.days\n else:\n self.days_until_triage = None", "def count_downvotes(self):\n return self.filter(value=-1).count()", "def shift_down(self, times=1):\n try:\n return Location(self._rank - times, self._file)\n except IndexError as e:\n raise IndexError(e)", "def secondsLeft(self)->int:\n x = self.expirePeriodInSeconds - self.secondsPassed\n return x if x >=0 else 0", "def goBack(current, bck, fwd):\n if bck.size() < 1:\n print(\"Cannot go back.\")\n else:\n fwd.push(current)\n current = bck.pop()\n return current" ]
[ "0.6136943", "0.59328794", "0.5877445", "0.58612114", "0.57502425", "0.5743318", "0.55419844", "0.55365306", "0.55009514", "0.5493947", "0.54851043", "0.5480768", "0.5478333", "0.54070824", "0.5393414", "0.53924155", "0.5386575", "0.5374631", "0.52890193", "0.52890193", "0.5274363", "0.5264618", "0.52618796", "0.52066404", "0.51860446", "0.51715845", "0.51540196", "0.51327634", "0.51059616", "0.50729567", "0.506975", "0.5064964", "0.5064865", "0.5064503", "0.5042316", "0.5016015", "0.50024295", "0.49712908", "0.49639836", "0.49493575", "0.49440816", "0.493596", "0.49347076", "0.49299783", "0.49257365", "0.4923823", "0.4917782", "0.4905699", "0.49005094", "0.48916104", "0.48887065", "0.48817432", "0.48735216", "0.48656064", "0.4865059", "0.4861716", "0.48595396", "0.48576745", "0.48514014", "0.48429787", "0.4839211", "0.48193046", "0.48165828", "0.4812975", "0.48071054", "0.48046687", "0.4803239", "0.47900072", "0.47892419", "0.47892162", "0.4787932", "0.4781087", "0.47705102", "0.47691494", "0.47676113", "0.4767342", "0.47669062", "0.47640276", "0.47506025", "0.47479334", "0.47390014", "0.47380492", "0.4734113", "0.47324646", "0.47286654", "0.47263476", "0.47192648", "0.47128004", "0.47100136", "0.47042996", "0.46992695", "0.46940452", "0.46930072", "0.46921608", "0.46873507", "0.46845353", "0.46826205", "0.4679196", "0.46748653", "0.46721733" ]
0.70395863
0
Hacky way to check if this function already made a Toggl project based on a Zendesk ticket ID.
def already_created(self, ticket_id, toggl_projects): project_prepends = [p['name'].split()[0][1:] for p in toggl_projects] if str(ticket_id) in project_prepends: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def check_if_is_ticket(ctx):\n channel : TextChannel = ctx.channel\n return 'ticket-' in channel.name", "def sync(self, no_of_days=1):\n zd = Zendesk()\n tg = Toggl()\n try:\n self.print(\"Syncing...\")\n self.print_divider(30)\n tickets = zd.get_tickets(no_of_days)\n for ticket in tickets:\n project_title = self.format_title(ticket.id, ticket.subject)\n if ticket.organization:\n client_id = tg.get_client_id(name=ticket.organization.name)\n if not client_id:\n new_client = tg.create_client(ticket.organization.name)\n client_id = new_client['id']\n else:\n client_id = False\n self.print(\"Ticket '%s' has no associated organization!\" % (project_title))\n all_projects = tg.get_projects()\n if not self.already_created(ticket.id, all_projects):\n self.print(\"Creating project '%s'...\" % (project_title))\n result = tg.create_project(project_title, client_id, is_private=False)\n self.print(\"Toggl response:\")\n self.log(result, silent=False)\n else:\n self.print(\"There is already a Toggl project for Zendesk ticket #%s!\" % ticket.id)\n pass\n # TODO: edit Toggl project\n # tg.edit_project(project_id, name=ticket.subject)\n self.print_divider(30)\n self.print(\"Done!\")\n except:\n self.log(traceback.format_exc(), silent=False)", "def verify_project(self, pool, project):\n svc = self.project_path % (pool, project)\n ret = self.rest_get(svc, restclient.Status.OK)\n return ret", "def is_project_created(path):\n project_id = None\n try:\n with open(\"%s%sproject\"\n % (path, os.sep)) as project_file:\n project_id = project_file.readline().strip()\n try:\n project_id = bigml.api.get_project_id(\n project_id)\n return True, project_id\n except ValueError:\n return False, None\n except IOError:\n return False, None", "def is_project(self, project):\n return self._projects_lookup.get(project, False)", "def create_project_if_necessary(ctx, org_name, project_name, ):\n org = cmd.get_one_organization_by_name(\n client=ctx.obj, organization_name=org_name)\n pprint(cmd.ensure_project(\n client=ctx.obj, project_name=project_name, organization_id=org.id))", "def test_check_ticket_8(self):\n self.tkt.phage_id = \"\"\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def _dummy(ticket):\r\n return True", "def is_pull_request(issue):\r\n return 'pull_request_url' in issue", "def test_check_ticket_9(self):\n self.tkt.type = \"add\"\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def test_not_authed_public_project(self):\n # Clear out existing project with ID=1 if necessary.\n Project.objects.filter(id=1).delete()\n locale = LocaleFactory.create(code='fakelocale')\n project = ProjectFactory.create(id=1, slug='valid-project', locales=[locale])\n ResourceFactory.create(project=project)\n\n response = self.client.get('/fakelocale/valid-project/')\n assert_equal(response.status_code, 200)\n # I'd assertTemplateUsed here but it doesn't work on 
non-DTL\n # templates.", "def _get_project_id(self, request):\n project_id = request.environ[\"masakari.context\"].project_id\n if project_id in request.url:\n return project_id\n return ''", "def is_project_in_the_response(projectComponent, response):\n for project in response:\n if response[project] == projectComponent:\n return True\n return False", "def test_check_ticket_3(self):\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set([\"Trixie\"]),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def project_in_vc(name):\n vc3_client = get_vc3_client()\n projects = vc3_client.listProjects()\n vc = vc3_client.getRequest(requestname=name)\n vc_owner_projects = []\n\n for project in projects:\n if vc.owner == project.owner:\n vc_owner_projects.append(project)\n\n for p in vc_owner_projects:\n if (session['name'] in p.members or session['name'] == p.owner):\n return True\n else:\n return False", "def has_valid_id(self):\n try:\n project_id = self.track.project.id\n except (OSError, AttributeError):\n return False\n pointer, name = self._get_pointer_and_name()\n return bool(RPR.ValidatePtr2(project_id, pointer, name))", "def project(request, proj_id=None, scenario_id=None):\n\n if proj_id:\n project = get_object_or_404(Project, id=proj_id)\n\n if project.user != request.user and project.is_private:\n raise Http404\n\n return render_to_response('home/home.html', get_context(request))", "def check_project_exists(self, project):\n session = self.session_factory()\n exists = session.query(PipelineRun).filter_by(project=project).first()\n session.close()\n if exists:\n return True\n return False", "def project_with_revision_exists(project_name, project_revision, working_dir):\n try:\n with open(working_dir + project_name + \".qpf\", \"r\") as project_file:\n for line in project_file:\n if f\"PROJECT_REVISION = \\\"{project_revision}\\\"\" in line:\n return True\n return False\n except FileNotFoundError:\n return False", "def test_check_ticket_2(self):\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set([1]), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def test_check_ticket_4(self):\n self.tkt.type = \"invalid\"\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def check_project_id(project_id):\n # Convert variable into a string\n project_id = str(project_id)\n # Replace Capital letters and spaces\n project_id = project_id.replace(\" \", \"-\").lower()\n\n # Throw an error if any known incorrect usages found\n try:\n if re.search(\"^-|[^a-z0-9-]|google|ssl|-$\", project_id):\n raise ValueError(\"Invalid characters or words in Project ID\")\n elif len(project_id) > 30:\n raise ValueError(\"Too many 
characters in Project ID\")\n elif len(project_id) < 6:\n raise ValueError(\"More Characters required in Project ID\")\n else:\n log.info(f\"Project Id {project_id} passed regex check\")\n project_outcome = {\n \"outcome\": True,\n \"project_id\": project_id\n }\n return project_outcome\n except ValueError as e:\n log.warning(f\"Proposed Id {project_id} violates known google policies: \"\n \"https://cloud.google.com/resource-manager/docs/creating-managing-projects\")\n project_outcome = {\n \"outcome\": False,\n \"project_id\": project_id\n }\n return project_outcome", "def test_check_ticket_7(self):\n self.tkt.eval_flags = {}\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def project_validated(name):\n vc3_client = get_vc3_client()\n # Grab project by name\n project = vc3_client.getProject(projectname=name)\n\n # Checks to see if user is in project\n if (session['name'] in project.members or\n session['name'] == project.owner):\n return True\n else:\n return False", "def git_has_object(project: Project, name: str) -> bool:\n ret = project.git(\"rev-parse\", \"--verify\", name, _ok_code=[0, 128])\n return ret.exit_code == 0", "def is_already_linked(ticket_id):\n exists_query = db.session.query(\n all_models.IssuetrackerIssue.issue_id\n ).filter_by(issue_id=ticket_id).exists()\n return db.session.query(exists_query).scalar()", "def test_get_project(self):\n pass", "def __get_project_version__(self):\n api = FortifyApi(self.ssc_server, token=self.token, verify_ssl=False)\n try:\n response = api.get_project_versions() # api should support a search expression here. alas...\n if response.success:\n for project_version in response.data['data']:\n if project_version['project']['name'] == self.application_name:\n if project_version['name'] == self.fortify_version:\n # we have a matching project version\n Logger.app.debug(\"Found existing project version {0}\".format(project_version['id']))\n return project_version['id']\n # Didn't find a matching project version, verify that our project exists\n for project_version in response.data['data']:\n if project_version['project']['name'] == self.application_name:\n # Our project exsits, so create a new version\n return self.__create_project_version__()\n # Let upload_scan know that our project doesn't exist\n return -2\n elif \"401\" in response.message:\n # Avoid printing error for invalid token. Return -1 to reauth\n return -1\n else:\n Logger.app.critical(\"Failed to get project version. {0}\".format(response.message))\n except Exception as e:\n Logger.app.critical(\"Exception trying to get project version. 
{0}\".format(e.message))\n\n return None", "def ensure_project(self, project_id):\n\n if not project_id:\n return\n\n # TODO(rkukura): It seems load_from_conf_options() and\n # keystoneclient auth plugins have been deprecated, and we\n # should use keystoneauth instead.\n if project_id not in self.project_names:\n if self.keystone is None:\n self._get_keystone_client()\n LOG.debug(\"Calling project API\")\n projects = self.keystone.projects.list()\n LOG.debug(\"Received projects: %s\", projects)\n for project in projects:\n self.project_names[project.id] = project.name", "def test_create_project_request(self):\n pass", "def test_create_ticket(self):\n with patch('requests.get') as mock:\n mock.return_value.status_code = 200\n pgt = ProxyGrantingTicket.objects.create_ticket('https://www.example.com', 'https://www.example.com/',\n user=self.user, granted_by_pt=self.pt)\n self.assertTrue(re.search(pgt.TICKET_RE, pgt.ticket))\n self.assertTrue(pgt.iou.startswith(pgt.IOU_PREFIX))", "def test_new_project_invalid_on_submit(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"created token\"]})\n self.assertEqual(len(mail.outbox), 3)\n\n fake_clients.identity_cache[\"projects\"] = {}\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(mail.outbox), 3)", "def test_missing_project(self):\n task = Task({\n 'name': 'test',\n 'id': 1,\n 'stage_id' : [1, 'name'],\n 'date_deadline': False,\n 'date_start': False,\n 'date_end': False,\n 'partial_messages': [{'date':'2018-10-21 12:00:00'}],\n 'kanban_state': 'blocked',\n 'planned_hours': 100,\n 'priority': '1'\n })\n self.assertIsNotNone(task)\n self.assertEqual(task.project, 'Not assigned to project')", "def create_experiment_if_needed(tr):\n exp = tr.getExperiment(EXPERIMENT_ID)\n if None == exp:\n create_project_if_needed(tr)\n exp = tr.createNewExperiment(EXPERIMENT_ID, 'DEFAULT_EXPERIMENT')\n \n return exp", "def _has_ist_project_manager(self, investment_project):\n project_manager = investment_project.project_manager\n return (\n project_manager\n and project_manager.dit_team\n and Team.Tag.INVESTMENT_SERVICES_TEAM in project_manager.dit_team.tags\n )", "def test_check_ticket_11(self):\n self.tkt.data_retrieve = {\"invalid\"}\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 
1)", "def test_get_project(self):\n self.assertEqual(self.remote_project.get_project(), self.project)", "def raise_jira_ticket(obj,org_id):\n try:\n app_id = obj.get('app_id','') \n vul_name = obj.get('vul_name','')\n cwe = int(obj.get('cwe',0))\n project_key = obj.get('project_key','')\n issuetype = obj.get('issuetype','Bug')\n assignee = obj.get('assignee')\n app_obj = Application.objects.get(pk=app_id)\n if app_id and vul_name:\n vuls = Vulnerability.objects.filter(is_false_positive=False,is_remediated=False,scan__application=app_obj,cwe=cwe,name=vul_name)\n jira_obj = JiraIssueTypes.objects.get(org__id=org_id)\n jira = get_jira_con(jira_obj) \n if jira and vuls.exists(): \n complete_desc = ''\n references = '' \n if app_obj:\n complete_desc += 'Application:\\n{0}\\n\\n'.format(app_obj.name)\n complete_desc += 'Application URL:\\n{0}\\n\\n'.format(app_obj.url)\n if cwe:\n complete_desc += 'CWE :\\n{0}\\n\\n'.format(cwe)\n org_obj = app_obj.org\n if org_obj.orl_config_exists():\n vul_info = get_open_vul_info_from_api(cwe,org_obj)\n complete_desc += 'Description:\\n{0}\\n\\n'.format(vul_info.get('description','')) \n if references:\n complete_desc += 'References:\\n{0}'.format(references) \n data_dict = {\n 'project':{'key':project_key },\n 'issuetype':{'name': issuetype},\n 'priority':{'name': 'Highest'},\n 'summary':vul_name,\n 'description':complete_desc, \n } \n new_issue = jira.create_issue(**data_dict) \n evids = VulnerabilityEvidence.objects.filter(vul__in=vuls) \n attachment = io.StringIO()\n attachment.write('Evidences') \n for evid in evids:\n data = '\\n\\t- {0}\\n\\t\\t- {1}'.format(evid.url,evid.name)\n attachment.write(data) \n jira.add_attachment(issue=new_issue, attachment=attachment, filename='evidences.txt') \n vuls.update(jira_id=str(new_issue),jira_issue_status=str(new_issue.fields.status))\n info_debug_log(event='Raise Jira ticket',status='success')\n if assignee:\n jira.assign_issue(new_issue,assignee)\n info_debug_log(event='Assign Jira ticket to an assignee',status='success')\n except BaseException as e:\n print(\"Error raising JIRA tickets\")\n # general_error_messages.delay(path='raise_jira_ticket function',msg=log_exception(e))\n critical_debug_log(event=e,status='failure')", "def test_request_membership_form_with_an_invalid_project_id(self):\n pass", "def get_ticket(self, wid, project, nowait=False):\n\n path = os.path.join(self.prjdir, project)\n q = WorkQueue(path)\n if nowait and not q.isquiet():\n # If we don't want to wait and there is Q, we'll just leave\n return\n\n if not q.add(json.dumps(wid.to_h(), sort_keys=True, indent=4)):\n # Marking the wid to be forgotten ensures it's not sent\n # back to BOSS, and the process blocks\n wid.forget = True\n else:\n wid.result = True", "def test_create_project_unknown_user(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': INVALID_UUID,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)", "def _check_project(self, was, willbe):\n self.BibTerm = os.path.normpath(\\\n os.environ['HomeDrive'] + os.environ['HomePath'] + '\\\\BibTerm')\n# messagebox.showwarning('self.BibTerm', \">{}<\".format(self.BibTerm))\n\n if willbe in 
self.list_projects:\n self.ddnCurProject.set(willbe)\n return True\n else:\n self.ddnCurProject.set(was)\n return False", "def test_user_can_create_a_project(self):\n self.assertEqual(project_model.Project.objects.get(user=self.test_user).pk, self.test_project.pk)", "def is_valid_project_id(project_id):\n return re.match(r'^(google.com:)?[a-z0-9\\-]+$', project_id)", "def _unique_build_request(buildername, revision):\n global SCHEDULING_MANAGER\n sch_mgr = SCHEDULING_MANAGER\n\n if is_downstream(buildername):\n return True\n else:\n if revision in sch_mgr and buildername in sch_mgr[revision]:\n LOG.info(\"We have already scheduled the build '%s' for \"\n \"revision %s during this session. We don't allow \"\n \"multiple requests.\" % (buildername, revision))\n return False\n else:\n if revision not in sch_mgr:\n sch_mgr[revision] = []\n\n sch_mgr[revision].append(buildername)\n return True", "def test_check_ticket_6(self):\n self.tkt.eval_mode = \"invalid\"\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def test_not_authed_nonpublic_project(self):\n # Clear out existing project with ID=1 if necessary.\n Project.objects.filter(id=2).delete()\n locale = LocaleFactory.create(code='fakelocale')\n project = ProjectFactory.create(id=2, slug='valid-project', locales=[locale])\n ResourceFactory.create(project=project)\n\n response = self.client.get('/fakelocale/valid-project/')\n assert_redirects(response, reverse('pontoon.home'))\n assert_equal(self.client.session['translate_error'], {'redirect': '/fakelocale/valid-project/'})", "def test_get_current(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_current()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"derrida\"] in qs\n assert projects[\"pliny\"] in qs\n assert projects[\"ocampo\"] in qs\n assert projects[\"slavic\"] not in qs", "def test_check_ticket_5(self):\n self.tkt.description_field = \"invalid\"\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def process_project(function):\n def wrapper(request, *args, **kwargs):\n from localground.apps.site.models import Project\n r = request.GET or request.POST\n cookies = request.COOKIES\n project = None\n user = request.user\n\n # inner method to get most recent project if default selection isn't\n # valid\n def get_default_project(user):\n projects = Project.objects.filter(\n owner=user).order_by('-time_stamp')\n if len(projects) > 0:\n return projects[0]\n return None\n\n # 1) if anonymous request, return error:\n if user.is_anonymous():\n return HttpResponse(json.dumps({\n 'code': 'failure',\n 'message': 'User cannot be anonymous'\n }))\n\n # 2) order matters (defer to request param before cookie)\n # or cookies.get('project_id_' + user.username)\n project_id = 
r.get('project_id')\n if project_id is not None:\n if project_id in ['all', 'all#', '']:\n project = None\n elif project_id in ['add', 'add#']:\n project_name = r.get('project_name', None)\n if project_name is None:\n project = None\n else:\n # create a new project\n from localground.apps.site.models import UserProfile\n import time\n profile = UserProfile.objects.get(user=request.user)\n project = Project()\n project.name = project_name\n project.owner = user\n project.slug = '%s-%s' % (project_name, int(time.time()))\n project.access_authority = profile.default_view_authority\n project.save()\n else:\n try:\n project = Project.objects.get(id=int(project_id))\n except ValueError:\n project = get_default_project(user)\n except Project.DoesNotExist:\n project = get_default_project(user)\n\n # is user authorized?\n # if project is not None and project.owner != user and not\n # user.is_superuser:\n if project.can_view(user) == False:\n return HttpResponse(json.dumps({\n 'code': 'failure',\n 'message': 'Not authorized to view information for %s'\n % project.name\n }))\n else:\n # if no project id defined, pick the most recently updated project:\n project = get_default_project(user)\n\n # 3) update kwargs dict to return 'identity' entry to calling function:\n if kwargs is None:\n kwargs = {}\n kwargs.update({'project': project})\n return function(request, *args, **kwargs)\n return wrapper", "def test_check_ticket_10(self):\n self.tkt.data_retain = {\"invalid\"}\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def test_returns_404_if_project_doesnt_exist(self):\n # Act\n response = self.client.get(\n \"/api/v2/projects/999/queries/notasks/\",\n headers={\"Authorization\": self.user_session_token},\n )\n self.assertEqual(response.status_code, 404)", "def test_returns_404_if_project_doesnt_exist(self):\n # Act\n response = self.client.get(\n \"/api/v2/projects/queries/999/similar-projects/\",\n headers={\"Authorization\": self.user_session_token},\n )\n self.assertEqual(response.status_code, 404)", "def checkNewProject(project_num, project_name):\n tr_table = str.maketrans('', '', ',;:\"\\'\\\\`~!%^#&{}|<>?*/')\n clean_name = project_name.translate(tr_table)\n clean_name = clean_name.strip('_ .\\t\\n-')\n if clean_name != project_name:\n logger.error(\"Name cannot contain special characters\")\n return False\n if len(clean_name) < 6:\n logger.error(\"Project name too short.\")\n return False\n\n return True", "def test_new_project_existing(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, 
format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})", "def _create_dummy_project(self,projectname=\"testproject\"):\n # Create three types of users that exist: Root, can do anything, \n # projectadmin, cam do things to a project he or she owns. And logged in\n # user \n \n #created in _create_main_project_and_root.\n root = self.root\n # non-root users are created as if they signed up through the project, \n # to maximize test coverage. \n \n # A user who has created a project\n projectadmin = self._create_random_user(\"projectadmin_\")\n \n testproject = self._create_comicsite_in_admin(projectadmin,projectname)\n create_page_in_admin(testproject,\"testpage1\")\n create_page_in_admin(testproject,\"testpage2\")\n \n # a user who explicitly signed up to testproject\n participant = self._create_random_user(\"participant_\")\n self._register(participant,testproject)\n \n # a user who only signed up but did not register to any project\n registered_user = self._create_random_user(\"comicregistered_\")\n \n #TODO: How to do this gracefully? \n return [testproject,root,projectadmin,participant,registered_user]", "def test_create_project_target_enabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)", "def check_project_name(struct, opts):\n if not opts['project'].startswith('pyscaffoldext-') and not opts['force']:\n raise InvalidProjectNameException\n\n if opts[\"package\"].startswith('pyscaffoldext_'):\n opts[\"package\"] = opts[\"package\"].replace(\"pyscaffoldext_\", \"\")\n\n return struct, opts", "def test_check_ticket_12(self):\n self.tkt.data_add = {\"invalid\"}\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def _ProjectIsRestricted(mr):\n return (mr.project and\n mr.project.access != project_pb2.ProjectAccess.ANYONE)", "def get_project(self):\n project_id = self.kwargs['project_id']\n try:\n project = Project.objects.get(pk=project_id)\n except ObjectDoesNotExist:\n raise ObjectNotFound('Not found')\n contributors = CustomUser.objects.filter(contributor__project=project.pk)\n if self.request.user not in contributors:\n raise ObjectNotFound('Not found')\n return project", "def get_project(self):\n project_id = self.kwargs['project_id']\n try:\n project = Project.objects.get(pk=project_id)\n except ObjectDoesNotExist:\n raise ObjectNotFound('Not found')\n contributors = CustomUser.objects.filter(contributor__project=project.pk)\n if self.request.user not in contributors:\n raise ObjectNotFound('Not found')\n return project", "def check_existing_project(\n self, description: str, project_name: str, project_url: str\n ) -> bool:\n\n checks = 
[{\"project_name\": project_name}, {\"description\": description}]\n if project_url != \"\":\n checks.append({\"project_url\": project_url})\n\n result = self.db.project.find_one({\"$or\": checks})\n\n if result:\n raise ExistingProjectError(\"Project Exists\")", "def setup_project(project_name):\n\n project_arn = ''\n for project in device_farm.list_projects()['projects']:\n if project['name'] == project_name:\n print('{} project already exists'.format(project_name))\n project_arn = project['arn']\n else:\n print(\n '{} project is not available, creating new one'.format(\n project_name\n )\n )\n project_arn = create_project(project_name)\n\n return project_arn\n\n raise KeyError('Problem finding project %r' % project_name)", "def _is_new_repo_generating(module_build, koji_session):\n if not module_build.new_repo_task_id:\n return False\n\n log.debug(\n 'Checking status of newRepo task \"%d\" for %s', module_build.new_repo_task_id, module_build)\n task_info = koji_session.getTaskInfo(module_build.new_repo_task_id)\n\n active_koji_states = [\n koji.TASK_STATES[\"FREE\"], koji.TASK_STATES[\"OPEN\"], koji.TASK_STATES[\"ASSIGNED\"]]\n\n return task_info[\"state\"] in active_koji_states", "def get_project_id():\n return os.environ.get('project')", "def test_public_pending_exists(self):\n self.change_status(self.version_1_2_2, amo.STATUS_PENDING)\n self.change_status(self.version_1_2_0, amo.STATUS_PENDING)\n self.change_version(self.version_1_2_0, '1.2beta')\n\n version, file = self.get('1.2', self.version_int,\n self.app, self.platform)\n\n assert version == self.version_1_2_1", "def test_create_project_target_disabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 403, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)", "def __get_project_id(self):\n request = urllib2.Request(self.host_api+\"projects?owner=\"+urllib2.quote(self.owner)+\"&display_name=\"+urllib2.quote(self.project_name))\n # request = urllib2.Request(self.host_api+\"projects?owner=\"+self.owner+\"&display_name=Galaxy%20Zoo%20Bar%20Lengths\")\n # print hostapi+\"projects?owner=\"+owner+\"&display_name=\"+project_name\n request.add_header(\"Accept\",\"application/vnd.api+json; version=1\")\n request.add_header(\"Authorization\",\"Bearer \"+self.token)\n\n # request\n try:\n response = urllib2.urlopen(request)\n except urllib2.HTTPError as e:\n print self.host_api+\"projects?owner=\"+self.owner+\"&display_name=\"+self.project_name\n print 'The server couldn\\'t fulfill the request.'\n print 'Error code: ', e.code\n print 'Error response body: ', e.read()\n except urllib2.URLError as e:\n print 'We failed to reach a server.'\n print 'Reason: ', e.reason\n else:\n # everything is fine\n body = response.read()\n\n # put it in json structure and extract id\n data = json.loads(body)\n return data[\"projects\"][0][\"id\"]", "def is_bug_open(cls, bug_id):\n config = LaunchpadTrackerConfig()\n log = logging.getLogger('RunnerLog')\n launchpad = LaunchpadClient()\n\n resp = launchpad.get_bug_tasks(bug_id)\n if resp.status_code == 404:\n log.info('Couldn\\'t find bug with ID {0}'.format(bug_id))\n\n tasks = 
resp.model or []\n for bug_task in tasks:\n if bug_task.bug_target_name == config.project:\n return bug_task.status not in ('Fix Committed', 'Fix Released')\n\n log.info('Bug does not affect project {0} '\n 'or project name is not correct.'.format(config.project))\n return False", "def test_returns_challenging_projects_if_difficulty_set_to_changelling(self):\n # Arrange\n self.test_project_2.private = False\n # Set difficulty of test_project_2 to hard.\n self.test_project_2.difficulty = ProjectDifficulty.CHALLENGING.value\n self.test_project_2.save()\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"difficulty\": \"CHALLENGING\"},\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)\n self.assertEqual(\n response.json[\"results\"][0][\"projectId\"], self.test_project_2.id\n )", "def test_create_project_from_template(self):\n project_new = self.project_template.take_template()\n\n self.assertTrue(project_new)", "def test_project_id_detection(self):\n tests = {\n 2907852: self.url_parsing_tests['valid'],\n None: self.url_parsing_tests['invalid'],\n }\n \n for basecamp_id, test_cases in tests.items():\n for case in test_cases:\n actual = Project.extract_basecamp_id(case)\n msg = \"%s != %s Test case: %s\" % (basecamp_id, actual, case)\n self.assertEqual(basecamp_id, actual, msg)", "def check_if_project_can_be_created_or_updated(project):\n if project.owner is None:\n return {'can_be_updated': False, 'reason': ERROR_PROJECT_WITHOUT_OWNER}\n\n if project.is_private:\n current_projects = project.owner.owned_projects.filter(is_private=True).count()\n max_projects = project.owner.max_private_projects\n error_project_exceeded = _(\"You can't have more private projects\")\n\n current_memberships = project.memberships.count() or 1\n max_memberships = project.owner.max_memberships_private_projects\n error_memberships_exceeded = _(\"This project reaches your current limit of memberships for private projects\")\n else:\n current_projects = project.owner.owned_projects.filter(is_private=False).count()\n max_projects = project.owner.max_public_projects\n error_project_exceeded = _(\"You can't have more public projects\")\n\n current_memberships = project.memberships.count() or 1\n max_memberships = project.owner.max_memberships_public_projects\n error_memberships_exceeded = _(\"This project reaches your current limit of memberships for public projects\")\n\n if max_projects is not None and current_projects >= max_projects:\n return (False, error_project_exceeded)\n\n if max_memberships is not None and current_memberships > max_memberships:\n return (False, error_memberships_exceeded)\n\n return (True, None)", "def test_check_ticket_1(self):\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n with self.subTest():\n self.assertEqual(len(self.tkt.evaluations), 12)\n with self.subTest():\n self.assertEqual(count, 0)", "def test_project_reader(project):\n if is_server_administrator():\n return True\n if is_project_administrator(project):\n return True\n if is_project_writer(project):\n return True\n if is_project_reader(project):\n return True\n return False", "def 
test_ticket_type_add_error_already_exists(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type add defect')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def release_ticket(self, wid, project):\n\n path = os.path.join(self.prjdir, project)\n q = WorkQueue(path)\n\n head_wi = Workitem(q.head())\n if head_wi.wfid != wid.wfid:\n self.log.info(\"OUCH ... released the wrong lock\")\n\n try:\n next_wid = Workitem(q.next())\n next_wid.result = True\n # Implementation is a bit convoluted but this just sends\n # the WI from the stack to BOSS\n self.send_to_engine(next_wid)\n except QueueEmpty:\n # That's OK, there's nothing waiting\n pass\n wid.result = True", "def hasMentorProjectAssigned(profile, org_key=None):\n query = project_model.GSoCProject.all()\n query.filter('mentors', profile.key.to_old_key())\n\n if org_key:\n query.filter('org', org_key.to_old_key())\n\n return query.count() > 0", "def getFeaturedProject(current_timeline, program):\n # expiry time to fetch the new featured project entity\n # the current expiry time is 2 hours.\n expiry_time = datetime.timedelta(seconds=7200)\n\n def queryForProject():\n query = project_model.GSoCProject.all()\n query.filter('is_featured', True)\n query.filter('program', program)\n if current_timeline == 'coding_period':\n project_status = project_model.STATUS_ACCEPTED\n else:\n project_status = 'completed'\n query.filter('status', project_status)\n return query\n\n q = queryForProject()\n\n # the cache stores a 3-tuple in the order student_project entity,\n # cursor and the last time the cache was updated\n fsp_cache = memcache.get('featured_gsoc_project' + program.key().name())\n\n if fsp_cache:\n cached_project, cached_cursor, cache_expiry_time = fsp_cache\n if not datetime.datetime.now() > cache_expiry_time + expiry_time:\n return cached_project\n else:\n q.with_cursor(cached_cursor)\n if q.count() == 0:\n q = queryForProject()\n\n new_project = q.get()\n new_cursor = q.cursor()\n memcache.set(\n key='featured_gsoc_project',\n value=(new_project, new_cursor, datetime.datetime.now()))\n\n return new_project", "def get_project(db, id):\n \n for element in db:\n if element['project_no'] == id:\n return element\n return None", "def _is_ticketing_handled(self, regform, **kwargs):\n return regform.cern_access_request is not None and regform.cern_access_request.is_active", "def test_projects_id_get(self):\n response = self.client.open('/project-tracker/projects/{id}'.format(id=56),\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_registered_user_can_create_project(self):\n user = self._create_user({\"username\":\"user2\",\"email\":\"[email protected]\"})\n testproject = self._create_comicsite_in_admin(user,\"user1project\") \n testpage1 = create_page_in_admin(testproject,\"testpage1\")\n testpage2 = create_page_in_admin(testproject,\"testpage2\")\n \n self._test_page_can_be_viewed(user,testpage1)\n self._test_page_can_be_viewed(self.root,testpage1)", "def test_activate_not_existing(client, auth_token):\n # When\n response = client.post(\"/projects/%s\" % 789,\n data={'active': False},\n headers={'token': auth_token},\n follow_redirects=True)\n\n # Then\n assert 404 == response.status_code", "def test_public_pending_not_exists(self):\n self.change_status(self.version_1_2_0, amo.STATUS_PENDING)\n self.change_version(self.version_1_2_0, '1.2beta')\n self.change_status(self.version_1_2_2, amo.STATUS_BETA)\n\n 
version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1", "def test_get_past(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_past()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"slavic\"] in qs\n assert projects[\"derrida\"] not in qs\n assert projects[\"pliny\"] not in qs\n assert projects[\"ocampo\"] not in qs", "def test_create_project(self):\n yield self.nodes[0].overlay.create_project(\"test\", \"specpointer\", \"01-02-03\", 300, \"EUR\", 5)\n yield self.deliver_messages()\n\n # Node 2 should know about this app request now\n projects = self.nodes[1].overlay.persistence.get_projects()\n self.assertTrue(projects)\n self.assertEqual(projects[0]['id'], 1)", "def can_install_project(self):\n return True", "def find_project_for_story(story_id):\n\n for project in Project.all():\n story = project.load_story(story_id)\n if story is not None:\n return project\n\n #Not found\n print \"No project found for story: #{}\".format(story_id)\n return None", "def exists(cls, ticket_id):\n ticket_id = int(ticket_id)\n ticket = DB_TICKET_TABLE.get(doc_id=ticket_id)\n if not ticket:\n raise ValueError(f\"unknown ticket '{ticket_id}'\")\n return ticket_id", "def is_project_lead(user, project_id):\n if user.id:\n project = models.Project.objects.get(pk=project_id)\n return user in project.project_leads_as_users", "def check_name_is_avail(self):\n if utils.project_name_exists(self.proj_name):\n raise Exception(\"Project name `{}` already in use.\"\n .format(self.proj_name))", "def find_project_for_story(story_id):\r\n\r\n for project in Project.all():\r\n story = project.load_story(story_id)\r\n if story is not None:\r\n return project\r\n\r\n #Not found\r\n print \"No project found for story: #{}\".format(story_id)\r\n return None", "async def ticket(self, ctx, ticketpanel_name: str):\n licence_id = servers.get_licence_id(ctx.guild.id)\n ticketpanel: Ticketpanel = await Ticketpanel.query.where(Ticketpanel.name == ticketpanel_name).where(Ticketpanel.licence_id == licence_id).gino.first()\n\n if not ticketpanel:\n embed: Embed = settings.get_ticket_error_embed()\n embed.description = f\"\\nTicketPanel called **{ticketpanel_name}** doesnt exist\\n\"\n embed.set_footer(text=embed.footer.text,\n icon_url=self.bot.user.avatar_url)\n await ctx.send(embed=embed)\n return\n\n embed : Embed = settings.get_ticket_panel_embed()\n embed.description = ticketpanel.description\n embed.set_footer(text=embed.footer.text,\n icon_url=self.bot.user.avatar_url)\n await ctx.message.delete()\n message = await ctx.send(embed=embed)\n await message.add_reaction(settings.get_ticket_create_emoji())", "def _check_version(self, project, targetdir):\r\n versionfile = os.path.join(targetdir, 'project.version')\r\n if (os.path.exists(versionfile)):\r\n file_ = open(versionfile, \"r\")\r\n projectname = file_.read().strip()\r\n file_.close()\r\n if (projectname == project.objectname):\r\n return True\r\n return False", "def _ensure_project(self, c_params: CommonParams) -> Squonk2AgentRv:\n assert c_params\n assert isinstance(c_params, CommonParams)\n \n target_access_string = self._get_target_access_string(c_params.access_id)\n assert target_access_string\n\n # A Squonk2Unit must exist for the Target Access String.\n rv: Squonk2AgentRv = self._ensure_unit(target_access_string)\n if not rv.success:\n return 
rv\n unit: Squonk2Unit = rv.msg\n\n user_name: str = self._get_user_name(c_params.user_id)\n target_title: str = self._get_target_title(c_params.target_id)\n assert user_name\n assert target_title\n\n _, name_full = self._build_product_name(user_name, target_title)\n sq2_project: Optional[Squonk2Project] = Squonk2Project.objects.filter(name=name_full).first()\n if not sq2_project:\n msg = f'No existing Squonk2Project for \"{name_full}\"'\n _LOGGER.info(msg)\n # Need to call upon Squonk2 to create a 'Product'\n # (and corresponding 'Product').\n rv = self._create_product_and_project(unit, user_name, target_title, c_params)\n if not rv.success:\n msg = f'Failed creating AS Product or DM Project ({rv.msg})'\n _LOGGER.error(msg)\n return rv\n\n # Now record these new remote objects in a new\n # Squonk2Project record. As it's worked we're given\n # a dictionary with keys \"sq2_project_uuid\" and \"sq2_product_uuid\"\n sq2_project = Squonk2Project(uuid=rv.msg['sq2_project_uuid'],\n name=name_full,\n product_uuid=rv.msg['sq2_product_uuid'],\n unit_id=unit.id)\n sq2_project.save()\n msg = f'Created NEW Squonk2Project for {sq2_project.uuid} \"{name_full}\"'\n _LOGGER.info(msg)\n else:\n msg = f'Squonk2Project for {sq2_project.uuid} \"{name_full}\" already exists - nothing to do'\n _LOGGER.debug(msg)\n\n return Squonk2AgentRv(success=True, msg=sq2_project)", "def get_current_project():\n return get_from_session(KEY_PROJECT)", "def get_or_create_project(group, project_label):\n\n print(f\"Looking for prject.label {project_label}\")\n projects = group.projects.find(f\"label={project_label}\")\n if len(projects) > 0:\n print(f\"Found it.\")\n project = projects[0]\n print(f\"project.label {project.label}\")\n print(f\"project.id {project.id}\")\n else:\n print(\"Project not found - Creating it.\")\n project = group.add_project(label=f\"{project_label}\")\n print(f\"project.label {project.label}\")\n print(f\"project.id {project.id}\")\n return project", "def add_project(self, project=None):\n is_project = type(project) is Project\n id_exists = project.client_id in [c.client_id for c in self.client_list]\n pid_exists = project.project_id() in [p.project_id() for p in self.project_list]\n\n # cancel if it's no project or the client_id does not exist\n # or the project_id already exists\n if not is_project or not id_exists or pid_exists:\n return False\n\n # add the project\n self.project_list.append(project)\n self.save_project_to_file(project=project)\n return True" ]
[ "0.60784185", "0.584883", "0.56591004", "0.55432487", "0.5472561", "0.5453476", "0.5446431", "0.54430735", "0.54305506", "0.54244524", "0.54243267", "0.5412555", "0.53918374", "0.5391034", "0.5389106", "0.5382748", "0.53724587", "0.53702873", "0.535847", "0.5357751", "0.53424674", "0.53350586", "0.5321972", "0.53187186", "0.5315832", "0.5315102", "0.5314675", "0.53119636", "0.53095293", "0.52856845", "0.5279757", "0.5274046", "0.5246909", "0.5246518", "0.5234469", "0.5230948", "0.52246577", "0.52209985", "0.52158105", "0.5210895", "0.5207707", "0.520523", "0.5201854", "0.51936924", "0.51875573", "0.51732135", "0.51683855", "0.5164494", "0.5162615", "0.5159114", "0.5152303", "0.51473475", "0.5146761", "0.5136245", "0.5127596", "0.51256835", "0.51152205", "0.51031524", "0.509327", "0.5075995", "0.50662154", "0.50662154", "0.50621516", "0.5061232", "0.50577503", "0.5045041", "0.5042336", "0.5038691", "0.50308067", "0.5027862", "0.5025124", "0.5021086", "0.5016021", "0.50118583", "0.4998656", "0.49933022", "0.49904573", "0.4988181", "0.49798977", "0.4977591", "0.49750412", "0.49743244", "0.49728924", "0.49720138", "0.4970797", "0.49686468", "0.49613625", "0.49611405", "0.4959794", "0.49552444", "0.49548483", "0.49498746", "0.4946578", "0.49403897", "0.49396452", "0.4932628", "0.49234283", "0.4917688", "0.49109587", "0.4909565" ]
0.7632334
0
Formats id and subject into a suitable (Freshbooks) title.
def format_title(self, ticket_id, subject):
    # TODO: strip block tags?
    title = "#%i %s" % (ticket_id, subject)
    return title.strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def format_title(self, data):\n return data", "def get_title(self):\n return \"{id}@{hn}\".format(id=self.model.identity, hn=self.model.hostname)", "def get_title_by_id(id):\n\n # your code", "def get_title():", "def getMITItemTitle(self,xc,item,id):\n \n titles = xc.xpathEval(\"mitcp:title\")\n title = ''\n if titles:\n title = titles[0].getContent()\n else:\n title = id\n\n return title", "def make_title(words):", "def format_filename(title: str, id: Any, ext: str = \".\", dirFormat=None):\r\n ...", "def dc_title(self):\n return u\"{0} ({1}): {2} {3}\".format(\n self.label, self.in_assessment[0].timepoint,\n self.subjects[0].code_in_study,\n \"...\" if len(self.subjects) > 1 else \"\")", "def numbered_title(self):\n return f\"{self.title}\"", "def numbered_title(self):\n return f\"{self.title}\"", "def Title(self, **kwargs):\n full_name = ''\n if self.getFirstname() == '' or self.getLastname() == '':\n if not self.getOrganization():\n return '...'\n else:\n return self.getOrganization()\n format = kwargs.get('format', None)\n if format == 'natural':\n full_name = '%s %s' % (self.getFirstname(), self.getLastname())\n else:\n full_name = '%s %s' % (self.getLastname(), self.getFirstname())\n return '%s' % full_name", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def _update_title(self, title, tag, lid):\n return title", "def get_title(self):\n if not hasattr(self, '_title'):\n self._title = 'NO TITLE'\n if self._title:\n title = _(self._title)\n title = title.replace('&', '&amp;') \n title = title.replace('\"', '&quot;')\n return title\n else:\n return u''", "def _generate_title_description(psap_id, title, description):\n if description is None:\n description = PersistentFields.get_description(psap_id)\n else:\n PersistentFields.set_description(psap_id, description)\n if title is None:\n title = PersistentFields.get_title(psap_id)\n else:\n PersistentFields.set_title(psap_id, title)\n\n return title, description", "def get_challenge_name_and_id(self, obj):\n return \"%s - %s\" % (obj.challenge.title, obj.challenge.id)", "def get_challenge_name_and_id(self, obj):\n return \"%s - %s\" % (obj.challenge.title, obj.challenge.id)", "def inject_title(self,template,title):\n return re.sub('TITLE',title,template)", "def name_with_title(self):\n return \"%s %s\" % (self.title, self.name)", "def __str__(self):\n return f\"{self.id}: {self.title}\"", "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "def format_title(self, title):\n new_title = ''.join(word.lower().strip('!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ ') for word in title)\n return new_title", "def __str__(self):\n return \"ID {0:25} | Name: {1} \\n\".format(self.id, self.title)", "def title_by_id(id_: int) -> Any:\n post = Posts.query.filter_by(id=id_).first()\n if post is None:\n return \"404\"\n return post.title", "def getTitle(self, item):\n return item.Title() or item.getId()", "def 
title(self, obj):\n return str(obj)", "def get_title(self) -> str:\n pass", "def numbered_title(self):\n return f\"Chapter {self.title}\"", "def get_title_by_id_from_table(table, id):\n\n # your code", "def numbered_title(self):\n return f\"Appendix {self.chapter}. {self.title}\"", "def _format_id(ns, id):\n label = '%s:%s' % (ns, id)\n label = label.replace(' ', '_')\n url = get_identifiers_url(ns, id)\n return (label, url)", "def create_title(title, year=None, time_step=None, base=0, interval=None,\n gage=None, m=None, h=None):\n if type(gage) is list or type(gage) is tuple:\n title = title + ' at listed gages'\n elif gage is not None:\n title = title + ' at '+ gage\n \n if m is not None:\n title = title + ' for Month {mo} of'.format(mo=m)\n elif h is not None:\n title = title + ' for Hour {ho} of'.format(ho=h) \n elif interval is 'seasonal':\n title = title + ' for Months of'\n elif interval is 'diurnal':\n title = title + ' for Hours of'\n if time_step is not None:\n ts = time_step.replace('min', ' minute').replace('T', ' minute').replace('H', ' hour').replace('D', ' day')\n title = title.format(ts=ts)\n if year is not None:\n title = title +' '+ year\n return title", "def get_title(self, qid):\n return self._entity_symbols.get_title(qid)", "def get_detail_title(soort, edit, obj):\n naam_ev = get_names_for_type(soort)[0]\n if edit == 'new':\n return _('Nieuw(e) ') + str(naam_ev)\n try:\n title = \" \".join((naam_ev.capitalize(), obj.naam))\n except AttributeError:\n title = \" \".join((naam_ev.capitalize(), obj.nummer))\n return title", "def get_full_name_with_academic_title(self) -> str:\n base_name = super().get_full_name()\n return f'{self.title} {base_name}' if self.title else base_name", "def __str__(self):\n return \"{title}\".format(title=self.title)", "def dict2title(title):\n L = title\n y = ''.join(dictionary[int(i)] + \" \" for i in L if i > 0)\n y=y.replace('\"', \"\")\n return y", "def setTitle(self, meta):\n\n title = ''\n try:\n title += meta['date'] + ' '\n except KeyError:\n pass\n try:\n title += meta['time'] + ' '\n except KeyError:\n pass\n try:\n title += meta['trial']\n except KeyError:\n pass\n\n meta['title'] = title.strip()", "def update_title_only(self, title, incident_id):\n self.cursor.execute(\"\"\"UPDATE incidents SET title='%s' WHERE incident_id='%s'\"\"\"%(title, incident_id))\n self.commiting()", "def title_or_id(context):\n title = getattr(context, 'title', '')\n if not title:\n if hasattr(context, '__name__'):\n title = getattr(context, '__name__', '')\n elif hasattr(context, 'getId'):\n title = context.getId()\n return title", "def _title(hit: DD) -> str:\n return hit[\"_source\"][\"title\"]", "def __str__(self):\n return '%s, %s: %s' % (self.first_author, self.year, self.title)", "def get_title(text, uuid=None):\n if uuid is not None:\n text += get_provenance_link(uuid)\n title = pn.Row(pn.pane.HTML('<h2>{}</h2>'.format(text)), align='start')\n\n return title", "def fetch_title(self, movie_id):\n movie = tmdbsimple.Movies(movie_id)\n request = movie.info()\n\n return movie.title", "def print_title(title):\n print \"\\n\"+\"#\"*32+\"\\n# \"+title+\"\\n\"+\"#\"*32+\"\\n\"", "def title(self):\n strng = \"\"\n if self.type:\n strng = self.type\n if self.server:\n if self.status:\n strng = \"%s\" % (strng)\n if not strng:\n strng = \"Error\"\n strng = \"%s on %s\" % (strng, self.server)\n elif self.status:\n strng = self.status\n if self.server:\n strng = \"%s on server %s\" % (strng, self.server)\n elif self.raw:\n strng = self.raw\n else:\n strng = 
self.error_timestamp.isoformat()\n if self.uid:\n strng = \"%s\" % (strng)\n return strng", "def job_title(self, job):\n def _format_num(num):\n if isinstance(num, bool):\n return str(num)\n elif isinstance(num, Real):\n return str(round(num, 2))\n return str(num)\n\n try:\n s = []\n for keys in sorted(self._schema_variables()):\n v = job.statepoint()[keys[0]]\n try:\n for key in keys[1:]:\n v = v[key]\n except KeyError: # Particular key is present in overall\n continue # schema, but not this state point.\n else:\n s.append('{}={}'.format('.'.join(keys), _format_num(v)))\n return ' '.join(s)\n except Exception as error:\n logger.debug(\n \"Error while generating job title: '{}'. \"\n \"Returning job-id as fallback.\".format(error))\n return str(job)", "def generate_article_title(art_type: str, period: str, begin_date):\n if art_type == \"dagartikel\":\n # build the title for the article type 'dagartikel'\n title = f\"Beurs update {begin_date.strftime('%d %b')}\"\n\n elif art_type == \"weekartikel\":\n # build the title for the article type 'weekartikel'\n title = f\"Beurs weekoverzicht week {begin_date.isocalendar()[1]}\"\n\n elif art_type == \"maandartikel\":\n # build the title for the article type 'maandartikel'\n title = f\"Beurs maandoverzicht {period}\"\n\n return title", "def getFullCourseTitle(self, brain):\n full_title = ''\n\n id = brain.getCourseId\n if id:\n full_title = '%s - ' %id\n full_title += brain.Title\n term = brain.getTerm\n if term:\n full_title += ', %s' %term\n\n return full_title", "def update_card_title():\n\n card_id = request.get_json()['card_id']\n card_title = request.get_json()['card_title']\n\n return sql_manager.update_card_title(card_id, card_title)", "def make_fit_title(self, trials, fid=None, hypo=None,\n fhkey=None, begin_center=False):\n fittitle = \"\"\n if begin_center:\n fittitle += r\"\\begin{center}\"\n if hasattr(self, 'labels'):\n if self.labels.dict['data_name'] == '':\n fittitle += \"Data, \"\n else:\n fittitle += \"True %s, \"%self.labels.dict['data_name']\n if ((fid is not None) and (hypo is not None)) and (fhkey is not None):\n raise ValueError(\n \"Got a fid, hypo and fhkey specified. 
Please use fid \"\n \"and hypo OR fhkey (from which fid and hypo will be \"\n \"extracted) but not both.\"\n )\n if fid is not None:\n fittitle += \"Fiducial Fit %s, \"%self.labels.dict['%s_name'%fid]\n if hypo is not None:\n if hypo == 'both':\n fittitle += \"Both (%s/%s) Hypotheses \"%(\n self.labels.dict['h0_name'], self.labels.dict['h1_name'])\n else:\n fittitle += \"Hypothesis %s \"%self.labels.dict['%s_name'%hypo]\n if fhkey is not None:\n hypo = self.get_hypo_from_fiducial_hypo_key(fhkey=fhkey)\n fid = self.get_fid_from_fiducial_hypo_key(fhkey=fhkey)\n fittitle += \"Fiducial Fit %s, \"%self.labels.dict['%s_name'%fid]\n fittitle += \"Hypothesis %s \"%self.labels.dict['%s_name'%hypo]\n if trials is not None:\n fittitle += \"(%i Trials)\"%trials\n fittitle += r\"\\end{center}\"\n return fittitle", "def get_title(portobjlist):\n #fetch_title(portobjlist)\n fetch_title(portobjlist)", "def _make_title(self, ind):\n start = self.df_event_time.loc[ind, 'time']\n date = np.datetime_as_string(start.astype('<M8[ns]'), unit='s')\n start_ns = start - (start // 10**9) * 10**9\n end = self.df_event_time.loc[ind, 'endtime']\n end_ns = end - start + start_ns\n return ''.join((f'##Event {ind} from run {self.run_id}\\n',\n f'##Recorded at ({date[:10]} {date[10:]}) UTC ',\n f'{start_ns} ns - {end_ns} ns'))", "def print_title(obj: object) -> None:\n title = f'Object \"{obj.__str__()}\" details'\n num_of_fill_symbols = (DEFAULT_LENGTH - 4 - len(title)) // 2\n full_title = f\"{'#'*num_of_fill_symbols} {title} {'#'*num_of_fill_symbols}\"\n if len(full_title) < DEFAULT_LENGTH:\n full_title += '#'*(DEFAULT_LENGTH - len(full_title))\n print(full_title)", "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "def ftitle(self, text):\n return \"{} - {}\".format(self._app_name, text)", "def get_title_by_id(id):\n\n sales_data = data_manager.get_table_from_file(\"sales/sales.csv\")\n for line in sales_data:\n if line[ID] == id:\n return line[TITLE]\n return None", "def format_subject(self, notice):\n return \"PDR Notice: {0}: {1}\".format(notice.type, notice.title)", "def formatName(self):\r\n return self.title.getVal() + \" \" + self.first.getVal() + \" \" + self.last.getVal()", "def get_descriptive_name(self):\r\n long_name=str(self.year)+' '+self.make+' '+self.model\r\n return long_name.title()", "def get_descriptive_name(self):\r\n long_name = str(self.year)+' '+self.make + ' '+self.model\r\n return long_name.title()", "def html_title(title):\n return '<center><h1>%s</h1></center>' % (title)", "def tv_tropes_id(title):\n pass", "def complete_alt_title(self, obj):\n return str(obj)", "def printable(title, subtitle=None, resp=None):\n title = getfirst(title)\n subtitle = getfirst(subtitle)\n resp = getfirst(resp)\n if subtitle:\n title += \" : \" + subtitle\n if resp:\n title += \" / \" + resp\n return title", "def get_title(self, entry):\n title = _('%(title)s (%(word_count)i words)') % \\\n {'title': entry.title, 'word_count': entry.word_count}\n return title", "def __str__(self):\n return f'{self.id} ({self.book.title})'", "def _generate_title(cls, ca_type):\n special_chars = string_utils.SPECIAL\n return append_random_string(\n \"{}_{}_\".format(ca_type, random_string(\n size=len(special_chars), chars=special_chars)))", "def get_descriptive_name(self):\n return f\"{self.year} {self.make} {self.model}\".title()", "def __str__(self):\n return '%s (%s)' % (self.id,self.book.title)", "def get_title(self):\n title = self.title\n if not title and self.parent_id:\n title = self.parent.title\n 
return title", "def __calculate_title(video_data):\n title = 'Unknown'\n if 'fulltitle' in video_data.keys():\n title = video_data['fulltitle']\n elif 'title' in video_data.keys():\n title = video_data['title']\n elif '_filename' in video_data.keys():\n title = video_data['_filename']\n return title", "def TitlePrint(title):\n titleLength = len(title)\n barLength = titleLength + 12\n fmtdTitle = '----- {0} -----'.format(title)\n bar = '-' * barLength\n print(bar, fmtdTitle, bar,\n sep='\\n', end='\\n\\n')", "def eidr_identifier(title):\n pass", "def getTitle(self):\n\n # print(self.soupObject.title.string)\n try:\n s = self.soupObject.find(\"meta\", attrs={\"name\": \"twitter:title\"})\n self.title = str(s['content'])\n self.title = self.title.replace(\"/\", \"\")\n self.title = self.title.strip()\n if not self.title:\n s = int(\"deliberateError\")\n\n # except\n except:\n self.title = \"Amazonsubtitles\"\n\n pass", "def __str__(self):\n return '{0} ({1})'.format(self.id, self.book.title)", "def title(self) -> str:\n\t\t# pylint: disable=unsubscriptable-object\n\t\treturn self.value[1]", "def _title(self, path):\n title = os.path.basename(os.path.splitext(path)[0])\n return title", "def try_create_uniqe_title(self,title,plan_id):\n if self.valid_title(title):\n for i in range (1,20):\n new_title=title+\"_\"+str(i)\n if self.unique_title(new_title,plan_id):\n return new_title\n return False\n else:\n return False", "def title(self):\n return ' '.join(self._title)", "def title(self):\n # Use the first line of the articles text as title, if not title\n # exists.\n title = self._text[:min(32, self._text.find(\"\\n\"))]\n return title", "def get_title(self, region, namespace, id, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/title/{0}', region, *[id], **filters)", "def html_title(self, title=None):\r\n if title is None:\r\n return \"<title>PyBossa</title>\"\r\n else:\r\n return \"<title>PyBossa &middot; %s</title>\" % title", "def getTitle(movieInfo):\n if \"title\" in movieInfo:\n #We remove the punctuation\n title = \"\".join(c for c in movieInfo[\"title\"] if c not in punctuation)\n #We return the title as a list of words in the right format\n return [ _format(w) for w in title.split() ]\n else:\n raise AttributeError(\"%s instance has no attribute title\" % movieInfo)", "def __init__(self,\n id = 'abbrev',\n title = \"Abbreviation Bibliography ID Cooker\"):\n self.id = id\n self.title = title", "def updateTitle(self):\n proprietary = ''\n proprietaries = [pro for pro in self.getProprietaries()\n if api.content.get_state(pro) == 'enabled']\n if proprietaries:\n proprietary = ', '.join([prop.Title() for prop in proprietaries])\n AD_refs = self.getFormal_notice_old_reference()\n title = \"{}{}{} - {}\".format(\n self.getReference(),\n AD_refs and ' - {}'.format(AD_refs) or '',\n proprietary and ' - {}'.format(proprietary) or '',\n self.getLicenceSubject()\n )\n self.setTitle(title)\n self.reindexObject(idxs=('Title', 'sortable_title',))", "def format_title(input_str):\n title_mapping = {'PD_whole_tree': 'Phylogenetic Diversity'}\n\n if input_str in title_mapping:\n return title_mapping[input_str]\n else:\n return ' '.join(map(lambda e: e[0].upper() + e[1:],\n input_str.split('_')))", "def sortable_title(portal, title):\n if not title:\n return ''\n\n def_charset = portal.plone_utils.getSiteEncoding()\n sortabletitle = title.lower().strip()\n # Replace numbers with zero filled numbers\n sortabletitle = num_sort_regex.sub(zero_fill, sortabletitle)\n # 
Truncate to prevent bloat\n for charset in [def_charset, 'latin-1', 'utf-8']:\n try:\n sortabletitle = unicode(sortabletitle, charset)[:30]\n sortabletitle = sortabletitle.encode(def_charset or 'utf-8')\n break\n except UnicodeError:\n pass\n except TypeError:\n # If we get a TypeError if we already have a unicode string\n sortabletitle = sortabletitle[:30]\n break\n return sortabletitle", "def title(self, title):\n\t\tself.head += '<title>' + title + '</title>\\n'", "def set_title(audio: EasyID3, title: str):\r\n audio['title'] = title\r\n audio.save()", "def get_title(self, obj):\n title = obj.habit.title\n return title", "def get_descriptive_name(self):\r\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\r\n return long_name.title()", "def GetTitle(self):\n return str(self.title)", "def get_title(fn):\n title = fn.name if hasattr(fn, 'name') else fn.__name__\n title = title.replace('_cut_function','')\n suffix = []\n # if 'JetsAK15_subleading_' in title:\n # suffix.append(r'$j^{\\mathrm{AK15}}_{\\mathrm{subl}}$')\n title = title.replace('JetsAK15_subleading_', '').replace('subleading_', '')\n if hasattr(fn, 'left'):\n suffix.append('({:.0f} < {} < {:.0f})'.format(fn.left, svjflatanalysis.utils.get_title('mt'), fn.right))\n # Transform variable name to title stirng\n title = svjflatanalysis.utils.get_title(title)\n if hasattr(fn, 'operator'):\n title += ' ' + fn.operator + ' cut'\n # Add the suffix\n title += ' ' + ' '.join(suffix)\n return title", "def _topic_to_title(topic):\n # XXX: Special case. The titlecase module is supposed to be good about\n # leaving alone acronyms or words with idiosyncratic internal capitalization\n # (e.g. \"eBay\"), but it fails on the specific case of \"t-SNE\", probably because\n # of the punctuation.\n if topic.endswith(\"t-SNE\"):\n base = topic[:topic.rindex(' ')]\n return titlecase.titlecase(base) + ' t-SNE'\n else:\n return titlecase.titlecase(topic)\n # with initialisms and words with idiosyncratic", "def format_id(self, html=False):\n if self.term_type == 'C':\n full_id = 'KEGG:' + self.org_prefix + self.term_id\n else:\n full_id = 'KEGG:' + self.term_type\n\n if html:\n term_id = self.id_anchor_fmt % (self.url(), full_id)\n else:\n term_id = full_id\n return term_id", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()" ]
[ "0.63595736", "0.6318129", "0.6200423", "0.61921966", "0.6153127", "0.60798085", "0.6040445", "0.6013723", "0.6009649", "0.59691554", "0.59691554", "0.5925276", "0.58930725", "0.5861564", "0.5843622", "0.5807595", "0.57983005", "0.57983005", "0.5792964", "0.57914525", "0.5784936", "0.5754138", "0.5747758", "0.57427716", "0.5736765", "0.57350945", "0.5733803", "0.5710083", "0.56977195", "0.569569", "0.5692775", "0.5675338", "0.56750834", "0.566601", "0.5656295", "0.56486005", "0.5635058", "0.5632761", "0.56229174", "0.55856735", "0.5584382", "0.55720836", "0.556156", "0.55610347", "0.5560293", "0.5544775", "0.55441165", "0.55431855", "0.55425304", "0.553696", "0.5536498", "0.55328643", "0.5524859", "0.55184084", "0.55144155", "0.55143255", "0.551217", "0.549144", "0.5485226", "0.54739034", "0.5472092", "0.5468711", "0.5457559", "0.5440593", "0.5431623", "0.5430736", "0.5427278", "0.5422247", "0.54201186", "0.5413077", "0.54076934", "0.53982", "0.5391573", "0.53873086", "0.53813833", "0.5374471", "0.5372425", "0.5369736", "0.53652376", "0.53620213", "0.5360026", "0.53591686", "0.5356157", "0.53548515", "0.53532726", "0.5348652", "0.5342879", "0.53400695", "0.53342444", "0.533339", "0.53324383", "0.5330518", "0.53301674", "0.5323947", "0.5318922", "0.5316719", "0.5306648", "0.52999824", "0.52999824", "0.52999824" ]
0.78905237
0
Formats Toggl project name and description into (Freshbooks) description.
def format_description(self, project_name, description):
    description = description if description else ''
    return "%s %s" % (project_name, '- ' + description)
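Editor's illustrative note (not part of the dataset row): a minimal, runnable sketch of how the format_description document above behaves; the wrapper class name and the sample project/description values are assumptions made purely for the example.

class TogglSyncSketch:
    # Same logic as the document field above, repeated only so the
    # example is self-contained and runnable.
    def format_description(self, project_name, description):
        description = description if description else ''
        return "%s %s" % (project_name, '- ' + description)

print(TogglSyncSketch().format_description("Website Redesign", "fix navbar"))
# prints: Website Redesign - fix navbar
print(TogglSyncSketch().format_description("Website Redesign", None))
# prints: Website Redesign -   (an empty description collapses to a bare dash)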
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def description():", "def get_descriptive_name(self):\n description = (f\"{self.year} {self.manufacturer.title()} \"\n f\"{self.model.title()}\")\n\n return description", "def get_describe_name(self):\n long_name = str(self.year)+ ' ' + self.make.title()+ ' ' +self.model.title()\n return long_name", "def __str__(self):\r\n proj_string = \" Project Name: \" + self.__name\r\n proj_string += \"\\n Cover Photo: \" + self.__cover_photo\r\n proj_string += \"\\n Links: \" + self.__links\r\n proj_string += \" Note: \" + self.__note\r\n proj_string += \" Photos: \" + list_str(self.__photos)\r\n\r\n return proj_string", "def full_description(self):\n des = describe_dut(self.dut) if self.dut else ''\n if self.build:\n des += ' with ' + self.build\n if self.result_id:\n des += ' BVT result ID ' + str(self.result_id)\n return (self.description if self.description \n else 'unknown test') + ' on ' + des", "def _create_readme(self, name, summary, description):\n return \"\"\"\n %(header_bar)s\n %(header)s\n %(header_bar)s\n\n %(content)s\n \"\"\" % {\n 'header': name,\n 'header_bar': '=' * len(name),\n 'content': '\\n\\n'.join(\n content\n for content in (summary, description)\n if content\n ) or 'Describe your extension.',\n }", "def unique_project_description():\n return ''.join([str(uuid.uuid4())[:6] for num in range(30)])", "def get_descriptive_name(self): # 定义描述完整信息的方法\n long_name = str(self.year) + \" \" + self.make + \" \" + self.model # 拼接变量字符串并赋值变量\n return long_name.title() # 返回字符串并首字母大写", "def __str__(self):\n return_string = \"Project: {}-{}\".\\\n format(self.public_information[\"project_id\"],\n self.public_information[\"title\"])\n\n return return_string", "def getProjectName():", "def displaySummary(self):\r\n print('Project Name:' + self.project['name'])\r\n print('Project chip:' + self.project['chip'])\r\n print('Project includes: ' + ' '.join(self.project['incs']))\r\n print('Project defines: ' + ' '.join(self.project['defs']))\r\n print('Project srcs: ' + ' '.join(self.project['srcs']))", "def _build_title(db, place):\n descr = place.get_title()\n location = get_main_location(db, place)\n parish = location.get(PlaceType.PARISH)\n city = location.get(PlaceType.CITY)\n state = location.get(PlaceType.STATE)\n title_descr = \"\"\n if descr:\n title_descr += descr.strip()\n if parish:\n title_descr += ', ' + parish.strip() + _(\" parish\")\n if city:\n title_descr += ', ' + city.strip()\n if state:\n title_descr += ', ' + state.strip() + _(\" state\")\n return _strip_leading_comma(title_descr)", "def Description(self) -> str:", "def Description(self) -> str:", "def get_descriptive_name(self):\n long_name = f\"{self.make} {self.model} {self.year}\"\n \n return long_name.title()", "def get_descriptive_name(self):\n return f\"{self.year} {self.make} {self.model}\".title()", "def get_descriptive_name(self):\r\n long_name=str(self.year)+' '+self.make+' '+self.model\r\n return long_name.title()", "def describe(self) -> str:\n return (\n \"{name} {surname} è nata/o a {birth_municipality} ({birth_province_code}) il {birthdate}.\"\n \" Ora vive a {municipality} ({province_code}) in {address} {house_number}.\"\n ).format(**self._data)", "def display_project_info(project_name):\n\n # project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project_name)\n\n grades = hackbright.get_grades_by_title(project_name)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n grade=max_grade,\n grades=grades)", "def 
description(self, description: str):\n return self.swag({\n 'description': normalize_indent(description),\n })", "def get_descriptive_name(self):\n long_name = f\"{self.year} {self.make} {self.model}\"\n return long_name.title()", "def description(self):\n publisher = self.parent\n\n shotgun_url = publisher.sgtk.shotgun_url\n\n media_page_url = \"%s/page/media_center\" % (shotgun_url,)\n review_url = \"https://www.shotgunsoftware.com/features/#review\"\n\n return \"\"\"\n Separate layers and upload to Shotgun for review.<br><br>\n\n A <b>Version</b> entry will be created in Shotgun and a transcoded\n copy of the file will be attached to it. The file can then be reviewed\n via the project's <a href='%s'>Media</a> page, <a href='%s'>RV</a>, or\n the <a href='%s'>Shotgun Review</a> mobile app.\n \"\"\" % (media_page_url, review_url, review_url)", "def get_descriptive_name(self):\r\n long_name = str(self.year)+' '+self.make + ' '+self.model\r\n return long_name.title()", "def combined_description(desc1, desc2):\n description = desc1\n if desc2:\n description = '{0}_{1}'.format(desc1, desc2)\n\n return description", "def ftitle(self, text):\n return \"{} - {}\".format(self._app_name, text)", "def get_description(self):\n return \"-\".join(\n map(str, (self.release, self.chromosome, self.start, self.reference, self.alternative))\n )", "def __str__(self):\n string = \"\"\"\n Project Factory:\\n\n Directory: {}\\n\n Size: {}\\n\n \"\"\".format(self._directory, len(self.projects))\n return string", "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def testDescription(self):\n project = self.session.create_project()\n\n self.util.stringTypeTest(self, project, \"description\")\n\n self.util.stringPropertyTest(self, project, \"description\")", "def get_descriptive_name(self):\r\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\r\n #Mostrar_Grande = long_name.upper()\r\n #return long_name.upper()\r\n #return Mostrar_Grande #Funciona Com Return TAMBÉM, mas olhe na linha 39 como seria necessário usar.\r\n print(long_name.upper())", "def get_descriptive_name(self):\r\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\r\n return long_name.title()", "def FormatDescription(description):\n\n description = description.replace(\"wx\", \"wx.\")\n description = description.replace(\"EVT_COMMAND\", \"wxEVT_COMMAND\")\n description = description.replace(\"wx.Widgets\", \"wxWidgets\")\n\n return description", "def _get_description(actor, commit, run_id):\n return (\"Requested by @%s on commit %s\\n\" % (actor, commit) +\n \"Last updated: %s \\n\" % _get_datetime() +\n \"**[View integration test log & download artifacts](https://github.com/firebase/firebase-cpp-sdk/actions/runs/%s)**\\n\" % run_id)", "def description(self):", "def get_long_description(title):\n ROOT = os.path.abspath(os.path.dirname(__file__))\n\n readme = open(os.path.join(ROOT, 'README.rst'), 'r', 'utf8').read()\n body_tag = \".. Omit badges from docs\"\n readme_body_start = readme.index(body_tag)\n assert readme_body_start\n readme_body = readme[readme_body_start + len(body_tag):]\n\n changelog = open(os.path.join(ROOT, 'changelog.rst'), 'r', 'utf8').read()\n old_tag = \".. 
Omit older changes from package\"\n changelog_body_end = changelog.index(old_tag)\n assert changelog_body_end\n changelog_body = changelog[:changelog_body_end]\n\n bars = '=' * len(title)\n long_description = \"\"\"\n%(bars)s\n%(title)s\n%(bars)s\n%(readme_body)s\n\n%(changelog_body)s\n\n_(Older changes can be found in the full documentation)._\n\"\"\" % locals()\n return long_description", "def _str_desc(self, reader):\n data_version = reader.data_version\n if data_version is not None:\n data_version = data_version.replace(\"releases/\", \"\")\n desc = \"{OBO}: fmt({FMT}) rel({REL}) {N:,} GO Terms\".format(\n OBO=reader.obo_file, FMT=reader.format_version,\n REL=data_version, N=len(self))\n if reader.optobj:\n desc = \"{D}; optional_attrs({A})\".format(D=desc, A=\" \".join(sorted(reader.optobj.optional_attrs)))\n return desc", "def MakePuzzleTitleForDisplay(p):\n pack = models.GetPackForPuzzle(p)\n return '%s, Puzzle %s' % (pack.title, p.name)", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def __str__(self):\n gppkg_spec_file = '''\nPkgName: ''' + self.name + '''\nVersion: ''' + self.version + '''\nGPDBVersion: ''' + self.gpdbversion + '''\nDescription: Temporary Test Package\nOS: ''' + self.os + '''\nArchitecture: ''' + self.arch\n\n return gppkg_spec_file", "def describe():\n keys = Project.__dict__.keys()\n for key in sorted(keys):\n value = Project.__dict__[key]\n if key in ATTRIBUTES:\n attrs = ATTRIBUTES[key]\n required = False\n if 'required' in attrs:\n if attrs['required']:\n required = True\n if 'help' in attrs:\n info(\"# {key}\".format(key=key))\n if required:\n info(\"# REQUIRED\")\n for line in textwrap.wrap(attrs['help'], width=100):\n info(\"# {line}\".format(line=line))\n info(\"# '{key}': '{value}'\".format(key=key, value=value))\n info('')\n else:\n info(\"'{key}': '{value}'\".format(key=key, value=value))", "def email_project_info(project):\n header = (\"Project title: {}\\n\"\n \"Submission ID: {}\\n\"\n \"Submitting author: {}\"\n ).format(project.title, project.slug, project.submitting_author())\n\n return header", "def test_description_is_generated_from_long_desc_formats(self):\r\n self.register()\r\n res = self.new_application(long_description=\"## Hello\")\r\n\r\n app = db.session.query(App).first()\r\n assert '##' not in app.description, app.description\r\n assert '<h2>' not in app.description, app.description", "def get_description(self):\n if CONFIG_KEY not in self:\n return\n if hasattr(self[CONFIG_KEY], DESC_KEY):\n desc_str = str(self[CONFIG_KEY][DESC_KEY])\n if not isinstance(desc_str, str):\n 
try:\n desc_str = str(desc_str)\n except Exception as e:\n raise InvalidConfigFileException(\n \"Could not convert the specified Project description \"\n \"({}) to string. Caught exception: {}\".\n format(desc_str, getattr(e, 'message', repr(e))))\n return desc_str", "def get_description(self):", "def create_title(title, year=None, time_step=None, base=0, interval=None,\n gage=None, m=None, h=None):\n if type(gage) is list or type(gage) is tuple:\n title = title + ' at listed gages'\n elif gage is not None:\n title = title + ' at '+ gage\n \n if m is not None:\n title = title + ' for Month {mo} of'.format(mo=m)\n elif h is not None:\n title = title + ' for Hour {ho} of'.format(ho=h) \n elif interval is 'seasonal':\n title = title + ' for Months of'\n elif interval is 'diurnal':\n title = title + ' for Hours of'\n if time_step is not None:\n ts = time_step.replace('min', ' minute').replace('T', ' minute').replace('H', ' hour').replace('D', ' day')\n title = title.format(ts=ts)\n if year is not None:\n title = title +' '+ year\n return title", "def project_name(self):\n pass", "def get_description(self):\n if self.desc_format == MARKDOWN_FORMAT:\n return markdown.markdown(self.desc, safe_mode='escape') \n elif self.desc_format == TEXT_FORMAT:\n return html.escape(self.desc)", "def get_description(self):\n if self.desc_format == MARKDOWN_FORMAT:\n return markdown.markdown(self.desc, safe_mode='escape') \n elif self.desc_format == TEXT_FORMAT:\n return html.escape(self.desc)", "def logic_program_form(self):\r\n return '% ASP{f} Translation of System Description ' + self.name + '\\n\\n'", "def cal_desc(self):\n desc = ''\n desc += 'Requested by '\n orgs = self.event.org.all()\n for org in orgs:\n desc += org.name + ', '\n desc = desc[:-2] + '.\\n' # removes trailing comma\n desc += 'Crew Chief: ' + self.crew_chief.get_full_name() + '\\n'\n if self.event.description:\n desc += self.event.description + '\\n'\n return desc", "def get_description():\n desc = {}\n desc[\"data\"] = True\n desc[\n \"description\"\n ] = \"\"\"For each year, the average first and last date of\n a given temperature is computed. 
The values are then averaged and plotted\n to represent the period between these occurences and also the number of\n days represented by the period.\n \"\"\"\n desc[\"arguments\"] = [\n dict(\n type=\"station\",\n name=\"station\",\n default=\"IATDSM\",\n label=\"Select Station\",\n network=\"IACLIMATE\",\n )\n ]\n return desc", "def get_short_name(self):\n split = self.name.split(' - ')\n # author, year, and first couple of words of paper title\n return \"{} ({}), {}\".format(split[0], split[1], \" \".join(split[2].split(' ')[:3]))", "def get_and_display_project():\n\n project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project)\n\n\n github_grade_list = hackbright.get_grades_by_title(project)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n github_grade_list=github_grade_list)", "def __str__(self):\r\n hobby_string = \"Hobby Name: \" + self.__name\r\n hobby_string += \"\\n Cover Photo: \" + self.__cover_photo\r\n hobby_string += \"\\n Projects: \\n\" + list_str_breaks(self.__projects)\r\n\r\n return hobby_string", "def description(self):\n\t\treturn \"%s, %s\" % (self.name, self.country)", "def description(request):\n if request.method != 'POST':\n description = request.issue.description or \"\"\n return HttpTextResponse(description)\n if not request.issue.edit_allowed:\n if not IS_DEV:\n return HttpTextResponse('Login required', status=401)\n issue = request.issue\n issue.description = request.POST.get('description')\n issue.put()\n return HttpTextResponse('')", "def project_name() -> str:\n fake = Faker()\n raw_name: str = (\n fake.name_female().lower().replace(\" \", \"_\").replace(\"-\", \"_\").replace(\".\", \"_\")\n )\n return re.sub(\"_+\", \"_\", raw_name).strip(\"_\")", "def short_description(self):\n description = self.description\n if description is not None:\n lines = description.splitlines()\n title = []\n for line in lines:\n line = line.strip()\n if line == \"\":\n if len(title) > 0:\n break\n else:\n title.append(line)\n description = \"\\n\".join(textwrap.wrap(\"\\n\".join(title), 80))\n\n return description", "def __str__(self):\n\t\ttxt = \"____RECIPE____\\n\\nname: {}\\ncooking_lvl: {}\\ncooking time: {}\\nIngredients: {}\\nRecipe type: {}\\n\\\nDescription:{}\\n______________\\n\".format(self.name, self.cooking_lvl, self.cooking_time, \\\n\t\t\tself.ingredients, self.recipe_type, self.description)\n\t\treturn txt", "def longdescription():\r\n print()\r\n here = path.abspath(path.dirname(__file__))\r\n with open(path.join(here, 'README.rst')) as f:\r\n long_description = f.read()\r\n\r\n print(long_description)", "def get_description(self) -> str:\n pass", "def __str__(self) -> str:\n string = fr\"{self.id}\\. 
`{self.content}`\"\n if self.description:\n string += f\" - {self.description}\"\n return string", "def get_project():\n\n title = request.args.get('title')\n if not title:\n return \"Please enter a title!\"\n\n project = hackbright.get_project_by_title(title)\n\n grades = hackbright.get_grades_by_title(title)\n\n if not project:\n return \"There is no project with title \\\"{}\\\".\".format(title)\n\n title, description, max_grade = project\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n grades=grades)", "def summary_title(tile_summary):\n return f\"Slide tile_summary.slide_name Tile Summary:\"", "def description(self):\n\n\t\treturn \"%d %s %s\" % (self.vintage, self.winery.name, self.name)", "def cal_desc(self):\n desc = \"\"\n desc += \"Requested by \"\n orgs = self.org.all()\n if len(orgs) > 0:\n for org in orgs:\n desc += org.name + \", \"\n desc = desc[:-2] + \".\\n\" # removes trailing comma\n ccs = self.ccinstances.all()\n if len(ccs) > 0:\n desc += \"Crew Chiefs: \"\n for cc in ccs:\n desc += cc.crew_chief.get_full_name() + \" [\" + (cc.service.shortname if cc.service else cc.category.name) + \"], \"\n desc = desc[:-2] + \".\\n\" # removes trailing comma\n if self.description:\n desc += self.description + \"\\n\"\n return desc", "def __str__(self):\n return \"{0} : {1}\".format(self.name, self.description)", "def description(request):\n description = request.issue.description or \"\"\n return HttpTextResponse(description)", "def get_project_info():\n\n title = request.args.get('project')\n\n project_info_list = hackbright.get_project_by_title(title)\n\n html = render_template(\"project_info.html\",\n project_info_list=project_info_list)\n return html", "def describe_pet2(pet_name, animal_type='dog'):\n print(f\"\\nI have a {animal_type}.\")\n print(f\"My {animal_type}'s name is {pet_name.title()}.\")", "def show_project():\n\n title = request.args.get('title')\n\n title, description, grade = hackbright.get_project_by_title(title)\n\n grade_list = hackbright.get_grades_by_title(title)\n\n html = render_template(\"project.html\", title=title,\n description=description, grade=grade,\n grade_list=grade_list)\n\n return html", "def help_description():\n pass", "def _prettyfilename(self):\n return f'{self.title} ({self.year})'", "def format_project_string(repo_path, name):\n\n if not ProjectRepo.existing_git_repository(repo_path):\n return colored(name, 'green')\n repo = ProjectRepo(repo_path, __project_repo_default_remote__, __project_repo_default_ref__)\n if not repo.validate_repo():\n color = 'red'\n symbol = '*'\n else:\n color = 'green'\n symbol = ''\n return colored(name + symbol, color)", "def __str__(self):\n # These are required tags so we should have generated an\n # error beforehand and this shouldn't raise a ``KeyError``\n s = [(\"Album Title\", self[\"TITLE\"]), (\"Album Artist\", self[\"ARTIST\"]),\n (\"Year\", self[\"DATE_RECORDED\"]), (\"Genre\", self[\"GENRE\"])]\n s = OrderedDict(s)\n\n def add_optional(key):\n nonlocal s\n if key in self:\n text = key.replace('_', ' ').split(' ')\n text = ' '.join([x.capitalize() for x in text])\n s[text] = self[key]\n\n add_optional(\"LABEL\")\n add_optional(\"ISSUE_DATE\")\n add_optional(\"ORIGINAL_MEDIUM\")\n add_optional(\"VERSION\")\n add_optional(\"HD_FORMAT\")\n add_optional(\"DISC_NAME\")\n add_optional(\"PHASE_NAME\")\n if self.discs > 1:\n s[\"Disc\"] = self[\"PART_NUMBER\"]\n s[\"Discs\"] = self.discs\n if self.channels != \"2.0\":\n s[\"Channels\"] = 
self.channels\n # Now we have to deal with the formatted output. First we need\n # the maximum length of the keys to properly align the output\n # Note that the keys used will have a space appended, so we add 1\n max_len = max(len(x[0]) for x in s)+1\n\n # Output for an entry in ``s`` of (\"Year\", \"2016\") with a ``max_len`` of 10\n # would be: '= Year .....: 2016'\n def line(k, v):\n return f\"{k.ljust(max_len, '.')}: {v}\"\n\n s = [line(*x) for x in s.items()]\n # Now we can reuse ``max_len`` to mean the longest fully formatted line\n # We want to add '= ' to the left side and ' =' to the right side to\n # form a border\n max_len = max(len(x) for x in s)\n s = [f'= {x:{max_len}} =' for x in s]\n max_len += 4\n s = [\" ALBUM INFORMATION \".center(max_len, \"=\")] + s + [\"=\" * max_len]\n return \"\\n\".join(s)", "def get_long_description():\n descr = []\n for fname in ('README.rst',):\n with io.open(fname, encoding='utf-8') as f:\n descr.append(f.read())\n return '\\n\\n'.join(descr)", "def brief(self):\n result = \"({0.mode}) {0.name}\".format(self)\n if self.content_hash_before is None:\n result += \" (new)\"\n return result", "def description(self):\n pass", "def description(self):\n pass", "def description() -> str:\n content = \"Demonstrates usage of blackbord remappings.\\n\"\n content += \"\\n\"\n content += \"Demonstration is via an exemplar behaviour making use of remappings..\\n\"\n\n if py_trees.console.has_colours:\n banner_line = console.green + \"*\" * 79 + \"\\n\" + console.reset\n s = banner_line\n s += console.bold_white + \"Blackboard\".center(79) + \"\\n\" + console.reset\n s += banner_line\n s += \"\\n\"\n s += content\n s += \"\\n\"\n s += banner_line\n else:\n s = content\n return s", "def portfolio():\n projects = get_projects()\n for project in projects:\n unicode_body = project[\"description\"].decode(\"utf-8\")\n html_body = markdown.markdown(unicode_body)\n safe_html_body = Markup(html_body)\n project[\"description\"] = safe_html_body\n context = {\n \"projects\": projects\n }\n return render_template(\"portfolio.html\", **context)", "def debian_description(self):\n text = [\"Python package\", self.python_name, \"converted by py2deb on\"]\n # The %e directive (not documented in the Python standard library but\n # definitely available on Linux which is the only platform that py2deb\n # targets, for obvious reasons :-) includes a leading space for single\n # digit day-of-month numbers. I don't like that, fixed width fields are\n # an artefact of 30 years ago and have no place in my software\n # (generally speaking :-). 
This explains the split/compact duo.\n text.extend(time.strftime('%B %e, %Y at %H:%M').split())\n return ' '.join(text)", "def get_description(self):\n pass", "def _append_descriptions(self, issue, dep_name, dep_latest_version):\n logging.info(\"Updating JIRA issue {0} to track {1} upgrade process\".format(\n issue.key,\n dep_name))\n description = issue.fields.description + \"\"\"\\n\\n{0}\\n\n Please review and upgrade the {1} to the latest version {2} \\n \n cc: \"\"\".format(\n datetime.today(),\n dep_name,\n dep_latest_version\n )\n _, owners = self._find_owners(dep_name)\n for owner in owners:\n description += \"[~{0}], \".format(owner)\n try:\n self.jira.update_issue(issue, description=description)\n except Exception as e:\n traceback.print_exc()\n logging.error(\"Failed updating issue: \"+ str(e))", "def _generate_title_description(psap_id, title, description):\n if description is None:\n description = PersistentFields.get_description(psap_id)\n else:\n PersistentFields.set_description(psap_id, description)\n if title is None:\n title = PersistentFields.get_title(psap_id)\n else:\n PersistentFields.set_title(psap_id, title)\n\n return title, description", "def _title(profile):\n if profile['operation'] == 'differential':\n p1, p2 = profile['profiles']\n return 'differential ({}, {})'.format(_title(p1), _title(p2))\n elif profile['operation'] == 'local feature':\n p = profile['profile']\n return 'local feature {} ({})'.format(profile['function'], _title(p))\n else:\n return ' '.join([str(x) for x in profile.values()])", "def read_project(filename):\n sep ='='\n \n tags = {\n 'description': ['BEGIN DESCRIPTION:', 'END DESCRIPTION:']\n }\n\n fixed = {\n 'units': ['SI Units' 'English Units']\n }\n\n keys = {\n 'Proj Title': '',\n 'Default Exp/Contr': '=0.3,0.1',\n 'Current Plan': '=p03',\n 'Geom File': '=g01',\n 'Flow File': '=f01',\n 'Plan File': '=p01',\n 'Y Axis Title=Elevation': '',\n 'X Axis Title(PF)': '=Main Channel Distance',\n 'X Axis Title(XS)': '=Station',\n 'DSS Start Date': '=',\n 'DSS Start Time': '=',\n 'DSS End Date': '=',\n 'DSS End Time': '=',\n 'DSS Export Filename': '=',\n 'DSS Export Rating Curves': '= 0',\n 'DSS Export Rating Curve Sorted': '= 0',\n 'DSS Export Volume Flow Curves': '= 0',\n 'DXF Filename': '=',\n 'DXF OffsetX': '= 0',\n 'DXF OffsetY': '= 0',\n 'DXF ScaleX': '= 1',\n 'DXF ScaleY': '= 10',\n 'GIS Export Profiles': '= 0'\n }", "def Title(self, **kwargs):\n full_name = ''\n if self.getFirstname() == '' or self.getLastname() == '':\n if not self.getOrganization():\n return '...'\n else:\n return self.getOrganization()\n format = kwargs.get('format', None)\n if format == 'natural':\n full_name = '%s %s' % (self.getFirstname(), self.getLastname())\n else:\n full_name = '%s %s' % (self.getLastname(), self.getFirstname())\n return '%s' % full_name", "def fix_description(text):\n separate = text.split()\n joined = ' '.join(list([x.strip('\\\\n') for x in separate]))\n final_joined = ' '.join(joined.split('\\\\n')[::3])\n return final_joined", "def get_description(self):\n return re.sub('\\n\\W+',' ', self.__doc__)", "def __str__(self):\n description = \"Object class Critter.\"\n description += \"\\nName: \" + self.name + \"\\nMood: \" + self.mood + \"\\nHunger: \" + str(self.hunger) + \"\\nBoredom: \" + str(self.boredom) + \"\\n\"\n return description", "def generate_grid_title(grid, field, level):\n time_str = generate_grid_time_begin(grid).isoformat() + \"Z\"\n height = grid.z[\"data\"][level] / 1000.0\n l1 = f\"{generate_grid_name(grid)} {height:.1f} km 
{time_str} \"\n field_name = generate_field_name(grid, field)\n return l1 + \"\\n\" + field_name", "def get_description():\n raise NotImplementedError" ]
[ "0.6330707", "0.5992437", "0.59294826", "0.59286237", "0.58890724", "0.584214", "0.5832703", "0.5818409", "0.5808963", "0.5762168", "0.5734125", "0.5720652", "0.5715829", "0.5715829", "0.56770307", "0.5670249", "0.56625277", "0.5661302", "0.56451756", "0.5631556", "0.5621286", "0.56144625", "0.5603907", "0.5602831", "0.5570571", "0.5557692", "0.5554851", "0.55504674", "0.5547473", "0.5531748", "0.5527534", "0.55143964", "0.5513799", "0.55079937", "0.5504367", "0.54953283", "0.54952997", "0.5485188", "0.5485188", "0.5485188", "0.5485188", "0.5485188", "0.5485188", "0.5485188", "0.5485188", "0.5478685", "0.54772973", "0.5451208", "0.54505247", "0.54468656", "0.543736", "0.54307634", "0.54190636", "0.54165107", "0.54165107", "0.5414696", "0.5393992", "0.5371215", "0.53678864", "0.5363891", "0.5359662", "0.5353482", "0.5350721", "0.5338288", "0.5333472", "0.53302413", "0.5329507", "0.5321206", "0.5316899", "0.53138655", "0.5310177", "0.5303074", "0.5303064", "0.5291936", "0.52886355", "0.52865535", "0.52721936", "0.525194", "0.52493995", "0.5247164", "0.52464825", "0.5241843", "0.5241501", "0.5240071", "0.52296084", "0.52296084", "0.5229105", "0.52195036", "0.5201166", "0.5191991", "0.51871353", "0.5187034", "0.51868206", "0.51817065", "0.51769257", "0.51727515", "0.5168887", "0.5167804", "0.51669365", "0.51648706" ]
0.7948996
0
Merges Toggl time entries with same project name. Sums duration if billable.
def merge_toggl_time_entries(self, time_entries):
    tg = Toggl()
    d = {}
    for entry in time_entries:
        if entry.get('billable'):
            if entry.get('tags') and tg.BOOKED_TAG in entry['tags']:
                status = 'booked'
            else:
                status = 'not-booked'
            date = parser.parse(entry['start']).date()
            if not entry.get('pid'):
                self.log("Couldn't find associated project for entry: %s" % (str(entry)))
                continue
            unique_id = str(entry['pid']) + str(date) + status
            if not entry.get('description'):
                entry['description'] = ""
            if d.get(unique_id):
                d[unique_id]['duration'] += entry['duration']
                d[unique_id]['merged_ids'].append(entry['id'])
                if d[unique_id].get('description'):
                    if entry['description'].strip() not in d[unique_id]['description']:
                        d[unique_id]['description'] += ' / ' + entry['description']
                else:
                    d[unique_id]['description'] = entry['description']
            else:
                entry['merged_ids'] = [entry['id']]
                d[unique_id] = entry
    return d.values()
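Editor's illustrative note (not part of the dataset row): a minimal sketch of how the merged entries returned above might be consumed, mirroring the seconds-to-hours conversion and quarter-hour rounding that appears in the time_tracking snippet among this row's negatives; the function name and the assumption that each merged dict keeps Toggl's 'duration' and 'description' keys are mine.

def summarize_merged_entries(merged_entries):
    # merged_entries: the dicts returned by merge_toggl_time_entries().
    rows = []
    for entry in merged_entries:
        hours = int(entry['duration']) / 60 / 60   # Toggl reports duration in seconds
        hours = round(hours * 4) / 4               # round to the nearest 0.25 h
        rows.append((entry.get('description', ''), hours))
    return rows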
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def break_time(self):\n\t\ts = timedelta()\n\t\tfor i in xrange(1, len(self.toggles)-1, 2):\n\t\t\ts += self.toggles[i+1] - self.toggles[i]\n\n\t\t# If not working need to add the last period of time\n\t\tif not self.status():\n\t\t\ts += datetime.now() - self.toggles[-1]\n\t\treturn s", "def _task_data(self):\n output = {\n 'all': [],\n 'open': [],\n 'open_hours': 0,\n 'done': [],\n 'done_hours': 0,\n 'week_done': [],\n 'week_done_hours': 0,\n 'week_due': [],\n 'week_due_hours': 0,\n 'velocity': [],\n 'velocity_hours': 0,\n 'velocity_count': 0,\n }\n\n last_sunday = SUNDAY - timedelta(weeks=1)\n three_weeks_ago = MONDAY - timedelta(weeks=4)\n\n tasks = Task.originals.owner_id(self.pk).order_by('due_dt')\n for t in tasks:\n output['all'].append(t)\n # process open tasks\n if not t.completed:\n output['open'].append(t)\n output['open_hours'] += t.task_time\n\n # Process done tasks\n else:\n output['done'].append(t)\n output['done_hours'] += t.task_time\n if t.completed_dt >= three_weeks_ago and t.completed_dt <= last_sunday:\n output['velocity'].append(t)\n output['velocity_hours'] += t.task_time\n\n if t.due_dt >= MONDAY and t.due_dt <= SUNDAY:\n output['week_due'].append(t)\n output['week_due_hours'] += t.task_time\n\n if t.completed and t.completed_dt >= MONDAY and t.completed_dt <= SUNDAY:\n output['week_done'].append(t)\n output['week_done_hours'] += t.task_time\n\n output['all_hours'] = output['open_hours'] + output['done_hours']\n\n # Extra calcs for the velocity\n output['velocity_count'] = len(output['velocity'])\n\n if output['velocity_hours'] > 0:\n output['velocity_hours'] = round(output['velocity_hours']/3,2)\n if output['velocity_count'] > 0:\n output['velocity_count'] = round(Decimal(output['velocity_count'])/3,2)\n\n return output", "def _task_data(self):\n output = {\n 'all': [],\n 'all_hours': 0,\n 'open': [],\n 'open_hours': 0,\n 'done': [],\n 'done_hours': 0,\n }\n\n tasks = Task.originals.project_id(self.pk).order_by('due_dt')\n for t in tasks:\n # process open tasks\n if not t.completed:\n output['open'].append(t)\n output['open_hours'] += t.task_time\n\n # Process done tasks\n else:\n output['done'].append(t)\n output['done_hours'] += t.task_time\n\n # Included in the loop to keep the ordering\n output['all'].append(t)\n\n output['all_hours'] = output['open_hours'] + output['done_hours']\n\n return output", "def addTimeWashed(df): \n # Calculate time washed of food (start of no food)\n time_washed = pd.DataFrame(df.groupby(['date_yyyymmdd'])['wormsorter_start_time'].min())\n time_washed = time_washed.reset_index(drop=False)\n time_washed.columns = ['date_yyyymmdd','time_washed']\n \n df = pd.merge(left=df, right=time_washed, on='date_yyyymmdd')\n \n return df", "def _get_open_projects_info():\n projects = Project.objects.filter(project_open=True).order_by(\"created_at\")\n projects_sum_hours = []\n for project in projects:\n time_entries_pro_project = TimeEntry.objects.filter(project=project)\n used_hours = _sum_hours(time_entries_pro_project)\n hours_percent = _calculate_hours_percent(used_hours, project.stimated_hours)\n projects_sum_hours.append(\n {\n \"hours_percent_number\": hours_percent,\n \"hours_percent\": f\"{hours_percent}%\",\n \"worked_hours\": used_hours,\n \"project\": project,\n }\n )\n return projects_sum_hours", "def merge_arrival_and_completion_time(tests_dataframe):\r\n arrival_time_df = tests_dataframe[['time_test_arrives_lab', 'server_size']]\r\n completion_time_df = tests_dataframe[['completion_time', 'server_size']]\r\n 
arrival_time_df['add'] = 1\r\n completion_time_df['add'] = -1\r\n arrival_time_df = arrival_time_df.rename(columns={\"time_test_arrives_lab\":\"time\"})\r\n completion_time_df = completion_time_df.rename(columns={\"completion_time\":\"time\"})\r\n union = pd.concat([arrival_time_df, completion_time_df])\r\n union = union.sort_values(by=\"time\")\r\n prev_server_size = 0\r\n for index, row in union.iterrows():\r\n if index == 0:\r\n current_server_size= row['server_size'] + row['add']\r\n prev_server_size = current_server_size\r\n #union['server_size'] = union['server_size'] + union['add']\r\n else:\r\n current_server_size = prev_server_size + row['add'] \r\n prev_server_size = current_server_size\r\n union.at[index,'server_size'] = current_server_size\r\n #union.to_csv('union.csv')\r\n return union", "def _02_merge_plant_project_outputs(records, **params):\n output_records = OrderedDict()\n for record in records:\n project_key = (record[\"Power Plant Name\"], record[\"Project Name\"])\n if project_key not in output_records:\n output_records[project_key] = record\n\n base_field = f\"{record['Type']} Output\"\n if record.get(f\"{base_field} Year\") in [None, \"NA\"]:\n continue\n elif (\n output_records[project_key][f\"{base_field} Year\"] in [None, \"NA\"]\n or record[f\"{base_field} Year\"] > output_records[project_key][f\"{base_field} Year\"]\n ):\n for key in [f\"{base_field}\", f\"{base_field} Unit\", f\"{base_field} Year\"]:\n output_records[project_key][key], record[key] = record[key], None\n else:\n for key in [f\"{base_field}\", f\"{base_field} Unit\", f\"{base_field} Year\"]:\n record[key] = None # reduce\n\n return records", "def cal_group_actions(df,option):\r\n\r\n if option == 'precovid':\r\n print('This is the pre-pandemic period:')\r\n elif option == 'postcovid':\r\n print('This is the post-pandemic period:')\r\n\r\n A = df[df['mod_numEdits'] == 1]\r\n B = df[(df['mod_numEdits'] > 1) & (df['mod_numEdits'] <= 10)]\r\n C = df[(df['mod_numEdits'] > 10) & (df['mod_numEdits'] <= 100)]\r\n D = df[(df['mod_numEdits'] >= 100)]\r\n \r\n A.insert(11,'group','A')\r\n B.insert(11,'group','B')\r\n C.insert(11,'group','C')\r\n D.insert(11,'group','D')\r\n\r\n li_add_A = []\r\n li_upd_A = []\r\n li_rem_A = []\r\n\r\n li_add_B = []\r\n li_upd_B = []\r\n li_rem_B = []\r\n\r\n li_add_C = []\r\n li_upd_C = []\r\n li_rem_C = []\r\n\r\n li_add_D = []\r\n li_upd_D = []\r\n li_rem_D = []\r\n\r\n for userid in set(A.userId):\r\n \r\n li_add_A.append(len(A[(A['action'] == 'add') & (A['userId'] == userid)]))\r\n li_upd_A.append(len(A[(A['action'] == 'update') & (A['userId'] == userid)]))\r\n li_rem_A.append(len(A[(A['action'] == 'remove') & (A['userId'] == userid)]))\r\n \r\n for userid in set(B.userId):\r\n \r\n li_add_B.append(len(B[(B['action'] == 'add') & (B['userId'] == userid)]))\r\n li_upd_B.append(len(B[(B['action'] == 'update') & (B['userId'] == userid)]))\r\n li_rem_B.append(len(B[(B['action'] == 'remove') & (B['userId'] == userid)]))\r\n \r\n for userid in set(C.userId):\r\n \r\n li_add_C.append(len(C[(C['action'] == 'add') & (C['userId'] == userid)]))\r\n li_upd_C.append(len(C[(C['action'] == 'update') & (C['userId'] == userid)]))\r\n li_rem_C.append(len(C[(C['action'] == 'remove') & (C['userId'] == userid)]))\r\n\r\n for userid in set(D.userId):\r\n \r\n li_add_D.append(len(D[(D['action'] == 'add') & (D['userId'] == userid)]))\r\n li_upd_D.append(len(D[(D['action'] == 'update') & (D['userId'] == userid)]))\r\n li_rem_D.append(len(D[(D['action'] == 'remove') & (D['userId'] == 
userid)]))\r\n \r\n li_add = [li_add_A, li_add_B, li_add_C, li_add_D]\r\n li_upd = [li_upd_A, li_upd_B, li_upd_C, li_upd_D]\r\n li_rem = [li_rem_A, li_rem_B, li_rem_C, li_rem_D]\r\n\r\n print(f'the mean of li_add_A is:{round(np.mean(li_add_A, dtype=np.float64),2)}')\r\n print(f'the mean of li_add_B is:{round(np.mean(li_add_B, dtype=np.float64),2)}')\r\n print(f'the mean of li_add_C is:{round(np.mean(li_add_C, dtype=np.float64),2)}')\r\n print(f'the mean of li_add_D is:{round(np.mean(li_add_D, dtype=np.float64),2)}')\r\n\r\n print(f'the mean of li_upd_A is:{round(np.mean(li_upd_A, dtype=np.float64),2)}')\r\n print(f'the mean of li_upd_B is:{round(np.mean(li_upd_B, dtype=np.float64),2)}')\r\n print(f'the mean of li_upd_C is:{round(np.mean(li_upd_C, dtype=np.float64),2)}')\r\n print(f'the mean of li_upd_D is:{round(np.mean(li_upd_D, dtype=np.float64),2)}')\r\n\r\n print(f'the mean of li_rem_A is:{round(np.mean(li_rem_A, dtype=np.float64),2)}')\r\n print(f'the mean of li_rem_B is:{round(np.mean(li_rem_B, dtype=np.float64),2)}')\r\n print(f'the mean of li_rem_C is:{round(np.mean(li_rem_C, dtype=np.float64),2)}')\r\n print(f'the mean of li_rem_D is:{round(np.mean(li_rem_D, dtype=np.float64),2)}')\r\n\r\n return li_add, li_upd, li_rem", "def collapse_using_timeStr(self):\n if self.modified == True:\n raise Exception('Probabilities already modified.\\nCollapsing after modification will lead to incorrect results.')\n timeUnits = np.array(process_time_string(self.timeStr))\n if len(self.timeslices) + 1 == np.sum(timeUnits):\n if timeUnits[-1] == 1:\n timeUnits = timeUnits[:-1]\n else:\n timeUnits[-1] -= 1\n if len(self.timeslices) != np.sum(timeUnits):\n raise Exception('Total number of timeslices is different.')\n ind = 0\n cnt = 0\n curr_rates = np.matrix(np.zeros((np.shape(self.obsRates)[0], len(timeUnits))))\n curr_times = []\n for i in timeUnits:\n curr_rates[:, cnt] = np.sum(self.obsRates[:, ind:ind + i], axis=1)\n curr_times.append(np.sum(self.timeslices[ind:ind + i]))\n ind += i\n cnt += 1\n\n self.obsRates = curr_rates\n self.timeslices = curr_times", "def switch_project(project):\n # Get the data\n project = project.lower()\n lines, finished, last_project = parse_file(project=None)\n line1, i1, last1, _, times1 = parse_line(lines, last_project, finished)\n line2, i2, _, new2, times2 = parse_line(lines, project, True)\n now = datetime.now()\n\n # Format the data\n if not finished:\n punch1 = now - last1\n times1.append(punch1)\n punch1 = punch1.total_seconds()\n total1 = sum(t.total_seconds() for t in times1)\n total2 = sum(t.total_seconds() for t in times2)\n now = now.strftime(TIMEF)\n\n # Modifying the lines for the file\n lines[1] = HEADER1 + project\n if not finished:\n\n # Clock-Out\n line1[-1] += IN_OUT_SEP + now\n line1[1] = fnum(total1)\n line1 = PUNCH_SEP.join(line1)\n lines[i1] = line1\n\n # Clock-In\n line2.append(now)\n line2 = PUNCH_SEP.join(line2)\n if new2:\n lines.append(line2)\n else:\n lines[i2] = line2\n\n # Write to file\n with open(PUNCHES_PATH, 'w+') as f:\n f.write('\\n'.join(lines))\n\n # Report\n if new2:\n print(f\"Created Project: '{project}'\")\n if finished:\n print(f\"CURRENTLY CLOCKED OUT, Project Switched From: '{last_project}', To: '{project}'\")\n print(f\"NOW: {now}\")\n print(f\"'{last_project}' Total Hrs: {fnum(total1)}\")\n print(f\"'{project}' Total Hrs: {fnum(total2)}\")\n else:\n print(f\"CLOCK OUT, Project: '{last_project}'\")\n print(f\"CLOCK IN, Project: '{project}'\")\n print(f\"'{last_project}' IN: {last1.strftime(TIMEF)}, NOW: {now}\")\n 
print(f\"'{last_project}' Total Hrs: {fnum(total1)}, Current Punch: {fnum(punch1)}\")\n print(f\"'{project}' Total Hrs: {fnum(total2)}\")", "def get_weekly_project_durations(self, week=0):\n\n # get the start and end of the desired week\n now = dt.datetime.now()\n monday = now.date() - dt.timedelta(days=now.weekday() + 7*week)\n nextmonday = monday + dt.timedelta(days=7)\n\n # get all jobs and associated projects for the selected week\n # there will be one row per job and associated project such that a job\n # which is assigned to two projects will also have two rows\n self.alog.dbcur.execute(\n 'WITH ja (id, start, dur, act) AS ('\n ' SELECT jobs.id, jobs.start, jobs.duration, activities.label '\n ' FROM jobs JOIN activities ON jobs.activity = activities.id '\n ' WHERE jobs.start >= ? AND jobs.start < ?) '\n 'SELECT ja.id, ja.start, ja.dur, ja.act, projects.label '\n 'FROM ja LEFT OUTER JOIN job_pj ON ja.id = job_pj.job '\n ' LEFT OUTER JOIN projects ON job_pj.project = projects.id',\n (monday, nextmonday))\n\n jobs = pd.DataFrame(self.alog.dbcur.fetchall(),\n columns=('id', 'start', 'duration', 'act',\n 'project'))\n\n # do the same thing for people, but do not select jobs here that have a\n # project associated with them\n # note that it's not necessary to outer join here, because I have already\n # got all the necessary information about jobs above\n self.alog.dbcur.execute(\n 'SELECT jobs.id, people.label '\n 'FROM jobs JOIN job_p, people '\n ' ON jobs.id = job_p.job AND job_p.person = people.id '\n 'WHERE jobs.start >= ? '\n ' AND jobs.start < ?'\n ' AND jobs.id NOT IN (SELECT job FROM job_pj)',\n (monday, nextmonday))\n\n j_p = pd.DataFrame(self.alog.dbcur.fetchall(),\n columns=('id', 'person'))\n\n # sort the people as projects into the job list\n ids = j_p.id.unique()\n for jid in ids:\n people = j_p[j_p.id == jid].person\n\n row = jobs[jobs.id == jid].copy()\n row.project = people.iloc[0]\n\n # add first person to the corresponding job\n jobs[jobs.id == jid] = row\n\n # if several people are associated with the job, add more rows to the\n # job list\n for person in people.values[1:]:\n row.project = person\n jobs = jobs.append(row, ignore_index=True)\n\n projects = pd.DataFrame(jobs.groupby('project').duration.sum(\n ).sort_values(ascending=False))\n acts = jobs.act.unique()\n\n for act in acts:\n projects[act] = 0\n\n for pj in projects.index:\n actdurs = jobs[jobs.project == pj].groupby('act').duration.sum()\n\n projects.loc[pj, actdurs.index] = actdurs\n\n # remove activities which did not occur in any of the projects\n # (these are project-independent activities)\n projects = projects.T[projects.sum() > 0].T\n\n return projects", "def projectDuration(listActivities):\n lastAct = max(listActivities, key=lambda activity: activity.startTime)\n return lastAct.startTime + lastAct.duration", "def merge_all_data(self):\n\n logging.info('***** Starting the merging process merge_all_data')\n\n \"\"\" All possible unique_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n date_times = np.array(date_times) \n\n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, = [] , [] , [] , [] , []\n best_ds_list = [] \n source_files = []\n station_configurations = []\n\n \"\"\" The items contained in the lists in the list below can be removed from the list when the record that was previously stored is removed. 
\"\"\"\n all_list = [all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, best_ds_list, source_files , station_configurations ] # holder of all the above lists\n all_list_name = ['all_combined_obs' , 'all_combined_head', 'all_combined_era5fb' , 'combined_indices' , 'combined_date_time' , 'best_ds_list', 'source_files' ] \n \n removed_record, kept_record = [], []\n \n \"\"\" Dictionary that will contain the merged file. \"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #dt_bestds_dic = {} # store the selected best dataset for each dt \n #date_times=date_times[0:30000]\n tot = len(date_times)\n tt=time.time()\n print('*** Merging ' , tot, ' records ***')\n \n early_datasets = True\n \n self.processed_dt = [] \n \n for dt, c in zip(date_times, range(tot) ): # loop over all the possible date_times \n\n if (c+1)%1000==0:\n print('Analize : ', str(c+1) , '/', str(tot) , ' ', dt , ' ',\n now(time.time()),'{:5.3f}'.format(time.time()-tt ))\n\n delete = self.delete_ds(dt) # check if there is a dataset to delete \n \n \"\"\" Finding if this record is the same as the previous one analyzed, according to the given time_shift \"\"\"\n if c == 0:\n is_same_record = False\n else:\n is_same_record = self.is_same_record( time_shift = self.hour_time_delta , dt = dt)\n \n \"\"\" Updating list of processed datetimes \"\"\"\n self.processed_dt.append(dt) # cannot put it before the check_timeshift or it will check itself \n\n \n cleaned_df_container = {} \n all_len = [] # will hold the length of all the obs_tabs \n \n for k in self.dataset_per_dt[dt].keys() : # checking the list of available datasets \n ''' {'era5_2': ['example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._1:82930.gz.nc', \n 'example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._82930.gz.nc']}\n ''' \n for F in self.dataset_per_dt[dt][k]: # checking the list of available files for the dataset\n \n if data[k][F][\"counter\"] %self.slice_size==0 or data[k][F][\"counter\"] == 0: # loading the data only at specific slices \n load = self.load_obstab_feedback_sliced(datetime=dt, dataset=k, file = F)\n \n data[k][F][\"counter\"] = data[k][F][\"counter\"] + 1 \n \n obs_tab, era5fb_tab = self.make_obstab_era5fb_dic(dataset = k , date_time = dt, File = F )\n\n if len(obs_tab['date_time'][:])==0: # go to next file if obs_tab is empty \n #print('ZERO length')\n continue \n\n all_len.append( len(obs_tab['date_time'][:] ) )\n \n if k not in cleaned_df_container.keys():\n cleaned_df_container[k] = {}\n\n cleaned_df_container[k][F] = {}\n cleaned_df_container[k][F]['obs_tab'] = obs_tab # cleaned dataframe \n cleaned_df_container[k][F]['era5fb_tab'] = era5fb_tab # cleaned dataframe \n \n \"\"\" Merging the different records found in the sifferent sources \"\"\"\n if bool(all_len): # skipping empty container dictionary. 
At this point I certainyl have one valid record \n best_ds, combined_obs_tab, combined_era5fb_tab, combined_head_tab, selected_file, best_file = self.combine_record(dt, container = cleaned_df_container)\n \n if is_same_record: # decide what to keep in case of same record\n temporary_previous = all_combined_obs[-1] # keep the temporary previous record \n\n if best_ds in ['era5_1','era5_2']: # best_ds from era5\n if best_ds_list[-1] not in ['era5_1','era5_2']: # remove previous non era5_1 or era5_2 record \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n\n elif best_ds_list[-1] in ['era5_1','era5_2']:\n if len(combined_obs_tab) <= len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab)\n continue # nothing to do, will keep the previous records -> go to next dt \n \n else: # case where both the current and previous are from era5_1 and era5_2, but the previous has smaller number of data \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # best_ds not from era5\n if best_ds_list[-1] in ['era5_1','era5_2']:\n #print('This best ds is ' , best_ds , ' but I will keep ' , best_ds_list[-1] )\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else:\n if len(combined_obs_tab) < len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue # nothing to do, will keep the previous records -> go to next dt \n \n elif len(combined_obs_tab) > len(all_combined_obs[-1] ): # remove previous, keep current \n for lista in all_list:\n lista.pop() \n #kept_record.append(combined_obs_tab) \n #removed_record.append(temporary_previous)\n \n elif len(combined_obs_tab) == len(all_combined_obs[-1] ): # prefer igra2, otherwise\n if best_ds == 'igra2':\n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # case where data source is not important, I keep the previous and do nothing \n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else: # not the same record, nothing special to do, keep both previous and current \n pass \n else:\n print(' Found an empty record / time shifted record ')\n continue\n \n\n \"\"\" Fill the best_ds list \"\"\"\n best_ds_list.append(best_ds)\n\n \"\"\" Storing the selected file for the source_configuration \"\"\"\n source_files.append(selected_file)\n \"\"\" Selecting the station_configuration \"\"\"\n station_configurations.append(self.data[best_ds][best_file]['station_configuration'] )\n \n \"\"\" Storing the combined era5fb, header and observations tables\"\"\"\n all_combined_era5fb.append(combined_era5fb_tab)\n all_combined_obs .append(combined_obs_tab)\n \n primary, name = self.data[best_ds][best_file]['station_configuration']['primary_id'][0] , self.data[best_ds][best_file]['station_configuration']['station_name'][0] \n #combined_head_tab['primary_station_id'] = [ primary ] * len( combined_head_tab ) \n #combined_head_tab['station_name'] = [ name ] * len( combined_head_tab ) \n \n combined_head_tab['primary_station_id'] = np.array( [primary] )\n combined_head_tab['station_name'] = np.array( [name] )\n \n all_combined_head .append(combined_head_tab)\n\n \"\"\" Dictionary to fill the best_ds for duplicates \"\"\"\n 
#dt_bestds_dic[dt] = {}\n #dt_bestds_dic[dt]['best_ds'] = best_ds\n #dt_bestds_dic[dt]['len'] = len(combined_obs_tab['date_time'])\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n combined_indices.append(len(combined_obs_tab['date_time'])) \n combined_date_time.append(dt)\n\n del cleaned_df_container \n \n \n \n #print(blue + 'Memory used after deleting the cleaned_df_container: ', process.memory_info().rss/1000000000 , cend)\n\n \"\"\" Removing remaining loaded df \"\"\"\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n try:\n del data[k][F]['era5fb_tab']\n print('=== removed era5fb ' , k , F )\n except:\n pass\n try:\n del data[k][F]['observations_table']\n print('=== removed obstab ' , k , F ) \n except:\n pass\n \n \n \"\"\" Saving a numpy dictionary \"\"\"\n print(\" === Saving the numpy dictionary of removed and kept records +++ \")\n #dic_records = { 'kept' : kept_record , 'removed': removed_record }\n #np.save(self.station + '_time_shift_removed_kept.npy',dic_records )\n \n \n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n combined_date_time = np.array(combined_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : combined_date_time.shape } , combined_date_time )\n di['recordtimestamp'].attrs['units']='seconds since 1900-01-01 00:00:00'\n\n \"\"\" Creating the merged indices mi \"\"\"\n mi = [] \n mi.append(0)\n for i in range(len(combined_indices)):\n mi.append( combined_indices[i] + mi[-1] )\n mi.pop()\n pop = np.array(mi) # removing last unecessary index \n di['recordindex'] = ( {'recordindex' : pop.shape } , pop )\n\n\n \"\"\" Creating the combined data \"\"\"\n logging.debug('*** Concatenating the observations_table ' ) \n combined_obs = {}\n #### Writing combined observations_table dic\n logging.info(' ***** Writing the observations_table to the netCDF output ***** ' ) \n for k in all_combined_obs[0].keys(): \n a = np.concatenate([all_combined_obs[i][k][:] for i in range(len(all_combined_obs))])\n if k == 'date_time':\n combined_obs[k]= a \n self.tot_records = len(combined_obs[k])\n self.write_merged(content = 'observations_table', table= {k:a})\n #logging.info('*** Written observations table %s: ', k)\n\n\n #self.tot_records = len(combined_obs['date_time'])\n del all_combined_obs\n print(blue + 'Memory used after deleting all_combined_obs dic: ', process.memory_info().rss/1000000000 , cend )\n \n dateindex = combined_obs['date_time']//86400 \n date_times, indices, counts = np.unique(dateindex, return_counts = True, return_index= True) \n di['dateindex'] = ( {'dateindex' : indices.shape } , indices ) # considers the day only \n del combined_obs\n \n combined_era5fb = {}\n #### Writing combined era5fb_table dic \n for k in all_combined_era5fb[0].keys():\n try:\n #combined_era5fb[k]=np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n #self.write_merged(content = 'era5fb', table= {k:combined_era5fb[k]})\n \"\"\" try replacing , remove combined_era5fb = {} \"\"\"\n a = np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n self.write_merged(content = 'era5fb', table= {k:a})\n logging.debug('*** Written era5fb %s: ', k)\n except:\n print(\"FAILED feedback variable \" , k)\n\n del all_combined_era5fb\n print(blue + 'Memory used after deleting era5fb_tab dic: ', process.memory_info().rss/1000000000 , cend)\n\n\n #### Writing combined header_table dic \n for k in all_combined_head[0].keys():\n print('head variable is', k )\n if ( k == 
'comments' or k == 'history'):\n continue\n try:\n tab=np.concatenate([all_combined_head[i][k][:] for i in range(len(all_combined_head))])\n self.write_merged(content = 'header_table', table= {k: tab}) # { key: np.array([])}\n logging.info('*** Written header table %s: ', k)\n except:\n print('FFF FAILED variable in header table', k )\n\n del all_combined_head\n print(blue + 'Memory used after deleting all_merged head_tab dic: ', process.memory_info().rss/1000000000 , cend)\n \n self.write_merged(content = 'recordindex', table = di) \n self.write_merged(content = 'cdm_tables', table= '')\n\n\n source_conf=xr.Dataset()\n source_files = np.array(source_files).astype(dtype='|S70')\n source_conf['source_file'] = ( {'source_file' : source_files.shape } , source_files )\n self.write_merged(content = 'source_configuration', table= source_conf )\n\n print(0)\n\n\n \"\"\" Concatenation of station_configurations \"\"\"\n station_conf = pd.concat( station_configurations ) \n for k in station_conf.columns:\n try:\n a =np.array( station_conf[k])\n self.write_merged(content = 'station_configuration', table= {k:a})\n logging.debug('*** Written station_configuration %s: ', k)\n except:\n print(\" Failed station_configuration \" , k )\n \n return 0", "def merge(self, otr):\n self._duration = otr.get_start() - self.get_start()\n self._duration += otr.get_duration()\n self._line[3] = self._duration", "def merge(self, projects):\n benchmarks = set()\n for project in projects:\n for runspec in project:\n for classresult in runspec:\n for instresult in classresult.instresults:\n instresult.instance.maxRuns = max(instresult.instance.maxRuns, len(instresult.runs))\n benchmarks.add(runspec.benchmark)\n return BenchmarkMerge(benchmarks)", "def make_entries(self, user=None, projects=None, dates=None,\n hours=1, minutes=0):\n if not user:\n user = self.user\n if not projects:\n projects = self.default_projects\n if not dates:\n dates = self.default_dates\n for project in projects:\n for day in dates:\n self.log_time(project=project, start=day,\n delta=(hours, minutes), user=user)", "def time_tracking(self):\n fb = FreshBooks()\n tg = Toggl()\n self.print_splash()\n self.print(\"Tip: You can always enter 'skip' when you want to skip a time entry.\", format='warn')\n days = self.get_interactive_days() # number of days to go back\n self.print(\"OK, I'll run you through the Toggl time entries of the past %i day(s).\" % (days))\n timestamp = self.get_timestamp(days) # unix timestamp including tz\n time_entries = tg.get_time_entries(timestamp)\n if len(time_entries) == 0:\n self.print(\"No Toggl entries in this time span!\", 'warn')\n return False\n time_entries = self.merge_toggl_time_entries(time_entries) # merge Toggl entries\n fb_projects = fb.get_projects()\n # Loop through merged Toggl time entries:\n for entry in time_entries:\n # Get and convert all necessary info:\n client_id = tg.get_client_id(project_id=entry.get('pid'))\n client_name = tg.get_client_name(client_id)\n project = tg.get_project(entry.get('pid'))\n duration = int(entry['duration']) / 60 / 60 # convert duration to hours\n duration = round(duration * 4 ) / 4 # round hours to nearest .25\n description = self.format_description(project['name'], entry['description'])\n date = str(parser.parse(entry['start']).date())\n # Print info in a nice way:\n self.print_divider(30)\n self.print(\"Description: \" + description)\n self.print(\"Date: \" + date)\n self.print(\"Hours spent: \" + str(duration))\n # Skip if Toggl entry is already booked:\n if 
entry.get('tags') and tg.BOOKED_TAG in entry['tags']:\n self.print(\"Skipping this entry because it is already in Freshbooks.\", 'cross')\n # Skip if duration is below 0.25:\n elif duration < 0.25:\n self.print(\"Skipping this entry because there are less than 0.25 hours spent.\", 'cross')\n # If billable, add to Freshbooks:\n elif entry['billable']:\n # Get FreshBooks project name through interactive search:\n try:\n self.print(\"Project: \\U0001F50D \")\n fb_project_name = self.interactive_search(fb_projects.keys(), client_name)\n # Handle KeyboardInterrupt\n except KeyboardInterrupt:\n answer = input(\"\\nKeyboardInterrupt! Skip current entry or quit time tracking? (S/q) \")\n if answer.lower() == 's' or answer == '':\n self.clear_lines(1)\n self.print(\"Skipping this entry.\", 'cross')\n continue\n else:\n self.clear_lines(1)\n self.print(\"Ok, stopping time tracking.\", 'cross')\n sys.exit()\n # If user requests so, skip this entry:\n self.clear_lines(1)\n if not fb_project_name:\n self.print(\"Skipping this entry.\", 'cross')\n continue\n # Otherwise, add entry to FreshBooks and tag Toggl entry/entries:\n self.print(\"Project: \" + fb_project_name)\n project_id = fb.get_project_id(fb_project_name)\n fb.add_entry(project_id, duration, description, date)\n tg.tag_projects(entry['merged_ids'], tg.BOOKED_TAG)\n # If not billable, skip entry:\n else:\n self.print(\"Skipping this entry because it is not billable.\", 'cross')\n self.print_divider(30)\n answer = input(\"All done! Open FreshBooks in browser to verify? (Y/n) \")\n if answer.lower() == 'y' or answer == '':\n webbrowser.open('https://%s.freshbooks.com/timesheet' % fb.fb_creds['subdomain'])", "def time_budget(self, mode):\n\n def time_budget_analysis(cursor, plot_parameters, by_category=False):\n \"\"\"\n extract number of occurrences, total duration, mean ...\n if start_time = 0 and end_time = 0 all events are extracted\n \"\"\"\n\n categories, out = {}, []\n for subject in plot_parameters[\"selected subjects\"]:\n out_cat, categories[subject] = [], {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\"SELECT distinct modifiers FROM events WHERE subject = ? AND code = ?\",\n (subject, behavior))\n distinct_modifiers = list(cursor.fetchall())\n\n if not distinct_modifiers:\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n if POINT in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? 
\"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = cursor.fetchall()\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? \"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = list(cursor.fetchall())\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": modifier[0], \"duration\": UNPAIRED,\n \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\n \"end time\"] and plot_parameters[\"start time\"] <= rows[idx + 1][0] <= \\\n plot_parameters[\"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n # all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations),\n 3) if len(all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(\n all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(\n statistics.mean(all_event_interdurations), 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n else: # no modifiers\n\n if POINT in self.eventType(behavior).upper():\n\n # if len(selectedObservations) > 1:\n cursor.execute(\n \"SELECT occurence,observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n\n if len(selectedObservations) == 1:\n new_rows = []\n for occurence, observation in rows:\n new_occurence = max(float(plot_parameters[\"start time\"]), occurence)\n new_occurence = min(new_occurence, float(plot_parameters[\"end time\"]))\n new_rows.append([new_occurence, observation])\n rows = list(new_rows)\n\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(statistics.stdev(all_event_interdurations),\n 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n\n cursor.execute(\n \"SELECT occurence, observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]: # include behaviors without events\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": \"-\", \"duration\": 0, \"duration_mean\": 0,\n \"duration_stdev\": \"NA\", \"number\": 0, \"inter_duration_mean\": \"-\",\n \"inter_duration_stdev\": \"-\"})\n continue\n\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior, \"modifiers\": \"NA\",\n \"duration\": UNPAIRED, \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\"end time\"] and \\\n plot_parameters[\"start time\"] <= rows[idx + 1][0] <= plot_parameters[\n \"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations), 3) if len(\n all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n out += out_cat\n\n if by_category: # and flagCategories:\n\n for behav in out_cat:\n\n try:\n category = [self.pj[ETHOGRAM][x][\"category\"] for x in self.pj[ETHOGRAM] if\n \"category\" in self.pj[ETHOGRAM][x] and self.pj[ETHOGRAM][x][\"code\"] == behav[\n 'behavior']][0]\n except:\n category = \"\"\n\n if category in categories[subject]:\n if behav[\"duration\"] not in [\"-\", \"NA\"] and categories[subject][category][\n \"duration\"] != \"-\":\n categories[subject][category][\"duration\"] += behav[\"duration\"]\n else:\n categories[subject][category][\"duration\"] = \"-\"\n categories[subject][category][\"number\"] += behav[\"number\"]\n else:\n categories[subject][category] = {\"duration\": behav[\"duration\"], \"number\": behav[\"number\"]}\n\n out_sorted = []\n for subject in plot_parameters[\"selected subjects\"]:\n for behavior in plot_parameters[\"selected behaviors\"]:\n for row in out:\n if row[\"subject\"] == subject and row[\"behavior\"] == behavior:\n out_sorted.append(row)\n\n ### http://stackoverflow.com/questions/673867/python-arbitrary-order-by\n return out_sorted, categories\n\n def default_value(behav, param):\n \"\"\"\n return value for duration in case of point event\n \"\"\"\n default_value_ = 0\n if ({self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behav} == {\"Point event\"}\n and param in [\"duration\"]):\n default_value_ = \"-\"\n return default_value_\n\n def init_behav_modif():\n \"\"\"\n initialize dictionary with subject, 
behaviors and modifiers\n \"\"\"\n behaviors = {}\n for subj in plot_parameters[\"selected subjects\"]:\n behaviors[subj] = {}\n for behav_modif in distinct_behav_modif:\n behav, modif = behav_modif\n if behav not in behaviors[subj]:\n behaviors[subj][behav] = {}\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n behaviors[subj][behav][param[0]] = default_value(behav, param[0])\n\n if plot_parameters[\"include modifiers\"]:\n behaviors[subj][behav][modif] = {}\n for param in parameters:\n behaviors[subj][behav][modif][param[0]] = default_value(behav, param[0])\n\n return behaviors\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out = \"\"\n not_paired_obs_list = []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n\n if out:\n out = \"Some observations have UNPAIRED state events<br><br>\" + out\n self.results = dialog.Results_dialog()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.pbSave.setVisible(False)\n self.results.pbCancel.setVisible(True)\n\n if not self.results.exec_():\n return\n\n flagGroup = False\n if len(selectedObservations) > 1 and mode != \"synthetic\":\n flagGroup = dialog.MessageDialog(programName, \"Group observations in one time budget analysis?\",\n [YES, NO]) == YES\n\n '''\n # check if state events are paired\n out = \"\"\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId],\n self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n '''\n\n selectedObsTotalMediaLength = Decimal(\"0.0\")\n max_obs_length = 0\n for obsId in selectedObservations:\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n logging.debug(\"media length for {0}: {1}\".format(obsId, obs_length))\n\n if obs_length in [0, -1]:\n selectedObsTotalMediaLength = -1\n break\n max_obs_length = max(max_obs_length, obs_length)\n\n selectedObsTotalMediaLength += obs_length\n\n # an observation media length is not available\n if selectedObsTotalMediaLength == -1:\n # propose to user to use max event time\n if dialog.MessageDialog(programName,\n \"A media length is not available.<br>Use last event time as media length?\",\n [YES, NO]) == YES:\n maxTime = 0 # max length for all events all subjects\n for obsId in selectedObservations:\n if self.pj[OBSERVATIONS][obsId][EVENTS]:\n maxTime += max(self.pj[OBSERVATIONS][obsId][EVENTS])[0]\n logging.debug(\"max time all events all subjects: {}\".format(maxTime))\n selectedObsTotalMediaLength = maxTime\n else:\n selectedObsTotalMediaLength = 0\n\n logging.debug(\"selectedObsTotalMediaLength: {}\".format(selectedObsTotalMediaLength))\n\n if mode in [\"by_behavior\", \"by_category\"]:\n if len(selectedObservations) > 1:\n plot_parameters = 
self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n by_category=(mode == \"by_category\"))\n else:\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=selectedObsTotalMediaLength,\n by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n flagShowExcludeBehaviorsWoEvents=False,\n by_category=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n # check if time_budget window must be used\n if mode in [\"by_behavior\", \"by_category\"] and (flagGroup or len(selectedObservations) == 1):\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n total_observation_time = 0\n for obsId in selectedObservations:\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n logging.debug(\"distinct_modifiers: {}\".format(distinct_modifiers))\n\n for modifier in distinct_modifiers:\n\n logging.debug(\"modifier #{}#\".format(modifier[0]))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence > ?\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n total_observation_time += (max_time - min_time)\n\n cursor.execute(\"DELETE FROM events WHERE observation = ? AND (occurence < ? 
OR occurence > ?)\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n # widget for results visualization\n self.tb = timeBudgetResults(logging.getLogger().getEffectiveLevel(), self.pj)\n\n # observations list\n self.tb.label.setText(\"Selected observations\")\n for obs in selectedObservations:\n self.tb.lw.addItem(obs)\n\n # media length\n if len(selectedObservations) > 1:\n if total_observation_time:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {}\".format(seconds2time(total_observation_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {:0.3f}\".format(float(total_observation_time)))\n else:\n self.tb.lbTotalObservedTime.setText(\"Total observation length: not available\")\n else:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {} to {}\".format(seconds2time(min_time), seconds2time(max_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {:0.3f} to {:0.3f} s\".format(float(min_time), float(max_time)))\n\n if mode == \"by_behavior\":\n\n tb_fields = [\"Subject\", \"Behavior\", \"Modifiers\", \"Total number\", \"Total duration (s)\",\n \"Duration mean (s)\", \"Duration std dev\", \"inter-event intervals mean (s)\",\n \"inter-event intervals std dev\", \"% of total length\"]\n\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\", \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for row in out:\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n column = 0\n for field in fields:\n item = QTableWidgetItem(str(row[field]).replace(\" ()\", \"\"))\n # no modif allowed\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n column += 1\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n item = QTableWidgetItem(str(round(row[\"duration\"] / float(total_observation_time) * 100, 1)))\n else:\n item = QTableWidgetItem(\"NA\")\n\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n if mode == \"by_category\":\n tb_fields = [\"Subject\", \"Category\", \"Total number\", \"Total duration (s)\"]\n fields = [\"number\", \"duration\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for subject in categories:\n\n for category in categories[subject]:\n\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n\n column = 0\n item = QTableWidgetItem(subject)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n column = 1\n if category == \"\":\n item = QTableWidgetItem(\"No category\")\n else:\n item = QTableWidgetItem(category)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n for field in fields:\n column += 1\n item = QTableWidgetItem(str(categories[subject][category][field]))\n item.setFlags(Qt.ItemIsEnabled)\n item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n self.tb.twTB.resizeColumnsToContents()\n\n self.tb.show()\n\n if mode in [\"by_behavior\", \"by_category\"] and 
(\n not flagGroup and len(selectedObservations) > 1) or mode == \"synthetic\":\n\n if mode in [\"by_behavior\", \"by_category\"]:\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma separated values (*.csv)\",\n \"OpenDocument Spreadsheet (*.ods)\",\n \"OpenDocument Workbook (*.ods)\",\n \"Microsoft Excel Spreadsheet (*.xlsx)\",\n \"Microsoft Excel Workbook (*.xlsx)\",\n \"HTML (*.html)\",\n \"Legacy Microsoft Excel Spreadsheet (*.xls)\")\n\n formats = [\"tsv\", \"csv\", \"od spreadsheet\", \"od workbook\", \"xlsx spreadsheet\", \"xlsx workbook\", \"html\",\n \"xls legacy\"]\n\n item, ok = QInputDialog.getItem(self, \"Time budget analysis format\", \"Available formats\", items, 0,\n False)\n if not ok:\n return\n\n outputFormat = formats[items.index(item)]\n extension = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n flagWorkBook = False\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" in outputFormat:\n workbook = tablib.Databook()\n flagWorkBook = True\n if \"xls\" in outputFormat:\n filters = \"Microsoft Excel Workbook *.xlsx (*.xlsx);;All files (*)\"\n if \"od\" in outputFormat:\n filters = \"Open Document Workbook *.ods (*.ods);;All files (*)\"\n\n if QT_VERSION_STR[0] == \"4\":\n WBfileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget analysis\",\n \"\", filters)\n else:\n WBfileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget analysis\", \"\",\n filters)\n if not WBfileName:\n return\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" not in outputFormat: # not workbook\n exportDir = QFileDialog(self).getExistingDirectory(self,\n \"Choose a directory to save the time budget analysis\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if mode == \"synthetic\":\n\n formats_str = (\"Tab Separated Values *.txt, *.tsv (*.txt *.tsv);;\"\n \"Comma Separated Values *.txt *.csv (*.txt *.csv);;\"\n \"Open Document Spreadsheet *.ods (*.ods);;\"\n \"Microsoft Excel Spreadsheet *.xlsx (*.xlsx);;\"\n # \"Pandas dataframe (*.df);;\"\n \"Legacy Microsoft Excel Spreadsheet *.xls (*.xls);;\"\n \"HTML *.html (*.html);;\"\n \"All files (*)\")\n\n while True:\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget report\",\n \"\", formats_str)\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget report\", \"\",\n formats_str)\n\n if not fileName:\n return\n\n extension = \"\"\n availableFormats = (\n \"tsv\", \"csv\", \"ods\", \"xlsx)\", \"xls)\", \"html\") # ) is added to distinguish between xls and xlsx\n for fileExtension in availableFormats:\n if fileExtension in filter_:\n extension = fileExtension.replace(\")\", \"\")\n if not extension:\n QMessageBox.warning(self, programName, \"Choose a file format\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n else:\n break\n\n data_report = tablib.Dataset()\n data_report.title = \"Synthetic time budget\"\n\n parameters = [[\"duration\", \"Total duration\"], [\"number\", \"Number of occurrences\"]]\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"],\n selectedObservations, plot_parameters[\"selected behaviors\"])\n\n cursor.execute(\"SELECT distinct code, modifiers FROM events WHERE subject in ({})\".format(\n \",\".join(\"?\" * len(plot_parameters[\"selected subjects\"]))),\n (plot_parameters[\"selected subjects\"]))\n\n distinct_behav_modif = 
[[rows[\"code\"], rows[\"modifiers\"]] for rows in cursor.fetchall()]\n\n # add selected behaviors that are not observed\n for behav in plot_parameters[\"selected behaviors\"]:\n if [x for x in distinct_behav_modif if x[0] == behav] == []:\n distinct_behav_modif.append([behav, \"-\"])\n\n behaviors = init_behav_modif()\n\n subj_header, behav_header, modif_header, param_header = [\"\", \"\"], [\"\", \"\"], [\"\", \"\"], [\"\",\n \"Total length (s)\"]\n # subj_header, behav_header, modif_header, param_header = [\"\"], [\"\"], [\"\"], [\"\"]\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n param_header.append(param[1])\n\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n modif_header.append(modif)\n param_header.append(param[1])\n\n data_report.append(subj_header)\n data_report.append(behav_header)\n if plot_parameters[\"include modifiers\"]:\n data_report.append(modif_header)\n data_report.append(param_header)\n\n if mode == \"by_behavior\":\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\",\n \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n\n if mode == \"by_category\":\n fields = [\"subject\", \"category\", \"number\", \"duration\"]\n\n for obsId in selectedObservations:\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], [obsId],\n plot_parameters[\"selected behaviors\"])\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n # if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n for modifier in distinct_modifiers:\n\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\"\"\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? 
AND occurence > ?\"\"\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n cursor.execute(\"\"\"DELETE FROM events WHERE observation = ? AND (occurence < ? OR occurence > ?)\"\"\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n\n behaviors = init_behav_modif()\n\n for element in out:\n for param in parameters:\n if not plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][param[0]] = element[param[0]]\n except:\n pass\n if plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][element[\"modifiers\"]][param[0]] = \\\n element[param[0]]\n except:\n pass\n\n columns = []\n columns.append(obsId)\n columns.append(\"{:0.3f}\".format(max_time - min_time))\n # columns.append([obsId])\n\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n columns.append(behaviors[subj][behav][param[0]])\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n columns.append(behaviors[subj][behav][modif][param[0]])\n\n data_report.append(columns)\n\n if mode in [\"by_behavior\", \"by_category\"]:\n rows = []\n # observation id\n rows.append([\"Observation id\", obsId])\n rows.append([\"\"])\n\n labels = [\"Independent variables\"]\n values = [\"\"]\n if INDEPENDENT_VARIABLES in self.pj and self.pj[INDEPENDENT_VARIABLES]:\n for idx in self.pj[INDEPENDENT_VARIABLES]:\n labels.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n if (INDEPENDENT_VARIABLES in self.pj[OBSERVATIONS][obsId]\n and self.pj[INDEPENDENT_VARIABLES][idx][\"label\"] in self.pj[OBSERVATIONS][obsId][\n INDEPENDENT_VARIABLES]):\n values.append(self.pj[OBSERVATIONS][obsId][INDEPENDENT_VARIABLES][\n self.pj[INDEPENDENT_VARIABLES][idx][\"label\"]])\n rows.append(labels)\n rows.append(values)\n rows.append([\"\"])\n\n rows.append(\n [\"Analysis from\", \"{:0.3f}\".format(float(min_time)), \"to\", \"{:0.3f}\".format(float(max_time))])\n rows.append([\"Total length (s)\", \"{:0.3f}\".format(float(max_time - min_time))])\n rows.append([\"\"])\n rows.append([\"Time budget\"])\n\n if mode == \"by_behavior\":\n\n rows.append(fields + [\"% of total length\"])\n # data.headers = fields + [\"% of total media length\"]\n\n for row in out:\n values = []\n for field in fields:\n values.append(str(row[field]).replace(\" ()\", \"\"))\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n # if row[\"duration\"] != \"-\" and row[\"duration\"] != 0 and row[\"duration\"] != UNPAIRED and selectedObsTotalMediaLength:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n if len(selectedObservations) > 1:\n values.append(round(row[\"duration\"] / float(selectedObsTotalMediaLength) * 100, 1))\n else:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n else:\n values.append(\"-\")\n\n rows.append(values)\n\n if mode == \"by_category\":\n rows.append = fields\n # 
data.headers = fields # + [\"% of total media length\"]\n for subject in categories:\n\n for category in categories[subject]:\n values = []\n values.append(subject)\n if category == \"\":\n values.append(\"No category\")\n else:\n values.append(category)\n\n values.append(categories[subject][category][\"number\"])\n values.append(categories[subject][category][\"duration\"])\n\n rows.append(values)\n\n data = tablib.Dataset()\n data.title = obsId\n for row in rows:\n data.append(complete(row, max([len(r) for r in rows])))\n\n if \"xls\" in outputFormat:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n\n if flagWorkBook:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n if \"xls\" in outputFormat:\n if len(data.title) > 31:\n data.title = data.title[:31]\n workbook.add_sheet(data)\n\n else:\n\n fileName = exportDir + os.sep + safeFileName(obsId) + \".\" + extension\n\n if outputFormat in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data.export(outputFormat)))\n\n if outputFormat == \"od spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.ods)\n\n if outputFormat == \"xlsx spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.xlsx)\n\n if outputFormat == \"xls legacy\":\n if len(data.title) > 31:\n data.title = data.title[:31]\n QMessageBox.warning(None, programName, (\n \"The worksheet name <b>{0}</b> was shortened to <b>{1}</b> due to XLS format limitations.\\n\"\n \"The limit on worksheet name length is 31 characters\").format(obsId, data.title),\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n with open(fileName, \"wb\") as f:\n f.write(data.xls)\n\n if mode == \"synthetic\":\n if extension in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data_report.export(extension)))\n if extension in [\"ods\", \"xlsx\", \"xls\"]:\n with open(fileName, \"wb\") as f:\n f.write(data_report.export(extension))\n\n if mode in [\"by_behavior\", \"by_category\"] and flagWorkBook:\n if \"xls\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.xlsx)\n if \"od\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.ods)", "def merge_delta_time(\n username: str | None = None,\n password: str | None = None,\n verbose: bool = False,\n mode: oct = 0o775\n ):\n # retrieve history delta time files\n pull_deltat_file('historic_deltat.data',\n username=username, password=password,\n verbose=verbose, mode=mode\n )\n # read historic delta time file\n historic_file=pyTMD.utilities.get_data_path(['data','historic_deltat.data'])\n historic = np.loadtxt(historic_file, skiprows=2)\n HY = np.floor(historic[:,0])\n HM = 12.0*np.mod(historic[:,0],1.0) + 1.0\n HD = np.ones_like(historic[:,0])\n # retrieve monthly delta time files\n pull_deltat_file('deltat.data',\n username=username, password=password,\n verbose=verbose, mode=mode\n )\n # read modern monthly delta time file\n monthly_file = pyTMD.utilities.get_data_path(['data','deltat.data'])\n monthly = np.loadtxt(monthly_file)\n monthly_time = convert_calendar_decimal(monthly[:,0],monthly[:,1],\n day=monthly[:,2])\n # retrieve daily delta time files\n merge_bulletin_a_files(\n username=username, password=password,\n verbose=verbose, mode=mode\n )\n # read modern daily delta time file from IERS Bulletin A files\n daily_file = pyTMD.utilities.get_data_path(['data','iers_deltat.data'])\n daily 
= np.loadtxt(daily_file)\n daily_time = convert_calendar_decimal(daily[:,0], daily[:,1],\n day=daily[:,2])\n # write to new merged file\n merged_file = pyTMD.utilities.get_data_path(['data','merged_deltat.data'])\n fid = merged_file.open(mode='w', encoding='utf8')\n logging.info(str(merged_file))\n file_format = ' {0:4.0f} {1:2.0f} {2:2.0f} {3:7.4f}'\n # use historical values for times prior to monthly\n ind1, = np.nonzero(historic[:,0] < monthly_time[0])\n for i in ind1:\n args = (HY[i],HM[i],HD[i],historic[i,1])\n print(file_format.format(*args),file=fid)\n # use monthly values for times prior to daily\n ind2, = np.nonzero(monthly_time < np.min(daily_time))\n for i in ind2:\n args = (monthly[i,0],monthly[i,1],monthly[i,2],monthly[i,3])\n print(file_format.format(*args),file=fid)\n # use daily values for all times available\n for i in np.argsort(daily_time):\n args = (daily[i,0],daily[i,1],daily[i,2],daily[i,3])\n print(file_format.format(*args),file=fid)\n # close the merged file and change the permissions mode\n fid.close()\n merged_file.chmod(mode)", "def merge_logs(self):\n ourlog = LogData()\n for l in self.data_set:\n ourlog.entries = ourlog.entries + l.entries\n ourlog.sort_time()\n self.finalized_data = ourlog", "def timings_across_runs(self):\n\n\t\t# first determine individual run duration (to make sure that stimulus timings of all runs are correct)\n\t\trun_duration = []\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tniiFile = NiftiImage(self.runFile(stage = 'processed/mri', run = r))\n\t\t\ttr, nr_trs = round(niiFile.rtime*1)/1000.0, niiFile.timepoints\n\t\t\trun_duration.append(tr * nr_trs)\n\t\trun_duration = np.r_[0,np.cumsum(np.array(run_duration))]\n\n\t\t# timing information stimuli\n\t\tstim_info = []\n\t\trun = 0\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tstim_events = np.loadtxt(self.runFile(stage = 'processed/behavior', run = r, extension = '.txt', postFix = ['stim' ,'all','task']))\n\t\t\tstim_events[:,:2] += run_duration[run]\n\t\t\tstim_info.append(stim_events)\n\t\t\trun += 1\n\n\t\t# save stim_info as text_file\t\n\t\tnp.savetxt(self.runFile(stage = 'processed/behavior', postFix = ['stim_info_all'],extension = '.txt'), np.vstack(stim_info), fmt = '%3.2f', delimiter = '\\t')", "def merge_time_constraints(list_constraints, constraint_one, constraint_two):\n\n list_constraints.remove(constraint_one)\n list_constraints.remove(constraint_two)\n\n ending_time1 = constraint_one.starting_time + constraint_one.duration\n ending_time2 = constraint_two.starting_time + constraint_two.duration\n\n new_name = constraint_one.name + \" and \" + constraint_two.name\n starting_time = min(constraint_one.starting_time, constraint_two.starting_time)\n duration = max(ending_time1, ending_time2) - starting_time\n\n new_constraint = Constraint(new_name, constraint_one.month, constraint_one.day,\n # Starting time\n starting_time,\n # Ending time\n duration,\n False, False, False)\n\n list_constraints.append(new_constraint)\n\n return list_constraints", "def merge_time_metric(self, metric):\n\n self.merge_raw_time_metric(metric.duration, metric.exclusive)", "def join_target(self):\n df = self.get_all_data()\n target_df = self.get_target_df().copy(deep=True)\n target_df['ft_data_dt'] = target_df['ft_data_dt'].astype('datetime64[M]') - pd.DateOffset(months=2) + MonthEnd(1)\n df = df.merge(target_df, on=['idd', 'ft_data_dt'], how='left')\n values = {'target': 0}\n df['target'] = df['target'].replace(np.nan, 0)\n 
self.set_prep_data(df)", "def sync_update(self):\n for rec in self:\n if rec.ks_last_exported_date and rec.ks_sync_date:\n ks_reduced_ks_sync_time = rec.ks_last_exported_date - datetime.timedelta(seconds=30)\n ks_increased_ks_sync_time = rec.ks_last_exported_date + datetime.timedelta(seconds=30)\n if rec.ks_sync_date > ks_reduced_ks_sync_time and rec.ks_sync_date < ks_increased_ks_sync_time:\n rec.ks_sync_status = True\n else:\n rec.ks_sync_status = False\n else:\n rec.ks_sync_status = False", "def updateDateValues(self):\n kwargs = {\"cdb_project_id\": self.project.cdb_project_id}\n cca = Project.MakeChangeControlAttributes()\n kwargs.update(cdb_mdate=sqlapi.SQLdbms_date(cca[u\"cdb_mdate\"]))\n kwargs.update(cdb_mpersno=cca[u\"cdb_mpersno\"])\n\n update_gap_by_view = \"\"\"cdbpcs_taskrel_gaps_v SET gap = new_gap\n WHERE pred_pid = '%(cdb_project_id)s'\n OR succ_pid = '%(cdb_project_id)s'\"\"\" % kwargs\n update_gap_by_select = \"\"\"cdbpcs_taskrel SET gap =\n (SELECT CASE\n WHEN cdbpcs_taskrel.rel_type = 'AA' THEN b.start_time_fcast_offset - a.start_time_fcast_offset\n WHEN cdbpcs_taskrel.rel_type = 'AE' THEN b.end_time_fcast_offset - a.start_time_fcast_offset\n WHEN cdbpcs_taskrel.rel_type = 'EA' THEN b.start_time_fcast_offset - a.end_time_fcast_offset\n WHEN cdbpcs_taskrel.rel_type = 'EE' THEN b.end_time_fcast_offset - a.end_time_fcast_offset\n ELSE 0 END +\n CASE\n WHEN a.milestone = 1 AND b.milestone = 1 AND a.early_position = 0 AND b.early_position = 1 THEN -1\n WHEN a.milestone = 1 AND b.milestone = 0 AND a.early_position = 0 AND cdbpcs_taskrel.rel_type IN ('EA', 'AA') THEN -1\n WHEN a.milestone = 0 AND b.milestone = 1 AND b.early_position = 1 AND cdbpcs_taskrel.rel_type IN ('EA', 'EE') THEN -1\n WHEN a.milestone = 0 AND b.milestone = 0 AND cdbpcs_taskrel.rel_type IN ('EA') THEN -1\n ELSE 0 END +\n CASE\n WHEN a.status = 180 THEN a.days_fcast\n ELSE 0 END\n FROM cdbpcs_task a, cdbpcs_task b\n WHERE cdbpcs_taskrel.cdb_project_id2 = a.cdb_project_id\n AND cdbpcs_taskrel.task_id2 = a.task_id\n AND cdbpcs_taskrel.cdb_project_id = b.cdb_project_id\n AND cdbpcs_taskrel.task_id = b.task_id)\n WHERE cdb_project_id2 = '%(cdb_project_id)s'\n OR cdb_project_id = '%(cdb_project_id)s'\n \"\"\" % kwargs\n\n update_gap_stmt = {\n sqlapi.DBMS_SQLITE: update_gap_by_select,\n sqlapi.DBMS_MSSQL: update_gap_by_view,\n sqlapi.DBMS_ORACLE: update_gap_by_view,\n }\n\n updates = [\n \"\"\"cdbpcs_project\n SET start_time_plan = (SELECT CASE\n WHEN MIN(cdbpcs_task.start_time_fcast) < MIN(cdbpcs_task.start_time_plan)\n THEN MIN(cdbpcs_task.start_time_fcast)\n ELSE MIN(cdbpcs_task.start_time_plan)\n END\n FROM cdbpcs_task\n WHERE cdbpcs_task.cdb_project_id = '%(cdb_project_id)s'\n AND cdbpcs_task.parent_task = ''\n ), end_time_plan = (SELECT CASE\n WHEN MAX(cdbpcs_task.end_time_fcast) > MAX(cdbpcs_task.end_time_plan)\n THEN MAX(cdbpcs_task.end_time_fcast)\n ELSE MAX(cdbpcs_task.end_time_plan)\n END\n FROM cdbpcs_task\n WHERE cdbpcs_task.cdb_project_id = '%(cdb_project_id)s'\n AND cdbpcs_task.parent_task = ''\n ), cdb_mdate = %(cdb_mdate)s,\n cdb_mpersno = '%(cdb_mpersno)s'\n WHERE cdb_project_id = '%(cdb_project_id)s'\n \"\"\" % kwargs,\n (\"\"\"cdbpcs_project\n SET start_time_fcast = start_time_plan,\n end_time_fcast = end_time_plan,\n days_fcast = days,\n duration_fcast = duration,\n cdb_mdate = %(cdb_mdate)s,\n cdb_mpersno = '%(cdb_mpersno)s'\n WHERE cdb_project_id = '%(cdb_project_id)s'\n \"\"\" % kwargs) if self.project.auto_update_time else None,\n \"\"\"cdbpcs_task\n SET total_float = late_finish_offset 
- end_time_fcast_offset,\n cdb_mdate = %(cdb_mdate)s,\n cdb_mpersno = '%(cdb_mpersno)s'\n WHERE cdb_project_id = '%(cdb_project_id)s'\n \"\"\" % kwargs,\n update_gap_stmt[sqlapi.SQLdbms()],\n \"\"\"cdbpcs_task\n SET cdb_mdate = %(cdb_mdate)s,\n cdb_mpersno = '%(cdb_mpersno)s'\n WHERE cdb_project_id = '%(cdb_project_id)s'\n AND task_id IN (SELECT task_id FROM cdbpcs_taskrel\n WHERE cdb_project_id = '%(cdb_project_id)s'\n AND (violation = 0 AND minimal_gap > gap\n OR violation = 1 AND minimal_gap <= gap)\n UNION\n SELECT task_id2 FROM cdbpcs_taskrel\n WHERE cdb_project_id2 = '%(cdb_project_id)s'\n AND (violation = 0 AND minimal_gap > gap\n OR violation = 1 AND minimal_gap <= gap))\n \"\"\" % kwargs,\n \"\"\"cdbpcs_taskrel\n SET violation = CASE\n WHEN minimal_gap <= gap\n THEN 0\n ELSE 1\n END\n WHERE cdb_project_id = '%(cdb_project_id)s'\n OR cdb_project_id2 = '%(cdb_project_id)s'\n \"\"\" % kwargs,\n ]\n for upd in updates:\n if upd:\n sqlapi.SQLupdate(upd)", "def make_all_datetime(self):\n \n logging.info('\\n *** Running make_all_datetime ' )\n \n all_uniques = [] # storing a list with all the unique date_tmes \n which_k_in_dt = {} # list of avilable dataset for each unique date_time, so that when looping over the distinct date_times, only the proper dataset will be read and compared \n\n def add_time_delta(time_offset_value, date_time, dataset):\n \"\"\" Converting to proper date_time adding the time_delta. \n Removes minutes rounding to closest integer hour. \"\"\" \n \n if 'minutes' in time_offset:\n date_time_delta = [ timedelta(minutes = float(i) ) + time_offset_value for i in date_time ]\n elif 'hours' in time_offset:\n date_time_delta = [ timedelta(hours = float(i) ) + time_offset_value for i in date_time ] \n elif 'seconds' in time_offset: \n date_time_delta = [ timedelta(seconds = float(i) ) + time_offset_value for i in date_time ] \n \n \n '''\n if 'era' not in dataset:\n \n if 'minutes' in time_offset:\n date_time_delta = [ timedelta(minutes = float(i) ) + time_offset_value for i in date_time ]\n elif 'hours' in time_offset:\n date_time_delta = [ timedelta(hours = float(i) ) + time_offset_value for i in date_time ] \n elif 'seconds' in time_offset: \n date_time_delta = [ timedelta(seconds = float(i) ) + time_offset_value for i in date_time ] \n else:\n date_time = np.array( [ datetime.strptime(str(int(i)), '%Y%m%d%H') for i in date_time ] )# convert to datetime object \n ''' \n \n #else:\n # print('check if time is wrong !!!! 
(should never happen)')\n # sys.exit() \n #unique_dt = [i for i in [ time_offset_value + j for j in delta ] ] \n #unique_dt = [ i +0 ]\n date_time_delta = [ i.replace(minute=0, second=0) for i in date_time_delta ] \n \n return date_time_delta \n\n\n for k,v in self.datasets.items() : \n self.unique_dates[k] = {}\n \n self.unique_dates[k]['indices'] = {} \n #self.unique_dates[k]['indices_low'] = {} \n #self.unique_dates[k]['index_up'] = {} \n \n \"\"\" recordtimestamp from the input file \"\"\"\n \n \"\"\" Convert to proper date_time using the add_time_delta funtion \"\"\"\n logging.debug(' Calculating the time_delta for : %s', k )\n \n File = nc.Dataset(self.datasets[k]) \n unique = File.variables['recordtimestamp']\n \n self.data[k]['recordtimestamp'] = File.variables['recordtimestamp'][:].data\n self.data[k]['recordindex'] = File.variables['recordindex'][:].data\n \n time_offset = File.groups['observations_table']['date_time'].units\n time_offset_value = time_offset.split('since ') [1] \n time_offset_value = datetime.strptime(time_offset_value, '%Y-%m-%d %H:%M:%S')\n \n #unique = self.data[k]['recordtimestamp']\n \n unique_dt = add_time_delta (time_offset_value, unique, k ) \n \n all_uniques += unique_dt # adding to the total unique date_times \n \n \"\"\" Extracting the recordindex low and up from the input file \"\"\"\n indices = self.data[k]['recordindex']\n \n \"\"\" Loop over all the date_times of each dataset \"\"\"\n for dt, index_low, count in zip (unique_dt, indices, range(len(unique_dt)) ):\n \n try: \n which_k_in_dt[dt].append(k)\n except:\n which_k_in_dt[dt] = []\n which_k_in_dt[dt].append(k) \n \n self.unique_dates[k]['indices'][dt] = {}\n self.unique_dates[k]['indices'][dt]['low'] = index_low \n try:\n index_up = indices[ count + 1 ] # works until the last available recordindex\n except: \n #index_up = len(indices-1) \n index_up = len(indices)-1 \n \n self.unique_dates[k]['indices'][dt]['up'] = index_up\n \n #self.unique_dates[k]['indices'].append(index) \n #self.unique_dates[k]['indices_up'].append(index_up) \n \n self.dataset_per_dt = which_k_in_dt \n self.merged_unique_dates = np.unique(np.array(all_uniques) ) # storing the set of all distinct dt values \n logging.debug('make_all_datetime finished ')", "def add_jira_entries(config, date, dry_run, economic):\n if date is not None:\n jira = Jira(config.items('Jira'))\n for task in jira.get_tasks():\n if task:\n economic.add_time_entry(task, dry_run)", "def wg_task_summary(request, fieldname='workinggroup', view='production', taskdays=3):\n query = {}\n hours = 24 * taskdays\n startdate = datetime.now() - timedelta(hours=hours)\n startdate = startdate.strftime(settings.DATETIME_FORMAT)\n enddate = datetime.now().strftime(settings.DATETIME_FORMAT)\n query['modificationtime__castdate__range'] = [startdate, enddate]\n if fieldname == 'workinggroup':\n query['workinggroup__isnull'] = False\n if view == 'production':\n query['tasktype'] = 'prod'\n elif view == 'analysis':\n query['tasktype'] = 'anal'\n\n if 'processingtype' in request.session['requestParams']:\n query['processingtype'] = request.session['requestParams']['processingtype']\n\n if 'workinggroup' in request.session['requestParams']:\n query['workinggroup'] = request.session['requestParams']['workinggroup']\n\n if 'project' in request.session['requestParams']:\n query['taskname__istartswith'] = request.session['requestParams']['project']\n\n summary = JediTasks.objects.filter(**query).values(fieldname, 'status').annotate(Count('status')).order_by(\n fieldname, 
'status')\n totstates = {}\n tottasks = 0\n wgsum = {}\n for state in const.TASK_STATES:\n totstates[state] = 0\n for rec in summary:\n wg = rec[fieldname]\n status = rec['status']\n count = rec['status__count']\n if status not in const.TASK_STATES:\n continue\n tottasks += count\n totstates[status] += count\n if wg not in wgsum:\n wgsum[wg] = {}\n wgsum[wg]['name'] = wg\n wgsum[wg]['count'] = 0\n wgsum[wg]['states'] = {}\n wgsum[wg]['statelist'] = []\n for state in const.TASK_STATES:\n wgsum[wg]['states'][state] = {}\n wgsum[wg]['states'][state]['name'] = state\n wgsum[wg]['states'][state]['count'] = 0\n wgsum[wg]['count'] += count\n wgsum[wg]['states'][status]['count'] += count\n\n # convert to ordered lists\n suml = []\n for f in wgsum:\n itemd = {}\n itemd['field'] = f\n itemd['count'] = wgsum[f]['count']\n kys = copy.deepcopy(const.TASK_STATES)\n iteml = []\n for ky in kys:\n iteml.append({'kname': ky, 'kvalue': wgsum[f]['states'][ky]['count']})\n itemd['list'] = iteml\n suml.append(itemd)\n suml = sorted(suml, key=lambda x: x['field'])\n return suml", "def aggregatePlans(update):\n out.header('Aggregating plans\\n')\n # For now we just order the plans and return a new list\n update.plans.sort()", "def test_merge_staypoints_time(self, example_staypoints_merge):\n sp, tpls = example_staypoints_merge\n merged_sp = sp.as_staypoints.merge_staypoints(tpls)\n # user 1 - id 7 and 80 merged\n assert sp.loc[7, \"started_at\"] == merged_sp.loc[7, \"started_at\"]\n assert sp.loc[80, \"finished_at\"] == merged_sp.loc[7, \"finished_at\"]\n # user 0 - id 2,6, and 15 merged\n assert sp.loc[2, \"started_at\"] == merged_sp.loc[2, \"started_at\"]\n assert sp.loc[15, \"finished_at\"] == merged_sp.loc[2, \"finished_at\"]", "def _03_merge_plant_status(records, **params):\n plant_names = list(set([record[\"Power Plant Name\"] for record in records]))\n for plant_name in plant_names:\n plant_records = [record for record in records if record[\"Power Plant Name\"] == plant_name]\n plant_statuses = list(set([record[\"Plant Status\"] for record in plant_records]))\n record0 = [record for record in plant_records][0]\n if \"NULL\" in plant_statuses:\n record0[\"Plant Status\"] = \"NULL\"\n elif plant_statuses == [\"Active\"]:\n record0[\"Plant Status\"] = \"Active\"\n elif \"Active\" in plant_statuses or \"Partially Active\" in plant_statuses:\n record0[\"Plant Status\"] = \"Partially Active\"\n # else there's also a possibility of \"Inactive\"\n for record in [record for record in records if record != record0]:\n record[\"Plant Status\"] = None\n return records", "def merge_all_data(self):\n \n logging.info('***** Starting the merging process ')\n\n \n \"\"\" All possible unqiue_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n \n date_times = np.array(date_times) \n \n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_merged_obs , all_merged_head, all_merged_fb , merged_indices , merged_date_time, mi= [] , [] , [] , [] , [], []\n \n \"\"\" Dictionary that will contain the merged file. 
\"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #for dt in date_times[3008:3100]: # loop over all the possible date_times \n \n tot = len(date_times)\n for dt, c in zip(date_times[3008:3100], range(tot) ): # loop over all the possible date_times \n #print('Analize : ', str(c) , '/', str(tot) , ' ', dt , ' ', now(time.time()) )\n \n logging.info('Analize : %s %s /', str(c) , str(tot) )\n \n cleaned_df_container = {} \n chunk = ''\n \n for k in self.dataset_per_dt[dt] : # checking the list of available datasets \n \n index, index_up = self.unique_dates[k]['indices'][dt]['low'] , self.unique_dates[k]['indices'][dt]['up'] # extracting the exact chunk of the dataframe where the data of this are stored \n \n chunk = self.data[k]['dataframe'].iloc[index:index_up]\n \n chunk['date_time'] = dt\n chunk = self.clean_dataframe(chunk) # cleaning from wrong or nan values \n \n if len(chunk)==0:\n continue\n \n cleaned_df_container[k] = {} \n cleaned_df_container[k]['df'] = chunk # cleaned dataframe \n\n \n if all(value == 0 for value in cleaned_df_container.values()):\n logging.debug('No data were found! ')\n continue\n \n merged_observations_table, best_ds, duplicates, header = self.merge_record(dt, container = cleaned_df_container)\n \n merged_observations_table['source_id'] = best_ds # adding extra columns i.e. chosen dataset, other dataset with data, number of pressure levels \n merged_observations_table['z_coordinate_type'] = 1 # only pressure inn [Pa] available at the moment. Check z_coordinate_type table for the correpsonding code \n \n \n \"\"\" Extracting the merged feedback, flagging the advanced_observations_feedback flag = 1\"\"\"\n feedback, merged_obs = self.get_reanalysis_feedback( dt, merged_observations_table , reanalysis='era5fb', best_ds= best_ds)\n all_merged_fb.append(feedback) \n all_merged_obs.append(merged_obs)\n \n \"\"\" Setting the correct report_id in the header table \"\"\"\n merged_report_id = merged_obs['report_id'].values[0] # same report_id as calculated in the observation_table \n header['report_id'] = merged_report_id \n all_merged_head.append(header)\n \n #if len(merged_observations_table) != len(header): \n #print('lengths check best ds: ', best_ds , ' obs_merged: ' , len(merged_observations_table), ' feedback:' , len(feedback) , ' header: ' , len(header) )\n #print( len(merged_observations_table), ' ' , len(feedback) )\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n merged_indices.append(len(merged_observations_table)) \n merged_date_time.append(dt)\n\n\n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n merged_date_time = np.array(merged_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : merged_date_time.shape } , merged_date_time )\n \n \n \"\"\" Creating the merged indices \"\"\"\n mi.append(0)\n for i,ind in zip(merged_indices[0:], range(len(merged_indices[0:]) ) ) :\n mi.append(mi[ind] + i )\n mi = np.array(mi) \n di['recordindex'] = ( {'recordindex' : mi.shape } , mi )\n self.MergedRecordIndex = di \n \n \n \"\"\" Creating the merged dataframes \"\"\"\n logging.debug('*** Concatenating the observations_table dataframes' ) \n merged_obs = pd.concat (all_merged_obs)\n \n self.MergedObs = merged_obs \n logging.debug('*** Finished concatenating theobservations_table dataframes' ) \n \n logging.debug('*** Concatenating the header_table dataframes' ) \n merged_hd = pd.concat (all_merged_head)\n self.MergedHead = merged_hd \n logging.debug('*** Finished concatenating 
the header_table dataframes' ) \n \n logging.debug('*** Concatenating the feedback dataframes' ) \n merged_fb = pd.concat (all_merged_fb)\n self.MergedFeedback = merged_fb \n logging.debug('*** Finished concatenating the feedback dataframes' ) \n\n return 0", "def extract_time_features(self, hash_list, issue_id_list, log_message_info_path):\n\n time_filtering_obj = time_filtering.TimeFiltering(ISSUE_DATE_KEYWORD=self.ISSUE_DATE_KEYWORD, COMMIT_DATE_KEYWORD=self.COMMIT_DATE_KEYWORD)\n # date_issue_dict [dict<issue id, dict<date keyword, date (datetime object)>>] -- extract date for each date keyword for each issue. date keywords are created, updated, and resolutiondate\n date_issue_dict = self.extract_dates(self.db_path)\n\n # repo_dict [dict<commit hash, dict<key name, data>>] -- key name list: author_date, commit_date, author, committer, issue_id\n date_repo_dict = util.load_pickle(log_message_info_path) #\n\n if self.verbose > 0:\n len_issue_id = len(issue_id_list)\n\n time_diff_dict = {}\n time_diff_type_dict = {}\n candidate_issue2hash_dict = {}\n ISSUE_DATE_KEYWORD = time_filtering_obj.ISSUE_DATE_KEYWORD\n COMMIT_DATE_KEYWORD = time_filtering_obj.COMMIT_DATE_KEYWORD\n for idx_issue_id, issue_id in enumerate(issue_id_list):\n\n if self.verbose > 0:\n if (idx_issue_id%1000)==0:\n print(\"time feature -- Done {0}/{1}\".format(idx_issue_id, len_issue_id))\n\n time_diff_dict[issue_id] = {}\n time_diff_type_dict[issue_id] = {}\n for commit_hash in hash_list:\n\n candidate_flag = 0\n for issue_date_key in ['created', 'updated', 'resolutiondate']:\n for commit_date_key in ['author_date', 'commit_date']:\n if date_issue_dict[issue_id][issue_date_key] <= (date_repo_dict[commit_hash][commit_date_key] + self.CANDIDATE_TIME_FILTER_AFTER) and date_issue_dict[issue_id][issue_date_key] >= (date_repo_dict[commit_hash][commit_date_key] - self.CANDIDATE_TIME_FILTER_BEFORE):\n candidate_flag = 1\n\n if candidate_flag==0:\n continue\n \n if not issue_id in candidate_issue2hash_dict:\n candidate_issue2hash_dict[issue_id] = set()\n candidate_issue2hash_dict[issue_id].add(commit_hash)\n\n if date_issue_dict[issue_id][ISSUE_DATE_KEYWORD] <= date_repo_dict[commit_hash][COMMIT_DATE_KEYWORD]:\n time_diff_dict[issue_id][commit_hash] = (date_repo_dict[commit_hash][COMMIT_DATE_KEYWORD] - date_issue_dict[issue_id][ISSUE_DATE_KEYWORD]).total_seconds()\n time_diff_type_dict[issue_id][commit_hash] = 0\n elif date_issue_dict[issue_id][ISSUE_DATE_KEYWORD] > date_repo_dict[commit_hash][COMMIT_DATE_KEYWORD]:\n time_diff_dict[issue_id][commit_hash] = (date_issue_dict[issue_id][ISSUE_DATE_KEYWORD] - date_repo_dict[commit_hash][COMMIT_DATE_KEYWORD]).total_seconds()\n time_diff_type_dict[issue_id][commit_hash] = 1\n else:\n print(\"ERROR\")\n print(commit_hash)\n print(issue_id)\n sys.exit()\n\n return time_diff_dict, time_diff_type_dict, candidate_issue2hash_dict", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n 
target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def merge_record(self, dt, container = ''): \n record_dataset_legth ={} \n \n \n \"\"\" Combining the ncar_t and ncar_w files.\n If both are present, select the ncar_t data and rename it as 'ncar'. \n If only one is present, simply rename it as 'ncar'. \n \"\"\" \n if ('ncar_t' in list(container.keys()) ):\n container['ncar'] = {} \n container['ncar']['df'] = container['ncar_t']['df'] \n \n elif ( 'ncar_w' in list(container.keys()) and 'ncar_t' not in list(container.keys()) ) :\n container['ncar'] = {} \n container['ncar']['df'] = container['ncar_w']['df'] \n\n \n for k in container.keys():\n if k == 'ncar_t' or k == 'ncar_w': \n continue \n record_dataset_legth[k] = len(container[k]['df'] )\n \n \n \"\"\" For now, choosing the dataset with more records of all or igra2>ncar>rest data if available and with same number of records \"\"\"\n best_ds, all_ds , best_datasets, all_ds_reports = 'dummy' , [] , [], [] # total number of records, name of the chosen dataset , list of other possible dataset with available data \n \n most_records = max( [ v for v in record_dataset_legth.values() ] ) # maximum number of records per date_time \n \n for k, v in record_dataset_legth.items(): \n if v == 0:\n continue\n if v == most_records:\n best_datasets.append(k) \n if v > 0:\n all_ds.append(k) # all other datasets with smaller number of records than the maximum found\n try: \n all_ds_reports.append( self.observation_ids_merged[k] * 1000000000 + container[k]['df']['report_id'].values[0] ) # converting the original report id using the same convention as for observation_id\n except:\n all_ds_reports.append( self.observation_ids_merged[k] * 1000000000 + int( (container[k]['df']['report_id'].values[0]).tostring() ) ) # converting the original report id using the same convention as for observation_id\n \n \n #all_ds_reports.append(np.nan)\n #print ( type(container[k]['df']['report_id'].values) )\n #all_ds_reports.append( self.observation_ids_merged[k] * 1000000000 + float(container[k]['df']['report_id'].values[0].decode('latin1') ))\n \n if len(best_datasets) ==0:\n print('wrong??? 
please check')\n return 0,0,0,0 \n \n if 'igra2' in best_datasets:\n best_ds = 'igra2'\n elif 'ncar' in best_datasets:\n best_ds = 'ncar'\n elif 'era5_1' in best_datasets:\n best_ds = 'era5_1' \n else:\n best_ds = best_datasets[0]\n \n \"\"\" Extract container \"\"\" \n selected_df = container[best_ds]['df'].copy(deep = True) # might take extra time, dont know how to get rid of this \n\n try:\n merged_report = self.observation_ids_merged[best_ds] * 1000000000 + int( selected_df['report_id'].values[0].tostring() ) \n except:\n merged_report = np.nan \n\n \"\"\" Calculate new unique observation id \"\"\"\n try: \n obs_ids_merged = [ self.observation_ids_merged[best_ds] * 1000000000 + int( i.tostring() ) for i in selected_df['observation_id'] ]\n except:\n obs_ids_merged = [ np.nan for i in selected_df['observation_id'] ]\n \n \n selected_df['observation_id'] = obs_ids_merged\n \n \"\"\" Calculate new unique report id \"\"\" \n selected_df['report_id'] = merged_report\n\n \"\"\" Returning a string with the alternative available datasets data \"\"\"\n if len(all_ds_reports) > 1: \n duplicates = \",\".join( [ str(i) for i in all_ds_reports] )\n else:\n duplicates = str(all_ds_reports[0])\n \n \n \"\"\" Extracting the merged header_table.\n Again, must consider the special case where best_ds == ncar. \n Note that the header table *should* be identical for ncar_w or ncar_t \"\"\" \n if best_ds != 'ncar':\n header = self.get_header_table(dt, ds= best_ds, all_ds = duplicates , length= len(selected_df) )\n \n elif ( best_ds == 'ncar' and 'ncar_t' in list(container.keys()) ) :\n header = self.get_header_table(dt, ds = 'ncar_t', all_ds = duplicates, length= len(selected_df))\n \n elif ( best_ds == 'ncar' and 'ncar_t' not in list(container.keys()) ) :\n header = self.get_header_table(dt, ds = 'ncar_w', all_ds = duplicates, length= len(selected_df) ) \n \n logging.debug('I use %s record since it has more entries: %s but other available datasets are : %s' , best_ds , str(most_records) , all_ds ) \n \n #print ('duplicates are: ', duplicates)\n return selected_df, best_ds , duplicates, header", "def format_task(oldtask):\n\n newtask = {\n 'name': oldtask['title'],\n 'notes': [],\n 'priority': format_priority(oldtask['priority']),\n 'repeat': format_repeat(oldtask['recurrence'], oldtask['repeat_until']),\n # make a copy so we can modify it\n 'tags': list(oldtask['tags']),\n }\n\n if oldtask['notes']:\n newtask['notes'].append(oldtask['notes'])\n\n # datetime\n for ts in ('due_date',):\n newtask[ts] = format_date(oldtask[ts])\n\n # seconds to minutes\n # RTM doesn't do 'elapsed'.\n for ts in ('estimated',):\n newtask[ts] = format_estimate(oldtask[ts])\n\n # bool (RTM doesn't take dates for these).\n for ts in ('completed', 'deleted'):\n newtask[ts] = bool(oldtask[ts])\n if newtask[ts]:\n newtask['tags'].append('astrid-' + ts)\n\n if newtask['notes']:\n newtask['tags'].append('astrid-notes')\n\n if 'alarms' in oldtask and oldtask['alarms']:\n newtask['tags'].append('astrid-alarms')\n newtask['notes'].append(\"\\n\".join(['astrid-alarms:'] + [\n ts.isoformat() for ts in oldtask['alarms']\n ]))\n\n if not newtask['notes']:\n newtask['notes'] = None\n\n newtask['smart_add'] = smart_add(\n name = newtask['name'],\n due_date = format_date(oldtask['due_date'], local=True),\n priority = newtask['priority'],\n tags = newtask['tags'],\n repeat = format_repeat(oldtask['recurrence'],\n oldtask['repeat_until'], local=True,\n ),\n estimated = newtask['estimated'],\n )\n\n return newtask", "def smartAssigning(names, 
statuses, projects, tasks):\n\n\n # first remove team members who are on vacation\n for i in range(len(names)):\n print range(len(names))\n print statuses[i]\n if statuses[i] == True:\n names.pop(i)\n statuses.pop(i)\n projects.pop(i)\n tasks.pop(i)\n\n # then, compare team member projects and tasks to find min\n\n for i in range(len(names)):\n if projects[i] != min(projects):\n names.pop(i)\n statuses.pop(i)\n projects.pop(i)\n tasks.pop(i)\n\n if len(names) == 1:\n return names[0]\n\n for i in range(len(names)):\n if tasks[i] != min(tasks):\n names.pop(i)\n statuses.pop(i)\n projects.pop(i)\n tasks.pop(i)\n\n #return names[0]\n\n\n # first, organize team members in a dict for quick lookup\n # team_member_availability = {}\n \n # binary_statuses = [] # converting to 1 or 0 to compare later\n \n # for s in statuses:\n # if s == True:\n # binary_statuses.append(1)\n # elif s == False:\n # binary_statuses.append(0)\n \n # for i in range(len(names)):\n # if names[i] not in team_member_availability:\n # team_member_availability[names[i]] = (binary_statuses[i], projects[i], tasks[i])\n \n # team_member_availability looks like:\n # { 'John': (false, 2, 16), \n # 'Martin': (false, 1, 5) }\n \n # next, determine which team member is more available:", "def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]", "def _01_merge_plant_project_fuels(records, **params):\n fuels = OrderedDict()\n for record in records:\n project_key = (record[\"Power Plant Name\"], record[\"Project Name\"])\n plant_keys = [\n (r[\"Power Plant Name\"], r[\"Project Name\"])\n for r in records\n if r[\"Type\"] == \"Plant\" and r[\"Power Plant Name\"] == record[\"Power Plant Name\"]\n ]\n if len(plant_keys) == 0:\n plant_keys = [\n (r[\"Power Plant Name\"], r[\"Project Name\"]) for r in records if r[\"Type\"] == \"Plant\"\n ]\n plant_key = plant_keys[0]\n if project_key not in fuels:\n fuels[project_key] = []\n if plant_key not in fuels:\n fuels[plant_key] = []\n\n for field in [\n field\n for field in record\n if re.match(r\"^(Plant|Project) Fuel \\d+$\", field) and record[field] not in [None, \"NA\"]\n ]:\n fuel_vals = record[field].split(\";\")\n category_vals = record[field + \" Category\"].split(\";\") # parallel (see normalize)\n for fuel_val in fuel_vals:\n category_val = category_vals[fuel_vals.index(fuel_val)]\n if \"Project\" in field and (fuel_val, category_val) not in fuels[project_key]:\n fuels[project_key].append((fuel_val, category_val))\n # also put all fuel values into the Plant record\n if (fuel_val, category_val) not in fuels[plant_key]:\n fuels[plant_key].append((fuel_val, category_val))\n\n # reduce\n record[field] = None\n record[field + \" Category\"] = None\n\n # for each key in fuels, put all values in the first record with the same key\n for key in fuels:\n project_records = [\n record\n for record in records\n if (record[\"Power Plant Name\"], record[\"Project Name\"]) == key\n ]\n if len(project_records) > 0:\n project_record = project_records[0]\n for i, fuel in enumerate(list(set(fuels[key]))):\n project_record[f\"{project_record['Type']} Fuel {i+1}\"] = fuel[0]\n project_record[f\"{project_record['Type']} Fuel {i+1} Category\"] = fuel[1]\n else:\n log.error(\"KEY NOT FOUND: %r\" % (key,))\n\n return records", "def TimeDuration(employee, 
date):\n # fetch data from database ActionTime to get difference between each checkIn and checkout\n FetchDateTime = c.execute(\"\"\"SELECT ActionTime\n FROM AttendanceActions\n JOIN Attendance A on A.Id = AttendanceActions.AttendanceId\n WHERE employee=:employee\n AND ActionTime LIKE (:date)\"\"\", {'employee': employee, 'date': '%' + date + '%'})\n # get all rows from database\n DateTime_as_tuple = c.fetchall()\n\n # empty list to store formatted datetime\n # Dt store datetime as string coming form database\n Dt = []\n # StoreDate store converted datetime into datetime type using datetime lib\n StoreDate = []\n\n # append all tuple value in list coming from database to list \"clean\"\n for DTimes in DateTime_as_tuple:\n for ele in DTimes:\n Dt.append(ele)\n\n # get length of that list to use it in important process\n Dt_length = len(Dt)\n\n # convert all element in Dt list from string to datetime type and append it to StoreDate database\n for i in range(Dt_length):\n StoreDate.append(datetime.datetime.strptime(Dt[i], Timeformate))\n\n # Create action variable to use it for missing checkIn or checkout by employees\n Action = None\n\n # this condition know if there are missing checkIN or out by employees\n if Dt_length % 2 != 0:\n # Get last action by employee to know tha last action in the days\n FetchAction = c.execute(\"\"\"select Action\n from AttendanceActions\n join Attendance A on A.Id = AttendanceActions.AttendanceId\n where employee= :employee AND ActionTime like :datetime;\"\"\",\n {'employee': employee, 'datetime': Dt[Dt_length-1]})\n # convert it to string should be string as \"checkIn\" or \"checkOut\"\n Action = c.fetchone()\n Action = ''.join(Action)\n\n # get last date realted to last action by employees\n year = datetime.datetime.strptime(Dt[Dt_length-1], Timeformate).year\n month = datetime.datetime.strptime(Dt[Dt_length-1], Timeformate).month\n day = datetime.datetime.strptime(Dt[Dt_length-1], Timeformate).day\n\n # if last action is checkIn employee miss to check out and the time closed by midnight as required\n if Action == \"CheckIn\":\n MidNight = datetime.datetime(year, month, day, 23, 59, 59)\n # append that day to StoreDate list to contribute to calculation\n StoreDate.append(MidNight)\n\n # if last action is checkout employee forget to check in time calculate passed in previous day\n if Action == \"CheckOut\":\n PreviousDay = datetime.datetime(year, month, day-1, 23, 59, 59)\n StoreDate.append(PreviousDay)\n # add reverse method here to undo comming reverse that not required if that checkout case\n StoreDate.reverse()\n\n # reverse list to subtract last day from next day\n StoreDate.reverse()\n # make total variable to collect the number for each two datetime different\n total = datetime.datetime.now().replace(second=0, microsecond=0)\n\n # check out if the length of list is = 1 or not to avoid index error\n # and give j variable the will subtracted from Dt_length in range() method in next loop\n # if Dt_length = 1 and subtract 1 from it the loop will never enter\n # so put j= 0 if equal 1 and j = 1 otherwise\n if Dt_length == 1:\n j = 0\n else:\n j = 1\n\n # make variable that will use in loop to be independent from loop variable i\n n = 0\n # final step to calculate time duration for employee\n for i in range(Dt_length-j):\n value = StoreDate[n] - StoreDate[n+1]\n total += value\n n += 2\n # time correction in required formate\n duration = total-datetime.datetime.now().replace(second=0, microsecond=0)\n duration = duration + datetime.timedelta(seconds=1)\n 
return duration", "def _sum_hours(time_entries):\n hours = sum(map(lambda x: x.duration, time_entries), 0)\n hours = round(hours, DECIMALS_HOUR)\n return hours", "def _join_activity(df_activity: pd.DataFrame, df_sens: pd.DataFrame):\n df_sens[\"task_id\"] = 0\n for idx, row in df_activity.iterrows():\n df_sens.loc[\n (df_sens[\"sys_time\"] >= row[\"start_time\"])\n & (df_sens[\"sys_time\"] <= row[\"end_time\"]),\n \"task_id\",\n ] = row[\"task_id\"]\n\n # Map 24 task ids down to 6 task types\n df_sens[\"task_type\"] = (\n df_sens[\"task_id\"].astype(\"int8\").replace(HMOG_TASK_IDS_TYPES)\n )\n df_sens = df_sens.drop(columns=[\"task_id\"])\n\n return df_sens", "def combine_days(action, pdata, debug=False):\n assert action == 'sum' or action == 'mean'\n\n starts, ends, values, weight_sum = [], [], [], []\n\n def get_time_delta_in_hours(start, end):\n \"\"\" NOTE assumes no overflows or wraps or nothing \"\"\"\n dhour = end.hour - start.hour\n dmin = end.minute - start.minute\n dsec = end.second - start.second\n dtime = timedelta(hours=dhour, minutes=dmin, seconds=dsec) # NOTE rounds to nearest second\n # print start, end, dtime\n return float(dtime.seconds) / (60*60)\n def add_new_day(dstart, dend, dval):\n weight = '-'\n starts.append(dstart)\n ends.append(dend)\n if action == 'sum':\n values.append(dval)\n elif action == 'mean':\n weight = float(get_time_delta_in_hours(dstart, dend))\n values.append(weight*dval)\n weight_sum.append(weight)\n else:\n raise Exception('invalid action'+action)\n if debug:\n print ' new day', dstart, dend, weight, dval\n def increment_day(dstart, dend, dval):\n ends[-1] = dend\n weight = '-'\n if action == 'sum':\n values[-1] += dval\n elif action == 'mean':\n weight = float(get_time_delta_in_hours(dstart, dend))\n values[-1] += weight * dval\n weight_sum[-1] += weight\n else:\n raise Exception('invalid action'+action)\n if debug:\n print ' increment', starts[-1], dend, weight, dval, ' ', values[-1]\n def incorporate_value(istart, iend, ival):\n # if debug:\n # print ' incorporate', istart, iend, ival\n if len(values) == 0 or ends[-1].day != istart.day:\n add_new_day(istart, iend, ival)\n else:\n increment_day(istart, iend, ival)\n\n for ival in range(len(pdata['values'])):\n start = pdata['time-layout']['start'][ival]\n if len(pdata['time-layout']['end']) > 0: # some of them only have start times\n end = pdata['time-layout']['end'][ival]\n elif len(pdata['time-layout']['start']) > ival+1: # so use the next start time minus a ms if we can\n end = pdata['time-layout']['start'][ival+1] - timedelta(milliseconds=-1)\n else:\n end = pdata['time-layout']['start'][ival] + timedelta(hours=6) # otherwise just, hell, add six hours\n if debug:\n print ' day %3d-%-3d hour %3d-%-3d %s' % (start.day, end.day, start.hour, end.hour, pdata['values'][ival])\n\n # skip null values (probably from cloud cover)\n if pdata['values'][ival] == None:\n if debug:\n print ' skipping null value'\n continue\n\n val = float(pdata['values'][ival])\n if start.day == end.day:\n incorporate_value(start, end, val)\n else:\n if debug:\n print ' start (%s) and end (%s) days differ' % (start, end)\n assert start.day + 1 == end.day # for now only handle the case where they differ by one day\n midnight = datetime(year=end.year, month=end.month, day=end.day, hour=0, minute=0, second=0)\n if action == 'sum':\n hours_before = get_time_delta_in_hours(start, midnight) #24 - start.hour\n hours_after = get_time_delta_in_hours(midnight, end) #end.hour\n val_before = val * float(hours_before) / (hours_before + 
hours_after)\n val_after = val * float(hours_after) / (hours_before + hours_after)\n if debug:\n print ' apportioning between',\n print 'first %f * %f / (%f + %f) = %f' % (val, hours_before, hours_before, hours_after, val_before),\n print 'and second %f * %f / (%f + %f) = %f' % (val, hours_after, hours_before, hours_after, val_after)\n else:\n val_before, val_after = val, val\n incorporate_value(start, midnight + timedelta(milliseconds=-1), val_before) #start + timedelta(hours=24-start.hour, milliseconds=-1), val_before)\n incorporate_value(midnight, end + timedelta(milliseconds=-1), val_after) # end - timedelta(hours=end.hour), end, val_after)\n\n dailyvals = {}\n for ival in range(len(values)):\n dailyvals[int(starts[ival].day)] = values[ival]\n if action == 'mean':\n # if debug:\n # print 'total', get_time_delta_in_hours(starts[ival], ends[ival])\n dailyvals[int(starts[ival].day)] /= weight_sum[ival] #get_time_delta_in_hours(starts[ival], ends[ival])\n\n if debug:\n print ' final:'\n for key in sorted(dailyvals.keys()):\n print ' ', key, dailyvals[key]\n return dailyvals", "def agg_by_project(timesheet, date1=None, date2=None, freq=None):\n f = slice_by_dates(timesheet, date1, date2)\n\n if freq is not None:\n f = f.groupby('project').apply(\n lambda x: x.set_index('date')[['duration']].resample(freq\n ).sum().fillna(0)).reset_index()\n f = f[['date', 'project', 'duration']].sort_values(\n 'date')\n f['period'] = f['date'].map(lambda x: pd.Period(x, freq))\n f['start_date'] = f['period'].map(lambda x: x.start_time)\n f['end_date'] = f['period'].map(lambda x: x.end_time)\n else:\n start_date, end_date = f['date'].min(), f['date'].max()\n f = f.groupby('project').agg({'duration': np.sum}\n ).reset_index()\n f['start_date'] = start_date\n f['end_date'] = end_date\n\n return f[['start_date', 'end_date', 'project', 'duration']].copy()", "def finish_project_processing(year):\n\n generators = filter_plants_by_region_id(13, year)\n generators = assign_heat_rates_to_projects(generators, year)\n existing_gens = generators[generators['Operational Status']=='Operable']\n proposed_gens = generators[generators['Operational Status']=='Proposed']\n\n fname = 'existing_generation_projects_{}.tab'.format(year)\n with open(os.path.join(outputs_directory, fname),'w') as f:\n existing_gens.to_csv(f, sep='\\t', encoding='utf-8', index=False)\n\n uprates = pd.DataFrame()\n new_gens = pd.DataFrame()\n for idx in proposed_gens.index:\n pc = proposed_gens.loc[idx,'EIA Plant Code']\n pm = proposed_gens.loc[idx,'Prime Mover']\n es = proposed_gens.loc[idx,'Energy Source']\n existing_units_for_proposed_gen = existing_gens[\n (existing_gens['EIA Plant Code'] == pc) &\n (existing_gens['Prime Mover'] == pm) &\n (existing_gens['Energy Source'] == es)]\n if len(existing_units_for_proposed_gen) == 0:\n new_gens = pd.concat([new_gens, pd.DataFrame(proposed_gens.loc[idx,:]).T], axis=0)\n elif len(existing_units_for_proposed_gen) == 1:\n uprates = pd.concat([uprates, pd.DataFrame(proposed_gens.loc[idx,:]).T], axis=0)\n else:\n print \"There is more than one option for uprating plant id {}, prime mover {} and energy source {}\".format(int(pc), pm, es)\n\n fname = 'new_generation_projects_{}.tab'.format(year)\n with open(os.path.join(outputs_directory, fname),'w') as f:\n new_gens.to_csv(f, sep='\\t', encoding='utf-8', index=False)\n\n fname = 'uprates_to_generation_projects_{}.tab'.format(year)\n with open(os.path.join(outputs_directory, fname),'w') as f:\n uprates.to_csv(f, sep='\\t', encoding='utf-8', index=False)", "def 
addDurationM9(df):\n # Calculate duration in M9 buffer (time without food)\n dt_0 = [dt.datetime.strptime(t,'%H:%M') for t in df['time_washed']]\n dt_1 = [dt.datetime.strptime(t,'%H:%M') for t in df['middle_wormsorter_time']]\n \n df['duration_M9_seconds'] = [(r - w).total_seconds() for r, w in zip(dt_1, dt_0)]\n\n return df", "def run(self):\n modify_tasks = filter(self._task_filter, acm.FAelTask.Select(''))\n print([task.Name() for task in modify_tasks])\n for task in modify_tasks:\n #new_task = task.Clone()\n self._update(task)\n try:\n task.Commit()\n except:\n print('Skipping: Task already exists')", "def _get_hours_pro_entry(time_entries):\n events = []\n for event in time_entries:\n start_time = datetime.datetime(\n date.today().year,\n date.today().month,\n date.today().day,\n event.start_at.hour,\n event.start_at.minute,\n event.start_at.second,\n )\n end_time = datetime.datetime(\n date.today().year,\n date.today().month,\n date.today().day,\n event.finish_at.hour,\n event.finish_at.minute,\n event.finish_at.second,\n )\n\n timediff = end_time - start_time\n events.append(\n {\n \"worked_hours\": round(timediff.total_seconds() / 3600, DECIMALS_HOUR),\n \"event\": event,\n }\n )\n return events", "def ensure_timebox_trackers_accurate(self):\n\n for item in self.api_wrapper.get_all_items():\n\n if item['content'].startswith('[TBS'):\n total_timeboxes = 0\n completed_timeboxes = 0\n\n master_id = item['id']\n\n # Feels inefficient to loop through everything again\n for sub_item in self.api_wrapper.get_all_items():\n if sub_item['parent_id'] == master_id:\n total_timeboxes += 1\n\n if sub_item['checked'] == 1:\n completed_timeboxes += 1\n\n timebox_tracker_match = TIMEBOX_TRACKER_RE.search(item['content'])\n\n if timebox_tracker_match:\n new_content = ('[TBS {}/{}]'.format(completed_timeboxes, total_timeboxes)\n + timebox_tracker_match.group('Description'))\n item.update(content=new_content)\n\n # Collapse the item regardless of initial state\n item.update(collapsed=1)", "def generateMigrateTaskStatistics(self, taskTree, fn):\n self._getTaskStatistics(taskTree, self.parent)\n for key in self.taskStat:\n elapsedTime = time.strftime(\"%H:%M:%S\", time.gmtime((self.taskStat[key]['EndedTS'] - self.taskStat[key]['StartedTS']).total_seconds()))\n convertedStartTS = self.taskStat[key]['StartedTS'].replace(tzinfo=self.from_zone).astimezone(self.to_zone)\n convertedEndTS = self.taskStat[key]['EndedTS'].replace(tzinfo=self.from_zone).astimezone(self.to_zone)\n self.taskStat[key]['StartedTS'] = datetime.strftime(convertedStartTS, \"%Y-%m-%dT%H:%M.%S\")\n self.taskStat[key]['EndedTS'] = datetime.strftime(convertedEndTS, \"%Y-%m-%dT%H:%M.%S\")\n self.taskStat[key]['Elapsed'] = elapsedTime\n with open(fn, 'a+b') as fp:\n data = [['Task Group Name', 'Started Time', 'Ended Time', 'Elapsed Time', 'SubTask#', 'Task State']]\n logger.info('Generating migration task report', also_console=True)\n for i in range(len(self.groupingResShort)):\n if self.groupingResShort[i] in self.taskStat.keys():\n data.append([self.taskStat[self.groupingResShort[i]]['Description'],\n self.taskStat[self.groupingResShort[i]]['StartedTS'],\n self.taskStat[self.groupingResShort[i]]['EndedTS'],\n self.taskStat[self.groupingResShort[i]]['Elapsed'],\n str(self.taskStat[self.groupingResShort[i]]['count']),\n self.taskStat[self.groupingResShort[i]]['State']])\n maxRow = [max(map(len, col)) for col in zip(*data)]\n header = \" \".join((' ' + val.ljust(maxLength) + ' ' for val, maxLength in zip(data[0], maxRow)))\n fp.write(\"-\" * 
len(header) + '\\r\\n')\n fp.write(header + '\\r\\n')\n logger.info(header, also_console=True)\n fp.write(\"-\" * len(header) + '\\r\\n')\n data.remove(data[0])\n for row in data:\n tablerow = \" \".join((' ' + val.ljust(maxLength) + ' ' for val, maxLength in zip(row, maxRow)))\n fp.write(tablerow + '\\r\\n')\n logger.info(tablerow, also_console=True)\n fp.write(\"-\" * len(header) + '\\r\\n\\r\\n')\n logger.info('Completed generating migration task report %s' % fn)\n return self.taskStat", "def get_duration_data(durations, owner_repo=\"edx/edx-platform\", since=None):\n open_issues_generator = itertools.izip(\n get_pulls(owner_repo, state=\"open\", org=True),\n itertools.repeat(\"open\")\n )\n closed_issues_generator = itertools.izip(\n get_pulls(owner_repo, state=\"closed\", since=since, org=True),\n itertools.repeat(\"closed\")\n )\n\n for issue, state in itertools.chain(open_issues_generator, closed_issues_generator):\n created_at = issue.created_at\n if state == \"open\":\n closed_at = datetime.utcnow()\n else:\n closed_at = issue.closed_at\n issue.duration = closed_at - created_at\n\n if DEBUG:\n print(\"{pr.id}: {pr.intext} {state}\".format(\n pr=issue, state=state\n ), file=sys.stderr)\n\n durations[state][issue.intext].append(issue)", "def gather_project_entries(self):\n\n user_inputs = [\n self.customer_name.get(), self.proj_date.get(),\n self.proj_descrpt.get(), self.proj_estdatest.get(),\n self.proj_estdateend.get(), self.proj_estbudget.get(),\n self.proj_actdatest.get(), self.proj_actdateend.get(),\n self.proj_actcost.get()\n ]\n\n return self.check_input_empty(user_inputs)", "def merge_entries(mmf_entries, kvs_entries, start, indicator_timeout, stats):\n rm_entries = []\n retained_indicators = 0\n\n for mmfe in mmf_entries:\n kvse = kvs_entries.get(mmfe['indicator'])\n if kvse is not None:\n kvse['is_present'] = True\n mmfe['_key'] = kvse['_key']\n\n for info in kvs_entries.itervalues():\n if info['is_present']:\n pass\n elif info['splunk_last_seen'] + indicator_timeout < start:\n rm_entries.append(info['_key'])\n else:\n retained_indicators += 1\n\n return rm_entries, retained_indicators", "def get_jira_tasks(start_date, end_date, pj_name=project_name):\n\n start_date=start_date.replace(\"-\",'/')\n end_date=end_date.replace(\"-\",'/')\n try:\n jira = JIRA(options=options, basic_auth=(usr, pas))\n except JIRAError as e:\n if e.status_code == 401:\n print (\"Login to JIRA failed.\")\n jq = \"\"\"project = {} \n and duedate >= \"{}\" \n and duedate <= \"{}\" \n order by created DESC\"\"\".format(pj_name, start_date,end_date )\n issues = jira.search_issues(jq)\n columns = ['year','month','day', 'name','timeoriginalestimate','timespent']\n data = pd.DataFrame([], columns=columns)\n for issue in issues:\n name = \"NoAssign\"\n if issue.fields.assignee:\n name = issue.fields.assignee.displayName\n (year, month, day) = issue.fields.duedate.split(\"-\")\n timeoriginalestimate = issue.fields.timeoriginalestimate if issue.fields.timeoriginalestimate is not None else 0\n timespent = issue.fields.timespent if issue.fields.timespent is not None else 0\n tmp_df = pd.DataFrame([[year, month, day, name, timeoriginalestimate/3600, timespent/3600]], columns=columns)\n data = data.append(tmp_df)\n\n data.reset_index(drop=True, inplace=True)\n return data", "def show_project_dates (self,\r\n entrylist=None,\r\n determinant='ymd',\r\n dictionaryobject=None):\r\n\r\n if not dictionaryobject:\r\n if 'PROJ'+determinant not in self.default_dict['date_dict']:\r\n 
self.default_dict['date_dict']['PROJ'+determinant] = {}\r\n dictionaryobject = self.default_dict['date_dict']['PROJ'+determinant]\r\n self.default_dict['date_dict']['PROJ'+determinant].clear()\r\n\r\n\r\n if entrylist is None:\r\n\r\n entryset = self.apply_limit(self.find_within(indexfrom=Index(0),orequal=True))\r\n indexrange=False\r\n else:\r\n indexrange=True\r\n entryset = set(entrylist)\r\n\r\n\r\n\r\n for project_temp in self.default_dict['projects'].get_all_projects():\r\n dates_temp = self.default_dict['projects'].get_date_list(project=project_temp)\r\n dates_temp = {clip_date(d_temp,determinant) for d_temp in dates_temp}\r\n for date in dates_temp:\r\n if date not in dictionaryobject:\r\n dictionaryobject[date] = set()\r\n if not indexrange:\r\n dictionaryobject[date].add(project_temp)\r\n else:\r\n if entryset.intersection({str(x_temp)\r\n for x_temp\r\n in self.default_dict['projects']\r\n .get_all_indexes(project=project_temp)}):\r\n dictionaryobject[date].add(project_temp)", "def example_staypoints_merge():\n p1 = Point(8.5067847, 47.4)\n\n t1 = pd.Timestamp(\"1971-01-01 00:00:00\", tz=\"utc\")\n t2 = pd.Timestamp(\"1971-01-02 05:00:00\", tz=\"utc\")\n t3 = pd.Timestamp(\"1971-01-02 06:45:00\", tz=\"utc\")\n t4 = pd.Timestamp(\"1971-01-02 08:55:00\", tz=\"utc\")\n t45 = pd.Timestamp(\"1971-01-02 08:57:00\", tz=\"utc\")\n t5 = pd.Timestamp(\"1971-01-02 09:00:00\", tz=\"utc\")\n t6 = pd.Timestamp(\"1971-01-02 09:20:00\", tz=\"utc\")\n\n list_dict = [\n {\"id\": 1, \"user_id\": 0, \"started_at\": t1, \"finished_at\": t2, \"geom\": p1, \"location_id\": 1},\n {\"id\": 5, \"user_id\": 0, \"started_at\": t2, \"finished_at\": t2, \"geom\": p1, \"location_id\": 2},\n {\"id\": 2, \"user_id\": 0, \"started_at\": t3, \"finished_at\": t4, \"geom\": p1, \"location_id\": 2},\n {\"id\": 6, \"user_id\": 0, \"started_at\": t4, \"finished_at\": t45, \"geom\": p1, \"location_id\": 2},\n {\"id\": 15, \"user_id\": 0, \"started_at\": t5, \"finished_at\": t6, \"geom\": p1, \"location_id\": 2},\n {\"id\": 7, \"user_id\": 1, \"started_at\": t3, \"finished_at\": t4, \"geom\": p1, \"location_id\": 2},\n {\"id\": 80, \"user_id\": 1, \"started_at\": t45, \"finished_at\": t5, \"geom\": p1, \"location_id\": 2},\n {\"id\": 3, \"user_id\": 1, \"started_at\": t5, \"finished_at\": t6, \"geom\": p1, \"location_id\": 4},\n ]\n sp = gpd.GeoDataFrame(data=list_dict, geometry=\"geom\", crs=\"EPSG:4326\")\n sp = sp.set_index(\"id\")\n sp.as_staypoints\n\n # generate empty triplegs for the merge function\n tpls = pd.DataFrame([], columns=[\"user_id\", \"started_at\", \"finished_at\"])\n return sp, tpls", "def sum_durations(self, start, end=None, exclude_acts='lunch',\n longbreaklimit=10):\n\n if end is None:\n end = dt.datetime.today()\n elif not isinstance(end, (dt.date, dt.datetime)):\n raise TypeError(\"The end argument has to be a date or datetime.\")\n\n if start == 'day':\n start = dt.datetime.today().date()\n elif start == 'week':\n today = dt.datetime.today().date()\n start = today - dt.timedelta(today.weekday())\n elif not isinstance(start, (dt.date, dt.datetime)):\n raise TypeError(\"The start argument has to be a date or datetime.\")\n\n if isinstance(exclude_acts, basestring):\n exclude_acts = (exclude_acts, )\n elif isinstance(exclude_acts, list):\n exclude_acts = tuple(exclude_acts)\n elif not isinstance(exclude_acts, tuple):\n raise TypeError(\"exclude_acts should either be string, list, or \"\n \"tuple!\")\n\n # working hours for today:\n # sum the duration of jobs, but exclude lunches and breaks > 
10min\n self.alog.dbcur.execute(\n \"SELECT SUM(duration) FROM jobs \"\n \"WHERE start >= ? AND start <= ? \"\n \"AND NOT activity IN (\"\n \"SELECT id FROM activities \"\n \"WHERE name IN (%s) ) \"\n \"AND NOT ( \"\n \"activity IN (\"\n \"SELECT id FROM activities \"\n \"WHERE name IN ('break') ) \"\n \"AND duration > ? )\" % ', '.join('?' for a in exclude_acts),\n (start, end) + exclude_acts + (float(longbreaklimit) * 60, ))\n total_dur = self.alog.dbcur.fetchone()[0]\n\n if total_dur is None:\n total_dur = 0\n else:\n total_dur = total_dur / 60\n\n return total_dur", "def condense_meeting_times_2(arr):\n\n # sort the meeting times by start time (this will be O(lg(n)), at least)\n # without sorting by start times, the random order will make this O(n^2)\n arr.sort()\n\n # make a list to store output\n output = [arr[0]]\n\n # iterate over all the time blocks and check for merges\n for time_block in arr[1:]:\n # get the times to compare against from the latest block in output\n first_start, first_stop = output[-1]\n # unpack the current time block being assessed for overlap\n second_start, second_stop = time_block\n # if the current time block overlaps with most recent, condense the two\n # by updating the entire tuple in the output list with latest time\n if second_start <= first_stop:\n output[-1] = (first_start, max(first_stop, second_stop))\n # else, there was no overlap. Add current to output and continue loop\n else:\n output.append((second_start, second_stop))\n\n return output", "def test_aggregate_times(self):\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = 123456\n f2.read_time_start = 456789\n f1.read_time_end = 444\n f2.read_time_end = 555\n f1.write_time_start = 222\n f2.write_time_start = 111\n f1.write_time_end = 666\n f2.write_time_end = 777\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, 123456)\n self.assertEqual(f1.read_time_end, 555)\n self.assertEqual(f1.write_time_start, 111)\n self.assertEqual(f1.write_time_end, 777)\n\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = 456789\n f2.read_time_start = 123456\n f1.read_time_end = 555\n f2.read_time_end = 444\n f1.write_time_start = 111\n f2.write_time_start = 222\n f1.write_time_end = 777\n f2.write_time_end = 666\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, 123456)\n self.assertEqual(f1.read_time_end, 555)\n self.assertEqual(f1.write_time_start, 111)\n self.assertEqual(f1.write_time_end, 777)\n\n # One equals None\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = 123456\n f2.read_time_start = None\n f1.read_time_end = 555\n f2.read_time_end = None\n f1.write_time_start = None\n f2.write_time_start = 111\n f1.write_time_end = None\n f2.write_time_end = 666\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, 123456)\n self.assertEqual(f1.read_time_end, 555)\n self.assertEqual(f1.write_time_start, 111)\n self.assertEqual(f1.write_time_end, 666)\n\n # The other equals None\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = None\n f2.read_time_start = 456789\n f1.read_time_end = None\n f2.read_time_end = 444\n f1.write_time_start = 222\n f2.write_time_start = None\n f1.write_time_end = 777\n f2.write_time_end = None\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, 456789)\n self.assertEqual(f1.read_time_end, 444)\n self.assertEqual(f1.write_time_start, 222)\n 
self.assertEqual(f1.write_time_end, 777)\n\n # Both equal None\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = None\n f2.read_time_start = None\n f1.read_time_end = None\n f2.read_time_end = None\n f1.write_time_start = None\n f2.write_time_start = None\n f1.write_time_end = None\n f2.write_time_end = None\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, None)\n self.assertEqual(f1.read_time_end, None)\n self.assertEqual(f1.write_time_start, None)\n self.assertEqual(f1.write_time_end, None)", "def Mode1_2_5(Occupied_Timeline):\n \n \n Timeline_settings = OPT_Config_File.Timeline_settings()\n \n \"Earliest possible date an Operational Science Mode is scheduled\"\n initial_date = ephem.Date(Timeline_settings['start_date'])\n \n \n Occupied_Timeline_values = []\n \n \"Extract all scheduled modes with their scheduled dates and sort them in chronological order. Skip the ones which are empty or entirely scheduled before initial_date\"\n for Occupied_value in Occupied_Timeline.values():\n if( Occupied_value == [] ):\n continue\n \n for date in Occupied_value:\n \n if( date[0] < initial_date and date[1] < initial_date):\n continue\n \n else:\n Occupied_Timeline_values.append(date)\n \n Occupied_Timeline_values.sort()\n \n dates = []\n \n \"The least amount of time that needs to be available for mode1/2 to be scheduled\"\n minDuration = ephem.second*Timeline_settings['Mode1_2_5_minDuration']\n iterations = 0\n \n \"\"\"To fill in modes inbetween already schedueled modes. The amount of iterations is equal to \n the number of modes scheduled plus 1 as there is a possibility for the modes to be scheduled \n at the start and end of the timeline.\"\"\"\n for x in range(len(Occupied_Timeline_values)+1):\n \n ## If Occupied_Timeline_values is empty then just schedule until the end of the timeline\n if( len(Occupied_Timeline_values) == 0 ):\n timeline_end = ephem.Date( ephem.Date(Timeline_settings['start_date'])+ephem.second*Timeline_settings['duration'])\n date = initial_date\n endDate = ephem.Date(timeline_end - ephem.second*Timeline_settings['mode_separation'])\n dates.append( (date, endDate) )\n \n iterations = iterations + 1\n \n ## For first iteration; Check if there is spacing between initial_date and the the first mode running\n elif( x == 0 and Occupied_Timeline_values[0][0] != initial_date):\n time_between_modes = Occupied_Timeline_values[0][0] - initial_date \n if(time_between_modes > minDuration ):\n date = initial_date\n \n endDate = ephem.Date(Occupied_Timeline_values[x][0] - ephem.second*Timeline_settings['mode_separation'])\n dates.append( (date, endDate) )\n iterations = iterations + 1\n \n ## For last iteration; Check if there is spacing in between end of the last mode and the end of the timeline\n elif( x == len(Occupied_Timeline_values) ):\n timeline_end = ephem.Date( ephem.Date(Timeline_settings['start_date'])+ephem.second*Timeline_settings['duration'])\n time_between_modes = timeline_end - Occupied_Timeline_values[-1][1] \n if(time_between_modes > minDuration ):\n date = Occupied_Timeline_values[-1][1]\n endDate = ephem.Date(timeline_end - ephem.second*Timeline_settings['mode_separation'])\n dates.append( (date, endDate) )\n iterations = iterations + 1\n \n ## For all other iterations; Start scheduling Mode1,2,5 inbetween already schedueled modes and CMDs\n elif( x != 0 and x != len(Occupied_Timeline_values) ):\n time_between_modes = Occupied_Timeline_values[x][0] - Occupied_Timeline_values[x-1][1] \n if(time_between_modes 
> minDuration ):\n date = Occupied_Timeline_values[x-1][1]\n endDate = ephem.Date(Occupied_Timeline_values[x][0] - ephem.second*Timeline_settings['mode_separation'])\n dates.append( (date, endDate) )\n iterations = iterations + 1\n \n \n \n \n \n if( 'Mode1' in Occupied_Timeline ):\n Occupied_Timeline['Mode1'] = dates\n elif( 'Mode2' in Occupied_Timeline ):\n Occupied_Timeline['Mode2'] = dates\n elif( 'Mode5' in Occupied_Timeline ):\n Occupied_Timeline['Mode5'] = dates\n \n Logger.debug('Scheduled Operational Science Mode (date, endDate): '+str(dates))\n comment = 'Number of Modes inserted: ' + str(iterations)\n \n \n return Occupied_Timeline, comment", "def goals_difference_to_time_period(self, team_id, time_period_type='M', time_period_num=1):\n # {{{\n if time_period_type not in ['W', 'M', 'Y', 'S', 'L']:\n time_period_type = 'M'\n if type(time_period_num) is not int or time_period_num == 0:\n time_period_num = 1\n\n if time_period_type in ['W', 'M', 'Y', 'S']:\n goals_scored = np.nan\n goals_conceded = np.nan\n if time_period_type in ['W', 'M', 'Y']:\n matches_containing_team = self.matches[(self.matches[\"HID\"] == team_id) |\n (self.matches[\"AID\"] == team_id)].sort_index()\n if time_period_type == 'W':\n time_period_num *= 7 # week fixed to 7 days\n elif time_period_type == 'M':\n time_period_num *= 30 # month fixed to 30 days\n elif time_period_type == 'Y':\n time_period_num *= 365 # year fixed to 365 days\n\n how_deep_to_past = np.datetime64(self.today) - np.timedelta64(time_period_num, 'D')\n matches_containing_team = matches_containing_team[(matches_containing_team['Date'] >= str(how_deep_to_past))\n & (matches_containing_team['Date'] < self.yesterday)]\n if not matches_containing_team.empty:\n goals_conceded = matches_containing_team[matches_containing_team[\"HID\"] == team_id]['ASC'].sum() + \\\n matches_containing_team[matches_containing_team[\"AID\"] == team_id]['HSC'].sum()\n goals_scored = matches_containing_team[matches_containing_team[\"HID\"] == team_id]['HSC'].sum() + \\\n matches_containing_team[matches_containing_team[\"AID\"] == team_id]['ASC'].sum()\n\n elif time_period_type == 'S':\n # It is assumed that team is already added in DataFrame self.LL_data\n matches_containing_team = self.SL_data.xs(team_id, level='second')[-1-time_period_num:-1]\n if not matches_containing_team.empty:\n goals_conceded = matches_containing_team['SL_Goals_Conceded'].sum()\n goals_scored = matches_containing_team['SL_Goals_Scored'].sum()\n\n return goals_scored - goals_conceded\n elif time_period_type == 'L':\n # It is assumed that team is already added in DataFrame self.LL_data\n return self.LL_data.loc[team_id, 'LL_Goals_Scored'] - self.LL_data.loc[team_id, 'LL_Goals_Conceded']", "def make_all_datetime(self):\n \n logging.info('\\n *** Running make_all_datetime ' )\n\n all_uniques = [] # storing a list with all the unique date_times \n which_k_in_dt = {} # list of avilable dataset for each unique date_time, so that when looping over the distinct date_times, only the proper dataset will be read and compared \n\n \"\"\" Loop over all the datasets \n k: name of the dataset\n v: list of file paths, eg 'era5_1':[filepath_1, filepath_2 ]\"\"\"\n\n for k,v in self.datasets.items() :\n self.unique_dates[k] = {}\n for F in v: \n self.unique_dates[k][F] = {}\n \n self.unique_dates[k][F]['indices'] = {} \n self.unique_dates[k][F]['index_offset_next'] = 0 # to be replaced later when slicing \n self.unique_dates[k][F]['index_offset'] = 0 # to be replaced later when slicing \n\n unique_dt = 
list(data[k][F]['recordtimestamp'])\n \n indices = list(data[k][F]['recordindex'])\n all_uniques += unique_dt # adding to the total unique date_times \n\n \"\"\" Loop over all the date_times of each dataset \"\"\"\n for dt, index_low, count in zip (unique_dt, indices, range(len(unique_dt)) ):\n\n if dt not in which_k_in_dt.keys():\n which_k_in_dt[dt] = {}\n if k not in which_k_in_dt[dt].keys():\n which_k_in_dt[dt][k] = [] \n if F not in which_k_in_dt[dt][k]:\n which_k_in_dt[dt][k].append(F)\n # at this point I have e.g. which_k_in_dt= {1990-01-01-12-00: {era5_1:[file1,file2] , ncar:[file3] } }\n\n self.unique_dates[k][F]['indices'][dt] = {}\n self.unique_dates[k][F]['indices'][dt]['low'] = index_low \n try:\n index_up = indices[ count + 1 ] # works until the last available recordindex\n except: \n index_up = max(indices)+1000000 # dummy large number \n\n self.unique_dates[k][F]['indices'][dt]['up'] = index_up\n self.unique_dates[k][F]['up_to_dt_slice'] = data[k][F]['min_date'] \n \n\n self.dataset_per_dt = which_k_in_dt \n self.merged_unique_dates = np.unique(np.array(all_uniques) ) # storing the set of *ALL* distinct dt values of all datasets and all files \n logging.debug('*** make_all_datetime finished ')", "def combine_record(self, dt, container = ''):\n \n record_dataset_legth ={} \n other_ds = []\n\n ''' I fill the dic e.g. record_dataset_legth{100:['era5_1','ncar'], 80:['bufr','igra2'] }\n i.e. the keys are the lengths, the entries are the lists of datasets '''\n\n duplicates = []\n\n for k in container.keys(): # loop over the dataset\n if k not in other_ds:\n other_ds.append(k)\n for f in container[k]: # loop over the file per dataset\n num_rec = len(container[k][f]['obs_tab'][\"date_time\"])\n \n \"\"\" Storing all the reports id with the proper prefix (for each different dataset) \"\"\"\n rep_id = b''.join(container[k][f][\"obs_tab\"]['report_id'][0]) \n rep_id = self.observation_ids_merged[k] + rep_id \n duplicates.append( rep_id ) \n \n if num_rec not in record_dataset_legth.keys():\n record_dataset_legth[num_rec] = {}\n record_dataset_legth[num_rec]['best_ds'] = []\n record_dataset_legth[num_rec]['file'] = []\n\n record_dataset_legth[num_rec]['best_ds'].append(k)\n record_dataset_legth[num_rec]['file'].append(f)\n\n max_entries = max(record_dataset_legth.keys())\n \n ''' best_ds is the list of longest datasets, best_datasets the list of all the datasets available including best_ds '''\n best_datasets = record_dataset_legth[max_entries]\n\n \"\"\" Choosing the priority of the datasets:\n - if era5_1 or era5_2 are present, pick them (they cant be both present for the same date_time)\n - else, if igra2 is present, pick it\n - else, one of the remaining ones \"\"\"\n\n if 'era5_2' in best_datasets and 'era5_1' not in best_datasets: # era5_1 and era5_2 should never be both present anyway...\n best_ds = 'era5_2' \n elif 'era5_1' in best_datasets and 'era5_2' not in best_datasets:\n best_ds = 'era5_1'\n elif 'era5_1' not in best_datasets and 'era5_2' not in best_datasets and 'igra2' in best_datasets:\n best_ds = 'igra2'\n elif 'era5_1' not in best_datasets and 'era5_2' not in best_datasets and 'igra2' not in best_datasets:\n best_ds = record_dataset_legth[max_entries]['best_ds'][0] # pick the first of the list \n\n best_file = record_dataset_legth[max_entries]['file'][0]\n\n ''' If more file are available for the same best_ds, pick the first one from the list '''\n selected_obstab, selected_era5fb = container[best_ds][best_file]['obs_tab'] , 
container[best_ds][best_file]['era5fb_tab']\n\n ''' Creating the correct observations and record ids. \n All the bytes variable are shrunk to a long |S1 byte variable type, otherwise \n writing in h5py will not work. '''\n \n for var in ['observation_id']:\n if type (selected_obstab[var] ) == np.ndarray and type (selected_obstab[var][0] ) == np.bytes_:\n selected_obstab[var] = np.array ([self.observation_ids_merged[best_ds] + b''.join(l) for l in selected_obstab[var] ] )\n elif type (selected_obstab[var] ) == np.ndarray and type (selected_obstab[var][0] ) == np.ndarray:\n selected_obstab[var] = np.array ([self.observation_ids_merged[best_ds] + b''.join(l) for l in selected_obstab[var][:] ] )\n\n for var in ['report_id']:\n val = selected_obstab[var][0]\n if type (selected_obstab[var] ) == np.ndarray and type (val) == np.bytes_:\n value = self.observation_ids_merged[best_ds] + b''.join(val) # it is the same for each row in the table\n elif type (selected_obstab[var] ) == np.ndarray and type (val) == np.ndarray:\n value = self.observation_ids_merged[best_ds] + b''.join(val) \n arr = np.full( (1, len( selected_obstab['date_time']) ) , value )[0] # np.full returns a list of lists\n\n selected_obstab[var] = arr\n\n\n for var in selected_era5fb.keys():\n if type (selected_era5fb[var]) == np.ndarray and type (selected_era5fb[var][0] ) == np.ndarray:\n try:\n selected_era5fb[var] = np.array( [b''.join(l) for l in selected_era5fb[var][:] ] )\n #print('MANAGED FFF', var)\n except:\n value = [b''.join(l) for l in selected_era5fb[var][0] ][0]\n #print('VALUE IS FFF', value)\n selected_era5fb[var] = np.array( (1, len( selected_obstab[var]) ) ).fill(value)\n\n \"\"\" Extracting the header \"\"\"\n selected_head = self.get_header_table(dt, ds = best_ds, File = best_file )\n for var in selected_head.keys():\n if type (selected_head[var] ) == np.ndarray and type (selected_head[var][0] ) == np.bytes_:\n selected_head[var] = np.array( [b''.join(l) for l in selected_head[var][:] ] )\n\n if 'best_ds' == 'era5_1' or best_ds == 'era5_2' :\n selected_obstab['advanced_assimilation_feedback'] = np.array([1]*len(selected_obstab['date_time']) )\n else:\n selected_obstab['advanced_assimilation_feedback'] = np.array([0]*len(selected_obstab['date_time']) )\n\n #best_ds_byte = np.bytes_(best_ds, ndtype = '|S10') # converting to bytes object\n best_ds_byte = np.bytes_(best_ds) # converting to bytes object \n arr = np.full( (1, len( selected_obstab['date_time']) ) , best_ds_byte )[0]\n selected_obstab['source_id'] = arr\n\n duplicate = b','.join(duplicates)\n #selected_head['duplicates'] = np.array(duplicate)\n\n duplicate = np.array(duplicate).astype(dtype='|S70')\n selected_head['duplicates'] = np.array([duplicate])\n selected_head['report_id'] = np.array([selected_obstab['report_id'][0]])\n selected_head['source_id'] = np.array([selected_obstab['source_id'][0]])\n selected_head['record_timestamp'] = np.array([selected_obstab['date_time'][0]])\n\n selected_file = np.bytes_(best_file.split('/')[-1])\n \n return best_ds, selected_obstab, selected_era5fb, selected_head, selected_file, best_file", "def _compute_duration(self):\n diff_float = 0\n for ts_line in self:\n if ts_line.x_start_date:\n st_datetime = fields.Datetime.from_string(\n ts_line.x_start_date)\n # autocomplete date from start date\n st_date_tz = fields.Datetime.context_timestamp(\n self, st_datetime).date()\n ts_line.date = st_date_tz\n\n # autocomplete name from start date\n st_datetime_tz = fields.Datetime.context_timestamp(\n self, st_datetime)\n 
string_st_dt_tz = fields.Datetime.to_string(st_datetime_tz)\n ts_line.name = ts_line.user_id.name + '/' + string_st_dt_tz\n\n en_datetime = fields.Datetime.from_string(\n ts_line.x_end_date)\n diff = en_datetime - st_datetime\n if(time(1, 00) <= st_datetime.time() <= time(5, 00)):\n if(time(6, 00) <= en_datetime.time() <= time(10, 00)):\n # del 1 hour for breaking lunch\n diff_float = round(diff.total_seconds() / 3600.0, 2)-1\n else:\n diff_float = round(diff.total_seconds() / 3600.0, 2)\n ts_line.unit_amount = diff_float", "def update_completion_time(tests_dataframe):\r\n tests_dataframe['time_test_arrives_lab'] = pd.to_datetime(tests_dataframe['time_test_arrives_lab'])\r\n hours = 5\r\n processing_time = datetime.timedelta(hours = hours)\r\n tests_dataframe['completion_time'] = tests_dataframe['time_test_arrives_lab'] + processing_time\r\n return tests_dataframe", "def merge(df):\n return (df['utterance_t-3'] + df['utterance_t-2'] + df['utterance_t-1'] \\\n + df['utterance_t'])", "def setup_schedule():\n for project in Project.select():\n if (project.schedule_interval is not None) and (project.schedule_interval > 0):\n schedule.add_job(pull_build_project, \"interval\", id=\"building_\" + str(project.id),\n hours=project.schedule_interval,\n args=[project, \"master\"])", "def completeMerge(self):\n #--Remove lists that aren't the sum of at least two esps.\n srcMods = self.srcMods\n for levls in (self.levcs,self.levis):\n for listId in levls.keys():\n if len(srcMods[listId]) < 2 or levls[listId].isDeleted:\n self.records.remove(levls[listId])\n del levls[listId]\n del srcMods[listId]\n #--Log\n log = self.log\n for label, levls in (('Creature',self.levcs), ('Item',self.levis)):\n if not len(levls): continue\n log.setHeader(_('Merged %s Lists:') % (label,))\n for listId in sorted(levls.keys(),key=lambda a: a.lower() ):\n log(listId)\n for mod in srcMods[listId]:\n log(' '+mod)", "def test_issue_add_time(self):\n pass", "def write_tasks_table(self):\n tasks = self._get_all_tasks()\n\n self.tasks_view.setRowCount(len(tasks))\n\n row_counter = 0\n for task in tasks:\n\n end_time = None\n start_time = None\n\n # Convert to display data\n if task.StartDate is not None:\n start_time = Time.date_time_format(int(task.StartDate))\n\n if task.EndDate is not None:\n end_time = Time.date_time_format(int(task.EndDate))\n\n # Project name header\n self.tasks_view.setItem(row_counter, 0, QtGui.QTableWidgetItem(str(task.Name)))\n self.tasks_view.setItem(row_counter, 1, QtGui.QTableWidgetItem(str(start_time)))\n self.tasks_view.setItem(row_counter, 2, QtGui.QTableWidgetItem(str(end_time)))\n self.tasks_view.setItem(row_counter, 4, QtGui.QTableWidgetItem(str(task.Assignee)))\n self.tasks_view.setItem(row_counter, 5, QtGui.QTableWidgetItem(str(self.get_project(task.Project))))\n self.tasks_view.setItem(row_counter, 6, QtGui.QTableWidgetItem(str(task.Description)))\n self.tasks_view.setItem(row_counter, 7, QtGui.QTableWidgetItem(str(task.Id)))\n\n # Status header\n if task.Status is None:\n task.Status = int(0)\n\n if int(task.Status) is 1:\n # TODO need translation\n display_status = \"In Progress\"\n\n elif int(task.Status) is 2:\n display_status = \"Not Started\"\n\n elif int(task.Status) is 3:\n display_status = \"Forecast\"\n\n else:\n # TODO need translation\n display_status = \"Done\"\n\n self.tasks_view.setItem(row_counter, 3, QtGui.QTableWidgetItem(str(display_status)))\n\n if task.Description is not None:\n self.tasks_view.setItem(row_counter, 6, QtGui.QTableWidgetItem(task.Description))\n\n 
row_counter += 1", "def add_deltas(self, query_data, query_sum):\n if \"__\" in self._delta:\n return self.add_current_month_deltas(query_data, query_sum)\n else:\n return super().add_deltas(query_data, query_sum)", "def __merge(self, year, month, day):\n print 'Merge...'\n logging.info('[merge]->Merge...')\n\n k7dir = self.aodSetting.k7_dir # path.join(baseDir, 'k7')\n mdir = self.aodSetting.merge_dir # path.join(baseDir, 'merge')\n t = datetime.datetime(year, month, day)\n\n stids = self.aodSetting.stations.getstIds()\n\n # Loop - merge k7 files for each station\n for stid in stids:\n stk7dir = path.join(\n k7dir, stid, t.strftime('%Y%m'), t.strftime('%d'))\n if not path.isdir(stk7dir):\n continue\n\n fns = glob.glob(path.join(stk7dir, '*.k7'))\n if len(fns) == 0:\n continue\n\n # check k7 and remove it if empty file\n for fn in fns:\n if path.getsize(fn) == 0:\n print 'Empty K7 [{0}] => {1} '.format(stid, fn)\n logging.info(\n '[merge]->Empty K7 [{0}] => {1}'.format(stid, fn))\n fns.remove(fn)\n\n stmdir = path.join(mdir, stid, t.strftime('%Y%m'))\n if not os.path.exists(stmdir):\n os.makedirs(stmdir)\n\n outfn = path.join(stmdir, stid + '_' +\n t.strftime('%Y%m%d') + '_merge.k7')\n spdata.merge_files(fns, outfn)\n print 'Merge [{0}] => {1}'.format(stid, outfn)\n logging.info('[merge]->Merge [{0}] => {1}'.format(stid, outfn))\n\n print 'Merge Done!'\n logging.info('[merge]->Merge Done!')", "def groupTrajectories(self, dt = 100)->None:#4 * 30)->None:\r\n for i, p1 in enumerate(self._analyzer.activePeople):\r\n for j, p2 in enumerate(self._analyzer.activePeople):\r\n if (i > j) and (p1 not in p2.inGroupWith):\r\n if ((len(p1.coordinates) >= dt) and (len(p2.coordinates) >= dt)):\r\n in_group = True\r\n for k in range(dt):\r\n if ((p1.coordinates[-k] != None) and (p2.coordinates[-k] != None) and (p1.coordinates[-k].DistanceFrom(p2.coordinates[-k]) > self._minDist)):\r\n in_group = False\r\n if in_group:\r\n p1.inGroupWith.append(p2)\r\n p2.inGroupWith.append(p1)", "def merge(self, other: PerfData):\n self.total_samples += other.total_samples\n if self.total_time == 0.0:\n self.total_time = other.total_time\n self.compile_time = max(self.compile_time, other.compile_time)\n self.programming_time = max(\n self.programming_time, other.programming_time\n )\n if self.est_samples_per_sec == 0.0:\n self.est_samples_per_sec = other.est_samples_per_sec\n else:\n assert (\n self.est_samples_per_sec == other.est_samples_per_sec\n ), \"Expected all fabric-based performance estimates to be identical\"\n\n if self.total_time > 0:\n self.samples_per_sec = float(self.total_samples) / self.total_time\n else:\n self.samples_per_sec = 0.0", "def task_summary_dict(request, tasks, fieldlist=None):\n sumd = {}\n numeric_fields_task = ['reqid', 'corecount', 'taskpriority', 'workqueue_id']\n\n if fieldlist:\n flist = fieldlist\n else:\n flist = copy.deepcopy(const.TASK_FIELDS_STANDARD)\n\n for task in tasks:\n for f in flist:\n if 'tasktype' in request.session['requestParams'] and request.session['requestParams']['tasktype'].startswith('analy'):\n # Remove the noisy useless parameters in analysis listings\n if flist in ('reqid', 'stream', 'tag'):\n continue\n\n if 'taskname' in task and len(task['taskname'].split('.')) == 5:\n if f == 'project':\n try:\n if not f in sumd:\n sumd[f] = {}\n project = task['taskname'].split('.')[0]\n if not project in sumd[f]:\n sumd[f][project] = 0\n sumd[f][project] += 1\n except:\n pass\n if f == 'stream':\n try:\n if not f in sumd:\n sumd[f] = {}\n stream = 
task['taskname'].split('.')[2]\n if not re.match('[0-9]+', stream):\n if not stream in sumd[f]:\n sumd[f][stream] = 0\n sumd[f][stream] += 1\n except:\n pass\n if f == 'tag':\n try:\n if not f in sumd:\n sumd[f] = {}\n tags = task['taskname'].split('.')[4]\n if not tags.startswith('job_'):\n tagl = tags.split('_')\n tag = tagl[-1]\n if not tag in sumd[f]:\n sumd[f][tag] = 0\n sumd[f][tag] += 1\n except:\n pass\n if f in task:\n val = task[f]\n if val is None or val == '':\n val = 'Not specified'\n if val == 'anal':\n val = 'analy'\n if f not in sumd:\n sumd[f] = {}\n if val not in sumd[f]:\n sumd[f][val] = 0\n sumd[f][val] += 1\n\n # convert to ordered lists\n suml = []\n for f in sumd:\n itemd = {}\n itemd['field'] = f\n iteml = []\n kys = sumd[f].keys()\n if f != 'ramcount':\n for ky in kys:\n iteml.append({'kname': ky, 'kvalue': sumd[f][ky]})\n iteml = sorted(iteml, key=lambda x: str(x['kname']).lower())\n else:\n newvalues = {}\n for ky in kys:\n if ky != 'Not specified':\n roundedval = int(ky / 1000)\n else:\n roundedval = -1\n if roundedval in newvalues:\n newvalues[roundedval] += sumd[f][ky]\n else:\n newvalues[roundedval] = sumd[f][ky]\n for ky in newvalues:\n if ky >= 0:\n iteml.append({'kname': str(ky) + '-' + str(ky + 1) + 'GB', 'kvalue': newvalues[ky]})\n else:\n iteml.append({'kname': 'Not specified', 'kvalue': newvalues[ky]})\n iteml = sorted(iteml, key=lambda x: str(x['kname']).lower())\n itemd['list'] = iteml\n suml.append(itemd)\n suml = sorted(suml, key=lambda x: x['field'])\n return suml", "def timeFieldGrp(*args, adjustableColumn: int=0, adjustableColumn2: int=0, adjustableColumn3:\n int=0, adjustableColumn4: int=0, adjustableColumn5: int=0, adjustableColumn6:\n int=0, annotation: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, changeCommand: Script=None, columnAlign:\n Union[List[int, AnyStr], List[List[int, AnyStr]]]=None, columnAlign2:\n List[AnyStr, AnyStr]=None, columnAlign3: List[AnyStr, AnyStr, AnyStr]=None,\n columnAlign4: List[AnyStr, AnyStr, AnyStr, AnyStr]=None, columnAlign5:\n List[AnyStr, AnyStr, AnyStr, AnyStr, AnyStr]=None, columnAlign6: List[AnyStr,\n AnyStr, AnyStr, AnyStr, AnyStr, AnyStr]=None, columnAttach: Union[List[int,\n AnyStr, int], List[List[int, AnyStr, int]]]=None, columnAttach2: List[AnyStr,\n AnyStr]=None, columnAttach3: List[AnyStr, AnyStr, AnyStr]=None, columnAttach4:\n List[AnyStr, AnyStr, AnyStr, AnyStr]=None, columnAttach5: List[AnyStr, AnyStr,\n AnyStr, AnyStr, AnyStr]=None, columnAttach6: List[AnyStr, AnyStr, AnyStr,\n AnyStr, AnyStr, AnyStr]=None, columnOffset2: List[int, int]=None,\n columnOffset3: List[int, int, int]=None, columnOffset4: List[int, int, int,\n int]=None, columnOffset5: List[int, int, int, int, int]=None, columnOffset6:\n List[int, int, int, int, int, int]=None, columnWidth: Union[List[int, int],\n List[List[int, int]]]=None, columnWidth1: int=0, columnWidth2: List[int,\n int]=None, columnWidth3: List[int, int, int]=None, columnWidth4: List[int, int,\n int, int]=None, columnWidth5: List[int, int, int, int, int]=None,\n columnWidth6: List[int, int, int, int, int, int]=None, defineTemplate:\n AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\", dragCallback: Script=None,\n dragCommand: Script=None, dropCallback: Script=None, enable: bool=True,\n enable1: bool=True, enable2: bool=True, enable3: bool=True, enable4: bool=True,\n enableBackground: bool=True, enableKeyboardFocus: bool=True, exists: bool=True,\n extraLabel: Union[AnyStr, bool]=\"\", fullPathName: bool=True, height: 
Union[int,\n bool]=0, highlightColor: Union[List[float, float, float], bool]=None,\n isObscured: bool=True, label: Union[AnyStr, bool]=\"\", manage: bool=True,\n noBackground: bool=True, numberOfFields: int=0, numberOfPopupMenus: bool=True,\n parent: Union[AnyStr, bool]=\"\", popupMenuArray: bool=True, precision: int=0,\n preventOverride: bool=True, rowAttach: Union[List[int, AnyStr, int],\n List[List[int, AnyStr, int]]]=None, statusBarMessage: AnyStr=\"\", step:\n Union[time, bool]=None, useTemplate: AnyStr=\"\", value: Union[List[time, time,\n time, time], bool]=None, value1: Union[time, bool]=None, value2: Union[time,\n bool]=None, value3: Union[time, bool]=None, value4: Union[time, bool]=None,\n visible: bool=True, visibleChangeCommand: Union[Script, bool]=None, width:\n Union[int, bool]=0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def absulute2relative_time(x): \n if x.viewed:\n x.viewed_reltime=x.viewed_time-x.start\n \n if x.completed:\n x.completed_reltime=x.completed_time-x.start\n \n return x", "def update_timeindex(self, event):\n latest_datetime = self.bars.get_latest_bar_datetime(self.symbol_list[0])\n \n # Update positions\n # ================\n dp = dict((k,v) for k,v in [(s,0) for s in self.symbol_list])\n dp['datetime'] = latest_datetime\n \n for s in self.symbol_list:\n dp[s] = self.current_positions[s]\n \n # Append the current positions\n self.all_positions.append(dp)\n \n # Update holdings\n # ===============\n dh = dict((k,v) for k,v in [(s,0) for s in self.symbol_list])\n dh['datetime'] = latest_datetime\n dh['cash'] = self.current_holdings['cash']\n dh['commission'] = self.current_holdings['commission']\n dh['total'] = self.current_holdings['cash']\n \n for s in self.symbol_list:\n # Approximation to the real value\n market_value = self.current_positions[s] * self.bars.get_latest_bar_value(s, \"close\")\n dh[s] = market_value\n dh['total'] += market_value\n \n # Append the current holdings\n self.all_holdings.append(dh)", "def list_tasks(q = None):\n to = {\"p\":{}, \"v\":{}}\n for k, v in to.items():\n pin = HeaterController.pin_ids[k]\n state = subprocess.check_output([\"gpio\", 'read', pin]).strip()\n to[k][\"state\"] = \"on\" if state==\"0\" else \"off\"\n to[k][\"on_id\"] = \"\"\n to[k][\"on_time\"] = \"\"\n to[k][\"off_id\"] = \"\"\n to[k][\"off_time\"] = \"\"\n\n tasks = []\n if q is None:\n output = subprocess.check_output([\"atq\"])\n else:\n output = subprocess.check_output([\"atq\", \"-q\", q])\n for t in output.split(\"\\n\"):\n m = HeaterController.task_parse.match(t.strip())\n if m is not None:\n task_id = m.group(1)\n task_time = datetime.strptime(m.group(2), r'%a %b %d %H:%M:%S %Y').strftime(r'%y%m%d%H%M')\n q_name = m.group(3)\n tasks.append((task_id, task_time, q_name))\n tasks = sorted(tasks, key=lambda x: x[2] + x[1])\n while len(tasks):\n task_id, task_time, q_name = tasks.pop(0)\n output = subprocess.check_output([\"at\", \"-c\", task_id])\n # get last line of the output\n lines = output.strip().split(\"\\n\")\n # find value of -o parameter that specifies operation\n m = HeaterController.cmd_parse.match(lines[-1].strip())\n if m is not None:\n cmd = m.group(1)\n if cmd == r'on':\n to[q_name][\"on_id\"] = task_id\n to[q_name][\"on_time\"] = task_time\n elif cmd == r'off':\n to[q_name][\"off_id\"] = task_id\n to[q_name][\"off_time\"] = task_time\n else:\n assert False, \"Unexpected value of -o parameter: {}\".format(cmd)\n\n return {\"tasks\":to}", "def update_timeindex(self, event):\n latest_datetime = 
self.bars.get_latest_bar_datetime(self.symbol_list[0])\n \n # Update positions\n # ================\n dp = dict((k,v) for k,v in [(s,0) for s in self.symbol_list])\n dp['datetime'] = latest_datetime\n \n for s in self.symbol_list:\n dp[s] = self.current_positions[s]\n \n # Append the current positions\n self.all_positions.append(dp)\n \n # Update holdings\n # ===============\n dh = dict((k,v) for k,v in [(s,0) for s in self.symbol_list])\n dh['datetime'] = latest_datetime\n dh['cash'] = self.current_holdings['cash']\n dh['commission'] = self.current_holdings['commission']\n dh['total'] = self.current_holdings['cash']\n \n for s in self.symbol_list:\n # Approximation to the real value\n market_value = self.current_positions[s] * self.bars.get_latest_bar_value(s, \"adj_close\")\n dh[s] = market_value\n dh['total'] += market_value\n \n # Append the current holdings\n self.all_holdings.append(dh)\n print('timeindex: ', dh)", "def __update_current_measure_durations(self, duration: int) -> None:\n total_duration = sum(self.current_measure_durations) + duration\n if total_duration < N_EIGHTHS_PER_MEASURE:\n self.current_measure_durations.append(duration)\n elif total_duration == N_EIGHTHS_PER_MEASURE:\n self.current_measure_durations = []\n else:\n syncopated_duration = total_duration - N_EIGHTHS_PER_MEASURE\n self.current_measure_durations = [syncopated_duration]", "def get_job_metrics_summary_for_task(query):\n metric_list = ['hs06sec', 'gco2_global']\n metrics = {}\n for m in metric_list:\n metrics[m] = {'finished': 0, 'failed': 0, 'total': 0}\n\n hquery = copy.deepcopy(query)\n hquery['jobstatus__in'] = ('finished', 'failed')\n\n if 'jeditaskid' in hquery:\n\n hs06sec_sum = []\n # getting jobs. Can not use the .annotate() as there can be duplicates\n jobs = []\n jvalues = ['pandaid', 'jobstatus', ] + metric_list\n jobs.extend(Jobsarchived4.objects.filter(**hquery).values(*jvalues))\n jobs.extend(Jobsarchived.objects.filter(**hquery).values(*jvalues))\n jobs = drop_duplicates(jobs)\n\n for job in jobs:\n for m in metric_list:\n metrics[m]['total'] += job[m] if m in job and job[m] is not None else 0\n if job['jobstatus'] == 'finished':\n metrics[m]['finished'] += job[m] if m in job and job[m] is not None else 0\n elif job['jobstatus'] == 'failed':\n metrics[m]['failed'] += job[m] if m in job and job[m] is not None else 0\n\n # getting data from ATLARC DB, only hs06s\n pj_models = get_pandajob_arch_models_by_year(query['modificationtime__castdate__range'])\n if len(pj_models) > 0:\n for pjm in pj_models:\n try:\n hs06sec_sum.extend(pjm.objects.filter(**hquery).values('jobstatus').annotate(hs06secsum=Sum('hs06sec')))\n except Exception as ex:\n _logger.exception('Failed to get hs06sec from {} at ATLARC DB:\\n{}'.format(pjm, ex))\n\n if len(hs06sec_sum) > 0:\n for hs in hs06sec_sum:\n metrics['hs06sec']['total'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0\n if hs['jobstatus'] == 'finished':\n metrics['hs06sec']['finished'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0\n elif hs['jobstatus'] == 'failed':\n metrics['hs06sec']['failed'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0\n\n\n return metrics", "def update_streak(self, test_for_success=False):\n\n pb = PushBullet(PUSHBULLET_KEY)\n\n def github():\n \"\"\"Checks if a commit has been made in the last 24 hours.\"\"\"\n try:\n GITHUB_API = \"https://api.github.com/users/{0}/events\".format(self.user.username)\n\n accepted_events = AcceptedEvent.objects.values_list(\"name\", flat=True)\n events = 
json.loads(requests.get(GITHUB_API).text)\n for event in events:\n # it needs to be either a commit or a pull request\n # it must also be after the last update.\n if event[\"type\"] in accepted_events \\\n and self.date < dateutil.parser.parse(event[\"created_at\"]):\n return True\n else:\n return False\n except:\n return False\n\n def freecodecamp():\n \"\"\"Checks your freecodecamp profile for progress matching today's date.\"\"\"\n try:\n CODECAMP_URL = \"https://www.freecodecamp.com/{0}\".format(self.user.username)\n document = html5lib.parse(requests.get(CODECAMP_URL).text)\n if document.findtext((datetime.now()-timedelta(days=1)).strftime(\"%b %d, %Y\"), default=None) is None:\n return False\n return True\n except:\n return False\n\n def gitlab():\n try:\n repos_endpoint = \"/api/v3/projects\"\n repos = json.loads(requests.get(\n \"{0}{1}?order_by=last_activity_at&private_token={2}\".format(GITLAB_URL, repos_endpoint, GITLAB_KEY)).text)\n\n commits_endpoint = \"/api/v3/projects/{0}/repository/commits\"\n for repo in repos:\n commits = json.loads(requests.get(\n \"{0}{1}?order_by=last_activity_at&private_token={2}\".format(GITLAB_URL,\n commits_endpoint.format(repo[\"id\"]),\n GITLAB_KEY)).text)\n\n # if we get to a repo hasn't been updated in the last 24 hours, return false\n # (they are ordered by latest activity)\n if self.date > dateutil.parser.parse(repo[\"last_activity_at\"]):\n return False\n\n for commit in commits: # if the date is not in the last day, break\n if self.date < dateutil.parser.parse(commit[\"created_at\"]):\n # if we have the right guy, return true\n if commit[\"author_name\"] == self.user.username:\n return True\n else:\n break\n except:\n return False\n\n def session():\n date_from = datetime.now() - timedelta(days=1)\n\n if self.sessions.objects.filter(start__gte=date_from):\n return True\n\n return False\n\n successful = gitlab() or github() or freecodecamp() or session()\n\n if test_for_success is False:\n self.streak += (1*int(successful)*int(self.lost)) # stops you getting more points after losing.\n self.lost = not successful or self.lost # if you lost, it will stay until you open the app.\n self.date = datetime.now()\n if self.lost:\n push = pb.push_link(urlresolvers.resolve(\"codestreak:root\"), \"Your streak is over! Visit the app to reset.\")\n self.save()\n else:\n if successful:\n push = pb.push_note(\"Well done. You made a commit today.\", \":)\")\n else:\n push = pb.push_note(\"You're risking your streak!\", \"It's quite late and you still haven't made a commit. 
Hurry!\")\n\n return True if successful else False", "def merge_action_stages(env):\n stage_draft = env.ref('mgmtsystem_action.stage_draft')\n stage_open = env.ref('mgmtsystem_action.stage_open')\n stage_close = env.ref('mgmtsystem_action.stage_close')\n\n old_stage_draft_id = env.ref('crm_claim.stage_claim1').id\n old_stage_open_id = env.ref('crm_claim.stage_claim5').id\n old_stage_close_id = env.ref('crm_claim.stage_claim2').id\n\n env['mgmtsystem.action'].search([\n ('stage_id', '=', old_stage_draft_id)\n ]).write({'stage_id': stage_draft.id})\n\n env['mgmtsystem.action'].search([\n ('stage_id', '=', old_stage_open_id)\n ]).write({'stage_id': stage_open.id})\n\n env['mgmtsystem.action'].search([\n ('stage_id', '=', old_stage_close_id)\n ]).write({'stage_id': stage_close.id})\n\n env['mgmtsystem.action.stage'].browse([\n old_stage_draft_id, old_stage_open_id, old_stage_close_id\n ]).unlink()", "def time_contributed(self, start=None, end=None):\n\n sum = 0\n qry = Contribution.query.filter_by(project=self.id)\n\n # apply date filters\n if start:\n qry = qry.filter(Contribution.date >= start)\n if end:\n qry = qry.filter(Contribution.date <= end)\n\n contributions = qry.all()\n for contribution in contributions:\n sum += contribution.time\n return sum", "def aggregate(self, prettify: bool = False):\n openpose_data = self.openpose_data\n densepose_data = self.densepose_data\n openface_data = self.openface_data\n video_data = self.video_data\n\n if not openpose_data:\n log('ERROR', 'Nothing to Aggregate. Use -op -of and -dp to include openpose, openface and densepose data. The use of Openpose data is mandatory.')\n\n if openface_data:\n cleaned_openface = openface_data['cleaned']\n processed_openface = openface_data['processed']\n\n if densepose_data:\n cleaned_densepose = densepose_data['cleaned']\n processed_densepose = densepose_data['processed']\n\n cleaned_openpose = openpose_data['cleaned']\n processed_openpose = openpose_data['processed']\n tasks = cleaned_openpose.keys()\n\n if self.specific_task is not None:\n tasks = [task for task in tasks if str(self.specific_task) in task]\n\n for task in tasks:\n self.reset_files = True\n output_frame_directory = self.group_directory / \\\n FEATURE_AGGREGATE_DIR / task\n makedirs(output_frame_directory, exist_ok=True)\n\n processed_openpose_files = processed_openpose[task]\n\n if self.specific_frame is not None:\n processed_openpose_files = {\n self.specific_frame: processed_openpose_files[self.specific_frame]}\n\n for frame_idx in processed_openpose_files:\n output_frame_file = output_frame_directory / \\\n (\"%.12d\" % frame_idx + '.json')\n aggregate_frame = AggregateFrame(frame_idx)\n\n # OPENPOSE\n if self.verbose:\n print(\"Cleaned OpenPose\")\n self.framework_being_processed = OPENPOSE_KEY\n for camera in cleaned_openpose[task]:\n cleaned_openpose_files = cleaned_openpose[task][camera]\n openpose_clean_frame_data = json.load(\n open(cleaned_openpose_files[frame_idx], 'r'))\n aggregate_frame = self.read_frame_data(aggregate_frame,\n openpose_clean_frame_data,\n camera=camera,\n frame_data_type='raw')\n\n if self.verbose:\n print(\"Processed Openpose\")\n openpose_processed_frame_data = json.load(\n open(processed_openpose_files[frame_idx], 'r'))\n\n aggregate_frame = self.read_frame_data(aggregate_frame,\n openpose_processed_frame_data,\n frame_data_type='processed')\n\n # OPENFACE\n if openface_data:\n if self.verbose:\n print(\"Cleaned OpenFace\")\n self.framework_being_processed = OPENFACE_KEY\n cleaned_task_openface = 
cleaned_openface[task]\n for camera in cleaned_task_openface:\n cleaned_openface_files = cleaned_task_openface[camera]\n if frame_idx in cleaned_openface_files:\n openface_clean_frame_data = json.load(\n open(cleaned_openface_files[frame_idx], 'r'))\n aggregate_frame = self.read_frame_data(aggregate_frame,\n openface_clean_frame_data,\n camera=camera,\n frame_data_type='raw')\n\n if self.verbose:\n print(\"Processed Openface\")\n\n processed_task_openface = processed_openface[task]\n if frame_idx in processed_task_openface:\n processed_task_frame = processed_task_openface[frame_idx]\n for camera, frame_file in processed_task_frame.items():\n openface_processed_frame_data = json.load(\n open(frame_file, 'r'))\n aggregate_frame = self.read_frame_data(aggregate_frame,\n openface_processed_frame_data,\n camera=camera,\n frame_data_type='processed')\n\n # DENSEPOSE\n if densepose_data:\n if self.verbose:\n print(\"Cleaned Densepose\")\n \n self.framework_being_processed = DENSEPOSE_KEY\n for camera in cleaned_densepose[task]:\n cleaned_densepose_files = cleaned_densepose[task][camera]\n densepose_clean_frame_data = json.load(\n open(cleaned_densepose_files[frame_idx], 'r'))\n aggregate_frame = self.read_frame_data(aggregate_frame,\n densepose_clean_frame_data,\n camera=camera,\n frame_data_type='raw')\n\n if self.verbose:\n print(\"Processed Densepose\")\n \n densepose_processed_frame_data = json.load(\n open(processed_densepose[task][frame_idx], 'r'))\n\n aggregate_frame = self.read_frame_data(aggregate_frame,\n densepose_processed_frame_data,\n frame_data_type='processed')\n\n # VIDEO\n if video_data:\n self.framework_being_processed = OPENCV_KEY\n processed_video_data = video_data['processed']\n if task in processed_video_data:\n processed_video_data_task = processed_video_data[task]\n if frame_idx in processed_video_data_task:\n processed_video_data_frame = processed_video_data_task[frame_idx]\n for camera, frame_file in processed_video_data_frame.items():\n video_data_processed_frame_data = json.load(\n open(frame_file, 'r'))\n aggregate_frame = self.read_frame_data(aggregate_frame,\n video_data_processed_frame_data,\n camera=camera,\n frame_data_type='processed')\n\n self.plot_generator(aggregate_frame, output_frame_directory)\n\n if prettify:\n json.dump(aggregate_frame.to_json(), open(\n output_frame_file, 'w'), indent=2)\n else:\n json.dump(aggregate_frame.to_json(),\n open(output_frame_file, 'w'))\n\n if video_data:\n video_data_heatmaps = video_data['heatmap']\n if task in video_data_heatmaps:\n video_data_heatmaps_task = video_data_heatmaps[task]\n for file_name in video_data_heatmaps_task:\n shutil.copy(file_name, output_frame_directory)", "def award_status_populator():\n award_status_list = funding_data[\"Project Status:\"].unique()\n return [{'label': i, 'value': i} for i in award_status_list]", "def calculate_times(log):\n log['processing_time'] = 0\n log['multitasking'] = 0\n log = log.to_dict('records')\n log = sorted(log, key=lambda x: (x['source'], x['caseid']))\n for _, group in itertools.groupby(log, key=lambda x: (x['source'], x['caseid'])):\n events = list(group)\n events = sorted(events, key=itemgetter('start_timestamp'))\n for i in range(0, len(events)):\n # In one-timestamp approach the first activity of the trace\n # is taken as instantsince there is no previous timestamp\n # to find a range\n dur = (events[i]['end_timestamp'] -\n events[i]['start_timestamp']).total_seconds()\n if i == 0:\n wit = 0\n else:\n wit = (events[i]['start_timestamp'] -\n 
events[i-1]['end_timestamp']).total_seconds()\n events[i]['waiting_time'] = wit if wit >= 0 else 0\n events[i]['processing_time'] = dur\n return pd.DataFrame.from_dict(log)", "def duration_squash(self, duration_squash):\n\n self._duration_squash = duration_squash", "def update_timeindex(self, event):\r\n\r\n latest_datetime = self.bars.get_latest_bar_datetime(\r\n self.symbol_list[0]\r\n )\r\n\r\n #aggiorna le posizioni\r\n #=====================\r\n dp = dict( (k,v) for k, v in [(s, 0) for s in self.symbol_list] )\r\n dp['datetime'] = latest_datetime\r\n\r\n for s in self.symbol_list:\r\n dp[s] = self.current_positions[s]\r\n\r\n self.all_positions.append(dp)\r\n\r\n dh = dict( (k,v) for k, v in [(s, 0) for s in self.symbol_list] )\r\n dh['datetime'] = latest_datetime\r\n dh['cash'] = self.current_holdings['cash']\r\n dh['total'] = self.current_holdings['cash']\r\n\r\n for s in self.symbol_list:\r\n market_value = self.current_positions[s] * \\\r\n self.bars.get_latest_bar_value(s, \"adj_close\")\r\n dh[s] = market_value\r\n dh['total'] += market_value\r\n\r\n self.all_holdings.append(dh)", "def group_data():\n\n # Merge on Departure.\n\n # Merge on Arrival.\n\n data = pd.read_csv(path + \"/data/public/public_train.csv\")[[\"DateOfDeparture\", \"Arrival\"]]\n data['DateOfDeparture'] = pd.to_datetime(data['DateOfDeparture'])\n\n arrival = join_cleaned_data().\\\n rename(columns={'Date': 'DateOfDeparture', 'Airport': 'Arrival'}).\\\n set_index(\"DateOfDeparture\")\n\n merged_arrv = pd.merge(data, arrival, on=[\"DateOfDeparture\", \"Arrival\"], how=\"left\")\n\n # Rename and drop columns.\n\n merged_arrv.columns = [c + \"_Arrival\" if c not in [\"DateOfDeparture\",\n \"DateOfArrival\",\n \"Arrival\",\n \"WeeksToDeparture\"]\n else c\n for c in merged_arrv.columns]\n print merged_arrv\n merged_arrv = merged_arrv.drop([\"Arrival\"], axis=1)\n\n # Concatenate the two fields.\n # merged_all = pd.concat([merged_arrv, merged_dept], axis=1)\n\n merged_all = merged_arrv.\\\n convert_objects(convert_numeric=True)\n merged_all.to_csv(path + \"/Submission/temperatures.csv\")", "def add_time_features(self, year=False, month=False, week=True, tod=True, dow=True):\n\n var_to_expand = []\n\n if self.preprocessed_data.empty:\n data = self.original_data\n else:\n data = self.preprocessed_data\n\n if year:\n data[\"year\"] = data.index.year\n var_to_expand.append(\"year\")\n if month:\n data[\"month\"] = data.index.month\n var_to_expand.append(\"month\")\n if week:\n data[\"week\"] = data.index.week\n var_to_expand.append(\"week\")\n if tod:\n data[\"tod\"] = data.index.hour\n var_to_expand.append(\"tod\")\n if dow:\n data[\"dow\"] = data.index.weekday\n var_to_expand.append(\"dow\")\n\n # One-hot encode the time features\n for var in var_to_expand:\n \n add_var = pd.get_dummies(data[var], prefix=var, drop_first=True)\n \n # Add all the columns to the model data\n data = data.join(add_var)\n\n # Drop the original column that was expanded\n data.drop(columns=[var], inplace=True)\n\n self.preprocessed_data = data", "def summarise(self, reminder_info):\n return self.time_summariser.summarise(reminder_info)", "def build_schedule(solution, new_examiners, new_students):\n examiners = deepcopy(new_examiners)\n students = deepcopy(new_students)\n\n def student_is_available(target_student, target_time, target_duration):\n \"\"\"\n Checks whether a student is available at a given time for a certain duration\n :param target_student: the student\n :param target_time: the time at which the student should be available\n 
:param target_duration: the duration during which the student should be available\n :return:\n \"\"\"\n for exam, exam_time in target_student.items():\n if exam_time == -1:\n continue\n\n if target_time <= exam_time < target_time + target_duration + delay:\n return False\n elif exam_time <= target_time < exam_time + durations[exam] + delay:\n return False\n\n return True\n\n def examiner_is_available(target_examiner, target_time):\n \"\"\"\n Checks whether an examiner is available at a given time for his exam's duration\n :param target_examiner: the examiner\n :param target_time: the duration during which the examiner should be available\n :return:\n \"\"\"\n examiner_number, examiner_exams = target_examiner[\"Number\"], target_examiner[\"Exams\"]\n\n for _, exam_time in examiner_exams.items():\n if exam_time == -1:\n continue\n\n if target_time <= exam_time < target_time + durations[examiner_number]:\n return False\n elif exam_time <= target_time < exam_time + durations[examiner_number]:\n return False\n\n return True\n\n examiners_order, *students_orders = solution\n\n for j in examiners_order:\n all_set = False\n t = 0\n while not all_set:\n all_set = [examiners[j][\"Exams\"][i] != -1 for i in range(student_count)] == [True] * student_count\n placed = False\n for student in students_orders[j]:\n if examiners[j][\"Exams\"][student] != -1:\n continue\n\n if student_is_available(students[student], t, durations[j]):\n if examiner_is_available(examiners[j], t):\n placed = True\n students[student][j] = t\n examiners[j][\"Exams\"][student] = t\n break\n\n if not placed:\n t += 1\n else:\n t += durations[j]\n\n return examiners, students", "def extra_tasks_for_today(self):\n localtz = tzlocal()\n datetime_today = datetime.fromtimestamp(rospy.get_rostime().to_sec(), tz=localtz)\n day_today = datetime_today.strftime(\"%A\")\n date_today = datetime_today.date()\n rospy.loginfo('Looking for daily tasks for %s, %s' % (day_today, date_today))\n \n eight_forty_five= time(8,45, tzinfo=localtz)\n eleven_thirty= time(11,30, tzinfo=localtz)\n fourteen_thirty=time(14,30, tzinfo=localtz)\n seventeen_fifteen= time(17,15, tzinfo=localtz)\n past_bedtime = time(23,59, tzinfo=localtz)\n \n # day_end = seventeen_fifteen\n day_end = past_bedtime\n\n\n\n metric_wps=['WayPoint13', 'WayPoint18', 'WayPoint9','WayPoint11','WayPoint5','WayPoint3'] \n object_learn_wps=['WayPoint13', 'WayPoint18', 'WayPoint9', 'WayPoint11'] \n object_search_wps=['WayPoint1', 'WayPoint2', 'WayPoint3']\n door_wps=['WayPoint7', 'WayPoint4']\n \n morning_start = eight_forty_five\n morning_duration = delta_between(eleven_thirty, morning_start)\n \n lunch_start = eleven_thirty\n lunch_duration = delta_between(fourteen_thirty, lunch_start)\n\n afternoon_start = fourteen_thirty\n afternoon_duration = delta_between(day_end, afternoon_start)\n\n tasks = []\n \n #door checks at fixed times (to evaluate system ability to do stuff at corret times)\n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(10,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(13,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n 
task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(16,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n \n #random tasks\n for i in range(4):\n #morning\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n #lunch (less tasks because we want the robot mostly learning people tracks)\n if i<1:\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n \n #afternoon\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n return tasks", "def _addTiming(self, key, duration):\n pass", "def consolidate(self):\n\t\tprint \"\\tConsolidating breakends\"\n\t\tmap(lambda X: self._consolidateEmptyBreakend(X), self)", "def goalStatus(self, fromdt, todt):\r\n ret = {}\r\n tot = self.totalSaved(fromdt, todt)\r\n\r\n return dict([(g, tot - g.amount) for g in self.goals])" ]
[ "0.52670914", "0.521612", "0.5171499", "0.49770486", "0.49408284", "0.49355707", "0.47276974", "0.47106627", "0.4675955", "0.4652707", "0.46401706", "0.46156195", "0.45833963", "0.4529443", "0.45191568", "0.44983798", "0.44943383", "0.4493547", "0.4472287", "0.44453704", "0.4442126", "0.4402147", "0.4399679", "0.43958047", "0.43868315", "0.4376463", "0.4354266", "0.4350832", "0.4348201", "0.43386894", "0.4337404", "0.43361083", "0.43124256", "0.4302745", "0.4280693", "0.42769805", "0.4275281", "0.4265883", "0.4257348", "0.42377436", "0.4229916", "0.42274445", "0.42267755", "0.42240515", "0.42135024", "0.42097497", "0.4197707", "0.41875094", "0.41875032", "0.4178804", "0.4145356", "0.4144688", "0.41282928", "0.41233966", "0.41203138", "0.41142416", "0.41100687", "0.4097316", "0.40970254", "0.40966478", "0.40882823", "0.40880632", "0.407608", "0.4073354", "0.40729722", "0.40621635", "0.40607977", "0.40594816", "0.4048218", "0.4045775", "0.40419328", "0.403145", "0.40312928", "0.4029028", "0.40288198", "0.40251783", "0.40212828", "0.40175146", "0.4009128", "0.40085834", "0.40071023", "0.40066877", "0.40038955", "0.39997396", "0.3998115", "0.39972618", "0.39970234", "0.39951238", "0.39877945", "0.3984158", "0.39831182", "0.3982259", "0.39806217", "0.3965724", "0.39633942", "0.39585787", "0.3953438", "0.39485675", "0.39480734", "0.39429617" ]
0.6820048
0
Returns isoformat string of beginning of past x day(s). Assumes Europe/Amsterdam locale.
def get_timestamp(self, days=1):
    offset = datetime.datetime.utcnow().date() - datetime.timedelta(days=days-1)
    # est = tz.gettz('Europe/Amsterdam')
    # temporary dirty fix for timezone:
    timezone = '+02:00'
    start = datetime.datetime(offset.year, offset.month, offset.day)
    return start.isoformat() + timezone
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_past_date(self, days):\n past_date = datetime.now() - timedelta(days=days)\n return past_date.isoformat()", "def xlDateISO(xdate):\n # QuantLib doesn't support dates prior to 1901\n # which saves us from dealing with the leap year problem\n if xdate < 367:\n return \"#Date prior to 1901-01-01\"\n \n # python dates are from year zero, excel from 1900\n return date.fromordinal(693594 + int(xdate)).isoformat()", "def iso_date(self):\n return self.strftime(self.FORMAT_PRECISION_DAY)", "def isoformat(self):\n return \"%04d-%02d-%02d\" % (self._year, self._month, self._day)", "def least_current_date():\n # This is not the right way to do it, timezones can change\n # at the time of writing, Baker Island observes UTC-12\n return datetime.now(timezone(timedelta(hours=-12))).strftime(\"%Y-%m-%d\")", "def yesterday_string(fmt='%Y-%m-%d'):\n return (brasilia_time() - pd.Timedelta(days=1)).strftime(fmt)", "def nowISO():\n return dt2ISO(datetime.datetime.utcnow())", "def isoformat(self):\n s = '{0:04}'.format(self._year)\n if self._month:\n s += '-{0:02}'.format(self._month)\n if self._day:\n s += '-{0:02}'.format(self._day)\n return s", "def next_day(isotext):\n as_arrow = arrow.get(isotext)\n return as_arrow.replace(days=+1).isoformat()", "def next_day(isotext):\n as_arrow = arrow.get(isotext)\n return as_arrow.replace(days=+1).isoformat()", "def get_gds_current_date(self, remove_leading_zero='true'):\r\n time_now = datetime.datetime.now().time()\r\n today_2pm = time_now.replace(hour=14, minute=31, second=0, microsecond=0)\r\n if time_now < today_2pm:\r\n gds_date = datetime.datetime.now() - datetime.timedelta(days=int(1))\r\n else:\r\n gds_date = datetime.datetime.now()\r\n\r\n if remove_leading_zero.lower() == 'true':\r\n return str('{dt.day}{dt:%b}'.format(dt=gds_date).upper())\r\n else:\r\n return self._set_gds_date_format(gds_date)", "def format_iso_now():\n return datetime.datetime.utcnow().isoformat()+'Z'", "def format_date_iso(value: int) -> str:\n\n return (datetime(1970, 1, 1) + timedelta(milliseconds=value)).strftime('%Y-%m-%d')", "def isoformat_now():\n return datetime_isoformat(datetime.datetime.utcnow())", "def toisostring(dt):\n return dt.format(ISOFORMAT) + 'Z'", "def getISO8601Start(self):\n return self.iso8601start", "def todaystr():\n today = datetime.datetime.today()\n return f\"{today.year}{today.month:02}{today.day:02}\"", "def iso_year_start(self, iso_year):\n fourth_jan = datetime.date(iso_year, 1, 4)\n delta = datetime.timedelta(fourth_jan.isoweekday() - 1)\n return fourth_jan - delta", "def now_iso():\n return to_iso(now())", "def __call__(self, x: Sequence[datetime]) -> Sequence[str]:\n if self.tz is not None:\n x = [d.astimezone(self.tz) for d in x]\n return [d.strftime(self.fmt) for d in x]", "def standardDate(event):\n startTime = localize(event.start_time)\n if event.end_time:\n endTime = localize(event.end_time)\n if endTime.day == startTime.day:\n event_date = startTime.strftime('%A') + ', ' + startTime.strftime('%B') + ' ' + startTime.strftime('%d').lstrip('0') + ', '\n event_date += startTime.strftime('%I:%M%p').lower() + ' - ' + endTime.strftime('%I:%M%p').lower().lstrip('0')\n else:\n event_date = startTime.strftime('%A') + ', ' + startTime.strftime('%B') + ' ' + startTime.strftime('%d').lstrip('0') + ', ' + startTime.strftime('%I:%M%p').lower().lstrip('0') + ' - '\n event_date += endTime.strftime('%A') + ', ' + startTime.strftime('%B') + ' ' + startTime.strftime('%d').lstrip('0') + ', ' + endTime.strftime('%I:%M%p').lower().lstrip('0')\n else:\n 
event_date = startTime.strftime('%A') + ', ' + startTime.strftime('%B') + ' ' + startTime.strftime('%d').lstrip('0') + ', '\n event_date += startTime.strftime('%I:%M%p').lower().lstrip('0')\n return event_date", "def calculate_date(x, now):\n\t#now = datetime.datetime.now()\n\tn = int(extract_only_number(x))\n\tif n > 0:\n\t\treturn (now - datetime.timedelta(n)).strftime(\"%d-%m-%Y\")\n\treturn now.strftime(\"%d-%m-%Y\")", "def numeric_date_recover(self):\n \n sp_time_zone, current_datetime = self.setup_datetime() \n converter2sptimezone = current_datetime.astimezone(sp_time_zone)\n \n return converter2sptimezone.strftime('%d-%m-%Y')", "def isoformat(dt):\n return dt.isoformat().replace(\"+00:00\", \"Z\")", "def ISO(self):\n return \"%.4d-%.2d-%.2d %.2d:%.2d:%.2d\" % (\n self._year, self._month, self._day,\n self._hour, self._minute, self._second)", "def formalDateToday():\n return dt.date.today().strftime(\"%B %d, %Y\")", "def find_min_date(self):\n\n to_datetime = lambda x: datetime.datetime.strptime(x, \"%Y-%m-%dT%H:%M:%S\")\n all_dates = [to_datetime(event['start']['dateTime'][:-2]) for event in self.formatted_events]\n self.min_date = min(all_dates).isoformat() + 'Z'", "def date() -> str:\n\n return datetime.strftime(datetime.today(), _fmt)", "def get_date_display(self, context):\n return '{year}/{month}/{day}'.format(\n year=self.get_year(),\n month=self.get_month().zfill(2),\n day=self.get_day().zfill(2))", "def _default_dates():\n today = datetime.now().date()\n five_days_from_now = today + timedelta(days=5)\n # create readable format, as should be input\n # return [today.strftime('%Y-%m-%d'), five_days_from_now.strftime('%Y-%m-%d')]\n return [today, five_days_from_now]", "def easter_date(y):\r\n return dateutil.easter.easter(int(y)).strftime('%Y%m%d')", "def get_n_days_ahead(self, startdate, n, fmt=None):\n return startdate + datetime.timedelta(days=n)", "def get_date():\n return (datetime.now() - TIMEDELTA).isoformat()", "def get_date(self):\n return self.date.strftime(\"%a %x\")", "def get_start_date(self):\n return \"%d%02d\" % (self.year, self.term)", "def get_oldest_article_date():\n\n # date = datetime.datetime.strptime(date, \"%m/%d/%Y\")\n today_date = datetime.date.today()\n last_week = today_date-timedelta(days=2)\n search_date = last_week.isoformat()\n\n return search_date", "def str_day(s):\n # TODO: Fix the -06:00 time zone offset\n if s:\n d = convert_from_iso(s)\n return datetime.datetime.strftime(d, \"%d\").strip(\" \")\n else:\n # Couldn't parse, return original.\n return s", "def nepalinow(format=\"%B %d, %Y, %A\"):\n\treturn to_nepali_datetime(timezone.now()).strftime(format)", "def starting_date(self):\n return datetime.date(2016, 1, 4)", "def get_day_today() -> str:\n day = datetime.now().strftime(\"%w\")\n if day == '0': # Sunday\n return '6'\n elif day == '6': # Saturday\n return '5'\n elif day == '1': # Monday\n return '0'\n elif day == '2': # Tuesday\n return '1'\n elif day == '3': # Wednesday\n return '2'\n elif day == '4': # Thursday\n return '3'\n elif day == '5': # Friday\n return '4'", "def get_day_of_week() -> str:\n return datetime.now(pytz.timezone('US/Eastern')).strftime(\"%a\").lower()", "def day_name(x):\r\n if x==0:\r\n return \"Sunday\"\r\n elif x==1:\r\n return \"Monday\"\r\n elif x==2:\r\n return \"Tuesday\"\r\n elif x==3:\r\n return \"Wednesday\"\r\n elif x==4:\r\n return \"Thursday\"\r\n elif x==5:\r\n return \"Friday\"\r\n elif x==6:\r\n return \"Saturday\"", "def formatISODT(dt):\n\tif dt is None:\n\t\treturn None\n\treturn 
dt.replace(microsecond=0, tzinfo=None).isoformat()+\"Z\"", "def start_date_text(self):\r\n i18n = self.runtime.service(self, \"i18n\")\r\n _ = i18n.ugettext\r\n strftime = i18n.strftime\r\n\r\n def try_parse_iso_8601(text):\r\n try:\r\n result = Date().from_json(text)\r\n if result is None:\r\n result = text.title()\r\n else:\r\n result = strftime(result, \"SHORT_DATE\")\r\n except ValueError:\r\n result = text.title()\r\n\r\n return result\r\n\r\n if isinstance(self.advertised_start, basestring):\r\n return try_parse_iso_8601(self.advertised_start)\r\n elif self.start_date_is_still_default:\r\n # Translators: TBD stands for 'To Be Determined' and is used when a course\r\n # does not yet have an announced start date.\r\n return _('TBD')\r\n else:\r\n when = self.advertised_start or self.start\r\n return strftime(when, \"SHORT_DATE\")", "def first_month_day():\r\n return datetime.now().replace(day=1).strftime('%d-%m-%Y')", "def ISO8601(self):\n if self.timezoneNaive():\n return \"%0.4d-%0.2d-%0.2dT%0.2d:%0.2d:%0.2d\" % (\n self._year, self._month, self._day,\n self._hour, self._minute, self._second)\n tzoffset = _tzoffset2iso8601zone(_tzoffset(self._tz, self._t))\n return \"%0.4d-%0.2d-%0.2dT%0.2d:%0.2d:%0.2d%s\" % (\n self._year, self._month, self._day,\n self._hour, self._minute, self._second, tzoffset)", "def get_yesterday_label(self):\n return gettext_lazy('Yesterday')", "def generate_dates(self):\r\n\r\n numdays = 20\r\n\r\n base = datetime.datetime.today()\r\n\r\n date_list = [base + datetime.timedelta(days=x) for x in range(numdays)]\r\n\r\n date_str = [x.strftime(\"%d-%m-%Y\") for x in date_list]\r\n\r\n return date_str", "def day_of_week(self) -> str:\n return self.elements[4]", "def format_datestr(v):\n return v.isoformat() + 'Z'", "def largest_current_date():\n # This is not the right way to do it, timezones can change\n # at the time of writing, Samoa observes UTC+14 in Summer\n return datetime.now(timezone(timedelta(hours=14))).strftime(\"%Y-%m-%d\")", "def ingame_formatted(dt: datetime) -> str:\n return dt.strftime(\"%Y - %B\")", "def get_at_as_string(self):\n\n return self.at.strftime(\"%Y-%m-%dT%H:%M:%S.000Z\")", "def day_str(day):\n return str(day) if day >= 10 else '0' + str(day)", "def day_str(day):\n return str(day) if day >= 10 else '0' + str(day)", "def day_str(day):\n return str(day) if day >= 10 else '0' + str(day)", "def last_month_first_day():\r\n return (datetime.now().replace(day=1) + relativedelta(months=-1) + timedelta(days=-1)).strftime(\r\n '%d-%m-%Y')", "def get_day_string(self, date_obj):\n return date_obj.strftime('%A')[:3].upper()", "def get_nicedate(self):\n if self.valid is None:\n return \"(unknown issuance time)\"\n localts = self.valid\n fmt = \"%b %-d, %H:%M UTC\"\n if self.tz is not None:\n localts = self.valid.astimezone(self.tz)\n # A bit of complexity as offices may not implement daylight saving\n if self.z.endswith(\"ST\") and localts.dst():\n localts -= datetime.timedelta(hours=1)\n fmt = \"%b %-d, %-I:%M %p \" + self.z\n return localts.strftime(fmt)", "def utc_today_str():\n return datetime.datetime.strftime(datetime.datetime.utcnow(), \"%Y-%m-%d\")", "def startdate_display(self):\n if self.startdate:\n return self.startdate.strftime(self.format)", "def get_date_from_display(self) -> str:\n return _date(\n self.date_from,\n \"DATETIME_FORMAT\" if self.settings.show_times else \"DATE_FORMAT\"\n )", "def format_iso_date(date, night_date=True):\n if isinstance(date, str):\n date = Time(date, format=\"fits\").datetime\n elif isinstance(date, 
datetime):\n date = Time(date, format=\"datetime\").datetime\n\n if night_date:\n return (\n date - timedelta(hours=15)\n ).date() # If obs goes up to 15pm it still belongs to day before\n else:\n return date", "def _ten_days_ago() -> str:\n ten_days_ago = gmtime(mktime(gmtime()) - TEN_DAYS_SECONDS)\n\n return strftime(DATE_FORMAT, ten_days_ago)", "def get_quarter_start(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof.replace(month=(asof.month - 1) // 3 * 3 + 1, day=1)", "def get_date_prefix(date, prefix_tmpl=STD_DATE_PREFIX):\n return prefix_tmpl.format(date.year, date.month, date.day)", "def get_date_range(day_positive, days_to_check):\n\t\n\treturn (\"{},{}\".format((day_positive - timedelta(days=days_to_check)).strftime(\"%Y-%m-%dT%H:%M:%SZ\"), day_positive.strftime(\"%Y-%m-%dT%H:%M:%SZ\")))", "def today_string(fmt='%Y-%m-%d'):\n return brasilia_time().strftime(fmt)", "def GetCompactDateString():\r\n utc_time = pytz.UTC.localize(datetime.datetime.utcnow())\r\n pac_time = utc_time.astimezone(PACIFIC)\r\n is_dst = time.localtime().tm_isdst\r\n if is_dst:\r\n return pac_time.strftime(\"%Y%m%d-%Hd%M%S\")\r\n else:\r\n return pac_time.strftime(\"%Y%m%d-%Hs%M%S\")", "def shortDate(self, date):\n return u'%s %02i' % (date.pMonth(), date.day())", "def get_last_seven_days_label(self):\n return gettext_lazy('Last seven days')", "def get_offset():\n offset = datetime.date.today() - start_day\n return int(offset.days) - 4", "def actual_ico_starts_at(uncapped_flatprice: Contract, preico_ends_at):\n return preico_ends_at + 24*3600 + 90", "def date_to_iso8601(date):\n return '%s-%02d-%02d' % (date.year, date.month, date.day)", "def naturalday(value, format='%b %d'):\r\n try:\r\n value = date(value.year, value.month, value.day)\r\n except AttributeError:\r\n # Passed value wasn't date-ish\r\n return value\r\n except (OverflowError, ValueError):\r\n # Date arguments out of range\r\n return value\r\n delta = value - date.today()\r\n if delta.days == 0:\r\n return _('today')\r\n elif delta.days == 1:\r\n return _('tomorrow')\r\n elif delta.days == -1:\r\n return _('yesterday')\r\n return value.strftime(format)", "def to_iso(dt):\n return dt.strftime(ISO_FORMAT)", "def __get_day(self, day, month, year):\n date = self.today.today().replace(day=day, month=month, year=year).date()\n # emoji format for current date\n ret = emoji.emojize(':round_pushpin:') if self.today.date() == date else ''\n\n return ret + str(day)", "def get_today_label(self):\n return gettext_lazy('Today')", "def pretty_date_filter(dt, default=None):\n\n if default is None:\n default = 'just now'\n\n now = datetime.utcnow()\n diff = now - dt\n\n periods = (\n (diff.days / 365, 'year', 'years'),\n (diff.days / 30, 'month', 'months'),\n (diff.days / 7, 'week', 'weeks'),\n (diff.days, 'day', 'days'),\n (diff.seconds / 3600, 'hour', 'hours'),\n (diff.seconds / 60, 'minute', 'minutes'),\n (diff.seconds, 'second', 'seconds'),\n )\n\n for period, singular, plural in periods:\n\n if not period:\n continue\n\n if period == 1:\n return u'%d %s ago' % (period, singular)\n else:\n return u'%d %s ago' % (period, plural)\n\n return default", "def qToday():\n \n return _qDate.todaysDate().ISO()", "def fixstartdate(startdate):\n if not startdate: return \"01/01/2017\"\n s = str(startdate)\n d,m,y = [max(dt,1) for dt in [int(s[6:8]),int(s[4:6]),int(s[:4])]]\n return f\"{d:0>2}/{m:0>2}/{y:0>4}\"", "def _today() -> str:\n return strftime(DATE_FORMAT, gmtime())", "def datetime_to_isoformat(obj: datetime.datetime) -> str:\n return 
obj.replace(tzinfo=datetime.timezone.utc).isoformat().replace(\"+00:00\", \"Z\")", "def get_date_hour_today() -> str:\n return datetime.now().strftime(\"%Y-%m-%dT%H\")", "def to_iso8601(when):\n return when.strftime(boto.utils.ISO8601)", "def get_current_datetime_string ( ) :\n return get_current_datetime( ).strftime( \"%Y%m%d-%H%M%S\" )", "def day(dt: datetime.datetime) -> str:\n day: str = dt.strftime(\"%A\")\n return day", "def get_prev_weekday(x: Optional[Date] = None) -> Date:\n ## Get the day:\n x = x or get_today()\n\n ## Define the offset:\n offset = max(1, (x.weekday() + 6) % 7 - 3)\n\n ## Compute the day and return:\n return x - TimeDelta(days=offset)", "def utc_iso_now():\n now=time.gmtime()\n fmt=\"%Y-%m-%dT%H:%M:%S\"\n r=time.strftime(fmt, now)\n return (now, r)", "def brasilia_day():\n return (dt.datetime.utcnow() + dt.timedelta(hours=-3)).replace(hour=0, minute=0, second=0, microsecond=0)", "def __str__(self):\n return '{y}-{m:0>2}-{d:0>2}'.format(y=self.year, m=self.month, d=self.day)", "def yesterdayDate(self):\n yesterday = time.time() - 24*3600\n return time.strftime(\"%m/%d/%Y\", time.localtime(yesterday))", "def dayname(self):\n return self.strftime(\"%A\")", "def date_now():\n return datetime.today().strftime('%c')", "def get_previous_byday(self, daystring, startdate, fmt=None):\n # decimal number day of the week we're starting from. %w formats using Sunday as day 0.\n dow_start = int(datetime.datetime.strftime(startdate, '%w'))\n\n # decimal number day of week we're trying to get.\n dow_target = self.weekdays.index(daystring)\n\n days_back = 7 - ((dow_target + (7 - dow_start)) % 7)\n\n res = startdate - datetime.timedelta(days=days_back)\n return res", "def plastic_date():\n return 'Zun, 99 Zun 9999 99:61:61'", "def get_date_today() -> str:\n return datetime.now().strftime(\"%Y-%m-%d\")", "def _FirstSunday(self, dtz): # pylint: disable-msg=C0103,R0201\n return dtz + datetime.timedelta(days=(6-dtz.weekday()))", "def today(self):\n return(datetime.date.today().isoformat())", "def get_date():\n return str(datetime.now()).split(' ')[0]" ]
[ "0.58592373", "0.57152444", "0.57139474", "0.55301887", "0.5470207", "0.5442907", "0.5421798", "0.54160655", "0.53812414", "0.53812414", "0.5367235", "0.5362113", "0.5331076", "0.5325458", "0.5298497", "0.5294768", "0.5270717", "0.5265063", "0.52353704", "0.5232425", "0.52232045", "0.52104664", "0.5192937", "0.51783615", "0.5178192", "0.5149079", "0.5145021", "0.51316077", "0.51109004", "0.51086503", "0.5101139", "0.50857735", "0.50724196", "0.5070349", "0.5056718", "0.5043922", "0.5039766", "0.50283736", "0.501801", "0.5014966", "0.50082177", "0.5003938", "0.50002474", "0.49551395", "0.49372342", "0.4926853", "0.49254793", "0.4921969", "0.49177596", "0.49143204", "0.49015382", "0.4895714", "0.48872745", "0.48729616", "0.48729616", "0.48729616", "0.48696068", "0.48571718", "0.48492262", "0.48482934", "0.48468968", "0.48440135", "0.48439306", "0.48427472", "0.4813266", "0.4812903", "0.481273", "0.4798252", "0.47922385", "0.47915077", "0.47872293", "0.47804943", "0.47776526", "0.47747475", "0.47724652", "0.4772022", "0.47576153", "0.47502372", "0.47488272", "0.47468543", "0.4743204", "0.473619", "0.47356254", "0.47320658", "0.47265875", "0.47263235", "0.47262624", "0.47240707", "0.4723733", "0.4723179", "0.4721236", "0.4714468", "0.4712077", "0.4709197", "0.47082302", "0.4705764", "0.4704979", "0.47006366", "0.4700254", "0.4694301" ]
0.5205546
22
Private utility to get routes from an extended class
def __get_parent_routes(self, router: APIRouter):
    for route in router.routes:
        options = {key: getattr(route, key) for key in __router_params__}

        # inherits child tags if presents
        if len(options["tags"]) == 0 and self.openapi_tag:
            options["tags"].append(self.openapi_tag["name"])

        self.router.add_api_route(route.path, route.endpoint, **options)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getRoutes(self):\n pass", "def get_routes(self):\n return [\n (route, handler.handler_class, handler.init_kwargs)\n for route, handler in self._routes.iteritems()\n ]", "def get_routes():\n return sum([load_module(m).ROUTES for m in settings.INSTALLED_HANDLERS], []) + ROUTES", "def getPluginRoutes(cls):\n pluginImports = cls.__getPluginImports()\n pluginRoutes = []\n if pluginImports is not None:\n logger = LoggingManager.getInstance().getLogger(cls.__name__)\n for pluginImport in pluginImports:\n logger.debug(\"Importing routes from: %s\" % pluginImport)\n try:\n plugin = importlib.import_module(pluginImport)\n pluginRoutes += plugin.RouteDescriptor.getRoutes()\n except ImportError as ex:\n logger.error(\"Could not import %s: \" % pluginImport)\n logger.error(ex.message)\n except AttributeError as ex:\n logger.error(\"Could not load routes from: %s\" % pluginImport)\n logger.error(ex.message)\n\n return pluginRoutes", "def add_routes(self):\n pass", "def routes(self):\n return self._routes", "def get_routers(self):", "def test_read_namespaced_route(self):\n pass", "def route_methods(self, route):\n return [route.method]", "def static_routes(self):\n return self._static_routes", "def yieldroutes(func):\r\n import inspect # Expensive module. Only import if necessary.\r\n path = '/' + func.__name__.replace('__','/').lstrip('/')\r\n spec = inspect.getargspec(func)\r\n argc = len(spec[0]) - len(spec[3] or [])\r\n path += ('/:%s' * argc) % tuple(spec[0][:argc])\r\n yield path\r\n for arg in spec[0][argc:]:\r\n path += '/:%s' % arg\r\n yield path", "def routes():\n routeList = []\n for profile in globalProfile():\n routeList.append(profile.route)\n return routeList", "def route(self):\n pass", "def _get_route_map(self):\n return self.__route_map", "def _get_static_route_map(self):\n return self.__static_route_map", "def getRoutes(request):\n routes = {\n 'Item list': '/api/v1/items/',\n 'Item details': '/api/v1/item/<int:pk>/',\n\n 'JWT': '/api/v1/users/login/',\n }\n\n return Response(routes)", "def test_list_namespaced_route(self):\n pass", "def route_methods(self, route: web.Route):\n return [route.method]", "def get_handlers():\n handlers = list()\n\n #login\n handlers.append((r'/login', Login))\n handlers.append((r'/logout', Logout))\n\n # main\n handlers.append((r'/', Index))\n\n\n #user\n handlers.extend(get_routes(UserController))\n\n #role\n handlers.extend(get_routes(RoleController))\n\n\n handlers.extend(get_routes(ApiServiceController))\n\n handlers.extend(get_routes(InventarioController))\n\n return handlers", "def __call__(self, req):\n return self._router", "def get_routes():\n output = [f'{\"S. 
No.\":6}\\t{\"Endpoint\":50}\\t{\"Method\":8}\\n']\n\n for index, rule in enumerate(app.url_map.iter_rules()):\n for i, method in enumerate(rule.methods):\n output.append(f'{index + 1 if i == 0 else \"\":<6}\\t{rule.rule:50}\\t{method:10}')\n\n try:\n output.append(f'\\n{eval(rule.endpoint).__doc__}\\n')\n except NameError:\n output.append('\\n')\n\n return Response('\\n'.join(output), 200, mimetype='text/plain')", "def test_list_route_for_all_namespaces(self):\n pass", "def list_routes():\n import urllib\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\n \"{:35s} {:35s} {}\".format(\n rule.endpoint,\n methods,\n url\n )\n )\n output.append(line)\n\n for line in sorted(output):\n print(line)", "def copy_routes(self, remapping=None):\n if not self.inherit_from:\n raise APIBlueprint.InheritanceError(\n 'Blueprint not properly configured to inherit routes'\n )\n\n parent_blueprint = self.inherit_from\n for rule, view_info in six.iteritems(parent_blueprint.routes_to_views_map):\n view_func = view_info.get('view_func')\n options_dict = view_info.get('options')\n\n if remapping and rule in remapping:\n if remapping[rule]:\n rule = remapping[rule]\n else:\n continue\n\n self.add_url_rule(rule, view_func=view_func, **options_dict)", "def route(self) -> APIRouter:\n return self.router", "def __create_routes__(self):\n self.app.add_url_rule('/', 'main_page', self.main_page)\n self.app.add_url_rule('/day', 'day', self.get_current_iteration, methods=['GET'])\n self.app.add_url_rule('/traders', 'traders', self.register_trader, methods=['POST'])\n self.app.add_url_rule('/traders/<id>', 'trader', self.get_trader_state, methods=['GET'])\n self.app.add_url_rule('/stock/price', 'price', self.get_stock_price, methods=['GET'])\n self.app.add_url_rule('/stock/history', 'history', self.get_stock_price_history, methods=['GET'])\n self.app.add_url_rule('/stock/buy', 'buy', self.place_buy_order, methods=['POST'])\n self.app.add_url_rule('/stock/sell', 'sell', self.place_sell_order, methods=['POST'])\n self.app.add_url_rule('/simulation/step', 'step', self.market_session_step, methods=['POST'])\n self.app.add_url_rule('/simulation/run', 'run', self.run_simulation, methods=['POST'])", "def routes(self) -> List[Tuple[int, bytes]]:\n raise NotImplementedError() # pragma: no cover", "def routes():\n import urllib.request, urllib.parse, urllib.error\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, url))\n output.append(line)\n for line in sorted(output):\n print (line)", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GatewayApiRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def __init__(self):\n super(RouteLayer, self).__init__()\n\n routes = [(\"^/ping\", views.ping),\n (\"^/e(co)?(?P<eco_message>[^$]+)$\", views.echo),\n (\"^/p(iada)?\\s*$\", views.get_piada)]\n\n routes.extend(MediaViews(self).routes)\n routes.extend(StaticViews(self).routes)\n # routes.extend(GroupAdminViews(self).routes)\n\n self.views = [(re.compile(pattern), callback) for pattern, callback in routes]", "def __parse_controller_router(cls):\n router = getattr(cls, 
Controller.RC_KEY)\n\n dependencies = None\n if hasattr(cls, \"dependencies\"):\n dependencies = deepcopy(cls.dependencies)\n delattr(cls, \"dependencies\")\n\n for route in router.routes:\n # add class dependencies\n if dependencies:\n for depends in dependencies[::-1]:\n route.dependencies.insert(0, depends)\n\n # get the signature of the endpoint function\n signature = inspect.signature(route.endpoint)\n # get the parameters of the endpoint function\n signature_parameters = list(signature.parameters.values())\n\n # replace the class instance with the itself FastApi Dependecy\n signature_parameters[0] = signature_parameters[0].replace(\n default=Depends(cls)\n )\n\n # set self and after it the keyword args\n new_parameters = [signature_parameters[0]] + [\n parameter.replace(kind=inspect.Parameter.KEYWORD_ONLY)\n for parameter in signature_parameters[1:]\n ]\n\n new_signature = signature.replace(parameters=new_parameters)\n setattr(route.endpoint, Controller.SIGNATURE_KEY, new_signature)\n\n return router", "def test_base_route_class(self):\n with mock.patch.object(Route, 'handler_class') as handler:\n errors = Route.check()\n\n self.assertFalse(handler.check.called)\n self.assertEqual(errors, [])", "def routes(methods_filter, route_filter):\n from utils import list_routes\n\n app_routes = list_routes(app, methods_filter, route_filter)\n if app_routes:\n for line in sorted(app_routes):\n print(\"{:8s} {:{width}s} {}\".format(line['method'], line['route'], line['endpoint'],\n width=70 + line['route_expanded_length']))\n else:\n print(\"No route !\")", "def routes(self) -> dict:\n return dict(self._routes)", "def list_routes(app):\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n\n methods = ','.join(rule.methods)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, rule))\n output.append(line)\n\n return sorted(output)", "def test_create_route_for_all_namespaces(self):\n pass", "def routes(self, body):\n pass", "def get_view_by_introspector(request, route):\n introspector = request.registry.introspector\n route_intr = introspector.get('routes', route.name)\n\n related_intr = introspector.related(route_intr)\n if related_intr is None:\n return None\n\n for related in related_intr:\n print \"related\", related\n if related.category_name == 'views':\n view_func = related['callable']\n if isinstance(view_func, static_view):\n # Lets skip over static views\n continue\n if related['attr']:\n view_action = \".\".join([view_func.__module__, view_func.__name__, related['attr']])\n else:\n view_action = \".\".join([view_func.__module__, view_func.__name__])\n return view_action", "def get_route(request, config_dict):\n try:\n route = re.search(\"GET (.*) HTTP\", request).group(1)\n except:\n logger.error(\"Not a get request from client %s\" % request)\n raise Exception\n return\n try:\n encoding = re.search(\"Accept-Encoding: (.*)\", request).group(1)\n gzip_flag = (\"gzip\" in encoding)\n except:\n gzip_flag = False\n\n query_url = config_dict['target_url'] + \"/service/publicXMLFeed?command=\"\n routers = route.split(\"/\")\n\n if route == \"/\":\n return [route, \"\", gzip_flag]\n\n short_title = \"\"\n if str(routers[-1]) == 'useShortTitles':\n short_title = API_ENDPOINTS['useShortTitles']\n del routers[-1]\n try:\n query_url = next_xml_url(query_url, API_ENDPOINTS[str(routers[3])],\n routers) + short_title\n return [route, query_url, gzip_flag]\n except Exception as e:\n 
logger.error(\"Request '%s returned with %s \" % (str(route), e))", "def routers():\n routers = []\n\n for app_controller in __app_controllers__:\n routers.append(app_controller.router())\n\n return routers", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def register(self):\n for _, member in inspect.getmembers(self):\n if isinstance(member, Route):\n member.set_parent(self)\n member.register(self.core)", "def test_get_routes(self):\n routes = self.stop.routes\n self.assertEqual(type(routes), type([]))\n [self.assertEqual(type(i), BusRoute) for i in routes]\n routes[0].__repr__()\n routes[0].__str__()\n routes[0].__unicode__()", "def add_routes(self):\n\n # create a routegroup\n routegroup = MewloRouteGroup('testsite_routegroup')\n # overide the parent import-pack-directory for the urls in this group? if we don't it will use the controller root set in SITE config\n # routegroup.set_controllerroot(pkgdirimp_controllers)\n\n routegroup.append(\n MewloRoute(\n id = 'home',\n path = \"/\",\n controller = MewloController(function='requests.request_home')\n ))\n\n\n routegroup.append(\n MewloRoute(\n id = 'hello',\n path = '/test/hello',\n args = [\n MewloRouteArgString(\n id = 'name',\n required = True,\n help = \"name of person to say hello to\",\n ),\n MewloRouteArgInteger(\n id = 'age',\n required = False,\n help = \"age of person (optional)\",\n defaultval = 44,\n )\n ],\n controller = MewloController(function=\"requests.request_sayhello\"),\n # we can pass in any extra data which will just be part of the route that can be examined post-matching\n extras = { 'stuff': \"whatever we want\" },\n # we can force the route to simulate as if certain url call args were assigned (this works whether there are RouteArgs for these or not; no type checking is performed on them)\n # this could be useful in two scenarios: first, if we initially wrote code to handle an arg and then changed our mind and want to not let user set that arg; second, if we reuse a controller function in different places and simulate dif arg values for each\n forcedargs = { 'sign': u\"aries\" },\n ))\n\n\n\n from controllers import requests\n routegroup.append(\n MewloRoute(\n id = 'article',\n path = '/article',\n args = [\n MewloRouteArgString(\n id = 'title',\n required = False,\n positional = True,\n help = \"title of article to display\",\n )\n ],\n # another way to specify the controller is to pass in the actual function reference (rather than as a string)\n controller = MewloController(function=requests.request_article),\n ))\n\n routegroup.append(\n MewloRoute(\n id = 'help',\n path = '/user/help',\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_help'),\n ))\n routegroup.append(\n MewloRoute(\n id = 'contact',\n path = '/help/contact',\n # we can pass the root pack to the MewloController constructor, which has the benefit of doing the import immediately and raising exception if not found; otherwise the error will come up during preparation\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_contact'),\n ))\n routegroup.append(\n MewloRoute(\n id = 'about',\n path = '/help/about',\n # we can pass the root pack to the MewloController constructor, which has the benefit of doing the import immediately and raising exception if not found; 
otherwise the error will come up during preparation\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_about'),\n ))\n\n\n #static file server\n if (False):\n routegroup.append(\n MewloRoute_StaticFiles(\n id = 'static_files',\n path = '/static',\n controller = MewloController_StaticFiles(\n sourcepath = '${sitefilepath}/staticfilesource'\n ),\n ))\n\n\n # add routegroup we just created to the site\n self.comp('routemanager').append(routegroup)", "def add_rest_routes(self, route, api=None, pos=0):\n def decorator(cls):\n # parent is the parent class of the relation\n cls_name = cls.__name__.lower()\n #print(cls_name)\n # default REST is the following pattern:\n # (r\"/post/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\", PostHandler),\n action=\"\"\n # if cls_name.endswith(\"handler\"):\n # action=action[:-7]\n # else:\n # action = cls_name\n # if route:\n action=route\n\n r=r\"/\"+action+r\"/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\"\n if api:\n # render the given api in the route URL\n r=r\"/\"+action+r\"/\"+str(api)+r\"/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\"\n \n #print(\"added the following routes: \" + r)\n handlers=getattr(self.__class__, \"handlers\", None)\n handlers.append((r,cls))\n \n # use the positioned handlers\n handlers_tmp=getattr(self.__class__, \"handlers_tmp\", None)\n handlers_tmp.append(((r,cls),pos))\n\n r=r\"/\"+action+r\"/*\"\n #print(\"added the following routes: \" + r)\n handlers.append((r,cls))\n handlers_tmp.append(((r,cls),pos))\n #print(\"handlers: \" + str(self.handlers))\n print(\"ROUTING: added RESTful routes for: \" + cls.__name__ + \" as /\" + action)\n #print(dir())\n return cls\n return decorator", "def normalise(self) -> \"Route\":\n pass", "def _GetPaths(self) -> Dict[str, Dict[Any, Any]]:\n\n # The `Paths Object` `paths` field of the root `OpenAPI Object`.\n paths_obj: DefaultDict[str, Dict[Any, Any]] = collections.defaultdict(dict)\n\n router_methods = self.router.__class__.GetAnnotatedMethods()\n for router_method in router_methods.values():\n # To extract optional path parameters, all the routes associated with this\n # router method must be analysed and grouped.\n ungrouped_routes = []\n for http_method, path, _ in router_method.http_methods:\n path_components = path.split(\"/\")\n # Remove any empty strings from the list of path components.\n path_components = [comp for comp in path_components if comp]\n\n ungrouped_routes.append([http_method] + path_components)\n\n grouped_routes = _GetGroupedRoutes(ungrouped_routes)\n for route_info in grouped_routes:\n # Components (comps) are URL components, including Werkzeug path\n # arguments such as `<client_id>` or `<path:file_path>`.\n route_comps, req_path_param_comps, opt_path_param_comps = route_info\n http_method = route_comps[0]\n path = \"/\".join(route_comps[1:])\n\n # Separate the route parameters into path params, query params and\n # request body params.\n path_params, query_params, body_params = self._SeparateFieldsIntoParams(\n http_method, path, router_method.args_type)\n\n # Separate the path params into required and optional path params.\n # First, extract path param names by normalizing the Werkzeug path arg\n # components to OpenAPI path args and remove the surrounding brackets.\n req_path_param_names = [\n _NormalizePathComponent(comp)[1:-1] for comp in req_path_param_comps\n ]\n opt_path_param_names = [\n _NormalizePathComponent(comp)[1:-1] for comp in opt_path_param_comps\n ]\n 
req_path_params = []\n opt_path_params = []\n for path_param in path_params:\n path_param_name = casing.SnakeToCamel(path_param.name)\n if path_param_name in req_path_param_names:\n req_path_params.append(path_param)\n elif path_param_name in opt_path_param_names:\n opt_path_params.append(path_param)\n else:\n raise AssertionError(\n f\"Path parameter {path_param_name} was not classified as \"\n f\"required/optional.\")\n\n normalized_path = _NormalizePath(path)\n path_obj = paths_obj[normalized_path]\n path_obj[http_method.lower()] = (\n self._GetOperationDescription(router_method, req_path_params,\n opt_path_params, query_params,\n body_params))\n\n return paths_obj", "def get_fab_url_classes():\n urls=()\n fab_classes={}\n for name, f in inspect.getmembers(fabfile):\n if inspect.isfunction(f):\n urls += ('/'+name, name)\n fns={}\n fns[name]=staticmethod(f)\n fns['GET']=GET\n fns['POST']=POST\n fab_classes[name]=type(name, (object,), fns)\n return (urls,fab_classes)", "def routes_info():\n routes = []\n for rule in app.url_map.iter_rules():\n try:\n if rule.endpoint != 'static':\n if hasattr(app.view_functions[rule.endpoint], 'import_name'):\n import_name = app.view_functions[rule.endpoint].import_name\n obj = import_string(import_name)\n routes.append({rule.rule: \"%s\\n%s\" % (\",\".join(list(rule.methods)), obj.__doc__)})\n else:\n routes.append({rule.rule: app.view_functions[rule.endpoint].__doc__})\n except Exception as exc:\n routes.append({rule.rule: \n \"(%s) INVALID ROUTE DEFINITION!!!\" % rule.endpoint})\n route_info = \"%s => %s\" % (rule.rule, rule.endpoint)\n app.logger.error(\"Invalid route: %s\" % route_info, exc_info=True)\n # func_list[rule.rule] = obj.__doc__\n\n return jsonify(code=200, data=routes)", "def urlpatterns(self) -> list:\n raise NotImplementedError()", "def available_routes():\r\n return (\r\n f\"Available Routes:<br/>\"\r\n f\"/api/v1.0/precipitation<br/>\"\r\n f\"/api/v1.0/stations<br/>\"\r\n f\"/api/v1.0/tobs<br/>\"\r\n f\"/api/v1.0/start<br/>\"\r\n f\"/api/v1.0/start/end\"\r\n )", "def test_create_namespaced_route(self):\n pass", "def get_children(self):\n return self._routes.values()", "def test_patch_namespaced_route(self):\n pass", "def test_subclasses(self):\n subclasses = Route.get_subclasses()\n self.assertIn(RouteSubclass, subclasses)", "def get_routes(cohesity_client):\n routes = cohesity_client.routes.get_routes() or []\n for route in routes:\n exported_res_dict[\"Routes\"].append(route.iface_group_name)\n return routes", "def test_nested_subclasses(self):\n subclasses = Route.get_subclasses()\n self.assertIn(NestedRouteSubclass, subclasses)", "def add_routes_hook(map, *args, **kwargs):\n map.connect('/dex/media/*path', controller='dex', action='media')\n map.connect('/dex/lookup', controller='dex', action='lookup')\n map.connect('/dex/suggest', controller='dex', action='suggest')\n map.connect('/dex/parse_size', controller='dex', action='parse_size')\n\n # These are more specific than the general pages below, so must be first\n map.connect('/dex/moves/search', controller='dex_search', action='move_search')\n map.connect('/dex/pokemon/search', controller='dex_search', action='pokemon_search')\n\n map.connect('/dex/abilities/{name}', controller='dex', action='abilities')\n map.connect('/dex/items/{pocket}', controller='dex', action='item_pockets')\n map.connect('/dex/items/{pocket}/{name}', controller='dex', action='items')\n map.connect('/dex/locations/{name}', controller='dex', action='locations')\n map.connect('/dex/moves/{name}', 
controller='dex', action='moves')\n map.connect('/dex/natures/{name}', controller='dex', action='natures')\n map.connect('/dex/pokemon/{name}', controller='dex', action='pokemon')\n map.connect('/dex/pokemon/{name}/flavor', controller='dex', action='pokemon_flavor')\n map.connect('/dex/pokemon/{name}/locations', controller='dex', action='pokemon_locations')\n map.connect('/dex/types/{name}', controller='dex', action='types')\n\n map.connect('/dex/abilities', controller='dex', action='abilities_list')\n map.connect('/dex/items', controller='dex', action='items_list')\n map.connect('/dex/natures', controller='dex', action='natures_list')\n map.connect('/dex/moves', controller='dex', action='moves_list')\n map.connect('/dex/pokemon', controller='dex', action='pokemon_list')\n map.connect('/dex/types', controller='dex', action='types_list')\n\n map.connect('/dex/gadgets/compare_pokemon', controller='dex_gadgets', action='compare_pokemon')\n map.connect('/dex/gadgets/pokeballs', controller='dex_gadgets', action='capture_rate')\n map.connect('/dex/gadgets/stat_calculator', controller='dex_gadgets', action='stat_calculator')\n map.connect('/dex/gadgets/whos_that_pokemon', controller='dex_gadgets', action='whos_that_pokemon')\n\n # JSON API\n map.connect('/dex/api/pokemon', controller='dex_api', action='pokemon')", "def add_routes(self, mapper):\n pass", "def add_routes(app: web.Application):\n ActionsView.register_view(app)\n PingView.register_view(app)\n CoreShutdownView.register_view(app)\n CoreRestartView.register_view(app)\n ReloadConfigView.register_view(app)\n ListItemsView.register_view(app)\n GetItemView.register_view(app)\n ItemStatesView.register_view(app)\n ItemStateView.register_view(app)\n ActionsView.register_view(app)\n ExecuteActionView.register_view(app)\n ListModulesView.register_view(app)", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def get_route(self, endpoint: str):\n for route in self.tree_routes:\n if route.get_endpoint_name() == endpoint:\n return route\n\n return None", "def base_urls(self):\n # Due to the way Django parses URLs, ``get_multiple`` won't work without\n # a trailing slash.\n return [\n url(r\"^(?P<resource_name>%s)%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name=\"api_dispatch_list\"),\n url(r\"^(?P<resource_name>%s)/schema%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name=\"api_get_schema\"),\n url(r\"^(?P<resource_name>%s)/set/(?P<slug_list>[\\w\\d_-]+)/$\" % self._meta.resource_name, self.wrap_view('get_multiple'), name=\"api_get_multiple\"),\n url(r\"^(?P<resource_name>%s)/(?P<slug>[\\w\\d_-]+)%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_detail'), name=\"api_dispatch_detail\"),\n ]", "def API(root, **routes):\n\n # this creats a dict of properties that create Senders for\n # all the friendly name --> suffix combidantions passed in\n props = {'root': Route(root)}\n for (short, path) in routes.iteritems():\n props[short] = lambda self, content: self.sender.send_path(path, content)\n\n return type('API', (GenericAPI,), props)", "def route(self, routing_url: str, methods: typing.Iterable[str] = (\"GET\",)):\n\n def _inner(func: callable):\n route = self.wrap_route(func)\n self.add_route(route, routing_url, 
methods)\n return route\n\n return _inner", "def initialize_routes(app):\n # Authentification \n app.add_resource(auth.LoginApi, '/auth/login')\n app.add_resource(auth.SignupApi, '/auth/SignupApi')\n # Intialisation et activation d'un parking\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/add')\n app.add_resource(parkingInit.ActivateParking, '/administrate/activate')\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/getall', endpoint='getall')\n # Gestion de Clients\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/get')\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/getById/<int:idUser>', endpoint='get_by_id')\n # statistiques financéres\n app.add_resource(stats.Money, '/administrate/finance/monthly', endpoint='monthly')\n app.add_resource(stats.Money, '/administrate/finance/yearly', endpoint='yearly')", "def get_routing_methods(self):\r\n svc = self.client['Network_Application_Delivery_Controller_'\r\n 'LoadBalancer_Routing_Method']\r\n return svc.getAllObjects()", "def routes(self) -> pulumi.Output[Sequence['outputs.RouteTableRoute']]:\n return pulumi.get(self, \"routes\")", "def test_replace_namespaced_route(self):\n pass", "def list_routes():\n for rule in sorted(application.url_map.iter_rules(), key=lambda r: r.rule):\n print(\"{:10} {}\".format(\", \".join(rule.methods - set(['OPTIONS', 'HEAD'])), rule.rule))", "def routes(self):\n if self.in_build():\n raise BuildTimeVariableAccessException(\n 'Routes are not available during the build phase.'\n )\n if not self._routesDef:\n raise NotValidPlatformException(\n 'No routes are defined. Are you sure you are running on Platform.sh?'\n )\n return self._routesDef", "def build_routes(app):\n app.register_blueprint(workflow_plans_blueprint)\n app.register_blueprint(cache_blueprint)\n app.register_blueprint(config_blueprint)\n app.register_blueprint(dataset_blueprint)\n app.register_blueprint(graph_blueprint)\n app.register_blueprint(jobs_blueprint)\n app.register_blueprint(project_blueprint)\n app.register_blueprint(templates_blueprint)\n app.register_blueprint(version_blueprint)\n app.register_blueprint(apispec_blueprint)\n app.register_blueprint(versions_list_blueprint)", "def path_entries(self):", "def add_routes(self):\n# from server.flask import views as flask_views\n# flask_views_custom_methods = filter(lambda x: x.startswith(\"view_\"), dir(flask_views))\n# for custom_method in flask_views_custom_methods:\n# # Retrieve data needed to add the URL rule to the Flask app\n# view_method = getattr(locals()[\"flask_views\"], custom_method)\n# docstring = getattr(view_method, \"__doc__\")\n# index_start = docstring.index(\"@app.route\")\n# index_end = index_start + len(\"@app.route\") + 1\n# custom_method_url = docstring[index_end:].replace(\" \",\"\").replace(\"\\n\",\"\")\n# # Get: (a) method URL to bind flask app, (b), method name, (c) method object to invoke\n# self._app.add_url_rule(custom_method_url, custom_method, view_func=view_method(self._app.mongo))\n self._app.register_blueprint(ro_flask_views)", "def create_routes():\n app_dir = os.path.dirname(os.path.abspath(__file__))\n controller_dir = os.path.join(app_dir, \"controllers\")\n routes = Mapper(directory=controller_dir)\n routes.connect(\"/\", controller=\"root\", action=\"index\")\n routes.connect(\"/body\", controller=\"root\", action=\"body\")\n routes.connect(\"/raise_exception\", controller=\"root\", action=\"raise_exception\")\n routes.connect(\"/raise_wrong_code\", 
controller=\"root\", action=\"raise_wrong_code\")\n routes.connect(\"/raise_custom_code\", controller=\"root\", action=\"raise_custom_code\")\n routes.connect(\"/raise_code_method\", controller=\"root\", action=\"raise_code_method\")\n routes.connect(\"/render\", controller=\"root\", action=\"render\")\n routes.connect(\"/path-params/{year:\\d+}/{month}/\", controller=\"root\", action=\"path_params\") # noqa: W605\n routes.connect(\"/render_exception\", controller=\"root\", action=\"render_exception\")\n routes.connect(\"/response_headers\", controller=\"root\", action=\"response_headers\")\n routes.connect(\"/identify\", controller=\"root\", action=\"identify\")\n return routes", "def test_custom_route(self):\n\n # Create a human object\n Human.create(id=1, name='John')\n Dog.create(id=5, name='Johnny', owner='John')\n\n # Get the custom route\n rv = self.client.get('/humans/1/my_dogs')\n assert rv.status_code == 200\n assert rv.json['total'] == 1\n assert rv.json['dogs'][0] == {'age': 5, 'id': 5, 'name': 'Johnny', 'owner': 'John'}", "def initialize_routes(api):\n api.add_resource(WatchlistsApi, '/api/watchlists')\n api.add_resource(WatchlistApi, '/api/watchlist/<id>')\n api.add_resource(RegisterUserApi, '/api/auth/register')\n api.add_resource(LoginUserApi, '/api/auth/login')\n api.add_resource(ResetPassword, '/api/auth/reset')\n api.add_resource(ResetFogottenPassword, '/api/auth/reset/password')\n api.add_resource(ForgotPassword, '/api/auth/forgot')\n api.add_resource(ForgotPasswordReset, '/reset/password/<token>')\n api.add_resource(Home, '/')\n api.add_resource(Logout, '/logout')\n api.add_resource(Dashboard, '/dashboard')\n api.add_resource(DashboardSearch, '/dashboard/search')\n api.add_resource(SearchMovies, '/search/movies/<title>')\n api.add_resource(SearchMovieDetails, '/search/movie/details/<id>')\n api.add_resource(SearchTvShows, '/search/shows/<title>')\n api.add_resource(SearchShowDetails, '/search/show/details/<id>')\n api.add_resource(SearchTrendingMovies, '/search/trending/movies')\n api.add_resource(Recommend, '/recommend')", "def __repr__(self):\n return '<Route {}>'.format(self.name)", "def create_routes(self):\r\n self._app.route('/api/autoconf',\r\n methods=['GET'],\r\n endpoint='api_autoconf')(self.entrypoint)\r\n self._app.route('/api/autoconf/<string:session_id>',\r\n methods=['GET', 'POST', 'DELETE'],\r\n endpoint='api_autoconf_status')(self.entrypoint)\r\n self._app.route('/api/autoconf/rgc',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_rgc')(self.entrypoint)\r\n self._app.route('/api/autoconf/pd',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_pd')(self.entrypoint)", "def route( request, c ):", "def show_routes(self):\n routelist= [(handler.regex.pattern, handler.handler_class) for handler in self.handlers[0][1]]\n print(55*\"-\")\n print(\" Routing table (order matters) :\")\n print(55*\"-\")\n for elem in routelist:\n print('{0:<20} {1:<30} '.format(elem[0], str(elem[1])))", "def _get_app_endpoints():\n endpoints = {\n (r'/', handlers.HeartbeatRequestHandler),\n (r'/1/issue/retrieve', handlers.RetrieveRequestHandler),\n (r'/1/issue/search', handlers.SearchRequestHandler),\n (r'/1/issue/search/setup', handlers.SearchSetupRequestHandler),\n }\n\n log(\"Endpoint to handler mappings:\")\n for url, handler in sorted(endpoints, key=lambda ep: ep[0]):\n log(\"{0} ---> {1}\".format(url, handler))\n\n return endpoints", "def route(self, method, pattern, handler):\n pass", "def find(self, route):\n curr = 
self.root\n for part in route:\n if part not in curr.children:\n return None\n curr = curr.children[part]\n return curr.handler", "async def get_routes(self) -> Sequence[str]:\n results = []\n storage: BaseStorage = await self._context.inject(BaseStorage)\n async for record in storage.search_records(\n self.RECORD_TYPE, {\"to\": self._sender_verkey}\n ):\n results.append(record.value)\n return results", "def _url_map(self):\n return Map([\n Rule('/init', endpoint='init'),\n Rule('/op/<name>', endpoint='op'),\n Rule('/handler/<name>', endpoint='handler'),\n Rule('/hook/<name>', endpoint='hook'),\n Rule('/provider/<name>/<action>', endpoint='provider'),\n Rule('/timer/<name>', endpoint='timer'),\n ])", "def get_endpoints(self, request):\n enumerator = self.endpoint_enumerator_class(\n self._gen.patterns, self._gen.urlconf, request=request)\n endpoints = enumerator.get_api_endpoints()\n view_paths = defaultdict(list)\n view_cls = {}\n for path, method, callback, decorators in reversed(endpoints):\n view = self.create_view(callback, method, request)\n path = self._gen.coerce_path(path, method, view)\n view_paths[path].append((method, view, decorators))\n view_cls[path] = callback.cls\n return {path: (view_cls[path], methods)\n for path, methods in view_paths.items()}", "def print_routes(self):\n\n for route in self.app.router.routes():\n route_info = route.get_info()\n if \"formatter\" in route_info:\n url = route_info[\"formatter\"]\n elif \"path\" in route_info:\n url = route_info[\"path\"]\n elif \"prefix\" in route_info:\n url = route_info[\"prefix\"]\n else:\n url = \"Unknown type of route %s\" % route_info\n\n self.logger.info(\"Route has been setup %s at %s\", route.method, url)", "def test_get_handler(self):\n class DummyHandler(handlers.BaseHandler):\n pass\n\n route = RouteFactory.build()\n route.handler_class = DummyHandler\n\n handler = route.get_handler()\n self.assertIsInstance(handler, DummyHandler)\n self.assertEqual(handler.route, route)", "def get_routes():\n\n return Db().get_line_ids()", "def test_path(self):\n base_handler_path = 'conman.routes.handlers.BaseHandler'\n self.assertEqual(BaseHandler.path(), base_handler_path)", "def routes(self) -> pulumi.Output[Sequence['outputs.VirtualHubRouteTableRoute']]:\n return pulumi.get(self, \"routes\")", "def _get_route_reflector_client(self):\n return self.__route_reflector_client", "def get_api(self):\n from geoffrey.utils import get_api\n return get_api(self.app.routes, prefix=\"/\")", "def get_routes(self, ApiId: str, MaxResults: str = None, NextToken: str = None) -> Dict:\n pass", "def getURLs():", "def route(self):\n # TODO: wenn keine url, herausfinden, welche ????\n # TODO: wenn url = hostname (fqdn), dann -> google.ch\n if not (self.META.has_key('REMOTE_ADDR') and \n self.GET.has_key('provider')):\n #self.GET.has_key('url')):\n #return HttpResponseRedirect('/index.php')\n # TODO: Auf die Fehlerseite Link zu back.php\n return render_to_response('error.htm', {\n 'error': \"Falsche Parameter auf route.php\",\n })\n src_ip = self.META['REMOTE_ADDR']\n prov = self.GET['provider']\n url = \"http://www.google.ch\"\n if self.GET.has_key('url'):\n url = self.GET['url']\n # Add and save new route\n add_active_route(src_ip = src_ip, prov = prov)\n return HttpResponseRedirect(url)", "def routes_available():\n return json.dumps(\n [\"%s\" % rule for rule in app.url_map.iter_rules()],\n indent=4,\n separators=(\",\", \": \"),\n )" ]
[ "0.7202516", "0.67574275", "0.6573126", "0.6540048", "0.62880886", "0.62165105", "0.6130403", "0.6115117", "0.6013663", "0.60105985", "0.5981652", "0.59521013", "0.593293", "0.59178156", "0.59062076", "0.58861256", "0.5860622", "0.57698715", "0.57652", "0.5759567", "0.57402325", "0.5714109", "0.5687467", "0.56682754", "0.5654816", "0.5645261", "0.560027", "0.5563639", "0.5544439", "0.5537411", "0.5533726", "0.5511044", "0.5481065", "0.5473545", "0.5471121", "0.5469058", "0.54644585", "0.5456891", "0.54409975", "0.54388356", "0.54298323", "0.54298323", "0.5429133", "0.5426095", "0.5410618", "0.5400181", "0.539919", "0.5397143", "0.53859365", "0.53818715", "0.53770447", "0.537591", "0.53620386", "0.53470737", "0.53253347", "0.53233135", "0.5309212", "0.528658", "0.52707434", "0.5259004", "0.5251795", "0.52490205", "0.52490205", "0.5242764", "0.52353454", "0.5217676", "0.5212461", "0.5209176", "0.5192118", "0.51919675", "0.5177339", "0.51748705", "0.5171279", "0.51706666", "0.5168391", "0.51678026", "0.5165113", "0.51628685", "0.51608616", "0.51541847", "0.51254374", "0.51243675", "0.51172674", "0.5107846", "0.510464", "0.50930554", "0.5092667", "0.5087092", "0.5081203", "0.50769734", "0.50759846", "0.50736785", "0.5065551", "0.5032397", "0.50290394", "0.502706", "0.5015663", "0.50074774", "0.50069547", "0.5005883" ]
0.5733189
21
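Several of the negative snippets in the block above enumerate an application's routes by iterating app.url_map.iter_rules(). The short sketch below restates that idiom in a self-contained form; the Flask app and the /ping and /routes endpoints are illustrative assumptions, not taken from any snippet in the row.

from flask import Flask, jsonify

app = Flask(__name__)

@app.route("/ping")
def ping():
    return "pong"

@app.route("/routes")
def routes_available():
    # Same idiom as the quoted snippets: walk the URL map and report each rule.
    return jsonify(sorted(str(rule) for rule in app.url_map.iter_rules()))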
Mark a class as Controller Resource
def add_resource(self, cls): # check if the same controller was already used for another cls (Resource) if ( hasattr(self, Controller.RESOURCE_CLASS_KEY) and getattr(self, Controller.RESOURCE_CLASS_KEY) != cls ): raise MultipleResourceException() # check if cls (Resource) was exteded from another if hasattr(cls, Controller.RC_KEY): self.__get_parent_routes(cls.__router__) setattr(cls, Controller.RC_KEY, self.router) setattr(self, Controller.RESOURCE_CLASS_KEY, cls) cls.router = lambda: Controller.__parse_controller_router(cls) return cls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_resource():\n return wsgi.Resource(Controller())", "def setController(self, controller):\n self.__controller = controller", "def __init__(self, controller):\n self._controller = controller", "def resource(self, prefix):\n def wrapper(cls):\n # Save the original init\n clsinit = getattr(cls, '__init__', lambda self: None)\n\n # Dirty trick, make the class belong to the type restful.Resource\n cls = type(cls.__name__, (Resource,), dict(cls.__dict__))\n\n aliases = getattr(cls, 'aliases', None)\n if isinstance(aliases, dict) and len(aliases) > 0:\n cls.preparer = FieldsPreparer(fields=aliases)\n\n # Rename self for using inside __init__\n api = self\n\n def __init__(self, *args, **kwargs):\n # Call Resource constructor\n super(cls, self).__init__(api)\n\n # Initialize the instance\n clsinit(self, *args, **kwargs)\n\n cls.__init__ = __init__\n\n # Add the resource to the API\n cls.add_url_rules(self.app, prefix)\n\n return cls\n\n return wrapper", "def resource(self, resource):\n self._resource = resource", "def resource(self, resource):\n self._resource = resource", "def resource(self, resource):\n\n self._resource = resource", "def create_resource():\n return wsgi.Resource(WorkersController())", "def setController_( self, Controller ):\n\t\ttry:\n\t\t\tself._controller = Controller\n\t\texcept Exception as e:\n\t\t\tself.logToConsole( \"setController_: %s\" % str(e) )", "def expose(self, model, route='/api', access_control=None, resource_class=Resource, **kwargs):\n endpoint_path = route + '/' + inflection.pluralize(inflection.underscore(model.__name__))\n endpoint = endpoint_path\n resource = Resource(model=model, access_control=access_control)\n self._add_api_method(endpoint_path, resource.list_,\n methods=['GET'], endpoint=endpoint + '/list')\n self._add_api_method('%s/<id>' % endpoint_path, resource.get_,\n methods=['GET'], endpoint=endpoint + '/get')\n\n self._add_api_method(endpoint_path, resource.put_,\n methods=['PUT'], endpoint=endpoint + '/put')\n\n self._add_api_method('%s/<id>' % endpoint_path, resource.delete_,\n methods=['DELETE'], endpoint=endpoint + '/delete')\n\n self._add_api_method(endpoint_path, resource.post_,\n methods=['POST'], endpoint=endpoint + 'post')\n\n self._add_api_method('%s/<id>' % endpoint_path, resource.patch_,\n methods=['PATCH'], endpoint=endpoint + 'patch')", "def create_controller(self, typ):\n return self.controller_objects[typ]()", "def add_resource(self, cls, url, **kwargs):\n methods = []\n callmap = {}\n # Create instance of resource handler, if passed as just class (not instance)\n try:\n obj = cls()\n except TypeError:\n obj = cls\n # Get all implemented HTTP methods and make callmap\n for m in ['GET', 'POST', 'PUT', 'PATCH', 'DELETE']:\n fn = m.lower()\n if hasattr(obj, fn):\n methods.append(m)\n callmap[m.encode()] = (getattr(obj, fn), kwargs)\n self.add_route(url, restful_resource_handler,\n methods=methods,\n save_headers=['Content-Length', 'Content-Type'],\n _callmap=callmap)", "def use(_):\n\n def wrapper(cls):\n __app_controllers__.append(cls)\n return cls\n\n return wrapper", "def create_controller() -> Controller:\n _controller = Controller()\n return _controller", "def create_resource():\n #deserializer = ImageDeserializer()\n #serializer = ImageSerializer()\n return wsgi.Resource(Controller())", "def _get_controller(self):\n return self.__controller", "def _set_controller(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = 
YANGDynClass(v,base=YANGListType(\"controller_name\",controller.controller, yang_name=\"controller\", rest_name=\"controller\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='controller-name', extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}), is_container='list', yang_name=\"controller\", rest_name=\"controller\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"controller must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"controller_name\",controller.controller, yang_name=\"controller\", rest_name=\"controller\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='controller-name', extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}), is_container='list', yang_name=\"controller\", rest_name=\"controller\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__controller = t\n if hasattr(self, '_set'):\n self._set()", "def _create_controller(main_controller, action_controller_list):\n controller = server.wsgi.Resource(main_controller())\n for ctl in action_controller_list:\n controller.register_actions(ctl())\n return controller", "def rest_resource(resource_cls):\n ecommerce_checkout_api.add_resource(resource_cls, *resource_cls.endpoints)\n return resource_cls", "def resource(name):\n \n def w(cls):\n RESOURCES[name] = cls\n return cls\n \n return w", "def mvcObj(self, router):\n pass", "def create_resource():\n deserializer = wsgi.JSONRequestDeserializer()\n serializer = wsgi.JSONResponseSerializer()\n return wsgi.Resource(Controller(), deserializer, serializer)", "def name(self) -> str:\n return \"Controller\"", "def create_resource():\n return wsgi.Resource(Controller(), serializer=ImageSerialize())", "def resource(self):\n return self.add_resource", "def define_route(self, route, **kwargs):\n\n def decorator(cls):\n if is_class(cls):\n resource = cls(**kwargs)\n else:\n resource = cls\n\n self.add_route(route, resource)\n\n return cls\n\n return decorator", "def resource_type(cls):\n pass", "def releaseController(self,entry):\n \n controllerName = entry.get('controller')\n \n if controllerName is None:\n 
self.logger.debug('Path: \"{}\" controller not decleared, we leave'.format(entry.get('path')))\n self.controller = Controller(self)\n return\n \n self.logger.debug(\"entrypath: {} controller: {}\".format(entry.get('path'),controllerName))\n\n sControllerPath = entry.get('path','').replace('/','.')\n sControllerPath = sControllerPath.lower()\n \n if sControllerPath.startswith('.'): sControllerPath = sControllerPath[1:]\n\n if sControllerPath == '':\n sControllerFile = 'mvc.controller.{}'.format(controllerName)\n else:\n sControllerFile = 'mvc.controller.{}.{}'.format(sControllerPath,controllerName)\n \n sControllerFile = self.settings.base+'/'+sControllerFile.replace('.','/')+'.py'\n sControllerFile = os.path.realpath(sControllerFile)\n \n if not os.path.isfile(sControllerFile):\n msg = 'Keinen Controller Datei {} gefunden'.format(sControllerFile)\n self.logger.debug(msg)\n self.content = msg\n Emergency.stop(msg)\n return\n\n if sControllerPath == '':\n sCommand = \"from mvc.controller.{0} import {0}\".format(controllerName)\n else:\n sCommand = \"from mvc.controller.{0}.{1} import {1}\".format(sControllerPath,controllerName)\n \n self.logger.debug('Import Controller over \"{}\"'.format(sCommand))\n try:\n exec(sCommand)\n except Exception as ex:\n msg = 'Fehler bei Import des Controller \"{}\": \"{}\"'.format(sCommand,ex)\n self.content = msg\n self.logger.debug(msg)\n Emergency.stop(msg)\n \n self.controller = None\n sCommand = \"{}(self)\".format(controllerName)\n self.logger.debug('Build controller by sentence: \"{}\"'.format(sCommand))\n\n try:\n self.controller = eval(sCommand)\n except Exception as ex:\n msg = 'Controller \"{}\" kann nicht initialiert werden; Meldung: \"{}\"'.format(sCommand,ex)\n self.content = msg\n self.logger.debug(msg)\n Emergency.stop(msg)\n\n \n self.prepareController()\n \n try:\n self.controller.get()\n except Exception as ex:\n msg = 'Fehler bei get() des Controller \"{}\": \"{}\" Abbruch'.format(controllerName,ex)\n self.logger.debug(msg)\n self.logger.debug(self.content)\n self.controller.status == self.controller.FAILED\n Emergency.stop(msg)", "def pre_routing_instance_create(self, resource_dict):\n pass", "def target_resource(self, target_resource):\n self._target_resource = target_resource", "def controller( self ):\n\t\ttry:\n\t\t\treturn self._controller\n\t\texcept Exception as e:\n\t\t\tself.logToConsole( \"controller: %s\" % str(e) )", "def __init__(self):\n\n # Diccionario que contendra todas las fuentes para ir llamandolas una por una en ejecucion\n # o poder seleccionar cual lanzar usando el patron factoria a traves de esta clase\n\n self.controller_objects = {'iptables': IptablesController}", "def register(self, resource=None, **meta):\r\n if resource is None:\r\n def wrapper(resource):\r\n return self.register(resource, **meta)\r\n return wrapper\r\n\r\n # Must be instance of ResourceView\r\n if not issubclass(resource, ResourceView):\r\n raise AssertionError(\"%s not subclass of ResourceView\" % resource)\r\n\r\n # Cannot be abstract\r\n if resource._meta.abstract:\r\n raise AssertionError(\"Attempt register of abstract resource: %s.\"\r\n % resource)\r\n\r\n # Fabric of resources\r\n meta = dict(self.meta, **meta)\r\n meta['name'] = meta.get('name', resource._meta.name)\r\n options = type('Meta', tuple(), meta)\r\n\r\n params = dict(api=self, Meta=options, **meta)\r\n\r\n params['__module__'] = '%s.%s' % (\r\n self.prefix, self.str_version.replace('.', '_'))\r\n\r\n params['__doc__'] = resource.__doc__\r\n\r\n new_resource = type(\r\n 
'%s%s' % (resource.__name__, len(self.resources)),\r\n (resource,), params)\r\n\r\n if self.resources.get(new_resource._meta.url_name):\r\n logger.warning(\r\n \"A resource '%r' is replacing the existing record for '%s'\",\r\n new_resource, self.resources.get(new_resource._meta.url_name))\r\n\r\n self.resources[new_resource._meta.url_name] = new_resource\r\n\r\n return resource", "def make_controller(cls, config):\n if config.REDUCED_CRAWL:\n if config.NUM_THREADS > 1 or config.MULTI:\n return MultiControllerReduced(cls, config)\n else:\n return ControllerReduced(cls, config)\n else:\n return super().make_controller(config)", "def __getattr__(self, attr):\n actual_resource = getattr(self.swagger_client, attr)\n if attr in [\"Authorization\", \"Effects\", \"Identify\", \"Info\",\n \"PanelLayout\", \"State\"]:\n return WrappedResource(actual_resource, attr)\n else:\n return actual_resource", "def __get__(self, instance, owner):\r\n self.resource_meta = instance\r\n return self", "def update_controller(self):", "def get_database_resource_table_controller() -> [_DatabaseResourceTableController]:\n\n return _DatabaseResourceTableController()", "def __call__(self, id=None):\n if id == None:\n return self\n\n self.id = str(id)\n\n key = self.path + '/' + self.id\n\n self.client.resources[key] = self.client.resource_class(self.client, key)\n\n return self.client.resources[key]", "def setup_class(cls):\n cls.handler = MyScaffoldHandler(\"handler\", SkillContext())", "def controller(code):\n\n def register_controller(func):\n CONTROLLERS[code] = func\n return func\n\n return register_controller", "def resource_prefix(self):", "def __init__(self, realm):\r\n Resource.__init__(self)\r\n\r\n verifyObject(IMasterRealm, realm)\r\n self._realm = realm", "def register_resource_for_model(model, resource):\n _model_to_resources[model] = resource", "def register(self, wsgi_app):\n wsgi_app.add_url_rule(\n rule=self.path,\n view_func=self.controller,\n methods=self.methods)", "def __getattr__(self, resource):\n return ResourceManager(self, resource)", "def __init__(self):\n super(Resource, self).__init__()\n\n # ____building form class for resources validation____\n self.request = Requests(self.__class__.__name__)\n self.response = Response()", "def register_resources(self):\n raise NotImplementedError", "def prepareController(self):\n pass", "def test_create_namespaced_resource_access_review(self):\n pass", "def _class(self, _class):\n\n self.__class = _class", "def _class(self, _class):\n\n self.__class = _class", "def activate_controller(self):\n if self.controller_address:\n #print \"Activating controller...\"\n self.controller = Controller(\n self.controller_address,\n self.proxy_address,\n self.migrating)\n self.controller.switch = self\n else:\n print \"[WARNING] Controller undefined\"", "def register(self):\n # self.register_route(\"GET\", self.__route, lambda req, res: self.status(req, res))\n self.register_route(\"GET\", self.__route, None, self.status)", "def resources(self):", "def pre_routing_instance_update(self, resource_id, resource_dict):\n pass", "def set_self_ref(self, resource):\n fullname = utils.class_fullname(resource)\n if fullname not in self._models_index:\n self.send_error(400,\n message=\"Unrecognized resource type: %s\" % type(resource))\n return -1\n resource_name = self._models_index[fullname]\n resource_url = self.reverse_url(\n self._collections[resource_name][\"name\"], resource[self.Id]) \n resource[\"selfRef\"] = \"%s://%s%s\" % (\n self.request.protocol, self.request.host, 
resource_url)\n return 0", "def init_controllers(self):\n if self.controllers == None:\n return\n controllers_namespace = self.__namespace + \".controllers\" # TODO: allow customize this\n try:\n controllers_package = import_module(controllers_namespace)\n except:\n return None\n\n from ron import Application\n controllers_modules = self._get_package_modules(controllers_package)\n for controller_name in controllers_modules:\n imported_controller = import_module('.' + controller_name, package=controllers_namespace)\n for i in dir(imported_controller):\n attribute = getattr(imported_controller, i)\n if inspect.isclass(attribute) and issubclass(attribute, Controller):\n controller_class = attribute(self)\n self.controllers[controllers_namespace+'.'+controller_name] = controller_class\n Application().controllers[controllers_namespace+'.'+controller_name] = controller_class", "def root(self):\n return Resource()", "def track():\n\n if deployment_settings.get_security_map() and not s3_has_role(\"MapAdmin\"):\n unauthorised()\n\n tablename = \"%s_%s\" % (module, resourcename)\n\n # Model options\n # used in multiple controllers, so defined in model\n\n # CRUD Strings\n # used in multiple controllers, so defined in model\n\n return s3_rest_controller(module, resourcename)", "def pre_route_target_create(self, resource_dict):\n pass", "def _resource_factory(self, raw) -> ApiResource:\n raise NotImplemented", "def register(self, key, resource=None):\n if resource is not None and not isinstance(resource, str) and not isinstance(resource, Resource):\n raise ValueError(\"resource must be str, Resource type or None\")\n if isinstance(resource, str) and \".\" not in resource:\n raise ValueError(\"resource class name must be fully qualified\")\n if resource is None:\n self._resources.pop(key, None)\n else:\n self._resources[key] = resource", "def _child(self, resource_class, path, **attrs):\n resource_class = resolve_resource(resource_class)\n return resource_class(path, self, **attrs)", "def __init__(self, *args, **kwargs):\n\n\t\tassert ltrace(TRACE_USERS, '> UsersController.__init__(%s)' %\n\t\t\tUsersController.init_ok)\n\n\t\tif UsersController.init_ok:\n\t\t\treturn\n\n\t\tsuper(self.__class__, self).__init__(name='users')\n\n\t\tUsersController.init_ok = True\n\t\tassert ltrace(TRACE_USERS, '< UsersController.__init__(%s)' %\n\t\t\tUsersController.init_ok)", "def __init__(self, utils, controller):\n self.__utils = utils\n self.__controller = controller", "def process_resource_api(self, resources, resource, api, context):\n pass", "def add_flow_controller(cls, name, controller):\n cls.registered_controllers[name] = controller", "def updateResource(self, authenticationToken, resource):\r\n pass", "def resources(self, resources):\n self._resources = resources", "def __init__(self, collection_id):\n BaseResourceHandler.__init__(self, collection_id)", "def register_permission(self, cls):\n return self.register_entity('permission', cls)", "def getClassResource(self, className):\n\t\t#To prevent resources from having another type\n\t\tif(className == \"Resource\"):\n\t\t\treturn None\n\t\t#Now check if we have a mapping in here\n\t\tif(className in self._classMapping):\n\t\t\treturn self._classMapping[className]\n\t\telse:\n\t\t\traise Exception(\"Given class name \\\"\" + className + \"\\\"is not associated with a uri!\")", "def get_controller(cls):\n if not cls.hnd:\n raise Exception('A handler is to be set for getting contoller.')\n if not cls.controller:\n cls.controller = 
cls.config.controller_class(cls.hnd)\n cls.session = cls.controller.session\n return cls.controller", "def rename(cls, client, resource, new_name) :\n\t\ttry :\n\t\t\trenameresource = rewriteaction()\n\t\t\tif type(resource) == cls :\n\t\t\t\trenameresource.name = resource.name\n\t\t\telse :\n\t\t\t\trenameresource.name = resource\n\t\t\treturn renameresource.rename_resource(client,new_name)\n\t\texcept Exception as e :\n\t\t\traise e", "def __parse_controller_router(cls):\n router = getattr(cls, Controller.RC_KEY)\n\n dependencies = None\n if hasattr(cls, \"dependencies\"):\n dependencies = deepcopy(cls.dependencies)\n delattr(cls, \"dependencies\")\n\n for route in router.routes:\n # add class dependencies\n if dependencies:\n for depends in dependencies[::-1]:\n route.dependencies.insert(0, depends)\n\n # get the signature of the endpoint function\n signature = inspect.signature(route.endpoint)\n # get the parameters of the endpoint function\n signature_parameters = list(signature.parameters.values())\n\n # replace the class instance with the itself FastApi Dependecy\n signature_parameters[0] = signature_parameters[0].replace(\n default=Depends(cls)\n )\n\n # set self and after it the keyword args\n new_parameters = [signature_parameters[0]] + [\n parameter.replace(kind=inspect.Parameter.KEYWORD_ONLY)\n for parameter in signature_parameters[1:]\n ]\n\n new_signature = signature.replace(parameters=new_parameters)\n setattr(route.endpoint, Controller.SIGNATURE_KEY, new_signature)\n\n return router", "def register(cls, class_):\n cls._registered[class_.tag()] = class_", "def resource_type(self, resource_type):\n\n self._resource_type = resource_type", "def resource_type(self, resource_type):\n\n self._resource_type = resource_type", "def resource_type(self, resource_type):\n\n self._resource_type = resource_type", "def resource_type(self, resource_type):\n\n self._resource_type = resource_type", "def register(self, controller_class, model=None, parent=None, **options):\n # ensure the base either had a model or one was passed\n if controller_class.model:\n if model and model != controller_class.model:\n raise ValueError(\n 'You cannot use a Controller with a \"model\" specified as '\n 'a generic Controller class.'\n )\n model = controller_class.model\n else:\n if not model:\n raise ValueError(\n 'You must provide a \"model\" for this Controller.'\n )\n controller_class.model = model\n\n if model._meta.abstract:\n raise ImproperlyConfigured(\n 'The model {} is abstract, so it cannot be registered '\n 'with a controller.'.format(model.__name__)\n )\n\n # Ignore the registration if the model has been swapped out.\n if not model._meta.swapped:\n if options:\n options['__module__'] = __name__\n controller_class = type(\n \"%sController\" % model.__name__,\n (controller_class,),\n options\n )\n\n # Instantiate the controller and save in the appropriate registry\n parent = parent or self\n controller_obj = controller_class(parent=parent, registrar=self)\n if settings.DEBUG:\n system_check_errors.extend(controller_obj.check())\n\n self._registry[model] = controller_obj\n\n # handle all child registrations at this time\n for child_controller_class in controller_class.children:\n controller_obj.register(child_controller_class,\n parent=controller_obj)", "def controller(self):\n return self._controller", "def __init__(self, resource_path):\n self.resource_path = resource_path", "def pre_qos_forwarding_class_create(self, resource_dict):\n pass", "def add_rest_routes(self, route, api=None, pos=0):\n def 
decorator(cls):\n # parent is the parent class of the relation\n cls_name = cls.__name__.lower()\n #print(cls_name)\n # default REST is the following pattern:\n # (r\"/post/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\", PostHandler),\n action=\"\"\n # if cls_name.endswith(\"handler\"):\n # action=action[:-7]\n # else:\n # action = cls_name\n # if route:\n action=route\n\n r=r\"/\"+action+r\"/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\"\n if api:\n # render the given api in the route URL\n r=r\"/\"+action+r\"/\"+str(api)+r\"/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\"\n \n #print(\"added the following routes: \" + r)\n handlers=getattr(self.__class__, \"handlers\", None)\n handlers.append((r,cls))\n \n # use the positioned handlers\n handlers_tmp=getattr(self.__class__, \"handlers_tmp\", None)\n handlers_tmp.append(((r,cls),pos))\n\n r=r\"/\"+action+r\"/*\"\n #print(\"added the following routes: \" + r)\n handlers.append((r,cls))\n handlers_tmp.append(((r,cls),pos))\n #print(\"handlers: \" + str(self.handlers))\n print(\"ROUTING: added RESTful routes for: \" + cls.__name__ + \" as /\" + action)\n #print(dir())\n return cls\n return decorator", "def post_routing_instance_create(self, resource_dict):\n pass", "def set_resource_data(self, resource, meta):", "def getController(self):\n return self.__controller", "def __init__(__self__, *,\n resource_id: pulumi.Input[str]):\n pulumi.set(__self__, \"resource_id\", resource_id)", "def __init__(__self__, *,\n resource_id: pulumi.Input[str]):\n pulumi.set(__self__, \"resource_id\", resource_id)", "def register(cls):\n register(cls, cls.provided_class)", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def _setResource(self, r_type, x, y, amount):\n cell = self.get_cell(x, y)\n cell.resource = Resource(r_type, amount)", "def __init__(self, view, model):\n self.view = view\n self.view.set_controller(self)\n self.model = model", "def setup_class(cls):\n cls.behaviour = MyScaffoldBehaviour(\"behaviour\", SkillContext())", "def resource_class(self):\n resource_module = '.'.join(self.resource_class_path.split('.')[:-1])\n resource_class_name = self.resource_class_path.split('.')[-1]\n return getattr(import_module(resource_module), resource_class_name)" ]
[ "0.6103949", "0.6056706", "0.59474033", "0.5917977", "0.58889747", "0.58889747", "0.5795911", "0.5779954", "0.57289934", "0.5706962", "0.5686955", "0.5658022", "0.55966556", "0.55881155", "0.5557832", "0.55321854", "0.5516671", "0.55104506", "0.5504374", "0.5495111", "0.54755294", "0.5465437", "0.5447657", "0.54118544", "0.53996545", "0.53638494", "0.53637004", "0.5351829", "0.5348223", "0.53410304", "0.5302425", "0.5298105", "0.5271923", "0.5243368", "0.5241477", "0.5238986", "0.52378595", "0.5210279", "0.52093256", "0.5204531", "0.5196752", "0.51833826", "0.5179795", "0.51704454", "0.5168866", "0.51639616", "0.51567864", "0.5128619", "0.51261574", "0.512612", "0.5095343", "0.5095343", "0.5089146", "0.5086057", "0.5070391", "0.5066875", "0.50654113", "0.50645113", "0.50632596", "0.5055133", "0.5044353", "0.5029234", "0.50277215", "0.50121456", "0.5000053", "0.49829304", "0.49825162", "0.4969026", "0.49634182", "0.49595174", "0.49585077", "0.49560297", "0.49514517", "0.49422184", "0.4933909", "0.49288064", "0.49246126", "0.49190074", "0.49190074", "0.49190074", "0.49190074", "0.4917553", "0.4912698", "0.4910863", "0.49108118", "0.49107313", "0.49032575", "0.48991758", "0.48952106", "0.48822892", "0.48822892", "0.48787642", "0.48621088", "0.48621088", "0.48621088", "0.48621088", "0.48593467", "0.4859209", "0.48560512", "0.48487332" ]
0.6852721
0
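The row above pairs the query "Mark a class as Controller Resource" with an add_resource decorator that checks for an existing resource class, stores the controller's router on the decorated class, and exposes a zero-argument router() hook. As a rough, hedged illustration of that pattern, the sketch below uses invented names (MiniController, PingResource) and a plain APIRouter; it mirrors the quoted logic but is not the quoted library's actual API.

from fastapi import APIRouter, FastAPI

class MiniController:
    # Toy stand-in for the Controller class quoted in the row above.
    def __init__(self) -> None:
        self.router = APIRouter()
        self.resource_class = None

    def add_resource(self, cls):
        # Mirror the quoted pattern: remember the resource class and attach a
        # zero-argument router() hook, analogous to setattr(cls, RC_KEY, self.router).
        self.resource_class = cls
        cls.router = lambda: self.router
        return cls

controller = MiniController()

@controller.add_resource  # marks the class as this controller's resource
class PingResource:
    pass

app = FastAPI()
app.include_router(PingResource.router())

In the quoted implementation the same hook instead rebuilds the router through __parse_controller_router, which rewrites each endpoint's first parameter into Depends(cls) so the class instance is injected per request.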
A decorator function to mark a Class as a Controller
def resource(self): return self.add_resource
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def use(_):\n\n def wrapper(cls):\n __app_controllers__.append(cls)\n return cls\n\n return wrapper", "def create_controller() -> Controller:\n _controller = Controller()\n return _controller", "def controller(code):\n\n def register_controller(func):\n CONTROLLERS[code] = func\n return func\n\n return register_controller", "def get_controller(request: pytest.FixtureRequest) -> Callable[..., Controller]:\n default_class = Controller\n marker = request.node.get_closest_marker(\"controller_data\")\n if marker and marker.kwargs:\n # Must copy so marker data do not change between test cases if marker is\n # applied to test class\n markerdata = marker.kwargs.copy()\n else:\n markerdata = {}\n\n def getter(\n handler: Any,\n class_: Optional[Type[Controller]] = None,\n **server_kwargs,\n ) -> Controller:\n \"\"\"\n :param handler: The handler object\n :param class_: If set to None, check controller_data(class_).\n If both are none, defaults to Controller.\n \"\"\"\n assert not inspect.isclass(handler)\n marker_class: Optional[Type[Controller]]\n marker_class = markerdata.pop(\"class_\", default_class)\n class_ = class_ or marker_class\n if class_ is None:\n raise RuntimeError(\n f\"Fixture '{request.fixturename}' needs controller_data to specify \"\n f\"what class to use\"\n )\n ip_port: HostPort = markerdata.pop(\"host_port\", HostPort())\n # server_kwargs takes precedence, so it's rightmost (PEP448)\n server_kwargs = {**markerdata, **server_kwargs}\n server_kwargs.setdefault(\"hostname\", ip_port.host)\n server_kwargs.setdefault(\"port\", ip_port.port)\n return class_(\n handler,\n **server_kwargs,\n )\n\n return getter", "def method(cls):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n return func(*args, **kwargs)\n setattr(cls, func.__name__, wrapper)\n return func\n return decorator", "def name(self) -> str:\n return \"Controller\"", "def create_controller(self, typ):\n return self.controller_objects[typ]()", "def decorator(self, decorator: Route.Decorator):\n pass", "def setController(self, controller):\n self.__controller = controller", "def __init__(self, controller):\n self._controller = controller", "def _get_controller(self):\n return self.__controller", "def make_controller(cls, config):\n if config.REDUCED_CRAWL:\n if config.NUM_THREADS > 1 or config.MULTI:\n return MultiControllerReduced(cls, config)\n else:\n return ControllerReduced(cls, config)\n else:\n return super().make_controller(config)", "def get_controller(cls):\n if not cls.hnd:\n raise Exception('A handler is to be set for getting contoller.')\n if not cls.controller:\n cls.controller = cls.config.controller_class(cls.hnd)\n cls.session = cls.controller.session\n return cls.controller", "def define_route(self, route, **kwargs):\n\n def decorator(cls):\n if is_class(cls):\n resource = cls(**kwargs)\n else:\n resource = cls\n\n self.add_route(route, resource)\n\n return cls\n\n return decorator", "def controller( self ):\n\t\ttry:\n\t\t\treturn self._controller\n\t\texcept Exception as e:\n\t\t\tself.logToConsole( \"controller: %s\" % str(e) )", "def view(cls):\n @wraps(cls)\n def wrapper(request, **kwargs):\n if hasattr(cls, 'as_view'):\n return cls.as_view()(request, **kwargs)\n obj = cls(request, **kwargs)\n handler = getattr(obj, request.method.lower(), None)\n if handler is None:\n return HttpResponseNotAllowed('%s not allowed' % request.method)\n res = obj.setup(obj.c) or handler(obj.c) or obj.render(obj.c)\n if isinstance(res, (dict, list)):\n return JsonResponse(res, 
safe=False)\n return res\n return wrapper", "def decorate(func):\n from aha.dispatch.router import get_router\n r = get_router()\n r.connect(None, path, controller = func, **params)\n return func", "def setController_( self, Controller ):\n\t\ttry:\n\t\t\tself._controller = Controller\n\t\texcept Exception as e:\n\t\t\tself.logToConsole( \"setController_: %s\" % str(e) )", "def class_based_view(class_obj):\n def _instantiate_view_class(request, *args, **kwargs):\n return class_obj()(request, *args, **kwargs)\n return _instantiate_view_class", "def classmethod(self, encoding):\n # Add encodings for hidden self and cmd arguments.\n encoding = ensure_bytes(encoding)\n typecodes = parse_type_encoding(encoding)\n typecodes.insert(1, b'@:')\n encoding = b''.join(typecodes)\n\n def decorator(f):\n def objc_class_method(objc_cls, objc_cmd, *args):\n py_cls = ObjCClass(objc_cls)\n py_cls.objc_cmd = objc_cmd\n args = convert_method_arguments(encoding, args)\n result = f(py_cls, *args)\n if isinstance(result, ObjCClass):\n result = result.ptr.value\n elif isinstance(result, ObjCInstance):\n result = result.ptr.value\n return result\n name = f.__name__.replace('_', ':')\n self.add_class_method(objc_class_method, name, encoding)\n return objc_class_method\n return decorator", "def _find_controller(self, controller):\n if controller is None:\n return None\n # If the output specified is a string controller e.g. \"WelcomeController@show\"\n elif isinstance(controller, str):\n if \"@\" in controller:\n controller_path, controller_method_str = controller.split(\"@\")\n else:\n controller_path = controller\n controller_method_str = \"__call__\"\n\n controller_path = modularize(controller_path).split(\".\")\n if len(controller_path) > 1:\n controller_name = controller_path.pop()\n prefix_path = \".\".join(controller_path)\n else:\n controller_name = controller_path[0]\n prefix_path = \"\"\n # build a list of all locations where the controller can be found\n # if the controller is defined such as auth.WelcomeController, append the prefix path to\n # the locations\n locations = list(\n map(\n lambda loc: f\"{loc}.{removeprefix(prefix_path, loc)}\"\n if prefix_path\n else loc,\n self.controllers_locations,\n )\n )\n try:\n self.controller_class = Loader.find(\n Controller, locations, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # controller is an instance with a bound method\n elif hasattr(controller, \"__self__\"):\n _, controller_method_str = controller.__qualname__.split(\".\")\n self.controller_instance = controller.__self__\n\n # it's a class or class.method, we don't have to find it, just get the class\n elif hasattr(controller, \"__qualname__\"):\n if \".\" in controller.__qualname__:\n controller_name, controller_method_str = controller.__qualname__.split(\n \".\"\n )\n else:\n controller_name = controller.__qualname__\n controller_method_str = \"__call__\"\n\n try:\n self.controller_class = Loader.get_object(\n controller.__module__, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # it's a controller instance\n else:\n self.controller_instance = controller\n controller_method_str = \"__call__\"\n\n # Set the controller method on class. 
This is a string\n self.controller_method = controller_method_str", "def auto_validator_hook():\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available auto validator hooks.\n\n :param type cls: auto validator hook class.\n\n :returns: auto validator hook class.\n :rtype: type\n \"\"\"\n\n instance = cls()\n auto_validator_services.register_hook(instance)\n\n return cls\n\n return decorator", "def class_based_view_decorator(decorator):\n def _dec(cls):\n assert (isinstance(cls, type) and issubclass(cls, View)), (\n \"Only subclasses of django.views.generic.View may use this decorator.\"\n )\n _method_decorator = method_decorator(decorator)\n cls.dispatch = _method_decorator(cls.dispatch)\n return cls\n\n update_wrapper(_dec, decorator, assigned=available_attrs(decorator))\n return _dec", "def decorate(func, *args, **kws):\n def do_authenticate():\n \"\"\"\n A function to perform authentication\n every time decorated function is called.\n \"\"\"\n #try:\n if 1:\n if 'referer' not in self.session:\n path = urlsplit(self.request.url)[2]\n self.session['referer'] = path\n self.session.put()\n #except:\n # pass\n aobj = self.config.auth_obj()\n self.get_controller()\n auth_res = aobj.auth(self.controller, *args, **kws)\n if auth_res:\n return func(*args, **kws)\n aobj.auth_redirect(self.controller, *args, **kws)\n # clear controller for development environment.\n\n return do_authenticate", "def init_controllers(self):\n if self.controllers == None:\n return\n controllers_namespace = self.__namespace + \".controllers\" # TODO: allow customize this\n try:\n controllers_package = import_module(controllers_namespace)\n except:\n return None\n\n from ron import Application\n controllers_modules = self._get_package_modules(controllers_package)\n for controller_name in controllers_modules:\n imported_controller = import_module('.' 
+ controller_name, package=controllers_namespace)\n for i in dir(imported_controller):\n attribute = getattr(imported_controller, i)\n if inspect.isclass(attribute) and issubclass(attribute, Controller):\n controller_class = attribute(self)\n self.controllers[controllers_namespace+'.'+controller_name] = controller_class\n Application().controllers[controllers_namespace+'.'+controller_name] = controller_class", "def instance():\n\n if Controller._instance == None:\n Controller._instance = Controller()\n return Controller._instance", "def _set_controller(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"controller_name\",controller.controller, yang_name=\"controller\", rest_name=\"controller\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='controller-name', extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}), is_container='list', yang_name=\"controller\", rest_name=\"controller\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"controller must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"controller_name\",controller.controller, yang_name=\"controller\", rest_name=\"controller\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='controller-name', extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}), is_container='list', yang_name=\"controller\", rest_name=\"controller\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__controller = t\n if hasattr(self, '_set'):\n self._set()", "def testGetAttributeReturnsItsControllerWhenAsked(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tself.failIf(type(x._MockObject__controller) == type(x.w))", "def decorator(cls):\n\n instance = cls()\n auto_validator_services.register_hook(instance)\n\n return cls", "def register_instance(cls):\n\n @functools.wraps(cls)\n def wrapper_decorator(*args, **kwargs):\n\n instance = cls(*args, **kwargs)\n\n Register[cls.__name__] = instance\n\n return instance\n\n return wrapper_decorator", "def controller(url_prefix_or_controller_cls: Union[str, Type[Controller]],\n 
controller_cls: Optional[Type[Controller]] = None,\n *,\n rules: Optional[Iterable[Union[Route, RouteGenerator]]] = None,\n ) -> RouteGenerator:\n url_prefix, controller_cls = _normalize_args(\n url_prefix_or_controller_cls, controller_cls, _is_controller_cls)\n url_prefix = url_prefix or controller_cls.Meta.url_prefix\n\n routes = []\n controller_routes = getattr(controller_cls, CONTROLLER_ROUTES_ATTR)\n if rules is None:\n routes = controller_routes.values()\n else:\n for route in _reduce_routes(rules):\n existing = controller_routes.get(route.method_name)\n if existing:\n routes.append(_inherit_route_options(route, existing[0]))\n else:\n routes.append(route)\n\n yield from _normalize_controller_routes(routes, controller_cls,\n url_prefix=url_prefix)", "def setup_class(cls):\n cls.handler = MyScaffoldHandler(\"handler\", SkillContext())", "def as_view(cls):\n \n @csrf_exempt\n @slack_augment\n def view(request):\n return cls(request).dispatch()\n return view", "def serializer(*args, **kwargs):\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available serializers.\n\n :param type cls: serializer class.\n\n :returns: serializer class.\n :rtype: type\n \"\"\"\n\n instance = cls(*args, **kwargs)\n serializer_services.register_serializer(instance, **kwargs)\n\n return cls\n\n return decorator", "def register(self, cls, method=None):\n if isinstance(cls, (classmethod, staticmethod)):\n first_annotation = {}\n for k, v in cls.__func__.__annotations__.items():\n first_annotation[k] = v\n break\n cls.__annotations__ = first_annotation\n return self.dispatcher.register(cls, func=method)", "def register(self, cls, method=None):\n if isinstance(cls, (classmethod, staticmethod)):\n first_annotation = {}\n for k, v in cls.__func__.__annotations__.items():\n first_annotation[k] = v\n break\n cls.__annotations__ = first_annotation\n return self.dispatcher.register(cls, func=method)", "def add_flow_controller(cls, name, controller):\n cls.registered_controllers[name] = controller", "def _create_controller(main_controller, action_controller_list):\n controller = server.wsgi.Resource(main_controller())\n for ctl in action_controller_list:\n controller.register_actions(ctl())\n return controller", "def getController(self):\n return self.__controller", "def releaseController(self,entry):\n \n controllerName = entry.get('controller')\n \n if controllerName is None:\n self.logger.debug('Path: \"{}\" controller not decleared, we leave'.format(entry.get('path')))\n self.controller = Controller(self)\n return\n \n self.logger.debug(\"entrypath: {} controller: {}\".format(entry.get('path'),controllerName))\n\n sControllerPath = entry.get('path','').replace('/','.')\n sControllerPath = sControllerPath.lower()\n \n if sControllerPath.startswith('.'): sControllerPath = sControllerPath[1:]\n\n if sControllerPath == '':\n sControllerFile = 'mvc.controller.{}'.format(controllerName)\n else:\n sControllerFile = 'mvc.controller.{}.{}'.format(sControllerPath,controllerName)\n \n sControllerFile = self.settings.base+'/'+sControllerFile.replace('.','/')+'.py'\n sControllerFile = os.path.realpath(sControllerFile)\n \n if not os.path.isfile(sControllerFile):\n msg = 'Keinen Controller Datei {} gefunden'.format(sControllerFile)\n self.logger.debug(msg)\n self.content = msg\n Emergency.stop(msg)\n return\n\n if sControllerPath == '':\n sCommand = \"from mvc.controller.{0} import {0}\".format(controllerName)\n else:\n sCommand = \"from mvc.controller.{0}.{1} import 
{1}\".format(sControllerPath,controllerName)\n \n self.logger.debug('Import Controller over \"{}\"'.format(sCommand))\n try:\n exec(sCommand)\n except Exception as ex:\n msg = 'Fehler bei Import des Controller \"{}\": \"{}\"'.format(sCommand,ex)\n self.content = msg\n self.logger.debug(msg)\n Emergency.stop(msg)\n \n self.controller = None\n sCommand = \"{}(self)\".format(controllerName)\n self.logger.debug('Build controller by sentence: \"{}\"'.format(sCommand))\n\n try:\n self.controller = eval(sCommand)\n except Exception as ex:\n msg = 'Controller \"{}\" kann nicht initialiert werden; Meldung: \"{}\"'.format(sCommand,ex)\n self.content = msg\n self.logger.debug(msg)\n Emergency.stop(msg)\n\n \n self.prepareController()\n \n try:\n self.controller.get()\n except Exception as ex:\n msg = 'Fehler bei get() des Controller \"{}\": \"{}\" Abbruch'.format(controllerName,ex)\n self.logger.debug(msg)\n self.logger.debug(self.content)\n self.controller.status == self.controller.FAILED\n Emergency.stop(msg)", "def __parse_controller_router(cls):\n router = getattr(cls, Controller.RC_KEY)\n\n dependencies = None\n if hasattr(cls, \"dependencies\"):\n dependencies = deepcopy(cls.dependencies)\n delattr(cls, \"dependencies\")\n\n for route in router.routes:\n # add class dependencies\n if dependencies:\n for depends in dependencies[::-1]:\n route.dependencies.insert(0, depends)\n\n # get the signature of the endpoint function\n signature = inspect.signature(route.endpoint)\n # get the parameters of the endpoint function\n signature_parameters = list(signature.parameters.values())\n\n # replace the class instance with the itself FastApi Dependecy\n signature_parameters[0] = signature_parameters[0].replace(\n default=Depends(cls)\n )\n\n # set self and after it the keyword args\n new_parameters = [signature_parameters[0]] + [\n parameter.replace(kind=inspect.Parameter.KEYWORD_ONLY)\n for parameter in signature_parameters[1:]\n ]\n\n new_signature = signature.replace(parameters=new_parameters)\n setattr(route.endpoint, Controller.SIGNATURE_KEY, new_signature)\n\n return router", "def controller(*args, allControllers: bool=True, children: bool=True, group: bool=True, index:\n Union[int, bool]=0, isController: Union[AnyStr, bool]=\"\", parent: bool=True,\n pickWalkDown: bool=True, pickWalkLeft: bool=True, pickWalkRight: bool=True,\n pickWalkUp: bool=True, unparent: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def testControllerAndMockFactory(self):\n\t \n\t c = Controller()\n\t x = c.mock(KlassBeingMocked)\n\t self.failIf(x == None)", "def decorate_class(klass, decorator, methods=None):\n for n in dir(klass):\n # skips if not in desired methods\n if methods and (n not in methods):\n continue\n f = getattr(klass, n)\n if hasattr(f, 'im_func'):\n setattr(klass, n, decorator(f.im_func))", "def __call__ (self, cls):\n # Define a wrapper function to capture the actual instantiation and __init__ params\n @wraps(cls)\n def wrapper_f(*args, **kwargs):\n #print(f'type of cls is {type(cls)}')\n peripheral = self.peripheral_type(**self.kwargs)\n o = cls(*args, **kwargs)\n o.message_debug(f\"Decorating class {cls.__name__} with {self.peripheral_type.__name__}\")\n o.attach_sensor(peripheral)\n return o\n return wrapper_f", "def as_view(cls, *class_args, **class_kwargs):\n def view(*args, **kwargs):\n self = view.view_class(*class_args, **class_kwargs)\n return self.dispatch_request(*args, **kwargs)\n\n if cls.decorators:\n view.__module__ = cls.__module__\n for 
decorator in cls.decorators:\n view = decorator(view)\n\n view.view_class = cls\n view.__doc__ = cls.__doc__\n view.__module__ = cls.__module__\n return view", "def controller_factory(cls, passes, options, **partial_controller):\n if None in partial_controller.values():\n raise TranspilerError('The controller needs a condition.')\n\n if partial_controller:\n for registered_controller in cls.registered_controllers.keys():\n if registered_controller in partial_controller:\n return cls.registered_controllers[registered_controller](passes, options,\n **partial_controller)\n raise TranspilerError(\"The controllers for %s are not registered\" % partial_controller)\n\n return FlowControllerLinear(passes, options)", "def decorator():\n return _decorator", "def register(self, controller_class, model=None, parent=None, **options):\n # ensure the base either had a model or one was passed\n if controller_class.model:\n if model and model != controller_class.model:\n raise ValueError(\n 'You cannot use a Controller with a \"model\" specified as '\n 'a generic Controller class.'\n )\n model = controller_class.model\n else:\n if not model:\n raise ValueError(\n 'You must provide a \"model\" for this Controller.'\n )\n controller_class.model = model\n\n if model._meta.abstract:\n raise ImproperlyConfigured(\n 'The model {} is abstract, so it cannot be registered '\n 'with a controller.'.format(model.__name__)\n )\n\n # Ignore the registration if the model has been swapped out.\n if not model._meta.swapped:\n if options:\n options['__module__'] = __name__\n controller_class = type(\n \"%sController\" % model.__name__,\n (controller_class,),\n options\n )\n\n # Instantiate the controller and save in the appropriate registry\n parent = parent or self\n controller_obj = controller_class(parent=parent, registrar=self)\n if settings.DEBUG:\n system_check_errors.extend(controller_obj.check())\n\n self._registry[model] = controller_obj\n\n # handle all child registrations at this time\n for child_controller_class in controller_class.children:\n controller_obj.register(child_controller_class,\n parent=controller_obj)", "def delegated(cls):\n return cls", "def decorator(cls):\n\n instance = cls(*args, **kwargs)\n serializer_services.register_serializer(instance, **kwargs)\n\n return cls", "def activate_controller(self):\n if self.controller_address:\n #print \"Activating controller...\"\n self.controller = Controller(\n self.controller_address,\n self.proxy_address,\n self.migrating)\n self.controller.switch = self\n else:\n print \"[WARNING] Controller undefined\"", "def check_acl(func):\n\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if request.method in EXEMPT_METHODS: # pragma: no cover\n return func(*args, **kwargs)\n # 'func' is a Flask.view.MethodView so we have access to some special\n # params\n cls = func.view_class\n login_required = getattr(cls, \"login_required\", True)\n if (\n bui.auth != \"none\"\n and login_required\n and not bui.config.get(\"LOGIN_DISABLED\", False)\n ):\n if current_user.is_anonymous:\n abort(403)\n return func(*args, **kwargs)\n\n return decorated_view", "def prepareController(self):\n pass", "def checktype(type):\n def decorator(klass):\n register_type(type, klass)\n return klass\n\n return decorator", "def controller(self):\n return self._controller", "def annotations_class(cls):\n assert(isclass(cls))\n # To play it safe we avoid to modify the dict while iterating over it,\n # so we previously cache keys.\n # For this we don't use keys() because of Python 3.\n # 
Todo: Better use inspect.getmembers here\n keys = [key for key in cls.__dict__]\n for key in keys:\n memb = cls.__dict__[key]\n if _check_as_func(memb):\n annotations_func(memb)\n elif isclass(memb):\n annotations_class(memb)\n return cls", "def Unprotected():\n def wrapper(original_class):\n orig_init = original_class.__init__\n\n @functools.wraps(original_class)\n def __init__(self, *args, **kws):\n self.falcon_security__roles = []\n self.falcon_security__unprotected = True\n orig_init(self, *args, **kws)\n\n original_class.__init__ = __init__\n return original_class\n return wrapper", "def as_view(cls, **initkwargs):\n # sanitize keyword arguments\n for key in initkwargs:\n if key in cls.http_method_names:\n raise TypeError(\"You tried to pass in the %s method name as a \"\n \"keyword argument to %s(). Don't do that.\"\n % (key, cls.__name__))\n if not hasattr(cls, key):\n raise TypeError(\"%s() received an invalid keyword %r\" % (\n cls.__name__, key))\n\n def view(request, *args, **kwargs):\n self = cls(**initkwargs)\n return self.dispatch(request, *args, **kwargs)\n\n # take name and docstring from class\n update_wrapper(view, cls, updated=())\n\n # and possible attributes set by decorators\n # like csrf_exempt from dispatch\n update_wrapper(view, cls.dispatch, assigned=())\n return view", "def get_controller_func(controller):\n\n if controller in CONTROLLERS:\n return CONTROLLERS[controller]\n\n return None", "def _controller(self):\n # TODO: Probably better to use request_patron and check for\n # None here.\n patron = self.authenticated_patron_from_request()\n storage = CirculationPatronProfileStorage(patron, flask.url_for)\n return CoreProfileController(storage)", "def add_status_code(code):\n def class_decorator(cls):\n cls.status_code = code\n return cls\n return class_decorator", "def good_classmethod_decorator(decorator): \n def new_decorator(cls, f):\n g = decorator(cls, f)\n g.__name__ = f.__name__\n g.__doc__ = f.__doc__\n g.__dict__.update(f.__dict__)\n return g\n \n new_decorator.__name__ = decorator.__name__\n new_decorator.__doc__ = decorator.__doc__\n new_decorator.__dict__.update(decorator.__dict__)\n\n return new_decorator", "def under_review():\n\n def decorator(cls_or_callable: Union[Callable, Type], feature_name: Optional[str]=None, was_class: bool=False):\n if feature_name is None:\n feature_name = cls_or_callable.__qualname__\n message = f'The feature {feature_name} is currently marked under review.'\n filterwarnings('once', message, UnderReviewWarning)\n if inspect.isclass(cls_or_callable):\n cls_or_callable.__init__ = decorator(cls_or_callable.__init__, feature_name=cls_or_callable.__qualname__, was_class=True)\n cls_or_callable.__doc__ = _create_docstring_message(cls_or_callable.__doc__, message)\n return cls_or_callable\n\n @functools.wraps(cls_or_callable)\n def wrapper(*args, **kwargs):\n _raise_review_warning(message)\n return cls_or_callable(*args, **kwargs)\n if not was_class:\n wrapper.__doc__ = _create_docstring_message(cls_or_callable.__doc__, message)\n return wrapper\n return decorator", "def create_decorated_class(klass, decorator, methods=None):\n class Decorated(klass): pass\n d_klass = Decorated\n decorate_class(d_klass, decorator, methods)\n return d_klass", "def __init__(self, decorated):\n self.decorated = decorated", "def __init__(self, decorated):\n self.decorated = decorated", "def __init__(self, decorated):\n self.decorated = decorated", "def register_keras_custom_object(cls):\n tf.keras.utils.get_custom_objects()[cls.__name__] = cls\n 
return cls", "def init_controllers(app):\n for controller in os.listdir(os.getcwd() + \"/controllers\"):\n module_name, ext = os.path.splitext(controller)\n if module_name.endswith('_controller') and ext == '.py':\n module = __import__(\"controllers.%s\" % module_name)\n PYSTHClient.controllers.append(\n module.__getattribute__(module_name))\n for controller in PYSTHClient.controllers:\n app.register_blueprint(controller.PAGE)", "def device(view):\n def _decorator(request, *args, **kwargs):\n if not hasattr(request, \"device\"):\n m = DeviceMiddleware()\n m.process_request(request)\n return view(request, *args, **kwargs)\n\n _decorator.__doc__ = view.__doc__\n _decorator.__name__ = view.__name__\n\n return _decorator", "def register_controllers(app: FastAPI) -> None:\n app.include_router(base.router)\n app.include_router(checks.router, prefix=\"/checks\", tags=[\"checks\"])", "def setup_one_time_controllers(self):\n self.index_controller = IndexController(self)\n self.opds_feeds = OPDSFeedController(self)\n self.marc_records = MARCRecordController(self)\n self.loans = LoanController(self)\n self.annotations = AnnotationController(self)\n self.urn_lookup = URNLookupController(self)\n self.work_controller = WorkController(self)\n self.analytics_controller = AnalyticsController(self)\n self.profiles = ProfileController(self)\n self.heartbeat = HeartbeatController()\n self.odl_notification_controller = ODLNotificationController(self)\n self.shared_collection_controller = SharedCollectionController(self)\n self.static_files = StaticFileController(self)\n self.rbdproxy = RBDFulfillmentProxyController(self)\n\n from api.lcp.controller import LCPController\n self.lcp_controller = LCPController(self)", "def setup_controller(cls, args, config):\n logging.debug(\"MOLNSController.setup_controller(config={0})\".format(config))\n # name\n if len(args) > 0:\n controller_name = args[0]\n else:\n print \"Usage: molns.py controller setup NAME\"\n return\n try:\n controller_obj = config.get_object(args[0], kind='Controller')\n except DatastoreException as e:\n # provider\n providers = config.list_objects(kind='Provider')\n if len(providers) == 0:\n print \"No providers configured, \" \\\n \"please configure one ('molns provider setup') before initializing controller.\"\n return\n print \"Select a provider:\"\n for n, p in enumerate(providers):\n print \"\\t[{0}] {1}\".format(n, p.name)\n provider_ndx = int(raw_input_default(\"Enter the number of provider:\", default='0'))\n provider_id = providers[provider_ndx].id\n provider_obj = config.get_object(name=providers[provider_ndx].name, kind='Provider')\n logging.debug(\"using provider {0}\".format(provider_obj))\n # create object\n try:\n controller_obj = config.create_object(ptype=provider_obj.type, name=controller_name, kind='Controller',\n provider_id=provider_id)\n except DatastoreException as e:\n print e\n return\n setup_object(controller_obj)\n config.save_object(controller_obj, kind='Controller')", "def lock_model():\n def decorate(cls):\n def decorator(fn):\n @wraps(fn)\n def inner(*args, **kwargs):\n # This is a class decorator and its targets are methods, so\n # the first argument will be ``self``.\n self = args[0]\n model = self.request.validated.get('model', None)\n method_name = '%s.%s' % (self.__class__.__name__, fn.__name__)\n\n # Lock the model before entering the method body.\n if model:\n model.lock.acquire()\n log.info('Model locked by %s' % method_name)\n\n try:\n result = fn(*args, **kwargs)\n except:\n # Release the lock if an exception 
occurs and propagate\n # the exception up the stack.\n if model:\n model.lock.release()\n log.exception('Model unlocked after view exception '\n 'by %s' % method_name)\n raise\n\n # Release the lock after the method completes.\n if model:\n model.lock.release()\n log.info('Model unlocked by %s' % method_name)\n\n return result\n return inner\n\n targets = ['get', 'put', 'post', 'delete']\n for method in [attr for attr in cls.__dict__\n if attr in targets and callable(getattr(cls, attr))]:\n setattr(cls, method, decorator(getattr(cls, method)))\n\n return cls\n return decorate", "def method(verb):\n\n def decorator(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n client = kwargs.pop('client', self.default_client)\n token = self.token(client)\n headers = kwargs.pop('headers', {})\n headers.update(self.bearer(token))\n meth = functools.partial(getattr(self, verb), headers=headers)\n return f(self, meth, *args, **kwargs)\n return wrapper\n return decorator", "def loadControllers(self):\n\n\t\tfor param_tuple, handler in self.dispatch_rules.items():\n\t\t\tcallable_key = self.__conventionalizeParams(param_tuple)\t\t\n\t\t\tcontroller = handler.im_class(self.options)\n\t\t\tself.callables[callable_key] = getattr(controller, handler.__name__)", "def secure_class(cls): # type: ignore\n return cls", "def setup_class(cls):\n cls.behaviour = MyScaffoldBehaviour(\"behaviour\", SkillContext())", "def decorator(func):\n\t\treturn push_aspect(name or func.__name__, func)", "def opaque_class(self, classobj):\n self.restrict_class(classobj, None)", "def route(cls, url, method='GET'):\n def route_decorator(func):\n item = (url, method, func)\n cls._docoratedRouteHandlers.append(item)\n return func\n return route_decorator", "def add_rest_routes(self, route, api=None, pos=0):\n def decorator(cls):\n # parent is the parent class of the relation\n cls_name = cls.__name__.lower()\n #print(cls_name)\n # default REST is the following pattern:\n # (r\"/post/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\", PostHandler),\n action=\"\"\n # if cls_name.endswith(\"handler\"):\n # action=action[:-7]\n # else:\n # action = cls_name\n # if route:\n action=route\n\n r=r\"/\"+action+r\"/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\"\n if api:\n # render the given api in the route URL\n r=r\"/\"+action+r\"/\"+str(api)+r\"/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\"\n \n #print(\"added the following routes: \" + r)\n handlers=getattr(self.__class__, \"handlers\", None)\n handlers.append((r,cls))\n \n # use the positioned handlers\n handlers_tmp=getattr(self.__class__, \"handlers_tmp\", None)\n handlers_tmp.append(((r,cls),pos))\n\n r=r\"/\"+action+r\"/*\"\n #print(\"added the following routes: \" + r)\n handlers.append((r,cls))\n handlers_tmp.append(((r,cls),pos))\n #print(\"handlers: \" + str(self.handlers))\n print(\"ROUTING: added RESTful routes for: \" + cls.__name__ + \" as /\" + action)\n #print(dir())\n return cls\n return decorator", "def register(self, wsgi_app):\n wsgi_app.add_url_rule(\n rule=self.path,\n view_func=self.controller,\n methods=self.methods)", "def _register_controller(controller):\n with _running_threads_lock:\n if _running_threads_stopping:\n raise threadprop.InterruptExceptionStop()\n name=controller.name\n if name in _running_threads:\n raise threadprop.DuplicateControllerThreadError(\"thread with name {} already exists\".format(name))\n if name not in _created_threads:\n raise threadprop.NoControllerThreadError(\"thread 
with name {} hasn't been created\".format(name))\n _running_threads[name]=controller\n del _created_threads[name]\n _running_threads_notifier.notify()", "def trace_cls(name, **kwargs):\n\n def decorator(cls):\n if profiler and 'profiler' in CONF:\n trace_decorator = profiler.trace_cls(name, kwargs)\n return trace_decorator(cls)\n return cls\n\n return decorator", "def test_uses_wraps(self):\n @self.actions(\"ctx_name\", [])\n def myview(request, some_id):\n \"\"\"docstring\"\"\"\n\n self.assertEqual(myview.func_name, \"myview\")\n self.assertEqual(myview.func_doc, \"docstring\")", "def expose(self, model, route='/api', access_control=None, resource_class=Resource, **kwargs):\n endpoint_path = route + '/' + inflection.pluralize(inflection.underscore(model.__name__))\n endpoint = endpoint_path\n resource = Resource(model=model, access_control=access_control)\n self._add_api_method(endpoint_path, resource.list_,\n methods=['GET'], endpoint=endpoint + '/list')\n self._add_api_method('%s/<id>' % endpoint_path, resource.get_,\n methods=['GET'], endpoint=endpoint + '/get')\n\n self._add_api_method(endpoint_path, resource.put_,\n methods=['PUT'], endpoint=endpoint + '/put')\n\n self._add_api_method('%s/<id>' % endpoint_path, resource.delete_,\n methods=['DELETE'], endpoint=endpoint + '/delete')\n\n self._add_api_method(endpoint_path, resource.post_,\n methods=['POST'], endpoint=endpoint + 'post')\n\n self._add_api_method('%s/<id>' % endpoint_path, resource.patch_,\n methods=['PATCH'], endpoint=endpoint + 'patch')", "def extend(class_to_extend):\n def decorator(func):\n if hasattr(class_to_extend, func.func_name):\n raise except_osv(_(\"Developper Error\"),\n _(\"You can extend the class %s with the method %s.\",\n \"Indeed this method already exist use the decorator 'replace' instead\"))\n setattr(class_to_extend, func.func_name, func)\n return class_to_extend\n return decorator", "def __call__(self, environ, start_response):\n request = Request(environ)\n response = Response()\n WSGIApplication.active_instance = self\n\n # Match the path against registered routes.\n kargs = self.mapper.match(request.path)\n if kargs is None:\n raise TypeError('No routes match. Provide a fallback to avoid this.')\n\n # Extract the module and controller names from the route.\n try:\n module_name, class_name = kargs['controller'].split(':', 1)\n except (KeyError, ValueError):\n module_name = kargs['controller']\n class_name = module_name\n del kargs['controller']\n module_name = _CONTROLLERS_MODULE_PREFIX + '.' 
+ module_name\n\n # Initialize matched controller from given module.\n try:\n __import__(module_name)\n module = sys.modules[module_name]\n controller = getattr(module, class_name)()\n controller.initialize(request, response)\n except (ImportError, AttributeError):\n logging.exception('Could not import controller %s:%s',\n module_name, class_name)\n raise ImportError('Controller %s from module %s could not be initialized.'\n % (class_name, module_name))\n\n # Use the action set in the route, or the HTTP method.\n if 'action' in kargs:\n action = kargs['action']\n del kargs['action']\n else:\n action = environ['REQUEST_METHOD'].lower()\n if action not in [\n 'get', 'post', 'head', 'options', 'put', 'delete', 'trace']:\n action = None\n\n if controller and action:\n try:\n # Execute the requested action, passing the route dictionary as\n # named parameters.\n getattr(controller, action)(**kargs)\n except error.AccessDenied, acl_e:\n logging.exception(acl_e)\n response.set_status(404)\n except Exception, e:\n # We want to catch any exception thrown by the controller and\n # pass it on to the controller's own exception handler.\n controller.handle_exception(e, self.__debug)\n\n response.wsgi_write(start_response)\n return ['']\n else:\n response.set_status(404)", "def become_controller(self):\n return self._send_command('control')", "def class_method_name_scope(method):\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n with tf.name_scope(self.__class__.__name__):\n return method(*args, **kwargs)\n\n wrapper.original_method = method\n return wrapper", "def jit_class(cls):\n from mindspore import nn\n # Check if cls is of type class.\n if not inspect.isclass(cls):\n raise TypeError(f'Decorator jit_class can only be used for class type, but got {cls}.')\n # Check if cls is nn.Cell.\n if issubclass(cls, nn.Cell):\n raise TypeError(f\"Decorator jit_class is used for user-defined classes and cannot be used for nn.Cell: {cls}.\")\n setattr(cls, '__ms_class__', True)\n return cls", "def decorator(func):\n\n pass", "def override(class_to_extend, prefix):\n def decorator(func):\n if not hasattr(class_to_extend, func.func_name):\n raise except_osv(_(\"Developper Error\"),\n _(\"You can replace the method %s of the class %s. \"\n \"Indeed this method doesn't exist\")%(func.func_name, class_to_extend))\n original_function_name = prefix + func.func_name\n if hasattr(class_to_extend, original_function_name):\n raise except_osv(_(\"Developper Error\"),\n _(\"The method %s already exist. 
\"\n \"Please change the prefix name\")%original_function_name)\n setattr(class_to_extend, original_function_name, getattr(class_to_extend, func.func_name))\n setattr(class_to_extend, func.func_name, func)\n return class_to_extend\n return decorator", "def decorator(obj):\n\t\tDRIVERS[name] = obj\n\t\treturn obj", "def __init__(self, view, model):\n self.view = view\n self.view.set_controller(self)\n self.model = model", "def predicate(cls: nodes.ClassDef) -> bool:\n if cls.name in CLASS_NAME_SKIPLIST:\n # class looks like an API model class, but it isn't.\n return False\n\n if not cls.name.endswith(\"API\") and \"schema\" not in cls.locals:\n # class does not look like an API model class.\n return False\n\n return True", "def raise_on_gen1_controller(\n func: Callable[..., Awaitable[_T]]\n ) -> Callable[..., Awaitable[_T]]:\n\n def decorator(\n inst: type[EndpointManager], *args: _P.args, **kwargs: _P.kwargs\n ) -> Awaitable[_T]:\n if inst.controller.hardware_version == \"1\":\n raise UnknownAPICallError(\n f\"Can't call {func.__name__} on a 1st generation controller\"\n )\n return func(inst, *args, **kwargs)\n\n return decorator", "def trace_cls(name, **kwargs):\n\n def decorator(cls):\n if profiler and hasattr(CONF, 'profiler') and CONF.profiler.enabled:\n trace_decorator = profiler.trace_cls(name, kwargs)\n return trace_decorator(cls)\n return cls\n\n return decorator", "def get_controller(equipment, accessmethod, logfile=None):\n path = _CONTROLLERMAP[accessmethod]\n constructor = module.get_object(path)\n return constructor(equipment, logfile)" ]
[ "0.65690434", "0.6126933", "0.5997419", "0.5680925", "0.5661249", "0.5657938", "0.5650889", "0.5638046", "0.55720645", "0.55620176", "0.5495032", "0.54726076", "0.547135", "0.54366666", "0.5421368", "0.54175097", "0.5387649", "0.5378564", "0.52877134", "0.5242758", "0.5221134", "0.5186185", "0.5183567", "0.5162877", "0.51590693", "0.5149883", "0.5136152", "0.5085383", "0.50821674", "0.5064457", "0.5062116", "0.5027159", "0.5022082", "0.5019816", "0.5014772", "0.5014772", "0.50080013", "0.50068796", "0.50010717", "0.49932215", "0.49910104", "0.49833566", "0.4971317", "0.49702176", "0.49378818", "0.49361604", "0.49191028", "0.4909078", "0.48898256", "0.48671907", "0.4859925", "0.48493305", "0.48332062", "0.48289433", "0.4816957", "0.48156723", "0.47999913", "0.47940773", "0.47849154", "0.4775657", "0.47724548", "0.47715944", "0.4770078", "0.47616273", "0.4755609", "0.47524813", "0.47524813", "0.47524813", "0.47366336", "0.4736012", "0.4731254", "0.47245285", "0.4723038", "0.4720703", "0.47143", "0.47101206", "0.4677001", "0.4668623", "0.46655482", "0.465169", "0.464579", "0.46451926", "0.46427378", "0.46402752", "0.46294478", "0.46259603", "0.46258092", "0.46246615", "0.46227953", "0.461126", "0.45987543", "0.45955008", "0.45881355", "0.45852304", "0.45814943", "0.4553036", "0.45446432", "0.45246035", "0.4521291", "0.45183784", "0.4518216" ]
0.0
-1
A decorator function to mark a Class to be automatically loaded by the Controller
def use(_): def wrapper(cls): __app_controllers__.append(cls) return cls return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register(cls, D: DONLOADER_CLASS) -> DONLOADER_CLASS:\r\n ...", "def identify_class(self, cls):", "def register(cls, class_to_register):\n cls.registered_loaders.append(class_to_register)\n return class_to_register", "def register(cls):\n register(cls, cls.provided_class)", "def setup_class(klass):", "def setup_class(klass):", "def setup_class(cls):", "def setup_class(cls):", "def auto_validator_hook():\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available auto validator hooks.\n\n :param type cls: auto validator hook class.\n\n :returns: auto validator hook class.\n :rtype: type\n \"\"\"\n\n instance = cls()\n auto_validator_services.register_hook(instance)\n\n return cls\n\n return decorator", "def decorator(cls):\n\n instance = cls()\n auto_validator_services.register_hook(instance)\n\n return cls", "def _class(self, _class):\n\n self.__class = _class", "def _class(self, _class):\n\n self.__class = _class", "def register(cls, class_):\n cls._registered[class_.tag()] = class_", "def register_instance(cls):\n\n @functools.wraps(cls)\n def wrapper_decorator(*args, **kwargs):\n\n instance = cls(*args, **kwargs)\n\n Register[cls.__name__] = instance\n\n return instance\n\n return wrapper_decorator", "def before_request():\r\n\r\n\tinit_classes()", "def _loadClass(self, loader):\r\n raise NotImplementedError(\"The method 'loadClass' has to \"\r\n 'be implemented.')", "def after_class_creation(cls):\n pass", "def extension(klass):\n registry.register(klass)\n return klass", "def setup_class(cls):\n pass", "def setup_class(cls):\n pass", "def discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being discoverable.\n setattr(_class, _get_discoverable_attribute(_class), True)\n return _class", "def register(self, cls, method=None):\n if isinstance(cls, (classmethod, staticmethod)):\n first_annotation = {}\n for k, v in cls.__func__.__annotations__.items():\n first_annotation[k] = v\n break\n cls.__annotations__ = first_annotation\n return self.dispatcher.register(cls, func=method)", "def register(self, cls, method=None):\n if isinstance(cls, (classmethod, staticmethod)):\n first_annotation = {}\n for k, v in cls.__func__.__annotations__.items():\n first_annotation[k] = v\n break\n cls.__annotations__ = first_annotation\n return self.dispatcher.register(cls, func=method)", "def setup_class(self):\n pass", "def setup_class(cls):\n cls.behaviour = MyScaffoldBehaviour(\"behaviour\", SkillContext())", "def __call__(cls, *args, **kwargs):\n if cls not in cls._instance:\n cls._instance[cls] = super(Metaclass, cls).__call__(*args, **kwargs)\n return cls._instance[cls]", "def register_for_auto_class(cls, auto_class=\"FlaxAutoModel\"):\n if not isinstance(auto_class, str):\n auto_class = auto_class.__name__\n\n import transformers.models.auto as auto_module\n\n if not hasattr(auto_module, auto_class):\n raise ValueError(f\"{auto_class} is not a valid auto class.\")\n\n cls._auto_class = auto_class", "def setup_class(cls):\n cls.handler = MyScaffoldHandler(\"handler\", SkillContext())", "def register_class(self, cls, *, name=None):\n cls_name = self.host.cache_class(cls, name)\n self.register_constant(cls, cls_name)", "def __init_on_load__(self):", "def annotations_class(cls):\n assert(isclass(cls))\n # To play it safe we avoid to modify the dict while iterating over it,\n # so we previously cache keys.\n # For this we don't use keys() because of Python 3.\n # Todo: Better use inspect.getmembers 
here\n keys = [key for key in cls.__dict__]\n for key in keys:\n memb = cls.__dict__[key]\n if _check_as_func(memb):\n annotations_func(memb)\n elif isclass(memb):\n annotations_class(memb)\n return cls", "def __init__(self, classx, method_name, decorator_func):\n self.method_name = method_name\n self.decorator_func = decorator_func\n self.classx = classx\n self.patched_by_me = False", "def addClassToPickleWhitelist(cls):\n unpickleWhitelist_.add(cls)", "def jit_class(cls):\n from mindspore import nn\n # Check if cls is of type class.\n if not inspect.isclass(cls):\n raise TypeError(f'Decorator jit_class can only be used for class type, but got {cls}.')\n # Check if cls is nn.Cell.\n if issubclass(cls, nn.Cell):\n raise TypeError(f\"Decorator jit_class is used for user-defined classes and cannot be used for nn.Cell: {cls}.\")\n setattr(cls, '__ms_class__', True)\n return cls", "def checktype(type):\n def decorator(klass):\n register_type(type, klass)\n return klass\n\n return decorator", "def class_based_view(class_obj):\n def _instantiate_view_class(request, *args, **kwargs):\n return class_obj()(request, *args, **kwargs)\n return _instantiate_view_class", "def on_register(cls):", "def register(self, klass):\n if klass not in self.extensions:\n self.extensions.append(klass)", "def __init__(self, cls):\n super().__init__()\n self._cls = cls", "def register_class(cls):\n if cls is RegisteredType:\n raise \"Please do _not_ register RegisteredType!\"\n \n cid = RegisteredType._reg[autoid]\n RegisteredType._reg['classes'][cls] = cid\n RegisteredType._reg['classids'][cid] = cls\n RegisteredType._reg['autoid'] += 1", "def extend_class(cls):\n return lambda f: (setattr(cls, f.__name__, f) or f)", "def secure_class(cls): # type: ignore\n return cls", "def register_keras_custom_object(cls):\n tf.keras.utils.get_custom_objects()[cls.__name__] = cls\n return cls", "def register_class(obj):\r\n try:\r\n KnownClass.objects.get(module_name=obj.__module__, class_name=obj.__class__.__name__)\r\n except DoesNotExist:\r\n # Create it\r\n KnownClass(module_name = obj.__module__, class_name = obj.__class__.__name__).save()", "def opaque_class(self, classobj):\n self.restrict_class(classobj, None)", "def load_later(klass, name, load_func, *a, **kw):\n m = klass(name)\n m._loaded = False\n m._loader = (load_func, a, kw)\n return m", "def addClassRef(clazz):\n\n global h_classes\n header = \"class %s;\" % clazz\n if not header in h_classes:\n h_classes.append(header)", "def define_route(self, route, **kwargs):\n\n def decorator(cls):\n if is_class(cls):\n resource = cls(**kwargs)\n else:\n resource = cls\n\n self.add_route(route, resource)\n\n return cls\n\n return decorator", "def method(cls):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n return func(*args, **kwargs)\n setattr(cls, func.__name__, wrapper)\n return func\n return decorator", "def __call__ (self, cls):\n # Define a wrapper function to capture the actual instantiation and __init__ params\n @wraps(cls)\n def wrapper_f(*args, **kwargs):\n #print(f'type of cls is {type(cls)}')\n peripheral = self.peripheral_type(**self.kwargs)\n o = cls(*args, **kwargs)\n o.message_debug(f\"Decorating class {cls.__name__} with {self.peripheral_type.__name__}\")\n o.attach_sensor(peripheral)\n return o\n return wrapper_f", "def delegated(cls):\n return cls", "def deferred(ref):\n module, _ = ref.split(\".\", 1)\n if module in sys.modules:\n return _getcls(ref)\n\n @meta\n def check(cls):\n full_cls_mod = getattr(cls, 
\"__module__\", None)\n cls_module = full_cls_mod.split(\".\", 1)[0] if full_cls_mod else None\n if cls_module == module:\n return issubclass(cls, _getcls(ref))\n else:\n return False\n\n return check", "def _decorate(cls):\n global_validators = [session_required, catch_typeerror]\n # Cheat methods _hosts_name_label\n # -------------\n # Methods that have a trivial implementation for all classes.\n # 1. get_by_uuid == getting by ref, so just return uuid for\n # all get_by_uuid() methods.\n \n for api_cls in classes.keys():\n # We'll let the autoplug classes implement these functions\n # themselves - its much cleaner to do it in the base class\n \n get_by_uuid = '%s_get_by_uuid' % api_cls\n get_uuid = '%s_get_uuid' % api_cls\n get_all_records = '%s_get_all_records' % api_cls \n\n def _get_by_uuid(_1, _2, ref):\n return xen_api_success(ref)\n\n def _get_uuid(_1, _2, ref):\n return xen_api_success(ref)\n\n def unpack(v):\n return v.get('Value')\n\n def _get_all_records(_api_cls):\n return lambda s, session: \\\n xen_api_success(dict([(ref, unpack(getattr(cls, '%s_get_record' % _api_cls)(s, session, ref)))\\\n for ref in unpack(getattr(cls, '%s_get_all' % _api_cls)(s, session))]))\n\n setattr(cls, get_by_uuid, _get_by_uuid)\n setattr(cls, get_uuid, _get_uuid)\n setattr(cls, get_all_records, _get_all_records(api_cls))\n\n # Autoplugging classes\n # --------------------\n # These have all of their methods grabbed out from the implementation\n # class, and wrapped up to be compatible with the Xen-API.\n\n# def getter(ref, type):\n# return XendAPIStore.get(ref, type)\n\n def wrap_method(name, new_f):\n try:\n f = getattr(cls, name)\n wrapped_f = (lambda * args: new_f(f, *args))\n wrapped_f.api = f.api\n wrapped_f.async = f.async\n setattr(cls, name, wrapped_f)\n except AttributeError:\n # Logged below (API call: %s not found)\n pass\n\n\n def setter_event_wrapper(api_cls, attr_name):\n setter_name = '%s_set_%s' % (api_cls, attr_name)\n wrap_method(\n setter_name,\n lambda setter, s, session, ref, *args:\n _setter_event_dispatch(s, setter, api_cls, attr_name,\n session, ref, args))\n\n\n def ctor_event_wrapper(api_cls):\n ctor_name = '%s_create' % api_cls\n wrap_method(\n ctor_name,\n lambda ctor, s, session, *args:\n _ctor_event_dispatch(s, ctor, api_cls, session, args))\n\n\n def dtor_event_wrapper(api_cls):\n dtor_name = '%s_destroy' % api_cls\n wrap_method(\n dtor_name,\n lambda dtor, s, session, ref, *args:\n _dtor_event_dispatch(s, dtor, api_cls, session, ref, args))\n\n\n # Wrapping validators around XMLRPC calls\n # ---------------------------------------\n for api_cls, validator in classes.items():\n def doit(n, takes_instance, async_support=False,\n return_type=None):\n n_ = n.replace('.', '_')\n try:\n f = getattr(cls, n_)\n if n not in argcounts:\n argcounts[n] = f.func_code.co_argcount - 1\n \n validators = takes_instance and validator and \\\n [validator] or []\n \n validators += global_validators\n for v in validators:\n f = v(f)\n f.api = n\n f.async = async_support\n if return_type:\n f.return_type = return_type\n \n setattr(cls, n_, f)\n except AttributeError:\n log.warn(\"API call: %s not found\" % n)\n\n \n ro_attrs = getattr(cls, '%s_attr_ro' % api_cls, []) \\\n + cls.Base_attr_ro\n rw_attrs = getattr(cls, '%s_attr_rw' % api_cls, []) \\\n + cls.Base_attr_rw\n methods = getattr(cls, '%s_methods' % api_cls, []) \\\n + cls.Base_methods\n funcs = getattr(cls, '%s_funcs' % api_cls, []) \\\n + cls.Base_funcs\n\n # wrap validators around readable class attributes\n for attr_name in 
ro_attrs + rw_attrs:\n doit('%s.get_%s' % (api_cls, attr_name), True,\n async_support=False)\n\n # wrap validators around writable class attrributes\n for attr_name in rw_attrs:\n doit('%s.set_%s' % (api_cls, attr_name), True,\n async_support=False)\n setter_event_wrapper(api_cls, attr_name)\n\n # wrap validators around methods\n for method_name, return_type in methods:\n doit('%s.%s' % (api_cls, method_name), True,\n async_support=True)\n\n # wrap validators around class functions\n for func_name, return_type in funcs:\n \n doit('%s.%s' % (api_cls, func_name), False,\n async_support=True,\n return_type=return_type)\n \n ctor_event_wrapper(api_cls)\n dtor_event_wrapper(api_cls)", "def __init__(self, loader, *args, **kw):\r\n self._loader = loader", "def extend(class_to_extend):\n def decorator(func):\n if hasattr(class_to_extend, func.func_name):\n raise except_osv(_(\"Developper Error\"),\n _(\"You can extend the class %s with the method %s.\",\n \"Indeed this method already exist use the decorator 'replace' instead\"))\n setattr(class_to_extend, func.func_name, func)\n return class_to_extend\n return decorator", "def __class__(self, ???):", "def test_loader(cls):\r\n return _test_loader_factory(cls)", "def loader(self):\n return self.loader_class()", "def trace_cls(name, **kwargs):\n\n def decorator(cls):\n if profiler and 'profiler' in CONF:\n trace_decorator = profiler.trace_cls(name, kwargs)\n return trace_decorator(cls)\n return cls\n\n return decorator", "def visit_class(self, flags, scope, token, parent):\r\n\r\n # define the class name in the current scope\r\n # see visit_block\r\n #scope.define(SC_FUNCTION, token.children[0])\r\n scope.defer(token)", "def setup_class(cls):\n if not cls._lib:\n cls._lib = setup_lib()", "def setup_class(cls):\n if not cls._lib:\n cls._lib = setup_lib()", "def ms_class(cls):\n\n logger.warning(\"'mindspore.ms_class' will be deprecated and removed in a future version. 
\"\n \"Please use 'mindspore.jit_class' instead.\")\n\n # Check if cls is of type class.\n if not inspect.isclass(cls):\n raise TypeError(f'Decorator ms_class can only be used for class type, but got {cls}.')\n # Check if cls is nn.Cell.\n if issubclass(cls, ms.nn.Cell):\n raise TypeError(f\"Decorator ms_class is used for user-defined classes and cannot be used for nn.Cell: {cls}.\")\n logger.info(f'Found ms_class: {cls}.')\n setattr(cls, '__ms_class__', True)\n return cls", "def register(name):\n def func(cls):\n \"\"\"\n See register\n \"\"\"\n REGISTRY[name] = cls()\n return cls\n return func", "def __init__(self, decorated):\n self.decorated = decorated", "def __init__(self, decorated):\n self.decorated = decorated", "def __init__(self, decorated):\n self.decorated = decorated", "def serializer(*args, **kwargs):\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available serializers.\n\n :param type cls: serializer class.\n\n :returns: serializer class.\n :rtype: type\n \"\"\"\n\n instance = cls(*args, **kwargs)\n serializer_services.register_serializer(instance, **kwargs)\n\n return cls\n\n return decorator", "def __init__(self, loader, *args, **kw):\n self._loader = loader", "def process_class(self, parent, cls):\n if cls.typemap.flat_name in self.class_map:\n raise RuntimeError(\"process_class: class {} already exists in class_map\"\n .format(cls.typemap.flat_name))\n self.class_map[cls.typemap.flat_name] = cls\n for var in cls.variables:\n self.add_var_getter_setter(parent, cls, var)\n cls.functions = self.define_function_suffix(cls.functions)", "def register(dmm, typecls):\n def wraps(fn):\n dmm.register(typecls, fn)\n return fn\n\n return wraps", "def spidercls_for_request(spider_loader, request, default_spidercls: Optional[Any] = ..., log_none: bool = ..., log_multiple: bool = ...):\n ...", "def force_load(self):\n pass", "def decorator(cls):\n\n instance = cls(*args, **kwargs)\n caching_services.register_cache(instance, **kwargs)\n\n return cls", "def decorator(obj):\n\t\tDRIVERS[name] = obj\n\t\treturn obj", "def djcat_attr():\n def decorate(cls):\n for b in cls.__bases__:\n if getattr(b, '_is_djcat_attr', None) and getattr(b, 'attr_key', None):\n setattr(cls, '_attr_class', b)\n return cls\n return decorate", "def trace_cls(name, **kwargs):\n\n def decorator(cls):\n if profiler and hasattr(CONF, 'profiler') and CONF.profiler.enabled:\n trace_decorator = profiler.trace_cls(name, kwargs)\n return trace_decorator(cls)\n return cls\n\n return decorator", "def as_handler(cls, **initkwargs):\n @wraps(cls, updated=())\n def handler(asset, *args, **kwargs):\n return handler.handler_class(**initkwargs)(asset, *args, **kwargs)\n handler.handler_class = cls\n handler.supports_check_mode = cls.supports_check_mode\n return handler", "def on_load(self):\n self.__init__()", "def register_predictor(cls=None, *, name=None):\n\n def _register(cls):\n if name is None:\n local_name = cls.__name__\n else:\n local_name = name\n if local_name in _PREDICTORS:\n raise ValueError(f'Already registered models with name: {local_name}')\n _PREDICTORS[local_name] = cls\n return cls\n\n if cls is None:\n return _register\n else:\n return _register(cls)", "def decorator(cls):\n\n instance = cls(*args, **kwargs)\n serializer_services.register_serializer(instance, **kwargs)\n\n return cls", "def classAtributesLoader(objeto, cfg, lista_excessao=[], prefix=None):\n excessoes = []\n tot_carregados = 0\n for keyAbs in cfg:\n key = keyAbs if prefix is None else 
prefix + keyAbs\n if key in objeto.__dict__:\n tot_carregados += 1\n setattr(objeto, key, cfg[keyAbs])\n else:\n excessoes.append(key)\n\n if not excessoes:\n return True, 'Carga automatica de atributos Classe:{0} atributos:{1} ignorados:0'.format(str(objeto.__class__.__name__), tot_carregados)\n\n nova = list(set(excessoes) - set(lista_excessao))\n if not nova:\n return True, 'Carga automatica de atributos Classe:{0} atributos:{1} ignorados:{2}'.format(str(objeto.__class__.__name__), tot_carregados, len(lista_excessao))\n\n return False, 'Campo(s):{0} nao existe(m) na classe:{1}'.format(str(nova), objeto.__class__.__name__)", "def can_load_page(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n expect_loading = False\n if 'expect_loading' in kwargs:\n expect_loading = kwargs['expect_loading']\n del kwargs['expect_loading']\n if expect_loading:\n self._loaded = False\n result = func(self, *args, **kwargs)\n self.wait_for_page_loaded()\n return result\n return func(self, *args, **kwargs)\n\n return wrapper", "def __init__(self):\n if Classifier.__instance is not None:\n LOGGER.logger.exception(\"This class is a singleton!\")\n else:\n self.model = False\n self.load_model()", "def lassh():", "def visit_ClassDef(self, node):\n self.classes[node.name] = self._generate_pytest_decorators(node.decorator_list)\n self.generic_visit(node)", "def classmethod(self, encoding):\n # Add encodings for hidden self and cmd arguments.\n encoding = ensure_bytes(encoding)\n typecodes = parse_type_encoding(encoding)\n typecodes.insert(1, b'@:')\n encoding = b''.join(typecodes)\n\n def decorator(f):\n def objc_class_method(objc_cls, objc_cmd, *args):\n py_cls = ObjCClass(objc_cls)\n py_cls.objc_cmd = objc_cmd\n args = convert_method_arguments(encoding, args)\n result = f(py_cls, *args)\n if isinstance(result, ObjCClass):\n result = result.ptr.value\n elif isinstance(result, ObjCInstance):\n result = result.ptr.value\n return result\n name = f.__name__.replace('_', ':')\n self.add_class_method(objc_class_method, name, encoding)\n return objc_class_method\n return decorator", "def setup_class(cls):\n # ns.assert_true(False, \"setup_class run\")\n print('setup_class\\n')", "def register_app_class(self, cls):\n assert isinstance(cls, type) and issubclass(cls, Model)\n name = cls.__name__\n if not valid_app_name(name):\n raise ValueError('Given app does not have a valid name %r' % name)\n pending, connected = [], []\n if name in self._proxies and cls is not self._proxies[name][0]:\n oldCls, pending, connected = self._proxies[name]\n logger.warn('Re-registering app class %r' % name)\n #raise ValueError('App with name %r already registered' % name)\n self._proxies[name] = cls, pending, connected", "def register(name: str, as_key: bool = False):\n\n # registering magic\n # See https://realpython.com/primer-on-python-decorators/#decorators-with-arguments\n def _register(type_class: type):\n if as_key:\n assert name not in registered_keys, 'key \"' + str(name) + '\" was already registered'\n registered_keys[name] = type_class\n else:\n assert name not in registered_types, 'type \"' + str(name) + '\" was already registered'\n registered_types[name] = type_class\n return type_class\n\n return _register", "def not_discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being not discoverable.\n setattr(_class, _get_discoverable_attribute(_class), False)\n return _class", "def add_class(self, cls):\n self.commands.append(cls)", "def ifc_fallback_class(cls):\n\n if 
\"*\" in classes:\n raise ImportError(\"Already registered {oc} as fallback, cannot register {nc}\".format(\n oc=classes[\"*\"].__name__,\n nc=cls.__name__))\n classes[\"*\"] = cls\n return cls", "def predicate(cls: nodes.ClassDef) -> bool:\n if cls.name in CLASS_NAME_SKIPLIST:\n # class looks like an API model class, but it isn't.\n return False\n\n if not cls.name.endswith(\"API\") and \"schema\" not in cls.locals:\n # class does not look like an API model class.\n return False\n\n return True", "def getClassifier(self):\n return self.classify", "def augment(s, request):\n request.__class__ = s\n return None", "def __init__(self):\n self.classes = {}", "def _pre_mcs_init(cls):\n # technically you could also put a @classmethod with the same name on\n # the Model class, if you prefer that approach", "def _metaclass(mcs):\n def decorator(cls):\n body = vars(cls).copy()\n body.pop('__dict__', None)\n body.pop('__weakref__', None)\n return mcs(cls.__name__, cls.__bases__, body)\n return decorator", "def __subclasshook__(self, ???):" ]
[ "0.6117717", "0.6076721", "0.6028413", "0.59642714", "0.5934709", "0.5934709", "0.5899243", "0.5899243", "0.58098847", "0.5786942", "0.57629675", "0.57629675", "0.573222", "0.56865835", "0.5675948", "0.56697935", "0.56556857", "0.5631191", "0.56014067", "0.56014067", "0.55763286", "0.55471903", "0.55471903", "0.5546332", "0.5523876", "0.55198777", "0.5516845", "0.5511459", "0.5501034", "0.5486917", "0.54709965", "0.54157823", "0.5401205", "0.5397113", "0.537142", "0.5364816", "0.5349416", "0.5348731", "0.5332487", "0.5331967", "0.5316742", "0.5301432", "0.529573", "0.52887887", "0.52875507", "0.52859235", "0.52523786", "0.52341586", "0.52267176", "0.51957995", "0.51923865", "0.5186844", "0.51779145", "0.5167192", "0.5164907", "0.51557195", "0.51517415", "0.5138351", "0.5130247", "0.512799", "0.512616", "0.512616", "0.5112343", "0.50960577", "0.508595", "0.508595", "0.508595", "0.5084199", "0.5079011", "0.5074851", "0.5065514", "0.50555474", "0.5045305", "0.5043839", "0.5043634", "0.50425667", "0.502644", "0.50165004", "0.5012911", "0.5005606", "0.50015336", "0.49986234", "0.4997622", "0.49950498", "0.49871412", "0.49820235", "0.49780264", "0.49699098", "0.496975", "0.49656275", "0.49627128", "0.49565402", "0.49314672", "0.49291787", "0.49286056", "0.4924413", "0.49242014", "0.49200195", "0.4918895", "0.4918224" ]
0.5904735
6
Private utility to parse the router controller property and extract the correct functions handlers
def __parse_controller_router(cls): router = getattr(cls, Controller.RC_KEY) dependencies = None if hasattr(cls, "dependencies"): dependencies = deepcopy(cls.dependencies) delattr(cls, "dependencies") for route in router.routes: # add class dependencies if dependencies: for depends in dependencies[::-1]: route.dependencies.insert(0, depends) # get the signature of the endpoint function signature = inspect.signature(route.endpoint) # get the parameters of the endpoint function signature_parameters = list(signature.parameters.values()) # replace the class instance with the itself FastApi Dependecy signature_parameters[0] = signature_parameters[0].replace( default=Depends(cls) ) # set self and after it the keyword args new_parameters = [signature_parameters[0]] + [ parameter.replace(kind=inspect.Parameter.KEYWORD_ONLY) for parameter in signature_parameters[1:] ] new_signature = signature.replace(parameters=new_parameters) setattr(route.endpoint, Controller.SIGNATURE_KEY, new_signature) return router
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_handlers():\n handlers = list()\n\n #login\n handlers.append((r'/login', Login))\n handlers.append((r'/logout', Logout))\n\n # main\n handlers.append((r'/', Index))\n\n\n #user\n handlers.extend(get_routes(UserController))\n\n #role\n handlers.extend(get_routes(RoleController))\n\n\n handlers.extend(get_routes(ApiServiceController))\n\n handlers.extend(get_routes(InventarioController))\n\n return handlers", "def parse_handler_template(request, handler, args, kwargs):\n args = request.path[1:].split('/')\n\n dict_path = {}\n for index, value in enumerate(args):\n if (len(value) == 0):\n continue\n dict_path[index] = value\n\n module = dict_path.get(0, spy_setting.DEFAULT_MODULE)\n controller = dict_path.get(1, spy_setting.DEFAULT_CONTROLLER)\n action = dict_path.get(2, spy_setting.DEFAULT_ACTION)\n action = (action[0]).upper() + action[1:]\n\n kwargs = {'module': module, 'controller': controller, 'action': action}\n # logging.info(kwargs)\n # logging.info(args)\n counter = 3\n while True:\n key = dict_path.get(counter, None)\n val = dict_path.get(counter+1, None)\n if key is not None:\n request.GET.add(key, val)\n counter += 2\n else:\n break\n\n def sub(match):\n return kwargs.get(match.group().strip('{}'))\n\n return re.sub('{.*?}', sub, handler), args, kwargs", "def mvcObj(self, router):\n pass", "def _get_controller_parameters(self):\n pass", "def get_controller_func(controller):\n\n if controller in CONTROLLERS:\n return CONTROLLERS[controller]\n\n return None", "def _get_event_handler(self, handler):\n assert handler\n\n obj = self\n for attr in handler.split(\".\"):\n obj = getattr(obj, attr)\n return obj", "def _get_event_handler(self, handler):\n assert handler\n\n obj = self\n for attr in handler.split(\".\"):\n obj = getattr(obj, attr)\n return obj", "def get_routers(self):", "def mvcRouter(self, router):\n pass", "def loadControllers(self):\n\n\t\tfor param_tuple, handler in self.dispatch_rules.items():\n\t\t\tcallable_key = self.__conventionalizeParams(param_tuple)\t\t\n\t\t\tcontroller = handler.im_class(self.options)\n\t\t\tself.callables[callable_key] = getattr(controller, handler.__name__)", "def route( request, c ):", "def routes_info():\n routes = []\n for rule in app.url_map.iter_rules():\n try:\n if rule.endpoint != 'static':\n if hasattr(app.view_functions[rule.endpoint], 'import_name'):\n import_name = app.view_functions[rule.endpoint].import_name\n obj = import_string(import_name)\n routes.append({rule.rule: \"%s\\n%s\" % (\",\".join(list(rule.methods)), obj.__doc__)})\n else:\n routes.append({rule.rule: app.view_functions[rule.endpoint].__doc__})\n except Exception as exc:\n routes.append({rule.rule: \n \"(%s) INVALID ROUTE DEFINITION!!!\" % rule.endpoint})\n route_info = \"%s => %s\" % (rule.rule, rule.endpoint)\n app.logger.error(\"Invalid route: %s\" % route_info, exc_info=True)\n # func_list[rule.rule] = obj.__doc__\n\n return jsonify(code=200, data=routes)", "def test_read_namespaced_route(self):\n pass", "def get_route_param_names(endpoint):\n try:\n g = current_app.url_map.iter_rules(endpoint)\n return next(g).arguments\n except KeyError:\n return {}", "def mvcT(self, router, provider):\n pass", "def route_methods(self, route):\n return [route.method]", "def get_routes():\n return sum([load_module(m).ROUTES for m in settings.INSTALLED_HANDLERS], []) + ROUTES", "def handler(req):\n name = gethandlername(req.uri)\n if name == \"dispatcher\":\n raise404(\"Can't display the dispatcher\")\n handlerfunc = gethandlerfunc(name)\n return handlerfunc(req)", 
"def route(self, method, pattern, handler):\n pass", "def connect(controller, path_prefix, routes):\n # register the routes with the mapper, while keeping track of which\n # methods are defined for each URL\n urls = {}\n for r in routes:\n url = path_prefix + r['url']\n methods = r['method']\n if isinstance(methods, six.string_types):\n methods = [methods]\n methods_str = ','.join(methods)\n mapper.connect(r['name'], url, controller=controller,\n action=r['action'],\n conditions={'method': methods_str})\n if url not in urls:\n urls[url] = methods\n else:\n urls[url] += methods\n #print (urls)\n\n # now register the missing methods to return 405s, and register\n # a handler for OPTIONS that returns the list of allowed methods\n for url, methods in urls.items():\n all_methods = ['HEAD', 'GET', 'POST', 'PUT', 'PATCH', 'DELETE']\n missing_methods = [m for m in all_methods if m not in methods]\n allowed_methods_str = ','.join(methods)\n mapper.connect(url,\n controller=default_resource,\n action='reject',\n allowed_methods=allowed_methods_str,\n conditions={'method': missing_methods})\n #print('reject %(url)s , %(missing)s' % {'url':url, 'missing':missing_methods})\n if 'OPTIONS' not in methods:\n mapper.connect(url,\n controller=default_resource,\n action='options',\n allowed_methods=allowed_methods_str,\n conditions={'method': 'OPTIONS'})", "def get_route(function):\n return '/%s%s' % (app.config['PUBLIC_API_PREFIX'], app.config['PUBLIC_API_ROUTES'][function])", "def _get_controller(self):\n return self.__controller", "def test_read_namespaced_route_status(self):\n pass", "def __call__(self, req):\n return self._router", "def route_methods(self, route: web.Route):\n return [route.method]", "def __init__(self, settings={}, prefix=\"\"):\n self.settings = settings\n self.mapper = routes.Mapper()\n self.setup_handlers(self.mapper)", "def __initHandlersUser(self):\n handlers = {}\n handlers['WRITE_FILE'] = self.write_file\n handlers['READU_FILE'] = self.read_file\n handlers['DELET_FILE'] = self.delete_file\n handlers['STATUS_SRV'] = self.status_server\n handlers['RSYNC_FILE'] = self.rsync_file\n handlers['WSYNC_FILE'] = self.wsync_file\n return handlers", "def pathfor( request, *args, **kwargs ):", "def prepare(self):\n self.uri = self.request.uri\n self.path = self.request.uri.split('?')[0]\n self.method = self.path.split('/')[-1]\n self.default_methods = {}\n #\n # You can use the before_handler in a local controller to\n # process your own prepare stuff.\n # a common use case is to call: self.print_debug_info().\n # which then applies only to this specific handler.\n # \n before_handler = getattr(self, \"before_handler\", None)\n print(\"calling before_handler for \" + str(self.__class__))\n if callable(before_handler):\n before_handler()", "def route_info(self):\n return self._request.match_info", "def test_assemble_endpoint_data(self):\n urlparser = UrlParser()\n pattern = self.url_patterns[0]\n\n data = urlparser.__assemble_endpoint_data__(pattern)\n\n self.assertEqual(data['path'], '/a-view/')\n self.assertEqual(data['callback'], MockApiView)\n self.assertEqual(data['pattern'], pattern)", "def getRoutes(self):\n pass", "def route(self):\n pass", "def getRoutes(request):\n routes = {\n 'Item list': '/api/v1/items/',\n 'Item details': '/api/v1/item/<int:pk>/',\n\n 'JWT': '/api/v1/users/login/',\n }\n\n return Response(routes)", "def get_controller_parameters(self) -> PhyPropMapping:\n\n return {var: getattr(self, var) for var in self._controller_params}", "def dispatch(environ, 
start_response):\n url_path = environ['PATH_INFO']\n print environ['PATH_INFO']\n if(url_path == '/alarms'):\n content = app.alarms(environ, start_response)\n\treturn content\n if(url_path == '/enodes'):\n content = app.enodeb(environ, start_response)\n return content\n if(url_path == '/perf'):\n content = app.perf(environ, start_response)\n return content\n if(url_path == '/hoa_son'):\n content = app.hoa_son(environ, start_response)\n return content\n if(url_path == '/hoa_w_son'):\n content = app.hoa_w_son(environ, start_response)\n return content\n if(url_path == '/anrs'):\n content = app.ANR(environ, start_response)\n return content\n if(url_path == '/post'):\n content = app.post(environ, start_response)\n return content\n else:\n\tcontent = app.application2(environ,start_response)\n\treturn content", "def handler_mappings(self):\n return {}", "def uri_dispatch(uri):\n\n return uri_dispatch_map[os.path.splitext(uri)[1]]", "def lambda_handler(event, context):\n\n retval = {}\n\n # retrieve event information (i.e. station name and direction)\n station = get_origin_name(event)\n destination = get_destination(event)\n query_direction = get_direction(event).title()\n\n # finds abbreviation for origin and dest station\n query_orig = get_station_abbr(station)\n if destination:\n query_dest = get_station_abbr(destination)\n return dest_route(query_orig, query_dest, station, destination)\n\n else:\n return direction_route(query_orig, query_direction, station)", "def get_view_by_introspector(request, route):\n introspector = request.registry.introspector\n route_intr = introspector.get('routes', route.name)\n\n related_intr = introspector.related(route_intr)\n if related_intr is None:\n return None\n\n for related in related_intr:\n print \"related\", related\n if related.category_name == 'views':\n view_func = related['callable']\n if isinstance(view_func, static_view):\n # Lets skip over static views\n continue\n if related['attr']:\n view_action = \".\".join([view_func.__module__, view_func.__name__, related['attr']])\n else:\n view_action = \".\".join([view_func.__module__, view_func.__name__])\n return view_action", "def view_function(*args, **kwargs):\n\n res = {}\n status = 200\n\n try:\n from apis import apis\n url_rule = request.url_rule.rule\n apis_keys = [a[1:] for a in apis.keys()]\n url_rule_splitted = [a for a in url_rule.split(\"/\") if a in apis_keys]\n blueprint = url_rule_splitted[-1]\n blueprint = \"/\" + blueprint\n\n controller_function = apis[blueprint].functions[url_rule]\n res, status = controller_function(args, kwargs, request=request)\n\n except Exception as exc:\n # TODO: log error\n print(exc)\n\n res['error'] = True\n status = 400\n\n return res, status", "def serve(self, event: Dict) -> Union[MSG_RETURN, None]:\n raw_msg = event['content']['body']\n for k in self.routes.keys():\n m = re.search(k, raw_msg, re.IGNORECASE)\n\n if m:\n\n matches = m.groupdict()\n route = matches.get('route')\n msg = matches.get('msg')\n\n func = self.routes.get(k)\n\n if func:\n\n logger.info(\n (\n 'matched route %s '\n 'with msg %s '\n 'from %s '\n 'and triggered \"%s\"'\n ),\n route, msg, raw_msg, func.__name__\n )\n\n return func(route, msg, event)\n\n return None", "def __create_routes__(self):\n self.app.add_url_rule('/', 'main_page', self.main_page)\n self.app.add_url_rule('/day', 'day', self.get_current_iteration, methods=['GET'])\n self.app.add_url_rule('/traders', 'traders', self.register_trader, methods=['POST'])\n self.app.add_url_rule('/traders/<id>', 'trader', 
self.get_trader_state, methods=['GET'])\n self.app.add_url_rule('/stock/price', 'price', self.get_stock_price, methods=['GET'])\n self.app.add_url_rule('/stock/history', 'history', self.get_stock_price_history, methods=['GET'])\n self.app.add_url_rule('/stock/buy', 'buy', self.place_buy_order, methods=['POST'])\n self.app.add_url_rule('/stock/sell', 'sell', self.place_sell_order, methods=['POST'])\n self.app.add_url_rule('/simulation/step', 'step', self.market_session_step, methods=['POST'])\n self.app.add_url_rule('/simulation/run', 'run', self.run_simulation, methods=['POST'])", "def get_view(self, request, *args, **kwargs):\n\n if request.method.lower() in self.http_method_names:\n handler = getattr(self, request.method.lower(), \n self.http_method_not_allowed)\n else:\n handler = self.http_method_not_allowed\n\n return handler", "def api_index():\n func_list = {}\n for rule in app.url_map.iter_rules():\n if rule.endpoint != 'static':\n func_list[rule.rule] = app.view_functions[rule.endpoint].__doc__\n return jsonify(func_list)", "def controller(code):\n\n def register_controller(func):\n CONTROLLERS[code] = func\n return func\n\n return register_controller", "def __init__(self, recipes, decode_param_from=None, custom_handlers=None):\n\n if not recipes or not isinstance(recipes, list):\n logger.error('Unsupported _functions type! Something went wrong!')\n\n # Get required functions\n self.functions = [] # {func: func_obj, func_params: (params), fields=[]}\n\n for _func in recipes:\n # Check the syntax of provided function\n\n # Case: handler_name\n if match(r'(^[a-zA-Z0-9_-]{3,20}$)', _func, IGNORECASE):\n pass\n\n # Case: handler_name<param>XyZ<field>AbC\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<param>.{1,512}<field>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<param>XyZ<field>AbC<rfield>YzX\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<param>.{1,512}<field>.{1,512}<rfield>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<param>XyZ\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<param>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<field>AbC\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<field>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<param>AbC<rfield>XXX\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<param>.{1,512}<rfield>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<field>AbC<rfield>XXX\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<field>.{1,512}<rfield>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<rfield>ABCD\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<rfield>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: Syntax error\n else:\n logger.error('Syntax Error. 
Function: %s' % _func)\n logger.error(\n 'The example syntax of registry handler function shuld be: \\n\"-rh function_name<param>param1<param>param2<field>field_name_to_process<rfield>output_field_name\" (<param>,<field> and <rfield> are optional and depends on given function)\\nUse -prh for more details')\n exit(ERR_PROVIDER_INCORRECT_FUNCTION_SYNTAX)\n\n\n _func_name = ''\n _func_params = None\n _func_fields = None\n _func_output_fields = None\n\n # Get function, parameter(s) and fields (if specified)\n # Get _func_name\n _func_name, separator, _ = _func.partition('<')\n _func_name = _func_name.lower()\n\n if '<rfield>' in _func:\n _func, _, _func_output_fields = _func.partition('<rfield>')\n _func_output_fields = _func_output_fields.split(';')\n map(str.strip, _func_output_fields)\n\n if '<field>' in _func:\n _func, _, _func_fields = _func.partition('<field>')\n _func_fields = _func_fields.split(';')\n map(str.strip, _func_fields)\n\n if '<param>' in _func:\n _func, _, _func_params = _func.partition('<param>')\n _func_params = _func_params.split(';')\n map(str.strip, _func_params)\n\n if decode_param_from:\n if decode_param_from.lower() == 'base64':\n _func_params = list(map(base64.b64decode, _func_params))\n _func_params = list(map(bytes.decode, _func_params))\n else:\n logger.error('Unable to create a registry handler: \"%s\"\\n'\n 'Function: \"%s\"\\n'\n 'Unsupported param encoding: \"%s\"' %\n (_func_name, _func, decode_param_from))\n return None\n\n _func_params = tuple(_func_params)\n\n try:\n if not custom_handlers:\n func_class = getattr(handlers, _func_name)\n else:\n try:\n func_class = getattr(handlers, _func_name)\n except AttributeError:\n func_class = getattr(custom_handlers, _func_name)\n\n func_obj = getattr(func_class, _func_name)\n\n # if _func_output_fields is None:\n # _func_output_fields = _func_fields\n # pass\n\n self.functions.append({'func': func_obj, 'func_params': _func_params, 'func_fields': _func_fields,\n 'result_fields': _func_output_fields})\n\n except Exception as msg:\n logger.warning('Unable to get function object for: %s. 
Error: %s' % (_func_name, msg))\n logger.error('Unsupported Registry Handler: \"%s\"' % _func_name)\n\n self.default_fields = [registry_provider.registry_value.attributes.value_content]", "def interface(request):\n return request.param", "def prepareController(self):\n pass", "def test_lti20_rest_good_dispatch(self):\r\n for ginput, expected in self.GOOD_DISPATCH_INPUTS:\r\n self.assertEquals(self.xmodule.parse_lti_2_0_handler_suffix(ginput), expected)", "def re_path(route, view, **kwargs):\n return view, route, kwargs", "def _get_view_and_args(path, request):\n # Let's use urlconf from request object, if available:\n urlconf = getattr(request, \"urlconf\", settings.ROOT_URLCONF)\n resolver = RegexURLResolver(r\"^/\", urlconf)\n return resolver.resolve(path)", "def get_handler(self, name):\n return self.params[name].value_handler", "def urlpath( request, *args, **kwargs ):", "def parse_request(request):\n\n method, path, version = request.split(\"\\r\\n\")[0].split(\" \")\n if method != \"GET\":\n raise NotImplementedError\n return path", "def GoToControllerProperties(self):\n\t\t#print \"Searching for Controller Properites option ...\"\n\t\tkey_sent = False\n\t\tkPress = 0\n\t\tpattern = re.compile('Press <Ctrl><R> to Run Configuration Utility')\n\t\t#tsp,self.timeout = 0,5*60\n\t\tflag = self.Press_Ctrl_R(8*60)\n\t\tif not flag:\n\t\t\tself.reset()\n\t\t\tself.serial.reboot()\n\t\t\tflag = self.Press_Ctrl_R(8*60)\n\t\t\tif not flag:\n\t\t\t\treturn False\n\n\t\tself.serial.update()\n\t\tarea = self.serial.buffer.dump()\n\t\treturn area", "def _handlers(self) -> tuple:\n return self._classname2handlers[self.deco_class.__name__]", "def any_string_method(request):\n return request.param", "def _route_get(self):\n if self.path == '/status':\n self._create_status()\n else:\n self._create_method_not_allowed()", "def routes(self, body):\n pass", "def _get_route_map(self):\n return self.__route_map", "def extras_router(request, query):\n for pattern, func, req in patterns:\n match = pattern.match(query)\n if match and req:\n return func(request, **match.groupdict())\n elif match:\n return func(**match.groupdict())\n\n # Returns an Unimplemented response if no pattern matches\n return json_response(status=\"Unimplemented\", \n status_code=501, \n error=\"\", \n content=\"query: %s\" % query)", "def extras_router(request, query):\n for pattern, func, req in patterns:\n match = pattern.match(query)\n if match and req:\n return func(request, **match.groupdict())\n elif match:\n return func(**match.groupdict())\n\n # Returns an Unimplemented response if no pattern matches\n return json_response(status=\"Unimplemented\", \n status_code=501, \n error=\"\", \n content=\"query: %s\" % query)", "def yieldroutes(func):\r\n import inspect # Expensive module. 
Only import if necessary.\r\n path = '/' + func.__name__.replace('__','/').lstrip('/')\r\n spec = inspect.getargspec(func)\r\n argc = len(spec[0]) - len(spec[3] or [])\r\n path += ('/:%s' * argc) % tuple(spec[0][:argc])\r\n yield path\r\n for arg in spec[0][argc:]:\r\n path += '/:%s' % arg\r\n yield path", "def router(paramstring):\r\n # Parse a URL-encoded paramstring to the dictionary of\r\n # {<parameter>: <value>} elements\r\n params = dict(parse_qsl(paramstring))\r\n # Check the parameters passed to the plugin\r\n logger.info('Handling route params -- {}'.format(params))\r\n if params:\r\n title = params.get('title')\r\n uri = params.get('uri', None)\r\n action = params['action']\r\n country_code = params.get('country_code', None)\r\n global _country_code\r\n if country_code:\r\n _country_code = country_code\r\n\r\n if action == 'programs':\r\n list_programs(title, uri)\r\n\r\n elif action == 'program_details':\r\n list_program_details(title, uri)\r\n\r\n elif action == 'episodes':\r\n list_episodes(title, uri)\r\n\r\n elif action == 'seasons':\r\n list_seasons(title, uri)\r\n\r\n elif action == 'play':\r\n # Play a video from a provided URL.\r\n play_video(uri)\r\n\r\n elif action == 'search':\r\n list_search()\r\n\r\n elif action == 'channels':\r\n list_channels(title, uri)\r\n\r\n else:\r\n # If the provided paramstring does not contain a supported action\r\n # we raise an exception. This helps to catch coding errors,\r\n # e.g. typos in action names.\r\n raise ValueError('Invalid paramstring: {0}!'.format(paramstring))\r\n\r\n else:\r\n # List all the channels at the base level.\r\n list_channels()", "def get_controllers():\n return pyglet.input.get_controllers()", "def _find_controller(self, controller):\n if controller is None:\n return None\n # If the output specified is a string controller e.g. 
\"WelcomeController@show\"\n elif isinstance(controller, str):\n if \"@\" in controller:\n controller_path, controller_method_str = controller.split(\"@\")\n else:\n controller_path = controller\n controller_method_str = \"__call__\"\n\n controller_path = modularize(controller_path).split(\".\")\n if len(controller_path) > 1:\n controller_name = controller_path.pop()\n prefix_path = \".\".join(controller_path)\n else:\n controller_name = controller_path[0]\n prefix_path = \"\"\n # build a list of all locations where the controller can be found\n # if the controller is defined such as auth.WelcomeController, append the prefix path to\n # the locations\n locations = list(\n map(\n lambda loc: f\"{loc}.{removeprefix(prefix_path, loc)}\"\n if prefix_path\n else loc,\n self.controllers_locations,\n )\n )\n try:\n self.controller_class = Loader.find(\n Controller, locations, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # controller is an instance with a bound method\n elif hasattr(controller, \"__self__\"):\n _, controller_method_str = controller.__qualname__.split(\".\")\n self.controller_instance = controller.__self__\n\n # it's a class or class.method, we don't have to find it, just get the class\n elif hasattr(controller, \"__qualname__\"):\n if \".\" in controller.__qualname__:\n controller_name, controller_method_str = controller.__qualname__.split(\n \".\"\n )\n else:\n controller_name = controller.__qualname__\n controller_method_str = \"__call__\"\n\n try:\n self.controller_class = Loader.get_object(\n controller.__module__, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # it's a controller instance\n else:\n self.controller_instance = controller\n controller_method_str = \"__call__\"\n\n # Set the controller method on class. 
This is a string\n self.controller_method = controller_method_str", "def _dispatch(self, request):\n endpoint, values, context, param = self.read_request(request)\n if endpoint == 'init':\n parse_config(param.get('config', {}))\n return self.init_info()\n else:\n if endpoint == 'provider':\n return self.call_provider(context, values['name'],\n values['action'], param)\n elif endpoint == 'handler':\n return self.call_handler(context, values['name'], param)\n else:\n return self.call_func(context, endpoint, values['name'], param)", "def _handle_path(path: str) -> Callable:\n parts = Path(path).parts\n\n result = _cogs\n for part in parts:\n result = result[part]\n\n return result", "def u2handlers(self):\n return []", "def list_routes():\n for rule in sorted(application.url_map.iter_rules(), key=lambda r: r.rule):\n print(\"{:10} {}\".format(\", \".join(rule.methods - set(['OPTIONS', 'HEAD'])), rule.rule))", "def path_info_get(request, name, default_value=_NOT_SET, validator=None):\n return _XXX_get(request.match_info, name, default_value, validator)", "def _get_app_endpoints():\n endpoints = {\n (r'/', handlers.HeartbeatRequestHandler),\n (r'/1/issue/retrieve', handlers.RetrieveRequestHandler),\n (r'/1/issue/search', handlers.SearchRequestHandler),\n (r'/1/issue/search/setup', handlers.SearchSetupRequestHandler),\n }\n\n log(\"Endpoint to handler mappings:\")\n for url, handler in sorted(endpoints, key=lambda ep: ep[0]):\n log(\"{0} ---> {1}\".format(url, handler))\n\n return endpoints", "def _get_handler(self, transfer):\n if transfer.direction == 'pushToVoSpace':\n method = \"push_to_vospace\"\n elif transfer.direction == 'pullToVoSpace':\n method = \"pull_to_vospace\"\n elif transfer.direction == 'pullFromVoSpace':\n method = \"pull_from_vospace\"\n elif transfer.direction == 'pushFromVoSpace':\n method = \"push_from_vospace\"\n elif not(transfer.keepBytes):\n method = \"move_node\"\n elif transfer.keepBytes:\n method = \"copy_node\"\n else:\n raise VOSpaceError(500, \"One of the specified parameters is invalid.\")\n return method", "def get_routes(self):\n return [\n (route, handler.handler_class, handler.init_kwargs)\n for route, handler in self._routes.iteritems()\n ]", "def get_routes():\n output = [f'{\"S. 
No.\":6}\\t{\"Endpoint\":50}\\t{\"Method\":8}\\n']\n\n for index, rule in enumerate(app.url_map.iter_rules()):\n for i, method in enumerate(rule.methods):\n output.append(f'{index + 1 if i == 0 else \"\":<6}\\t{rule.rule:50}\\t{method:10}')\n\n try:\n output.append(f'\\n{eval(rule.endpoint).__doc__}\\n')\n except NameError:\n output.append('\\n')\n\n return Response('\\n'.join(output), 200, mimetype='text/plain')", "def test_get_handler(self):\n class DummyHandler(handlers.BaseHandler):\n pass\n\n route = RouteFactory.build()\n route.handler_class = DummyHandler\n\n handler = route.get_handler()\n self.assertIsInstance(handler, DummyHandler)\n self.assertEqual(handler.route, route)", "def _find_url_handler(self, req):\n # First try - lookup in explicit (non parameterized URLs)\n if req.path in self.explicit_url_map:\n return self.explicit_url_map[req.path]\n # Second try - strip last path segment and lookup in another map\n idx = req.path.rfind(b'/') + 1\n path2 = req.path[:idx]\n if len(path2) > 0 and path2 in self.parameterized_url_map:\n # Save parameter into request\n req._param = req.path[idx:].decode()\n return self.parameterized_url_map[path2]\n\n if self.catch_all_handler:\n return self.catch_all_handler\n\n # No handler found\n return (None, None)", "def _GetPaths(self) -> Dict[str, Dict[Any, Any]]:\n\n # The `Paths Object` `paths` field of the root `OpenAPI Object`.\n paths_obj: DefaultDict[str, Dict[Any, Any]] = collections.defaultdict(dict)\n\n router_methods = self.router.__class__.GetAnnotatedMethods()\n for router_method in router_methods.values():\n # To extract optional path parameters, all the routes associated with this\n # router method must be analysed and grouped.\n ungrouped_routes = []\n for http_method, path, _ in router_method.http_methods:\n path_components = path.split(\"/\")\n # Remove any empty strings from the list of path components.\n path_components = [comp for comp in path_components if comp]\n\n ungrouped_routes.append([http_method] + path_components)\n\n grouped_routes = _GetGroupedRoutes(ungrouped_routes)\n for route_info in grouped_routes:\n # Components (comps) are URL components, including Werkzeug path\n # arguments such as `<client_id>` or `<path:file_path>`.\n route_comps, req_path_param_comps, opt_path_param_comps = route_info\n http_method = route_comps[0]\n path = \"/\".join(route_comps[1:])\n\n # Separate the route parameters into path params, query params and\n # request body params.\n path_params, query_params, body_params = self._SeparateFieldsIntoParams(\n http_method, path, router_method.args_type)\n\n # Separate the path params into required and optional path params.\n # First, extract path param names by normalizing the Werkzeug path arg\n # components to OpenAPI path args and remove the surrounding brackets.\n req_path_param_names = [\n _NormalizePathComponent(comp)[1:-1] for comp in req_path_param_comps\n ]\n opt_path_param_names = [\n _NormalizePathComponent(comp)[1:-1] for comp in opt_path_param_comps\n ]\n req_path_params = []\n opt_path_params = []\n for path_param in path_params:\n path_param_name = casing.SnakeToCamel(path_param.name)\n if path_param_name in req_path_param_names:\n req_path_params.append(path_param)\n elif path_param_name in opt_path_param_names:\n opt_path_params.append(path_param)\n else:\n raise AssertionError(\n f\"Path parameter {path_param_name} was not classified as \"\n f\"required/optional.\")\n\n normalized_path = _NormalizePath(path)\n path_obj = paths_obj[normalized_path]\n 
path_obj[http_method.lower()] = (\n self._GetOperationDescription(router_method, req_path_params,\n opt_path_params, query_params,\n body_params))\n\n return paths_obj", "def get_verb_handler_extensions():\n extensions = instantiate_extensions(__name__)\n for name, extension in extensions.items():\n extension.VERB_HANDLER_NAME = name\n return order_extensions_by_name(extensions)", "def _dispatch(req):\n match = req.environ['wsgiorg.routing_args'][1]\n if not match:\n return webob.exc.HTTPNotFound()\n app = match['controller']\n return app", "def route_accepted(self, prefix, next_hop, as_path):", "def test_list_namespaced_route(self):\n pass", "def test_path(self):\n base_handler_path = 'conman.routes.handlers.BaseHandler'\n self.assertEqual(BaseHandler.path(), base_handler_path)", "def apply(self, callback, route):", "def show_routes(self):\n routelist= [(handler.regex.pattern, handler.handler_class) for handler in self.handlers[0][1]]\n print(55*\"-\")\n print(\" Routing table (order matters) :\")\n print(55*\"-\")\n for elem in routelist:\n print('{0:<20} {1:<30} '.format(elem[0], str(elem[1])))", "def get_handlers(self):\n svs = []\n paths = self.get_paths()\n for p in paths:\n s = re.sub(r\"(?<={)\\w+}\", \".*\", p).replace(\"{\", \"\")\n o = re.sub(r\"(?<=<)\\w+\", \"\", s).replace(\"<\", \"\").replace(\">\",\"\").replace(\"&\", \"\").replace(\"?\", \"\")\n svs.append((o, self))\n\n return svs", "def routeunpack(value):\n return str(value).replace(\"!\",\"/\")", "def __call__(self, environ, start_response):\n\t\tpath_info = environ.get('PATH_INFO', '')\n\t\turl = request.construct_url(environ, with_query_string=False)\n\n\t\t# Redirect /introspector to /introspector/ to ensure consistent URLs\n\t\tif path_info == '':\n\t\t\tstart_response('302 Found', [('Location', url + '/')])\n\t\t\treturn [ '' ]\n\n\t\t# Index page\n\t\tif path_info == '/':\n\t\t\tstart_response('200 OK', [('Content-Type', 'text/html')])\n\t\t\treturn [ self.introspect(url, \"chiral.web.introspector\", \"index\") ]\n\n\t\t# Parse the URL: [/introspector/]module/namespace/item\n\t\tpath = path_info.split('/')[1:]\n\t\tif len(path) < 3:\n\t\t\tstart_response('404 Not Found', [('Content-Type', 'text/html')])\n\t\t\treturn [ \"404 Not Found\" ]\n\n\t\tmodule, namespace, item = path\n\t\tscript_name = environ.get('SCRIPT_NAME', '') + \"/\"\n\n\t\tif module not in sys.modules or not hasattr(sys.modules[module], '_chiral_introspection'):\n\t\t\tstart_response('404 Not Found', [('Content-Type', 'text/html')])\n\t\t\treturn [ \"404 Not Found\" ]\n\n\t\t# Commands are slightly different: they must be POST, and the namespace has \"cmd_\" at the beginning\n\t\tif environ[\"REQUEST_METHOD\"] == \"POST\":\n\t\t\ttry:\n\t\t\t\tifunc = getattr(sys.modules[module]._chiral_introspection(), \"cmd_\" + namespace)\n\t\t\texcept AttributeError:\n\t\t\t\tstart_response('404 Not Found', [('Content-Type', 'text/html')])\n\t\t\t\treturn [ \"404 Not Found\" ]\n\n\t\t\tnext_url = ifunc(item)\n\t\t\tstart_response('302 Found', [('Location', script_name + next_url)])\n\t\t\treturn [ \"\" ]\n\n\t\t# Prevent shenanigans involving commands sent as GET\n\t\tif namespace.startswith(\"cmd_\"):\n\t\t\tstart_response('404 Not Found', [('Content-Type', 'text/html')])\n\t\t\treturn [ \"404 Not Found\" ]\n\t\t\t\t\t\n\t\tout_string = self.introspect(environ.get('SCRIPT_NAME', '') + '/', module, namespace, item)\n\n\t\tif out_string is None:\n\t\t\tstart_response('404 Not Found', [('Content-Type', 'text/html')])\n\t\t\treturn [ \"404 Not Found\" 
]\n\n\t\tstart_response('200 OK', [('Content-Type', 'text/html')])\n\t\treturn [ out_string ]", "def _get_static_route_map(self):\n return self.__static_route_map", "def getKey(item):\n return item.get_router()", "def routes():\n import urllib.request, urllib.parse, urllib.error\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, url))\n output.append(line)\n for line in sorted(output):\n print (line)", "def _get_target():\n from webdispatch.mixins import URLMapperMixin\n return URLMapperMixin", "def methods(domain, resource, pathtype, param=None):\n ret = {}\n if pathtype == 'additional_lookup':\n method = 'GET'\n ret[method] = {}\n ret[method]['label'] = get_label(domain, pathtype, method)\n ret[method]['params'] = schema(resource, param)\n else:\n key = '{0}_methods'.format(pathtype)\n methods = resource[key]\n for method in methods:\n ret[method] = {}\n ret[method]['label'] = get_label(domain, pathtype, method)\n ret[method]['params'] = []\n if method == 'POST':\n ret[method]['params'].extend(schema(resource))\n elif method == 'PATCH':\n ret[method]['params'].append(identifier(resource))\n ret[method]['params'].extend(schema(resource))\n elif pathtype == 'item':\n ret[method]['params'].append(identifier(resource))\n return ret", "def _match_request(request):\n for ((version, method), pattern) in PATTERNS.items():\n if request.method != method:\n continue\n match = pattern.match(request.path_info)\n if match is None:\n continue\n subject_id = match.group(1)\n # Ensure the subject id we got looks like an subject id to filter\n # out a URI like /subjects/detail. 
See LP Bug #879136\n if subject_id != 'detail':\n return (version, method, subject_id)", "def _get(self):\n return self.request(method=\"get\", path=self.router.fields)", "def _handlers_map(self):\n return {\n Segment.CONSTANT: self._build_push_constant,\n Segment.TEMP: self._handle_temp,\n Segment.STATIC: self._handle_static,\n Segment.POINTER: self._handle_pointer,\n Segment.LCL: self._handle_generic_segment,\n Segment.ARG: self._handle_generic_segment,\n Segment.THIS: self._handle_generic_segment,\n Segment.THAT: self._handle_generic_segment,\n Command.ADD: self._build_add,\n Command.SUB: self._build_sub,\n Command.EQ: self._build_eq,\n Command.GT: self._build_gt,\n Command.NOT: self._build_not,\n Command.NEG: self._build_neg,\n Command.AND: self._build_and,\n Command.OR: self._build_or,\n }", "def scan(controller, path):", "def route_multiplexer(methods_to_viewfunc):\n def multiplexer():\n viewfunc = methods_to_viewfunc.get(request.method)\n if not viewfunc:\n raise Exception(\"No viewfunc found somehow?\")\n return viewfunc()\n multiplexer.methods_to_viewfunc = methods_to_viewfunc\n return multiplexer", "async def rest_handler(request):\n # verify the request\n valid, reason = await verify_rest_request(request)\n if not valid:\n return generate_error(reason, 400)\n json = await request.json()\n # get the parameters\n cmd = json['cmd']\n params = json['params']\n # pass off to the correct target handler\n if cmd == 'find':\n response = await _find_handler(request, params)\n elif cmd == 'stats':\n response = await _stats_handler(request, params)\n elif cmd == 'download':\n response = await _download_handler(request, params)\n elif cmd == 'upload':\n response = await _upload_handler(request, params)\n elif cmd == 'provision':\n response = await _provision_handler(request, params)\n # return the response we get back fgrom the handler\n return response" ]
[ "0.5602386", "0.5543036", "0.5451026", "0.53584725", "0.5357934", "0.5338533", "0.5338533", "0.52820855", "0.5260781", "0.5229428", "0.5228598", "0.52258754", "0.520842", "0.50448126", "0.50413436", "0.49797627", "0.495311", "0.49384397", "0.49170753", "0.49129686", "0.4904387", "0.48919624", "0.4885884", "0.4845568", "0.4805337", "0.4791943", "0.477965", "0.47780785", "0.47748223", "0.47533348", "0.47385854", "0.47360256", "0.47266263", "0.47025004", "0.46872053", "0.4671119", "0.46687654", "0.46639556", "0.46622217", "0.46589231", "0.46581662", "0.4654738", "0.46259525", "0.46227178", "0.46221116", "0.4610356", "0.46079522", "0.46000326", "0.45920274", "0.4584518", "0.45660958", "0.4565144", "0.45650038", "0.4563449", "0.45612755", "0.45593", "0.4559282", "0.45513117", "0.45426786", "0.4541341", "0.4538496", "0.45341066", "0.45341066", "0.45323947", "0.45231876", "0.45187816", "0.4517155", "0.451243", "0.45104104", "0.45076355", "0.4504118", "0.4504067", "0.45016995", "0.4494437", "0.44912332", "0.44735917", "0.44603086", "0.4458254", "0.4455722", "0.44469523", "0.4425038", "0.4422542", "0.44208935", "0.4417688", "0.4416851", "0.4413624", "0.4406633", "0.43931633", "0.43925703", "0.43869948", "0.4386511", "0.43762863", "0.4376038", "0.43729234", "0.4362048", "0.43616393", "0.43607906", "0.43568102", "0.43544874", "0.4345757" ]
0.5382797
3
It returns all the Classes marked to be used by the "use" decorator
def routers():
    routers = []
    for app_controller in __app_controllers__:
        routers.append(app_controller.router())
    return routers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def classes(self):\n if self.classname:\n return [self.classname]\n return []", "def get_classes(self):\n return", "def return_classes(self):\n\n\t\t \n\t\t \n\t\treturn self.classes", "def classes(self):\n raise NotImplementedError(\"Please implement this yourself.\")", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def iter_spider_classes(module):\n ...", "def classes(self):\r\n return self._classes", "def get_classes(self):\n return self._classes", "def _classesToCheck(self, cls):\r\n yield cls\r\n yield from inspect.getmro(cls)", "def classes(self):\n return list(self._classes_generator())", "def _classes_(cls):\n for base_cls in cls.__bases__:\n # Avoid infinite loop\n if base_cls == Sandbox:\n continue\n\n yield base_cls", "def classes(self):\n return self._.d", "def list_kg_classes():\n return [\n obj\n for name, obj in inspect.getmembers(sys.modules[__name__])\n if inspect.isclass(obj) and issubclass(obj, KGObject) and obj.__module__.startswith(__name__)\n ]", "def class_exts(cls):\n return set()", "def get_class_list(self):\r\n modules = []\r\n classes = []\r\n path = os.path.dirname(os.path.abspath(__file__))\r\n measures_path = os.path.join(path, 'measures')\r\n package_path = 'ruler.measures'\r\n\r\n # List through the modules in the specified package, ignoring __init__.py, and append them to a list.\r\n for f in os.listdir(measures_path):\r\n if f.endswith('.py') and not f.startswith('__init__'):\r\n modules.append('{0}.{1}'.format(package_path, os.path.splitext(f)[0]))\r\n\r\n module_references = []\r\n\r\n # Attempt to import each module in turn so we can access its classes\r\n for module in modules:\r\n module_references.append(importlib.import_module(module))\r\n\r\n # Now loop through each module, looking at the classes within it -\r\n # and then append each class to a list of valid classes.\r\n for module in module_references:\r\n for name, obj in inspect.getmembers(module):\r\n if inspect.isclass(obj):\r\n classes.append((obj.__name__, obj))\r\n\r\n return classes", "def getClasses(self):\n self._process()\n return self._sets", "def process_class_list(self, module, classes):", "def get_all():\n temp = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n temp = [x for x in temp if x[0] not in [\"Method\", \"Radpro\"]]\n return temp", "def find_classes(cls, cutoff_class=None):\n cutoff_class = cutoff_class or Interface\n module = sys.modules[__name__]\n for ni, vi in inspect.getmembers(module, inspect.isclass):\n if issubclass(vi, cutoff_class) and vi is not cutoff_class:\n yield vi", "def __get_available_classes(self, package):\n modules = []\n classes = []\n \n # List through the modules in the specified package, ignoring __init__.py, and append them to a list.\n for f in os.listdir(package):\n if f.endswith('.py') and not f.startswith('__init__'):\n modules.append('{0}.{1}'.format(package, os.path.splitext(f)[0]))\n \n module_references = []\n \n # Attempt to import each module in turn so we can access its classes\n for module in modules:\n module_references.append(importlib.import_module(module))\n \n # Now loop through each module, looking at the classes within it - and then append each class to a list of valid classes.\n for module in module_references:\n for name, 
obj in inspect.getmembers(module):\n if inspect.isclass(obj):\n classes.append((obj.__name__, obj))\n \n return classes", "def uses(self):\n uses = []\n for inst in self.module.instructions():\n if inst.is_using(self):\n uses.append(inst)\n return uses", "def get_classes(mod):\n return [\n key\n for key, _ in inspect.getmembers(mod, inspect.isclass)\n if key[0].isupper()\n ]", "def get_class_list(self):\n t = []\n for cls in self.classes:\n if not self.is_opaque(cls.classobj):\n t.append(cls)\n elif cls.parents or cls.childs:\n t.append(cls)\n \n return t", "def classes(self):\n return self.browser.classes(self)", "def get_all():\n temp = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n return [i[1] for i in temp if i[0] != \"Aggregator\"]", "def test___get_all_classes():\n config = {\"plugins\": [\"tests.mock_plugin\"]}\n classes = r._get_all_classes(config, r.DataSource)\n assert \"food\" in classes\n classes = r._get_all_classes(config, r.DataSink)\n assert \"food\" in classes", "def get_class_defs(self):\n return list(self._get_class_defs().values())", "def discover_classes(\n package,\n cls_match_func=trivial,\n module_match_func=trivial,\n):\n for module in discover_modules(package, module_match_func):\n # Check all the classes in that module\n for _, imported_class in inspect.getmembers(module, inspect.isclass):\n # Don't include things that are only there due to a side-effect of\n # importing\n if imported_class.__module__ != module.__name__:\n continue\n\n if cls_match_func(imported_class):\n yield imported_class", "def classes(attrs):\n return attrs.get('class', '').split()", "def class_names(self):\n raise NotImplementedError", "def _get_all(cls):\r\n # BaseProvider does so have __subclassess__. pylint: disable-msg=no-member\r\n return {klass.NAME: klass for klass in BaseProvider.__subclasses__()}", "def function_names(self):\n # Only select classes that extend the base class\n return self._classes.keys()", "def gen_extractor_classes():\n from .extractors import _ALL_CLASSES\n\n return _ALL_CLASSES", "def get_classes(self, include_ref=True):\n defs = self._get_class_defs()\n ans = {}\n ans.update(defs)\n if include_ref:\n refs = self._get_class_refs()\n ans.update(refs)\n return list(ans.values())", "def discover(\n package: ModuleType,\n cls_match_func: Callable[[type[Any]], bool],\n) -> set[type[Any]]:\n matched_classes = set()\n\n for _, module_name, _ in pkgutil.walk_packages(\n package.__path__,\n prefix=package.__name__ + '.',\n ):\n module = __import__(module_name, fromlist=['__trash'], level=0)\n\n # Check all the classes in that module\n for _, imported_class in inspect.getmembers(module, inspect.isclass):\n # Don't include things that are only there due to a side-effect of\n # importing\n if imported_class.__module__ != module.__name__:\n continue\n\n if cls_match_func(imported_class):\n matched_classes.add(imported_class)\n\n return matched_classes", "def classes(class_name):\r\n\td = {}\r\n\tfor k, v in class_name.__dict__.items():\r\n\t\tif not (k.startswith('__') and k.endswith('__')):\r\n\t\t\td[k] = v\r\n\treturn d", "def get_subclasses(self, class_name):\n return class_name.__subclasses__()", "def get_meta_classes(self):\n return self.meta_classes.values()", "def get_classes(engine: Engine) -> Dict[str, PlayableClass]:\n\n classes = engine.get_classes()\n assert classes is not None\n\n class_objs = {}\n for class_idx_data in classes:\n class_data = PlayableClass(engine, class_idx_data[\"id\"])\n class_objs[class_data.to_serialize[\"slug\"]] = 
class_data\n return class_objs", "def get_all_lr_classes():\n lr_classes = {}\n for name, obj in inspect.getmembers(sys.modules[__name__]):\n if inspect.isclass(obj) and name != 'ABC':\n lr_classes[name] = obj\n return lr_classes", "def list_all_classes(self):\n classes = list(self.extended_class_only_graph.nodes())\n classes = [SchemaClass(_cls, self) for _cls in classes]\n return classes", "def GetClassBases(self,cls):\n name = \"\"\n for i in cls:\n if i != \")\":\n name+=i\n\n lst = name.split(\"(\")\n cls_lst = lst[-1].split(\",\")\n if cls_lst:\n return cls_lst\n else:\n return False", "def relevant_classifications(self):\n return self.relevant_classes", "def _get_all_classnames(\n module: ModuleType\n) -> List[str]:\n return list(map(lambda x: x[0], inspect.getmembers(module, inspect.isclass)))", "def get_all_classes_defined_in_module(module):\n for _cls in inspect.getmembers(module, inspect.isclass):\n if module.__name__ == _cls[1].__module__:\n yield _cls", "def getAllCls(cls):\n newlist = list(clslist)\n return newlist", "def classes(self) -> Iterable[GDScriptClass]:\n for item in self._classes_by_type_id.values():\n yield item", "def instance_classes(self) -> Sequence[str]:\n return pulumi.get(self, \"instance_classes\")", "def iter_cls(*classes, blacklist=tuple()):\n for bases in permutations(classes):\n if bases not in blacklist:\n yield type('_'.join(c.__name__ for c in bases), bases, {})", "def _get_filter_classes_from_module(module_name):\n classes = []\n module = utils.import_object(module_name)\n for obj_name in dir(module):\n itm = getattr(module, obj_name)\n if _is_filter_class(itm):\n classes.append(itm)\n return classes", "def local_classes(self, classnames, typesets=frozenset(['cy', 'py'])):\n saved = {}\n for name in classnames:\n if 'c' in typesets and name in self.cython_ctypes:\n saved[name, 'c'] = _undot_class_name(name, self.cython_ctypes)\n if 'cy' in typesets and name in self.cython_cytypes:\n saved[name, 'cy'] = _undot_class_name(name, self.cython_cytypes)\n if 'py' in typesets and name in self.cython_pytypes:\n saved[name, 'py'] = _undot_class_name(name, self.cython_pytypes)\n self.clearmemo()\n yield\n for name in classnames:\n if 'c' in typesets and name in self.cython_ctypes:\n _redot_class_name(name, self.cython_ctypes, saved[name, 'c'])\n if 'cy' in typesets and name in self.cython_cytypes:\n _redot_class_name(name, self.cython_cytypes, saved[name, 'cy'])\n if 'py' in typesets and name in self.cython_pytypes:\n _redot_class_name(name, self.cython_pytypes, saved[name, 'py'])\n self.clearmemo()", "def _used_annotations(cls) -> set:\n return set(field.type for field in dataclasses.fields(cls))", "def get_classes(self, lines):\n result = []\n classes = self._split_lines(lines, Class.TITLE_MARKER)\n for c in classes:\n signature = self._get_group_title(c, Class.TITLE_MARKER)\n name, parent = self._split_title(signature)\n docstring = self.get_docstring(c)\n methods = self.get_methods(c)\n class_ = Class(name, parent, docstring, methods)\n if class_.is_public() or self.show_nonpublic:\n result.append(class_)\n return result", "def get_classes(self):\n query = read_query('structure exploration/classes')\n response = self._submit_query(query)\n\n return [elem['c']['value'].split('/')[-1] for elem in response]", "def list_class_names(clz, package):\n\n def isclz(obj):\n if inspect.isclass(obj):\n return issubclass(obj, clz) and not obj == clz\n return False\n\n module = importlib.import_module(package)\n\n return [name for name, _ in inspect.getmembers(module, 
isclz)]", "def gen_extractors():\n return [klass() for klass in gen_extractor_classes()]", "def classes(self):\n return str(self._classes)", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def getClasses(self):\n return self._call_java(\"getClasses\")", "def find_all(m, cls):\n return [node for node in ast.walk(m) if isinstance(node, cls)]", "def getWordClasses(self):\n it = self._call_java('getWordClasses').toIterator()\n result = []\n while (it.hasNext()):\n result.append(it.next().toString())\n return result", "def list_embedded_metadata_classes():\n return [\n obj\n for name, obj in inspect.getmembers(sys.modules[__name__])\n if inspect.isclass(obj) and issubclass(obj, EmbeddedMetadata) and obj.__module__.startswith(__name__)\n ]", "def get_classes(self):\n\n # Sort them.\n classes = ['Safe','Violence','Gun','Cold_Arms','Smoking','Kissing']\n classes = sorted(classes)\n\n # Return.\n if self.class_limit is not None:\n return classes[:self.class_limit]\n else:\n return classes", "def get_classes(source_or_file, prop='data'):\n tree = get_ast(source_or_file)\n lister = ClassLister().visit(tree)\n return getattr(lister, prop)", "def get_instance_classes():\n return Base_Instance.get_instance_classes()", "def __dir__(self) -> List[str]:\n return [*self.factories, *super().__dir__()]", "def _load_classes(self):\n classdocs = self._docset.get_classes()\n for classdoc in classdocs:\n files = [self._docmap[filedoc] for filedoc in classdoc.get_files()]\n classobj = Class(classdoc, files)\n self._docmap[classdoc] = classobj\n self._classes.add(classobj)", "def exts(self):\n return type(self).class_ext()", "def get_class_list_from_modulelist(self):\n class_list = []\n class_name_list = []\n for module in self.module_list:\n for name, obj in inspect.getmembers(module, inspect.isclass):\n if inspect.getmodule(obj) == module:\n class_list.append(obj)\n class_name_list.append(name)\n return class_list", "def obj_classes(self) -> ObjClassCollection:\n return self._obj_classes", "def mak(self):\n #print \"entering mak!!!\"\n for cl in self.classes:\n sage.misc.misc.inject_variable(repr(cl),cl, warn=False)\n #for long_name_for_class_that_probably_wont_be_in_global_namespace in self.classes:\n #if long_name_for_class_that_probably_wont_be_in_global_namespace != 0:\n #exec(repr(long_name_for_class_that_probably_wont_be_in_global_namespace) + \" = long_name_for_class_that_probably_wont_be_in_global_namespace\", locals(), where)", "def get_class_refs(self):\n return list(self._get_class_refs().values())", "def _find_decorators(self, decorator_class: Type[DecoratorType]) -> Generator[DecoratorType, None, None]:\n for decorator in 
self._decorators:\n if isinstance(decorator, decorator_class):\n yield decorator", "def register_classes():\n DiffuseCompChain.register_class()\n CatalogCompChain.register_class()\n DiffuseAnalysisChain.register_class()", "def all_operations():\n return OperationHandler().get_all_classes()", "def get_classes(self):\n out_classes = ()\n classes = super(NamedEntityRecognizerModel, self).get_classes()\n\n for c in classes:\n out_classes += (c[:2],)\n\n return ((self.outside_class, self.outside_class_display),) + out_classes", "def classes_(self):\n try:\n return self.encoder.classes_\n except:\n return self.classes", "def classes_(self):\n try:\n return self.encoder.classes_\n except:\n return self.classes", "def _get_all_loaded_classes(self):\n classes = {}\n for module in self.modules.values():\n for k,v in module.__dict__.items():\n # skip anything that's not a game class\n if not type(v) is type:\n continue\n base_classes = (game_object.GameObject, game_hud.GameHUD, game_room.GameRoom)\n # TODO: find out why above works but below doesn't!! O___O\n #base_classes = self.builtin_base_classes\n if issubclass(v, base_classes):\n classes[k] = v\n return classes", "def find(self):\n\n response = self.client.get(Classes.PATH_CLASSES)\n return response", "def standard_filters():\n classes = []\n filters_dir = __path__[0]\n for dirpath, dirnames, filenames in os.walk(filters_dir):\n relpath = os.path.relpath(dirpath, filters_dir)\n if relpath == '.':\n relpkg = ''\n else:\n relpkg = '.%s' % '.'.join(relpath.split(os.sep))\n for fname in filenames:\n root, ext = os.path.splitext(fname)\n if ext != '.py' or root == '__init__':\n continue\n module_name = \"%s%s.%s\" % (__package__, relpkg, root)\n mod_classes = _get_filter_classes_from_module(module_name)\n classes.extend(mod_classes)\n return classes", "def import_all_known_classes(debug=False):\r\n\r\n output = {}\r\n for cls in KnownClass.objects:\r\n if debug:\r\n print \"Importing %s.%s\"%(cls.module_name, cls.class_name)\r\n x = get_class(cls.module_name, cls.class_name)\r\n output[(cls.module_name, cls.class_name)] = x()\r\n return output", "def _get_classifiers(self):\n return self.__classifiers" ]
[ "0.73976636", "0.7189597", "0.7136581", "0.7131603", "0.6812443", "0.6812443", "0.6812443", "0.6812443", "0.6812443", "0.6812443", "0.67753637", "0.676244", "0.67491084", "0.6672196", "0.66617703", "0.6578411", "0.6575741", "0.6558602", "0.6541815", "0.6495872", "0.6475694", "0.6473026", "0.6429722", "0.6373241", "0.6344624", "0.6319792", "0.6310036", "0.6294613", "0.6278888", "0.6262828", "0.62609607", "0.6257365", "0.62153774", "0.62132406", "0.62127256", "0.6197778", "0.6191729", "0.6191087", "0.61885136", "0.6126048", "0.6095775", "0.60835373", "0.6075292", "0.6058685", "0.6042469", "0.60404706", "0.60327905", "0.60312825", "0.6013336", "0.5992171", "0.5989515", "0.5949229", "0.59169686", "0.59151477", "0.59084725", "0.59076726", "0.58983934", "0.58972496", "0.5887078", "0.58620596", "0.58501107", "0.58483857", "0.58423615", "0.58423615", "0.58423615", "0.58423615", "0.58423615", "0.58423615", "0.58423615", "0.58423615", "0.58423615", "0.58423615", "0.58423615", "0.58423615", "0.58423615", "0.58423615", "0.58423615", "0.5835191", "0.5832149", "0.58238804", "0.5811545", "0.5800444", "0.5794476", "0.57852685", "0.5780419", "0.5768865", "0.57543933", "0.57485557", "0.5733174", "0.5731488", "0.5728761", "0.5725719", "0.57240915", "0.5716397", "0.5716212", "0.5716212", "0.5696785", "0.56919396", "0.5691437", "0.56900597", "0.56836754" ]
0.0
-1
It returns the FastAPI router. Use it as if you are using the original one.
def route(self) -> APIRouter:
    return self.router
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, req):\n return self._router", "def create_router(self, environment, *, router=None):\n\n if router is None:\n router = self.router\n\n return utils.objects.ensure_instance(router, environment=environment)", "def __parse_controller_router(cls):\n router = getattr(cls, Controller.RC_KEY)\n\n dependencies = None\n if hasattr(cls, \"dependencies\"):\n dependencies = deepcopy(cls.dependencies)\n delattr(cls, \"dependencies\")\n\n for route in router.routes:\n # add class dependencies\n if dependencies:\n for depends in dependencies[::-1]:\n route.dependencies.insert(0, depends)\n\n # get the signature of the endpoint function\n signature = inspect.signature(route.endpoint)\n # get the parameters of the endpoint function\n signature_parameters = list(signature.parameters.values())\n\n # replace the class instance with the itself FastApi Dependecy\n signature_parameters[0] = signature_parameters[0].replace(\n default=Depends(cls)\n )\n\n # set self and after it the keyword args\n new_parameters = [signature_parameters[0]] + [\n parameter.replace(kind=inspect.Parameter.KEYWORD_ONLY)\n for parameter in signature_parameters[1:]\n ]\n\n new_signature = signature.replace(parameters=new_parameters)\n setattr(route.endpoint, Controller.SIGNATURE_KEY, new_signature)\n\n return router", "def get_api(self):\n from geoffrey.utils import get_api\n return get_api(self.app.routes, prefix=\"/\")", "def taskrouter(self):\n if self._taskrouter is None:\n self._taskrouter = Taskrouter(self)\n return self._taskrouter", "def useRouterOnly(self, router):\n return self.mountRouterOnly(router)", "def RemoteRouter(services):\n return PublicController(services)", "def _configure_api_routes(self, app: FastAPI):\n authenticator = JWTAuthenticator(self.signer)\n\n data_update_publisher: Optional[DataUpdatePublisher] = None\n if self.publisher is not None:\n data_update_publisher = DataUpdatePublisher(self.publisher)\n\n # Init api routers with required dependencies\n data_updates_router = init_data_updates_router(\n data_update_publisher,\n self.data_sources_config,\n authenticator\n )\n webhook_router = init_git_webhook_router(self.pubsub.endpoint, authenticator)\n security_router = init_security_router(self.signer, StaticBearerAuthenticator(self.master_token))\n\n # mount the api routes on the app object\n app.include_router(bundles_router, tags=[\"Bundle Server\"], dependencies=[Depends(authenticator)])\n app.include_router(data_updates_router, tags=[\"Data Updates\"], dependencies=[Depends(authenticator)])\n app.include_router(webhook_router, tags=[\"Github Webhook\"])\n app.include_router(security_router, tags=[\"Security\"])\n app.include_router(self.pubsub.router, tags=[\"Pub/Sub\"])\n\n if self.jwks_endpoint is not None:\n # mount jwts (static) route\n self.jwks_endpoint.configure_app(app)\n\n # top level routes (i.e: healthchecks)\n @app.get(\"/healthcheck\", include_in_schema=False)\n @app.get(\"/\", include_in_schema=False)\n def healthcheck():\n return {\"status\": \"ok\"}\n\n return app", "def create_app():\n\n app = FastAPI()\n add_root_route(app)\n\n return app", "def get_api(self):\n from geoffrey.utils import get_api\n return get_api(self.app.routes, prefix='/api')", "def getRoutes(self):\n pass", "def __init__(self, router):\n\n self.router = router", "def get_routers(self):", "def autodiscover_api_routers():\n # TODO: Support multiple API versions by allowing \"router\" to contain a dictionary\n api_router = SharedRootDefaultRouter()\n\n for app_config in apps.get_app_configs():\n 
app = app_config.name\n if app.startswith('django.'):\n # skip Django core apps to avoid false warnings\n continue\n\n api_module = _try_import_api(app)\n router = _try_get_router(app, api_module)\n if router:\n # if router is not None it is good\n api_router.register_router(router)\n logger.debug('registered \"%s\"', app_config.name)\n\n return api_router", "def __init__(self, router):\n self._router = router", "def get_router(self, containers):\n for container in containers:\n if container.name == 'router':\n return container\n return None", "def useRouter(self, predicate, router):\n return self.mountRouter(predicate, router)", "def _try_get_router(app, api_module):\n if not api_module:\n return\n\n router = getattr(api_module, 'router', None)\n\n if not router:\n logger.warn('%s contains an api module but it is missing a \"router\" variable.', app)\n return None\n\n if not isinstance(router, BaseRouter):\n logger.warn('%s contains an api.router, but the router is not derived from BaseRouter', app)\n return None\n\n return router", "def get_next_router(self):\n if not self._routers:\n self._can_failover = False\n router_settings = self._settings.copy()\n router_settings[\"host\"] = self._settings.get(\"host\", \"localhost\")\n router_settings[\"port\"] = self._settings.get(\"port\", 33060)\n return Router(router_settings)\n\n cur_priority = self.routers_priority_list[self._cur_priority_idx]\n routers_priority_len = len(self.routers_priority_list)\n\n search = True\n while search:\n router = self._get_random_connection_params(cur_priority)\n\n if router is not None or self._cur_priority_idx >= routers_priority_len:\n if (\n self._cur_priority_idx == routers_priority_len - 1\n and len(self._get_available_routers(cur_priority)) < 2\n ):\n self._can_failover = False\n break\n\n # Search on next group\n self._cur_priority_idx += 1\n if self._cur_priority_idx < routers_priority_len:\n cur_priority = self.routers_priority_list[self._cur_priority_idx]\n\n return router", "def app(self) -> traits.RESTAware:", "def app(self) -> traits.RESTAware:", "def mvcRouter(self, router):\n pass", "def routers():\n routers = []\n\n for app_controller in __app_controllers__:\n routers.append(app_controller.router())\n\n return routers", "def create_router(self, body=None):\r\n return self.post(self.routers_path, body=body)", "def get_router(self, ns):\r\n desc = self.sendAndRecv(\"GETINFO desc/id/\" + ns.idhex + \"\\r\\n\")[0][2]\r\n sig_start = desc.find(\"\\nrouter-signature\\n\")+len(\"\\nrouter-signature\\n\")\r\n fp_base64 = sha1(desc[:sig_start]).digest().encode(\"base64\")[:-2]\r\n r = Router.build_from_desc(desc.split(\"\\n\"), ns)\r\n if fp_base64 != ns.orhash:\r\n plog(\"INFO\", \"Router descriptor for \"+ns.idhex+\" does not match ns fingerprint (NS @ \"+str(ns.updated)+\" vs Desc @ \"+str(r.published)+\")\")\r\n return None\r\n else:\r\n return r", "def _init_app(self):\n\n self._app = FastAPI(**self._app_kws)\n\n for rt, kwargs in self._app_routers:\n self._app.include_router(rt, **kwargs)\n\n self._app.dependency_overrides[get_dataset] = lambda: self._obj\n self._app.dependency_overrides[get_cache] = lambda: self.cache\n\n return self._app", "def _create_router(self, method, api, header, data):\n self._execute_api(method, api, header, data)", "def create_app():\n app = FastAPI()\n configure_rest_server(app=app, router_configs=WEB_SERVICES_ROUTER_CONFIGS, db_configs=DB_CONFIGS)\n return app", "def _init_routes(self):\n before_hooks = [\n helpers.require_accepts_json,\n 
helpers.extract_project_id,\n\n # NOTE(kgriffs): Depends on project_id being extracted, above\n functools.partial(helpers.validate_queue_name,\n self._validate.queue_name)\n ]\n\n self.app = falcon.API(before=before_hooks)\n\n queue_controller = self._storage.queue_controller\n message_controller = self._storage.message_controller\n claim_controller = self._storage.claim_controller\n\n # Home\n self.app.add_route('/v1', v1.V1Resource())\n\n # Queues Endpoints\n queue_collection = queues.CollectionResource(self._validate,\n queue_controller)\n self.app.add_route('/v1/queues', queue_collection)\n\n queue_item = queues.ItemResource(queue_controller, message_controller)\n self.app.add_route('/v1/queues/{queue_name}', queue_item)\n\n stats_endpoint = stats.Resource(queue_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/stats', stats_endpoint)\n\n # Metadata Endpoints\n metadata_endpoint = metadata.Resource(self._wsgi_conf, self._validate,\n queue_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/metadata', metadata_endpoint)\n\n # Messages Endpoints\n msg_collection = messages.CollectionResource(self._wsgi_conf,\n self._validate,\n message_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/messages', msg_collection)\n\n msg_item = messages.ItemResource(message_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/messages/{message_id}', msg_item)\n\n # Claims Endpoints\n claim_collection = claims.CollectionResource(self._wsgi_conf,\n self._validate,\n claim_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/claims', claim_collection)\n\n claim_item = claims.ItemResource(self._wsgi_conf, self._validate,\n claim_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/claims/{claim_id}', claim_item)\n\n # Health\n self.app.add_route('/v1/health', health.HealthResource())", "def useRouterPath(self, path, router):\n return self.mount(path, router)", "def route(self):\n pass", "def mountRouterOnly(self, router):\n pass", "def dev_handler_hook(self):\n return RoutesDevHandlerHook(self)", "def get_blueprint():\n return REQUEST_API", "def _make_app():\n app = web.Application(middlewares=[middleware.error_middleware])\n admin_routes.setup(app)\n return app", "def _get_route_reflector_client(self):\n return self.__route_reflector_client", "def _init_fast_api_app(self):\n app = FastAPI(\n title=\"Opal Server\",\n description=\"OPAL is an administration layer for Open Policy Agent (OPA), detecting changes\" +\n \" to both policy and data and pushing live updates to your agents. The opal server creates\" +\n \" a pub/sub channel clients can subscribe to (i.e: acts as coordinator). 
The server also\" +\n \" tracks a git repository (via webhook) for updates to policy (or static data) and accepts\" +\n \" continuous data update notifications via REST api, which are then pushed to clients.\",\n version=\"0.1.0\",\n )\n configure_middleware(app)\n self._configure_api_routes(app)\n self._configure_lifecycle_callbacks(app)\n return app", "def show_router(self, router, **_params):\r\n return self.get(self.router_path % (router), params=_params)", "def getRouterOptions(self):\n pass", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GatewayApiRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def _dispatch(req):\n match = req.environ['wsgiorg.routing_args'][1]\n if not match:\n return webob.exc.HTTPNotFound()\n app = match['controller']\n return app", "def add_root_route(app: FastAPI):\n\n @app.get(\"/\")\n async def root():\n \"\"\"Check if the server is running and responds with the version.\"\"\"\n return \"Hello from TradingSignal Version: {}. Happy trading!!\".format(tradingsignal.__version__)", "def __init__(self, init_routes_with_config_db=False, config_db='routes_config.db'):\n print('Initializing FastAPI_Wrapper...')\n \n super().__init__()\n\n self.config_db = config_db\n\n origins = settings.CORS_ALLOW_ORIGINS\n\n self.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n def custom_openapi():\n from fastapi.openapi.utils import get_openapi\n from .__init__ import __version__ as version\n\n print('Running custom_openapi...')\n \n if self.openapi_schema:\n return self.openapi_schema\n openapi_schema = get_openapi(\n title=\"FastAPI Wrapper for CSV & Excel Files\",\n version=version,\n description=\"Custom API enpoints available for each converted file is listed below.\",\n routes=self.routes,\n )\n openapi_schema[\"info\"][\"x-logo\"] = {\n \"url\": \"https://www.oxfordeconomics.com/static/img/logo.png\"\n }\n self.openapi_schema = openapi_schema\n return self.openapi_schema\n\n self.openapi = custom_openapi\n\n # Add shutdown event (would only be of any use in a multi-process, not multi-thread situation)\n @self.get(\"/shutdown\")\n async def shutdown():\n import time\n import psutil\n import threading\n\n def suicide():\n time.sleep(1)\n # parent = psutil.Process(psutil.Process(os.getpid()).ppid())\n # parent.kill()\n myself = psutil.Process(os.getpid())\n myself.kill()\n\n threading.Thread(target=suicide, daemon=True).start()\n logging.info(f'>>> Successfully killed API <<<')\n print(f'>>> Successfully killed API <<<')\n return {\"success\": True} \n\n def signal_handler(e):\n print(f'>>> API Signal Received <<<')\n print(e)\n\n self.on_event('shutdown')(signal_handler)\n\n # ----- FIXED ROUTES -----\n\n # /download/{database}\n # \n # Add download database method as GET endpoint to fastapi\n # database is a path param\n def download(database: str,\n responses={ 200: { 'description': 'Download SQL database file.',\n 'content' : {'application/octet-stream' : {'example' : 'No example available.'}}\n }}\n ):\n db, _ = resolve_db(database)\n if os.path.exists(db):\n suffix = str(datetime.now())[:-10].replace(' ', '_').replace(':','_')\n db_suffix = db.replace('.db', f'_{suffix}.db')\n return FileResponse(db, media_type='application/octet-stream', filename=db_suffix)\n return {'error' : f'{database} file not found!'}\n\n route_path = '/download/{database}'\n route_name = 'download'\n self.get(route_path, name=route_name, tags=[route_name])(download)\n\n # 
/createdb?database=db&data_path=dp&data_format=<CSV | XLSX>&if_exists=<fail | replace | append>\n #\n # Add createdb method as GET endpoint to fastapi\n # NOTE: Not very useful for physical DBs except when run locally!\n def createdb(**query_kwargs):\n print(query_kwargs)\n\n database = query_kwargs.get('database', None) or ':memory:'\n\n data_path = query_kwargs.get('data_path', None)\n if data_path is None:\n return Response(f\"You must provide a data_path value\", status_code=418) # I'm a teapot!\n\n data_format = query_kwargs.get('data_format', None) or 'CSV'\n if data_format.upper() not in ['CSV', 'XLSX']:\n return Response(f\"data_format parameter must be one of ['CSV', 'XLSX']\", status_code=418) # I'm a teapot!\n\n if_exists = query_kwargs.get('if_exists', None) or 'replace'\n if if_exists not in ['fail', 'replace', 'append']:\n return Response(f\"if_exists parameter must be one of ['fail', 'replace', 'append']\", status_code=418) # I'm a teapot!\n\n try:\n self.create_database(database, data_path, data_format=data_format, if_exists=if_exists)\n except Exception as ex:\n return Response(f'Failed: {str(ex.msg)}', status_code=418)\n\n table_name=Path(data_path).stem.lower().replace(' ', '_').replace('.', '_')\n database_name=Path(database).stem.lower().replace(' ', '_').replace('.', '_')\n\n route_name = f'/{database_name}/{table_name}'\n query_params = self._get_query_params(route_name)\n query_params = [model_field.name for model_field in query_params]\n\n return Response(json.dumps({'status:': 'success', 'endpoint': route_name, 'params': query_params}), status_code=200)\n\n route_path = '/createdb'\n route_name = 'createdb'\n self.get(route_path, name=route_name, tags=[route_name])(createdb)\n self._clear_query_params(route_path)\n self._add_query_param(route_path, 'database', str)\n self._add_query_param(route_path, 'data_path', str)\n self._add_query_param(route_path, 'data_format', str)\n self._add_query_param(route_path, 'if_exists', str)\n\n\n config_db, _ = resolve_db(self.config_db)\n # Explicit routes initialization case\n # (uses default config database if not explicitly given a different one in constructor)\n if init_routes_with_config_db:\n self.initialize_routes_with_config_db(config_db)\n\n # Case where we're not initializing via config, so will always create a config database\n # (uses default config database if not explicitly given a different one in constructor)\n else:\n # Will create DB if it doesn't exist\n connection_for_db(config_db)\n # If it exists, drop the routes_config table\n delete_table(config_db, 'routes_config')", "def router(paramstring):\r\n channels()", "def route(self, env):\n return None", "def initialize_routes(api):\n api.add_resource(WatchlistsApi, '/api/watchlists')\n api.add_resource(WatchlistApi, '/api/watchlist/<id>')\n api.add_resource(RegisterUserApi, '/api/auth/register')\n api.add_resource(LoginUserApi, '/api/auth/login')\n api.add_resource(ResetPassword, '/api/auth/reset')\n api.add_resource(ResetFogottenPassword, '/api/auth/reset/password')\n api.add_resource(ForgotPassword, '/api/auth/forgot')\n api.add_resource(ForgotPasswordReset, '/reset/password/<token>')\n api.add_resource(Home, '/')\n api.add_resource(Logout, '/logout')\n api.add_resource(Dashboard, '/dashboard')\n api.add_resource(DashboardSearch, '/dashboard/search')\n api.add_resource(SearchMovies, '/search/movies/<title>')\n api.add_resource(SearchMovieDetails, '/search/movie/details/<id>')\n api.add_resource(SearchTvShows, '/search/shows/<title>')\n 
api.add_resource(SearchShowDetails, '/search/show/details/<id>')\n api.add_resource(SearchTrendingMovies, '/search/trending/movies')\n api.add_resource(Recommend, '/recommend')", "def create_routes():\n app_dir = os.path.dirname(os.path.abspath(__file__))\n controller_dir = os.path.join(app_dir, \"controllers\")\n routes = Mapper(directory=controller_dir)\n routes.connect(\"/\", controller=\"root\", action=\"index\")\n routes.connect(\"/body\", controller=\"root\", action=\"body\")\n routes.connect(\"/raise_exception\", controller=\"root\", action=\"raise_exception\")\n routes.connect(\"/raise_wrong_code\", controller=\"root\", action=\"raise_wrong_code\")\n routes.connect(\"/raise_custom_code\", controller=\"root\", action=\"raise_custom_code\")\n routes.connect(\"/raise_code_method\", controller=\"root\", action=\"raise_code_method\")\n routes.connect(\"/render\", controller=\"root\", action=\"render\")\n routes.connect(\"/path-params/{year:\\d+}/{month}/\", controller=\"root\", action=\"path_params\") # noqa: W605\n routes.connect(\"/render_exception\", controller=\"root\", action=\"render_exception\")\n routes.connect(\"/response_headers\", controller=\"root\", action=\"response_headers\")\n routes.connect(\"/identify\", controller=\"root\", action=\"identify\")\n return routes", "def get_router(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n return self.network.find_router(\n name_or_id=name_or_id, ignore_missing=True, **filters\n )", "def _get(self):\n return self.request(method=\"get\", path=self.router.fields)", "def app(app_environment: EnvVarsDict) -> FastAPI:\n return init_app()", "def getGlobalSelf(self):\r\n transports = self.reactor.getReaders()\r\n for transport in transports:\r\n try:\r\n resource = transport.factory.resource\r\n if isinstance(resource, self.__class__) and resource.port == self.port:\r\n return resource\r\n except AttributeError:\r\n pass\r\n return", "def get_router(admin_state_up: Optional[bool] = None,\n description: Optional[str] = None,\n distributed: Optional[bool] = None,\n enable_snat: Optional[bool] = None,\n name: Optional[str] = None,\n region: Optional[str] = None,\n router_id: Optional[str] = None,\n status: Optional[str] = None,\n tags: Optional[Sequence[str]] = None,\n tenant_id: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRouterResult:\n __args__ = dict()\n __args__['adminStateUp'] = admin_state_up\n __args__['description'] = description\n __args__['distributed'] = distributed\n __args__['enableSnat'] = enable_snat\n __args__['name'] = name\n __args__['region'] = region\n __args__['routerId'] = router_id\n __args__['status'] = status\n __args__['tags'] = tags\n __args__['tenantId'] = tenant_id\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('openstack:networking/getRouter:getRouter', __args__, opts=opts, typ=GetRouterResult).value\n\n return AwaitableGetRouterResult(\n admin_state_up=pulumi.get(__ret__, 'admin_state_up'),\n all_tags=pulumi.get(__ret__, 'all_tags'),\n availability_zone_hints=pulumi.get(__ret__, 'availability_zone_hints'),\n description=pulumi.get(__ret__, 'description'),\n distributed=pulumi.get(__ret__, 'distributed'),\n enable_snat=pulumi.get(__ret__, 'enable_snat'),\n external_fixed_ips=pulumi.get(__ret__, 'external_fixed_ips'),\n external_network_id=pulumi.get(__ret__, 'external_network_id'),\n id=pulumi.get(__ret__, 'id'),\n name=pulumi.get(__ret__, 'name'),\n region=pulumi.get(__ret__, 
'region'),\n router_id=pulumi.get(__ret__, 'router_id'),\n status=pulumi.get(__ret__, 'status'),\n tags=pulumi.get(__ret__, 'tags'),\n tenant_id=pulumi.get(__ret__, 'tenant_id'))", "def add_app_routes(app):\n\n # Routes for demo pages to visit with a web browser\n @app.route('/')\n def index():\n return render_template('index.html')\n\n @app.route('/video_stream_demo')\n def video_stream_demo():\n \"\"\"Video streaming demo page.\"\"\"\n return render_template('video_stream_demo.html')\n\n @app.route('/image_capture_demo')\n def image_capture_demo():\n \"\"\"Image capture demo page.\"\"\"\n return render_template('image_capture_demo.html')\n\n\n\n # Routes to use to use for programmatic connectivity\n @app.route('/video_feed')\n def video_feed():\n \"\"\"Video streaming route.\"\"\"\n return Response(gen(Camera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n @app.route('/image')\n def image():\n \"\"\"Image capture route.\"\"\"\n return Response(gen_image(Camera()),\n mimetype='image/jpeg')\n\n # TODO: Probably makes more sense to have a POST url \n # so it'll be easier to set multiple settings\n @app.route('/settings')\n def settings():\n \"\"\"Settings route\"\"\"\n stop_req = request.args.get('stop')\n frame_sleep_req = request.args.get('frame_sleep')\n\n global stop\n if stop_req == '1':\n stop = True\n elif stop_req == '0':\n stop = False\n\n global frame_sleep\n if frame_sleep_req:\n frame_sleep = int(frame_sleep_req)\n\n return jsonify({'message': 'Set settings: {}'.format(request.args)})\n\n\n return app", "def API(root, **routes):\n\n # this creats a dict of properties that create Senders for\n # all the friendly name --> suffix combidantions passed in\n props = {'root': Route(root)}\n for (short, path) in routes.iteritems():\n props[short] = lambda self, content: self.sender.send_path(path, content)\n\n return type('API', (GenericAPI,), props)", "def __create_routes__(self):\n self.app.add_url_rule('/', 'main_page', self.main_page)\n self.app.add_url_rule('/day', 'day', self.get_current_iteration, methods=['GET'])\n self.app.add_url_rule('/traders', 'traders', self.register_trader, methods=['POST'])\n self.app.add_url_rule('/traders/<id>', 'trader', self.get_trader_state, methods=['GET'])\n self.app.add_url_rule('/stock/price', 'price', self.get_stock_price, methods=['GET'])\n self.app.add_url_rule('/stock/history', 'history', self.get_stock_price_history, methods=['GET'])\n self.app.add_url_rule('/stock/buy', 'buy', self.place_buy_order, methods=['POST'])\n self.app.add_url_rule('/stock/sell', 'sell', self.place_sell_order, methods=['POST'])\n self.app.add_url_rule('/simulation/step', 'step', self.market_session_step, methods=['POST'])\n self.app.add_url_rule('/simulation/run', 'run', self.run_simulation, methods=['POST'])", "def _dispatch(req):\n match = req.environ['wsgiorg.routing_args'][1]\n if not match:\n msg = ('(%(url)s): The resource could not be found.' 
%\n {'url': req.url})\n return render_exception(exception.NotFound(msg))\n app = match['controller']\n return app", "def create_routes(self):\r\n self._app.route('/api/autoconf',\r\n methods=['GET'],\r\n endpoint='api_autoconf')(self.entrypoint)\r\n self._app.route('/api/autoconf/<string:session_id>',\r\n methods=['GET', 'POST', 'DELETE'],\r\n endpoint='api_autoconf_status')(self.entrypoint)\r\n self._app.route('/api/autoconf/rgc',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_rgc')(self.entrypoint)\r\n self._app.route('/api/autoconf/pd',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_pd')(self.entrypoint)", "def get_routers_directory(self):\n return self._routers_directory", "def get_routes():\n return sum([load_module(m).ROUTES for m in settings.INSTALLED_HANDLERS], []) + ROUTES", "def swiftclient_for(self, router, class_name):\n # See if class_name is taken by another model\n from collections import deque\n out = [f\"class {class_name} : ApiBase {{\"]\n def visit(r, path = \"\"):\n for httpmethod,method in r.methods.items():\n out.append(self.func_for_router_method(httpmethod, method, path))\n\n for prefix,child in r.children:\n visit(child, path + prefix)\n visit(router, \"/\")\n out.append(\"}\")\n return \"\\n\".join(out)", "def wsgi_app():\n return bottle.default_app()", "def wsgi_app():\n return bottle.default_app()", "def routes(self):\n return self._routes", "def root(self):\n return self.app.get('/',headers=self.headers)", "def api_factory(global_config, **local_conf):\n\treturn make_app(blueprints.api_server, settings.ProductionConfig)", "def req():\n return Request()", "def create_router_for_backend(backend):\n\n router = SimpleRouter()\n\n for name, collection in backend.collections.viewitems():\n\n class NewViewSet(ModelViewSet):\n permission_classes = [IsAuthenticated, TokenHasReadWriteScope]\n\n queryset = collection.model.objects.all()\n serializer_class = collection.serializer_class\n\n def update(self, request, *args, **kwargs):\n # Here we call Daryl's Code to hax it!\n return daz_update(self, request, *args, **kwargs)\n\n NewViewSet.__name__ = collection.model.__name__ + 'ViewSet'\n\n router.register(name, NewViewSet)\n\n return router", "def get_route(function):\n return '/%s%s' % (app.config['PUBLIC_API_PREFIX'], app.config['PUBLIC_API_ROUTES'][function])", "def setup(app, obj, is_core=False):\n \n # Basic api common to all microservices\n app.router.add_route('GET', '/fledge/service/ping', obj.ping)\n app.router.add_route('POST', '/fledge/service/shutdown', obj.shutdown)\n app.router.add_route('POST', '/fledge/change', obj.change)\n\n if is_core:\n # Configuration\n app.router.add_route('GET', '/fledge/service/category', obj.get_configuration_categories)\n app.router.add_route('POST', '/fledge/service/category', obj.create_configuration_category)\n app.router.add_route('GET', '/fledge/service/category/{category_name}', obj.get_configuration_category)\n app.router.add_route('DELETE', '/fledge/service/category/{category_name}', obj.delete_configuration_category)\n app.router.add_route('GET', '/fledge/service/category/{category_name}/children', obj.get_child_category)\n app.router.add_route('POST', '/fledge/service/category/{category_name}/children', obj.create_child_category)\n app.router.add_route('GET', '/fledge/service/category/{category_name}/{config_item}',\n obj.get_configuration_item)\n app.router.add_route('PUT', '/fledge/service/category/{category_name}/{config_item}',\n 
obj.update_configuration_item)\n app.router.add_route('DELETE', '/fledge/service/category/{category_name}/{config_item}/value',\n obj.delete_configuration_item)\n\n # Service Registration\n app.router.add_route('POST', '/fledge/service', obj.register)\n app.router.add_route('DELETE', '/fledge/service/{service_id}', obj.unregister)\n app.router.add_route('PUT', '/fledge/service/{service_id}/restart', obj.restart_service)\n app.router.add_route('GET', '/fledge/service', obj.get_service)\n app.router.add_route('GET', '/fledge/service/authtoken', obj.get_auth_token)\n\n # Interest Registration\n app.router.add_route('POST', '/fledge/interest', obj.register_interest)\n app.router.add_route('DELETE', '/fledge/interest/{interest_id}', obj.unregister_interest)\n app.router.add_route('GET', '/fledge/interest', obj.get_interest)\n\n # Asset Tracker\n app.router.add_route('GET', '/fledge/track', obj.get_track)\n app.router.add_route('POST', '/fledge/track', obj.add_track)\n\n # Audit Log\n app.router.add_route('POST', '/fledge/audit', obj.add_audit)\n\n # enable/disable schedule\n app.router.add_route('PUT', '/fledge/schedule/{schedule_id}/enable', obj.enable_disable_schedule)\n\n # Internal refresh cache\n app.router.add_route('PUT', '/fledge/cache', obj.refresh_cache)\n\n # Service token verification\n app.router.add_route('POST', '/fledge/service/verify_token', obj.verify_token)\n\n # Service token refresh\n app.router.add_route('POST', '/fledge/service/refresh_token', obj.refresh_token)\n\n app.router.add_route('GET', '/fledge/ACL/{acl_name}', obj.get_control_acl)\n\n # Proxy API setup for a microservice\n proxy.setup(app)\n\n # enable cors support\n enable_cors(app)", "def get_handler(cls):\n if cls.__instance is None:\n cls.__instance = AliceBlueApi()\n return cls.__instance", "def add_routes(self):\n pass", "def normalise(self) -> \"Route\":\n pass", "def __init__(self, loop=None, config=None):\n\n self.logger = logging.getLogger(self.__class__.__name__)\n self.loop = loop if loop is not None else asyncio.get_event_loop()\n self.config = config\n\n swagger_url = self.prefix_context_path(\"/doc\")\n\n self.app = aiohttp.web.Application(loop=loop, middlewares=[functools.partial(api_middlewares.rest_error_middleware, logger=self.logger)])\n self.app.factory = self\n\n self.app.router.add_route(\"GET\", \"/\", lambda x: aiohttp.web.HTTPFound(swagger_url))\n if self.config.context_path != \"/\":\n self.app.router.add_route(\"GET\", self.config.context_path, lambda x: aiohttp.web.HTTPFound(swagger_url))\n self.app.router.add_route(\"GET\", self.config.context_path + \"/\", lambda x: aiohttp.web.HTTPFound(swagger_url))\n self.app.router.add_route(\n \"GET\",\n self.prefix_context_path(\"/appointments/{user_type}/{control_type}/{vehicle_type}/{organism}/{site}/{start_date}/{end_date}\"),\n resources.RestAppointments().get,\n )\n self.app.router.add_route(\"GET\", self.prefix_context_path(\"/sites\"), resources.RestSites().get)\n self.app.router.add_route(\"GET\", self.prefix_context_path(\"/vehicles\"), resources.RestVehicles().get)\n self.app.router.add_route(\"GET\", self.prefix_context_path(\"/appointments/ws\"), resources.WsAppointments().get)\n\n # Setup Swagger\n # bundle_params and schemes are a GitHub patch not released\n # in any aiohttp_swagger package\n setup_swagger_sign = inspect.signature(aiohttp_swagger.setup_swagger)\n kwargs = {}\n if \"bundle_params\" in setup_swagger_sign.parameters:\n kwargs[\"bundle_params\"] = {\"layout\": \"BaseLayout\", \"defaultModelExpandDepth\": 5}\n if 
\"schemes\" in setup_swagger_sign.parameters:\n kwargs[\"schemes\"] = self.config.swagger_ui_schemes\n\n aiohttp_swagger.setup_swagger(\n app=self.app,\n description=\"API for finding appointments timeslots for SNCT vehicule inspection\",\n title=\"SNCT Appointments API\",\n api_version=\"1.0\",\n contact=\"[email protected]\",\n swagger_url=swagger_url,\n **kwargs\n )\n\n # Setup CORS\n if self.config.allow_origin:\n self.cors = aiohttp_cors.setup(\n self.app,\n defaults={self.config.allow_origin: aiohttp_cors.ResourceOptions(allow_credentials=True, expose_headers=\"*\", allow_headers=\"*\")},\n )\n for route in self.app.router.routes():\n if not isinstance(route.resource, aiohttp.web_urldispatcher.StaticResource):\n self.cors.add(route)\n\n # Print configured routes\n self.print_routes()\n\n # Setup services\n self.app.on_startup.append(self.setup_appointment_dispatcher)\n self.app.on_startup.append(self.setup_snct_appointment_scrapper)\n self.app.on_shutdown.append(self.close_snct_appointment_scrapper)\n self.app.on_startup.append(self.setup_ws_stream_coros)\n self.app.on_shutdown.append(self.close_ws_stream_coros)", "def __init__(self, output_file, dry_run=False):\n _Router.__init__(self)\n self.output_file = output_file\n self._dry_run = dry_run", "def rebase(base):\n if not base: # no rebase needed\n return bottle.app()\n\n oldApp = bottle.app.pop()\n newApp = bottle.app.push()\n for route in oldApp.routes:\n route.rule = \"{0}{1}\".format(base, route.rule)\n newApp.add_route(route)\n route.reset() # reapply plugins on next call\n return newApp", "def __init__(self, mapper):\n self.map = mapper\n self._router = routes.middleware.RoutesMiddleware(self._dispatch,\n self.map)", "def route(self, path, **params):\n\n def decorate(func):\n \"\"\"\n A function returned as a object in load time,\n which set route to given url along with decorated function.\n \"\"\"\n from aha.dispatch.router import get_router\n r = get_router()\n r.connect(None, path, controller = func, **params)\n return func\n \n return decorate", "def __get_parent_routes(self, router: APIRouter):\n for route in router.routes:\n options = {key: getattr(route, key) for key in __router_params__}\n\n # inherits child tags if presents\n if len(options[\"tags\"]) == 0 and self.openapi_tag:\n options[\"tags\"].append(self.openapi_tag[\"name\"])\n\n self.router.add_api_route(route.path, route.endpoint, **options)", "def inner(func):\r\n\r\n service = func.__qualname__.split(\".\")[0]\r\n _Router().add_route(\r\n service=service,\r\n grpc_method=func.__name__,\r\n url_path=url,\r\n http_method=method\r\n )\r\n if pre_request is not None and len(pre_request) > 0:\r\n _MiddlewareManager().add_route_pre_middleware(pre_request, url)\r\n if pos_request is not None and len(pos_request) > 0:\r\n _MiddlewareManager().add_route_pre_middleware(pos_request, url)\r\n return func", "def list_router(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/routers.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing routers.\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"List router Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Router List : %s \" % output)\n\n return output[\"routers\"]", "def 
_make_core_app():\n app = web.Application(middlewares=[middleware.error_middleware])\n management_routes.setup(app, is_core=True)\n return app", "def get(self, pattern, handler):\n return self.route(Router.GET, pattern, handler)", "def configure_app(self):\n self.app.route('/', callback=self.get_api)", "def create_routes(api: Api):\n api.add_resource(SignUpApi, '/user/signup/')\n api.add_resource(LoginApi, '/user/login/')\n\n api.add_resource(UsersApi, '/users/')\n\n api.add_resource(CafeteriasCreationAPI, '/createcafeteria/')\n api.add_resource(CreateItemsAPI, '/createcafeteriaitems/')", "def setup_routes():\n root = CherryTarball()\n d = cherrypy.dispatch.RoutesDispatcher()\n d.connect('main', '/', controller=root)\n # This enumerates the tarball and connects each file within to a URL in the dispatcher\n tar = tarfile.open(tarball_path)\n for tarobj in tar.getmembers():\n if tarobj.isdir():\n pass # Skip directories\n else:\n d.connect(tarobj.name, tarobj.name, controller=root, action='static', filepath=tarobj.name)\n dispatcher = d\n return dispatcher", "def get_app(self):\n return make_app(store=Store('http://localhost/'), no_auth=True)", "def _create_router_port(self, method, api, header, data):\n self._execute_api(method, api, header, data)", "def init(loop):\n tasks = JobsHandler()\n config = ConfigHandler()\n task = TaskHandler()\n\n\n\n app = web.Application(loop = loop)\n app.router.add_route('*', '/tasks/{do_something}', tasks.handle)\n app.router.add_route('*', '/config/{do_something}', config.handle)\n app.router.add_route('*', '/task/{id}/{do_something}', task.handle)\n\n handler = app.make_handler()\n srv = yield from loop.create_server(handler, '0.0.0.0', 8080)\n print(\"Server started at http://0.0.0.0:8080\")\n return srv, handler", "def test_http_we_provide_default_route_prefix_cls(serve_instance):\n with InputNode() as dag_input:\n m1 = Model.bind(1)\n m2 = Model.bind(1)\n m1_output = m1.forward.bind(dag_input[0])\n m2_output = m2.forward.bind(dag_input[0])\n combine_output = combine.bind(m1_output, m2_output)\n serve_dag = Driver.bind(combine_output)\n\n deployments = pipeline_build(serve_dag)\n ingress_deployment = get_and_validate_ingress_deployment(deployments)\n assert ingress_deployment.route_prefix == \"/\"\n for deployment in deployments[:-1]:\n assert deployment.route_prefix is None", "def wrapped(func):\n self.routes.append((path, {\n 'regex': re.compile('^' + re.sub(self._part_matcher,'(.*?)',path) + '$'),\n 'function':func,\n 'reqs':req,\n 'kwargs':kwargs,\n 'parts':parts_info,\n 'generate':generate\n }))\n\n return func", "def register_routes(self):\n @inlineCallbacks\n def registered(response):\n if response.code != 200:\n text = yield response.text()\n self._env.logger.error('{} {}'.format(response.code, text))\n\n try:\n api_register = '{}://{}:{}/api/1.0.0/register'.format(\n self._env.api_protocol,\n self._env.api_host,\n self._env.api_port\n )\n remote_ms = self._env.get('remote_ms', None)\n\n for path in self._env.swagger.paths:\n uri = self._env.swagger.base + path.split('{')[0].rstrip('/')\n if remote_ms:\n route = {\n 'protocol': 'https',\n 'host': remote_ms,\n 'port': 443,\n }\n else:\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n }\n route = dict(route, **{'uri': uri, 'key': self._key})\n 
#self._env.logger.info('Route> {}'.format(str(route)))\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n swagger_paths = ['/ui/css', '/ui/lib', '/ui/images', '/swagger.json']\n ui = '/' + self._env.get('swagger_ui', 'ui')+'/'\n swagger_paths.append(ui)\n\n for path in swagger_paths:\n uri = self._env.swagger.base\n if len(uri):\n if uri[-1] == '/':\n uri = uri[:-1]\n uri += path\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n return True\n except Exception as e:\n self._env.logger.error('error registering routes \"{}\"'.format(str(e)))", "def get_routes(self):\n return [\n (route, handler.handler_class, handler.init_kwargs)\n for route, handler in self._routes.iteritems()\n ]", "def get_api(self):\n return self.api", "def get_routes():\n output = [f'{\"S. No.\":6}\\t{\"Endpoint\":50}\\t{\"Method\":8}\\n']\n\n for index, rule in enumerate(app.url_map.iter_rules()):\n for i, method in enumerate(rule.methods):\n output.append(f'{index + 1 if i == 0 else \"\":<6}\\t{rule.rule:50}\\t{method:10}')\n\n try:\n output.append(f'\\n{eval(rule.endpoint).__doc__}\\n')\n except NameError:\n output.append('\\n')\n\n return Response('\\n'.join(output), 200, mimetype='text/plain')", "def get(self, url_pattern):\n return self.route(url_pattern, methods=['GET'])", "def _get_or_create_midonet_router(self, media_type):\n router_api = \"routers\"\n col_media_type = media_type[:28] + 'collection.' + media_type[28:]\n header_get = {\"Accept\": col_media_type, \"X-Auth-Token\": \"%s\"\n % AUTH_TOKEN}\n router_details = self._execute_api(\"GET_ALL\", router_api, header_get)\n if not len(router_details) == 0:\n router_id = router_details[0][\"id\"]\n else:\n header_post = {\"Content-Type\": media_type, \"X-Auth-Token\": \"%s\"\n % AUTH_TOKEN}\n # create a random name for router\n router_name = ''.join(random.choice(string.ascii_lowercase)\n for x in range(6))\n data = {\"name\": router_name, \"tenantId\": \"\"}\n # create router\n self._execute_api(\"POST\", router_api, header_post, data)\n\n # get the router id\n router_details = self._execute_api(\"GET_ALL\", router_api,\n header_get)\n router_id = router_details[0][\"id\"]\n\n return router_id", "def app(self):\n return self.__app", "def get_routes(self, ApiId: str, MaxResults: str = None, NextToken: str = None) -> Dict:\n pass", "def app_factory():\n app = web.Application()\n app.add_routes([\n web.get('/ping', handle_ping),\n ])\n return app", "def getInstance():\n return net()" ]
[ "0.75668126", "0.62573475", "0.6220323", "0.61085886", "0.6030978", "0.602395", "0.60006815", "0.59523207", "0.5933353", "0.58949184", "0.5879139", "0.5846796", "0.5825418", "0.5804262", "0.57789963", "0.5770383", "0.5753781", "0.57239616", "0.56978655", "0.5636582", "0.5636582", "0.5620236", "0.5565216", "0.5535793", "0.55327725", "0.5522818", "0.5452654", "0.5436301", "0.54036504", "0.5396294", "0.53572434", "0.5327283", "0.53185064", "0.5294366", "0.5247929", "0.52423847", "0.522762", "0.5216989", "0.52099335", "0.5203864", "0.5201179", "0.52001864", "0.51996654", "0.51978177", "0.5184448", "0.5171964", "0.51635545", "0.51243025", "0.51046103", "0.50825566", "0.508055", "0.5066403", "0.50638515", "0.5057859", "0.50496453", "0.5044506", "0.5030685", "0.5025864", "0.5021554", "0.501836", "0.50140816", "0.50140816", "0.5010291", "0.50001395", "0.49850112", "0.49831378", "0.49811366", "0.49790907", "0.4971224", "0.49671963", "0.49649712", "0.4963711", "0.49443248", "0.49403936", "0.49338043", "0.492829", "0.4910058", "0.4903115", "0.4895488", "0.48914328", "0.4890797", "0.4884705", "0.4883287", "0.4857878", "0.48577043", "0.4856832", "0.4850181", "0.4850175", "0.48483133", "0.48464158", "0.48202217", "0.4814605", "0.48130032", "0.47942322", "0.47836873", "0.47718072", "0.47679815", "0.4759517", "0.47582018", "0.4758147" ]
0.81418586
0
Checks if two shards overlap.
def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata):
    # For each dim of each shard, check if one shard resides on the other
    # end of second shard with respect to that dim. As an example for a 2D
    # shard, we would check if one shard is above or on the left of the
    # other shard.
    ndims = len(shard1.shard_offsets)
    for i in range(ndims):
        if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_lengths[i]:
            return False
        if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_lengths[i]:
            return False

    return True
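The retrieved document above is a per-dimension interval-disjointness test: two axis-aligned N-dimensional shards fail to overlap exactly when, along at least one dimension, one shard starts at or beyond the other shard's end. The following minimal sketch illustrates the same idea in isolation; the (offsets, lengths) tuples and the shards_overlap helper are simplified, hypothetical stand-ins for the ShardMetadata objects in the record, not the actual torch.distributed API.

from typing import Sequence, Tuple

# Simplified stand-in for a shard: (shard_offsets, shard_lengths)
Shard = Tuple[Sequence[int], Sequence[int]]

def shards_overlap(a: Shard, b: Shard) -> bool:
    # Two axis-aligned boxes are disjoint iff, along at least one dimension,
    # one box starts at or beyond the other's end; otherwise they overlap.
    (a_off, a_len), (b_off, b_len) = a, b
    for dim in range(len(a_off)):
        if a_off[dim] >= b_off[dim] + b_len[dim]:
            return False
        if b_off[dim] >= a_off[dim] + a_len[dim]:
            return False
    return True

# Two 2x2 tiles placed corner to corner share no cells, while shifting the
# second tile by one row and one column makes the tiles share cell (1, 1).
assert not shards_overlap(([0, 0], [2, 2]), ([2, 2], [2, 2]))
assert shards_overlap(([0, 0], [2, 2]), ([1, 1], [2, 2]))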
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_overlap(self):\n return False", "def overlaps(self, other): # -> bool:\n ...", "def check_overlap(a, b):\n if a[0] >= b[2] or a[1] >= b[3] or a[2] <= b[0] or a[3] <= b[1]:\n return False\n return True", "def span_overlap(a: Tuple[int, int], b: Tuple[int, int]) -> bool:\n return not (a[0] > b[1] or a[1] < b[0])", "def validate_non_overlapping_shards_metadata(shards: List[ShardMetadata]):\n # TODO: evaluate optimizing this if needed.\n for i in range(len(shards)):\n for j in range(i + 1, len(shards)):\n if _check_shard_metadata_pair_overlap(shards[i], shards[j]):\n raise ValueError(f'Shards {shards[i]} and {shards[j]} overlap')", "def overlap(component1, component2):\n if component1[0].start <= component2[0].stop and component2[0].start <= component1[0].stop:\n if component1[1].start <= component2[1].stop and component2[1].start <= component1[1].stop:\n return True\n return False", "def hypercubes_overlap(hypercube1, hypercube2):\n if not isinstance(hypercube1, Volume) or \\\n not isinstance(hypercube2, Volume):\n raise TypeError()\n\n lowercorner1, uppercorner1 = hypercube1.get_corners()\n lowercorner2, uppercorner2 = hypercube2.get_corners()\n nb_dims = len(uppercorner1)\n \n for i in range(nb_dims):\n if not uppercorner1[i] > lowercorner2[i] or \\\n not uppercorner2[i] > lowercorner1[i]:\n return False\n\n return True", "def _overlap(x1, w1, x2, w2):\r\n if x1+w1 < x2-w2: return False\r\n if x1-w1 > x2+w2: return False\r\n\r\n return True", "def overlap(a, b):\n return not(a[2]<=b[0] or a[3]<=b[1] or a[0]>=b[2] or a[1]>=b[3])", "def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n abs(first[2] - second[2]) <= th,\n abs(first[3] - second[3]) <= th,\n first[1] == second[1],\n first[4] == second[4]]):\n return True\n else:\n return False", "def overlap(range1, range2):\n if range1[0] <= range2[1] and range2[0] <= range1[1]:\n return True\n return False", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n abs(first[3] - second[3]) <= th,\n first[1] == second[1],\n first[2] == second[2],\n first[5] == second[5],\n first[6] == second[6]]):\n return True\n else:\n return False", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n abs(first[2] - second[2]) <= th,\n abs(first[5] - second[5]) <= th,\n first[1] == second[1],\n first[4] == second[4]]):\n return True\n else:\n return False", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n first[1] == second[1],\n first[2] == second[2],\n abs(first[3] - second[3]) <= th]):\n return True\n else:\n return False", "def overlap(id1, id2, th):\n\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n\n if all(map(lambda x: abs(x[0] - x[1]) <= th, zip(first, second))):\n return True\n else:\n 
return False", "def do_overlap(r1, r2):\n r1_s, r1_e = r1\n r2_s, r2_e = r2\n\n return r1_s <= r2_s <= r1_e or r2_s <= r1_s <= r2_e", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if first[0] == second[0] and abs(first[1] - second[1]) <= th:\n return True\n else:\n return False", "def is_overlap(box_1, box_2, iou_th):\n return box_1.iou(box_2) > iou_th", "def overlap(id1, id2, th):\n\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if first[0] == second[0] and abs(first[1] - second[1]) <= th:\n return True\n else:\n return False", "def doesNotOverlap( self, other):\n return not self.overlaps( other)", "def overlaps(self, other):\n return (self.right > other.left and self.left < other.right and\n self.top < other.bottom and self.bottom > other.top)", "def overlap(start_idx1, end_idx1, start_idx2, end_idx2):\n head = min(end_idx1, end_idx2)\n tail = max(start_idx1, start_idx2)\n return head >= tail", "def overlap(indices1, indices2):\n assert (len(indices1) == 2 and len(indices2) == 2)\n indices1 = sorted(indices1)\n indices2 = sorted(indices2)\n if (indices2[0] <= indices1[0] <= indices2[1]) or \\\n (indices2[0] <= indices1[1] <= indices2[1]) or \\\n (indices1[0] <= indices2[0] <= indices1[1]) or \\\n (indices1[0] <= indices2[1] <= indices1[1]):\n return True\n else:\n return False", "def have_overlap(self,\n entry1: Union[Annotation, int],\n entry2: Union[Annotation, int]) -> bool:\n entry1_: Annotation = self._entry_index[\n entry1] if isinstance(entry1, (int, np.integer)) else entry1\n entry2_: Annotation = self._entry_index[\n entry2] if isinstance(entry2, (int, np.integer)) else entry1\n\n if not isinstance(entry1_, Annotation):\n raise TypeError(f\"'entry1' should be an instance of Annotation,\"\n f\" but get {type(entry1)}\")\n\n if not isinstance(entry2_, Annotation):\n raise TypeError(f\"'entry2' should be an instance of Annotation,\"\n f\" but get {type(entry2)}\")\n\n return not (entry1_.span.begin >= entry2_.span.end or\n entry1_.span.end <= entry2_.span.begin)", "def overlaps(self, other):\n pass", "def overlaps(self, other):\n\n if self.start.equal(other.start) or self.stop.equal(other.stop):\n return True\n elif self.start.before(other.start) and self.stop.after(other.start):\n return True\n elif other.stop.after(self.start) and other.stop.before(self.stop):\n return True\n else:\n return False", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def is_overlap(self, transposon):\n if self.first <= transposon.last <= self.last:\n return True\n elif self.first <= transposon.first <= self.last:\n return True\n else:\n return False", "def can_overlap(self):\n return self.is_open", "def can_overlap(self):\n return self.is_open", "def overlaps(self, other):\n return _binary_op(arctern.ST_Overlaps, self, other).astype(bool, copy=False)", "def overlap(t1start, t1end, t2start, t2end):\n\n return (t1start <= t2start <= t1end) or (t2start <= t1start <= t2end)", "def is_overlapping(box1, box2):\n if box1[2] <= box2[0]: # If box1 is to the left of box2\n return False\n elif box1[0] >= box2[2]: # If box1 is to the right of box2\n return False\n elif box1[3] <= box2[1]: # If box1 is below box2\n return False\n elif box1[1] >= box2[3]: # If box1 is above box2\n 
return False\n else:\n return True", "def overlap_with(self, other):", "def overlaps(self, other):\n return self.start <= other.end and self.end >= other.start", "def is_colliding(network, allocations):\n for allocation in allocations:\n if network.overlaps(allocation):\n return True\n return False", "def strong_overlapping(time_1, time_2):\n\n if (time_1[0] <= time_2[0] < time_1[1]) or (time_2[0] <= time_1[0] < time_2[1]):\n return True\n\n return False", "def overlap(p1: Tuple, p2: Tuple) -> bool:\n if (p2[1] - p1[0]) * (p2[0] - p1[1]) <= 0:\n return True\n else:\n return False", "def is_overlap(bb1, bb2):\n l1, t1, r1, b1 = bb1['x'], bb1['y'], bb1['x']+bb1['w'], bb1['y']+bb1['h']\n l2, t2, r2, b2 = bb2['x'], bb2['y'], bb2['x']+bb2['w'], bb2['y']+bb2['h']\n\n if r1 > l2 and r2 > l1 and b2 > t1 and b1 > t2:\n return True\n else:\n return False", "def overlap_conflict(out, *inputs):\n from . import _bh\n\n for i in inputs:\n if not np.isscalar(i):\n if np.may_share_memory(out, i) and not _bh.same_view(out, i):\n return True\n return False", "def iOverlap (a1, a2, b1, b2):\n if b1<=a1<=b2 or b1<=a2<=b2 or a1<=b1<=a2 or a1<=b2<=a2:\n return True\n elif a1>a2 or b1>b2:\n return False\n else:\n return False", "def overlaps(self, other):\n\n if self.ll.x >= other.ur.x:\n return False\n \n if self.ll.y >= other.ur.y:\n return False\n \n if self.ur.x <= other.ll.x:\n return False\n \n if self.ur.y <= other.ll.y:\n return False\n \n return True", "def test_overlap(query, reference):\n return (reference[0] <= query[0] <= reference[1] or\n reference[0] <= query[1] <= reference[1] or\n query[0] <= reference[0] <= reference[1] <= query[1])", "def check_overlap(self, a, b):\n return utils.is_point_in_circle(b.get_pos(), a.get_pos(), a.radius)", "def _validate_no_overlap(params, error_callback):\n dhcp_set = netaddr.IPSet(netaddr.IPRange(params['dhcp_start'],\n params['dhcp_end']))\n inspection_set = netaddr.IPSet(netaddr.IPRange(params['inspection_start'],\n params['inspection_end']))\n # If there is any intersection of the two sets then we have a problem\n if dhcp_set & inspection_set:\n message = ('Inspection DHCP range \"%s-%s\" overlaps provisioning '\n 'DHCP range \"%s-%s\".' 
%\n (params['inspection_start'], params['inspection_end'],\n params['dhcp_start'], params['dhcp_end']))\n error_callback(message)", "def can_combine(self, first, second):\n # Need to check out of order issues as\n # blocks are sorted by where they start in a\n mismatch_ab = (first.a_end <= second.a\n and second.b_end <= first.b)\n mismatch_ba = (second.a_end <= first.a\n and first.b_end <= second.b)\n out_of_order = mismatch_ab or mismatch_ba\n return not out_of_order and self.jump_gap(second)", "def check_collisions(self):", "def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2", "def is_overlapping(self, region):\n if self.x2 < region.x1:\n return False # this box is left the other\n if self.x1 > region.x2:\n return False # this box is right the other\n if self.y2 < region.y1:\n return False # this box is above the other\n if self.y1 > region.y2:\n return False # this box is below the other\n return True", "def overlaps(self, chrom, start, end, strand=None):\n if (self.chrom != chrom \n or min(self.end, end) - max(self.start, start) <= 0 \n or (strand is not None and self.strand != strand)): \n return False\n return True", "def detect_overlap_1d(first, first_length, second, second_length):\n first_end = first + first_length - 1\n second_end = second + second_length - 1\n return second_end >= first and first_end >= second", "def is_overlap_sorted_values(v1, v2, w1, w2):\r\n if (v2 < w1) or (v1 > w2):\r\n return False\r\n else:\r\n return True", "def check_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_overlaps - something went wrong... 
no data?\")\n pass", "def scaffold_overlap(indices_1: Set[int],\n indices_2: Set[int],\n index_to_scaffold: Dict[int, str]) -> float:\n scaffolds_1 = {index_to_scaffold[index] for index in indices_1}\n indices_in_2_with_scaffold_in_1 = {index for index in indices_2 if index_to_scaffold[index] in scaffolds_1}\n overlap = len(indices_in_2_with_scaffold_in_1) / len(indices_2)\n\n return overlap", "def has_atomic_overlaps(self):\n atomic_overlaps = self._get_atomic_overlaps()\n return len(atomic_overlaps) > 0", "def test_compute_overlap(self):\n # box1 contained in box2\n box1 = ((1, 2), (1, 2), (1, 2))\n box2 = ((1, 3), (1, 3), (1, 3))\n mapping = {box1: [1, 2, 3, 4], box2: [1, 2, 3, 4, 5]}\n # box1 in box2, so complete overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box1, box2), 1)\n # 4/5 atoms in box2 in box1, so 80 % overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box2, box1), .8)", "def isOverlap(peak, ref_distance_map, ref_distance_indexmap):\n chromosome = peak[0]\n start = int(peak[1])\n end = int(peak[2])\n\n if chromosome not in ref_distance_indexmap:\n return False\n\n indexes = ref_distance_indexmap[chromosome]\n\n left_index = bisect(indexes, start)\n right_index = bisect(indexes, end)\n\n # the rational is if overlap, the distance is zero\n candidate_regions = set()\n\n potential_indexes = []\n\n left_index = left_index - 10 if left_index - 10 >= 0 else 0\n for index in indexes[left_index - 1: right_index+10]:\n potential_indexes.append(index)\n\n for feature_position in potential_indexes:\n candidate_regions = candidate_regions.union(ref_distance_map[chromosome][feature_position])\n\n for region in candidate_regions:\n if start <= region.start <= end:\n return True\n if start <= region.end <= end:\n return True\n if region.start <= start and end <= region.end:\n return True\n return False", "def does_overlap(self, start, stop):\n\n ranges = [list(range(key, self.map[key] + 1)) for key in self.map]\n all_coords = [item for sublist in ranges for item in sublist]\n # removing all_coords implementation until we write some tests\n for i in range(start, stop + 1):\n if i in all_coords:\n return True\n return False", "def overlap(x,y):\n if (x[0]<=y[-1] and x[-1]>y[0]) or (y[0]<=x[-1] and y[-1]>x[0]):\n return 1\n else: return 0", "def overlaps(self, other: \"Availability\", strict: bool) -> bool:\n\n if not isinstance(other, Availability):\n raise Exception(\"Please provide an Availability object\")\n\n if strict:\n return (\n (self.start <= other.start < self.end)\n or (self.start < other.end <= self.end)\n or (other.start <= self.start < other.end)\n or (other.start < self.end <= other.end)\n )\n return (\n (self.start <= other.start <= self.end)\n or (self.start <= other.end <= self.end)\n or (other.start <= self.start <= other.end)\n or (other.start <= self.end <= other.end)\n )", "def check_overlap(lorentz_params_1, lorentz_params_2):\n if lorentz_params_1 is None or lorentz_params_2 is None:\n return False\n [low_lorentz, high_lorentz] = sorted(\n [lorentz_params_1, lorentz_params_2], key=lambda l: l[1])\n low_fit_range = find_single_fit_range(low_lorentz)\n high_fit_range = find_single_fit_range(high_lorentz)\n return low_fit_range[2] > high_fit_range[1]", "def test_overlap(self):\r\n rect1 = Rectangle(10, 20, 30, 40)\r\n rect2 = Rectangle(50, 60, 70, 80)\r\n\r\n # overlap should be commutative\r\n assert not rect1.overlap_with(rect2)\r\n assert not rect2.overlap_with(rect1)\r\n assert not 
Rectangle.overlap(rect1, rect2)\r\n assert not Rectangle.overlap(rect2, rect1)\r\n\r\n rect1 = Rectangle(-10, -20, 10, 60)\r\n rect2 = Rectangle(0, 50, 100, 200)\r\n assert rect1.overlap_with(rect2)\r\n assert rect2.overlap_with(rect1)\r\n assert Rectangle.overlap(rect1, rect2)\r\n assert Rectangle.overlap(rect2, rect1)\r\n\r\n # rectangles with only same boarder are not considered overlapped\r\n rect1 = Rectangle(-30, -10, -20, 0)\r\n rect2 = Rectangle(-20, -5, 30, 20)\r\n rect3 = Rectangle(-40, 0, 30, 20)\r\n assert not rect1.overlap_with(rect2)\r\n assert not rect1.overlap_with(rect3)\r\n assert not Rectangle.overlap(rect2, rect1)\r\n assert not Rectangle.overlap(rect3, rect1)", "def if_overlap(self, x, y) -> bool:\n if self.pos[y][x] != '-':\n print('此坐标已有棋子,请仔细观察棋盘')\n return True\n return False", "def covers(self, other):\n return self._start <= other._start and self._end >= other._end", "def rOverlap (x1, y1, w1, h1, x2, y2, w2, h2):\n if x1<=x2<=(x1+w1) or y1<=y2<=(y1+h1):\n return True\n elif x1<=(x2+w2)<=(x1+w1):\n return True\n else:\n return False", "def _bbox_overlap(self, other):\n reg0 = self.bbox\n reg1 = other.bbox\n return (reg0[0] <= reg1[2] and reg1[0] <= reg0[2] and\n reg0[1] <= reg1[3] and reg1[1] <= reg0[3])", "def overlaps(self, other):\n\n isOverlaps = False\n\n if self.ipv4 is not None:\n isOverlaps = self.ipv4.overlaps(other.ipv4) \n\n if isOverlaps is False:\n if self.ipv6 is not None:\n isOverlaps = self.ipv6.overlaps(other.ipv6) \n\n return isOverlaps", "def isIntvOverlapped(rOne, rTwo):\n\tclear = rOne[1] <= rTwo[0] or rOne[0] >= rTwo[1] \n\treturn not clear", "def overlaps(x1, x2, y1, y2):\n\n return x1 <= y2 and y1 <= x2", "def overlap(table1, table2):\n out = np.zeros(np.size(table1, axis=0), dtype='bool')\n for i in range(np.size(table1, axis=0)):\n s1_s2 = table1[i, 0] < table2[:, 0] \n s1_e2 = table1[i, 0] <= table2[:, 1]\n e1_s2 = table1[i, 1] < table2[:, 0]\n e1_e2 = table1[i, 1] < table2[:, 1]\n # no overlap occurs when all four parameters above either == 0 or 1\n sum_params = np.sum(np.array([s1_s2, s1_e2, e1_s2, e1_e2]), axis=0)\n olap = (sum_params == 1) | (sum_params == 2) | (sum_params == 3)\n out[i] = np.any(olap)\n return out", "def calculate_overlap(self, r1, r2):\n\n # We know that reads that can be glued,\n # share at least half of their length.\n # Make sure one is not shorter than\n # the half of another.\n\n if len(r1) / 2 + len(r1) % 2 <= len(r2) \\\n and len(r2) / 2 + len(r2) % 2 <= len(r1):\n\n # prepare second halves for overlap pre-check\n\n tail1 = r1[len(r1) / 2:]\n tail2 = r2[len(r2) / 2:]\n \n # case 1: r1 contains r2 completely\n #\n # For example,\n #\n # ATCGCCGGAT\n # TCGCCGGA\n \n pos = r1.find(r2)\n if pos != -1:\n self.reads[r1].overlaps[r2] = pos + len(r2) - len(r1)\n \n # case 2: r2 contains r1 completely\n #\n # For example,\n #\n # TCGCCGGA\n # ATCGCCGGAT\n \n pos = r2.find(r1)\n if pos != -1:\n self.reads[r2].overlaps[r1] = pos + len(r1) - len(r2)\n \n # case 3: end of r1 overlaps with beginning of r2\n #\n # For example,\n #\n # ATCGCCGGAT\n # TCGCCGGATGC\n #\n # First check that at least half of r1 is in r2\n # If there is a match, calculate the expected length \n # of overlap and check if they indeed overlap.\n\n \n pos = r2.find(tail1)\n if pos != -1:\n overlap = pos + len(tail1)\n if r1[-overlap:] == r2[:overlap]:\n self.reads[r1].overlaps[r2] = len(r2) - overlap\n \n # case 4: end of r2 overlaps with beginning of r1\n #\n # For example,\n #\n # CGCCGGATCC\n # TCGCCGGAT\n #\n # First check that at least half of 
r2 is in r1\n # If there is a match, calculate the expected length \n # of overlap and check if they indeed overlap.\n \n pos = r1.find(tail2)\n if pos != -1: \n overlap = pos + len(tail2)\n if r2[-overlap:] == r1[:overlap]:\n self.reads[r2].overlaps[r1] = len(r1) - overlap", "def is_overlapping_lane_seq(lane_seq1: Sequence[int], lane_seq2: Sequence[int]) -> bool:\n\n if lane_seq2[0] in lane_seq1[1:] and lane_seq1[-1] in lane_seq2[:-1]:\n return True\n elif set(lane_seq2) <= set(lane_seq1):\n return True\n return False", "def overlaps(self, other, particle_r):\n return self.distance_to(other) < (particle_r ** 2)", "def overlap(a: Pos, b: Pos, exact: bool = False) -> bool:\n if a == b:\n return True\n elif exact:\n return False\n s0, e0 = a\n s1, e1 = b\n if in_interval(s1, s0, e0):\n return True\n if in_interval(e1, s0, e0):\n return True\n if in_interval(s0, s1, e1):\n return True\n if in_interval(e0, s1, e1):\n return True\n return False", "def intersects(self, other): # -> bool:\n ...", "def bbox_overlap(bbox_1: Sequence, bbox_2: Sequence) -> bool:\n if (bbox_1[0] > bbox_2[0]) or (bbox_1[1] > bbox_2[1]):\n return False\n if (bbox_1[2] < bbox_2[2]) or (bbox_1[3] < bbox_2[3]):\n return False\n\n return True", "def is_crossed(self):\n left_boundary_clusters = np.extract(self.cluster[0] > 0,\n self.cluster[0])\n right_boundary_clusters = np.extract(self.cluster[-1] > 0,\n self.cluster[-1])\n return np.in1d(left_boundary_clusters, right_boundary_clusters).any()", "def overlap(\n state: State, # pylint: disable=unused-argument\n action: Action, # pylint: disable=unused-argument\n next_state: State,\n *,\n object_type: Type[GridObject],\n) -> bool:\n return isinstance(next_state.grid[next_state.agent.position], object_type)", "def overlap(line1, line2):\n\tx1, x2 = line1\n\tx3, x4 = line2\n\tonLeft = min(x1, x2) <= min(x3, x4)\n\tif onLeft:\n\t\treturn max(max((x1, x2)) - min((x3, x4)), 0) > 0\n\treturn max(max((x3, x4)) - min((x1, x2)),0) > 0", "def check_overlap(x1,x2,x3,x4):\r\n if x3<x2 and x4>=x2:\r\n return True\r\n elif x3<=x1 and x4>x1:\r\n return True\r\n elif x3>x1 and x4<x2:\r\n return True\r\n elif x3<=x1 and x4>=x2:\r\n return True\r\n else:\r\n return False", "def time_overlap(d1, d2):\n gt1, gt2, vt1, vt2 = parse_date(d1[\"t1\"]), parse_date(d1[\"t2\"]), parse_date(d2[\"t1\"]), parse_date(d2[\"t2\"])\n return (gt1 != vt2) and (vt1 != gt2) and (gt1 <= vt2) and (vt1 <= gt2)", "def tOverlap(ts1, ts2, *args, **kwargs):\n idx_1in2 = tOverlapHalf(ts2, ts1, *args, **kwargs)\n idx_2in1 = tOverlapHalf(ts1, ts2, *args, **kwargs)\n if len(idx_2in1) == 0:\n idx_2in1 = None\n if len(idx_1in2) == 0:\n idx_1in2 = None\n return idx_1in2, idx_2in1", "def overlaps(a, b, **kwargs):\n return lib.overlaps(a, b, **kwargs)", "def is_subspan(a: Tuple[int, int], b: Tuple[int, int]) -> bool:\n if a[0] >= b[0] and a[1] <= b[1]:\n return True\n else:\n return False", "def overlap(self, other):\n\t\toverlap = self.contains(other.startX, other.startY) or \\\n\t\t\tself.contains(other.startX, other.endY) or \\\n\t\t\tself.contains(other.endX, other.startY) or \\\n\t\t\tself.contains(other.endX, other.endY)\n\n\t\tintersectY1 = self.startY <= other.startY <= self.endY and \\\n\t\t\tself.startY <= other.endY <= self.endY and \\\n\t\t\t(other.startX <= self.startX <= other.endX or \\\n\t\t\tother.startX <= self.endX <= other.endX)\n\n\t\tintersectY2 = other.startY <= self.startY <= other.endY and \\\n\t\t\t other.startY <= self.endY <= other.endY and \\\n\t\t\t (self.startX <= other.startX <= self.endX or 
\\\n\t\t\t self.startX <= other.endX <= self.endX)\n\n\t\tintersectY = intersectY1 or intersectY2\n\n\t\tintersectX1 = self.startX <= other.startX <= self.endY and \\\n\t\t\tself.startX <= other.endX <= self.endX and \\\n\t\t (other.startY <= self.startY <= other.endY or \\\n\t\t\tother.startY <= self.endY <= other.endY)\n\n\t\tintersectX2 = other.startX <= self.startX <= other.endX and \\\n\t\t\tother.startX <= self.endX <= other.endX and \\\n\t\t (self.startY <= other.startY <= self.endY or \\\n\t\t\tself.startY <= other.endY <= self.endY)\n\n\t\tintersectX = intersectX1 or intersectX2\n\n\t\treturn overlap or intersectX or intersectY", "def check_recon_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'recon_spec'):\n for i, spectrum in enumerate(self.recon_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.recon_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.recon_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.recon_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.recon_spec[spectrum]._add_to_overlapping_filters(filtername)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... no data?\")\n pass", "def overlap(l1, l2):\n l1, l2 = list(l1), list(l2)\n l1.sort()\n l2.sort()\n lines = [l1, l2]\n lines.sort(key=lambda x: x[0])\n if lines[0][1] >= lines[1][0]:\n return True\n return False", "def overlap_rect(rec1, rec2):\n # true if rec2 is left of rec1\n a = rec2[2] <= rec1[0]\n \n # true if rec2 is right of rec1\n b = rec1[2] <= rec2[0]\n\n # true if rec2 is below rec1\n c = rec2[3] <= rec1[1]\n\n # true if rec2 is above rec1\n d = rec1[3] <= rec2[1]\n\n return not (a or b or c or d)", "def overlaps(self,b):\n if b.chr != self.chr :return False\n if (self.start <= b.start and b.start <=self.end) or (self.start >= b.start and self.start <= b.end):\n return True\n else:\n return False", "def do_box_overlap(coord1, coord2):\n return (\n (coord1[0] - 2 < coord2[0] and coord1[1] + 2 > coord2[0]\n or coord2[0] - 2 < coord1[0] and coord2[1] + 2 > coord1[0]) \n and (coord1[2] - 2 < coord2[2] and coord1[3] + 2 > coord2[2]\n or coord2[2] - 2 < coord1[2] and coord2[3] + 2 > coord1[2]))", "def is_overlappedResort(self, resort):\n for corner in resort.corners:\n if self.is_point_in(corner):\n return True\n for corner in self.corners:\n if resort.is_point_in(corner):\n return True\n if self.intersection_area(resort) > 0:\n return True\n return False", "def check_sim_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.sim_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.sim_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.sim_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < 
self.sim_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.sim_spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... no data?\")\n pass", "def intersects(self, other: RectangularRoom) -> bool:\n return (\n self.x1 <= other.x2\n and self.x2 >= other.x1\n and self.y1 <= other.y2\n and self.y2 >= other.y1\n )", "def overlapping(x,y):\n for i in range(0,len(x)):\n for j in range(0,len(y)):\n if x[i] == y[j]:\n return True\n else:\n continue#reapet until finished all number in the list\n return False", "def collision_check(self):\n return True", "def feat_overlap(f1, f2):\n f1start = int(f1[3])\n f1end = int(f1[4])\n f2start = int(f2[3])\n f2end = int(f2[4])\n\n if f1start <= f2end and f1end >= f2start:\n return True\n return False", "def check_overlap(current, hit, overlap = 200):\n for prev in current:\n p_coords = prev[2:4]\n coords = hit[2:4]\n if get_overlap(coords, p_coords) >= overlap:\n return True\n return False", "def dates_intervals_are_overlapped(start_1, end_1, start_2, end_2):\n return end_1 >= start_2 and end_2 >= start_1", "def overlaps(self, region):\n region = as_region(region)\n\n if region.chromosome != self.chromosome:\n return False\n\n if self.end is None or region.start is None or region.start <= self.end:\n if self.start is None or region.end is None or region.end >= self.start:\n return True\n return False" ]
[ "0.72638744", "0.7062225", "0.7060286", "0.69966316", "0.6907729", "0.68349934", "0.6770295", "0.6767552", "0.67658687", "0.6728309", "0.66423035", "0.6638168", "0.66367257", "0.6632385", "0.66321814", "0.6605467", "0.6596014", "0.65437245", "0.65411097", "0.6536112", "0.6519489", "0.6512231", "0.6496759", "0.6479603", "0.64673066", "0.6425286", "0.6416054", "0.6410895", "0.6409679", "0.637295", "0.637295", "0.6361526", "0.63483495", "0.6339052", "0.63353115", "0.6309295", "0.62984705", "0.6294561", "0.6290621", "0.6275958", "0.6217237", "0.6212334", "0.6210587", "0.61857724", "0.61849606", "0.61836386", "0.6182477", "0.6168232", "0.61663395", "0.611331", "0.611274", "0.6111306", "0.61060864", "0.6101429", "0.6086714", "0.6082333", "0.603873", "0.60335875", "0.60321593", "0.6030186", "0.6024905", "0.60125315", "0.6010601", "0.60069996", "0.59914786", "0.59810203", "0.59758216", "0.59688437", "0.5962899", "0.59618235", "0.5957931", "0.594542", "0.59451616", "0.5937292", "0.5936066", "0.59277976", "0.5922359", "0.5920192", "0.5919372", "0.59109557", "0.5889809", "0.5867466", "0.5867004", "0.58546686", "0.5842321", "0.5831906", "0.5821266", "0.5814703", "0.58133966", "0.578955", "0.5784993", "0.5772935", "0.576818", "0.5766341", "0.57525945", "0.5751193", "0.57430553", "0.57400113", "0.5728027", "0.5715962" ]
0.7538921
0
Ensures none of the shards overlap with each other.
def validate_non_overlapping_shards_metadata(shards: List[ShardMetadata]): # TODO: evaluate optimizing this if needed. for i in range(len(shards)): for j in range(i + 1, len(shards)): if _check_shard_metadata_pair_overlap(shards[i], shards[j]): raise ValueError(f'Shards {shards[i]} and {shards[j]} overlap')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_overlap(self):\n return False", "def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata):\n\n # For each dim of each shard, check if one shard resides on the other\n # end of second shard with respect to that dim. As an example for a 2D\n # shard, we would check if one shard is above or on the left of the\n # other shard.\n ndims = len(shard1.shard_offsets)\n for i in range(ndims):\n if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_lengths[i]:\n return False\n if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_lengths[i]:\n return False\n\n return True", "def overlaps(self, other): # -> bool:\n ...", "def load_overlapping_shards():\n while not event_heap and shards:\n # Try to pull events from unread shards.\n load_next_shards(shards[0].cmp_id)\n\n if event_heap and shards:\n # Pull events from all shards that overlap with the next event to be\n # yielded.\n load_next_shards(event_heap[0].id)\n elif not iterators:\n # No events in the heap and no active iterators? We're done!\n return\n\n shards_with_events = set(event.stream_shard for event in event_heap)\n for shard in iterators.keys():\n if shard in shards_with_events:\n continue\n try:\n it = iterators[shard]\n event = it.next()\n heapq.heappush(event_heap, event)\n except StopIteration:\n del iterators[shard]", "def check_collisions(self):", "def sstable_marking_test_not_intersecting_all_ranges(self):\n cluster = self.cluster\n cluster.populate(4).start(wait_for_binary_proto=True)\n node1, node2, node3, node4 = cluster.nodelist()\n\n debug(\"Inserting data with stress\")\n node1.stress(['write', 'n=3', 'no-warmup', '-rate', 'threads=1', '-schema', 'replication(factor=3)'])\n\n debug(\"Flushing nodes\")\n cluster.flush()\n\n repair_options = '' if self.cluster.version() >= '2.2' else '-inc -par'\n\n debug(\"Repairing node 1\")\n node1.nodetool(\"repair {}\".format(repair_options))\n debug(\"Repairing node 2\")\n node2.nodetool(\"repair {}\".format(repair_options))\n debug(\"Repairing node 3\")\n node3.nodetool(\"repair {}\".format(repair_options))\n debug(\"Repairing node 4\")\n node4.nodetool(\"repair {}\".format(repair_options))\n\n for out in (node.run_sstablemetadata(keyspace='keyspace1').stdout for node in cluster.nodelist() if len(node.get_sstables('keyspace1', 'standard1')) > 0):\n self.assertNotIn('Repaired at: 0', out)", "def overlap_with(self, other):", "def _assert_no_scope_overlap(children) -> None: # noqa: ANN001\n for c0, c1 in itertools.combinations(children, 2):\n if set(c0.scope) & set(c1.scope):\n raise OverlappingScopesException(\n \"Children {} and {} have overlapping scopes\".format(c0, c1)\n )", "def overlaps(self, other):\n pass", "def doesNotOverlap( self, other):\n return not self.overlaps( other)", "def conflict_check() ->None:\r\n global conflict_space\r\n conflict_space = np.zeros(mShape)\r\n for x in range(shape):\r\n for y in range(shape):\r\n for z in range(y+1, shape):\r\n if example[x, y] == example[x, z]:\r\n conflict_space[x, y] = example[x, y]\r\n conflict_space[x, z] = example[x, z]\r\n if example[y, x] == example[z, x]:\r\n conflict_space[y, x] = example[y, x]\r\n conflict_space[z, x] = example[z, x]", "def validate_collision(self):\n pass", "def overlap_conflict(out, *inputs):\n from . 
import _bh\n\n for i in inputs:\n if not np.isscalar(i):\n if np.may_share_memory(out, i) and not _bh.same_view(out, i):\n return True\n return False", "def check_overlap(a, b):\n if a[0] >= b[2] or a[1] >= b[3] or a[2] <= b[0] or a[3] <= b[1]:\n return False\n return True", "def span_overlap(a: Tuple[int, int], b: Tuple[int, int]) -> bool:\n return not (a[0] > b[1] or a[1] < b[0])", "def _validate_no_overlap(params, error_callback):\n dhcp_set = netaddr.IPSet(netaddr.IPRange(params['dhcp_start'],\n params['dhcp_end']))\n inspection_set = netaddr.IPSet(netaddr.IPRange(params['inspection_start'],\n params['inspection_end']))\n # If there is any intersection of the two sets then we have a problem\n if dhcp_set & inspection_set:\n message = ('Inspection DHCP range \"%s-%s\" overlaps provisioning '\n 'DHCP range \"%s-%s\".' %\n (params['inspection_start'], params['inspection_end'],\n params['dhcp_start'], params['dhcp_end']))\n error_callback(message)", "def scaffold_overlap(indices_1: Set[int],\n indices_2: Set[int],\n index_to_scaffold: Dict[int, str]) -> float:\n scaffolds_1 = {index_to_scaffold[index] for index in indices_1}\n indices_in_2_with_scaffold_in_1 = {index for index in indices_2 if index_to_scaffold[index] in scaffolds_1}\n overlap = len(indices_in_2_with_scaffold_in_1) / len(indices_2)\n\n return overlap", "def overlap(a, b):\n return not(a[2]<=b[0] or a[3]<=b[1] or a[0]>=b[2] or a[1]>=b[3])", "def test_overlap(self):\r\n rect1 = Rectangle(10, 20, 30, 40)\r\n rect2 = Rectangle(50, 60, 70, 80)\r\n\r\n # overlap should be commutative\r\n assert not rect1.overlap_with(rect2)\r\n assert not rect2.overlap_with(rect1)\r\n assert not Rectangle.overlap(rect1, rect2)\r\n assert not Rectangle.overlap(rect2, rect1)\r\n\r\n rect1 = Rectangle(-10, -20, 10, 60)\r\n rect2 = Rectangle(0, 50, 100, 200)\r\n assert rect1.overlap_with(rect2)\r\n assert rect2.overlap_with(rect1)\r\n assert Rectangle.overlap(rect1, rect2)\r\n assert Rectangle.overlap(rect2, rect1)\r\n\r\n # rectangles with only same boarder are not considered overlapped\r\n rect1 = Rectangle(-30, -10, -20, 0)\r\n rect2 = Rectangle(-20, -5, 30, 20)\r\n rect3 = Rectangle(-40, 0, 30, 20)\r\n assert not rect1.overlap_with(rect2)\r\n assert not rect1.overlap_with(rect3)\r\n assert not Rectangle.overlap(rect2, rect1)\r\n assert not Rectangle.overlap(rect3, rect1)", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def is_colliding(network, allocations):\n for allocation in allocations:\n if network.overlaps(allocation):\n return True\n return False", "def hypercubes_overlap(hypercube1, hypercube2):\n if not isinstance(hypercube1, Volume) or \\\n not isinstance(hypercube2, Volume):\n raise TypeError()\n\n lowercorner1, uppercorner1 = hypercube1.get_corners()\n lowercorner2, uppercorner2 = hypercube2.get_corners()\n nb_dims = len(uppercorner1)\n \n for i in range(nb_dims):\n if not uppercorner1[i] > lowercorner2[i] or \\\n not uppercorner2[i] > lowercorner1[i]:\n return False\n\n return True", "def collision_check(self):\n return True", "def check_collisions(self):\n for tail in self.tail:\n if tail.position == self.head.position:\n self.die()\n\n future_pos = Position(self.head_x + self.direction.move_x * Const.SQUARE_SIZE,\n self.head_y + self.direction.move_y * Const.SQUARE_SIZE)\n\n if future_pos.x < 0 or future_pos.x > Const.G_B_W - Const.SQUARE_SIZE or \\\n future_pos.y < 0 or future_pos.y > Const.G_B_H - Const.SQUARE_SIZE:\n self.die()", 
"def test_overlapping_alignments_2():\n generate_bam_file(gqd.sam_content, gqd.sam_bam_prefix)\n gqd.gene_wise_quantification._min_overlap = 5\n sam = pysam.Samfile(gqd.sam_bam_prefix + \".bam\")\n # 1 overlapping base in the 5' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 10))) == []\n # 4 overlapping base in the 5' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 13))) == []\n # 5 overlapping base in the 5' end of the reads => okay\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 14))) == [\n \"myread:01\", \"myread:02\", \"myread:03\", \"myread:04\", \"myread:05\"]\n # 1 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 19, 23))) == []\n # 4 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 16, 23))) == []\n # 5 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 15, 23))) == [\n \"myread:01\", \"myread:02\", \"myread:03\", \"myread:04\", \"myread:05\"]", "def _overlap(x1, w1, x2, w2):\r\n if x1+w1 < x2-w2: return False\r\n if x1-w1 > x2+w2: return False\r\n\r\n return True", "def test_compute_overlap(self):\n # box1 contained in box2\n box1 = ((1, 2), (1, 2), (1, 2))\n box2 = ((1, 3), (1, 3), (1, 3))\n mapping = {box1: [1, 2, 3, 4], box2: [1, 2, 3, 4, 5]}\n # box1 in box2, so complete overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box1, box2), 1)\n # 4/5 atoms in box2 in box1, so 80 % overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box2, box1), .8)", "def check_consistent(self):\n # * END LIST The end list itself must be consistent.\n # ** Each end must be of understood type\n # ** Each end must have a valid sequence or no sequence\n # ** There must be no more than one instance of each name\n # ** WARN if there are ends with no namecounts\n # * TILE LIST\n # ** each tile must be of understood type (must parse)\n # ** ends in the tile list must be consistent (must merge)\n # ** there must be no more than one tile with each name\n # self.tiles.check_consistent()\n endsfromtiles = self.tiles.glues_from_tiles()\n\n # ** WARN if any end that appears does not have a complement used or vice versa\n # ** WARN if there are tiles with no name\n # * TILE + END\n # ** The tile and end lists must merge validly\n # (checks sequences, adjacents, types, complements)\n self.glues | endsfromtiles\n\n # ** WARN if tilelist has end references not in ends\n # ** WARN if merge is not equal to the endlist\n # ** WARN if endlist has ends not used in tilelist\n # * ADAPTERS / SEEDS\n # SEED stuff was here", "def __has_conflicting_node_names(self):\n # check length of sets to determine if overlap exists\n return len({node.get_name() for node in self.get_nodeset()}) != len(self.get_nodeset())", "def decrease_overlap(indices_1: Set[int],\n indices_2: Set[int],\n index_to_scaffold: Dict[int, str],\n scaffold_to_indices: Dict[str, Set[int]],\n indices_1_size: float) -> Tuple[Set[int], Set[int]]:\n # Make copies to prevent altering input set\n indices_1 
= deepcopy(indices_1)\n indices_2 = deepcopy(indices_2)\n\n # Determine scaffolds in each of the two sets\n scaffolds_1 = {index_to_scaffold[index] for index in indices_1}\n scaffolds_2 = {index_to_scaffold[index] for index in indices_2}\n union = scaffolds_1 | scaffolds_2\n intersection = scaffolds_1 & scaffolds_2\n\n # Return indices in cases when overlap can't be changed\n if len(union) <= 1 or len(intersection) == 0:\n return indices_1, indices_2\n\n # If only one scaffold in intersection, randomly choose which set to move it to\n if len(intersection) == 1:\n scaffold = intersection.pop()\n indices = scaffold_to_indices[scaffold]\n\n indices_1 -= indices\n indices_2 -= indices\n\n indices_set = random.choice([indices_1, indices_2])\n indices_set |= indices\n\n return indices_1, indices_2\n\n # Select random scaffold and move all indices to indices_2\n scaffold_to_2 = random.choice(sorted(list(intersection)))\n indices_to_2 = scaffold_to_indices[scaffold_to_2]\n indices_1 -= indices_to_2\n indices_2 |= indices_to_2\n intersection.remove(scaffold_to_2)\n\n # Select scaffold which is closest in size to above scaffold\n scaffold_to_2_length = len(indices_to_2)\n best_size_diff = float('inf')\n best_scaffold = None\n\n # Guarantee consistent randomness\n intersection = sorted(list(intersection))\n random.shuffle(intersection)\n\n for scaffold in intersection:\n scaffold_to_1_length = len(scaffold_to_indices[scaffold])\n size_diff = abs(scaffold_to_1_length / (scaffold_to_1_length + scaffold_to_2_length) - indices_1_size)\n\n if size_diff < best_size_diff:\n best_size_diff = size_diff\n best_scaffold = scaffold\n\n # Move all indices of this scaffold to indices_1\n indices = scaffold_to_indices[best_scaffold]\n indices_2 -= indices\n indices_1 |= indices\n\n return indices_1, indices_2", "def strong_overlapping(time_1, time_2):\n\n if (time_1[0] <= time_2[0] < time_1[1]) or (time_2[0] <= time_1[0] < time_2[1]):\n return True\n\n return False", "def check_conflicts(overlapping, vocab_used):\n conflicts = {}\n for square in overlapping:\n overlapped_blanks = overlapping.get(square)\n # if word at blank is used multiple times add a conflict for the corresponding blank\n for blank in overlapped_blanks:\n if blank.index not in conflicts:\n conflicts[blank.index] = vocab_used[blank.word] - 1\n # examine for letter conflict at the overlapped square\n # assume there are no nested words so there can only be two overlapping blanks at a square\n blankA = overlapped_blanks[0]\n blankB = overlapped_blanks[1]\n # word letters in blank are aligned with it's spanning squares\n blankA_letter = blankA.word[blankA.squares.index(square)]\n blankB_letter = blankB.word[blankB.squares.index(square)]\n if blankA_letter != blankB_letter:\n # not the same so add conflicts\n conflicts[blankA.index] += 1\n conflicts[blankB.index] += 1\n return conflicts", "def overlaps(self, other):\n return _binary_op(arctern.ST_Overlaps, self, other).astype(bool, copy=False)", "def increase_overlap(indices_1: Set[int],\n indices_2: Set[int],\n index_to_scaffold: Dict[int, str],\n scaffold_to_indices: Dict[str, Set[int]],\n indices_1_size: float) -> Tuple[Set[int], Set[int]]:\n # Make copies to prevent altering input set\n indices_1 = deepcopy(indices_1)\n indices_2 = deepcopy(indices_2)\n\n # Determine scaffolds in each of the two sets which have at least two indices\n scaffolds_1 = {index_to_scaffold[index] for index in indices_1\n if len(scaffold_to_indices[index_to_scaffold[index]]) >= 2}\n scaffolds_2 = {index_to_scaffold[index] 
for index in indices_2\n if len(scaffold_to_indices[index_to_scaffold[index]]) >= 2}\n union = scaffolds_1 | scaffolds_2\n\n # If 0 or 1 scaffolds, can't increase overlap so just return indices\n if len(union) <= 1:\n return indices_1, indices_2\n\n # Determine scaffolds which are only in one set or the other\n scaffolds_1_only = scaffolds_1 - scaffolds_2\n scaffolds_2_only = scaffolds_2 - scaffolds_1\n\n # Select one scaffold from each set if possible\n selected_scaffolds = []\n\n if len(scaffolds_1_only) != 0:\n selected_scaffolds.append(random.choice(sorted(list(scaffolds_1_only))))\n if len(scaffolds_2_only) != 0:\n selected_scaffolds.append(random.choice(sorted(list(scaffolds_2_only))))\n\n # Share indices from selected scaffolds\n for scaffold in selected_scaffolds:\n indices = scaffold_to_indices[scaffold]\n\n indices_1 -= indices\n indices_2 -= indices\n\n indices = sorted(list(indices))\n random.shuffle(indices)\n\n # Divide up indices proportionally according to size_ratio\n size_1 = int(indices_1_size * len(indices))\n indices_1.update(indices[:size_1])\n indices_2.update(indices[size_1:])\n\n return indices_1, indices_2", "def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)", "def test_overlapping_events_places_contained_in_unchecked(\n sample_events, blacksmithing, forge, metalshop, caplog):\n caplog.set_level(logging.INFO)\n places = [blacksmithing, forge, metalshop]\n event1, event2 = sample_events.make_overlapping_events()\n overlap_events(event1, event2, blacksmithing, blacksmithing, places)\n assert len(caplog.messages) == 2\n message = caplog.messages[0]\n assert \"Schedule conflict: place='Forge'\" in message \\\n or \"Schedule conflict: place='Metal Shop'\" in message\n expected_conflict_times = \"Conflict(start_time='{}', end_time='{}',\".format(\n event2.start_time, event1.end_time)\n assert expected_conflict_times in message\n assert event1.meetup_id in message\n assert event2.meetup_id in message", "def can_combine(self, first, second):\n # Need to check out of order issues as\n # blocks are sorted by where they start in a\n mismatch_ab = (first.a_end <= second.a\n and second.b_end <= first.b)\n mismatch_ba = (second.a_end <= first.a\n and first.b_end <= second.b)\n out_of_order = mismatch_ab or mismatch_ba\n return not out_of_order and self.jump_gap(second)", "def test_no_bleed(scheduler):\n d1 = (datetime(2011, 1, 1, 15, 0), datetime(2011, 1, 1, 16, 0))\n d2 = (datetime(2011, 1, 1, 16, 0), datetime(2011, 1, 1, 17, 0))\n\n a1 = scheduler.allocate(d1)[0]\n a2 = scheduler.allocate(d2)[0]\n\n scheduler.commit()\n\n assert not a1.overlaps(*d2)\n assert not a2.overlaps(*d1)\n\n # expect no exceptions\n scheduler.reserve('[email protected]', d2)\n scheduler.reserve('[email protected]', d1)", "def can_overlap(self):\n return self.is_open", "def can_overlap(self):\n return self.is_open", "def is_overlap(box_1, box_2, iou_th):\n return box_1.iou(box_2) > iou_th", "def consistent(self, assignment):\n for node1 in assignment:\n for node2 in assignment:\n\n if node1 != node2:\n #returns False if any assignmed words are the same\n if assignment[node1] == assignment[node2]:\n return False\n\n overlap= self.crossword.overlaps[node1,node2]\n if overlap != None:\n #checks if words assigned to node overlaps are the same letter\n if assignment[node1][overlap[0]] != assignment[node2][overlap[1]]:\n return False\n\n return True", "def is_crossed(self):\n left_boundary_clusters = np.extract(self.cluster[0] > 0,\n self.cluster[0])\n 
right_boundary_clusters = np.extract(self.cluster[-1] > 0,\n self.cluster[-1])\n return np.in1d(left_boundary_clusters, right_boundary_clusters).any()", "def overlaps(self, other):\n return (self.right > other.left and self.left < other.right and\n self.top < other.bottom and self.bottom > other.top)", "def _validate_shards(self, phase_steps):\n step_expanded_flags = [step.data.get('expanded', False) for step in phase_steps]\n assert all(step_expanded_flags) or not any(step_expanded_flags), \\\n \"Mixed expanded and non-expanded steps in phase!\"\n expanded = step_expanded_flags[0]\n if not expanded:\n # This was the initial phase, not the expanded phase. No need to\n # check shards.\n return Result.passed\n\n step_shard_counts = [step.data.get('shard_count', 1) for step in phase_steps]\n assert len(set(step_shard_counts)) == 1, \"Mixed shard counts in phase!\"\n shard_count = step_shard_counts[0]\n if len(phase_steps) != shard_count:\n # TODO(josiah): we'd like to be able to record a FailureReason\n # here, but currently a FailureReason must correspond to a JobStep.\n logging.error(\"Build failed due to incorrect number of shards: expected %d, got %d\",\n shard_count, len(phase_steps))\n return Result.unknown\n return Result.passed", "def overlap(component1, component2):\n if component1[0].start <= component2[0].stop and component2[0].start <= component1[0].stop:\n if component1[1].start <= component2[1].stop and component2[1].start <= component1[1].stop:\n return True\n return False", "def is_overlapping(box1, box2):\n if box1[2] <= box2[0]: # If box1 is to the left of box2\n return False\n elif box1[0] >= box2[2]: # If box1 is to the right of box2\n return False\n elif box1[3] <= box2[1]: # If box1 is below box2\n return False\n elif box1[1] >= box2[3]: # If box1 is above box2\n return False\n else:\n return True", "def check_tensor(shards_metadata, tensor_dims) -> None:\n\n # If the tensor's volume matches the total volume of all shards and\n # all shard boundaries are within tensor dims, we have a compatible\n # sharding spec for this tensor. 
Note that we have already verified\n # we don't have overlapping shards.\n tensor_rank = len(tensor_dims)\n shards_rank = len(shards_metadata[0].shard_offsets)\n if tensor_rank != shards_rank:\n raise ValueError(f'Rank of tensor is {tensor_rank}, but shards rank is {shards_rank}')\n\n total_shard_volume = 0\n for shard in shards_metadata:\n shard_volume = 1\n for i, shard_length in enumerate(shard.shard_lengths):\n shard_volume *= shard_length\n if shard.shard_offsets[i] + shard.shard_lengths[i] > tensor_dims[i]:\n raise ValueError(\n f'Shard offset {shard.shard_offsets[i]} and length '\n f'{shard.shard_lengths[i]} exceeds tensor dim: {tensor_dims[i]} for shard {shard}')\n total_shard_volume += shard_volume\n\n tensor_volume = 1\n for size in tensor_dims:\n tensor_volume *= size\n\n if total_shard_volume != tensor_volume:\n # TODO: Can we improve this error message to point out the gaps?\n raise ValueError(\n f'Total volume of shards: {total_shard_volume} '\n f'does not match tensor volume: {tensor_volume}, in other words '\n f'all the individual shards do not cover the entire tensor')", "def test_no_sync_correctness(self):\n self.run_subtests(\n {\n \"sharding_strategy\": [\n ShardingStrategy.FULL_SHARD,\n ShardingStrategy.SHARD_GRAD_OP,\n ShardingStrategy.NO_SHARD,\n ],\n },\n self._test_no_sync_correctness,\n )", "def _print_overlapping_guards(self, model):\n has_overlap_guards = model.labeling.get_states(\"overlap_guards\")\n if has_overlap_guards.number_of_set_bits() == 0:\n return\n\n print(\"OVERLAP!\")\n print(has_overlap_guards)\n\n assert model.has_choice_origins()\n choice_origins = model.choice_origins\n conflicting_sets = []\n for state in model.states:\n if has_overlap_guards[state.id]:\n for action in state.actions:\n conflicting_sets.append(choice_origins.get_edge_index_set(state.id + action.id))\n\n for cs in conflicting_sets:\n print(choice_origins.model.restrict_edges(cs))\n exit(1)", "def checkConflicts(self):\n\t\treturn", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n abs(first[2] - second[2]) <= th,\n abs(first[3] - second[3]) <= th,\n first[1] == second[1],\n first[4] == second[4]]):\n return True\n else:\n return False", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n abs(first[3] - second[3]) <= th,\n first[1] == second[1],\n first[2] == second[2],\n first[5] == second[5],\n first[6] == second[6]]):\n return True\n else:\n return False", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n abs(first[2] - second[2]) <= th,\n abs(first[5] - second[5]) <= th,\n first[1] == second[1],\n first[4] == second[4]]):\n return True\n else:\n return False", "def do_overlap(r1, r2):\n r1_s, r1_e = r1\n r2_s, r2_e = r2\n\n return r1_s <= r2_s <= r1_e or r2_s <= r1_s <= r2_e", "def is_overlap(self, transposon):\n if self.first <= transposon.last <= self.last:\n return True\n elif self.first <= transposon.first <= self.last:\n return True\n else:\n return False", "def test_collisions_index(self):\n processed_collision_output = collisions_clean(\n 
\"seattlecollision/data/raw_data/raw_collisions_input.csv\")\n processed_collision_output_dup = processed_collision_output.drop_duplicates([\"c_id\"])\n self.assertTrue(\n processed_collision_output.shape[0] == processed_collision_output_dup.shape[0])", "def overlap(\n state: State, # pylint: disable=unused-argument\n action: Action, # pylint: disable=unused-argument\n next_state: State,\n *,\n object_type: Type[GridObject],\n) -> bool:\n return isinstance(next_state.grid[next_state.agent.position], object_type)", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n first[1] == second[1],\n first[2] == second[2],\n abs(first[3] - second[3]) <= th]):\n return True\n else:\n return False", "def overlaps(self, other: \"Availability\", strict: bool) -> bool:\n\n if not isinstance(other, Availability):\n raise Exception(\"Please provide an Availability object\")\n\n if strict:\n return (\n (self.start <= other.start < self.end)\n or (self.start < other.end <= self.end)\n or (other.start <= self.start < other.end)\n or (other.start < self.end <= other.end)\n )\n return (\n (self.start <= other.start <= self.end)\n or (self.start <= other.end <= self.end)\n or (other.start <= self.start <= other.end)\n or (other.start <= self.end <= other.end)\n )", "def overlaps(self, other):\n\n if self.ll.x >= other.ur.x:\n return False\n \n if self.ll.y >= other.ur.y:\n return False\n \n if self.ur.x <= other.ll.x:\n return False\n \n if self.ur.y <= other.ll.y:\n return False\n \n return True", "def have_overlap(self,\n entry1: Union[Annotation, int],\n entry2: Union[Annotation, int]) -> bool:\n entry1_: Annotation = self._entry_index[\n entry1] if isinstance(entry1, (int, np.integer)) else entry1\n entry2_: Annotation = self._entry_index[\n entry2] if isinstance(entry2, (int, np.integer)) else entry1\n\n if not isinstance(entry1_, Annotation):\n raise TypeError(f\"'entry1' should be an instance of Annotation,\"\n f\" but get {type(entry1)}\")\n\n if not isinstance(entry2_, Annotation):\n raise TypeError(f\"'entry2' should be an instance of Annotation,\"\n f\" but get {type(entry2)}\")\n\n return not (entry1_.span.begin >= entry2_.span.end or\n entry1_.span.end <= entry2_.span.begin)", "def overlap(id1, id2, th):\n\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n\n if all(map(lambda x: abs(x[0] - x[1]) <= th, zip(first, second))):\n return True\n else:\n return False", "def test_overlap(query, reference):\n return (reference[0] <= query[0] <= reference[1] or\n reference[0] <= query[1] <= reference[1] or\n query[0] <= reference[0] <= reference[1] <= query[1])", "def _prune_non_overlapping_boxes(self, boxes1, boxes2, min_overlap=0.0):\n with tf.name_scope('prune_non_overlapping_boxes'):\n ioa = self._ioa(boxes2, boxes1) # [M, N] tensor\n ioa = tf.reduce_max(ioa, axis=0) # [N] tensor\n keep_bool = tf.greater_equal(ioa, tf.constant(min_overlap))\n keep_inds = tf.squeeze(tf.where(keep_bool), axis=1)\n boxes = tf.gather(boxes1, keep_inds)\n return boxes, keep_inds", "def agent_overlap(t_drs, h_drs, replacements):\n t_agents = get_agent(t_drs) \n h_agents = get_agent(h_drs)\n length = len(t_agents) + len(h_agents)\n if len(t_agents) is 0:\n return 0\n common = 0\n for agent in t_agents:\n if agent in h_agents:\n h_agents.pop(h_agents.index(agent))\n 
common =+ 1\n if common > 1:\n print(common)\n \n return len(h_agents)/len(t_agents) #seems to work better then real comparison\n '''\n else:\n for replacement in replacements:\n if get_agent(replacement[15]) == get_agent(replacement[16]):\n return 1\n '''", "def resolve_self_collisions2(offsets):\n offsets = copy(offsets)\n no_collisions = []\n\n while len(offsets) > 0:\n offset_1 = offsets.pop(0)\n evict = []\n new = []\n\n add = True\n for ind_2, offset_2 in enumerate(no_collisions):\n if overlap(offset_1, offset_2):\n # keep smallest\n if (offset_1[1] - offset_1[0]) <= (offset_2[1] - offset_2[0]):\n evict.append(ind_2)\n new.append(offset_1)\n else:\n pass\n add = False\n\n if add:\n new.append(offset_1)\n\n for ind in sorted(evict, reverse=True):\n no_collisions.pop(ind)\n\n no_collisions.extend(new)\n\n no_collisions = list(set(no_collisions))\n\n return no_collisions", "def _check_integrity(self):\n\n count = 0\n for (x, y) in self.__players[ChessGame.BLACK].union(\n self.__players[ChessGame.WHITE]):\n assert (x, y) in self.__board\n count += 1\n\n assert count == len(self.__board)", "def _validate_merge_col_overlaps(self):\n mc = ColNameFormatter.fmt(MERGE_COLUMN)\n merge_col = self.solar_meta.columns[self.__solar_cols == mc].item()\n solar_vals = set(self.solar_meta[merge_col].values)\n merge_col = self.wind_meta.columns[self.__wind_cols == mc].item()\n wind_vals = set(self.wind_meta[merge_col].values)\n self.merge_col_overlap_values = solar_vals & wind_vals\n\n if not self.merge_col_overlap_values:\n msg = (\"No overlap detected in the values of {!r} across the \"\n \"input files. Please ensure that at least one of the \"\n \"{!r} values is the same for input files {!r} and {!r}\")\n e = msg.format(merge_col, merge_col, self.solar_fpath,\n self.wind_fpath)\n logger.error(e)\n raise FileInputError(e)", "def check_consistency(self, es):", "def assert_mapping_consistency(layout):\n values = sorted(layout.values())\n keys = list(layout)\n ref_keys = [\"q\" + str(i) for i in range(len(keys))]\n if keys != ref_keys:\n raise PlacementError(\"Some physical qubits in the layout may be missing or duplicated.\")\n if values != list(range(len(values))):\n raise PlacementError(\"Some logical qubits in the layout may be missing or duplicated.\")", "def should_grow_on_food_collision(self):\n return True", "def isdisjoint(self, other):\n self._check_title(other)\n\n # sort by top-left vertex\n if self.bounds > other.bounds:\n i = self\n self = other\n other = i\n\n return (self.max_col, self.max_row) < (other.min_col, other.max_row)", "def check_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_overlaps - something went wrong... 
no data?\")\n pass", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if first[0] == second[0] and abs(first[1] - second[1]) <= th:\n return True\n else:\n return False", "def overlap(id1, id2, th):\n\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if first[0] == second[0] and abs(first[1] - second[1]) <= th:\n return True\n else:\n return False", "def consistent(self, assignment):\n # for each of the current assignments\n for word in assignment:\n # if the word does not fit in the gaps\n if len(assignment[word]) != word.length:\n # reject attempt\n return False\n # if the word is already in the assignment\n if list(assignment.values()).count(assignment[word]) > 1:\n # reject attempt\n return False\n # for each of the overlaps\n for overlap in self.crossword.overlaps:\n # if the overlap isn't empty and is an overlap for the word\n # overlaps are a superset: if the overlap of (x, y) is in the set, so is (y, x), so we can just go by the first overlap element\n if self.crossword.overlaps[overlap] is not None and overlap[0] == word:\n # try to access the word assignment for the other overlap target\n try:\n test_word = assignment[overlap[1]]\n # if it does not exist in the assignment\n except KeyError:\n # continue to the next overlap\n continue\n # if the other overlap target has been assigned\n else:\n # extract the letter we want to match for the overlap\n test_letter = test_word[self.crossword.overlaps[overlap][1]]\n # if the letters do not match\n if assignment[word][self.crossword.overlaps[overlap][0]] != test_letter:\n # reject attempt\n return False\n return True", "def test_are_duplicates_bounds(self):\n rules = [\n pd.Series({\"A\": \"high\", \"B\": Bounds(lower=1, upper=1), \"C\": Bounds(lower=1, upper=1), \"Class\": \"apple\"},\n name=1),\n pd.Series({\"A\": \"high\", \"B\": Bounds(lower=0.8, upper=1), \"C\": Bounds(lower=1, upper=1),\n \"Class\": \"apple\"}, name=2)\n ]\n duplicate = _are_duplicates(rules[0], rules[1])\n self.assertTrue(duplicate is False)", "def has_atomic_overlaps(self):\n atomic_overlaps = self._get_atomic_overlaps()\n return len(atomic_overlaps) > 0", "def _validate_random_seeds(self):\n if self.random_seeds:\n if len(self.random_seeds) != len(self.sampler):\n raise ValueError(\"Number of given range objects in random_seeds\"\\\n \"and number of sampler objects need to be equal!\")\n if len(set(list(map(len,self.random_seeds)))) != 1:\n raise ValueError(\"Length of range objects in random_seeds\"\\\n \"list must be equal!\")", "def is_overlap_sorted_values(v1, v2, w1, w2):\r\n if (v2 < w1) or (v1 > w2):\r\n return False\r\n else:\r\n return True", "def no_collisions(data, affected_points):\n return", "def check_bounds(self):\n for i, missile in enumerate(self.missile_list):\n if missile.out_of_bounds(self.world):\n del self.missile_list[i]\n self.gameevents.add(\"bounds_remove\", \"missile\")\n for i, shell in enumerate(self.shell_list):\n if shell.out_of_bounds(self.world):\n del self.shell_list[i]\n self.gameevents.add(\"bounds_remove\", \"shell\")", "def time_conflict(self, schedule):\n for timerange in self._excluded_times:\n if timerange.conflicts_with(schedule):\n return False\n return True", "def new_input_does_not_overlap_original_board(self, col, row):\n return self.puzzle[row][col] == 0", "def overlap(start_idx1, 
end_idx1, start_idx2, end_idx2):\n head = min(end_idx1, end_idx2)\n tail = max(start_idx1, start_idx2)\n return head >= tail", "def test_common_origin_search(self):\n mc = MergeCrystals()\n mc.add_crystal(self.data1, self.cell)\n fs, score = mc.merge_phases(self.data2, self.cell, fshifts_list=self.fshifts_list)\n assert np.allclose(fs, 1-self.shifts2)", "def validate_overlap_for(doc, doctype, fieldname, value=None):\n\n existing = get_overlap_for(doc, doctype, fieldname, value)\n if existing:\n frappe.throw(_(\"This {0} conflicts with {1} for {2} {3}\").format(doc.doctype, existing.name,\n doc.meta.get_label(\n fieldname) if not value else fieldname,\n value or doc.get(fieldname)), OverlapError)", "def is_overlapping(self, region):\n if self.x2 < region.x1:\n return False # this box is left the other\n if self.x1 > region.x2:\n return False # this box is right the other\n if self.y2 < region.y1:\n return False # this box is above the other\n if self.y1 > region.y2:\n return False # this box is below the other\n return True", "def isIntvOverlapped(rOne, rTwo):\n\tclear = rOne[1] <= rTwo[0] or rOne[0] >= rTwo[1] \n\treturn not clear", "def overlap(indices1, indices2):\n assert (len(indices1) == 2 and len(indices2) == 2)\n indices1 = sorted(indices1)\n indices2 = sorted(indices2)\n if (indices2[0] <= indices1[0] <= indices2[1]) or \\\n (indices2[0] <= indices1[1] <= indices2[1]) or \\\n (indices1[0] <= indices2[0] <= indices1[1]) or \\\n (indices1[0] <= indices2[1] <= indices1[1]):\n return True\n else:\n return False", "def repair(\n self,\n start_key=None,\n end_key=None,\n shard=0,\n nshards=1,\n callback_on_progress=None,\n ):\n pass", "def test_emtpy_conflict_places(conflict_places):\n assert conflict_places.named_place(\"Woodshop\") == None", "def test_overlapping_events_contained_place(sample_events, shops, woodshop, metalshop, caplog):\n caplog.set_level(logging.INFO)\n places = [shops, woodshop, metalshop]\n event1, event2 = sample_events.make_overlapping_events()\n overlap_events(event1, event2, shops, woodshop, places)\n assert len(caplog.messages) == 1\n message = caplog.messages[0]\n assert \"Schedule conflict: place='Woodshop'\" in message\n expected_conflict_times = \"Conflict(start_time='{}', end_time='{}',\".format(\n event2.start_time, event1.end_time)\n assert expected_conflict_times in message\n assert event1.meetup_id in message\n assert event2.meetup_id in message", "def test_non_overlapping_events(sample_events, woodshop, caplog):\n caplog.set_level(logging.INFO)\n event1, event2 = sample_events.make_non_overlapping_events()\n woodshop.start_event(event1)\n woodshop.log_conflicts(event1.start_time)\n woodshop.end_event(event1)\n woodshop.log_conflicts(event1.end_time)\n woodshop.start_event(event2)\n woodshop.log_conflicts(event2.start_time)\n woodshop.end_event(event2)\n woodshop.log_conflicts(event2.end_time)\n assert caplog.text == \"\"", "def _bbox_overlap(self, other):\n reg0 = self.bbox\n reg1 = other.bbox\n return (reg0[0] <= reg1[2] and reg1[0] <= reg0[2] and\n reg0[1] <= reg1[3] and reg1[1] <= reg0[3])", "def calculate_overlap(self, r1, r2):\n\n # We know that reads that can be glued,\n # share at least half of their length.\n # Make sure one is not shorter than\n # the half of another.\n\n if len(r1) / 2 + len(r1) % 2 <= len(r2) \\\n and len(r2) / 2 + len(r2) % 2 <= len(r1):\n\n # prepare second halves for overlap pre-check\n\n tail1 = r1[len(r1) / 2:]\n tail2 = r2[len(r2) / 2:]\n \n # case 1: r1 contains r2 completely\n #\n # For example,\n #\n # ATCGCCGGAT\n 
# TCGCCGGA\n \n pos = r1.find(r2)\n if pos != -1:\n self.reads[r1].overlaps[r2] = pos + len(r2) - len(r1)\n \n # case 2: r2 contains r1 completely\n #\n # For example,\n #\n # TCGCCGGA\n # ATCGCCGGAT\n \n pos = r2.find(r1)\n if pos != -1:\n self.reads[r2].overlaps[r1] = pos + len(r1) - len(r2)\n \n # case 3: end of r1 overlaps with beginning of r2\n #\n # For example,\n #\n # ATCGCCGGAT\n # TCGCCGGATGC\n #\n # First check that at least half of r1 is in r2\n # If there is a match, calculate the expected length \n # of overlap and check if they indeed overlap.\n\n \n pos = r2.find(tail1)\n if pos != -1:\n overlap = pos + len(tail1)\n if r1[-overlap:] == r2[:overlap]:\n self.reads[r1].overlaps[r2] = len(r2) - overlap\n \n # case 4: end of r2 overlaps with beginning of r1\n #\n # For example,\n #\n # CGCCGGATCC\n # TCGCCGGAT\n #\n # First check that at least half of r2 is in r1\n # If there is a match, calculate the expected length \n # of overlap and check if they indeed overlap.\n \n pos = r1.find(tail2)\n if pos != -1: \n overlap = pos + len(tail2)\n if r2[-overlap:] == r1[:overlap]:\n self.reads[r2].overlaps[r1] = len(r1) - overlap", "def _undo_overlap(self, agent1, agent2, dist, combined_sizes, **kwargs):\n overlap = (combined_sizes - dist) / combined_sizes\n self.position_state.modify_position(agent1, -agent1.velocity * overlap)\n self.position_state.modify_position(agent2, -agent2.velocity * overlap)", "def clean_overlapping(overlapping):\n remove = []\n for square in overlapping:\n if len(overlapping[square]) == 1:\n remove.append(square)\n for square in remove:\n overlapping.pop(square)\n return overlapping", "def overlap(range1, range2):\n if range1[0] <= range2[1] and range2[0] <= range1[1]:\n return True\n return False" ]
[ "0.6780764", "0.66072226", "0.6300442", "0.62059945", "0.62039727", "0.618798", "0.6161772", "0.61248296", "0.60674876", "0.60540277", "0.59775466", "0.59660643", "0.5925951", "0.59135264", "0.58842605", "0.58602405", "0.5835803", "0.5722045", "0.5703409", "0.57028174", "0.56958723", "0.5679249", "0.56744343", "0.5671604", "0.56381536", "0.5625889", "0.56201446", "0.5612965", "0.55906594", "0.55785084", "0.5563826", "0.55494326", "0.5546876", "0.553332", "0.5518812", "0.55144274", "0.54772526", "0.54642963", "0.5456628", "0.5456628", "0.5442119", "0.5438703", "0.54359525", "0.5434774", "0.54258806", "0.5420234", "0.54183066", "0.540603", "0.53809315", "0.5378322", "0.53701305", "0.53692514", "0.53676766", "0.53654957", "0.5363588", "0.5354162", "0.5352703", "0.5342369", "0.53419477", "0.5340877", "0.5340179", "0.53232104", "0.5322968", "0.53146255", "0.5308072", "0.530422", "0.5291823", "0.5287305", "0.52812254", "0.52804655", "0.527914", "0.52601767", "0.5257457", "0.52568775", "0.5247314", "0.5246625", "0.5243854", "0.5243419", "0.52312696", "0.52083457", "0.52080625", "0.52034813", "0.5198065", "0.51878077", "0.51783764", "0.5173194", "0.517098", "0.5162158", "0.51604015", "0.51533693", "0.51466686", "0.5145623", "0.51415056", "0.5138451", "0.51335996", "0.51287884", "0.51286256", "0.51183647", "0.5115581", "0.51079917" ]
0.7294196
0
Checks if the shards_metadata is compatible with the provided tensor dims.
def check_tensor(shards_metadata, tensor_dims) -> None: # If the tensor's volume matches the total volume of all shards and # all shard boundaries are within tensor dims, we have a compatible # sharding spec for this tensor. Note that we have already verified # we don't have overlapping shards. tensor_rank = len(tensor_dims) shards_rank = len(shards_metadata[0].shard_offsets) if tensor_rank != shards_rank: raise ValueError(f'Rank of tensor is {tensor_rank}, but shards rank is {shards_rank}') total_shard_volume = 0 for shard in shards_metadata: shard_volume = 1 for i, shard_length in enumerate(shard.shard_lengths): shard_volume *= shard_length if shard.shard_offsets[i] + shard.shard_lengths[i] > tensor_dims[i]: raise ValueError( f'Shard offset {shard.shard_offsets[i]} and length ' f'{shard.shard_lengths[i]} exceeds tensor dim: {tensor_dims[i]} for shard {shard}') total_shard_volume += shard_volume tensor_volume = 1 for size in tensor_dims: tensor_volume *= size if total_shard_volume != tensor_volume: # TODO: Can we improve this error message to point out the gaps? raise ValueError( f'Total volume of shards: {total_shard_volume} ' f'does not match tensor volume: {tensor_volume}, in other words ' f'all the individual shards do not cover the entire tensor')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_dims(xobj, dims, kind):\n if isinstance(dims, str):\n dims = [dims]\n\n if not all(dim in xobj.dims for dim in dims):\n raise DimensionError(\n f'Your {kind} object must contain the '\n f'following dimensions at the minimum: {dims}'\n )\n return True", "def _check_dimensions(self, workspace_to_check):\n for i in range(self._raw_ws.getNumDims()):\n if self._raw_ws.getDimension(i).getNBins() != workspace_to_check._raw_ws.getDimension(i).getNBins():\n return False\n return True", "def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). \"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )", "def _check_tensor_shapes(tensors):\n for tensor in tensors:\n tensor = tf.convert_to_tensor(value=tensor)\n tensor.get_shape().assert_has_rank(2)\n tensor.get_shape().assert_is_compatible_with(\n tf.convert_to_tensor(value=tensors[0]).get_shape())", "def check_qim_dim_match(cls, qim, dim):\n return len(qim) == len(dim)", "def has_all_dims(dp_or_event, dims):\n return dims.items() <= {d.key: d.value for d in dp_or_event.dimensions}.items()", "def check_has_dims(hdr):\n try:\n return (hdr['startX'], hdr['startY'])\n except KeyError:\n return False", "def has_dimension(self, dim):\n\n return self.units.dimensions == dim", "def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata):\n\n # For each dim of each shard, check if one shard resides on the other\n # end of second shard with respect to that dim. As an example for a 2D\n # shard, we would check if one shard is above or on the left of the\n # other shard.\n ndims = len(shard1.shard_offsets)\n for i in range(ndims):\n if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_lengths[i]:\n return False\n if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_lengths[i]:\n return False\n\n return True", "def _check_dim_array(array, ndim):\n # enlist the number of expected dimensions\n if isinstance(ndim, int):\n ndim = [ndim]\n\n # check the number of dimensions of the array\n if array.ndim not in ndim:\n raise ValueError(\"Array can't have {0} dimension(s). 
Expected \"\n \"dimensions are: {1}.\".format(array.ndim, ndim))", "def _metadata_is_consistent(metadata):\n checks = []\n required = ('version', 'fields', 'size', 'width', 'height', 'points',\n 'viewpoint', 'data')\n for f in required:\n if f not in metadata:\n print('%s required' % f)\n checks.append((lambda m: all([k in m for k in required]),\n 'missing field'))\n checks.append((lambda m: len(m['type']) == len(m['count']) ==\n len(m['fields']),\n 'length of type, count and fields must be equal'))\n checks.append((lambda m: m['height'] > 0,\n 'height must be greater than 0'))\n checks.append((lambda m: m['width'] > 0,\n 'width must be greater than 0'))\n checks.append((lambda m: m['points'] > 0,\n 'points must be greater than 0'))\n checks.append((lambda m: m['data'].lower() in ('ascii', 'binary',\n 'binary_compressed'),\n 'unknown data type:'\n 'should be ascii/binary/binary_compressed'))\n ok = True\n for check, msg in checks:\n if not check(metadata):\n print('error:', msg)\n ok = False\n return ok", "def check_dim(gr, DIM):\n l = len(gr)\n if(l != DIM):\n return False\n\n for i in range(0, DIM):\n if(len(gr[i]) != l):\n return False \n return True", "def check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert 4 <= len(X.dims) <= 5, 'XCast requires a dataset to be 4-Dimensional'\n\tassert x_lat_dim in X.dims, 'XCast requires a dataset_lat_dim to be a dimension on X'\n\tassert x_lon_dim in X.dims, 'XCast requires a dataset_lon_dim to be a dimension on X'\n\tassert x_sample_dim in X.dims, 'XCast requires a dataset_sample_dim to be a dimension on X'\n\tassert x_feature_dim in X.dims, 'XCast requires a dataset_feature_dim to be a dimension on X'", "def _validate_dimensions(config):\n logging.info(\"Checking provided dimensions are valid\")\n for feature in config.get(\"test-suites\").values():\n for test_name, test in feature.items():\n for dimensions_config in test[\"dimensions\"]:\n _validate_schedulers(config, dimensions_config.get(\"schedulers\", []))\n if [] in dimensions_config.values():\n logging.error(\"Values assigned to dimensions in test %s cannot be empty\", test_name)\n raise AssertionError", "def _check_dims(cls, values):\n ndim = values['ndim']\n\n # Check the range tuple has same number of elements as ndim\n if len(values['range']) < ndim:\n values['range'] = ((0, 2, 1),) * (\n ndim - len(values['range'])\n ) + values['range']\n elif len(values['range']) > ndim:\n values['range'] = values['range'][-ndim:]\n\n # Check the current step tuple has same number of elements as ndim\n if len(values['current_step']) < ndim:\n values['current_step'] = (0,) * (\n ndim - len(values['current_step'])\n ) + values['current_step']\n elif len(values['current_step']) > ndim:\n values['current_step'] = values['current_step'][-ndim:]\n\n # Check the order tuple has same number of elements as ndim\n if len(values['order']) < ndim:\n values['order'] = tuple(\n range(ndim - len(values['order']))\n ) + tuple(o + ndim - len(values['order']) for o in values['order'])\n elif len(values['order']) > ndim:\n values['order'] = reorder_after_dim_reduction(\n values['order'][-ndim:]\n )\n\n # Check the order is a permutation of 0, ..., ndim - 1\n if not set(values['order']) == set(range(ndim)):\n raise ValueError(\n trans._(\n \"Invalid ordering {order} for {ndim} dimensions\",\n deferred=True,\n order=values['order'],\n ndim=ndim,\n )\n )\n\n # Check the axis labels tuple has same number of elements as ndim\n if len(values['axis_labels']) < ndim:\n # Append new \"default\" 
labels to existing ones\n if values['axis_labels'] == tuple(\n map(str, range(len(values['axis_labels'])))\n ):\n values['axis_labels'] = tuple(map(str, range(ndim)))\n else:\n values['axis_labels'] = (\n tuple(map(str, range(ndim - len(values['axis_labels']))))\n + values['axis_labels']\n )\n elif len(values['axis_labels']) > ndim:\n values['axis_labels'] = values['axis_labels'][-ndim:]\n\n return values", "def check_dimension(dim, meta, trace=False):\n if dim == \"..\":\n meta[\"dimension\"] = declast.AssumedRank()\n meta[\"assumed-rank\"] = True\n else:\n meta[\"dimension\"] = declast.ExprParser(dim, trace=trace).dimension_shape()", "def is_dimension_dynamic(dim) -> bool:\n return dim is None or dim <= 0", "def has_datapoint_with_all_dims(fake_ingest, dims):\n for datapoint in fake_ingest.datapoints:\n if has_all_dims(datapoint, dims):\n return True\n return False", "def _test_obsmdsize(t):\n md = t.metadata(axis='observation')\n return t.shape[0] != len(md) if md is not None else False", "def _assert_float32(tensors):\n if not isinstance(tensors, dict):\n tensors = [tensors]\n else:\n tensors = tensors.values()\n for tensor in tensors:\n if tensor.dtype.base_dtype != dtypes.float32:\n raise TypeError('Expected dtype=float32, %s.' % tensor)", "def _verify_space(self) -> None:\n\n for dimension in self.space.values():\n\n if dimension.type != \"fidelity\" and dimension.prior_name not in [\n \"uniform\",\n \"reciprocal\",\n \"int_uniform\",\n \"int_reciprocal\",\n \"choices\",\n ]:\n raise ValueError(\n \"TPE now only supports uniform, loguniform, uniform discrete \"\n f\"and choices as prior: {dimension.prior_name}\"\n )\n\n shape = dimension.shape\n if shape and len(shape) != 1:\n raise ValueError(\"TPE now only supports 1D shape.\")", "def is_tensor_spec(self) -> bool:\n return self.inputs and isinstance(self.inputs[0], TensorSpec)", "def valid_ndim_assertion(expected_dimentions, actual_dimention, name):\n\tassert (actual_dimention in expected_dimentions), \"Invalid ndim of {} should be {}\".format(name, str(expected_dimentions))", "def _is_tensor_equal(input_tensor, cache_tensor):\n if input_tensor.dtype != cache_tensor.dtype:\n return False\n\n if input_tensor.shape != cache_tensor.shape:\n return False\n\n if len(input_tensor.shape) != len(cache_tensor.shape):\n return False\n\n return True", "def validate_non_overlapping_shards_metadata(shards: List[ShardMetadata]):\n # TODO: evaluate optimizing this if needed.\n for i in range(len(shards)):\n for j in range(i + 1, len(shards)):\n if _check_shard_metadata_pair_overlap(shards[i], shards[j]):\n raise ValueError(f'Shards {shards[i]} and {shards[j]} overlap')", "def validate_dimensions(self, dimensions):\n\n #safety checking\n if len(dimensions) != self.dimensionality:\n raise ValueError(f\"The number of dimensions provided {len(dimensions)}\"\n f\"do not match that of this coordinate system \"\n f\"{self.dimensionality}.\")\n\n if not all(isinstance(elem, int) for elem in dimensions):\n raise ValueError(f\"Not all dimensions are ints {dimensions}\")\n\n if not all(elem > 0 for elem in dimensions):\n raise ValueError(f\"Dimensions must be greater than 1 {dimensions}\")\n\n if not checkallequal(dimensions):\n raise ValueError(f\"Not all dimensions are equal {dimensions}. They \"\n f\"must be equal. 
This will be changed in a future version\")", "def check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits):\n def _check_pair(a, b):\n if a != b:\n raise ValueError(\"Shape mismatch: %s vs %s.\" % (a, b))\n if len(a) != 2 or len(b) != 2:\n raise ValueError(\"Rank: expected 2, got %s and %s\" % (len(a), len(b)))\n\n if (d_real is not None) and (d_fake is not None):\n _check_pair(d_real.shape.as_list(), d_fake.shape.as_list())\n if (d_real_logits is not None) and (d_fake_logits is not None):\n _check_pair(d_real_logits.shape.as_list(), d_fake_logits.shape.as_list())\n if (d_real is not None) and (d_real_logits is not None):\n _check_pair(d_real.shape.as_list(), d_real_logits.shape.as_list())", "def validate_dimensions(self, dimensions):\n #safety checking\n if len(dimensions) != self.dimensionality:\n raise ValueError(f\"The number of dimensions provided {len(dimensions)}\"\n f\"do not match that of this coordinate system \"\n f\"{self.dimensionality}.\")\n\n if not all(isinstance(elem, int) for elem in dimensions):\n raise ValueError(f\"Not all dimensions are ints {dimensions}\")", "def HasTensor(tensor):\n return _C.HasTensor(_stringify_tensor(tensor))", "def check_dimensionality(quantity, compatible_units):\n if unit.is_quantity(compatible_units) or unit.is_unit(compatible_units):\n try:\n from simtk.unit.quantity import is_dimensionless\n except ModuleNotFoundError:\n from openmm.unit.quantity import is_dimensionless\n if not is_dimensionless(quantity / compatible_units):\n raise ValueError('{} does not have units compatible with expected {}'.format(quantity, compatible_units))\n elif compatible_units == float:\n if not (isinstance(quantity, float) or isinstance(quantity, np.ndarray)):\n raise ValueError(\"'{}' expected to be a float, but was instead {}\".format(quantity, type(quantity)))\n else:\n raise ValueError(\"Don't know how to handle compatible_units of {}\".format(compatible_units))\n\n # Units are compatible if they pass this point\n return True", "def _check_data_shape_to_num_outputs(preds: Tensor, target: Tensor, num_outputs: int) ->None:\n if preds.ndim > 2 or target.ndim > 2:\n raise ValueError(f'Expected both predictions and target to be either 1- or 2-dimensional tensors, but got {target.ndim} and {preds.ndim}.')\n if num_outputs == 1 and preds.ndim != 1 or num_outputs > 1 and num_outputs != preds.shape[1]:\n raise ValueError(f'Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs} and {preds.shape[1]}.')", "def _is_tensor(x: Any) -> bool:\n if has_tensorflow and isinstance(x, _TfTensor):\n return True\n if has_pytorch and isinstance(x, _PtTensor):\n return True\n if isinstance(x, np.ndarray):\n return True\n return False", "def has_min_len(arr, len_, kind):\n arr_len = len(arr)\n if arr_len < len_:\n raise DimensionError(\n f'Your {kind} array must be at least {len_}, '\n f'but has only length {arr_len}!'\n )\n return True", "def haveDimension(self, dim):\r\n haveIt = False\r\n try:\r\n self.data.getDimensionValue(dim, 0)\r\n haveIt = True\r\n except:\r\n # Assume it's not there...\r\n pass\r\n return haveIt", "def check_input_dimension_consistency(self, session_data: \"SessionDataType\"):\n\n if self.share_hidden_layers:\n num_text_features = self._get_num_of_features(session_data, \"text_features\")\n num_intent_features = self._get_num_of_features(\n session_data, \"label_features\"\n )\n\n if num_text_features != num_intent_features:\n raise ValueError(\n \"If embeddings are shared, \"\n \"text features and label features 
\"\n \"must coincide. Check the output dimensions of previous components.\"\n )", "def _verify_data(inputs, targets):\n check_value_type('inputs', inputs, Tensor)\n if len(inputs.shape) != 4:\n raise ValueError(f'Argument inputs must be 4D Tensor, but got {len(inputs.shape)}D Tensor.')\n check_value_type('targets', targets, (Tensor, int, tuple, list))\n if isinstance(targets, Tensor):\n if len(targets.shape) > 2:\n raise ValueError('Dimension invalid. If `targets` is a Tensor, it should be 0D, 1D or 2D. '\n 'But got {}D.'.format(len(targets.shape)))\n if targets.shape and len(targets) != len(inputs):\n raise ValueError(\n 'If `targets` is a 2D, 1D Tensor, it should have the same length as inputs {}. But got {}.'.format(\n len(inputs), len(targets)))", "def _autocheck_dimensions(self):\n # W dimensions check list\n assert len(self.W.shape) == 2, f\"W shape should be (N, N) but is {self.W.shape}.\"\n assert self.W.shape[0] == self.W.shape[1], f\"W shape should be (N, N) but is {self.W.shape}.\"\n\n # Win dimensions check list\n assert len(self.Win.shape) == 2, f\"Win shape should be (N, input) but is {self.Win.shape}.\"\n err = f\"Win shape should be ({self.W.shape[1]}, input) but is {self.Win.shape}.\"\n assert self.Win.shape[0] == self.W.shape[0], err\n\n # Wout dimensions check list\n assert len(self.Wout.shape) == 2, f\"Wout shape should be (output, nb_states) but is {self.Wout.shape}.\"\n nb_states = self.Win.shape[1] + self.W.shape[0] + 1 if self.use_raw_inp else self.W.shape[0] + 1\n err = f\"Wout shape should be (output, {nb_states}) but is {self.Wout.shape}.\"\n assert self.Wout.shape[1] == nb_states, err\n\n # Wfb dimensions check list\n if self.Wfb is not None:\n assert len(self.Wfb.shape) == 2, f\"Wfb shape should be (input, output) but is {self.Wfb.shape}.\"\n err = f\"Wfb shape should be ({self.Win.shape[0]}, {self.Wout.shape[0]}) but is {self.Wfb.shape}.\"\n assert (self.Win.shape[0],self.Wout.shape[0]) == self.Wfb.shape, err", "def _tensors_defined(self):\n tensors = [self.tensor_u, self.tensor_v, self.tensor_w]\n return all((tensor is not None for tensor in tensors))", "def _check_dataset(self, dataset):\n if not isinstance(dataset, Dataset):\n raise ValueError('wrong training_set or validation_set are not instances of the nn.Dataset class')\n\n if dataset.inputs.shape[1] != self.arch[0]:\n raise ValueError('dataset inputs shape is inconsistent with number of network input nodes.')\n\n if dataset.targets.shape[1] != self.arch[-1]:\n raise ValueError('dataset targets shape is inconsistent with number of network output nodes.')", "def check_param(self):\n check_tuple = (\"float16\", \"float32\", \"int32\")\n check_shape(self.shape_x, param_name=\"x\")\n check_shape(self.shape_indices, param_name=\"indices\")\n check_shape(self.shape_v, param_name=\"v\")\n check_dtype(self.dtype_x, check_tuple, param_name=\"x\")\n check_dtype(self.dtype_indices, (\"int32\",), param_name=\"indices\")\n check_dtype(self.dtype_v, check_tuple, param_name=\"v\")\n if len(self.shape_x) != len(self.shape_v):\n raise RuntimeError(\"The number of dimension x must\"\n \" be same as dimension v\")\n\n if self.shape_v[0] != self.shape_indices[0]:\n raise RuntimeError(\"The length of rank 0 of tensor v must\"\n \" be the same as length of indices\")\n\n if len(self.shape_indices) != 1:\n raise RuntimeError(\"The length of indices only support 1\")\n for i in range(1, len(self.shape_v)):\n if self.shape_x[i] != self.shape_v[i]:\n if not self.check_special():\n raise RuntimeError(\"The length of each rank of 
tensor x\"\n \" must be the same as length of\"\n \" each or next rank of tensor v\")", "def dimension_check():\n print(\"### DIMENSION CHECK ###\")\n print(X.shape,\n y.shape,\n X_train.shape,\n y_train.shape,\n X_test.shape,\n y_test.shape,\n weights.shape)\n print(\"### END ###\")", "def _check_sizes(self, space):\n my_dimension = self.get_total_dimension()\n other_dimension = space.get_total_dimension()\n if my_dimension != other_dimension:\n if isinstance(space, Conv2DSpace):\n if my_dimension * space.shape[0] !=\\\n other_dimension:\n raise ValueError(str(self)+\" with total dimension \" +\n str(my_dimension) +\n \" can't format a batch into \" +\n str(space) + \"because its total dimension\\\n is \" +\n str(other_dimension))", "def nd_shape_checking(x, y, mvaxis, traxis):\n assert x.ndim == y.ndim\n dims = np.delete(np.arange(x.ndim), -2)\n assert all([x.shape[k] == y.shape[k] for k in dims])", "def assert_valid_dtypes(tensors):\n valid_dtypes = get_valid_dtypes()\n for t in tensors:\n dtype = t.dtype.base_dtype\n if dtype not in valid_dtypes:\n raise ValueError(\n \"Invalid type %r for %s, expected: %s.\" % (\n dtype, t.name, [v for v in valid_dtypes]))", "def check_dims(self, data):\n if np.ndim(data) != 2:\n raise ValueError('Input data must be a two dimensional numpy array. '\n 'Data received has shape (%g, %g).' % data.shape)", "def check_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert X.shape[list(X.dims).index(x_lat_dim)] == len(X.coords[x_lat_dim].values), \"XCast requires a dataset's x_lat_dim coordinate to be the same length as its x_lat_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_lon_dim)] == len(X.coords[x_lon_dim].values), \"XCast requires a dataset's x_lon_dim coordinate to be the same length as its x_lon_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_sample_dim)] == len(X.coords[x_sample_dim].values), \"XCast requires a dataset's x_sample_dim coordinate to be the same length as its x_sample_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_feature_dim)] == len(X.coords[x_feature_dim].values), \"XCast requires a dataset's x_feature_dim coordinate to be the same length as its x_feature_dim dimension\"", "def test_dim_empty_list(a, b, metrics):\n if metrics in correlation_metrics:\n metric, _metric = metrics\n with pytest.raises(ValueError) as excinfo:\n metric(a, b, dim=[])\n assert \"requires `dim` not being empty, found dim\" in str(excinfo.value)\n elif metrics in distance_metrics:\n metric, _metric = metrics\n res = metric(a, b, dim=[])\n assert len(res.dims) == len(a.dims), print(res.dims)", "def _assert_is_batched(self, *arrays):\n shape_list = []\n for array in arrays:\n if isinstance(array, tf.Tensor):\n shape_list.append(array.shape.as_list())\n else:\n shape_list.append(np.shape(array))\n # All arrays should have at least two dimensions.\n assert all([len(shape) >= 2 for shape in shape_list])\n # All arrays should have the same batch size.\n assert len(set([shape[0] for shape in shape_list])) == 1", "def has_tensor(obj) -> bool:\n if isinstance(obj, torch.Tensor):\n return True\n elif isinstance(obj, dict):\n return any(has_tensor(value) for value in obj.values())\n elif isinstance(obj, (list, tuple)):\n return any(has_tensor(item) for item in obj)\n else:\n return False", "def has_tensors(ls):\n # Note: at some point in time ragged tensors didn't count as tensors, so this\n # returned false for ragged tensors. 
Making this return true fails some tests\n # which would then require a steps_per_epoch argument.\n if isinstance(ls, (list, tuple)):\n return any(\n tensor_util.is_tf_type(v) and\n not isinstance(v, ragged_tensor.RaggedTensor) for v in ls)\n if isinstance(ls, dict):\n return any(\n tensor_util.is_tf_type(v) and\n not isinstance(v, ragged_tensor.RaggedTensor)\n for _, v in ls.items())\n return tensor_util.is_tf_type(ls) and not isinstance(\n ls, ragged_tensor.RaggedTensor)", "def _check_shape(placeholder_shape, data_shape):\n\n return True", "def _test_sampmdsize(t):\n md = t.metadata(axis='sample')\n return t.shape[1] != len(md) if md is not None else False", "def validate_ndarray(ndarray, expected_dtypes, expected_dimentions, name):\n\tvalid_dtype_assertion(expected_dtypes, ndarray.dtype, name)\n\tvalid_ndim_assertion(expected_dimentions, ndarray.ndim, name)", "def is_keras_tensor(x):\n if not is_tensor(x):\n raise ValueError('Unexpectedly found an instance of type `' +\n str(type(x)) + '`. '\n 'Expected a symbolic tensor instance.')\n return hasattr(x, '_keras_history')", "def _check_same_shape(preds: Tensor, target: Tensor) ->None:\n if preds.shape != target.shape:\n raise RuntimeError(f'Predictions and targets are expected to have the same shape, but got {preds.shape} and {target.shape}.')", "def check_input_dims(df: pd.DataFrame,\n input_check_flag: str,\n input_file_name: str = None,\n raise_err: bool = True\n ) -> bool:\n messages = []\n file_name = input_file_name or input_check_flag\n target_dims = INPUT_CHECKS[input_check_flag]\n if df.shape[0] != target_dims[0]:\n messages.append(f\"Incorrect number of rows in {file_name}: Should be \"\n f\"{target_dims[0]} but found {df.shape[0]}\")\n if df.shape[1] != target_dims[1]:\n messages.append(f\"Incorrect number of columns in {file_name}: Should \"\n f\"be {target_dims[1]} but found {df.shape[1]}\")\n if len(messages) > 0:\n if raise_err:\n raise ValueError(\"::\".join(messages))\n else:\n warnings.warn(\"::\".join(messages))\n return False\n return True", "def check_metadata(metadata):\n message = 'The given metadata contains unsupported types.'\n assert all([item['type'] in ['category', 'value'] for item in metadata['details']]), message", "def _AssertShapesMatch(op_name, in_tensor, out_tensor):\n in_shape = in_tensor.get_shape()\n out_shape = out_tensor.get_shape()\n\n if not in_shape.is_compatible_with(out_shape):\n raise ValueError('%s should not change tensor shape: input %s, '\n 'output %s' % (op_name, in_shape, out_shape))", "def HasTensor(tensor):\n return HasTensorCC(_stringify_tensor(tensor))", "def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])", "def check_topology(self, ninputs: \"int\", noutputs: \"int\") -> \"bool\":\n return _beamforming_swig.phasedarray_sptr_check_topology(self, ninputs, noutputs)", "def do_grid_check(self,):\n self.ydim, self.xdim = self.data_fcst.shape \n if self.data_obs.shape != (self.ydim,self.xdim):\n raise FormatError(\"Obs and forecast data not same size.\")\n return", "def assert_spec_compatibility(input_spec: TensorSpec, other_spec: TensorSpec):\n if not input_spec:\n return False\n if isinstance(input_spec, (tuple, list)) and all([isinstance(item, numbers.Integral) for item in input_spec]):\n input_spec = TensorSpec(shape=to_tensor(input_spec))\n\n if isinstance(other_spec, (tuple, list)) and all([isinstance(item, numbers.Integral) for item in other_spec]):\n other_spec = TensorSpec(shape=to_tensor(other_spec))\n\n if (input_spec.ndim is not 
None or\n input_spec.min_ndim is not None or\n input_spec.max_ndim is not None):\n if other_spec.ndim is None:\n print('Other_spec ' + ' is incompatible with input_spec: '\n 'its rank is undefined, but input_spec requires a '\n 'defined rank.')\n return False\n\n # Check ndim.\n if input_spec.ndim is not None:\n ndim = other_spec.ndim\n if ndim != input_spec.ndim:\n print('Other_spec is incompatible with the input_spec: expected ndim=' + str(input_spec.ndim) + ', found ndim=' +\n str(ndim) + '. Full shape received: ' +\n str(other_spec._shape_tuple))\n return False\n if input_spec.max_ndim is not None:\n ndim = other_spec.ndim\n if ndim is not None and ndim > input_spec.max_ndim:\n print('Other_spec is incompatible with the input_spec: expected max_ndim=' + str(input_spec.max_ndim) +\n ', found ndim=' + str(ndim))\n return False\n if input_spec.min_ndim is not None:\n ndim = other_spec.ndim\n if ndim is not None and ndim < input_spec.min_ndim:\n print('Other_spec is incompatible with the input_spec: expected min_ndim=' + str(input_spec.min_ndim) +\n ', found ndim=' + str(ndim) +\n '. Full shape received: ' +\n str(other_spec._shape_tuple))\n return False\n # Check dtype.\n if input_spec.dtype is not None:\n if other_spec.dtype != input_spec.dtype:\n print('Other_spec is incompatible with the input_spec: expected dtype=' + str(input_spec.dtype) +\n ', found dtype=' + str(other_spec.dtype))\n return False\n # Check specific shape axes.\n if input_spec.axes:\n shape = other_spec._shape_tuple\n if shape is not None:\n for axis, value in input_spec.axes.items():\n if hasattr(value, 'value'):\n value = value.value\n if value is not None and shape[int(axis)] not in {value, None}:\n print(\n 'Other_spec is incompatible with input_spec: expected axis ' + str(axis) +\n ' of input shape to have value ' + str(value) +\n ' but received input with shape ' + str(shape))\n return False\n # Check shape.\n if input_spec.shape is not None:\n shape = other_spec._shape_tuple\n is_compatible=TensorShape(input_spec.shape).is_compatible_with(TensorShape(other_spec._shape_tuple))\n if is_compatible:\n return is_compatible\n if shape is not None:\n for spec_dim, dim in zip(other_spec._shape_tuple, input_spec._shape_tuple):\n if spec_dim is not None and dim is not None:\n if spec_dim != dim:\n print('Other_spec is incompatible with input_spec: expected shape=' + str(input_spec._shape_tuple) +\n ', found shape=' + str(shape))\n return False\n return True", "def _check_and_resize_input_tensor(self, input_data_map):\r\n is_need_reshape = False\r\n input_shape_list = []\r\n\r\n for model_input in self.model_inputs:\r\n tensor_name = model_input.name.rstrip()\r\n input_data = input_data_map.get(tensor_name, None)\r\n if input_data is None:\r\n raise ValueError(f'{tensor_name} is not in model inputs')\r\n if model_input.shape != list(input_data.shape):\r\n self.logger.warning(f'model input shape: {model_input.shape} is not equal'\r\n f'with input data shape: {input_data.shape}, model input shape'\r\n f'would be reshaped')\r\n is_need_reshape = True\r\n input_shape_list.append(list(input_data.shape))\r\n\r\n if is_need_reshape:\r\n self.model_session.resize(self.model_inputs, input_shape_list)\r\n self.model_inputs = self.model_session.get_inputs()", "def check_image_before_load(self,image_dims):\n\n if image_dims[0]*image_dims[1]*image_dims[2]*4 < self.check_available_memory():\n return True\n else:\n return False", "def check_ndim(a, d):\n if ndim(a) != d:\n raise ValueError('expected {}d value, got {}d'.format(d, 
ndim(a)))", "def safe_check(dicts, kernel_name):\n x_shape = dicts[0].get(\"shape\")\n x_dtype = dicts[0].get(\"dtype\").lower()\n rois_shape = dicts[1].get(\"shape\")\n rois_dtype = dicts[1].get(\"dtype\").lower()\n\n y_dtype = dicts[3].get(\"dtype\").lower()\n y_shape = dicts[3].get(\"shape\")\n\n profile = tik.Dprofile()\n tik_name_check = tbe_platform.cce_conf.get_soc_spec(\"SOC_VERSION\")\n if tik_name_check in (\"Ascend310\", \"Ascend910\", \"Hi3796CV300ES\", \"Hi3796CV300CS\"):\n util.check_dtype_rule(x_dtype, (\"float16\",))\n util.check_dtype_rule(rois_dtype, (\"float16\",))\n else:\n util.check_dtype_rule(x_dtype, (\"float16\", \"float32\"))\n util.check_dtype_rule(rois_dtype, (\"float16\", \"float32\"))\n\n if x_dtype != rois_dtype or x_dtype != y_dtype:\n raise RuntimeError(\"dtype in x, rois and y must be equal\")\n\n util.check_shape_rule(x_shape, min_dim=5, max_dim=5)\n util.check_tensor_shape_size(x_shape)\n util.check_shape_rule(rois_shape, min_dim=3, max_dim=3)\n util.check_tensor_shape_size(rois_shape)\n util.check_shape_rule(y_shape, min_dim=5, max_dim=5)\n util.check_tensor_shape_size(y_shape)\n\n roi_max_num = rois_shape[2]\n if roi_max_num > 6000 or roi_max_num % 16 != 0:\n raise ValueError(\"the dim 2 of rois_shape is illegal\")\n\n util.check_kernel_name(kernel_name)", "def _validate_shards(self, phase_steps):\n step_expanded_flags = [step.data.get('expanded', False) for step in phase_steps]\n assert all(step_expanded_flags) or not any(step_expanded_flags), \\\n \"Mixed expanded and non-expanded steps in phase!\"\n expanded = step_expanded_flags[0]\n if not expanded:\n # This was the initial phase, not the expanded phase. No need to\n # check shards.\n return Result.passed\n\n step_shard_counts = [step.data.get('shard_count', 1) for step in phase_steps]\n assert len(set(step_shard_counts)) == 1, \"Mixed shard counts in phase!\"\n shard_count = step_shard_counts[0]\n if len(phase_steps) != shard_count:\n # TODO(josiah): we'd like to be able to record a FailureReason\n # here, but currently a FailureReason must correspond to a JobStep.\n logging.error(\"Build failed due to incorrect number of shards: expected %d, got %d\",\n shard_count, len(phase_steps))\n return Result.unknown\n return Result.passed", "def test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))", "def _is_all_tensor_equal(self, input_shape_tuple, cache_shape_tuple):\n for i, elem in enumerate(cache_shape_tuple):\n res = self._is_tensor_equal(input_shape_tuple[i], elem)\n if not res:\n return False\n return True", "def _test_obssize(t):\n return t.shape[0] != len(t.ids(axis='observation'))", "def _validate_compatibility(self):\r\n for dm in self.DistanceMatrices:\r\n for samp_id in dm.ids:\r\n if samp_id not in self.MetadataMap.SampleIds:\r\n raise ValueError(\"The sample ID '%s' was not found in the \"\r\n \"metadata map.\" % samp_id)\r\n for cat in self.Categories:\r\n if cat not in self.MetadataMap.CategoryNames:\r\n raise ValueError(\"The category '%s' was not found in the \"\r\n \"metadata map.\" % cat)", "def ensure_dims(array: xr.DataArray, *dimensions: Hashable) -> xr.DataArray:\n missing_dims = set(dimensions) - set(array.dims)\n\n new_dims = defaultdict(list)\n for coord in missing_dims:\n cdim_tuple = array.coords[coord].dims\n\n if len(cdim_tuple) > 1:\n raise ValueError('Multi dimensional coordinates are not supported')\n\n cdim = cdim_tuple[0]\n\n new_dims[cdim].append(coord)\n\n for dim, coords in new_dims.items():\n array = array.set_index({cdim: 
tuple(coords)}) # type: ignore[assignment]\n\n if len(coords) > 1:\n array = array.unstack(dim)\n\n return array.drop_vars(array.coords.keys() - set(array.dims))", "def verify_dimensions(self, value, exception=True, from_string=False):\n\n\t\tif from_string:\n\t\t\tvalue = Quantity(1, value)\n\n\t\ttry:\n\t\t\tif self.units is not None:\n\t\t\t\ttry:\n\t\t\t\t\tvalue.assert_dimensions(self.units)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\traise TypeError('Expected a Quantity, not \"{0}\"'.format(value))\n\t\t\t\texcept IncompatibleDimensions:\n\t\t\t\t\traise TypeError('Expected dimensions matching \"{0}\", not \"{1}\"'.format(self.units, value))\n\t\t\telse:\n\t\t\t\tif isinstance(value, Quantity):\n\t\t\t\t\traise TypeError('Unexpected Quantity \"{0}\"'.format(value))\n\t\texcept Exception:\n\t\t\tif exception:\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn False\n\n\t\treturn True", "def check_consistency(self):\n assert len(self.shape) == len(self.qhape) == len(self.dirs)\n # Qnums must be unique within a qim and correspond one-to-one with\n # dimensions in dim.\n assert all(\n (\n len(dim) == len(qim) == len(set(qim))\n for dim, qim in zip(self.shape, self.qhape)\n )\n )\n assert all(d == 1 or d == -1 for d in self.dirs)\n assert all(q == self._qod_func(q) for q in sum(self.qhape, []))\n # Check that every sect has a valid key and the correct shape and\n # dtype.\n for k, v in self.sects.items():\n assert v.dtype == self.dtype\n assert self.is_valid_key(k)\n block_shp_real = v.shape\n qnum_inds = tuple(\n self.qhape[i].index(qnum) for i, qnum in enumerate(k)\n )\n block_shp_claimed = tuple(\n [self.shape[i][j] for i, j in enumerate(qnum_inds)]\n )\n assert block_shp_claimed == block_shp_real\n if self.invar and (self.charge != 0 or not self.isscalar()):\n assert self.defval == 0\n return True", "def _check_dimensions(self, a, b):\n units_a = self._get_units(a)\n units_b = self._get_units(b)\n dim_a = units_a.dimensions\n dim_b = units_b.dimensions\n if dim_a != dim_b:\n raise UnitConversionError(units_a, dim_a, units_b, dim_b)", "def check_matching_unit_dimension(\n ureg: UnitRegistry, base_units: str, units_to_check: List[str]\n) -> None:\n\n base_unit = getattr(ureg, base_units)\n\n for unit_string in units_to_check:\n unit = getattr(ureg, unit_string)\n if unit.dimensionality != base_unit.dimensionality:\n raise DimensionalityError(base_unit, unit)", "def isscalar(cls, dataset, dim, per_geom=False):\n dim = dataset.get_dimension(dim)\n if (dim in cls.geom_dims(dataset)):\n return False\n elif per_geom:\n return all(isscalar(v) or len(list(unique_array(v))) == 1\n for v in dataset.data[dim.name])\n dim = dataset.get_dimension(dim)\n return len(dataset.data[dim.name].unique()) == 1", "def ensure_memory_shared(*tensors):\n for tensor_dict in tensors:\n for _, _, t in iterate_recursively(tensor_dict):\n assert t.is_shared()", "def check_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert x_lat_dim in X.coords.keys(), 'XCast requires a dataset_lat_dim to be a coordinate on X'\n\tassert x_lon_dim in X.coords.keys(), 'XCast requires a dataset_lon_dim to be a coordinate on X'\n\tassert x_sample_dim in X.coords.keys(), 'XCast requires a dataset_sample_dim to be a coordinate on X'\n\tassert x_feature_dim in X.coords.keys(), 'XCast requires a dataset_feature_dim to be a coordinate on X'", "def test_dim_None(a, b, metrics):\n metric, _metric = metrics\n if metric in [effective_sample_size, spearman_r_eff_p_value, pearson_r_eff_p_value]:\n with pytest.raises(ValueError) as 
excinfo:\n metric(a, b, dim=None)\n assert (\n \"Effective sample size should only be applied to a singular time dimension.\"\n in str(excinfo.value)\n )\n else:\n metric, _metric = metrics\n res = metric(a, b, dim=None)\n assert len(res.dims) == 0, print(res.dims)", "def _check_valid_sparse_tensor(indices: Union[_SparseComponentType,\n List[_SparseComponentType]],\n values: _SparseComponentType,\n size: Union[int, List[int]], name: str):\n # Check that all indices are in range.\n for current_indices in indices:\n if isinstance(current_indices, np.ndarray):\n current_indices = [current_indices]\n for dim, indices_array in enumerate(current_indices):\n if indices_array.size and size[dim] >= 0:\n i_min, i_max = min(indices_array), max(indices_array)\n if i_min < 0 or i_max >= size[dim]:\n i_bad = i_min if i_min < 0 else i_max\n raise ValueError(\n 'Sparse column {} has index {} out of range [0, {})'.format(\n name, i_bad, size[dim]))\n\n if len(indices) != len(values):\n raise ValueError(\n 'Sparse column {} has indices and values of different lengths: '\n 'values: {}, indices: {}'.format(name, values, indices))", "def validate_product_tensor_lists(conn_graph: ConnectedGraph):\n for product in conn_graph.get_all_products().values():\n # products going to branch ops will not have tensors associated with them\n if product.consumers[0].type != 'branch':\n if len(product.consumers) != len(product.tensor_dict.keys()):\n return False\n return True", "def check_shapes(arrs):\r\n shps = [i.shape for i in arrs]\r\n eq = np.all(np.array([shps[0] == i for i in shps[1:]]))\r\n err = \"Arrays arr not of the same shape...\"\r\n if not eq:\r\n raise ValueError(\"{}\\n{}\".format(err, shps))", "def check_for_index_targets(targets: torch.Tensor) -> bool:\n index_dtypes = [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]\n return targets.dtype in index_dtypes", "def validate_dimensions(m, n):\n if not (isinstance(m, int) and isinstance(n, int)):\n raise TypeError(\"dimensions must be integral\")\n if m <= 0 or n <= 0:\n raise ValueError(\"dimensions must be positive\")", "def _implicit_tensor_dimensions(dimensions):\n if not isinstance(dimensions, list):\n dimensions = [dimensions]\n flat = flatten(dimensions)\n if not all(isinstance(x, numbers.Integral) and x >= 0 for x in flat):\n raise ValueError(\"All dimensions must be integers >= 0\")\n return np.prod(flat), [dimensions, dimensions]", "def IsDimTol(self, *args):\n return _XCAFDoc.XCAFDoc_DimTolTool_IsDimTol(self, *args)", "def _assert_same_size(outputs, output_size):\n nest.assert_same_structure(outputs, output_size)\n flat_output_size = nest.flatten(output_size)\n flat_output = nest.flatten(outputs)\n\n for (output, size) in zip(flat_output, flat_output_size):\n if isinstance(size, tf.TensorShape):\n if output.shape == size:\n pass\n elif output[0].shape != tf.TensorShape(size):\n raise ValueError(\n \"The output size does not match the the required output_size\")", "def _check_variables(datasets, necessary_short_names):\n dataset_name = datasets[0]['dataset']\n necessary_short_names = set(necessary_short_names)\n short_names = set(group_metadata(datasets, 'short_name').keys())\n if short_names != necessary_short_names:\n raise ValueError(\n f\"Expected variables {necessary_short_names} for dataset \"\n f\"'{dataset_name}', got {short_names}\")", "def is_dimension_error(self):\n return self._tag == 'dimension_error'", "def check_metadata(layer_name, neuron_indices, ideal_activation,\n multiply_by_input):\n\n 
error_checking.assert_is_string(layer_name)\n error_checking.assert_is_integer_numpy_array(neuron_indices)\n error_checking.assert_is_geq_numpy_array(neuron_indices, 0)\n error_checking.assert_is_numpy_array(neuron_indices, num_dimensions=1)\n error_checking.assert_is_not_nan(ideal_activation)\n error_checking.assert_is_boolean(multiply_by_input)", "def ndim(tensor):\n raise NotImplementedError", "def check_array_dim(logger, arr, name, expected_dim, dim_idx):\n dim = arr.shape[dim_idx]\n # The second needs to be equal to expected_dim.\n # We raise an error if it less than expected_dim\n dim_msg = str(dim_idx)\n if dim_idx == 0:\n dim_msg = 'The first dimension of '\n elif dim_idx == 1:\n dim_msg = 'The second dimension of '\n\n check_value(is_valid=(dim >= expected_dim), error_msg=\n dim_msg + name + ' is ' + str(dim) + ' but is expected to be ' + str(expected_dim))\n # If it is greater than expected_dim we warn the user\n if dim > expected_dim:\n logger.warn(dim_msg + name + 'is ' + str(dim) + ' but is expected to be ' + str(expected_dim))\n\n return arr", "def check_form_match(\n cls,\n tensor1=None,\n tensor2=None,\n qhape1=None,\n shape1=None,\n dirs1=None,\n qhape2=None,\n shape2=None,\n dirs2=None,\n qodulus=None,\n ):\n if tensor1 is not None:\n qhape1 = tensor1.qhape\n shape1 = tensor1.shape\n dirs1 = tensor1.dirs\n if tensor2 is not None:\n qhape2 = tensor2.qhape\n shape2 = tensor2.shape\n dirs2 = tensor2.dirs\n if not (\n len(qhape1)\n == len(qhape2)\n == len(shape1)\n == len(shape2)\n == len(dirs1)\n == len(dirs2)\n ):\n return False\n # Loop over the indices of both tensors in tandem.\n for d1, qim1, dim1, d2, qim2, dim2 in zip(\n dirs1, qhape1, shape1, dirs2, qhape2, shape2\n ):\n # This is almost like compatible_indices, but for the missing minus\n # sign when building o_qim.\n qim2 = [d1 * d2 * q for q in qim2]\n if qodulus is not None:\n qim2 = [q % qodulus for q in qim2]\n qimdim1 = set(zip(qim1, dim1))\n qimdim2 = set(zip(qim2, dim2))\n if not qimdim1 == qimdim2:\n return False\n return True", "def test_dimension_size_infer(self, nt=100):\n i, j, k = dimify('i j k')\n shape = tuple([d.size for d in [i, j, k]])\n a = DenseData(name='a', shape=shape).indexed\n b = TimeData(name='b', shape=shape, save=True, time_dim=nt).indexed\n eqn = Eq(b[time, x, y, z], a[x, y, z])\n op = Operator(eqn)\n\n _, op_dim_sizes = op.arguments()\n assert(op_dim_sizes[time.name] == nt)", "def _assert_same_size(outputs: TensorStruct, output_size: OutputSize):\n flat_output_size = nest.flatten(output_size)\n flat_output = nest.flatten(outputs)\n for output, size in zip(flat_output, flat_output_size):\n if isinstance(size, torch.Size):\n if output[0].size() != size:\n raise ValueError('The output size does not matchthe required output_size')\n elif output[0].size()[-1] != size:\n raise ValueError('The output size does not match the required output_size')", "def _check_reflection_axis(self, reflection_axis):\n if (reflection_axis.shape.ndims is not None and\n reflection_axis.shape.ndims < 1):\n raise ValueError(\n \"Argument reflection_axis must have at least 1 dimension. 
\"\n \"Found: %s\" % reflection_axis)", "def is_schema_valid(self, schema):\n for k, v in schema.items():\n if v[0] == \"var_len\":\n assert len(v) == 2\n assert v[1] in TF_VALUE\n\n if v[0] == \"fixed_len\":\n assert len(v) == 3\n assert v[1] in TF_VALUE\n assert isinstance(v[2], list)", "def _check_shape(input_shape):\n msg = ('Input to SpatioTemporalExpansion must be 5D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg" ]
[ "0.70428336", "0.6533978", "0.6469752", "0.6321749", "0.6268698", "0.6185657", "0.6185021", "0.605668", "0.5926702", "0.5918645", "0.5908207", "0.58480775", "0.57827824", "0.5772328", "0.5762425", "0.57193136", "0.57082486", "0.5685897", "0.5651482", "0.5640664", "0.562222", "0.5601665", "0.5599323", "0.5589772", "0.5564222", "0.55534214", "0.54885685", "0.5481102", "0.54796255", "0.5443435", "0.5428443", "0.54155666", "0.53924865", "0.5391556", "0.5379972", "0.53506386", "0.5341175", "0.5333629", "0.5328989", "0.5328503", "0.53143907", "0.52835894", "0.52656376", "0.5263784", "0.5259956", "0.5254858", "0.5241122", "0.5232929", "0.52235276", "0.52152467", "0.52095157", "0.5202101", "0.5193668", "0.5186573", "0.51701665", "0.5163894", "0.5156043", "0.51521295", "0.51455826", "0.5145357", "0.5144867", "0.51372504", "0.51351774", "0.5127367", "0.51246166", "0.51224345", "0.51212806", "0.5120271", "0.5115157", "0.51079893", "0.5107248", "0.50951236", "0.5081934", "0.5074302", "0.5065345", "0.50649434", "0.5057024", "0.5054946", "0.50520235", "0.5046867", "0.50391304", "0.50320613", "0.5029322", "0.50171506", "0.5011857", "0.5008443", "0.50008816", "0.4995199", "0.49878913", "0.49871027", "0.49766496", "0.49728152", "0.49710405", "0.49548906", "0.49516892", "0.49468464", "0.4932622", "0.4932444", "0.49302274", "0.49234286" ]
0.83699447
0
Shorthand way to compose context, for maximum reuse.
def getContext(self, form):
    context = {
        'form': form,
        'projectList': self.projectList,
        'subnav_location': self.subnav_location,
        'curr_project': self.curr_project
    }
    return context
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_context(self, engine, args):\n args = self.normalize_args(args)\n _, ctx = self._make_argkey_and_context(engine, args)\n return ctx", "def __call__(self, **kwargs):\n return Context(self, kwargs)", "def _createContext(instance, args, kwargs, settings):\n context = kwargs.copy()\n args = list(args)\n context.update({name:getattr(instance, name, None) for name in settings.get('context', [])})\n context.update({key:args.pop(0) for key in settings.get('argsTokwargs', [])})\n return context", "def context_local(context=None):\n class manager(object):\n def __init__(self, ctx):\n \"\"\"\n :type ctx: Context\n \"\"\"\n self.context = ctx.copy()\n\n def __enter__(self):\n self.orig_context = context_get()\n context_set(self.context)\n return self.context\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n context_set(self.orig_context)\n\n if context is None:\n context = context_get()\n return manager(context)", "def _make_context():\n return {'app': app, 'db': db, 'User': User}", "def get_context(self, extra_ctx=None, **kwargs):\n ctx = {\n 'user': self.user,\n }\n if extra_ctx:\n ctx.update(extra_ctx)\n ctx.update(kwargs)\n return ctx", "def _make_context():\n return {'app': app, 'db': db, 'User': User}", "def _make_context():\n\n return {\n 'app': app,\n 'db': db,\n 'User': User\n }", "def _make_context():\n return {'app': app, 'db': db}", "def context(self) -> CONTEXT:", "def _make_context():\n return {'app': app,\n 'db': db,\n 'User': User\n }", "def get_context(self, *args, **kwargs):\n ctx = {}\n for k, v in kwargs.iteritems():\n ctx[k] = v\n if args:\n for idx, arg in enumerate(args):\n ctx['arg_%d' % idx] = arg\n return ctx", "def _make_context():\n return {\n 'api': application.mounts['/api'],\n 'db': db,\n 'User': User,\n 'admin': application.mounts['/admin']\n }", "def localcontext(ctx: 'Optional[Context]' = None, **kwargs: 'Any') -> '_ContextManager':\n with _localcontext(ctx) as lc:\n for attr, value in kwargs.items():\n setattr(lc, attr, value)\n yield lc", "def currentCtx(*args, **kwargs)->AnyStr:\n pass", "def run(context: components.Components):\n\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n def _child_context():\n buvar_context.set(context)\n return func(*args, **kwargs)\n\n ctx = contextvars.copy_context()\n return ctx.run(_child_context)\n\n return wrapper\n\n return decorator", "def create_context(cls):\n pass", "def _make_context():\n return {'User': User, 'CreditCard': CreditCard, 'Transaction': Transaction, 'db': db, 'jsonify':jsonify}", "def set(**args):\n return Context(args)", "def enrich_context(self, ctx: Context) -> Context:\n new_ctx = Context(ctx.expressions[:], ctx.namespace)\n for _ in range(self.expression_levels):\n new_ctx.extend(list(self.properties(new_ctx)))\n new_ctx.extend(list(self.unary_ops(new_ctx)))\n new_ctx.extend(list(self.binary_ops(new_ctx)))\n new_ctx.extend(list(self.calls(new_ctx)))\n new_ctx.extend(list(self.comparisons(new_ctx)))\n new_ctx.extend(list(self.bool_ops(new_ctx)))\n return new_ctx", "def create_context_in_tuple(request):\n params = request.param\n if isinstance(params, tuple):\n cc = params[0]\n remainder = tuple(params[1:])\n else:\n cc = params\n remainder = tuple()\n\n ctx = cc()\n def finalizer():\n ctx.release()\n gc.collect()\n request.addfinalizer(finalizer)\n\n if isinstance(params, tuple):\n return (ctx,) + remainder\n else:\n return ctx", "def context():\n return dict()", "def get_context(self):\n return self.context.generate()", "def _make_context(frames, cameras):\n 
return Context(cameras=cameras, frames=frames)", "def generate_context(self) -> Context:\n self._transient_context = Context()\n return self._transient_context", "def current_context():\n return Context.default_ctx", "def make_context(context, request=None, **kwargs):\n if context is not None and not isinstance(context, dict):\n raise TypeError(\n \"context must be a dict rather than %s.\" % context.__class__.__name__\n )\n if request is None:\n context = Context(context, **kwargs)\n else:\n # The following pattern is required to ensure values from\n # context override those from template context processors.\n original_context = context\n context = RequestContext(request, **kwargs)\n if original_context:\n context.push(original_context)\n return context", "def make_context(self, info_name, args, parent=None, **extra):\n # log.info(term.blue('MAKE CONTEXT'))\n for key, value in click._compat.iteritems(self.context_settings):\n if key not in extra:\n extra[key] = value\n ctx = Context(self, info_name=info_name, parent=parent, **extra)\n with ctx.scope(cleanup=False):\n self.parse_args(ctx, args)\n return ctx", "def _context_new(self):\n assert self._pa_mainloop is not None\n app_name = self._get_app_name()\n context = pa.pa_context_new(self._pa_mainloop,\n app_name.encode('ASCII')\n )\n return context", "def mkcontext(self,\n context= [],\n contextobj=None):\n if contextobj == None:\n raise ValueError, \"mkcontext: contextobj is None\"\n return jsoncall.do_call(\"mkcontext\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password,\\\n 'context':context,\\\n 'contextobj':contextobj.__dict__},\n self.connection)", "def context_get():\n global __context\n if __context is None:\n __context = Context()\n return __context", "def context(self) -> _C_out:\n return self._context", "def _context(name, func):\n\tpush_aspect(name, func)\n\tyield\n\tpop_aspect(name)", "def context_combination(\n log_level,\n worker_port,\n include_schema_registry,\n include_rocksdb,\n ci_provider,\n):\n return {\n \"log_level\": log_level,\n \"worker_port\": worker_port,\n \"include_schema_registry\": include_schema_registry,\n \"include_rocksdb\": include_rocksdb,\n \"ci_provider\": ci_provider,\n }", "def _extra_context(self):\r\n return {}", "def setContext(self, context: Any, /) -> Any:\n ...", "def make_context(self, cls_name=\"Context\"):\n\n tmp = {n: fn for n, fn in self._funcs.items()}\n return type(cls_name, (object,), tmp)()", "def _create_request_context(self, *args, **kwargs) -> Context:\n return Context()", "def context(self, *metaArgs, **kw):\n if not hasattr(self, 'pastInfo'):\n raise Exception(\n \"Can't use a context manager without saving call info\")\n ID = self.setCall(*metaArgs, **kw).ID\n yield InfoHolder(self, ID)\n self.forgetID(ID)", "def context(subcontext=None) -> None:\n if subcontext is None:\n subcontext = []\n args = subcontext\n\n if len(args) == 0:\n args = config_context_sections.split()\n\n sections = [(\"legend\", lambda *args, **kwargs: [M.legend()])] if args else []\n sections += [(arg, context_sections.get(arg[0], None)) for arg in args]\n\n result = defaultdict(list)\n result_settings: DefaultDict[str, dict] = defaultdict(dict)\n for section, func in sections:\n if func:\n target = output(section)\n # Last section of an output decides about output settings\n settings = output_settings.get(section, {})\n result_settings[target].update(settings)\n with target as out:\n result[target].extend(\n func(\n target=out,\n width=settings.get(\"width\", None),\n 
with_banner=settings.get(\"banner_top\", True),\n )\n )\n\n for target, res in result.items():\n settings = result_settings[target]\n if len(res) > 0 and settings.get(\"banner_bottom\", True):\n with target as out:\n res.append(pwndbg.ui.banner(\"\", target=out, width=settings.get(\"width\", None)))\n\n for target, lines in result.items():\n with target as out:\n if result_settings[target].get(\"clearing\", config_clear_screen) and lines:\n clear_screen(out)\n out.write(\"\\n\".join(lines))\n if out is sys.stdout:\n out.write(\"\\n\")\n out.flush()", "def make_context(source, frmat='table'):\n return Context.fromstring(source, frmat=frmat)", "def context():\n return EnvContext(\n vars={'workflow': config.FLOWSERV_APP, 'group': config.FLOWSERV_GROUP}\n )", "def current_context():\n return _current.get()", "def create_context(config='', options=None, target_roots=None, **kwargs):\r\n config = config if isinstance(config, Config) else create_config(config)\r\n run_tracker = create_run_tracker()\r\n target_roots = maybe_list(target_roots, Target) if target_roots else []\r\n return Context(config, create_options(options or {}), run_tracker, target_roots, **kwargs)", "def context(self) -> Any:\n ...", "def make_context(self, args, **kwargs):\n #The following headers will be available from Auth filter:\n #'X-Tenant-Id', 'X-Tenant-Name', 'X-User-Id',\n #'X-User-Name', 'X-Roles'\n context_params = {'auth_tok' : args.headers['X-Auth-Token'],\n 'user' : args.headers['X-User-Id'],\n 'tenant' : args.headers['X-Tenant-Id'] }\n\n LOG.debug(\"Building context with params: %s\" % context_params)\n \n return ReddwarfContext(**context_params)", "def generate_context(name='', argspec='', note='', math=False, collapse=False,\n img_path=''):\n \n context = \\\n {\n # Arg dependent variables\n 'math_on': 'true' if math else '',\n 'name': name,\n 'argspec': argspec,\n 'note': note,\n 'collapse': collapse,\n 'img_path': img_path,\n \n # Static variables\n 'css_path': CSS_PATH,\n 'js_path': JS_PATH,\n 'jquery_path': JQUERY_PATH,\n 'mathjax_path': MATHJAX_PATH,\n 'right_sphinx_version': '' if sphinx.__version__ < \"1.1\" else 'true',\n 'platform': sys.platform\n }\n \n return context", "def contextbound(self, _cls=_StackBound):\n return _cls(self, self.push_context, self.pop_context)", "def _context():\n global _trident_context\n if _trident_context is None:\n _trident_context = _Context()\n return _trident_context", "async def copy_context_with(ctx: commands.Context, *, author=None, **kwargs):\n\n # copy context and update attributes\n alt_message = copy.copy(ctx.message)\n alt_message._update(alt_message.channel, kwargs)\n\n if author is not None:\n alt_message.author = author\n\n # obtain and return a new context of the same type\n return await ctx.bot.get_context(alt_message, cls=type(ctx))", "def context_set(context):\n global __context\n if context == DefaultContext:\n context = context.copy()\n __context = context", "def make_context(self, info_name, args, parent=None, **extra):\n for key, value in click._compat.iteritems(self.context_settings):\n if key not in extra:\n extra[key] = value\n ctx = SectionedContext(\n self, info_name=info_name, parent=parent, sections=self.sections, **extra\n )\n with ctx.scope(cleanup=False):\n self.parse_args(ctx, args)\n return ctx", "def logWithContext(**context):\n destination = get_destination(context)\n def _log(message=\"\", **kwargs):\n myContext = {}\n myContext.update(context)\n myContext.update(kwargs)\n log(message, **myContext)\n return _log", "def 
make_shell_context():\n return {'db': db, 'User': User, 'Post': Post}", "def _build_context(cls, context=None, vocab=None, base=None, language=None):\n VOCAB, BASE, LANGUAGE = \"@vocab\", \"@base\", \"@language\"\n if context is None:\n context = {}\n if vocab:\n context[VOCAB] = str(vocab)\n else:\n for x in cls.__mro__:\n if hasattr(x, \"vocab\"):\n v = x.vocab()\n if v:\n context[VOCAB] = v\n break\n\n if base:\n context[BASE] = str(base)\n\n if language:\n context[LANGUAGE] = language\n if context:\n cls.__annotations__.update(context)", "def provide_context(self) -> Optional[Dict[Text, Any]]:", "def add_context(self):\n return {}", "def merge_context(self, context):\n context.update(self._context)\n self._context = context", "def prepare_context(self, activity, context, typename=None):\n context.update({\n 'activity': activity,\n 'object': activity.snapshot,\n 'typename': typename,\n })\n return context", "def new_context(self):\n return dict()", "def ctx():\n return None", "def copy(self):\n rv = Context(self.decimal, self.year_mode, self.quantize_interest,\n self.quantize_currency)\n return rv", "def _extra_context(self):\r\n return {'queue_len': self.queue_len, }", "def get_final_context(context, form_errors=None):\n\tlogo = redis_cache.get('logo')\n\tif not logo:\n\t\tlogo = UpImages.objects.get(image_title=\"Logo\")\n\t\tredis_cache.set('logo', logo)\t\n\n\tcategories = redis_cache.get('categories')\n\tif not categories:\n\t\tcategories = Category.objects.all()\n\t\tredis_cache.set('categories', categories, timeout=600)\n\tcontext['logo'] = logo\n\tcontext['categories'] = categories\n\tif form_errors:\n\t\tcontext['form_errors']=form_errors\n\treturn context", "def _context(use_tls=False):\n if use_tls is False:\n return None\n config = Config()\n ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n ctx.load_cert_chain(config.tls_cert, config.tls_key)\n ctx.options |= ssl.OP_NO_SSLv2\n ctx.options |= ssl.OP_NO_SSLv3\n ctx.options |= ssl.OP_NO_COMPRESSION\n ctx.options |= ssl.OP_CIPHER_SERVER_PREFERENCE\n if not config.args.less_secure:\n ctx.options |= ssl.OP_SINGLE_DH_USE\n ctx.options |= ssl.OP_SINGLE_ECDH_USE\n ctx.set_ciphers(\":\".join(ciphers))\n if config.tls_dhparams:\n ctx.load_dh_params(config.tls_dhparams)\n return ctx", "def get_context_dict(context):\n if isinstance(context, RequestContext):\n ctx = {}\n list(map(ctx.update, context.dicts))\n else:\n ctx = context\n return ctx", "def _get_context(data):\n try:\n docker_options = DockerRunCommandOptions(cmd=\"docker run --help\",\n start=\"Options:\",\n end=None).get_options_json()\n except Exception as ex:\n print(ex)\n docker_options = {}\n context = DEFAULT_DATA.copy()\n context[\"docker_options\"] = docker_options\n context.update(data)\n context[\"registry\"][\"address_select\"] = \"\"\n if context[\"registry\"][\"address\"] in context[\"registry_options\"].keys():\n context[\"registry\"][\"address_select\"] = context[\"registry\"][\"address\"]\n return context", "def set_thread_call_context(ctx):\n _local.context = ctx\n return ctx", "async def _create_context(self) -> ssl.SSLContext:\n context = utils.server_context_modern()\n\n await self.cloud.run_executor(\n context.load_cert_chain,\n self._acme.path_fullchain,\n self._acme.path_private_key,\n )\n\n return context", "def get_context(x, w=2, normalize=True):\n\n # check if context exists\n# if os.path.isfile('contextdata.npy'):\n# print('loading context data from file')\n# return np.load('contextdata.npy')\n#\n input_dim = x.shape\n\n if normalize:\n x 
= np.reshape(x, [input_dim[0]*input_dim[1], input_dim[2]]) # for ease of normalization\n x = sklearn.preprocessing.normalize(x, norm='l2', axis=1)\n x = np.reshape(x, [input_dim[0], input_dim[1], input_dim[2]])\n\n # padding\n p = Context.pad(x, w)\n\n # extract context\n c = Context.slide(p, w)\n\n# np.save('contextdata.npy', c)\n\n return c", "def resolveContext(self, context):\n if context is None:\n return context\n elif isinstance(context, tuple):\n return context\n elif isinstance(context, tuple):\n return tuple(context.split('/'))\n else:\n return context.getPhysicalPath()", "def make_shell_context():\n\n return dict(app=app, db=db, User=User)", "def make_shell_context():\n\n return dict(app=app, db=db, User=User)", "def make_shell_context():\n return dict(server=server,\n db=db,\n User=User,\n Article=Article,\n Topic=Topic,\n Comment=Comment,\n Subscription=Subscription)", "def _exprep(self, context):\n return `self`", "def get_contexts(self):\n return tuple(getattr(self, name) for name in self.__argnames__)", "def capture_context(\n context: Optional[Union[int, Mapping[str, Any]]] = 0\n) -> Optional[Mapping[str, Any]]:\n if isinstance(context, int):\n if hasattr(sys, \"_getframe\"):\n frame = sys._getframe(context + 1)\n context = LayeredMapping(frame.f_locals, frame.f_globals)\n else:\n context = None # pragma: no cover\n return context", "def get_context(self, entity):\n\t\tc = entity.get_context()\n\t\tif c == None:\n\t\t\tentity.context = self.app\n\t\treturn c", "def _extra_context(self):\r\n extra_context = {\r\n 'queue_len': str(self.queue_len),\r\n 'queue_msg': self.queue_msg,\r\n 'button_enabled': self.button_enabled(),\r\n 'matlab_editor_js': '{static_url}js/vendor/CodeMirror/octave.js'.format(\r\n static_url=self.capa_system.STATIC_URL),\r\n }\r\n return extra_context", "def make_context(\n container: ServiceContainer,\n component_name: str,\n **kwargs\n) -> Dict[str, Any]:\n\n from wired_components.component import IWrapComponents, IComponent\n\n # Start with all the wrapped components\n context: Dict[str, Any] = container.get(IWrapComponents)\n\n # We get the component again in case there are multiple components\n # registered with the same name, but for more specific contexts.\n component_factory = container.get(IComponent, name=component_name)\n\n # TODO Try to replace this part with DI+props in wired.components\n # (see above in component_factory)\n component_instance = component_factory(**kwargs)\n\n # Copy all the fields into the context dict\n for field in dataclasses.fields(component_instance):\n context[field.name] = getattr(component_instance, field.name)\n\n return context", "def _compose(inner, outer):\n @functools.wraps(outer)\n def composed(*a, **kw ): #pylint: disable=C0111\n return outer(inner(*a, **kw))\n return composed", "def init_with_context(self, context):\n pass", "def get_thread_call_context(create=False):\n rv = getattr(_local, 'context', None)\n if rv is not None:\n return rv\n if not create:\n return\n return set_thread_call_context(contextlib.new_call_context())", "def application_core_context(verbosity_for_session):\n with ApplicationCoreContext(verbosity_for_session):\n yield", "def getContext(namespace):", "def concat_context(context_d):\n return context_d['COALICION'] + context_d['PARTIDO'] + context_d['SENTIMIENTO'] + context_d['ENTIDADES'] + context_d['HASHTAGS']+ context_d['FRASES'] + context_d['TWEET']", "def compose_expanded_args(f,g):\n def composed(*args):\n return f(*(g(*args)))\n\n return composed", "def get_context(self):\r\n 
ctx = {}\r\n for clause in self.where_clauses or []:\r\n clause.update_context(ctx)\r\n return ctx", "def context(\n self,\n namespace=key_module.UNDEFINED,\n cache_policy=None,\n global_cache=None,\n global_cache_policy=None,\n global_cache_timeout_policy=None,\n legacy_data=True,\n ):\n context = context_module.get_context(False)\n if context is not None:\n raise RuntimeError(\"Context is already created for this thread.\")\n\n context = context_module.Context(\n self,\n namespace=namespace,\n cache_policy=cache_policy,\n global_cache=global_cache,\n global_cache_policy=global_cache_policy,\n global_cache_timeout_policy=global_cache_timeout_policy,\n legacy_data=legacy_data,\n )\n with context.use():\n yield context\n\n # Finish up any work left to do on the event loop\n context.eventloop.run()", "def nested(*contexts):\n with ExitStack() as stack:\n for ctx in contexts:\n stack.enter_context(ctx())\n yield contexts", "def get_context():\n return dict(app=app, db=db, models=models, forms=forms)", "def context():\n\n class FakeContext:\n function_name = \"FUNCTION_NAME\"\n memory_limit_in_mb = 1024\n invoked_function_arn = \"INVOKED_FUNCTION_ARN\"\n aws_request_id = \"AWS_REQUEST_ID\"\n log_group_name = \"LOG_GROUP_NAME\"\n log_stream_name = \"LOG_STREAM_NAME\"\n\n def get_remaining_time_in_millis(self):\n # 5 minutes\n return 300000\n\n return FakeContext()", "async def get_context(self, message, *, cls=NewCtx):\n return await super().get_context(message, cls=cls)", "def cooked_mode(self) -> ContextManager[None]:", "def context(self):\n LOGGER.debug('Getting context: %s', self._context)\n return self._context", "async def context(wrapped: AsyncGenerator) -> AsyncGenerator:\n invocation = Invocation.current\n async with wrapped: # type: ignore\n invocation._become_current() # pylint: disable=protected-access\n yield ()\n invocation._become_current() # pylint: disable=protected-access", "def get_context_data(self, request, **kwargs):\n for piece_name in self.pieces.keys():\n piece = getattr(self, piece_name)\n self.context = piece.get_context_data(self.context, **kwargs)\n return self.context", "def get_context(self):\n return {}", "def compose(\n context, command, user=get_local_user(), remote=False, instance=None, stack=None\n):\n run_command(context, user, remote, instance, stack, command)", "def fromContext(cls, ctx):\n raise NotImplementedError(\n 'fromContext is not implemented on %r' % (cls.__name__,))", "def get_context(self):\n return {\"request\": self.request, \"format\": self.format_kwarg, \"view\": self}" ]
[ "0.68892765", "0.6741356", "0.650719", "0.6487625", "0.647903", "0.64450336", "0.641441", "0.64143896", "0.63976085", "0.6372777", "0.63546556", "0.63230544", "0.6281274", "0.62207276", "0.6192012", "0.61908066", "0.61687595", "0.6161329", "0.6132779", "0.609645", "0.6085159", "0.6068595", "0.6041928", "0.5981409", "0.5950089", "0.5930338", "0.5845173", "0.5825334", "0.5785761", "0.5767405", "0.5767256", "0.5763274", "0.57371455", "0.5726633", "0.5718152", "0.5704528", "0.57016355", "0.56694007", "0.5659687", "0.5659347", "0.56442255", "0.5590874", "0.5587521", "0.5571998", "0.55464727", "0.5522206", "0.5513052", "0.55093265", "0.5505426", "0.5504825", "0.5502203", "0.5466836", "0.5465178", "0.54627836", "0.54604685", "0.54467297", "0.5446635", "0.5430316", "0.5429686", "0.54257065", "0.5412433", "0.5410199", "0.53707606", "0.5368678", "0.53549576", "0.5353663", "0.5349394", "0.53480595", "0.5343228", "0.5341787", "0.53384316", "0.53265905", "0.53265905", "0.53210926", "0.5317046", "0.5314303", "0.5311892", "0.5311499", "0.5305514", "0.53002286", "0.52927", "0.5291133", "0.52850264", "0.5285001", "0.528175", "0.5278812", "0.5273114", "0.52719486", "0.5267705", "0.52528024", "0.5251081", "0.5247835", "0.52463204", "0.5220052", "0.521458", "0.52141917", "0.52124053", "0.5208873", "0.52067214", "0.519563", "0.51938486" ]
0.0
-1
The GET view method.
def get(self, request):
    context = self.getContext(GeoPostForm())
    return render(request, 'geopost/home.html', context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, *args, **kwargs):\n return self._hit(\"GET\", *args, **kwargs)", "def get(self, *args, **kwargs):\n self.request(\"get\", *args, **kwargs)", "def get(self, *args, **kwargs):\n return self._request('get', *args, **kwargs)", "def get(self):\n self.get_or_post(method='GET')", "def _get(self, *args, **kwargs):\n return self._request('get', *args, **kwargs)", "def get(self, request):\n pass", "def http_method_get():\n return 'GET'", "def get(self, *path, **data):\n\t\treturn self.request('GET', *path, **data)", "def get(self, *args, **kw):\n kw['method'] = 'GET'\n return self.open(*args, **kw)", "def get(self, path):\n return self.request(path, method='GET')", "def get(self, *args, **kwargs):\n pass", "def get(self, *args, **kwargs):\n pass", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, *args, **kwargs):\n return self.handle_get_request()", "def retrieve(self,request, pk = None):\n return Response({'http_method': 'GET'})", "def do_GET(self):\r\n self._send_handler_response('GET')", "def _get(self):\n return self.request(method=\"get\", path=self.router.fields)", "def do_GET(self):\n self.http_method = 'GET'\n self.response()", "def retrieve(self,request , pk=None):\r\n return Response({'HTTP method':'GET'})", "def retrieve(self, request, pk=None):\n\n return Response({'http_method': 'GET'})", "def get(self, *args, **kwargs):", "def retrieve(self, request, pk=None):\n return Response({\"retrieve_get\": 'GET'})", "def retrieve(self, request, pk=None):\n return Response({'http_method': 'GET'})", "def retrieve(self, request, pk=None):\n\n return Response({'http_method':'GET'})", "def get(self, url):\n return self._request('GET', url)", "def get(self, path):\n response = self._request(\"GET\", path)\n return self._handle_response(response)", "def _get(self, request_obj):\n return self._execute_action(request_obj, 'GET')", "def get(self, request, *args, **kwargs):\n items = self.get_items()\n return self.print(request, items)", "def get(self, *args, **kwargs):\n if len(args) != 1:\n raise TypeError('wrong number of arguments')\n return self._geturl.get(*args, **kwargs)", "def do_get(self, *args):\n raise NotImplementedError()", "def get(self, path, req = None, **kwargs):\n req = req or []\n return self.route(path, req=req+[filter_method(['GET'])], **kwargs)", "def do_GET(self):\n self.log.debug('do_GET called')\n self.HeadGet('GET')", "def get(self, url_pattern):\n return self.route(url_pattern, methods=['GET'])", "def get(self, *args):", "def do_GET(self):\n self._try_to_process_request(self._handle_get_request)", "def get(self):\n return self.request().get()", "def get(self):\n return self.request().get()", "def api_get(self, path, query=None):\n return self._api_request(path, 'GET', query=query)", "def get(self, request, format=None):\n \n return Response(\"ahla Rami\")", "def _get(self, request_obj):\n return self._execute_action(request_obj, [AbstractModelGetAction, AbstractModelItemGetAction], 'GET')", "def get(self, request, format=None):\n an_apiview = [\n 'Uses HTTP methods as functions (get,post,patch,put,delete)',\n 'Is similar to a traditional django view',\n 
'Gives you the most control over the applicaton logic',\n 'Is mapped manually to the URLs',\n ]\n return Response({'message': 'get method', 'an_apiview': an_apiview})", "def list(self):\n return self.request(\"GET\")", "def get(self):\r\n return http.Request('GET', self.get_url()), parsers.parse_json", "def get(self):\r\n return http.Request('GET', self.get_url()), parsers.parse_json", "def view_get():\n\n return jsonify(get_dict(\"url\", \"args\", \"headers\", \"origin\"))", "def do_GET(self):\n parsed_path = urlparse.urlparse(self.path)\n if parsed_path.path == '/books':\n return self.books()\n elif parsed_path.path == '/my_loans':\n return self.my_loans()\n return self.send_response(404)", "def get(self, name):\r\n return self.format_inline('GET', name)", "def get(self, *args):\n return self.docs.get(*args)", "def get(self, *args, **kwargs):\n url = urljoin(self.instance(), args[0])\n return self._requests_call(util.requests_get, url, *args[1:], **kwargs)", "def get(self, **kwargs):\n if not hasattr(self, \"_get\"):\n flask_restful.abort(405, message=f\"Method not allowed\")\n self.is_html = False # pylint: disable=attribute-defined-outside-init\n\n try:\n # GET on individual resources doesn't support search. The 'make_search_query'\n # function correctly raises an error if it's called on resources that don't\n # support search. Therefore, we just call it here for that error side effect.\n _ = self.make_search_query(flask.request.args)\n # We are using kwargs, because the object ID in the URL has different names\n # depending on the resource. The resource _get() implementations therefore use\n # different keyword parameter names, which we don't know here in the base class.\n # allow an exception.\n # pylint: disable=no-member\n return self._get(**kwargs)\n except ValueError as ex:\n flask_restful.abort(400, message=f\"Bad Request - {str(ex)}\")", "def get(self, url, params=None):\n return self.session.get(url=self.base_url + url, params=params)", "def get(self, url: str, **kwargs: Dict[str, Any]) -> Any:\n pass", "def get(self, api_path, *args, **kwargs):\n\n\t\treturn self._do_operation(u'get', api_path, *args, **kwargs)", "def get(self, path: str, params: dict) -> dict:\n return self.request(\"GET\", path, params)", "def _get(self, url):\n return self._request(url)", "def retrieve(self, request, pk=None): #Equals to -> GET/{primarykey}\n return Response({'http_method': 'GET'})", "def get(self, url, query=None):\n # Perform get request with query filter\n if query is not None:\n return self._query(url, 'GET', params=quote(f'query=\"{query}\"'))\n\n # Perform simple get request\n return self._query(url, 'GET')", "def http_get(self, **kwargs):\n return self.rabjcallable.get(**kwargs)", "def get(self):\n return self.request({\n \"path\": \"/\" + UUID\n })", "def _get(self, path=\"\", query={}, **kwargs):\n qs = urllib.urlencode(query)\n uri = force_json(self.uri + path) + \"?\" + qs\n return self.client.request(uri, method=\"GET\", **kwargs)", "def get(self):\n return self.handler(url=self.url)", "def http_get(self) -> Optional[pulumi.Input['HTTPGetActionArgs']]:\n return pulumi.get(self, \"http_get\")", "def do_GET(self):\n if not self.path or self.path == \"/\":\n self.redirect()\n elif self.is_viewvc():\n try:\n self.run_viewvc()\n except IOError:\n # ignore IOError: [Errno 32] Broken pipe\n pass\n else:\n self.send_error(404)", "def do_GET(self):\r\n if not self._client_allowed():\r\n return\r\n\r\n try:\r\n (_, _, path, query, _) = urlparse.urlsplit(self.path)\r\n params = 
urlparse.parse_qs(query)\r\n # Give each handler a chance to respond.\r\n for prefix, handler in self._GET_handlers:\r\n if self._maybe_handle(prefix, handler, path, params):\r\n return\r\n # If no path specified, default to showing the list of all runs.\r\n if path == '/':\r\n self._handle_runs('', {})\r\n return\r\n\r\n self._send_content('Invalid GET request %s' % self.path, 'text/html')\r\n except (IOError, ValueError):\r\n pass # Printing these errors gets annoying, and there's nothing to do about them anyway.\r\n #sys.stderr.write('Invalid GET request %s' % self.path)\r", "def get(self, *args, **kwargs):\n return Response({'foo': 'bar'})", "def get(self, url, params=None):\n # TODO: handle params\n path = self.get_path(url)\n return self.build_response_for(path)", "def Get(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def ng_get(self, request, *args, **kwargs):\r\n return self.build_json_response(self.get_object())", "def get(cls, *args):\n return cls.query.get(*args)", "def get():", "def get():", "def httpGet(self, url, parameters=None):\r\n return self.auth.get(url, parameters)", "def get(self, **kwargs):\n if not hasattr(self, \"_get\"):\n flask_restful.abort(405, message=f\"Method not allowed\")\n self.is_html = False # pylint: disable=attribute-defined-outside-init\n\n try:\n # Create a TinyDB query out of the search query expression in the URL. If none was\n # provided then search_query is None.\n search_query = self.make_search_query(flask.request.args)\n # We are using kwargs, because the object ID in the URL has different names\n # depending on the resource. The resource _get() implementations therefore use\n # different keyword parameter names, which we don't know here in the base class.\n # _get() is defined in the child class, we don't want pylint to complain, so we\n # allow an exception.\n # pylint: disable=no-member\n return self._get(query=search_query, **kwargs)\n except ValueError as ex:\n flask_restful.abort(400, message=f\"Bad Request - {str(ex)}\")", "async def get(self):\n\n pass", "def get(self, url_or_path):\n return self.request.get(url_or_path).json()", "def get(self, path='', **kwargs):\n\n r = self.session.get(self.url(path), **kwargs)\n self.log_request(r)\n return r", "def get(self):\n self.post()", "def get(self):\n self.post()", "def get(self, path: str) -> Response:\n endpoint_ = checkEndpoint(\"GET\", path)\n if not endpoint_[\"method\"]:\n # If endpoint and Get method not supported in the API\n abort(endpoint_[\"status\"])\n return item_collection_get_response(path)", "def get(self, request, format=None):\n an_apiview = [\n \"User HTTp methods get, put, post, delete method\",\n \"very similar to previous Django view\",\n \"gives you more control on api logic\",\n 'Is mapped to manually to urls'\n ]\n\n return Response({'message':\"hello\", \"an_apiview\": an_apiview} )", "def get(self, request, pk):\n return self.retrieve(request, pk)", "def get(self, app_prefix, path):\n return self.handle_request('get', app_prefix, path)", "def get(self, pattern, handler):\n return self.route(Router.GET, pattern, handler)", "def get(self, request, format=None):\n an_apiview = [\n 'Uses HTTP methos as function (get, post, patch, put, delete)',\n 'Is similar to a traditional Django View',\n 'Gives you the most control over your application logic',\n 'Is mapped manually to URLs',\n ]\n\n return Response({'message': 'Hello', 'an_apiview': an_apiview}) # Dictionary or List only", "def get(self, api_path, *args, **kwargs):\n\n 
return self._do_operation(u'get', api_path, *args, **kwargs)", "def get(self, url):\n return self.app.get(get_url(url), follow_redirects=True)", "def get(self, request):\n return self.execute_query()", "def get(id=None):\n return requests.get(\"/{}\".format(id))", "def sr_get(self, route_or_uri, params=None, query=None, **kwargs):\n return self.__req(\n route_or_uri,\n params=params,\n query=query,\n op=self.get,\n raw_response=True,\n **kwargs,\n )", "def do_GET(self):\n path = self.path.split('/')\n if len(path) == 3:\n key.key_events(path[2])\n self.send_head()", "async def get(self, path, params=None, json_data=None):\n response = await self.request('GET', path, params, json_data)\n return response", "def GET(self):\n content = requests.get(self._url)\n if content.status_code != 200:\n print(\"There was a problem with the get request. Error code is: %s.\" % content.status_code)\n return False\n else:\n print(\"The status code is %s.\" % content.status_code)\n return content", "def GET(self, *args):\n if not args:\n self.logger.error('No arguments were given')\n return json.dumps({\"results\":{}})\n return json.dumps(self.get_single_new(args[0]))", "def do_GET(self):\n url_parsed = urlparse.urlparse(self.path)\n path = url_parsed.path.lstrip(\"/\")\n print url_parsed, path\n #import pdb; pdb.set_trace()\n if path == '':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(self.page)\n elif path == 'search':\n query = urlparse.parse_qs(url_parsed.query)\n keyword = query.setdefault(\"keyword\", \"python\")\n tqx = dict([q.split(':') for q in query['tqx'][0].split(';')])\n \n self.send_response(200)\n self.send_header(\"Content-type\", \"text/plain\")\n self.end_headers()\n data_table = get_datatable(keyword)\n content = data_table.ToJSonResponse(req_id=int(tqx['reqId']))\n self.wfile.write(content)", "def do_GET(self):\r\n path = self.path\r\n status_code, res = webServer.handle_get_msg(path)\r\n self.send_response(status_code)\r\n self.end_headers()\r\n self.wfile.write(res.encode())", "async def get(self, **context):\n return {}", "def get(self):\n raise tornado.web.HTTPError(404, reason=\"Page not found\")" ]
[ "0.8008264", "0.7953128", "0.79350144", "0.7772235", "0.7749338", "0.77399784", "0.77047", "0.7682195", "0.7652674", "0.7584354", "0.7562804", "0.7562804", "0.74978745", "0.74978745", "0.74978745", "0.74978745", "0.74978745", "0.7471668", "0.74600196", "0.745688", "0.7411558", "0.7396937", "0.7363619", "0.72946614", "0.7242386", "0.7214859", "0.7199099", "0.71900636", "0.7158742", "0.7045834", "0.70094925", "0.6982049", "0.6980644", "0.6959083", "0.6939902", "0.69339436", "0.6924849", "0.6918077", "0.6908792", "0.6897199", "0.6897199", "0.6895797", "0.68823135", "0.68680084", "0.68546396", "0.6830466", "0.68030417", "0.68030417", "0.68024266", "0.6798787", "0.67966783", "0.67959964", "0.6768677", "0.676293", "0.6761409", "0.6754133", "0.67458296", "0.6738101", "0.672288", "0.6722234", "0.6715701", "0.67114085", "0.66947293", "0.6692763", "0.66860086", "0.66802025", "0.66694725", "0.6654568", "0.66438913", "0.6632341", "0.66185355", "0.66129965", "0.66031617", "0.65995854", "0.65995854", "0.6595254", "0.65945196", "0.6594242", "0.6594119", "0.65882534", "0.65866894", "0.65866894", "0.65769", "0.6574546", "0.6561728", "0.65574956", "0.65567183", "0.65524507", "0.6541307", "0.6527405", "0.65268654", "0.6517646", "0.6517291", "0.65111136", "0.64941525", "0.6489261", "0.64864445", "0.6458512", "0.6454861", "0.64536643", "0.64390206" ]
0.0
-1
Render with blank form...
def get(self, request): context = self.getContext(GeoPostForm()) return render(request, 'geopost/entry.html', context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_form():", "def render_form_content(self):\n return mark_safe(self.render_as_object() + self.render_missing_fields())", "def form():\n return render_template(\n 'form.html'\n )", "def render_form(self, title=\"\", body=\"\", error=\"\"):\n self.render(\"newpost.html\", title=title, body=body, error=error)", "def NBF():\n return render_template('new_beer_form.html')", "def show_form():\n\n return render_template(\"form.html\")", "def test_blank(self):\n form_data = self.form_data('')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def make_form(self):", "def display_form():\n return render_template(\"form.html\",\n title=\"Welcome Form\",\n heading=\"Please fill in this form\",)", "def view_form(self):\n if not self.valid:\n raise MudderyError(\"Invalid form: %s.\" % self.form_name)\n\n context = self.get_context()\n\n return render(self.request, self.template_file, context)", "def display_form():\n\n return render_template('add_new_student.html')", "def render_creation_form(request: Request):\n return templates.TemplateResponse(\"creation_form.html\",{'request': request})", "def get_form(self):\n form = super(AjaxCreateView, self).get_form()\n\n if form.initial.get('part', None):\n # Hide the part field\n form.fields['part'].widget = HiddenInput()\n\n return form", "def index():\n return render_template('form.html')", "def get(self):\n self.render(\"signup-form.html\")", "def form_InputNoneValue(request):\n schema = schemaish.Structure()\n schema.add('inputStrip', schemaish.String(default=''))\n\n form = formish.Form(schema, 'form')\n form['inputStrip'].widget = formish.Input(strip=True, none_value='BANG')\n return form", "def form_invalid(self, form):\n if self.unsucess_template:\n self.template_name = self.unsucess_template\n return self.render_to_response(self.get_context_data(form=form))", "def meme_form():\n return render_template('meme_form.html')", "def meme_form():\n return render_template('meme_form.html')", "def NBRF():\n return render_template('new_brass_form.html')", "def NBAF():\n return render_template('new_bar_form.html')", "def render(self):\r\n super().render()", "def show_form():\n\n prompts = story.prompts\n\n return render_template(\"base.html\", prompts = prompts )", "def form_invalid(self, form):\n return self.render_to_response(self.get_context_data(form=form))", "def form_invalid(self, form):\n return self.render_to_response(self.get_context_data(form=form))", "def index(request):\n\n if request.method != \"POST\":\n #No data submitted: render home page\n form = SearchForm()\n\n #Display a blank form\n context = {\n \"form\": form,\n }\n\n return render(request, \"news_site/index.html\", context)", "def render_form(self):\n s = [f.render() for f in self.fields]\n r = [f.render() for f in self.refs]\n return view.render_form('form.html', {\n 'fields': ''.join(s),\n 'refs': ''.join(r),\n 'errors': self.get_errors(),\n 'id': self.model.id\n })", "def show_new_user_form():\r\n return render_template('user-form.html')", "def registration_form():\n\n return render_template(\"/registration_form.html\")", "def application_form():\n\n\treturn render_template(\"application-form.html\")", "def __init__(self, *args, **kwargs):\n kwargs.pop('widget_syntax')\n\n super(TemplateForm, self).__init__( *args, **kwargs)\n print self.fields", "def get(self, request):\n form = self.form_class()\n return render(request, self.template_name, {\"form\": form})", "def app_form():\n\n return render_template(\"application-form.html\")", "def 
blank(self):\n pass", "def form_StringDifferentEmpty(request):\n schema = schemaish.Structure()\n schema.add('myStringField', schemaish.String())\n form = formish.Form(schema, 'form')\n form['myStringField'].widget = formish.Input(empty='')\n return form", "def test_context_data_with_blank_form(self):\n response = self.client.get(self.get_url(self.study.pk), {'description': ''})\n context = response.context\n self.assertTrue(context['form'].is_bound)\n self.assertFalse(context['has_results'])\n self.assertIn('results_table', context)", "def test_context_data_with_blank_form(self):\n response = self.client.get(self.get_url(self.study.pk), {'description': ''})\n context = response.context\n self.assertTrue(context['form'].is_bound)\n self.assertFalse(context['has_results'])\n self.assertIn('results_table', context)", "def show_register_form():\n return render_template(\"register-form.html\")", "def form_InputDateNoneValue(request):\n schema = schemaish.Structure()\n schema.add('inputStrip', schemaish.Date(default=datetime.date(1900,1,1)))\n\n form = formish.Form(schema, 'form')\n form['inputStrip'].widget = formish.Input(empty=datetime.date(1900,1,1),roundtrip_empty=True)\n return form", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n \"first_name\": \"First Name\",\n \"last_name\": \"Last Name\",\n \"default_phone_num\": \"Phone Number\",\n \"default_passport_num\": \"Passport Number\",\n }\n\n self.fields[\"default_phone_num\"].widget.attrs[\"autofocus\"] = True\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs[\"placeholder\"] = placeholder\n self.fields[field].widget.attrs[\n \"class\"\n ] = \"border-black rounded-0 \\\n all-form-input\"\n self.fields[field].label = False\n self.helper = FormHelper()\n self.helper.form_tag = True\n self.helper.layout = Layout(\n Div(\n Field(\n \"first_name\",\n ),\n Field(\n \"last_name\",\n ),\n Field(\n \"default_phone_num\",\n ),\n Field(\n \"default_passport_num\",\n ),\n ),\n ButtonHolder(\n Submit(\"submit\", \"Save\", css_class=\"m-0 btn btn-outline\"),\n ),\n )", "def render_POST(self, request):", "def test_context_data_with_blank_form(self):\n response = self.client.get(self.get_url(), {'description': ''})\n context = response.context\n self.assertTrue(context['form'].is_bound)\n self.assertFalse(context['has_results'])\n self.assertIn('results_table', context)", "def test_context_data_with_blank_form(self):\n response = self.client.get(self.get_url(), {'description': ''})\n context = response.context\n self.assertTrue(context['form'].is_bound)\n self.assertFalse(context['has_results'])\n self.assertIn('results_table', context)", "def test_context_data_with_blank_form(self):\n response = self.client.get(self.get_url(), {'description': ''})\n context = response.context\n self.assertTrue(context['form'].is_bound)\n self.assertFalse(context['has_results'])\n self.assertIn('results_table', context)", "def form_invalid(self, form, factura_form, ot_linea_form):\n return self.render_to_response(\n self.get_context_data(form=form,\n factura_form=factura_form,\n ot_linea_form=ot_linea_form))", "def form_invalid(self, form, factura_form, ot_linea_form):\n return self.render_to_response(\n self.get_context_data(form=form,\n factura_form=factura_form,\n ot_linea_form=ot_linea_form))", "def form_invalid(self, form, factura_form, remito_form, ot_linea_form):\n return self.render_to_response(\n self.get_context_data(form=form,\n factura_form=factura_form,\n 
remito_form=remito_form,\n ot_linea_form=ot_linea_form))", "def form_invalid(self, form, factura_form, remito_form, ot_linea_form):\n return self.render_to_response(\n self.get_context_data(form=form,\n factura_form=factura_form,\n remito_form=remito_form,\n ot_linea_form=ot_linea_form))", "def test_checkout_renders_form(self):\n self.fill_session_cart()\n response = self.client.get(self.CHECKOUT_URL)\n rendered_fields = list(response.context['form'].fields.keys())\n for field in self.CHECKOUT_FIELDS:\n rendered_fields.remove(field)\n self.assertEqual(len(rendered_fields), 0)", "def index_page():\n\n\n return render_template(\"application-form.html\")", "def show_add_student_form():\n\n return render_template(\"add_student_form.html\")", "def render_form(self, studentname=\"\", semail=\"\", sphone=\"\", startdate=\"\", error=\"\"):\n t = jinja_env.get_template(\"createstudent.html\")\n response = t.render(student=student, semail=semail, sphone=sphone, startdate=startdate, error=error)\n self.response.out.write(response)", "def show_forms():\n\n return render_template(\"signup_login.html\")", "def test_blank(self):\n form_data = {\n 'username': 'testuser',\n 'password1': '',\n 'password2': ''\n }\n form = StrictUserCreationForm(data=form_data)\n self.assertFalse(form.is_valid())", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def register_form():\n\n return render_template(\"register-form.html\")", "def form_invalid(self, form, ot_linea_form):\n return self.render_to_response(\n self.get_context_data(form=form,\n ot_linea_form=ot_linea_form))", "def form_invalid(self, form, ot_linea_form):\n return self.render_to_response(\n self.get_context_data(form=form,\n ot_linea_form=ot_linea_form))", "def form_invalid(self, *args, **kwargs):\n\t\tcontext = self.get_context_data()\n\t\tcontext.update(kwargs)\n\t\treturn self.render_to_response(context)", "def form_invalid(self, *args, **kwargs):\n\t\tcontext = self.get_context_data()\n\t\tcontext.update(kwargs)\n\t\treturn self.render_to_response(context)", "def form_invalid(self, form, ot_linea_form):\n return self.render_to_response(\n self.get_context_data(form=form,\n ot_linea_form=ot_linea_form))", "def form_invalid(self, form, ot_linea_form):\n return self.render_to_response(\n self.get_context_data(form=form,\n ot_linea_form=ot_linea_form))", "def get_form(self, request, obj=None, **kwargs):\n if not obj:\n kwargs['form'] = ASCreationForm\n return super().get_form(request, obj, **kwargs)", "def test_create_form_with_no_fields(self):\n with pytest.raises(ValidationError):\n SurveyForm.create('badform', '<p>no input fields here</p>')\n\n with pytest.raises(ValidationError):\n SurveyForm.create('badform', '<input id=\"input_without_name\" />')", "def get_student_add_form():\n\n return render_template(\"student_add.html\")", "def form_DateDifferentEmpty(request):\n schema = schemaish.Structure()\n schema.add('myDateField', schemaish.Date())\n form = formish.Form(schema, 'form')\n form['myDateField'].widget = formish.Input(empty=datetime.date.today())\n return form", "def get_form(self):\n form = super(StickerCreate, self).get_form()\n\n form.fields.pop('label')\n\n if self.kwargs.get('sprint_number'):\n board = Board.objects.get(\n desk__owner__user=self.user,\n sequence=self.kwargs['board_sequence']\n )\n form.initial = {\n 'sprint': Sprint.objects.get(\n number=self.kwargs['sprint_number'], board=board\n )\n }\n 
form.fields['sprint'].widget = HiddenInput()\n else:\n form.fields['sprint'].empty_label = 'Backlog'\n\n return form", "def form_invalid(self, form, formsets):\n return self.render_to_response(\n self.get_context_data(form=form, formsets=formsets)\n )", "def prepare(self, form):\n \n return form", "def render_form(form):\n return {\n 'form': form,\n }", "def register_form():\n\n return render_template(\"register.html\")", "def form_invalid(self, form, instrumento_linea_form):\n return self.render_to_response(\n self.get_context_data(form=form,\n instrumento_linea_form=instrumento_linea_form))", "def form_invalid(self, form, instrumento_linea_form):\n return self.render_to_response(\n self.get_context_data(form=form,\n instrumento_linea_form=instrumento_linea_form))", "def form_IntegerNoneDefault(request):\n schema = schemaish.Structure()\n schema.add('myIntegerField', schemaish.Integer())\n form = formish.Form(schema, 'form')\n form.defaults = {'myIntegerField':None}\n return form", "def index(request):\n form = textForm()\n return render(request, 'index.html', {'form': form})", "def get_add_student_form():\n\n return render_template(\"student_add.html\")", "def form_invalid(self, form):\n return self.render_to_response(\n self.get_context_data(\n form=form,\n teams_formset=self.teams_formset,\n )\n )", "def form_Input(request):\n schema = schemaish.Structure()\n schema.add('inputStrip', schemaish.String())\n\n form = formish.Form(schema, 'form')\n form['inputStrip'].widget = formish.Input(strip=True)\n return form", "def render_form(self, request, step, form, context):\n return render_to_response(self.get_template(request, step, form),\n context, RequestContext(request))", "def _render(self) -> None:\n pass", "def get_student_form():\n\n return render_template(\"student_search.html\")", "def get_student_form():\n\n return render_template(\"student_search.html\")", "def get_student_form():\n\n return render_template(\"student_search.html\")", "def get_student_form():\n\n return render_template(\"student_search.html\")", "def get_student_form():\n\n return render_template(\"student_search.html\")", "def get_student_form():\n\n return render_template(\"student_search.html\")", "def get_student_form():\n\n return render_template(\"student_search.html\")", "def get_student_form():\n\n return render_template(\"student_search.html\")", "def get_student_form():\n\n return render_template(\"student_search.html\")", "def form_invalid(self, form, formset):\n return self.render_to_response(self.get_context_data(\n form=form,\n application_candidate_form=formset,\n ))", "def form_SelectChoiceWithEmptyString(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.String())\n options = [('','empty string'),('b','b'),('c','c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(options, none_value='BANG')\n return form", "def __init__(self):\n self.model = self.load_model()\n self.form_html = self.create_form_html()", "def test_context_data_with_empty_form(self):\n response = self.client.get(self.get_url())\n context = response.context\n self.assertFalse(context['form'].is_bound)\n self.assertFalse(context['has_results'])\n self.assertIn('results_table', context)", "def test_osimportname_form_empty(self):\n\n # get object\n form = OsimportnameForm(data = {})\n # compare\n self.assertFalse(form.is_valid())", "def form():\n with Config() as config:\n F = create_form_type(config.database.fields)\n f: flask_wtf.FlaskForm = F()\n if 
f.validate_on_submit():\n with db.Connection(config) as con:\n con.write_entry(**f.data)\n return flask.redirect(flask.url_for('ui.map'))\n else:\n f.process(flask.request.args)\n return flask.render_template(\"form.html\", form=f, title=\"Eingabe\")", "def getSuppressForm(request):\n\t\n\tlogger = logging.getLogger(__name__)\n\t\n\tcontext = {}\n\t\n\ttry:\n\t\t# Get a complete list of sensors.\n\t\tcontext['allsensors'] = Sensor.objects.all()\n\t\n\texcept Sensor.DoesNotExist:\n\t\tlogger.warning(\"No sensors found.\")\n\t\traise Http404\n\t\n\t# Send to template.\n\treturn render(request, 'tuning/suppressForm.tpl', context)" ]
[ "0.75872487", "0.67509174", "0.6683415", "0.663064", "0.651448", "0.6472087", "0.6457773", "0.63893735", "0.63662237", "0.6326461", "0.6302796", "0.6278228", "0.6275536", "0.6245083", "0.623302", "0.6206928", "0.617771", "0.61495304", "0.61495304", "0.6138811", "0.60955083", "0.6075429", "0.6037592", "0.6031355", "0.6031355", "0.60285133", "0.6023652", "0.60095775", "0.5999382", "0.5978764", "0.59141666", "0.59140074", "0.59038603", "0.59033114", "0.5877302", "0.5803877", "0.5803877", "0.58012134", "0.57893384", "0.57862467", "0.5784285", "0.57770413", "0.57770413", "0.57770413", "0.5776119", "0.5776119", "0.577333", "0.577333", "0.57697827", "0.57664967", "0.5759757", "0.574872", "0.5743648", "0.5739065", "0.5727866", "0.5727866", "0.5727866", "0.5727866", "0.5727866", "0.5727866", "0.57189035", "0.57056385", "0.57056385", "0.569856", "0.569856", "0.56982017", "0.56982017", "0.56759244", "0.56737804", "0.5662452", "0.5634687", "0.562042", "0.5619077", "0.560503", "0.5599212", "0.5588964", "0.5582844", "0.5582844", "0.55826056", "0.5582127", "0.5575337", "0.55620855", "0.5561951", "0.5557839", "0.5552116", "0.55477333", "0.55477333", "0.55477333", "0.55477333", "0.55477333", "0.55477333", "0.55477333", "0.55477333", "0.55477333", "0.5534478", "0.55296713", "0.55293477", "0.55267376", "0.55171263", "0.55154794", "0.55001116" ]
0.0
-1
Process newly submitted GeoPost entry... PROCEDURE 1) Get data from POST body 2) Validate form 3) Upload photo to bucket 4) Make WFS transaction with GeoServer
def post(self, request): # GET REQUEST DATA fid = request.POST.get('fid', False) uuid = request.POST.get('uuid', False) title_text = request.POST.get('title', False) body = request.POST.get('body', False) photo = request.FILES.get('photo', False) # FOR STORAGE wfsxml = request.POST.get('wfsxml', False) # FOR GEOSERVER data = { 'uuid': uuid, 'title_text': title_text, 'body': body, 'wfsxml': wfsxml } # VALIDATE FORM form = GeoPostForm(data, request.FILES) logger.info("\ninstantiate Geopost form\n") # IF FORM VALIDATION ERROR if not form.is_valid(): return server_error(request.body) #context = self.getContext(form) #return render(request, 'geopost/entry.html', context) else: pass # GET CLEAN VALUES uuid = form.cleaned_data['uuid'] wfsxml = form.cleaned_data['wfsxml'] # UPLOAD PHOTO TO BUCKET # if editing existing entry, first delete existing photo if fid: delete_from_bucket(uuid, self.imageBucket) else: pass photo.open('rb') error = upload_to_bucket( photo, self.imageBucket, photo.content_type, uuid) photo.close() # IF ERROR UPLOADING IMAGE if error: return server_error(error) else: pass # MAKE GEOSERVER WFS TRANSACTION error = post_to_geoserver(wfsxml, self.wfsURL) # ALL GOOD if not error: return HttpResponseRedirect(reverse('geopost_home')) # IF WFS TRANSACTION ERROR else: delete_from_bucket(uuid, self.imageBucket) return server_error(error)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_POST(self):\n global pages, devices, settings\n try:\n ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))\n if ctype == 'application/x-www-form-urlencoded':\n length = int(self.headers.getheader('content-length'))\n postvars = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)\n #if(self.path != '/simple/updateGPSCoordinates'):\n #print postvars\n #print self.path\n #now call the function that is meant to process this request\n if(self.path == '/simple/selectedHousehold'):\n #print 'need to get all cows in household #%s ' % postvars['household'][0]\n output = pages[postvars['page'][0]].selectedHousehold(postvars['household'][0], devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/selectedSite'):\n #print 'need to get all the households from the site #%s ' % postvars['sites'][0]\n output = pages[postvars['page'][0]].selectedSite(postvars['sites'][0], devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/nextAnimal'):\n #print 'we have finalized saving samples for one animal, now we need to go to the next animal'\n output = pages[postvars['page'][0]].nextAnimal(postvars, devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/sampleCow'):\n #print 'we sampling the cow'\n #we have the cow that we want to sample...now proceed with the sampling\n output = pages[postvars['page'][0]].collectSample(postvars, devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/saveSample'):\n #print 'we saving a new sample'\n #the user have entered a sample for an animal\n output = pages[postvars['page'][0]].saveSample(postvars, devices['gps'], settings['barcode_use'])\n self.wfile.write(output)\n elif(self.path == '/simple/updateGPSCoordinates'):\n #we want to get the current GPS position\n output = pages[postvars['page'][0]].curPosition(devices['gps']) #for the sake of consistence, we just using the passed 'page' variable\n self.wfile.write(output)\n elif(self.path == '/simple/deleteSample'):\n #print 'we need to delete the sample %s ' % postvars['sample'][0]\n #the user have entered a sample for an animal\n output = pages[postvars['page'][0]].deleteSample(postvars['sample'][0], devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/deleteAnimal'):\n #print postvars\n #print 'we need to delete the anial %s ' % postvars['curAnimalRead'][0]\n #the user have entered a sample for an animal\n output = pages[postvars['page'][0]].deleteAnimal(postvars['curAnimalRead'][0], devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/showAllSites'):\n #print postvars\n #print 'we either to show all sites or just the households within a certain radius'\n #the user have entered a sample for an animal\n output = pages[postvars['page'][0]].showSites(postvars, devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/refreshSampler'):\n #print 'I really dont know what to do here, so we shall evaluate it a case on case basis'\n output = pages[postvars['page'][0]].refreshSampler(postvars, devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/updateHouseholds'):\n #print 'The radius of interest has changed...lets update the households'\n output = pages[postvars['page'][0]].updateSites(postvars, devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/admin'):\n #print 'admin page'\n \n if ctype == 'multipart/form-data':\n self.send_response(301)\n form = cgi.parse_multipart(self.rfile, pdict)\n #print form\n pages[form['page'][0]].parse_form(form, 
info, devices)\n self.send_header('Location', 'http://localhost:%s/%s' % (settings['port'], form['page'][0]))\n self.end_headers()\n except IOError:\n self.send_error(501, 'Unsupported Method')", "def post(self):\n data = request.form.to_dict() # data is a dict with multipart/form-data\n if(not data):\n return BAD(err4,msg19,400 )\n \n if dataKey not in data:\n return BAD(err4, msg4, 400)\n \n d = data[dataKey]\n Json = ReadJson(d)\n dictionary = Json.Decode()\n if(dictionary is False):\n return BAD(json_error,msg18,400)\n if(Json.Validate(dictionary)):\n img = SaveImage(ALLOWED_EXTENSIONS)\n media = request.files\n if(imageKey not in media and imageKey in data):\n return BAD(err1, msg3, 400)\n\n if(imageKey not in media):\n return BAD(err3, msg2, 400)\n\n \n img.Save(imageKey, folder)\n ImageId = img.name\n \n if(ImageId is not None):\n client = ManagePsb(credentials, databaseName)\n query = {\n \"latitude\": dictionary[\"latitude\"],\n \"longitude\": dictionary[\"longitude\"],\n }\n Projection = {\n \"status\": 1,\n \"_id\" : 0\n }\n cursor = client.Filter(\n collection, query=query, Projection=Projection)\n c = cursor.count()\n if(c == 0):\n json = Json.Decode()\n client.Save(json, collection, img.name)\n img.Upload()\n\n else:\n return BAD(msg5, warning, 409)\n else:\n return BAD(err1, msg, 400)\n\n return OK(msg1, 201)\n\n else:\n return BAD(msg6, Json.missing, 400)", "def process_form(request):\n raw_data = request.form\n data = raw_data.copy()\n data['resources'] = request.form.getlist('resources')\n if request.remote_addr == '127.0.0.1':\n data['ip'] = '100.7.27.72'\n else:\n data['ip'] = request.remote_addr\n data['user_agent'] = request.user_agent.string\n data['@timestamp'] = datetime.utcnow()\n latitude = float(data['latitude'])\n longitude = float(data['longitude'])\n data['location'] = [latitude, longitude]\n return data", "def fetchGeoData():\n if request.method ==\"POST\":\n result = {}\n if request.get_json():\n post_requests = request.get_json()\n print(post_requests)\n result = db.getmapdata(post_requests['attr']) \n return result", "def submit_fishfry():\n logging.info(\"\\nsubmit ----------\")\n # pdb.set_trace()\n form = FishFryForm()\n # logging.info(json.dumps(request.form, indent=2))\n # ffid = form['ffid']\n if form.validate_on_submit():\n\n # ---------------------------------------------------------------------\n # get the form data and plug it into the geojson.\n # some of that data requires post-processing; that is done here.\n\n # feature_dict = postprocess_submit(request.form.to_dict())\n\n properties = {\n \"venue_name\": form.venue_name.data,\n \"venue_address\": form.venue_address.data,\n \"venue_type\": form.venue_type.data,\n \"venue_notes\": form.venue_notes.data,\n \"website\": form.website.data,\n \"email\": form.email.data,\n \"phone\": form.phone.data,\n \"etc\": form.etc.data,\n \"handicap\": postbool(form.handicap.data),\n \"alcohol\": postbool(form.alcohol.data),\n \"homemade_pierogies\": postbool(form.homemade_pierogies.data),\n \"lunch\": postbool(form.lunch.data),\n \"take_out\": postbool(form.take_out.data),\n \"validated\": form.validated.data,\n \"publish\": form.publish.data,\n \"menu\": {\n \"text\": form.menu_txt.data,\n \"url\": form.menu_url.data\n },\n \"events\": postprocess_events(form.events.data)\n }\n geometry = {\n \"type\": \"Point\",\n \"coordinates\": [form.lng.data, form.lat.data]\n }\n\n feature = {\n \"type\": \"Feature\",\n \"properties\": properties,\n \"geometry\": geometry\n }\n\n logging.info(json.dumps(feature, 
indent=2))\n\n # OPTOINAL: validate with Marshmallow here\n # (WTForms is also providing validation)\n # try:\n # result = Feature().load(feature)\n # except ValidationError as err:\n # logging.warning(err.messages)\n # logging.warning(err.data)\n\n # ---------------------------------------------------------------------\n # if there is an id already provided by the form, then this is an\n # existing record, and we're doing an update.\n ffid = form.ffid.data\n if ffid and ffid != \"None\":\n logging.info(\"This is an existing record ({0})\".format(ffid))\n onefry = update_one_fishfry(\n ffid,\n properties,\n geometry\n )\n logging.info(json.dumps(onefry, indent=2))\n\n flash('Fish Fry updated! ({0})'.format(ffid), \"info\")\n return redirect(url_for('load_fishfry', ffid=ffid))\n\n # ----------------------------------------------------------------------\n # Otherwise this is a new record. An FFID will be assigned\n # closer to the metal.\n else:\n logging.info(\"This is a new record\")\n\n # submit to the db\n onefry = make_one_fishfry(\n properties=properties,\n geometry=geometry\n )\n if 'id' in onefry.keys():\n ffid = onefry['id']\n # once the record create is submitted, reload this page with the data.\n flash('Fish Fry added! ({0})'.format(ffid), \"success\")\n return redirect(url_for('load_fishfry', ffid=ffid))\n else:\n flash(\n \"There was an 500-level error when adding data to the database.\", \"danger\")\n return render_template(\n 'pages/fishfryform.html',\n form=form,\n )\n # flash(\"Invalid data:\\n\"{0}.format(\"\\n\".join([error for error in form.errors])))\n # flash(\"You can only submit data through the form via POST request.<br>Consider using the API if you want to work with data programmatically.\", \"info\")\n # return redirect(url_for('load_fishfry', ffid=ffid))\n return render_template(\n 'pages/fishfryform.html',\n form=form\n )", "def parse_post(request):\n\n fp = StringIO(request.raw_body)\n\n headers = {}\n headers['content-type'] = request.message.get('content-type')\n headers['content-length'] = request.message.get('content-length')\n\n environ = {}\n environ['REQUEST_METHOD'] = request.method\n\n boundary = request.message.get('boundary')\n\n post = cgi.FieldStorage( fp = fp\n , headers = headers\n , outerboundary = boundary\n , environ = environ\n , keep_blank_values = True\n , strict_parsing = False\n )\n\n return post", "def delete(request):\n wfsxml = request.POST.get('wfsxml', False) # FOR GEOSERVER\n uuid = request.POST.get('uuid', False)\n # MAKE GEOSERVER WFS TRANSACTION\n error = post_to_geoserver(wfsxml, GeoPostBase.wfsURL)\n # ALL GOOD\n if error:\n return server_error(error)\n # IF WFS TRANSACTION ERROR\n else:\n pass\n # Delete photo from bucket\n delete_from_bucket(uuid, GeoPostBase.imageBucket)\n return HttpResponseRedirect(reverse('geopost_home'))", "def upload(context, request):\n if request.method == 'POST':\n if not hasattr(request.POST['content'], 'file'):\n raise RuntimeError('No file attached')\n\n fieldstorage = request.POST['content']\n filename = fieldstorage.filename\n logger.info(\"%s posted\", filename)\n\n with bm(\"%s released\" %filename):\n dest = path(request.file_root) / request.namer(filename)\n dest.write_bytes(fieldstorage.file.read())\n try:\n request.registry.notify(event.PackageAdded(request.index, path=dest))\n request.response.headers['X-Swalow-Status'] = 'SUCCESS'\n try:\n for ep in pkg_resources.iter_entry_points('cheeseprism.on_upload'):\n func = ep.load()\n func(context, request, dest)\n except Exception as e:\n 
logger.exception('Entry point %r failed', ep)\n return request.response\n except :\n logger.exception(\"Processing of %s failed\", filename)\n raise\n return {}", "def submit(self):\n data = self.getFSNDataDict()\n if data != []:\n MOSES.addToPiggyBank(data, self.user_id, self.password)", "def post(self, request, *args, **kwargs):\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n gw_location_file = request.FILES.get('gw_location_file')\n gw_level_file = request.FILES.get('gw_level_file')\n\n if form.is_valid():\n if gw_location_file:\n gw_location_file.seek(0)\n if str(gw_location_file).split('.')[-1] == \"xls\":\n sheet = xls_get(gw_location_file, column_limit=4)\n elif str(gw_location_file).split('.')[-1] == \"xlsx\":\n sheet = xlsx_get(gw_location_file, column_limit=4)\n sheetname = next(iter(sheet))\n records = sheet[sheetname]\n for record in records:\n if record[0].lower() == 'id well':\n continue\n\n point = Point(x=record[3], y=record[2], srid=4326)\n well = GWWell.objects.create(\n gwwellname=record[0],\n gwwelllocation=point,\n gwwelltotallength=record[1]\n )\n\n if gw_level_file:\n gw_level_file.seek(0)\n if str(gw_level_file).split('.')[-1] == \"xls\":\n sheet = xls_get(gw_level_file, column_limit=4)\n elif str(gw_level_file).split('.')[-1] == \"xlsx\":\n sheet = xlsx_get(gw_level_file, column_limit=4)\n sheetname = next(iter(sheet))\n records = sheet[sheetname]\n for record in records:\n if record[0].lower == 'time':\n continue\n\n try:\n well = GWWell.objects.get(gwwellname=record[3])\n time = dateparse.parse_datetime(record[0])\n well_level_log = GWGeologyLog.objects.create(\n phenomenonTime=time,\n resultTime=time,\n gw_level=record[2],\n reference=record[1]\n )\n well.gwwellgeology.add(well_level_log)\n except GWWell.DoesNotExist:\n pass\n pass\n return self.form_valid(form)\n\n else:\n return self.form_invalid(form)", "def handle_request(self):\n try:\n content_type = self.headers.get('content-type')\n\n if content_type != 'application/json':\n self.write_empty_response(400)\n return\n\n content_len = int(self.headers.get('content-length', 0))\n\n # If content was provided, then parse it\n if content_len > 0:\n message = json.loads(self.rfile.read(content_len))\n else:\n self.write_empty_response(400)\n return\n\n helper.log_info(f'Incoming POST from {self.client_address[0]}: {message}')\n\n aspect_type = message['aspect_type']\n object_id = message['object_id']\n object_type = message['object_type']\n # make owner_id a str to avoid issues with athlete_checkpoint dict\n owner_id = str(message['owner_id'])\n\n athlete_checkpoint = helper.get_check_point(\"webhook_updates\") or {}\n\n # We only care about activity updates. 
New activities are pulled in automatically as strava_api input restarts.\n if aspect_type == 'update' and object_type == 'activity':\n if owner_id not in athlete_checkpoint:\n athlete_checkpoint[owner_id] = []\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n else:\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n helper.log_debug(f'webhooks_updates checkpoint: {helper.get_check_point(\"webhook_updates\")}')\n\n # Send data to Splunk\n data = json.dumps(message)\n event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n ew.write_event(event)\n\n # Strava API expects a 200 response\n self.write_empty_response(200)\n\n # Restart strava_api inputs to pull in the data unless it's a delete, as the input doesn't do anything with that anyway.\n if aspect_type != 'delete':\n self.restart_input('strava_api', self.SESSION_KEY)\n helper.log_info(f'Reloading Strava API input to retrieve updated activity {object_id} for athlete {owner_id}.')\n\n except Exception as ex:\n helper.log_error(f'Something went wrong in handle request: {ex}')", "def post(self):\n\n upload_files = self.get_uploads('file')\n blob_info = upload_files[0]\n self.redirect('/?upload_info=%s' % urllib.quote(blob_info.filename))", "def upload_finish(self, cloud_file):", "def do_POST(self):\n self._try_to_process_request(self._handle_post_request)", "def submit_plugin_form_data(self, form_entry, request, form,\n form_element_entries=None, **kwargs):", "def on_post(self, req, resp, account, container):\n _handle_script_upload(req, resp, account, container)", "def handle_new_post(post_data, user_agent, remote_addr):\n \n for required in POST_REQUIRED_PARAMS:\n if required not in post_data:\n return None, None\n\n try:\n value = int(string_from_interwebs(post_data.getfirst(\"code\", \"\")))\n except ValueError:\n return None, None\n \n if value != 98098098098:\n return None, None\n\n # not yet safe to use.\n location = post_data.getfirst(\"location\", \"\")\n tags = string_from_interwebs(post_data.getfirst(\"tags\")) \n author = post_data.getfirst(\"author\")\n \n split_tags = [string_from_interwebs(tag).strip().lower() for tag in tags.split(\",\")] # temporary\n \n if len(split_tags) > 3:\n return None, None\n \n author_id = string_from_interwebs(author).strip()\n \n with Connection('localhost', 27017) as connection:\n reply_to = string_from_interwebs(post_data.getfirst(\"reply_to\"))\n \n if not verify_author(author_id, connection):\n return None, None\n\n if not verify_post(reply_to, connection):\n return None, None\n\n # if reply then it's verified.\n # XXX: I need to make a standard object structure for this, so that I don't \n # have to update separate things.\n\n post = {\"viewed\" : 0,\n \"comments\" : 0,\n \"flagged\" : 0,\n \"disliked\" : 0,\n \"enjoyed\" : 0,\n \"num_replies\" : 0,\n \"num_reposts\" : 0,\n \"content-type\" : \"image\", # need to pull this from the mime lookup\n \"file\" : \"placeholder\",\n \"user_agent\" : user_agent,\n \"remote_addr\" : remote_addr,\n \"created\" : datetime.utcnow(),\n \"location\" : string_from_interwebs(location).strip(),\n \"author\" : ObjectId(author_id),\n \"reply_to\" : ObjectId(reply_to),\n \"tags\" : split_tags}\n\n update_post(reply_to, connection)\n\n return post_data.getfirst(\"data\"), post", "def post(self):", "def _postproc(self, request):\n if request.status_code 
!= 200: raise Exception('wrong error code: {0}'.format(request.status_code))\n data = request.json()\n self.data = self._finalize_data(data)", "def submission():\n\n # @ToDo: Something better than this crude check\n if not auth.s3_logged_in():\n auth.permission.fail()\n\n from io import StringIO\n import cgi\n from lxml import etree\n\n source = request.post_vars.get(\"xml_submission_file\", None)\n if isinstance(source, cgi.FieldStorage):\n if source.filename:\n xmlinput = source.file\n else:\n xmlinput = source.value\n\n if isinstance(xmlinput, str):\n xmlinput = StringIO(xmlinput)\n elif request.env.request_method == \"HEAD\":\n raise HTTP(204)\n else:\n raise HTTP(400, \"Invalid Request: Expected an XForm\")\n\n tree = etree.parse(xmlinput)\n tablename = tree.getroot().tag\n\n resource = s3db.resource(tablename)\n\n stylesheet = os.path.join(request.folder, \"static\", \"formats\", \"odk\",\n \"import.xsl\")\n\n try:\n result = resource.import_xml(source=tree, stylesheet=stylesheet)\n except (IOError, SyntaxError):\n raise HTTP(500, \"Internal server error\")\n\n # Parse response\n status = json.loads(result)[\"statuscode\"]\n\n if status == \"200\":\n r = HTTP(201, \"Saved\") # ODK Collect only accepts 201\n r.headers[\"Location\"] = request.env.http_host\n raise r\n else:\n raise HTTP(status, result)", "def postPoint(request, Form):\n\tform = Form(request.POST)\n\tform.data = form.data.copy()\n\n\t# Convert coords to valid geometry\n\ttry:\n\t\tform.data['geom'] = normalizeGeometry(form.data['geom'])\n\texcept(ValueError):\n\t\t# TODO provide error message to user here\n\t\tJsonResponse({'success': False})\n\t\t# messages.error(request, '<strong>' + _('Error') + '</strong><br>' + _('No point was selected for this type of report.'))\n\n\t# Validate and submit to db\n\tif form.is_valid():\n\t\tpoint = form.save()\n\t\t# Errors with push notifications should not affect reporting\n\t\tif not settings.DEBUG:\n\t\t\ttry: pushNotification.pushNotification(point)\n\t\t\texcept: pass\n\n\t\treturn JsonResponse({\n\t\t\t'success': True,\n\t\t\t'point': GeoJSONSerializer().serialize([point,]),\n\t\t\t'point_type': point.p_type,\n\t\t\t'form_html': render_crispy_form(Form())\n\t\t})\n\telse:\n\t\tlogger.debug(\"Form not valid\")\n\n\t# Else: error occurred\n\tform.data['geom'] = form.data['geom'].json\n\tform_html = render_crispy_form(form)\n\treturn JsonResponse({'success': False, 'form_html': form_html})", "def post():\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def _submit_plugin_form_data(self, form_entry, request, form,\n form_element_entries=None, **kwargs):\n if DEBUG:\n return self.submit_plugin_form_data(\n form_entry=form_entry,\n request=request,\n form=form,\n form_element_entries=form_element_entries,\n **kwargs\n )\n else:\n try:\n return self.submit_plugin_form_data(\n form_entry=form_entry,\n request=request,\n form=form,\n form_element_entries=form_element_entries,\n **kwargs\n )\n except Exception as err:\n logger.debug(str(err))", "def post(self):\n source = 'uploaded by user'\n upload_files = self.get_uploads('file')\n blob_key = upload_files[0].key()\n name = self.request.get('name')\n\n user = users.get_current_user()\n\n username = 'admin'\n date = 
datetime.datetime.now()\n str_blob_key = str(blob_key)\n key = FileMetadata.get_key_name(username, date, str_blob_key)\n\n ctx = ndb.get_context()\n meta = FileMetadata(key_name=key, parent=_PARENT)\n meta.owner = user\n meta.filename = name\n meta.uploaded_on = date\n meta.source = source\n meta.blobkey = str_blob_key\n meta.put()\n ctx.clear_cache()\n self.redirect('/admin')", "def post(self):\n code, status = run_handlers.handle_data_post(self.request.headers, self.request.body)\n self.set_status(code)\n self.write(status)\n self.finish()", "def post_update(self):\n\t\tlogging.info(\"Beginning\")\n\t\toptions=dict(\n\t\t\tapi_key = self.apiKey\n\t\t)\n\t\tcounter = 0\n\t\tfor key, value in self.field.items():\n\t\t\tif value != None:\n\t\t\t\tcounter += 1\n\t\t\t\toptions[key] = value\n\t\tif counter == 0:\n\t\t\tlogging.error(\"There was nothing to update. Check the field values\")\n\t\t\treturn\n\t\turl = '{ts}update'.format(\n\t\t\tts=self.tsRUL,\n\t\t)\n\t\tlogging.debug(\"Options = \" + str(options))\n\t\ttry:\n\t\t\tresults = requests.post(url, params=options)\n\t\t\tif results.ok != True:\n\t\t\t\tlogging.error(\"The update failed\")\n\t\t\t\treturn False\n\t\texcept:\n\t\t\tlogging.error(\"There was an error trying to update the values\")\n\t\t\treturn False\n\t\tself.clear_field_values()\n\t\treturn True", "def post(self):\n if validate(request.form):\n handle_upload(request.files['qqfile'], request.form)\n return make_response(200, {\"success\": True})\n else:\n return make_response(400, {\"error\": \"Invalid request\"})", "def post(self):\n postUrl = 'http://' + self.ws + ':80/cgi-bin/post.py'\n\n # Create the form with simple fields\n logform = MultiPartForm()\n logfilename = string.rsplit(self.fullLogFile, '/', 1)[1]\n logform.add_file('file', logfilename, open(self.fullLogFile))\n body = str(logform)\n\n # Build the request\n request = urllib2.Request(postUrl)\n request.add_header('Content-type', logform.get_content_type())\n request.add_header('Content-length', len(body))\n request.add_data(body)\n\n # print request.get_data()\n urllib2.urlopen(request).read()\n\n htmlFile = self.format_html()\n htmlform = MultiPartForm()\n htmlfilename = string.rsplit(htmlFile, '/', 1)[1]\n htmlform.add_file('file', htmlfilename, open(htmlFile))\n\n request = urllib2.Request(postUrl)\n body = str(htmlform)\n request.add_header('Content-type', htmlform.get_content_type())\n request.add_header('Content-length', len(body))\n request.add_data(body)\n # request.get_data()\n response = urllib2.urlopen(request)\n data = response.read()\n\n s = re.search(\"^file location: (.+)\", data, re.MULTILINE)\n location = s.group(1)\n\n print \"http://%s%s\\n\" % (self.ws, location)", "def _post_request(self):\n # check if input file size was not exceeded\n maxsize = configuration.get_config_value('server', 'maxrequestsize')\n maxsize = configuration.get_size_mb(maxsize) * 1024 * 1024\n if self.http_request.content_length > maxsize:\n raise FileSizeExceeded('File size for input exceeded.'\n ' Maximum request size allowed: %i megabytes' % maxsize / 1024 / 1024)\n\n try:\n doc = lxml.etree.fromstring(self.http_request.get_data())\n except Exception as e:\n if PY2:\n raise NoApplicableCode(e.message)\n else:\n raise NoApplicableCode(e.msg)\n\n operation = doc.tag\n request_parser = self._post_request_parser(operation)\n request_parser(doc)", "def preprocess_body(self) -> None:\n self._verify_archive_url_and_zip_path()\n self._verify_upload_url_and_zip_path()\n self._verify_upload_url_and_no_zip_path()\n if 
self.upload_function is None:\n self.upload_function = False", "def _submit(self, endpoint, data):\n full_url = self._prepare_url(endpoint)\n req = self._request(full_url, self._username, self._apikey)\n req.post(data)", "def parse_multipart(request):\n\n # This code will process each non-file field in the form\n fields = {}\n data = request.form.to_dict()\n for field in data:\n fields[field] = data[field]\n print(\"Processed field: %s\" % field)\n\n # This code will process each file uploaded\n files = request.files.to_dict()\n for file_name, file in files.items():\n # Note: GCF may not keep files saved locally between invocations.\n # If you want to preserve the uploaded files, you should save them\n # to another location (such as a Cloud Storage bucket).\n file.save(get_file_path(file_name))\n print(\"Processed file: %s\" % file_name)\n\n # Clear temporary directory\n for file_name in files:\n file_path = get_file_path(file_name)\n os.remove(file_path)\n\n return \"Done!\"", "def do_POST(self): # pylint: disable=invalid-name\n self.handle_request()", "def post(self, request):\n pass", "def submit_blogpost(request):\n from_address = request.POST.get('from_address')\n message = request.POST.get('message')\n rpc_raw = rpcRawProxy(helpers.get_rpc_url())\n\n if request.POST.get('wallet_passphrase', False):\n rpc_raw.walletpassphrase(request.POST.get('wallet_passphrase'), 60)\n try:\n message += \"|\" + helpers.sign_string(rpc_raw, message, from_address)\n except JSONRPCException, e:\n if \"passphrase\" in e.error['message']:\n return HttpResponse(json.dumps({\n \"status\": \"error\",\n \"message\":\"Wallet locked.\",\n \"type\":\"wallet_locked\"\n }, default=helpers.json_custom_parser), content_type='application/json')\n else:\n return HttpResponse(json.dumps({\n \"status\": \"error\",\n \"message\":\"Error while trying to sign public key.\"\n }, default=helpers.json_custom_parser), content_type='application/json')\n\n message = helpers.format_outgoing(message)\n opreturn_key = external_db.post_data(message)\n\n op_return_data = \"pm\" #program code (peermessage), 2 chars\n op_return_data += \"blg\" #opcode (blogpost), 3 chars\n op_return_data += opreturn_key #key pointing to external datastore\n\n rpc_processed = rpcProcessedProxy()\n blockchain_func.submit_opreturn(rpc_processed, from_address, op_return_data)\n return HttpResponse(json.dumps({\n \"status\": \"success\"\n }, default=helpers.json_custom_parser), content_type='application/json')", "def upload_point(x, y, label=\"\"):\n\n conn = None\n cur = None\n\n try:\n # check the point is inside the usa, both point and states must be WGS84\n conn = utils.pgconnect(**settings.DEFAULT_CONNECTION)\n cur = conn.cursor()\n #if the point is inside this will return (True,) otherwise None\n cur.execute(\"\"\"select result from\n (select st_contains(s.geom,ST_GeomFromText('POINT(%s %s)', 4326)) as result \n from %s as s) as subquery\n where result is true\"\"\",(AsIs(x),AsIs(y), AsIs(settings.STATES_TABLE_NAME)))\n\n result = cur.fetchone()\n #print(result)\n\n if result: # if result is not None\n\n #check numbers size, crop to 4 digits, define the marker size\n\n # size symbol\n size=None\n\n # store number of decimal digits\n lx = 0\n ly = 0\n\n # convert numbers to string\n #x = str(x);y = str(y)\n\n if ',' in x or ',' in y:\n raise Exception(\"decimal numbers should not contain ','\")\n\n # check the number of decimal digits and crop to 4\n if '.' 
in x: # do only for float number\n lx = len(x.split('.')[1]) # get decimals\n if lx > 4: # crop size to 4\n x = x[:(4 - lx)]\n lx = 4\n if '.' in y: # do only for float number\n ly = len(y.split('.')[1])\n if ly > 4:\n y = y[:(4 - ly)]\n ly = 4\n\n # select a symbol size according\n # for the size take the bigger number of digits of the two numbers\n ndigits = max([lx, ly])\n if ndigits == 0:\n size = 5\n elif ndigits == 1:\n size = 4\n elif ndigits == 2:\n size = 3\n elif ndigits == 3:\n size = 2\n elif ndigits == 4:\n size = 1\n\n #upload to database\n cur.execute(\n \"\"\"INSERT INTO %s(lat,lon,label,size) VALUES (%s,%s,%s,%s) RETURNING id\"\"\",\n ( AsIs(settings.BOOKMARKS_TABLE_NAME), y, x, label, size))\n #id = cur.fetchone()[0]\n #print(id)\n cur.execute(\"\"\"UPDATE %s SET geom = ST_PointFromText('POINT(' || lon || ' ' || lat || ')', 4326)\"\"\", (AsIs(settings.BOOKMARKS_TABLE_NAME),))\n conn.commit()\n\n else:\n raise Exception(\"the point is not inside USA\")\n\n except Exception as e:\n raise Exception(e)\n\n else:\n return x, y, size #return the cropped coordinates and marker size\n\n finally:\n if cur: cur = None\n if conn: conn = None", "def index():\n vegform = Upload() \n imageform = ImageUpdate() \n stormform = Storm() \n form = Landscape() \n \n # Set image defaults to zero so template knows what's going on\n result_im = None\n result_bin = None \n result_RF = None\n result_vmax = None\n\n feature_dir = '/'.join([os.getcwd(), 'feature' + request.remote_addr])\n feature_path = '/'.join([feature_dir, 'features.json'])\n os.mkdir(feature_dir) if os.path.isdir(feature_dir) == False else True\n if 'features.json' in os.listdir(feature_dir):\n with open(feature_path, 'r') as fp:\n features = json.load(fp)\n \n else:\n features = reset_features()\n\n p_choices = {'0.5' : ['2.5', '5.0', '7.5', '10'],\n '1' : ['1.7','3.4','5.1','6.8'],\n '3' : ['0.8','1.6','2.4','3.2'],\n '6' :['0.5','1.0','1.5','2.0']\n }\n\n\n if request.method == 'POST' and form.reset.data == True :\n flash('Reset!')\n features = reset_features() \n with open(feature_path, 'w') as fp:\n json.dump(features, fp, indent = 2) \n \n if 'tr' in features.keys():\n form.p.choices = [(str(p),str(p)) for p in p_choices[features['tr']]] \n \n if vegform.validate_on_submit():\n \n if vegform.delete.data == True:\n flash('File deleted ')\n \n #os.remove(os.path.join( UPLOAD_FOLDER, features['filename'] ))\n if 'filename' in features:\n features.pop('filename')\n if 'filepath' in features:\n features.pop('filepath') \n result_bin = None\n result_im = None \n \n features = reset_features() \n \n if vegform.submit.data == True:\n \n flash('File upload ')\n f = vegform.veg.data\n filename = secure_filename(f.filename)\n filepath = os.path.join( UPLOAD_FOLDER, filename)\n f.save(filepath)\n \n features['filename'] = filename \n features['filepath'] = filepath \n\n #### image handling \n if imageform.submit.data and imageform.validate_on_submit():\n \n flash('Image update')\n keys = [key for key in request.form.keys() if key not in ['csrf_token', 'ascii']]\n #for key in keys: \n if 'grid' in keys:\n features['grid'] = request.form['grid']\n if 'threshold' in keys:\n features['threshold'] = request.form['threshold']\n \n if request.form['submit'] == 'rotate-right':\n features['rotate'] = np.mod(features['rotate'] - 90., 360)\n print \"rotate right\"\n\n if request.form['submit'] == 'rotate-left':\n features['rotate'] = np.mod(features['rotate'] + 90., 360)\n print \"rotate left\"\n \n #### precip handling \n if 
stormform.submit.data and stormform.validate_on_submit():\n \n flash('Rain duration added ') \n \n features['tr'] = (request.form['tr'])\n if 'p' in features.keys():\n features['rainD'] = float(features['tr'])*float(features['p'])\n \n \n form.p.choices = [(str(p),str(p)) for p in p_choices[features['tr']]] \n keys = [key for key in request.form.keys() if key not in ['csrf_token', 'ascii']]\n \n\n #### update landscape \n if form.update.data and form.validate_on_submit():\n \n flash('Landscape featuress added')\n \n keys = [key for key in request.form.keys() if key not in ['csrf_token', 'ascii']]\n \n if 'tr' not in features.keys():\n features['tr'] = '0.5'\n \n features['slope'] = request.form['slope']\n features['p'] = (request.form['p'])\n features['KsV'] = (request.form['KsV']) \n features['rainD'] = float(features['tr'])*float(features['p'])\n \n if 'filepath' in features.keys():\n \n result_im = png_plot(features['filepath'])\n \n threshold = features['threshold'] if 'threshold' in features.keys() else 0.5 \n result_bin, bw = binarize(features['filepath'], features = features)\n features['Lx'] = bw.shape[0]\n features['Ly'] = bw.shape[1] \n features['fV'] = np.round(np.mean(bw), 2)\n \n if form.submit.data and form.validate_on_submit():\n flash('Running the random forest model!')\n \n keys = [key for key in request.form.keys() if key not in ['csrf_token', 'ascii']]\n \n if 'tr' not in features.keys():\n features['tr'] = '0.5'\n \n features['slope'] = request.form['slope']\n features['p'] = (request.form['p'])\n features['KsV'] = (request.form['KsV']) \n features['rainD'] = float(features['tr'])*float(features['p'])\n \n result_RF, zinflc = run_RF(features['filepath'], features = features, target_col = 'zinflc')\n result_vmax, vmax = run_RF(features['filepath'], features = features, target_col = 'vmax') \n \n features['inflDveg'] = np.round(np.mean(zinflc[bw == 1]),2)\n features['inflD'] = np.round(np.mean(zinflc),2)\n features['vmax'] = np.round(np.mean(vmax),2)\n features['vmaxmax'] = np.round(np.percentile(vmax, 95),2) \n\n with open(feature_dir + '/features.json', 'w') as fp:\n json.dump(features, fp, indent = 2) \n\n keys = [key for key in features.keys() if key not in ['filepath', 'filename', 'tr']]\n\n for key in ['grid', 'threshold']:\n if key in features.keys():\n imageform[key].default = float(features[key] ) \n \n imageform.process()\n\n for key in ['tr']:\n if key in features.keys():\n stormform[key].default = features[key]\n \n stormform.process() \n \n for key in [ 'p', 'slope', 'KsV']:\n if key in features.keys():\n form[key].default = features[key]\n\n form.process()\n \n return render_template('home.html', form = form, \\\n vegform = vegform, \n imageform = imageform, \n stormform = stormform, \n features = features, \n result_bin = result_bin,\n result_im = result_im,\n result_RF = result_RF,\n result_vmax = result_vmax\n )", "def post(self):\n blob_key = self.request.get(\"blobkey\")\n\n database_creation.run(blob_key)", "def test_if_posted(self):\n reqdata = {\"lat\": 17.726675,\n \"long\": 83.312320,\n \"address\": \"CBM Compound\",\n \"state\": \"Andhra Pradesh\",\n \"pin\": 530003\n }\n\n res = req.post(post_loc_url, json=jsonify(reqdata))\n print(\"RES\", res.text)\n self.assertEqual(\"200\", json.loads(res.text)[\"Status\"])", "def main():\n\n datastore_client = datastore.Client(namespace=DS_NAMESPACE)\n\n storage_client = storage.Client()\n\n query = datastore_client.query(kind=DS_KIND)\n query_paperform = datastore_client.query(kind=DS_KIND_PAPERFORM)\n\n 
excluded = load_excluded_postal_codes()\n keys = load_keys()\n\n sanitisor = sanitisation.sanitisation.Sanitisor(excluded, keys)\n # todo - potentially shift to writing to disk if / when we move off off app engine\n output = csv.StringIO()\n output_paperform = csv.StringIO()\n writer = csv.DictWriter(output, fieldnames=sanitisor.field_names)\n writer_paperform = csv.DictWriter(output_paperform, fieldnames=sanitisor.field_names)\n writer.writeheader()\n writer_paperform.writeheader()\n\n for entity in query.fetch():\n l = sanitisor.sanitise_account(entity)\n for obj in l:\n writer.writerow(obj)\n\n for entity in query_paperform.fetch():\n l = sanitisor.sanitise_paperform(entity)\n for obj in l:\n writer.writerow(obj)\n writer_paperform.writerow(obj)\n\n curr_time_ms = str(int(time.time() * 1000))\n \n for bucket_name, path in zip(GCS_BUCKETS, GCS_PATHS):\n bucket = storage_client.bucket(bucket_name)\n file_name = os.path.join(path, \"-\".join([curr_time_ms, END_FILE_NAME]))\n file_name_paperform = os.path.join('paperform-' + path, \"-\".join([curr_time_ms, END_FILE_NAME]))\n upload_blob(bucket, output.getvalue(), file_name)\n upload_blob(bucket, output_paperform.getvalue(), file_name_paperform)", "def do_POST(self):\n try:\n if self.path.endswith(\"/restaurant/new\"):\n ctype, pdict = cgi.parse_header(self.headers.getheader('Content-type'))\n if ctype == 'multipart/form-data':\n fields = cgi.parse_multipart(self.rfile, pdict)\n restaurantArray = fields.get('restaurant')\n\n # create a new Restaurant\n newRestaurantObject = Restaurant()\n newRestaurantObject.save(restaurantArray[0])\n\n self.send_response(301)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Location', '/restaurants')\n self.end_headers()\n return\n except:\n pass", "def process_address():\n #get address info from form\n user_details = request.form\n #validate address with google geocoding\n update_details = apiapijoyjoy.validate_address(user_details)\n #update ino in db\n dbwrangler.newaddress(update_details)\n \n return redirect(\"/\")", "def prepare_submission(self):\n ## class Submit2Page\n if (self.form.has_key(\"pdbfile\") == False or \\\n self.form[\"pdbfile\"].file is None or \\\n self.form[\"pdbfile\"].value <= ' '):\n jobid = self.prepare_pdbid_entry()\n return jobid, False\n\n ## allocate a new JobID\n job_id = mysql.job_new()\n\n ## record user's IP address\n ip_addr = os.environ.get(\"REMOTE_ADDR\", \"Unknown\")\n mysql.job_set_remote_addr(job_id, ip_addr)\n\n ## read in all of the lines in the structure file\n infil = self.form[\"pdbfile\"].file\n line_list = []\n while True:\n ln = infil.readline()\n if not ln:\n break\n line_list.append(ln)\n\n ## proceed no further if there were not sufficient lines in uploaded\n ## structure file\n if len(line_list) < 10:\n webtlsmdd.remove_job(job_id)\n raise SubmissionException('Only Recieved %d lines of upload' % (\n len(line_list)))\n\n ## basic sanity checks (for non-via-pdb.org structures)\n run_mainchain_only = False\n r, tmpfile = check_upload(job_id, line_list, mainchain = False)\n if r != '':\n ## \"All atoms\" failed the sanity check. Let's try just the\n ## mainchain atoms.\n r, garbage = check_upload(job_id, line_list, mainchain = True)\n if r != '':\n ## No good. 
The structure failed both sanity checks.\n ## Can not proceed with this structure.\n raise SubmissionException(str(r))\n else:\n run_mainchain_only = True\n\n ## TODO: Figure out how to do this without webtlsmdd, 2009-05-29\n ## pass the PDB file to the application server\n result = webtlsmdd.set_structure_file(job_id, xmlrpclib.Binary(\"\".join(line_list)))\n if result != \"\":\n raise SubmissionException(result)\n\n return job_id, run_mainchain_only", "def deliver_post(data, access=None):\n\n schema = get_post_schema(data)\n return deliver_fields(schema, data, access)", "def upload_function(request):\n application_key = request.args.get('applicationKey', None)\n api_key = request.args.get('apiKey', None)\n\n sensor_mapping = json.loads(request.environ.get(\"sensor_mapping\", \"{}\"))\n pprint.pprint('Using this sensor mapping: \\n' + pprint.pformat(sensor_mapping))\n\n host = request.environ.get(\"influxdb_host\")\n port = request.environ.get(\"influxdb_port\")\n user = request.environ.get(\"influxdb_user\")\n password = request.environ.get(\"influxdb_password\")\n dbname = request.environ.get(\"influxdb_database\")\n\n return process_data(sensor_mapping, application_key, api_key, host, port, user, password, dbname)", "def post(self):\n data = self.post_parser.parse_args()\n\n try:\n LOGGER.debug('Trying to upload file to storage')\n self.storage.upload(data.file)\n LOGGER.debug('The file was uploaded with success')\n return {\n 'filename': data.file.filename,\n 'message': 'The file was uploaded with success'\n }\n except BaseException:\n abort(500, message='The file was not uploaded')\n LOGGER.error('A generic exception has occurred.', exc_info=True)", "def on_put(self, request, response):\n try:\n \n user_data = UserAuthentication().authenticateUserFromRESTRequest(request)\n \n nffg_dict = json.load(request.stream, 'utf-8')\n ValidateNF_FG().validate(nffg_dict)\n nffg = NF_FG()\n nffg.parseDict(nffg_dict)\n \n controller = UpperLayerOrchestratorController(user_data)\n response.body = controller.put(nffg)\n\n response.status = falcon.HTTP_202\n \n except wrongRequest as err:\n logging.exception(err)\n raise falcon.HTTPBadRequest(\"Bad Request\", err.description)\n except unauthorizedRequest as err:\n logging.debug(\"Unauthorized access attempt from user \"+request.get_header(\"X-Auth-User\"))\n raise falcon.HTTPUnauthorized(\"Unauthorized\", err.message)\n except requests.HTTPError as err:\n logging.exception(err)\n if err.response.status_code == 401:\n raise falcon.HTTPInternalServerError('Unauthorized.',err.message)\n elif err.response.status_code == 403:\n raise falcon.HTTPInternalServerError('Forbidden.',err.message)\n elif err.response.status_code == 404: \n raise falcon.HTTPInternalServerError('Resource Not found.',err.message)\n raise err\n except Exception as err:\n logging.exception(err)\n raise falcon.HTTPInternalServerError('Contact the admin. 
', err.message)", "def PostInputsFile(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def upload(\n self, map_info: MapInfo, json_string: str, verbose: bool = False\n ) -> None:\n with self.__synch_mutex:\n processed_map_filename = (\n os.path.basename(map_info.map_json_blob_name)[:-5] + \"_processed.json\"\n )\n processed_map_full_path = (\n f\"{self.PROCESSED_UPLOAD_TO}/{processed_map_filename}\"\n )\n if verbose:\n print(\n f\"Attempting to upload {map_info.map_name} to the __bucket blob \\\n {processed_map_full_path}\"\n )\n\n processed_map_blob = self.__bucket.blob(processed_map_full_path)\n processed_map_blob.upload_from_string(json_string)\n\n ref = db.reference(\"maps\")\n if map_info.uid is not None:\n ref = ref.child(map_info.uid)\n ref.child(map_info.map_name).child(\"map_file\").set(processed_map_full_path)\n\n if verbose:\n print(\n f\"Successfully uploaded database reference maps/{map_info.map_name}/\"\n f\"map_file to contain the blob path\"\n )\n CacheManagerSingleton.cache_map(\n CacheManagerSingleton.PROCESSED_UPLOAD_TO,\n map_info,\n json_string,\n verbose=verbose,\n )", "def upload2(request):\n uploaded = request.read\n fileSize = int(uploaded.im_self.META[\"CONTENT_LENGTH\"])\n fileName = uploaded.im_self.META[\"HTTP_X_FILE_NAME\"] \n fileContent = uploaded(fileSize)\n \n \"\"\"Write image to disk.\"\"\"\n fn, ext = os.path.splitext(fileName)\n name = fn + timezone.now().strftime(\"%Y_%m_%d_%H_%M_%S_%f\") + base64.urlsafe_b64encode(os.urandom(settings.SALT_LENGHT)) + ext\n fileHandler = open(settings.MEDIA_ROOT + \"images/\" + name, \"wb\")\n fileHandler.write(fileContent)\n fileHandler.close()\n \n \"\"\"Create md5hash digest for image.\"\"\"\n base64string = base64.b64encode(fileContent)\n mdfive = md5.new(base64string).hexdigest()\n \n \"\"\"Write image data to db.\"\"\"\n latitude = request.GET.get('lat')\n longitude = request.GET.get('lon')\n tags = request.GET.get('tags').split(' ')\n\n image = Image(title = name, md5hash = mdfive, pub_date = timezone.now(), lat = latitude, lon = longitude)\n image.save()\n\n for tagtext in tags:\n if Tag.objects.filter(name=tagtext).exists():\n t = Tag.objects.get(name=tagtext)\n else:\n t = Tag(name = tagtext)\n t.save()\n image.tags.add(t)\n image.save()\n\n return HttpResponse('{\"success\": true}')", "def push_isochrone(request):\n \n points = simplejson.loads(request.POST['points'])\n gateway = int(request.POST['gateway_id'])\n print points\n print gateway\n\n gateway = models.Gateway.objects.get(gateway_id = gateway)\n \n fence_posts = gateway.fencepost_set.all()\n fence_posts.delete()\n\n idx = 0\n for point in points:\n print point\n print point['x']\n fencepost = models.FencePost.objects.create(gateway = gateway, lat = point['x'], lng = point['y'], sequence = idx)\n idx += 1\n fencepost.save()\n\n fence_posts = gateway.fencepost_set.all()\n for fp in fence_posts:\n print fp.sequence\n print fp.lat\n print fp.lng\n print fp.gateway.description\n\n json_str = simplejson.dumps({\"status\":True})\n return HttpResponse(json_str)", "def post_floating_ip_create(self, resource_dict):\n pass", "def handle_post(cls, **kwargs):\n raise NotImplementedError", "def upload(self, upload_request):\n raise NotImplementedError", "def _process_request(self, request_type, params, marker_elems=None):\r\n response = self.make_request(request_type, params, verb='POST')\r\n return self._process_response(response, 
marker_elems)", "def post(self):\n if validate(request.form):\n handle_upload(request.files['qqfile'], request.form)\n filepath = 'static/images/{}/{}'.format(request.form['qquuid'], request.form['qqfilename'])\n session['img_upload_filepath'] = filepath\n return make_response(200, {\"success\": True})\n else:\n return make_response(400, {\"error\": \"Invalid request\"})", "def post_sensor_data(request, key):\n\tif request.method == 'POST':\n\t\tgiles_url = 'http://localhost:8079/add/MYAPIKEY'\n\t\tresponse = requests.post(giles_url, data=json.dumps(request.data))\n\t\t# print json.dumps(request.data)\n\t\treturn Response(response.content)", "def post_provider_attachment_update(self, resource_id, resource_dict):\n pass", "def handle_upload(f, attrs):\n\n # chunked = False\n dest_folder = os.path.join(app.config['UPLOAD_DIRECTORY'], attrs['qquuid'])\n dest = os.path.join(dest_folder, attrs['qqfilename'])\n save_upload(f, dest)", "def upload_photo(self, file, title='', geocode_x=None, geocode_y=None,\n location='', country='',\n REQUEST=None):\n script = self.manage_addProduct['Peterbecom'].manage_addOutandaboutItem\n \n # I don't want it to accidentally get have the same id \n # so I'm going to dictate that here\n oid = None\n __, ext = os.path.splitext(file.filename)\n if title:\n if isinstance(title, str):\n title = unicode(title, 'utf-8')\n title = unaccent_string(title)\n \n oid_start = title.strip().replace(' ','_').replace('!','')\n oid_start = oid_start.replace('(','').replace(')','')\n while oid_start.startswith('_'):\n oid_start = oid_start[1:]\n \n oid = oid_start + ext\n c = 0\n while getattr(self, oid, None):\n # it already exists!!\n c += 1\n oid = oid_start + '-%s' % c + ext\n else:\n # just use a timestamp\n oid = str(time()) + ext\n c = 0 \n while getattr(self, oid, None):\n c += 1\n oid = str(time()+c) + ext\n \n \n # NB! 
This is a hack until I can figure\n if not geocode_x and not geocode_y and not location:\n logger.warn(\"Randomesque geocode in action!\")\n from random import choice, random\n default_x = 51.5087157485\n default_y = -0.128424167633\n geocode_x = default_x + choice([-1,1]) * random()*0.3\n geocode_y = default_y + choice([-1,1]) * random()*0.3\n \n photo = script(id=oid, title=title, file=file, \n geocode_x=geocode_x, geocode_y=geocode_y,\n location=location, country=country)\n \n\n \n return '%s/view' % photo.absolute_url()", "def ProcessFormData(self, mr, post_data):\n raise MethodNotSupportedError()", "def handle_post_file(self, post_file) -> Tuple[Object, bool]:\n _, ext = os.path.splitext(post_file.name)\n # Remove leading dot from extension\n ext = ext[1:] if ext.startswith(\".\") else ext\n # Generate hashes first to check if upload exists already\n hashes = generate_hashes(post_file)\n # Reset reading position so we can read the file again\n post_file.seek(0)\n # Check if hashes already exists\n existing = Object.objects.filter(sha512=hashes.get(\"sha512\"))\n if existing.exists():\n LOGGER.debug(\"De-duped existing upload %s\", existing.first().filename)\n return existing.first(), False\n # Create new upload object\n new_upload = Object(file=save_from_post(post_file.read(), extension=ext))\n new_upload.save()\n LOGGER.info(\"Uploaded %s\", new_upload.filename)\n return new_upload, True", "def do_POST(self):\n ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))\n self.body = cgi.FieldStorage(fp=self.rfile,\n headers=self.headers, environ = {'REQUEST_METHOD':'POST'},\n keep_blank_values = 1, strict_parsing = 1)\n # throw away additional data [see bug #427345]\n while select.select([self.rfile._sock], [], [], 0)[0]:\n if not self.rfile._sock.recv(1):\n break\n self.handle_data()", "def post_config_upload(self, req, **_kwargs):\n if req.POST:\n meters = req.json.get('meters', None)\n groups = req.json.get('groups', None)\n flows = req.json.get('flows', None)\n\n rm = self.api.process_meter_upload(meters) if meters else ''\n gm = self.api.process_group_upload(groups) if groups else ''\n fm = self.api.process_flow_upload(flows) if flows else ''\n res = Response()\n s = \"{}, {}, {}\".format(rm, gm, fm)\n res.text = s if PYTHON3 else unicode(s, \"utf-8\")\n return res\n\n return Response(status=400) # bad request", "def handle_post_msg(body):\r\n request = json.loads(body)\r\n type = request['type']\r\n subtype = request['subtype']\r\n if type == 'get':\r\n if subtype == 'job':\r\n job = {'job':get_next_job()}\r\n return webServer.add_json_successfull_status(job)\r\n elif subtype == 'tasks':\r\n res = []\r\n for task in tasks:\r\n res.append(task.get_task())\r\n res = {'tasks':res}\r\n return webServer.add_json_successfull_status(res)\r\n\r\n elif type == 'post':\r\n if subtype == 'result':\r\n start_point = request['start_point']\r\n end_point = request['end_point']\r\n found_keyword_bool = request['found_keyword_bool']\r\n keyword_found = request['keyword_found']\r\n task_id = request['task_id']\r\n if found_keyword_bool:\r\n for task in tasks:\r\n if task.id == task_id:\r\n task.set_finished()\r\n task.keyword_found = keyword_found\r\n return webServer.make_json_status(1, \"Result delivered successfully\")\r\n return webServer.make_json_status(0, \"Couldn't find task\")\r\n\r\n elif type == 'create':\r\n if subtype == 'task':\r\n start_point = request['start_point']\r\n end_point = request['end_point']\r\n keyword = request['keyword']\r\n chars = 
request['chars']\r\n searchwidth = request['searchwidth']\r\n algorithm = request['algorithm']\r\n tasks.append(Task(start_point,end_point,keyword,chars,searchwidth,algorithm,gen_task_id(tasks)))\r\n return webServer.make_json_status(1, \"Successfully created new task\")\r\n\r\n elif type == 'test':\r\n if subtype == 'post':\r\n return webServer.make_json_status(1, \"Successful\")\r\n\r\n #If correct type cannot be found\r\n return webServer.make_json_status(0, \"Task Failed\")", "def handle_post(self, args):\n\n session_key = args['session']['authtoken']\n current_user = args['session']['user']\n post_args = args.get('form', [])\n\n # Validate parameters (may raise ValueError).\n status, comment, urgency, searchID, newOwner, ruleUIDs = self.validate_parameters(post_args, session_key)\n\n if not self.BATCH_SAVE_LIMIT:\n self.get_batch_save_limit(session_key)\n\n self.DEFAULT_STATUS = self.getDefaultStatus(session_key)\n\n # Make the call\n response_data = self.makeChanges(status, comment, session_key, newOwner, urgency, ruleUIDs, searchID, current_user)\n\n return {\n 'status': httplib.OK,\n 'payload': response_data\n }", "def post_create():\n req_data = request.get_json()\n\n print('This is the request itself \\n', req_data)\n name = req_data['name']\n chapter = req_data['chapter']\n site = req_data['site']\n print('\\nThe function that is selected: {0} {1} {2}\\n'.format(name, chapter, site))\n flask_wms.write_new_data(name, chapter, site, \"False\")\n return 'Request recieved, create method'", "def post_multipart(host, selector, fields):\n\treturn post_multipart_formdata(host, selector, fields)[3]", "def post(self):\n data = request.json\n create_entry(data)\n return None, 201", "def post(self, *args, **kwargs):\n return self.handle_post_request()", "def do_POST(self):\r\n SimpleXMLRPCRequestHandler.do_POST(self)\r\n try:\r\n # shut down the connection\r\n self.connection.shutdown()\r\n except:\r\n pass", "def post():\n raise NotImplementedError", "def _handleRequestPostChargeParameters(self, data):\r\n print(\"\\\"Request Post Charge Parameters\\\" received\")\r\n message = self.whitebeet.v2gParseRequestPostChargeParameters(data)\r\n if 'dc' in message:\r\n print(\"SOC: {}%\".format(message['dc']['soc']))\r\n try:\r\n self.whitebeet.v2gSetDcPostChargeParameters(0, 1, int(self.charger.getEvsePresentVoltage()))\r\n except Warning as e:\r\n print(\"Warning: {}\".format(e))\r\n except ConnectionError as e:\r\n print(\"ConnectionError: {}\".format(e))", "def post_floating_ip_update(self, resource_id, resource_dict):\n pass", "def post(self, slug = None):\n filename = self.request.form.get(\"filename\")\n imgdata = base64.b64decode(self.request.form['data'])\n stream = StringIO.StringIO(imgdata)\n content_length = len(imgdata)\n content_type = \"image/png\"\n\n asset = self.app.module_map.uploader.add(\n stream, \n filename = filename,\n content_type = content_type,\n content_length = content_length,\n )\n\n asset_id = unicode(asset._id)\n return {\n 'url' : self.url_for(\"asset\", asset_id = asset.variants['medium_user']._id),\n 'status' : \"success\",\n 'asset_id' : asset_id\n }", "def post(self, controllerfs):\n raise exception.OperationNotPermitted", "def HandlePost(self): # pylint: disable=R0201\n return BaseHandler.CreateError('Not implemented yet!', 501)", "def prepare_pdbid_entry(self):\n ## class Submit2Page\n pdbid = self.form[\"pdbid\"].value.upper()\n if vet_pdb_id(pdbid) == False:\n if pdbid is None or pdbid == \"\":\n raise SubmissionException(\"No PDB file uploaded and no PDB 
ID given. Please try again.\")\n else:\n raise SubmissionException(\"Invalid PDB ID '\"+pdbid+\"'. Please try again.\")\n\n ## allocate a new JobID\n job_id = mysql.job_new()\n\n ## record user's IP address\n ip_addr = os.environ.get(\"REMOTE_ADDR\", \"Unknown\")\n mysql.job_set_remote_addr(job_id, ip_addr)\n\n ## Fetch and upload PDB entry by PDB ID for custom analysis\n if not vet_struct_id(pdbid, 4):\n raise SubmissionException(\"Not a valid PDB structure ID\")\n\n pdbfile_bin = webtlsmdd.fetch_pdb(pdbid)\n pdbentry = pdbfile_bin.data\n if len(pdbentry) == 0:\n raise SubmissionException(\"Could not download PDB entry \"+pdbid+\" from RCSB.\")\n\t## Custom analysis from PDB ID: simple sanity check\n\n ## basic sanity checks\n ## If check_upload returns anything but a empty string, the server will\n ## inform the user of the problem and not proceed any further.\n ln = pdbentry.split(\"\\n\")\n r, garbage = check_upload(job_id, ln, mainchain = False)\n if r != '':\n raise SubmissionException(str(r))\n\n result = webtlsmdd.set_structure_file(job_id, xmlrpclib.Binary(pdbentry))\n if result != \"\":\n raise SubmissionException(\"Failed to submit structure for PDB ID \"+pdbid+\": \" + str(result) + \"<br>Please try again.\")\n\n return job_id", "def post(self):\n raise exceptions.NotImplemented", "def post(self):\n file_ = self.verify_param('file', cgi.FieldStorage)\n data, filemask = self.build_post_data(file_)\n return data, filemask", "def handle_upload(f, attrs):\n\n # chunked = False\n print 'UPLOAD DIRECTORY:', UPLOAD_DIRECTORY\n dest_folder = os.path.join(UPLOAD_DIRECTORY, attrs['qquuid'])\n dest = os.path.join(dest_folder, attrs['qqfilename'])\n save_upload(f, dest)", "def on_post(self):\n return \"Ok, the stuff is being saved\"", "def submitFiles(self):\n formData =__new__(FormData)();\n \"\"\"\n Iteate over any file sent over appending the files\n to the form data.\n \"\"\"\n i=0\n console.log(self.vue.files)\n while i < self.vue.files.length:\n file = self.vue.files[i];\n formData.append('files[' + i + ']', file);\n i+=1\n \"\"\"\n Make the request to the POST /file-drag-drop URL\n \"\"\"\n formData.append(\"type\",\"upload\")\n __pragma__ ('jsiter') \n fetch('/json/plugins/',\n {\n \"method\":\"POST\",\n \"body\":formData,\n })\\\n .then(lambda res:res.json())\\\n .then(self.uploaded)\\\n .catch(lambda e:console.log('FAILURE!!',e));\n __pragma__ ('nojsiter')", "def post_provider_attachment_create(self, resource_dict):\n pass", "def create(self, validated_data):\n \"\"\" Create post with a location \"\"\"\n location_data = validated_data.pop('location')\n\n # create a new one or get a old for reference\n this_location = Location.objects.get_or_create(\n **location_data\n )\n\n # pop the photo url's data\n photo_data = validated_data.pop('photo')\n\n # must pop the tags data before it would used to create a post \n tags_data = validated_data.pop('tag')\n # create a instance of this post\n this_post = Post.objects.create(\n location = this_location[0],\n **validated_data)\n\n \"\"\"Associate tag's informatiion to post\"\"\"\n for tag in tags_data:\n this_tag = Tag.objects.get_or_create(name = tag.get('name'))\n print(tag.get('name'))\n print(this_tag)\n # attach this tag to this photos_datapost \n this_post.tag.add(this_tag[0])\n\n \"\"\"Associate the photo url \"\"\"\n for photo in photo_data:\n this_post.photo.create(name = photo.get('name'))\n # return the created post \n this_post.save()\n return this_post", "def post(self, *args):\n mapname = self.get_argument('map')\n path = 
rospkg.RosPack().get_path('rtcrobot_navigation') + '/maps/' + mapname\n if(os.path.isdir(path)):\n if(os.path.exists(path + '/data.dat')):\n dataFile = open(path + '/data.dat', 'r+')\n # useful code goes here\n self.write(dataFile.read())\n self.finish()\n else:\n imgFile = open(path + '/navigation.png', 'r+')\n navdata = base64.standard_b64encode(imgFile.read())\n data = {\n 'navData': navdata,\n 'wallData': {\n 'pointdata': []\n },\n 'zoneData': []\n }\n self.write(json.dumps(data))\n else:\n self.write_error(404)\n #self.finish()" ]
[ "0.6062715", "0.59630567", "0.58975375", "0.58711004", "0.5809544", "0.5728217", "0.5689963", "0.55832034", "0.5559245", "0.5515752", "0.54731184", "0.54603994", "0.54473406", "0.5440826", "0.5434373", "0.54309976", "0.5366671", "0.53177875", "0.5264689", "0.5257757", "0.5244774", "0.5193859", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5160495", "0.5159521", "0.5156674", "0.51405805", "0.51391613", "0.5137841", "0.5137", "0.5134139", "0.5127493", "0.5111189", "0.5108238", "0.51059186", "0.5091214", "0.5082866", "0.50795335", "0.50690305", "0.5066295", "0.50392514", "0.50306886", "0.5027892", "0.5017143", "0.50083506", "0.50014716", "0.49833986", "0.49810034", "0.49790382", "0.49783152", "0.4971402", "0.49595156", "0.49589923", "0.49532205", "0.4945988", "0.4942209", "0.49402264", "0.4934717", "0.49197686", "0.49153337", "0.4914018", "0.49132106", "0.49129125", "0.4911579", "0.489661", "0.48903733", "0.48850822", "0.48829198", "0.4877845", "0.48691216", "0.48637307", "0.48611686", "0.4858967", "0.4858024", "0.48575687", "0.48568478", "0.48467657", "0.48394898", "0.48383686", "0.48363805", "0.48285374", "0.4826163", "0.48257038", "0.48211026", "0.48156396", "0.4813745", "0.48129046" ]
0.8015848
0
Delete an entry and its associated photo. A workaround to AJAX.
def delete(request):
    wfsxml = request.POST.get('wfsxml', False) # FOR GEOSERVER
    uuid = request.POST.get('uuid', False)
    # MAKE GEOSERVER WFS TRANSACTION
    error = post_to_geoserver(wfsxml, GeoPostBase.wfsURL)
    # ALL GOOD
    if error:
        return server_error(error)
    # IF WFS TRANSACTION ERROR
    else:
        pass
    # Delete photo from bucket
    delete_from_bucket(uuid, GeoPostBase.imageBucket)
    return HttpResponseRedirect(reverse('geopost_home'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def photo_delete(sender, instance, **kwargs):\n\tinstance.photo.delete(False)", "def picture_delete(request, pk):\n picture = get_object_or_404(Picture, pk=pk)\n\n if picture.author != request.user:\n data = {\n 'status': 'failed',\n 'details': 'Not allowed'\n }\n return JsonResponse(data, status=403)\n\n data = {\n 'status': 'success',\n 'data': PictureDetailSerializer(picture).data\n }\n picture.delete()\n\n return JsonResponse(data, status=200)", "def delete_image(self):\n Image.objects.get(id = self.id).delete()", "def delete(id):\n # Get the photo requested\n photo = Photo.query.filter(Photo.id == id).one_or_none()\n\n # Did we find a photo?\n if photo is not None:\n db.session.delete(photo)\n db.session.commit()\n return make_response(\n \"Photo {id} deleted\".format(id=id), 200\n )\n\n # Otherwise, nope, didn't find that photo\n else:\n abort(\n 404,\n \"Photo not found for Id: {id}\".format(id=id),\n )", "def delete(self, req, id):\n context = None\n try:\n db_api.image_destroy(context, id)\n except exception.NotFound:\n return exc.HTTPNotFound()", "def delete(self, *args, **kwargs):\n super(Image, self).delete(*args, **kwargs)", "def delete_photo(request, object_id):\n character = get_character_from_ob(object_id)\n user = request.user\n if not (user == character.player_ob or user.is_staff):\n raise Http404(\"Only owners or staff may delete photos.\")\n try:\n photo = Photo.objects.get(pk=request.POST[\"select_photo\"])\n except Exception as err:\n raise Http404(err)\n cloudinary.api.delete_resources([photo.image.public_id])\n photo.delete()\n return HttpResponseRedirect(reverse(\"character:gallery\", args=(object_id,)))", "def delete_file(sender, instance, *args, **kwargs):\n if instance.photo:\n _delete_file(instance.photo.path)", "def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.object.delete()\n if request.is_ajax():\n response = JSONResponse(True, {}, response_mimetype(self.request))\n response['Content-Disposition'] = 'inline; filename=files.json'\n return response\n else:\n return HttpResponseRedirect('/upload/new')", "def delete(self, *args, **kwargs):\n self.image.delete()\n super(Recipe, self).delete(*args, **kwargs)", "def delete_photo(photo):\n\n\tfilename = \"%s/%s\" % (current_app.instance_path, photo.url)\n\ttry:\n\t\tos.remove(filename)\n\texcept:\n\t\t# The file doesn't exist.\n\t\tpass\n\n\tdb = get_database()\n\tdb.session.delete(photo)\n\tdb.session.commit()", "def delete_entry():\n u_id = request.args(0) or redirect(URL('moderation', 'new_entries'))\n db(db.lioli_main.unique_id == u_id).delete()\n redirect(URL('new_entries'))\n return dict()", "def delete_image(request):\n \n try:\n parsed_data = get_json_data(request)\n if not authenticate_user(parsed_data):\n ret = Response(AUTHENTICATION_FAIL, error_code[AUTHENTICATION_FAIL])\n return HttpResponse(ret.serialize(f))\n image = CarImage(image_id=parsed_data[\"image_id\"])\n image.delete()\n ret = Response(SUCCESS, error_code[SUCCESS]) \n except ObjectDoesNotExist as e:\n ret = Response(NONEXIST_DATA, error_code[NONEXIST_DATA].format(e.message))\n except Exception as e:\n ret = Response(INPUT_FORMAT, error_code[INPUT_FORMAT])\n return HttpResponse(ret.serialize(f))", "def DeleteRow(self, entry):\n for a_link in entry.link:\n if a_link.rel == 'edit':\n return self.Delete(a_link.href)", "def delete_entry(self, id, **args):\n args.update(id=id)\n return self.fetch(\"/entry/delete\", post_args=args)", "def delete(self, name):\n params = {\n 'method': 'flickr.photos.delete',\n 
'photo_id': name,\n }\n response = self.oauth_session.post(self.API_ENDPOINT, params=params)\n json_response = response.json()\n if json_response['stat'] == 'fail' and json_response['code'] != 1:\n raise FlickrError(json_response['message'])", "def sorl_delete(**kwargs):\n from sorl.thumbnail import delete\n delete(kwargs['file'])", "def test_delete_image(self):\n # Upload the image first\n self.test_upload_image()\n im = ImageAttachment.objects.all()[0]\n r = post(self.client, 'upload.del_image_async', args=[im.id])\n\n eq_(200, r.status_code)\n json_r = json.loads(r.content)\n eq_('success', json_r['status'])\n eq_(0, ImageAttachment.objects.count())", "def delete_like(self, entry, **args): \n args.update(entry=entry)\n return self.fetch(\"/like/delete\", post_args=args)", "def delete(self, req, id):\n context = req.environ['nova.context']\n self._image_service.delete(context, id)\n return webob.exc.HTTPNoContent()", "def __on_delete(self):\n self.image.delete()", "def __on_delete(self):\n self.image.delete()", "def POST_delete_link_img(self, res, link, name):\r\n # just in case we need to kill this feature from XSS\r\n if g.css_killswitch:\r\n return self.abort(403,'forbidden')\r\n link.del_image(name)\r\n link._commit()\r\n # hide the image and it's container\r\n res._hide(\"img-li_%s\" % name)\r\n # reset the status\r\n res._update('img-status', innerHTML = _(\"Deleted\"))", "def delete(self, *args, **kwargs):\n\n user_n=str(self.sujeto.user.pk)\n img_name=str(self.sujeto.pk)\n \n file_path=settings.MEDIA_ROOT+self.path[len('/media'):]\n\n os.remove(file_path)\n super(img_to_show, self).delete(*args, **kwargs)", "def delete_image(request, image_id):\n\n if not request.user.is_superuser:\n messages.error(\n request,\n \"Access denied! Only store admin can delete a image.\")\n return redirect(reverse(\"home\"))\n\n image = get_object_or_404(GalleryImages, pk=image_id)\n image.delete()\n messages.success(request, \"Image deleted!\")\n\n return redirect(reverse(\"gallery\"))", "def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.object.delete()\n return JsonResponse({'status': 'ok'})", "def spatialitedbs_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)", "def delete_file(sender, instance, *args, **kwargs):\n if instance.image:\n _delete_file(instance.image.path)", "def delete_entry(id):\n if not session.get('logged_in'):\n abort(401)\n\n db = get_db()\n cur = db.execute('select id, title from entries where id = ?',\n [id.strip()])\n entries = cur.fetchall()\n title = entries[0]['title']\n db = get_db()\n db.execute('delete from entries where id = ?', [id.strip()])\n db.commit()\n flash('Recipe, ' + escape(title) + ', has been deleted', 'success')\n return redirect(url_for('show_entries'))", "def beforeDelete(sender, **kwargs):\n toDelete = kwargs['instance']\n\n logger.debug('Deleted image %s', toDelete)", "def delImg(img_name):\n img = Image.objects.raw({\"_id\": img_name}).first()\n img.delete()\n return", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def test_deletion_of_user_photo_succeeds(self):\n\t\tself.name = 'media.png'\n\t\tself.image = File(open('static/img/media.png', 'rb'))\n\t\tself.created_image = UserPhoto(image=self.image, name=self.name, created_by=self.user)\n\t\tself.created_image.save()\t\t\t\n\t\tresponse = self.client.delete('/api/modify_photo/?id={}'.format(self.created_image.id))\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def delete():", "def 
delete(self, _id):", "def removePostFromDb(photo_name):\n connection = sqlite3.connect(homePath + DBname)\n cursor = connection.cursor()\n cursor.execute(\"DELETE FROM photo WHERE photo_name == (?);\", (photo_name,))", "def auto_delete_file_on_delete(sender, instance, **kwargs):\n to_delete = [\n instance.photo,\n instance.photo2,\n instance.photo3\n ]\n for photo in to_delete:\n if photo:\n if os.path.isfile(photo.path):\n os.remove(photo.path)", "def delete_image(filename):\n referrer = request.referrer\n path = \"/Users/ericmontague/sponsormatch/app/static/images/\" + filename\n image = Image.query.filter_by(path=path).first_or_404()\n event = Event.query.get_or_404(image.event_id)\n if not current_user.is_organizer(event) and not current_user.is_administrator():\n return redirect(url_for(\"main.index\"))\n db.session.delete(image)\n db.session.commit()\n flash(\"Your event image was successfully deleted.\", \"success\")\n return redirect(referrer)", "def delete(self, trash=True, **kwargs):\n if self.is_trashed or not trash:\n super(Picture, self).delete()\n return\n\n self.trashed_time = datetime.now()\n self.save()", "def photo_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def delete_entry(entry):\n\n # Llama a view_entries despues de haber añadido nueva funcionalidad\n\n response = input(\"Estás seguro? [yN]\").lower()\n\n if response == 'y':\n entry.delete_instance()\n print('Entrada borrada.')", "def view_delete():\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\", \"json\")\n )", "def delete(self, request , pk=None): \n return Response({'message':'DELETE'})", "def delete(request, slug, username):\n delete_album_contributor(slug, username)\n \n response = HttpResponse(status=204)\n response['Cache-Control'] = 'no-cache'\n return response", "def update_path_image_on_remove(sender, **kwargs):\n instance = kwargs.pop('instance', None)\n action = kwargs.pop('action', None)\n pk_set = kwargs.pop('pk_set', None)\n if action == \"post_remove\" and len(instance.content.all()) != 0:\n content = Content.objects.get(pk=list(pk_set)[0])\n if instance.image == content.image or not instance.image:\n content = instance.content.all()[0]\n instance.image = content.image\n instance.save()", "def delete_image(db, filename, usernick):\n cur = db.cursor()\n sql = \"\"\"\n delete from images where filename=? 
and usernick=?;\n \"\"\"\n cur.execute(sql, (filename, usernick))\n db.commit()\n\n sql = \"\"\"\n delete from likes where filename=?;\n \"\"\"\n cur.execute(sql, (filename,))\n db.commit()", "def delete(self, *args, **kwargs):\n self.image.storage.delete(self.image.name)\n delete(self.image)\n super().delete(*args, **kwargs)", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def photo_edit_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.upload.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def auto_delete_image_and_thumbnail_on_delete(sender, instance, **kwargs):\n if instance.image:\n if os.path.isfile(instance.image.path):\n os.remove(instance.image.path)\n\n if instance.thumbnail:\n if os.path.isfile(instance.thumbnail.path):\n os.remove(instance.thumbnail.path)\n\n return False", "def post_delete(self, *args, **kw):\n #obtenemos el id de la fase para hacer el filtrado despues de la redireccion\n item_to_del = DBSession.query(Item).filter_by(id_item=args[0]).one()\n fid = item_to_del.id_fase_fk\n pks = self.provider.get_primary_fields(self.model)\n d = {}\n for i, arg in enumerate(args):\n d[pks[i]] = arg\n self.provider.delete(self.model, d)\n\n path = './' + '../' * (len(pks) - 1) + '?fid=' + str(fid)\n\n redirect(path)", "def POST_delete_sr_img(self, res, name):\r\n # just in case we need to kill this feature from XSS\r\n if g.css_killswitch:\r\n return self.abort(403,'forbidden')\r\n c.site.del_image(name)\r\n c.site._commit()\r\n # hide the image and it's container\r\n res._hide(\"img-li_%s\" % name)\r\n # reset the status\r\n res._update('img-status', innerHTML = _(\"Deleted\"))", "def deleteImage(username,imagename):\n if g.user == username:\n delete_blob(username,imagename)\n return redirect(url_for('landing'))", "def delete(self, *args, **kwargs):\n self.file.delete(save=False)\n self.thumbnail.delete(save=False)\n\n super(File, self).delete(*args, **kwargs)", "def otherfiles_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)", "def delete_thumbnail(self, thumbnail_name):", "def delete(self):\n self.request().delete()", "def do_command(self, args):\n imageops = dbops.Images()\n imageops.delete(args)", "def _delete(self, *args, **kwargs):\n return self._request('delete', *args, **kwargs)", "def delete_entry(entry_id):\n\n # grabs the specific entry id\n entry = Entry.query.get(entry_id)\n\n # grabs the user id in the session\n user_id = session.get(\"user_id\")\n\n # prevents the public for accessing user specific information\n if not session.get(\"user_id\") or session[\"user_id\"] != user_id:\n return redirect(\"/\")\n\n # removes an entry from the database\n db.session.delete(entry)\n db.session.commit()\n\n # flash a message to show confirmation for the user\n flash(\"You have successfully deleted an entry!\")\n\n return redirect(f\"all-entries/{user_id}\")", "def delete(request, post, **kwargs):\n user = request.user\n url, msg = delete_post(post=post, user=user)\n messages.info(request, mark_safe(msg))\n db_logger(user=user, text=f\"{msg} ; post.uid={post.uid}.\")\n\n return url", "def delete(self, *args, **kwargs):\n\t\tself.emo_img.delete(False)\n\t\tsuper(Emotion, self).delete(*args, **kwargs)", "def destroy(self):\n url = \"/images/%s/destroy\" % (str(self.id))\n\n data = self._conn.request(url)\n\n log.debug(data)", "def delete(self):\n ...", "def delete(self, id):\n delete_entry(id)\n return None, 204", "def cmd_image_delete(client, args):\n 
image_to_delete = client.delete_image(args.image_id)\n generate_output({'deleted': image_to_delete})", "def delete(self, request, *args, **kwargs):\r\n self.object = self.get_object()\r\n success_url = self.get_success_url()\r\n self.object.delete()\r\n messages.success(self.request, self.success_message)\r\n return HttpResponseRedirect(success_url)", "def delete(self, *args, **kwargs):\n return self.handle_delete_request()", "def delete_upload(request):\r\n\tgame_id = request.GET['id']\r\n\tgame = Game.objects.get(id = game_id)\r\n\tif(request.user.profile == game.developer):\r\n\t\tif request.method == 'POST':\r\n\t\t\tgame.delete()\r\n\t\t\tprint('game deleted')\r\n\t\t\treturn redirect('developer_dashboard')\r\n\t\telse:\r\n\t\t\treturn render(request, 'confirm_delete.html', {'game':game})\r\n\telse:\r\n\t\treturn redirect('home')", "def auto_delete_image_lecture_on_delete(sender, instance, **kwargs):\n if instance.file:\n instance.file.delete(save=False)", "def delete_image(Name=None):\n pass", "def post_provider_attachment_delete(self, resource_id, resource_dict):\n pass", "def obj_delete(self, request=None, **kwargs):\n self.get_collection(request).remove({ \"_id\": ObjectId(kwargs.get(\"pk\")) })", "def delete_item(request):\n if request.json_body[u'type'] == u'post':\n if DBSession.query(Post).filter(Post.name==request.json_body[u'name']).delete() == 1:\n return {\"deletion_status\":\"success\"}\n import ipdb; impdb.set_trace()\n return {\"deletion_status\":\"error\"}", "def deletePost(self, editLink, entryId): #$NON-NLS-1$\r\n\r\n deleteAtomEntry = self.createDeleteBlogEntry()\r\n deleteAtomEntry.setId(entryId)\r\n return self.deleteAtomEntry(editLink, deleteAtomEntry)", "def delete(self, *args, **kwargs) -> Any:\n pass", "def delete(id):\r\n get_post(id)\r\n db = get_db()\r\n db.cursor().execute('DELETE FROM novel.post WHERE id = %s', id)\r\n db.commit()\r\n return redirect(url_for('novel.index'))", "def delete_recipe(request, recipe, **_kwargs):\n pass", "def delete_or_hide_photo_from_photo_settings(request):\n\tif request.method == \"POST\":\n\t\tdecision = request.POST.get('dec',None)\n\t\timage_data = request.POST.get('imgd',None)\n\t\ttarget_id = request.POST.get('tid',None)\n\t\tif decision is None:\n\t\t\timage_url = request.POST.get('phturl',None)\n\t\t\tphoto_action = request.POST.get('actpht',None)\n\t\t\tif photo_action == '1':\n\t\t\t\taction = 'del'\n\t\t\telif photo_action == '2':\n\t\t\t\taction = 'undel'\n\t\t\telif photo_action == '3':\n\t\t\t\taction = 'hide'\n\t\t\telif photo_action == '4':\n\t\t\t\taction = 'unhide'\n\t\t\treturn render(request,\"personal_group/photos/photo_settings/delete_photo_from_settings.html\",\\\n\t\t\t\t{'action':action,'img_url':image_url,'imgd':image_data,'tid':target_id})\n\t\telif decision == '1':\n\t\t\town_id = request.user.id\n\t\t\tgroup_id, exists = personal_group_already_exists(own_id, target_id)\n\t\t\tif exists:\n\t\t\t\ttry:\n\t\t\t\t\timage_data = image_data.split(\":\")\n\t\t\t\t\tblob_id, idx, img_id = image_data[0],image_data[1],image_data[2]\n\t\t\t\texcept (AttributeError,IndexError):\n\t\t\t\t\tpass\n\t\t\t\taction = request.POST.get('act',None)\n\t\t\t\tdeleted, ttl = delete_or_hide_photo_from_settings(own_id, group_id, blob_id, idx, img_id, action=action)\n\t\t\t\tif deleted:\n\t\t\t\t\tif action in ('del','undel'):\n\t\t\t\t\t\tupdate_notif_object_del.delay(action=action,blob_id=blob_id,idx=idx,group_id=group_id)\n\t\t\t\t\telif action in 
('hide','unhide'):\n\t\t\t\t\t\tupdate_notif_object_hide.delay(action,blob_id,idx,group_id)\n\t\t\t\telif not deleted and ttl:\n\t\t\t\t\treturn render(request,\"personal_group/deletion/personal_group_cant_delete_chat.html\",{'ttl':ttl,'act':action,\\\n\t\t\t\t\t\t'one_photo':True,'tid':target_id})\n\t\t\t\trequest.session[\"personal_group_tid_key\"] = target_id\n\t\t\t\trequest.session[\"personal_group_gid_key:\"+target_id] = group_id\n\t\t\t\trequest.session.modified = True\n\t\t\t\treturn redirect(\"enter_personal_group\")\n\t\t\telse:\n\t\t\t\treturn redirect(\"personal_group_user_listing\")\n\t\telif decision == '0':\n\t\t\trequest.session[\"personal_group_tid_key\"] = target_id\n\t\t\trequest.session.modified = True\n\t\t\treturn redirect(\"enter_personal_group\")\n\t\telse:\n\t\t\treturn redirect(\"personal_group_user_listing\")\n\telse:\n\t\treturn redirect(\"enter_personal_group\")", "def _delete_image_volume(self,\n context: context.RequestContext,\n cache_entry: dict) -> None:\n volume = objects.Volume.get_by_id(context, cache_entry['volume_id'])\n\n # Delete will evict the cache entry.\n self.volume_api.delete(context, volume)", "def delete(self, *args, **kwargs):\n self.portrait.delete()\n super(Giza, self).delete(*args, **kwargs)", "def delete_upload(sender, **kwargs):\n instance = kwargs['instance']\n path_to_delete = '%s/%s.%s' % (instance.path,instance.uuid,instance.ext)\n if not os.path.isdir(path_to_delete):\n os.unlink(path_to_delete)", "def auto_delete_file_on_delete(sender, instance, **kwargs):\n if instance.img:\n if os.path.isfile(instance.img.path):\n os.remove(instance.img.path)", "def delete(self, data):\r\n pass", "def delete(id):\n result = delete_post(id)\n flash(result)\n return redirect(url_for(\"show\"))", "def basemap_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)", "def on_delete_record(event):\n keep_old_files = asbool(utils.setting_value(event.request, 'keep_old_files', default=False))\n\n # Retrieve attachments for these records using links.\n resource_name = event.payload['resource_name']\n filter_field = '%s_uri' % resource_name\n uri = event.payload['uri']\n utils.delete_attachment(event.request, link_field=filter_field, uri=uri,\n keep_old_files=keep_old_files)", "def delete(self, request, id, format=None):\n posts = self.get_object(id)\n posts.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def delete(self, request, *args, **kwargs):\n return super(PageElementDetail, self).delete(request, *args, **kwargs)", "def delete(self, entry): # Hashmap.delete\n\n entry.delete()\n\n # remove the entry from the hashmap\n list=self.contentHash[entry.hexdigest]\n newlist = []\n for e in list:\n if e != entry:\n newlist.append(e)\n\n # if there are no more entries for this hashval, remove\n # it from the dictionary m\n if len(newlist):\n self.contentHash[entry.hexdigest] = newlist\n else:\n del self.contentHash[entry.hashval]\n\n # also remove all the deleted children from the hashmap\n self.prune()", "def delete(self, request, *args, **kwargs):\n if self.json:\n self.object = self.get_object()\n obj = copy.deepcopy(self.object)\n self.object.delete()\n return self.json_to_response(data=obj)\n else:\n return super().delete(request, *args, **kwargs)", "def delete(request):\n issue = request.issue\n tbd = [issue]\n for cls in [models.PatchSet, models.Patch, models.Comment,\n models.Message, models.Content]:\n tbd += cls.query(ancestor=issue.key)\n ndb.delete_multi(entity.key for entity in tbd)\n return 
HttpResponseRedirect(reverse(mine))", "async def delete(self, request):\n\n userid = await authenticated_userid(request)\n project = await request.app.context_project(request, userid)\n\n await request.post()\n\n camera_id = request.match_info['camera_file_id']\n\n log = request['slog']\n log.debug('request: camera delete', camera_id=camera_id)\n\n camera = await Camera.get(request,\n camera_id=camera_id,\n project_id=project.project_id,\n userid=userid)\n if not camera:\n raise web.HTTPNotFound(text=\"Camera file '%s' not found\" % camera_id)\n\n await request.app.task_broker.publish('camera_delete', {\n 'userid': userid,\n 'project_id': project.project_id,\n 'camera_id': camera_id,\n }, log=log)\n\n await camera.set_status(request, 'DELETING')\n\n return web.HTTPNoContent()", "def delete(self, request, url_id, *args, **kwargs):\n url_instance = self.get_object(url_id, request.user.id)\n if not url_instance:\n return Response(\n {\"detail\": \"Object with url id does not exists\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n url_instance.delete()\n return Response(\n {\"detail\": \"Object deleted!\"}, status=status.HTTP_200_OK\n )", "def delete(self, obj):", "def remove_recipe(request, pk):\n\n url = reverse('fridge:fridge_detail')\n recipe = get_object_or_404(Recipe, pk=pk)\n fridge = request.user.fridge\n fridge.recipes.remove(recipe)\n\n return HttpResponseRedirect(url)", "def delete_entry_from_db(entry):\n db.session.delete(entry)\n db.session.commit()", "def tag_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)", "def delete_from_db(image):\n db.session.delete(image)\n db.session.commit()" ]
[ "0.73588145", "0.70873886", "0.69900006", "0.69221896", "0.68318427", "0.68305916", "0.67875415", "0.67865944", "0.67163795", "0.6713444", "0.6629518", "0.65691507", "0.65677196", "0.6564613", "0.6494717", "0.64877266", "0.6412482", "0.6381852", "0.6367966", "0.6347049", "0.63287324", "0.63287324", "0.6300359", "0.62942326", "0.6293191", "0.6292806", "0.6279675", "0.6275485", "0.6266456", "0.6245881", "0.6241854", "0.62405235", "0.62305504", "0.6229503", "0.6226984", "0.621895", "0.61981094", "0.618502", "0.61833584", "0.6158931", "0.61581445", "0.6155783", "0.61555165", "0.61535925", "0.6146404", "0.6141087", "0.6133208", "0.6122527", "0.6122527", "0.61216044", "0.61158043", "0.61069673", "0.60886735", "0.6080639", "0.60794103", "0.60791487", "0.60741425", "0.60736716", "0.60392964", "0.6030265", "0.6024806", "0.6021634", "0.6017667", "0.60118216", "0.6008055", "0.60068375", "0.6002422", "0.59926385", "0.5992405", "0.5984024", "0.59816515", "0.5970457", "0.5967769", "0.5946023", "0.5940922", "0.5938707", "0.59376866", "0.59359145", "0.5927426", "0.5926171", "0.5915319", "0.5912312", "0.59098136", "0.5903115", "0.5886246", "0.58829284", "0.58748275", "0.5870855", "0.5864155", "0.5860704", "0.58596855", "0.5843659", "0.58430237", "0.58384335", "0.5835508", "0.58198076", "0.5815683", "0.5814656", "0.58140206", "0.5799653" ]
0.6766004
8
The GeoPost view method for retrieving photos
def photo(request, entry_uuid):
    resp = HttpResponse()
    metadata, photo = download_from_bucket(entry_uuid, GeoPostBase.imageBucket)
    resp.write(base64.b64encode(photo))
    resp['Content-Type'] = metadata['contentType']
    return resp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feed(self, request, pk):\n photographer = Photographer.objects.get(id=pk)\n photo_feed = (Photo\n .objects\n .filter(Q(location__istartswith=photographer.location) |\n Q(uploaded_by__in=photographer.following.all()))\n .exclude(uploaded_by=photographer))\n serializer = PhotoSerializer(photo_feed,\n many=True,\n context={'request': request})\n return Response(serializer.data)", "def get(self, request, format=None):\n photos = Photo.objects.all()\n serializer = PhotoSerializer(photos, many=True)\n return self.render_json_response(serializer.data)", "def get(self):\n return PhotoGalleryService().get_all(), 200", "def get_photos(self,query=None):\n if self._access_token is None:\n raise RequiresAccessTokenError()\n \n parameters = self.__get_default_oauth_params()\n base_url = CONTENT_ROOT_URL + 'photos/'\n\n if query is not None:\n query_post = simplejson.dumps(query, cls=JSONFactories.encoders.get_encoder_for(query))\n parameters['query'] = query_post\n self.response = self.__make_oauth_request(base_url, parameters, token=self._access_token, signed=True, method=\"POST\")\n else:\n self.response = self.__make_oauth_request(base_url, parameters, token=self._access_token, signed=True, method=\"GET\")\n \n results = simplejson.loads(self.response.read())\n nice_result = make_nice.make_it_nice(results)\n return nice_result", "def movie_photos(request, pk):\n if request.method == 'GET':\n photos_list = MoviePhotos.objects.filter(movie_id=pk)\n serializer = MoviePhotosSerializer(photos_list, many=True)\n return Response(serializer.data)", "def get_featured_photos(self, count = 30, page = 1):\n uri = 'photos/featured'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def get_queryset(self):\n return Photo.objects.filter(user=self.request.user)", "def view_images(request):\n user_root = request.session['user_root']\n search_id = request.session['search_id']\n with open(os.path.join(user_root, search_id, 'info.json')) as f:\n info = json.load(f)\n object_id_list = info['object_id_list']\n image_type_list = info['image_type_list']\n search_pattern = info['search_pattern']\n image_dir = scan_images(user_root, search_id, image_type_list,relative_path=True)\n\n # Add flag for conditional representation.\n flag_scan = False\n flag_classifier=info['flag_classifier']\n if search_pattern == \"scan\":\n flag_scan = True\n bounding_box_dict = scan_bb_images(\n user_root, search_id, folder_name=\"scans\")\n else:\n bounding_box_dict = scan_bb_images(user_root, search_id)\n\n return render(request, 'gallery.html',\n {\"object_id_list\": object_id_list,\n \"image_dir\": image_dir,\n \"bounding_box\": bounding_box_dict,\n \"flag_scan\": flag_scan,\n \"flag_classifier\":flag_classifier,\n \"image_type_list\":image_type_list})", "def photo_list(request, queryset=None, **kwargs):\n if queryset is None:\n queryset = Photo.objects.all()\n \n if 'queryset' in kwargs:\n del kwargs['queryset']\n \n if 'template_name' not in kwargs:\n kwargs['template_name'] = 'flickrsets/photo/list.html'\n \n if 'template_object_name' not in kwargs:\n kwargs['template_object_name'] = 'photo'\n \n if 'paginate_by' not in kwargs:\n kwargs['paginate_by'] = getattr(\n app_settings,\n 'PHOTO_LIST_VIEW_PAGINATE_BY')\n \n return list_detail.object_list(request, queryset, **kwargs)", "def get_images(self, page_number):", "def tag_photo_list(request, name, queryset=None, **kwargs):\n if queryset is None:\n queryset = Tag.objects.all()\n \n if 'queryset' in kwargs:\n del kwargs['queryset']\n \n if 
'template_name' not in kwargs:\n kwargs['template_name'] = 'flickrsets/tag/photo_list.html'\n \n if 'template_object_name' not in kwargs:\n kwargs['template_object_name'] = 'photo'\n \n if 'paginate_by' not in kwargs:\n kwargs['paginate_by'] = getattr(\n app_settings,\n 'TAG_PHOTO_LIST_VIEW_PAGINATE_BY')\n \n tag = shortcuts.get_object_or_404(queryset, name=name)\n\n if 'extra_context' not in kwargs:\n kwargs['extra_context'] = {}\n kwargs['extra_context']['tag'] = tag\n \n queryset = tag.photos.all()\n \n return list_detail.object_list(request, queryset, **kwargs)", "def photos(self):\n return self._photos", "def get_photo(self, photo_id):\n uri = 'photos/' + photo_id\n return self.make_request(uri)", "def get_context_data(self, **kwargs):\n context = super(self.__class__, self).get_context_data(**kwargs)\n fullname = self.object.gecos.replace(' ', '_')\n #context['photoUrl'] = fullname + '/' + fullname + '-200x200.jpg'\n return context", "def photos(self):\n return self.waypoints.filter(photo_id__isnull=False)", "def get_context_data(self):\n photo = Photo.objects.order_by('?').first()\n return {'photo': photo}", "def person_photo_list(request, flickr_id, queryset=None, **kwargs):\n if queryset is None:\n queryset = Person.objects.all()\n \n if 'queryset' in kwargs:\n del kwargs['queryset']\n \n if 'template_name' not in kwargs:\n kwargs['template_name'] = 'flickrsets/person/photo_list.html'\n \n if 'template_object_name' not in kwargs:\n kwargs['template_object_name'] = 'photo'\n \n if 'paginate_by' not in kwargs:\n kwargs['paginate_by'] = getattr(\n app_settings,\n 'PERSON_PHOTO_LIST_VIEW_PAGINATE_BY')\n \n person = shortcuts.get_object_or_404(queryset, flickr_id=flickr_id)\n\n if 'extra_context' not in kwargs:\n kwargs['extra_context'] = {}\n kwargs['extra_context']['person'] = person\n \n queryset = person.photos.all()\n \n return list_detail.object_list(request, queryset, **kwargs)", "def get(request):\n return Response(\n GallerySerializer(\n request.user.gallery.all(),\n many=True\n ).data\n )", "def photos(self):\n if \"photos\" in self._prop_dict:\n return PhotosCollectionPage(self._prop_dict[\"photos\"])\n else:\n return None", "def get(self, request):\n success_message = ''\n form = PhotoForm()\n\n context = {\n 'form': form,\n 'msg': success_message\n }\n return render(request, 'photos/add.html', context)", "def photo_detail(request, flickr_id, queryset=None, **kwargs):\n if queryset is None:\n queryset = Photo.objects.all()\n \n for key in ('queryset', 'slug'):\n if key in kwargs:\n del kwargs[key]\n \n kwargs['slug'] = flickr_id\n \n if 'slug_field' not in kwargs:\n kwargs['slug_field'] = 'flickr_id'\n \n if 'template_name' not in kwargs:\n kwargs['template_name'] = 'flickrsets/photo/detail.html'\n \n if 'template_object_name' not in kwargs:\n kwargs['template_object_name'] = 'photo'\n \n return list_detail.object_detail(request, queryset, **kwargs)", "def get_queryset(self):\n return Picture.objects.all()", "def get_image(request):\n data = [{'model': i.bike_model, 'image': i.image, 'id': i.id} for i in BikeDetails.objects.all()]\n data = {'data': data}\n return render(request, 'show_image.html', data)", "def get_photo(self, i):\r\n return self.__photos[i]", "def test_get_photos_paging(self):\n pass", "def photos(self, query, page=1, per_page=10):\n url = \"/search/photos\"\n data = self._search(url, query, page=page, per_page=per_page)\n data[\"results\"] = PhotoModel.parse_list(data.get(\"results\"))\n return data", "def index(request):\n\n #Collect all photos owned by 
the user and not associated with albums for display\n photos = Photo.objects.filter(owner = request.user, album_id=None)\n\n #Collect all albums owned by the user for display\n albums = Album.objects.filter(owner = request.user)\n\n #Form to upload multiple images extends Djangos form.ImageField()\n #Fields taken 'file' (image)\n form = UploadFileForm()\n\n #Form to upload album\n #Fields taken are 'title'(charfield)\n formAlbum = AlbumForm()\n return render_to_response('photos/index.html',\n {\n 'form': form,\n 'formAlbum': formAlbum,\n 'photos': photos,\n 'albums':albums,\n },\n context_instance=RequestContext(request))", "def get_vr_photos(self, count = 30, page = 1):\n uri = 'photos/vr'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def photoset_photo_list(request, flickr_id, queryset=None, **kwargs):\n if queryset is None:\n queryset = Photoset.objects.all()\n \n if 'queryset' in kwargs:\n del kwargs['queryset']\n \n if 'template_name' not in kwargs:\n kwargs['template_name'] = 'flickrsets/photoset/photo_list.html'\n \n if 'template_object_name' not in kwargs:\n kwargs['template_object_name'] = 'photo'\n \n if 'paginate_by' not in kwargs:\n kwargs['paginate_by'] = getattr(\n app_settings,\n 'PHOTOSET_PHOTO_LIST_VIEW_PAGINATE_BY')\n \n photoset = shortcuts.get_object_or_404(queryset, flickr_id=flickr_id)\n\n if 'extra_context' not in kwargs:\n kwargs['extra_context'] = {}\n kwargs['extra_context']['photoset'] = photoset\n \n queryset = photoset.photos.all()\n \n return list_detail.object_list(request, queryset, **kwargs)", "def geotag_photo(self, xml_photo):\n photo_dt = datetime.strptime(xml_photo['datetaken'], '%Y-%m-%d %H:%M:%S')\n prev_wp = None\n prev_photo = None\n found = False\n for w in self.waypoints.all():\n # Do we have a waypoint to compare with?\n\n if prev_wp:\n\n prev_adjusted = (prev_wp.localtime + self._offset_timedelta)\n w_adjusted = (w.localtime + self._offset_timedelta)\n # If this photo is taken between the two waypoints in the loop\n if prev_adjusted < photo_dt and w_adjusted > photo_dt:\n\n print prev_adjusted\n print 'p', photo_dt\n print w_adjusted\n\n # get the timedelata between the waypoints\n td = w_adjusted - prev_adjusted\n total_difference = td.seconds\n\n # calculate the timedelta between the first waypoint and the phot being taken\n td = photo_dt - prev_adjusted\n\n photo_difference = td.seconds\n\n # create a factor that plots the point between the two waypoint timings the photo was taken\n dfactor = photo_difference / float(total_difference)\n\n # multiply the difference between the lat/lon/alt of the waypoins by this factor\n photo_lat = float(prev_wp.latitude) + ((float(w.latitude) - float(prev_wp.latitude)) * dfactor)\n photo_lon = float(prev_wp.longitude) + ((float(w.longitude) - float(prev_wp.longitude)) * dfactor)\n photo_alt = float(prev_wp.altitude) + ((float(w.altitude) - float(prev_wp.altitude)) * dfactor)\n\n # Prepare a waypoint object to store the results of the geotagging calculations\n\n photo_waypoints = WayPoint.objects.filter(\n\n photo_id=xml_photo['id'],\n\n );\n if len(photo_waypoints) > 0:\n\n photo_waypoint = photo_waypoints[0]\n\n else:\n photo_waypoint = WayPoint()\n photo_waypoint.photo_id = xml_photo['id']\n\n photo_waypoint.photo_title = xml_photo['title']\n photo_waypoint.photo_secret = xml_photo['secret']\n photo_waypoint.photo_farm = xml_photo['farm']\n photo_waypoint.photo_server = xml_photo['server']\n\n photo_waypoint.latitude = str(photo_lat)\n photo_waypoint.longitude 
= str(photo_lon)\n photo_waypoint.altitude = str(photo_alt)\n photo_waypoint.gmtime = photo_dt\n photo_waypoint.localtime = photo_dt - self._offset_timedelta\n\n photo_waypoint.save()\n # except:\n # assert False, '.' + xml_photo['id'] + '.'\n return photo_waypoint\n\n prev_wp = w\n\n return False", "def post(self, request, *args, **kwargs):\n paginate_b = request.POST.get('pages_elements', None)\n filter_dict = {'paginate_b': paginate_b}\n\n # If request havent data in fields, then taked all data\n allowed_fields = ['zoom',\n 'matrix_resol',\n 'color',\n 'matrix_size',\n 'country']\n for x, y in [(x, request.POST.getlist(x)) for x in allowed_fields]:\n if y != []:\n filter_dict[x] = y\n else:\n filter_dict[x] = PhotoView._all_data_fields(x)\n\n return PhotoView.get_object_page_filter(self, request, filter_dict)", "def get_user_photos(self, user_id, count = 30, page = 1):\n uri = 'users/' + user_id + '/photos'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def get_photo(mesh, movement, resolution, cmap, plotter, camera, title=None, title_location=\"upper_edge\",\n background_photos=None,cam_noise_lambda=None, background_scale=1, title_color=\"black\"):\n return Mesh.get_many_photos(mesh, movement, resolution, cmap,\n plotter, [camera], title, title_location, background_photos=background_photos,cam_noise_lambda=cam_noise_lambda,\n background_scale=background_scale, title_color=title_color)[0]", "def test_retrieval_of_user_photos(self):\t\n\t\tget_response = self.client.get(reverse('photos'))\n\n\t\tself.assertEqual(get_response.status_code, status.HTTP_200_OK)\n\t\tdata = [i.values() for i in get_response.data]\n\t\tself.assertIn(u'{}'.format(self.image_name), data[0])", "def post(self, request):\n # GET REQUEST DATA\n fid = request.POST.get('fid', False)\n uuid = request.POST.get('uuid', False)\n title_text = request.POST.get('title', False)\n body = request.POST.get('body', False)\n photo = request.FILES.get('photo', False) # FOR STORAGE\n wfsxml = request.POST.get('wfsxml', False) # FOR GEOSERVER\n data = {\n 'uuid': uuid,\n 'title_text': title_text,\n 'body': body,\n 'wfsxml': wfsxml\n }\n # VALIDATE FORM\n form = GeoPostForm(data, request.FILES)\n logger.info(\"\\ninstantiate Geopost form\\n\")\n # IF FORM VALIDATION ERROR\n if not form.is_valid():\n return server_error(request.body)\n #context = self.getContext(form)\n #return render(request, 'geopost/entry.html', context)\n else:\n pass\n # GET CLEAN VALUES\n uuid = form.cleaned_data['uuid']\n wfsxml = form.cleaned_data['wfsxml']\n # UPLOAD PHOTO TO BUCKET\n # if editing existing entry, first delete existing photo\n if fid:\n delete_from_bucket(uuid, self.imageBucket)\n else:\n pass\n photo.open('rb')\n error = upload_to_bucket(\n photo, self.imageBucket, photo.content_type, uuid)\n photo.close()\n # IF ERROR UPLOADING IMAGE\n if error:\n return server_error(error)\n else:\n pass\n # MAKE GEOSERVER WFS TRANSACTION\n error = post_to_geoserver(wfsxml, self.wfsURL)\n # ALL GOOD\n if not error:\n return HttpResponseRedirect(reverse('geopost_home'))\n # IF WFS TRANSACTION ERROR\n else:\n delete_from_bucket(uuid, self.imageBucket)\n return server_error(error)", "def image_list(request):\n return render_to_response('wainz/image_list.html', {\"images_and_votes\": ordered_images(0, 30, request.user)}, context_instance = RequestContext(request))", "def gallery(request):\n\n gallery_images = GalleryImages.objects.all()\n gallery_images = gallery_images.order_by(\"-updated_at\")\n paginator = 
Paginator(gallery_images, 9) # Show 9 images per page.\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n\n context = {\"gallery_images\": gallery_images, \"page_obj\": page_obj}\n return render(request, \"gallery/gallery.html\", context)", "def get_latest_photos(self, count = 30, page = 1):\n uri = 'photos/latest'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def actorphotos(request , actor_id):\n try:\n actors = ActorPhotos.objects.filter(actor_id=actor_id)\n except ActorPhotos.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = ActorPhotosSerializer(actors , many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = ActorPhotosSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def browse(request):\n klass = media_klass_from_request(request)\n filter_args, filter_kwargs = filter_args_from_request(request)\n press_gallery = request.GET.get('press_gallery', '')\n press_gallery = press_gallery.lower() == 'true'\n if press_gallery:\n if not request.user.has_perm('gallery.can_see_press_gallery'):\n raise PermissionDenied\n # for now we only allow photos in the press gallery, not\n # artifacts or any other types. it's okay if the search query\n # doesn't specify a type, or if it specifies the photo type,\n # but we disallow other types of searches. it's a little ugly\n # to be doing explicit type checking, but expedience demands\n # it :P\n if klass is models.MediaBase:\n klass = models.Photo\n if klass is not models.Photo:\n return handler400(request)\n filter_kwargs['in_press_gallery'] = True\n status = request.GET.get('status')\n if not status:\n filter_kwargs['status'] = 'approved'\n elif status != 'approved':\n if not request.user.has_perm('gallery.can_review'):\n # only reviewers can arbitrarily query based on status;\n # non-reviewers are restricted to seeing their own\n # submissions if they search on a status other than\n # 'approved'\n if request.user.is_anonymous():\n raise PermissionDenied\n filter_kwargs['owner'] = request.user\n filter_kwargs['status'] = status\n else:\n filter_kwargs['status'] = status\n\n full_results = klass.objects.filter(*filter_args, **filter_kwargs)\n # reverse the order so most recent is first\n full_results = full_results.order_by('id').reverse()\n tag = request.GET.get('tag')\n if tag:\n full_results = tagmodels.TaggedItem.objects.get_by_model(full_results,\n tag)\n text = request.GET.get('text')\n if text:\n full_results = apply_searchable_text_filter(full_results, text)\n paginator = BetterPaginator(full_results, settings.PAGINATION_BATCH_SIZE)\n try:\n page = int(request.GET.get('page', '1'))\n except ValueError:\n page = 1\n try:\n page_results = paginator.page(page)\n except (InvalidPage, EmptyPage):\n page_results = paginator.page(paginator.num_pages)\n query_string = request.META.get('QUERY_STRING')\n # remove 'page' from query string so it doesn't get used in the template\n query_map = parse_qs(query_string)\n query_map.pop('page', None)\n query_string = urlencode(query_map, doseq=True)\n for extra_filter in ('owner', 'tag', 'press_gallery'):\n if extra_filter in query_map:\n del(query_map[extra_filter])\n qs_no_extra_filters = urlencode(query_map, doseq=True)\n no_extra_filters_url = 
reverse('bm.gallery.views.browse')\n if qs_no_extra_filters:\n no_extra_filters_url = '%s?%s' % (no_extra_filters_url,\n qs_no_extra_filters)\n template_map = {'page_results': page_results,\n 'total_count': full_results.count(),\n 'query_string': query_string,\n 'no_extra_filters_url': no_extra_filters_url,\n 'paginator': paginator.get_context(page),\n 'page': page}\n owner_username = request.GET.get('owner')\n if owner_username:\n template_map['owner'] = authmodels.User.objects.get(username=owner_username)\n if tag:\n template_map['tag'] = tag\n if press_gallery:\n template_map['press_gallery'] = True\n context = RequestContext(request, template_map)\n return render_to_response('gallery/browse.html', context)", "def maps(request):\n #convert image locations to google maps parsable points\n now = datetime.utcnow().replace(tzinfo=utc)\n latlngs = search_utils.filter_date(search_utils.min_date, now)\n points = [search_utils.to_map_point(image) for image in latlngs]\n #load the search form sidebar\n t = loader.get_template(\"wainz/search_form.html\")\n ctx = Context({})\n search_form = t.render(ctx)\n\n return render_to_response('wainz/maps.html', {\"latLngs\":points, \"search_form\":search_form, \"typeAheadTags\":Tag.objects.all()}, context_instance = RequestContext(request))", "def render_to_response(self, context, **response_kwargs):\n if 'json' in self.request.GET.get('format', ''):\n s = serializers.serialize('json', context.get('photos'))\n return HttpResponse(s, content_type=\"application/json\")\n\n # Business as usual otherwise\n else:\n return super(PropertyPhotosView, self).render_to_response(context, **response_kwargs)", "def get_photos_url(self, page_number=1):\n return \"{}{}?client_id={}&per_page={}&page={}\".format(\n self.base_url, self.home_url, self.client_id, self.limit, page_number\n )", "def get_photos_by_hashtag(self, hashtag, count = 30, page = 1):\n uri = 'hashtags/' + hashtag + '/photos'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def object_get(self, request):\n _view = _object_view(self, request)\n queried = ObjectPostings(self.cdb_object_id, request.params.mixed()).query()\n postings = [request.view(obj) for obj in queried[0]]\n\n _view.update({\n \"postings\": postings,\n \"result_complete\": queried[1]\n })\n return _view", "def random_photos(self):\n return self.waypoints.filter(photo_id__isnull=False).order_by('?')[:10]", "def images(request, name):\n return render(request, 'images.html', {\"images\": get_images(name)})", "def testimage_handler(self):\n\t\t\n\t\tthings = Thing.objects.all()\n\t\tif len( things ):\n\t\t\tthing = things[0]\n\t\telse:\n\t\t\tc = Client()\n\t\t\tdata = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )\n\t\t\tdata[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )\n\t\t\tc.post( '/api/place/', data )\n\t\t\t\n\t\t\tthing = Thing.objects.all()[0]\n\n\t\t\n\t\turi = thing.media.replace( 'http://' + DOMAIN, '' )\n\t\t\n\t\tc = Client()\n\t\tresponse = c.get( uri )\n\t\tself.failUnlessEqual(response.status_code, 200)", "def index(request):\n photos = Image.objects.all().order_by('-created')\n no_of_photos = Image.objects.all().count()\n context = {\"photos\": photos, 'no_of_photos': no_of_photos}\n return render(request, 'photo/index.html', context=context)", "def images(self, **kwargs):\n\n raise NotImplementedError", "def download_instagram_photos( venue_id, start_timestamp, end_timestamp, client = 
default_client()):\n instagram_id = client.location_search(foursquare_v2_id = venue_id)\n instagram_id = instagram_id[0].id\n print 'instagram id ', instagram_id\n gen = client.location_recent_media(count = 200, location_id = instagram_id, max_pages=500, min_timestamp=start_timestamp, max_timestamp=end_timestamp, as_generator = True)\n for page in gen:\n yield (page[0], venue_id, instagram_id)", "def image_api():\n PAGE_SIZE=50\n page = int(request.args.get('page', 0))\n print page\n userid = current_user.id\n out= []\n query = db_session.query(Image, Batch.status).\\\n outerjoin(Batch, Image.batch==Batch.batch_id).\\\n filter(Image.user==userid)\n\n count = query.count()\n for row in query.limit(PAGE_SIZE).offset(page*PAGE_SIZE):\n out.append({\n \"url\": url_for('image_views.raw_image', image_path=row.Image.path),\n \"page\": url_for('image_views.view_image', image_id=row.Image.id),\n \"title\": row.Image.title,\n \"status\": row.status\n })\n\n return jsonify({\"images\": out, \"count\": count})", "def get_album_photos_json():\n args = request.args.to_dict()\n # print(args)\n if request.method == 'GET':\n if len(args) > 0:\n\n if 'limit' not in args.keys():\n args['limit'] = 20\n\n if 'offset' not in args.keys():\n args['offset'] = 0\n\n album_data = a.get_album_photos_in_range(\n args['album_id'],\n args['limit'],\n args['offset']\n )\n json_data = album_data\n return jsonify(json_data)\n\n if request.method == 'POST':\n # print('test', request.get_json())\n data = request.get_json()\n a.remove_photos_from_album(data['albumId'], data['photos'])\n return redirect(\"/albums/{}\".format(data['albumId']), code=302)", "def test_get_photos(self):\n recipe = Recipes.objects.create(chef=self.user, draft=False, private=False)\n photo = Photos.objects.create(recipe=recipe, photo_order=1)\n\n url = '/0/chefs/%i/photos' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('photos', resp.data)\n self.assertEqual(1, len(resp.data['photos']))\n keys = ('edit_date', 'creation_date', 'id', u'temperature', 'url', 'recipe', 'cover',\n 'time', 'instructions', 'order', 'quantity')\n self.assertEqual(set(keys), set(resp.data['photos'][0].keys()))", "def test_users_photos_view_set_get_own_photos(self):\n # Create user and data\n user = account_models.User.objects.create_user(email='[email protected]', password='pass', username='aov_hov')\n category = photo_models.PhotoClassification.objects\\\n .create_or_update(name='Test', classification_type='category')\n\n photo1 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo1-min.jpg', 'rb')), user=user)\n photo1.save()\n photo1.category.set([category])\n photo1.save()\n\n photo2 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo2-min.jpg', 'rb')), user=user)\n photo2.save()\n photo2.category.set([category])\n photo2.save()\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/users/{}/photos'.format(user.id), format='json')\n result = request.data['results']\n\n self.assertEquals(request.status_code, 200)\n self.assertEquals(len(result), 2)\n self.assertEquals(result[0]['id'], photo2.id) # newest first\n self.assertIn('dimensions', result[0])\n self.assertIn('image_blurred', result[0])\n self.assertIn('image', 
result[0])", "def getimage(self):", "def photos(self, photos):\n\n self._photos = photos", "def get(self):\n\n self.render_posts()", "def get(owner_id=None, album_id=None, photo_ids=None, rev=None, extended=None,\\\n feed_type=None, feed=None, photo_sizes=None, offset=None, count=None):\n params = {\n 'owner_id': owner_id,\n 'album_id': album_id,\n 'photo_ids': photo_ids,\n 'rev': rev,\n 'extended': extended,\n 'feed_type': feed_type,\n 'feed': feed,\n 'photo_sizes': photo_sizes,\n 'offset': offset,\n 'count': count\n }\n result = call('photos.get', **params)\n return parse_response(result)", "def picture_list(request):\n paginator = Paginator(Picture.objects.all(), 9)\n page = request.GET.get('page') or 1\n\n picture_page = paginator.get_page(page)\n\n data = {\n \"page\": picture_page.number,\n \"pageCount\": picture_page.paginator.num_pages,\n \"data\": PictureListSerializer(picture_page, many=True).data\n }\n\n return JsonResponse(data, status=200)", "def get_image(self):\r\n posts_with_images = self.post_set.filter(image__gt='')\r\n if posts_with_images:\r\n return posts_with_images[0].image", "def get_context_data(self, **kwargs):\n context = super(PhotoList, self).get_context_data()\n return context", "def get_image(request):\n collected_values = {}\n\n # Only allow GET requests for this endpoint\n if request.method != 'GET':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n\n image_type = request.GET['image_type']\n image_index = request.GET['image_index']\n\n # Check the DB for an image with the same image_type and id\n images = Images.objects.filter(image_type=image_type, image_index=image_index)\n if not images:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Image doesn't exist\"\n return JsonResponse(collected_values, status=400)\n\n collected_values[\"image_index\"] = images[0].image_index\n collected_values[\"image_id\"] = images[0].iid\n collected_values[\"image_type\"] = images[0].image_type\n collected_values[\"image_category\"] = images[0].image_category\n collected_values[\"link\"] = images[0].link\n collected_values[\"message\"] = images[0].message\n collected_values[\"success\"] = True\n\n LOGGER.info(\"Get Image Result: %s\", collected_values)\n return JsonResponse(collected_values, status=200)", "def get_images(self, ctx, page):\n is_imgur = 'source' in page.meta and page.meta['source'] == 'imgur'\n if 'type' in page.meta and page.meta['type'] == 'album':\n album = page.meta\n images = []\n if is_imgur:\n pp.pprint(page.meta)\n # bind to template via json\n images = self.get_imgur_album_images(page)\n self.albums[album['slug']] = images\n else:\n # get paths of all of the images in the album\n srcs = []\n # get absolute paths of images in album for each file type\n for file_type in FILE_TYPES:\n imgs = glob.glob(\n GALLERY_DIR + album['slug'] + '/*.' 
+ file_type\n )\n\n for img in imgs:\n img_rel_path = (\n REL_GALLERY_DIR +\n album['slug'] + '/' + img.split('/')[-1]\n )\n srcs.append(img_rel_path)\n\n # split full srcs and thumb srcs from srcs into two lists\n images = []\n thumb_srcs = filter(\n lambda src: src.split('/')[-1].startswith(THUMB_PREFIX),\n srcs\n )\n for thumb_src in thumb_srcs:\n src = thumb_src.replace(THUMB_PREFIX, '')\n thumb_width, thumb_height = self.calc_img_hw(thumb_src)\n width, height = self.calc_img_hw(src)\n images.append({\n 'thumb_src': thumb_src,\n 'thumb_width': thumb_width,\n 'thumb_height': thumb_height,\n\n 'src': src,\n 'width': width,\n 'height': height,\n })\n self.albums[album['slug']] = images", "def photoset_list(request, queryset=None, **kwargs):\n if queryset is None:\n queryset = Photoset.objects.all()\n \n if 'queryset' in kwargs:\n del kwargs['queryset']\n \n if 'template_name' not in kwargs:\n kwargs['template_name'] = 'flickrsets/photoset/list.html'\n \n if 'template_object_name' not in kwargs:\n kwargs['template_object_name'] = 'photoset'\n \n if 'paginate_by' not in kwargs:\n kwargs['paginate_by'] = getattr(\n app_settings,\n 'PHOTOSET_LIST_VIEW_PAGINATE_BY')\n \n return list_detail.object_list(request, queryset, **kwargs)", "def post_list(request):\n\n if request.method == 'GET':\n posts = Post.objects.all()\n serializer = PostSerializer(posts, many=True)\n return Response(serializer.data)\n elif request.method == 'POST':\n context = {}\n uploaded_file = request.FILES['file']\n fs = FileSystemStorage()\n name = fs.save(uploaded_file.name, uploaded_file)\n context['url'] = fs.url(name)\n request.data['file'] = context['url']\n request.data['user_id'] = 1\n # request.data['category_id'] = 1\n serializer = PostSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def get_flickr_photos(size=\"big\", tags='endangered species, butterfly'):\n # Get the ID of the photos and load it in the output var\n print('Contacting Flickr for photos')\n url = \"https://api.flickr.com/services/rest/\"\n parameters = {\n 'method': 'flickr.photos.search',\n 'api_key': flickr.API_KEY,\n 'license': '1',\n 'format': 'json',\n 'tags': tags,\n 'nojsoncallback': 1}\n\n query = url + \"?\" + urllib.urlencode(parameters)\n print query\n urlobj = urllib2.urlopen(query)\n data = urlobj.read()\n print data\n urlobj.close()\n # The returned JSON object by Flickr is not correctly escaped,\n # so we have to fix it see\n # http://goo.gl/A9VNo\n regex = re.compile(r'\\\\(?![/u\"])')\n fixed = regex.sub(r\"\\\\\\\\\", data)\n output = json.loads(fixed)\n print('Data retrieved from Flickr')\n\n # For each photo ID create its direct URL according to its size:\n # big, medium, small (or thumbnail) + Flickr page hosting the photo\n photos = []\n url = 'https://api.flickr.com/services/rest'\n for photo in output['photos']['photo']:\n # Get photo info\n parameters = {\n 'method': 'flickr.photos.getInfo',\n 'api_key': flickr.API_KEY,\n 'photo_id': photo['id'],\n 'secret': photo['secret'],\n 'format': 'json',\n 'nojsoncallback': 1\n }\n query = url + \"?\" + urllib.urlencode(parameters)\n print query\n urlobj = urllib2.urlopen(query)\n data = urlobj.read()\n #print data\n urlobj.close()\n photo_data = json.loads(data)\n\n imgUrl_m = \"https://farm%s.staticflickr.com/%s/%s_%s_m.jpg\" % (photo['farm'], photo['server'], photo['id'], photo['secret'])\n imgUrl_b = 
\"https://farm%s.staticflickr.com/%s/%s_%s_b.jpg\" % (photo['farm'], photo['server'], photo['id'], photo['secret'])\n photos.append({'url_m': imgUrl_m,\n 'url_b': imgUrl_b,\n 'photo_info': photo_data})\n return photos", "def image(request, image_id):\n\n image = get_object_or_404(GalleryImages, pk=image_id)\n\n context = {\n \"image\": image,\n }\n return render(request, \"gallery/image.html\", context)", "async def gallery_post(self, num: int) -> GalleryPost:\n return GalleryPost(**await self.get(f\"/gallery/view/{num}\"))", "def get(self, request):\n context = self.getContext(GeoPostForm())\n return render(request, 'geopost/entry.html', context)", "def fetch_photos(n):\n\n # This is the list we will use the pass back the photo information.\n data = []\n\n # First, we search for photos taken in Manchester.\n response = requests.get(f'https://api.flickr.com/services/rest/?method=flickr.photos.search&api_key={FLICKR_API_KEY}&lat=53.48&lon=-2.23&radius=10&radius_units=km&format=json&nojsoncallback=1')\n\n # Now loop through the photos.\n for photo in sample(response.json()['photos']['photo'], n):\n\n # We will search with the photo ID.\n id = photo['id']\n\n # Get the photo details. We can get the URL to the photo from here.\n response = requests.get(f'https://api.flickr.com/services/rest/?method=flickr.photos.getSizes&api_key={FLICKR_API_KEY}&photo_id={id}&format=json&nojsoncallback=1')\n\n # Extract the photo URL from the response.\n url = response.json()['sizes']['size'][-1]['source']\n\n # Store our photo ID and URL.\n data.append({\n 'title': photo['title'],\n 'id': photo['id'],\n 'url': url,\n })\n\n # Send back our list of photos.\n return data", "def test_users_photos_view_set_get_no_photos(self):\n # Create user\n user = account_models.User.objects.create_user(email='[email protected]', password='pass', username='aov_hov')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/users/{}/photos'.format(user.id), format='json')\n result = request.data['results']\n\n self.assertEquals(request.status_code, 200)\n self.assertEquals(len(result), 0)", "def upload2(request):\n uploaded = request.read\n fileSize = int(uploaded.im_self.META[\"CONTENT_LENGTH\"])\n fileName = uploaded.im_self.META[\"HTTP_X_FILE_NAME\"] \n fileContent = uploaded(fileSize)\n \n \"\"\"Write image to disk.\"\"\"\n fn, ext = os.path.splitext(fileName)\n name = fn + timezone.now().strftime(\"%Y_%m_%d_%H_%M_%S_%f\") + base64.urlsafe_b64encode(os.urandom(settings.SALT_LENGHT)) + ext\n fileHandler = open(settings.MEDIA_ROOT + \"images/\" + name, \"wb\")\n fileHandler.write(fileContent)\n fileHandler.close()\n \n \"\"\"Create md5hash digest for image.\"\"\"\n base64string = base64.b64encode(fileContent)\n mdfive = md5.new(base64string).hexdigest()\n \n \"\"\"Write image data to db.\"\"\"\n latitude = request.GET.get('lat')\n longitude = request.GET.get('lon')\n tags = request.GET.get('tags').split(' ')\n\n image = Image(title = name, md5hash = mdfive, pub_date = timezone.now(), lat = latitude, lon = longitude)\n image.save()\n\n for tagtext in tags:\n if Tag.objects.filter(name=tagtext).exists():\n t = Tag.objects.get(name=tagtext)\n else:\n t = Tag(name = tagtext)\n t.save()\n image.tags.add(t)\n image.save()\n\n return HttpResponse('{\"success\": true}')", "def fetchGeoData():\n if request.method ==\"POST\":\n result = {}\n if request.get_json():\n post_requests = 
request.get_json()\n print(post_requests)\n result = db.getmapdata(post_requests['attr']) \n return result", "def get_photos(self, user_id):\n\n json_photos = self._receive_photos_from_vk(user_id)\n return self._parse_photos(json_photos)", "def get_a_national_geographic_archive_wallpaper_remote():\r\n\r\n logging.debug('get_a_national_geographic_archive_wallpaper_remote()')\r\n\r\n # get image url\r\n if use_proxy:\r\n response = requests.get(\"https://www.nationalgeographic.com/photography/photo-of-the-day/\", proxies=proxies, timeout=5, verify=False)\r\n match = re.search('.*\\\"endpoint\\\":\\\"([^\\\"]*gallery\\.json)\\\".*', response.text)\r\n gallery_json = match.group(1)\r\n response = requests.get(gallery_json, proxies=proxies, timeout=5, verify=False)\r\n else:\r\n response = requests.get(\"https://www.nationalgeographic.com/photography/photo-of-the-day/\")\r\n match = re.search('.*\\\"endpoint\\\":\\\"([^\\\"]*gallery\\.json)\\\".*', response.text)\r\n gallery_json = match.group(1)\r\n response = requests.get(gallery_json)\r\n image_data = json.loads(response.text)\r\n for i in range(0, len(image_data[\"items\"])):\r\n full_image_url = image_data[\"items\"][i][\"image\"][\"uri\"]\r\n \r\n # image's name\r\n image_name = get_generated_image_name(full_image_url)\r\n \r\n # Check and maintain DB\r\n if not exists_image_in_database(full_image_url) and i+1 < len(image_data[\"items\"]):\r\n add_image_to_database(full_image_url, image_name, \"nationalarchive\")\r\n # download and save image\r\n full_image_path = download_image(full_image_url, image_name)\r\n update_image_in_database(full_image_url, full_image_path)\r\n\r\n # Return full path to image\r\n logging.debug('get_a_national_geographic_archive_wallpaper_remote - full_image_path = {}'.format(full_image_path))\r\n return full_image_path\r\n elif i+1 == len(image_data[\"items\"]):\r\n full_image_path = get_image_path_from_database(full_image_url)\r\n\r\n # Return full path to image\r\n logging.debug('get_a_national_geographic_archive_wallpaper_remote - full_image_path = {}'.format(full_image_path))\r\n return full_image_path", "def get_context_data(self, **kwargs):\n user = ImagerProfile.objects.get(user__username=self.request.user.username)\n # import pdb;\n context = super(ProfileView, self).get_context_data(**kwargs)\n photos = self.request.user.photos.all()\n ph_public = len(photos.filter(published=\"Public\"))\n ph_private = len(photos.filter(published=\"Private\"))\n albums = self.request.user.albums.all()\n al_public = len(albums.filter(published=\"Public\"))\n al_private = len(albums.filter(published=\"Private\"))\n context = {'user': user, 'ph_public': ph_public, 'ph_private': ph_private,\n 'al_public': al_public, 'al_private': al_private}\n return context", "def get(self, request):\n context = self.getContext(GeoPostForm())\n return render(request, 'geopost/home.html', context)", "def home(request):\n current_user = request.user\n\n # return_list = []\n # for image in all_images:\n # return_list.append((image, image.image_likes.filter(profile_owner=request.user)))\n\n return render(request,'main_templates/landing.html',{'user':current_user})", "def list_images(self):\n raise NotImplementedError()", "def image(request, img_id):\n image = Image.objects.get(pk=img_id)\n if request.user.is_staff or image.is_approved:\n comments = ImageComment.objects.filter(image_id=img_id).order_by('-submission_date')\n comments_and_votes = Vote.objects.get_weighted_scores_in_bulk(comments, request.user)\n\n ctx = {\"img\":image,\n 
\"comments_and_votes\":comments_and_votes,\n \"image_tags\":image.tags.all(),\n \"all_tags\":Tag.objects.all(),\n \"site\":get_current_site(request)\n }\n return render_to_response('wainz/image.html', ctx , context_instance = RequestContext(request))\n else:\n return HttpResponseRedirect(reverse('wainz.views.composite'))", "def GetPicturesForAll(self, limit = -1, since = -1):\n\n if (limit < 1):\n limit = self.limit\n\n url = self.__BuildGetUrl(\"pictures\", \"\", limit, since)\n return self.__GetJson(url, False)", "def sharing_get(self, request):\n _view = _object_view(self, request)\n queried = SharingCollection(request.params.mixed()).query()\n objs = [request.view(obj) for obj in queried[0]]\n _view.update({\n \"postings\": objs,\n \"result_complete\": queried[1]\n })\n return _view", "def get(self, query=None):\n\n if not 'job_id' in query and not 'url' in query:\n return http_error('400 Bad Request')\n\n if int == type(query['job_id']):\n images = self.images_model.get_by_job_id(query['job_id'])\n else:\n images = self.images_model.get_by_url(query['url'])\n\n result_view = view('result.json', {'images': json.dumps(images)})\n return responder(result_view, 'application/json')", "def photos():\n cwd = os.getcwd()\n db_path = os.path.join(cwd, CLI_PHOTOS_DB)\n return PhotosDB(db_path).photos(intrash=True)", "def get(self, request, *args, **kwargs):\n\n liked_photos = None\n l_squarefollowings_count = None\n l_best_media = None\n\n # Common for all members views ===================================================\n l_categories = Category.objects.all()\n l_attributes = Attribute.objects.all()\n try:\n logged_member = Member.objects.get(django_user__username=request.user)\n show_describe_button = logged_member.is_editor(request)\n is_monthly_member = logged_member.is_monthly_member()\n is_yearly_member = logged_member.is_yearly_member()\n except ObjectDoesNotExist:\n logged_member = None\n except:\n raise HttpResponseNotFound\n\n l_squarefollowings_count = SquareFollowing.objects.filter(member_id2=logged_member).count()\n if l_squarefollowings_count >= MIN_SQUAREFOLLOWINGS:\n\n # END Common for all members views ===============================================\n l_squarefollowing_queryset = SquareFollowing.objects.all()\n\n l_token = logged_member.get_member_token(request)\n instagram_session = InstagramSession(p_is_admin=False, p_token=l_token['access_token'])\n instagram_session.init_instagram_API()\n\n l_smart_feed_helper = SmartFeedHelper(\n p_feed_owner_instagram_id=logged_member.instagram_user_id,\n p_instagram_session=instagram_session,\n p_batch_size=SMART_FEED_BATCH_SIZE,\n p_min_id=logged_member.smartfeed_last_seen_instagram_photo_id\n )\n l_best_media = l_smart_feed_helper.find_best_media(\n p_media_to_return=SMART_FEED_BATCH_SIZE,\n p_starting_media_id=None,\n p_logged_member=logged_member,\n p_max_days=30\n )\n\n liked_photos = []\n for x_media in l_best_media:\n my_likes = MyLikes(request.user.username, x_media.id, instagram_session )\n has_user_liked_media, no_of_likes = my_likes.has_user_liked_media()\n if has_user_liked_media:\n liked_photos.extend([x_media.id])\n\n\n # Limit calculation --------------------------------------------------------------\n logged_member.refresh_api_limits(request)\n x_ratelimit_remaining, x_ratelimit = logged_member.get_api_limits()\n\n x_ratelimit_used = x_ratelimit - x_ratelimit_remaining\n if x_ratelimit != 0:\n x_limit_pct = (x_ratelimit_used / x_ratelimit) * 100\n else:\n x_limit_pct = 100\n # END Limit calculation 
----------------------------------------------------------\n\n return render(request,\n self.template_name,\n dict(\n best_media=l_best_media,\n liked_photos=liked_photos,\n squarefollowings_count=l_squarefollowings_count,\n new_friends_interaction=0,\n\n is_monthly_member=is_monthly_member,\n is_yearly_member=is_yearly_member,\n logged_member=logged_member,\n x_ratelimit_remaining=x_ratelimit_remaining,\n x_ratelimit=x_ratelimit,\n x_limit_pct=x_limit_pct,\n categories=l_categories,\n attributes=l_attributes,\n )\n )", "def get_photos_by_category(self, category_id, count = 30, page = 1):\n uri = 'categories/' + category_id + '/photos'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def detail(self, req):\n params = {\n 'filters': self._get_filters(req),\n 'limit': self._get_limit(req),\n }\n\n if 'marker' in req.str_params:\n params['marker'] = self._get_marker(req)\n\n images = db_api.image_get_all_public(None, **params)\n\n image_dicts = [make_image_dict(i) for i in images]\n return dict(images=image_dicts)", "def list_all_size(self, path=None):\n user_id = path or self.user_id\n if not user_id:\n raise ValueError(\"You must either specify a user ID at \"\n \"storage instanciation or at \"\n \"list_image_and_thumb launching.\")\n url_types = ['url_o', 'url_s', 'url_q', 'url_t', 'url_l', 'url_m', 'url_n', 'url_z', 'url_c']\n params = {\n 'method': 'flickr.people.getPublicPhotos',\n 'user_id': user_id,\n 'extras': ','.join(url_types)\n }\n response = self.oauth_session.get(self.API_ENDPOINT, params=params)\n json_response = response.json()\n if json_response['stat'] == 'fail':\n raise FlickrError(json_response['message'])\n urls = [pho for pho in json_response['photos']['photo']]\n return urls", "def getAll(owner_id=None, extended=None, offset=None, count=None, photo_sizes=None,\\\n no_service_albums=None, need_hidden=None, skip_hidden=None):\n params = {\n 'owner_id': owner_id,\n 'extended': extended,\n 'offset': offset,\n 'count': count,\n 'photo_sizes': photo_sizes,\n 'no_service_albums': no_service_albums,\n 'need_hidden': need_hidden,\n 'skip_hidden': skip_hidden\n }\n result = call('photos.getAll', **params)\n return parse_response(result)", "def _build_photos_url(self, photo):\n extension = '.jpg'\n base_url = 'https://farm%s.staticflickr.com/%s/%s_%s' % (\n photo['farm'],\n photo['server'],\n photo['id'],\n photo['secret']\n )\n\n return {\n 'thumbnail': base_url + '_t' + extension,\n 'original': base_url + '_b' + extension\n }", "def recipes_list(request):\n recipes = Recipe.objects.all().order_by('published_date')\n\n # prawidlowy sposob zbierania URLa - object.image.url\n # recipe = recipes[0]\n # print(\"path: \" + recipe.image.url)\n\n paginator = Paginator(recipes, INITIAL_PAGE_SIZE)\n page = paginator.page(1)\n\n context = {\n 'page': page,\n 'display_likes': True,\n }\n\n return render(request, 'recipes_index.html', context)", "def explore_popular_search(request):\n if request.method == \"POST\":\n token = request.data.get('token')\n post_id = request.data.get('post_id')\n type_ = request.data.get('type')\n keyword = request.data.get('keyword')\n radius = int(request.data.get('radius'))\n\n if Token.objects.filter(key=token).exists():\n token = get_object_or_404(Token, key=token)\n up = UserProfile.objects.get(user_id=token.user_id)\n user_ids = []\n\n for u_p in UserProfile.objects.all():\n try:\n if radius < 0 or get_straight_distance(up.latitude, up.longitude, u_p.latitude, u_p.longitude) < radius:\n 
user_ids.append(u_p.user_id)\n except Exception, e:\n pass\n\n posts_ids = PostHashtag.objects.filter(hashtag__contains=keyword). \\\n values_list(\"post_id\", flat=True)\n posts = Post.objects.filter(Q(text__contains=keyword) | Q(pk__in=posts_ids)).filter(author_id__in=user_ids)\n\n posts = list(posts.order_by(\"-count_likes\", \"-id\"))\n if post_id == -1:\n posts = posts[:PAGE_OFFSET]\n for i in range(len(posts)):\n if posts[i].id == post_id:\n if type_ == 'old':\n posts = posts[i+1:i+PAGE_OFFSET+1]\n else:\n posts = posts[max(0, i-PAGE_OFFSET):i]\n break\n\n serializer = PostSerializer(posts, context={'user_id': token.user_id}, many=True)\n return Response({\"success\": 63,\n \"posts\": serializer.data})\n else:\n return Response({\"error\": 17})", "def add_photo(self):\n scroll_to_top()\n click_imageview_by_id('photo')\n # choose photo from gallery\n click_textview_by_index(0)\n camera.get_picture_by_camera()\n sleep(6)\n activityName = get_activity_name()\n if activityName == 'com.android.gallery3d.app.CropImage':\n click_textview_by_id('save')\n sleep(5)\n scroll_to_bottom()\n scroll_to_top()\n\n return", "def get_photos_by_tag(self, tag_name):\n # q_data = None\n\n query_string = '''\n select photo_id, photo_title, views, tag_name, large_square from photo\n join photo_tag using(photo_id)\n join images using(photo_id)\n where tag_name = \"{}\"\n order by views desc\n '''.format(tag_name)\n\n tag_data = self.db.get_query_as_list(query_string)\n\n # print(tag_data)\n\n rtn_dict = {\n 'tag_info': {'number_of_photos': self.get_photo_count_by_tag(tag_name)}\n }\n\n count = 0\n for t in tag_data:\n rtn_dict[count] = t\n rtn_dict[count]['human_readable_tag'] = name_util.make_decoded(\n rtn_dict[count]['tag_name'])\n count += 1\n\n return rtn_dict", "def search(request):\n user = request.user\n request_data = json.loads(request.body)\n if request_data.get('latitude') and request_data.get('longitude'):\n print('getting type')\n print (type(request_data.get('latitude')))\n print (type(request_data.get('longitude')))\n if not hasattr(user, 'hugposting'):\n # Create hug posting\n posting = HugPosting(latitude=request_data.get('latitude'), longitude=request_data.get('longitude'),\n user_origin=user)\n else:\n posting = user.hugposting\n posting.latitude = request_data.get('latitude')\n posting.longitude = request_data.get('longitude')\n posting.save()\n if not posting.paired:\n total_posts = {}\n for hposts in HugPosting.objects.all():\n if not (hposts == user.hugposting):\n if not hposts.paired:\n distance = (posting.latitude - float(hposts.latitude))**2 + (posting.longitude - float(hposts.longitude))**2\n total_posts[distance] = hposts\n if total_posts:\n # Got a pair, return coordinate\n posting.pair = total_posts[min(total_posts)]\n posting.save()\n if not posting.paired:\n # Show wait thing\n response_data = {'status': 'wait'}\n return Response(data=response_data, status=status.HTTP_200_OK)\n else:\n # Getting midpoint\n # Compute path from 1 to 2# Define the path from 1 to 2\n l = Geodesic.WGS84.InverseLine(posting.paired.latitude, posting.paired.longitude,\n posting.latitude, posting.longitude)\n m = l.Position(0.5 * l.s13)\n # lat = (posting.paired.latitude + posting.latitude)/2\n # lng = (posting.paired.longitude + posting.longitude)/2\n response_data = {'status': 'success',\n 'latitude': m['lat2'],\n 'longitude': m['lon2'],\n }\n return Response(data=response_data, status=status.HTTP_200_OK)", "def get_queryset(self):\n queryset = ArticleImage.objects.all()\n id = 
self.request.query_params.get('artile_id', None)\n if id is not None:\n queryset = queryset.filter(artile_id=id)\n return queryset", "def get_many_photos(mesh, movement, resolution, cmap, plotter, camera, title=None, title_location=\"upper_edge\",\n background_photos=None, background_scale=1, title_color=\"black\", cam_noise_lambda=None):\n to_return = np.zeros(shape=(len(camera), resolution[1], resolution[0], 4))\n num_of_mesh = len(mesh)\n if background_photos:\n plotter.add_background_image(random.choice(background_photos), scale=background_scale)\n if cam_noise_lambda:\n cam_noise = np.zeros((len(camera), 3, 3))\n cam_noise[:,0] += np.random.normal(0, cam_noise_lambda[0], (len(camera), 3))\n cam_noise[:,1] += np.random.normal(0, cam_noise_lambda[1], (len(camera), 3))\n cam_noise[:,2] += np.random.normal(0, cam_noise_lambda[2], (len(camera), 3))\n camera = np.array(camera) + cam_noise\n\n if num_of_mesh == 1:\n mesh = [mesh]\n for i in range(num_of_mesh):\n if not mesh[i].texture:\n plotter.add_mesh(mesh[i].pv_mesh, cmap=cmap,\n name='get_photo_' + str(i))\n else:\n plotter.add_mesh(mesh[i].pv_mesh, texture=mesh[i].texture, name='get_photo_mesh_' + str(i))\n plotter.update_coordinates(movement[i], mesh=mesh[i].pv_mesh)\n if title:\n plotter.add_text(title, position=title_location, font_size=10, color=title_color, name=\"title\", shadow=True)\n plotter.set_background(color=\"white\")\n plotter.show(auto_close=False, window_size=resolution)\n for idx, cam in enumerate(camera):\n plotter.set_position(cam[0])\n plotter.set_focus(cam[1])\n plotter.set_viewup(cam[2])\n depth = plotter.get_image_depth(fill_value=None)\n depth = np.abs(depth)\n screen = plotter.screenshot(window_size=resolution)\n screen = screen / 255\n to_return[idx] = np.append(screen, depth.reshape(resolution[1], resolution[0], 1), axis=-1)\n if background_photos:\n plotter.remove_background_image()\n return np.asarray(to_return, np.float32)", "def get(self, request, *args, **kwargs):\n my_normal_post_lists = NormalPosts.objects.filter(uploded_by=request.user.normalprofile).order_by(\"-id\")\n return render(request, self.template_name, {\n 'my_normal_post_lists': my_normal_post_lists,\n })", "def pic (self, list) : \n result = []\n for pmod in list :\n result.append (pmod.photo_uri)\n return result" ]
[ "0.6741567", "0.6559008", "0.6350902", "0.6310838", "0.62246984", "0.6191355", "0.618758", "0.618606", "0.6176454", "0.61532885", "0.61340106", "0.6112136", "0.61067706", "0.6101333", "0.6097831", "0.6021922", "0.6020295", "0.5962198", "0.5938344", "0.5936891", "0.59121317", "0.58791775", "0.5854914", "0.58522993", "0.58309203", "0.58243054", "0.58223534", "0.57800037", "0.5749416", "0.57103354", "0.5706823", "0.56941324", "0.5690874", "0.56804043", "0.5679457", "0.5676631", "0.5663746", "0.5653374", "0.56460077", "0.56433046", "0.56245583", "0.5598046", "0.5591853", "0.5581662", "0.55673337", "0.555486", "0.5554542", "0.5546678", "0.5545557", "0.55013794", "0.54994273", "0.5496599", "0.549603", "0.5491168", "0.5490901", "0.54892606", "0.5488983", "0.54755604", "0.5467708", "0.5463706", "0.5445234", "0.54239833", "0.54203236", "0.54200387", "0.5392972", "0.5368776", "0.5365204", "0.53532743", "0.5342272", "0.5337124", "0.53106326", "0.5293852", "0.5283254", "0.52814907", "0.52714384", "0.5253556", "0.5253379", "0.5252121", "0.5247475", "0.5240006", "0.522602", "0.522555", "0.522313", "0.52214694", "0.52195823", "0.5218693", "0.5213765", "0.5209146", "0.52059186", "0.5199018", "0.51967144", "0.5194111", "0.5190909", "0.5186329", "0.5181798", "0.51679", "0.51615375", "0.5152304", "0.51377434", "0.5137474" ]
0.61628646
9
Download pdf of VanTechy presentation slideshow.
def vantechy(request): return FileResponse(open('/files/presentation.pdf', 'rb'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_pdf(url, browser):\n\tpass\n\n\t# grab link page\n\n\t# search soup for pdf file\n\n\t# grab pdf file and return it", "def download(filename):\n return send_from_directory(directory='pdf', filename=filename)", "def download(request, ef_id):\n ef = get_object_or_404(ExamFile, id=ef_id)\n path = os.path.join(settings.MEDIA_ROOT, ef.path.path)\n response= HttpResponse(content=file(path, 'rb').read(), \n mimetype='application/pdf')\n # fn = os.path.split(ef.path.path)[1]\n # response['Content-Disposition'] = \"attachment; filename=%s\" % (fn)\n return response", "def downlaod():\r\n filename = str(uuid.uuid4()) + '.pdf'\r\n filename = os.path.join('./output' , filename)\r\n\r\n config = pdfkit.configuration(wkhtmltopdf = PRG_Path)\r\n options = {\r\n 'page-size': 'Letter'\r\n ,'margin-top': '0.75in'\r\n ,'margin-right': '0.75in'\r\n ,'margin-bottom': '0.75in'\r\n ,'margin-left': '0.75in'\r\n ,'no-outline': None\r\n ,'encoding':'UTF-8'\r\n ,'enable-local-file-access':None\r\n ,'quiet': ''\r\n # ,'javascript-delay':2000000\r\n }\r\n\r\n\r\n html = create_html_report()\r\n pdf = pdfkit.from_string(input=html, output_path=filename,configuration=config, options=options)\r\n pdfDownload = open(filename,'rb').read()\r\n\r\n response: Response = Response (\r\n pdfDownload\r\n ,mimetype=\"application/pdf\"\r\n ,headers={\r\n \"Content-disposition\": \"attachment; filename=\" + filename\r\n ,\"Content-type\": \"application/force-download\"\r\n }\r\n )\r\n return response", "def download_presentation(epObject, uc):\r\n fileDict = make_file_dict()\r\n fileDict = populate_file_dict(epObject, uc, fileDict)\r\n now = str(datetime.datetime.now().hour) + \\\r\n str(datetime.datetime.now().minute) + \\\r\n str(datetime.datetime.now().second)\r\n directoryName = epObject.Name.replace(\" \", \"\") + \"_presentation_\" + now\r\n os.mkdir(directoryName)\r\n os.chdir(directoryName)\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(fileDict['pageUrls'][0]).read())\r\n temp.seek(0)\r\n update_page(temp, fileDict, \"index.html\", index=True)\r\n temp.close()\r\n os.mkdir(\"Pages\")\r\n os.chdir(\"Pages\")\r\n for (pageUrl, pageFileName) in zip(fileDict['pageUrls'][1:], \r\n fileDict['pageFileNames'][1:]):\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(pageUrl).read())\r\n update_page(temp, fileDict, pageFileName)\r\n temp.close()\r\n os.chdir(\"../\")\r\n os.mkdir(\"Content\")\r\n os.chdir(\"Content\")\r\n for (fileUrl, fileId) in zip(fileDict['fileUrls'], fileDict['fileIds']):\r\n fileName = eportfolio.get_ep_object_properties(uc, fileId).\\\r\n FileName.strip()\r\n urllib.request.urlretrieve(fileUrl, fileName)\r\n os.chdir(\"../\")\r\n os.mkdir(\"Formatting\")\r\n os.chdir(\"Formatting\")\r\n for (cssUrl, cssFileName) in zip(fileDict['cssUrls'],\r\n fileDict['cssFileNames']):\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(cssUrl).read())\r\n temp.seek(0)\r\n update_css_file(cssUrl, temp, cssFileName)\r\n temp.close()\r\n for imgUrl in fileDict['imgUrls']:\r\n fileName = imgUrl[imgUrl.rfind(\"/\"): ]\r\n if fileName.find(\"?\") > 0:\r\n fileName = fileName[: fileName.find(\"?\")]\r\n urllib.request.urlretrieve(imgUrl, fileName)\r\n os.chdir(\"../\")\r\n print(str(fileDict))\r\n return fileDict", "def download_file():\r\n global title_dict\r\n title=ResultsListbox.get(ResultsListbox.curselection())\r\n link=title_dict[title]\r\n file_dl=urllib.URLopener()\r\n file_dl.retrieve(link,str(title)+\".pdf\")", "def download_page(url, 
destination):\n\n # Set and verify destination path\n destination = directory_resolve_home(directory_slash(destination))\n directory_exists(destination)\n\n # Set output name\n filename = generate_filename(url=url, title=get_page_title(read_page(url)))\n\n pdfkit.from_url(url, destination + filename)\n\n return destination + filename", "def download(texttitle):\n try:\n body = current_file.analysed_texts['Regular']\n rendered = render_template('pdf_template.html', title=texttitle, body=body)\n options = {'encoding': \"UTF-8\"}\n pdf = pdfkit.from_string(rendered, False, options=options)\n response = make_response(pdf)\n response.headers[\"Content-Type\"] = 'application/pdf'\n response.headers[\"Content-Disposition\"] = 'attachment; filename=output.pdf'\n\n return response\n except Exception as e:\n flash(\"Something went wrong, please try again\")\n return redirect(request.referrer)", "def download_pdfs_from_site(url: str, verbose=True):\n site_url = get_site_url(url)\n html = requests.get(url).text\n\n\n all_links = get_links(html)\n pdf_links = [link for link in all_links if link.endswith('pdf')]\n pdf_links = maybe_add_full_links(pdf_links, site_url)\n \n if verbose:\n print('Found the following pdf links')\n print(pdf_links)\n pdf_links = tqdm.tqdm(pdf_links)\n for link in pdf_links:\n download_from_link(link)", "def tutorial(request):\n try:\n file_path = (settings.BASE_DIR\n + '/website_files/metropolis_tutorial.pdf')\n with open(file_path, 'rb') as f:\n response = HttpResponse(f, content_type='application/pdf')\n response['Content-Disposition'] = \\\n 'attachment; filename=\"how_to.pdf\"'\n return response\n except FileNotFoundError:\n # Should notify an admin that the file is missing.\n raise Http404()", "def download_pdf( url, filename = None ):\n r = urlopen( Request( url ) )\n try:\n if filename is None:\n filename = give_filename( url )\n with open( filename, 'wb' ) as f:\n shutil.copyfileobj( r, f )\n finally:\n r.close()", "def download_latex(self):\n try:\n # $ Set the Arxiv Object to ensure Proper extraction\n identity,paper = self.extract_meta_from_remote(self.paper_id)\n self.identity = identity\n\n if not dir_exists(self.paper_root_path):\n os.makedirs(self.paper_root_path)\n # $ Download the paper. 
\n downloaded_data = arxiv.download(paper,dirpath=self.paper_root_path,slugify=lambda paper: paper.get('id').split('/')[-1],prefer_source_tarfile=True)\n return downloaded_data\n except Exception as e:\n raise ArxivAPIException(self.paper_id,str(e))", "def download_pdf(pdf_url):\n response = requests.get(pdf_url, allow_redirects=True)\n open('./data/raw/full.pdf', 'wb').write(response.content)", "def response_pdf(self, filename):\n now = DateTime()\n nice_filename = '%s_%s' % (filename, now.strftime('%Y%m%d'))\n self.request.response.setHeader(\"Content-Type\", \"application/pdf\")\n self.request.response.setHeader(\"Content-Disposition\", \"attachment\")\n self.request.response.setHeader(\"filename\", nice_filename)\n self.request.response.setHeader('Last-Modified',\n DateTime.rfc822(DateTime()))\n self.request.response.setHeader(\"Cache-Control\", \"no-store\")\n self.request.response.setHeader(\"Pragma\", \"no-cache\")\n return open(filename, 'rb').read()", "def download(filename):\n path = os.path.join(\n current_app.root_path, current_app.config['UPLOAD_FOLDER'], filename)\n path_default = current_app.config[\"PDF_TEMPLATE_PATH\"]\n\n def generate():\n try:\n with open(path, \"rb\") as f:\n yield from f\n os.remove(path)\n except FileNotFoundError:\n with open(path_default, \"rb\") as f:\n yield from f\n\n r = current_app.response_class(generate(), mimetype='application/pdf')\n r.headers.set(\n 'Content-Disposition', 'attachment', filename=PDF_OUT_FILENAME\n )\n return r", "def download_pdf_file(download_url):\n web_file = urllib.urlopen(download_url)\n filename = \"/tmp/\" + str(uuid.uuid4()) + \".pdf\"\n local_file = open(filename, 'w')\n local_file.write(web_file.read())\n web_file.close()\n local_file.close()\n return filename", "def download_pdfs():\n try:\n # create the download folder if it does not exist already\n Path(paho_raw_reports_dir).mkdir(parents=True, exist_ok=True)\n # remove all current pdfs in the download folder\n filelist = [ f for f in os.listdir(paho_raw_reports_dir) if f.endswith(\".pdf\") ]\n for f in filelist:\n os.remove(os.path.join(paho_raw_reports_dir, f))\n # open the browser\n logging.info(\"Now opening the Firefox browser\")\n options = Options()\n options.headless = True\n options.accept_insecure_certs = True\n profile = FirefoxProfile()\n profile.set_preference('security.tls.version.enable-deprecated', True)\n # set the download location of the pdfs and remove the download prompt\n profile.set_preference(\"browser.altClickSave\", True)\n profile.set_preference(\"browser.download.folderList\", 2)\n profile.set_preference(\"browser.download.panel.shown\", False)\n profile.set_preference(\"browser.download.manager.showWhenStarting\", False)\n profile.set_preference(\"browser.download.dir\", paho_raw_reports_dir)\n profile.set_preference(\"browser.download.useDownloadDir\", True)\n profile.set_preference(\"browser.helperApps.neverAsk.saveToDisk\", \n \"application/pdf,application/x-pdf,application/octet-stream,application/x-winzip,application/x-gzip\")\n profile.set_preference(\"browser.download.manager.alertOnEXEOpen\", False)\n profile.set_preference(\"browser.download.manager.showWhenStarting\", False);\n profile.set_preference(\"browser.download.manager.focusWhenStarting\", False);\n profile.set_preference(\"browser.helperApps.alwaysAsk.force\", False);\n profile.set_preference(\"browser.download.manager.alertOnEXEOpen\", False);\n profile.set_preference(\"browser.download.manager.closeWhenDone\", True);\n 
profile.set_preference(\"browser.download.manager.showAlertOnComplete\", False);\n profile.set_preference(\"browser.download.manager.useWindow\", False);\n profile.set_preference(\"services.sync.prefs.sync.browser.download.manager.showWhenStarting\", False);\n profile.set_preference(\"pdfjs.disabled\", True)\n driver = webdriver.Firefox(profile, options=options)\n # Go the PAHO website that holds the reports\n reports_present_on_page = True\n page_number = 0\n pahoreporturl = \"https://www.paho.org/en/technical-reports?topic=4922&d%5Bmin%5D=&d%5Bmax%5D=&page=\"+str(page_number)\n while reports_present_on_page:\n logging.info(\"Navigating to \"+pahoreporturl)\n driver.get(pahoreporturl)\n # get all urls containing certain keywords on this page\n report_links_elements = driver.find_elements_by_partial_link_text(\"COVID-19 cases\")\n # store all of the urls in each element\n report_links = []\n for report_link_element in report_links_elements:\n report_links.append(report_link_element.get_attribute('href'))\n # now go through each url in the list\n for report_link in report_links:\n # navigate to each url\n driver.get(report_link)\n # once the page has loaded, click the download link\n download_link = driver.find_element_by_link_text(\"DOWNLOAD\")\n download_link.click()\n logging.info(\"File downloaded from: \"+download_link.get_attribute('href'))\n # check if we have any elements that we're interested in on this page, to control the loop\n if report_links_elements:\n reports_present_on_page = True\n page_number += 1\n pahoreporturl = \"https://www.paho.org/en/technical-reports?topic=4922&d%5Bmin%5D=&d%5Bmax%5D=&page=\"+str(page_number)\n else:\n reports_present_on_page = False\n logging.info(\"No more reports on page. Breaking loop.\")\n return 0\n except:\n logging.info(\"Encountered an issue while trying to download the pdfs.\")\n raise\n finally:\n if 'driver' in locals() and driver is not None:\n # Always close the browser\n driver.quit()\n logging.info(\"Successfully closed web browser.\")\n logging.info(\"Completed downloading of all COVID19 pdfs from PAHO website.\")", "def scrape_pdfs(db):\n process = CrawlerProcess()\n process.crawl(PdfSpider, db=db)\n process.start()", "def click_ver_pdf(self):\n self.button.click(liquidaciones_historicas_catalog.VINCULO_VER_PDF)", "def pdf(self):\n\n for attachment in self.find('guidle:offerDetail//guidle:attachment'):\n url = self.get('guidle:url', root=attachment)\n\n if not url.endswith('.pdf'):\n return None, None\n\n name = self.get('guidle:description', root=attachment)\n name = name.strip().split('\\n')[0]\n\n return url, f'{name}.pdf'\n\n return None, None", "def download_resume(self, links):\n\t\tbot = self.bot\n\n\t\tfor link in links:\n\t\t\tbot.get(link)\n\t\t\ttime.sleep(5)\n\t\t\tmore = bot.find_element_by_class_name(\"pv-s-profile-actions__overflow-toggle.artdeco-button\").click()\n\t\t\ttime.sleep(2)\n\t\t\tsave_pdf = bot.find_element_by_class_name(\"pv-s-profile-actions--save-to-pdf\").click()\n\t\t\ttime.sleep(5)", "def _on_articles_reveal_pdf(self, evt=None, path=None):\n \n # get path from selection\n if not path:\n \n # get selected articles\n articles = self._articles_view.GetSelectedArticles()\n if not articles:\n return\n \n # get PDF path\n path = articles[0].pdf_path\n \n # check path\n if not path or not os.path.exists(path):\n wx.Bell()\n dlg = mwx.MessageDlg(self, -1, \"PDF file is not available.\", path)\n dlg.ShowModal()\n dlg.Destroy()\n return\n \n # try to reveal PDF\n try:\n if wx.Platform == '__WXMAC__':\n 
subprocess.Popen([\"open\", \"-R\", path])\n elif wx.Platform == '__WXMSW__':\n subprocess.Popen('explorer /select, \"%s\"' % path)\n else:\n pass\n except:\n pass", "def download_from_website(DATA_PDFS_DIR, name, url):\n # Create a folder for the PDFs if it does not exist\n if not os.path.exists(os.path.join(DATA_PDFS_DIR, name)):\n os.makedirs(os.path.join(DATA_PDFS_DIR, name))\n \n # Setup the download parameters\n start_time = time.perf_counter()\n print('Downloading from \"{}\"'.format(url))\n log_name = os.path.join(DATA_PDFS_DIR, 'log_{}.txt'.format(name))\n \n # Download using the wget command (UNIX only)\n save_to = os.path.join(DATA_PDFS_DIR, name)\n subprocess.run([\"wget\", \n url, \n \"--directory-prefix={}\".format(save_to),\n \"-nd\", \n \"--accept=pdf\", \n \"-r\", \n \"-t 3\", \n \"-e robots=off\", \n \"-nc\",\n \"-nv\",\n \"--append-output={}\".format(log_name)])\n \n # Print information about download times\n print('Downloading from \"{}\" DONE'.format(url))\n dl_time = round(time.perf_counter() - start_time, 1)\n print(' -> Downloading took {} seconds'.format(dl_time))", "def download():\n now_dt = dt.datetime.now()\n return render_template(\n 'resume/home.html',\n age=relativedelta(now_dt, dt.datetime(day=19, month=3, year=1983)).years,\n current_year=now_dt.year,\n )", "def download_file(session_requests, file_url, job_num, file_num, ext):\n \n filename = \"job_\" + str(job_num) + \"_file_\" + str(file_num) + ext\n pathname = Path(OUTPUT_PDF_PATH + filename) \n response = session_requests.get(file_url)\n pathname.write_bytes(response.content)\n \n return filename", "def regular_download(self) -> NoReturn:\n\n if not path.isdir(self.name):\n mkdir(self.name)\n\n for chapter in self.chapters.keys():\n\n chapter_folder = f\"{self.name}/{chapter}/\"\n curr_chapter = self.chapters[chapter]\n base_url = f\"{curr_chapter['server']}{curr_chapter['hash']}/\"\n\n if not path.isdir(chapter_folder):\n mkdir(chapter_folder)\n\n for image in curr_chapter[\"images\"]:\n\n image_url = f\"{base_url}{image}\"\n image_file = f\"{chapter_folder}{image}\"\n response = requests.get(image_url, headers={\"Connection\":\"close\"})\n\n if response and response.status_code == 200:\n with open(image_file, \"wb\") as img_file:\n img_file.write(response.content)\n else:\n print(f\"Error downloading chapter: {curr_chapter['num']} Image: {image}\")", "def downloadPdfs(soup, full_path, pattern, subdir):\n # Create subdir, exams or solutions, if not already exists\n path_to_pdfs = os.path.join(full_path, subdir)\n if not os.path.exists(path_to_pdfs):\n os.makedirs(path_to_pdfs)\n\n # Download all the pdfz!\n for x in soup.find_all('a', text=re.compile(pattern)):\n url_to_exam = x['href']\n if url_to_exam.endswith('.pdf'):\n print download_file(url_to_exam, path_to_pdfs), ' downloaded'", "def _produce_pdf_as_a_response(self, html):\n # Create a Django response object, and specify content_type as pdf\n response = HttpResponse(content_type='application/pdf')\n # Define that this is an attachment. 
\n response['Content-Disposition'] = 'attachment;'\n pisaStatus = pisa.CreatePDF(html, dest=response)\n \n return response", "def download_participants_document(cupASSistName):\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.CookieJar()), urllib2.HTTPRedirectHandler())\n opener.open(\"http://www.cupassist.com/pamelding/redirect.php?tknavn=\" + cupASSistName)\n return opener.open(\"http://www.cupassist.com/pamelding/vis_paamelding.php\").read()", "def download_pdf(url):\n # Extracts the last part of the URL to be used as the name of the file\n local_filename = url.split('/')[-1].replace('%','')\n \n if local_filename not in REPORTS:\n with urllib.request.urlopen(url) as r:\n with open(f'reports/{local_filename}', 'wb') as f:\n f.write(r.read())\n \n # updates report files in the directory\n return f'reports/{local_filename}'\n else:\n print(f'Already in the database - {local_filename}')\n return False", "def show_trailer(self):\r\n\r\n webbrowser.open(self.trailer_youtube_url)", "def _get_url(self, docket_number: str, docketEntryId: str) -> str:\n self.url = f\"https://public-api-green.dawson.ustaxcourt.gov/public-api/{docket_number}/{docketEntryId}/public-document-download-url\"\n if self.test_mode_enabled():\n # Don't fetch urls when running tests. Because it requires\n # a second api request.\n return self.url\n pdf_url = super()._download()[\"url\"]\n return pdf_url", "def download_engine(fcsd): #fcsd = first comic strip date\n\n url_list = get_comic_strip_url(fcsd)\n\n for url in url_list:\n session = requests.Session()\n response = session.get(url)\n download_url = get_image_comic_url(session, response)\n# download_dilbert(session, download_url)\n return download_url", "def show_trailer(self):\r\n webbrowser.open(self.trailer_youtube_url)", "def download():\n \n browser.find_element_by_xpath('//*[@id=\"ctl00_contentPlaceHolder_divAllVariablesPerYear2012\"]/div[2]/div[2]/div[1]/a').click()", "def download_episode(self, filmid):\n self.logger.debug('download_episode')\n if not self._test_download_path(self.settings.getDownloadPathEpisode()):\n return\n film = self.database.retrieve_film_info(filmid)\n if film is None:\n return\n\n (filmurl, extension,) = self._get_film_url_and_extension(film)\n\n # detect season and episode\n (season, episode, fninfo,) = self._season_and_episode_detect(film)\n\n # determine names\n showname = mvutils.cleanup_filename(film.show)[:64]\n namestem = mvutils.cleanup_filename(film.title)[:80]\n if not namestem:\n namestem = u'Episode-{}'.format(film.filmid)\n if not showname:\n showname = namestem\n\n # review name\n if self.settings.getReviewName():\n (namestem, confirmed) = self.notifier.get_entered_text(namestem, 30986)\n namestem = mvutils.cleanup_filename(namestem)\n if len(namestem) < 1 or confirmed is False:\n return\n\n # prepare download directory and determine sequence number\n pathname = self.settings.getDownloadPathEpisode() + showname + '/'\n sequence = 1\n if xbmcvfs.exists(pathname):\n (_, epfiles,) = xbmcvfs.listdir(pathname)\n for epfile in epfiles:\n match = re.search(r'^.* - \\(([0-9]*)\\)\\.[^/]*$', epfile)\n if match and match.groups():\n if sequence <= int(match.group(1)):\n sequence = int(match.group(1)) + 1\n else:\n xbmcvfs.mkdir(pathname)\n\n filename = showname + ' - ' + fninfo + \\\n namestem + (u' - (%04d)' % sequence)\n # download the stuff\n if self._download_files(film, filmurl, pathname, filename, extension):\n self._make_series_nfo_files(\n film, filmurl, pathname, filename, season, episode, sequence)", 
"def show_trailer(self):\n\t\twebbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n\t\twebbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n\t\twebbrowser.open(self.trailer_youtube_url)", "def get_pdf(self, analysis_request):\n return self.get_last_attachment_pdf(analysis_request, \"Delivery\")", "def pdf(self, identifier):\n return self.client.request_with_method(Methods.PDF % (self.name, identifier,))", "def show_trailer(self):\r\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\r\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\r\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer (self):\n webbrowser.open (self.trailer_youtube_url)", "def download():\n env_banner()\n\n download_data = Download()\n download_data()\n click.echo('Download done.')", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "def createPDFDoc(self, filepath):\n print(\"Starting pdf creation\")\n strMD=\"\"\n for fileMD,data in self.graph.nodes(data=True):\n if not os.path.isfile(fileMD):\n sys.exit(\"Error: \" + fileMD + \" does not exist\")\n if not fileMD.endswith(\"md\" or \"markdown\"):\n sys.exit(fileMD + \" is not a markdown file\");\n print(\"Found file: \" + fileMD)\n strMD = strMD + \" \" + fileMD\n cmd = \"pandoc --latex-engine=xelatex -s -o \" + filepath + strMD\t\n print(\"Starting file conversion.\")\n if subprocess.call(cmd) != 0:\n print(\"Conversion failed\")\n else:\n print(\"Saving pdf file to: \" + filepath)\n print(\"Conversion successfull\")", "def get(self, request, document_id, **kwargs):\n document = get_object_or_404(Document, id=document_id)\n with open(document.file.path, 'rb') as f:\n response = HttpResponse(f.read(), content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % document.file.name\n return response", "def show_trailer(self):\n\n webbrowser.open(self.trailer_youtube_url)", "def _download(item):\n\n filename = item.filename()\n filename = os.path.join(item.vdir(), filename)\n logger.info(\"Downloading '%s' to %s\" % (item.show, filename))\n\n f = open(filename, \"wb\")\n\n buf = net.tivoget(item.show.url)\n for chunk in buf:\n f.write(chunk)\n\n f.close()\n\n item.downloaded = True\n item.save()", "def download():\n\treturn response.download(request, db)", "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url) # Open trailer in webbrowser", "async def to_pdf(\n self,\n url: str,\n output: Union[None, str, Path] = None,\n ) -> Path:\n return await (asyncio.wait_for(self._run(url, output), self.timeout)\n if 
self.timeout >= 0.01 else self._run(url, output)\n )", "def save_pdf(self, response):\n\n # get metadata\n file_type = \"__comprovante_de_acesso__\"\n\n # options to save pdf\n file_id = str(uuid.uuid4())\n filename = \"{file_id}.pdf\".format(file_id=file_id)\n file_path = os.path.join(path, \"downloads\", self.scrape_id, filename)\n with open(file_path, 'wb') as f:\n f.write(response.body)\n\n # upload pdf to s3 and call the webhook\n self.upload_file(file_id)\n\n # update values in result\n self.result.update({file_type: {\"file_id\": file_id}})", "def _docs():\n url = \"https://vanheeringen-lab.github.io/seq2science\"\n if not webbrowser.open(url):\n print(url)", "def convert_to_pdf(self, news_list):\n self.logger.info(\"Converting news to PDF...\")\n self.prepare_storage()\n self.process_news_list_with_images(news_list)\n content = self.generate_html_template(news_list)\n pdf = io.BytesIO()\n pisa.pisaDocument(content, pdf)\n self.write_to_file(pdf.getvalue())", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)" ]
[ "0.63570714", "0.6327665", "0.6183838", "0.60757995", "0.6060038", "0.60584295", "0.6054394", "0.59414095", "0.586487", "0.5839018", "0.5790814", "0.57728595", "0.57418793", "0.57275844", "0.57084584", "0.56971765", "0.56889516", "0.5683005", "0.56304324", "0.5624967", "0.5597374", "0.5512267", "0.54995424", "0.549733", "0.54856753", "0.5470825", "0.54656774", "0.5442143", "0.5431172", "0.5430179", "0.5413016", "0.53977954", "0.53915256", "0.53900725", "0.5383406", "0.5371453", "0.53649503", "0.53649503", "0.53649503", "0.5350349", "0.53461397", "0.5341502", "0.5341502", "0.5341502", "0.5338521", "0.53357923", "0.5331415", "0.5331415", "0.5331415", "0.5331415", "0.5331415", "0.5331415", "0.5331415", "0.5331415", "0.5331415", "0.5331415", "0.5331415", "0.5331415", "0.5331415", "0.5331415", "0.5330901", "0.53237253", "0.53134537", "0.5295208", "0.52943325", "0.5290357", "0.5281203", "0.5264886", "0.52628165", "0.5260825", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881", "0.5258881" ]
0.658064
0
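
The record above centers on fetching a paper or report as a PDF, and several of its negatives use `requests` to write the response bytes to disk. The snippet below is a minimal sketch of that download pattern; the function name, output directory, and 30-second timeout are illustrative assumptions, not values taken from the record.

```python
# Minimal sketch of the PDF-download pattern in the record above.
# Assumes `requests` is installed; the timeout and default output
# directory are illustrative choices, not taken from the data.
import os
import requests

def download_pdf(url: str, out_dir: str = ".") -> str:
    """Download a PDF from `url` and return the local file path."""
    local_name = url.split("/")[-1] or "document.pdf"
    path = os.path.join(out_dir, local_name)
    response = requests.get(url, allow_redirects=True, timeout=30)
    response.raise_for_status()  # fail loudly on HTTP errors
    with open(path, "wb") as f:
        f.write(response.content)
    return path
```
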
Records the hosts and connects to one of them
def setup( hosts, default_keyspace, consistency=ConsistencyLevel.ONE, lazy_connect=False, retry_connect=False, **kwargs): global cluster, session, default_consistency_level, lazy_connect_args if 'username' in kwargs or 'password' in kwargs: raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") if not default_keyspace: raise UndefinedKeyspaceException() from cqlengine import models models.DEFAULT_KEYSPACE = default_keyspace default_consistency_level = consistency if lazy_connect: kwargs['default_keyspace'] = default_keyspace kwargs['consistency'] = consistency kwargs['lazy_connect'] = False kwargs['retry_connect'] = retry_connect lazy_connect_args = (hosts, kwargs) return cluster = Cluster(hosts, **kwargs) try: session = cluster.connect() except NoHostAvailable: if retry_connect: kwargs['default_keyspace'] = default_keyspace kwargs['consistency'] = consistency kwargs['lazy_connect'] = False kwargs['retry_connect'] = retry_connect lazy_connect_args = (hosts, kwargs) raise session.row_factory = dict_factory
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def establish_hosts(self):\n scheme = self._config['scheme']\n hosts = self._config['hosts']\n port = self._config['port']\n for hostname in hosts:\n url = '{}://{}:{}/gremlin'.format(scheme, hostname, port)\n host = await driver.GremlinServer.open(\n url, self._loop, **dict(self._config))\n self._hosts.append(host)\n self._hostmap[hostname] = host", "def connectionMade(self, hostname, projects, ip, port):\n for project in [i.id for i in projects if i != None]:\n sc = self.getScanner(hostname, project)\n sc.logConnection(time.time(), ip, port, 'made')", "def do_connect(self, args):\r\n for host in self.host:\r\n client = paramiko.SSHClient()\r\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n client.connect(host[0], username=host[1], password=host[2])\r\n self.connections.append(client)", "def hosts(self, hosts):\n self._hosts = hosts", "def add_hosts(self, hosts):\n for host in hosts:\n if host not in self.__hosts__:\n self.__hosts__.append(KnownHostsHost(host))", "def connect_servers(self):\r\n\r\n for srvr in self.server_list_text:\r\n try:\r\n self.connect_server(srvr[\"serverID\"], srvr[\"serverIP\"], int(srvr[\"serverPort\"]), srvr[\"nickname\"])\r\n except Exception as exp:\r\n print(\"Error occurred.\\nWhy: {0}\".format(exp)) # TOOD: posílat takovéhle errory klientům\r", "def all_hosts(self):\n ...", "def connect(self, host):\n if not self.app.connect(host):\n command = \"Connect({0})\".format(host).encode(\"utf-8\")\n self.exec_command(command)\n self.last_host = host", "def connect():", "def connect(self):\n\n # Get destination Host object\n selected_vm = Selector(self.account_obj, self.filters).select_host_from_state_file()\n\n try: # host file found\n self.connect_with_host_data(selected_vm)\n except HostNotFound: # host file not found\n try:\n self.connect_without_host_data(selected_vm, bounce=self.bounce)\n except ConnectionError: # could not connect at all.\n print_orange(\"Failed connecting.\")", "def add_host(self, name, ip):\n rdataa = dns.rdata.from_text(dns.rdataclass.IN,dns.rdatatype.A,str(ip))\n rdataseta = dns.rdataset.from_rdata(300,rdataa)\n self.update.add(name,rdataseta)\n return dns.query.tcp(self.update,self.server_address)", "async def start(self, collection, hostmap):\n records = []\n tmpl = Template(\"--host-record=$name,$ip\")\n for name, ips in hostmap.items():\n for ip in ips:\n records.append(tmpl.substitute(name=name, ip=ip))\n\n cmd = \"--user=root \" + \" \".join(records)\n ports = {(53, \"udp\"): 53}\n\n results = await self.docker.run_containers(\n collection, self.info.name, cmd, ports=ports, local_dns=False)\n\n # Add the dns info to the instances\n for inst, response in zip(collection.instances, results):\n state = inst.state\n if hasattr(state, \"dns_server\"):\n continue\n dns_ip = response[\"NetworkSettings\"][\"IPAddress\"]\n state.dns_server = dns_ip", "def add_connection_entry(self,client_id, display_name,session_id,host,conn,addr):\n self.connections[client_id] = {\n \"display_name\" : display_name,\n \"session_id\" : session_id,\n \"host\" : host,\n \"CONN\" : conn,\n \"ADDR\" : addr,\n \"connected\" : True\n }", "def serviceConnects(self):\n #log.debug(f\"{self.name}: servicing new connections for.\")\n for ca, ix in list(self.server.ixes.items()):\n if ix.cutoff:\n self.closeConnection(ca)\n continue\n\n if ca not in self.connections:\n log.debug(f\"Adding new connection for {ix}.\")\n self.connections[ca] = Requester(self.dhtdoer, remoter=ix, name=ca)\n\n if ix.timeout > 0.0 and ix.tymer.expired:\n 
self.closeConnection(ca)", "def add(self, host, auth, conn):\n self.conns[(host, auth)] = conn", "def connect_to(self, inf1, router2, inf2):\n self.interfaces[inf1]['connect'] = [router2.hostname, inf2]\n router2.interfaces[inf2]['connect'] = [self.hostname, inf1]", "def connection_made(self, transport):\n self.transport = transport\n peername = transport.get_extra_info('peername')\n self.ip = peername[0]\n self.client = \"{:s}:{:d}\".format(*peername)\n logger.debug('Connection from {}'.format(peername))\n clients.append(self)\n self.env = envs[self.ip]", "def set_one(self, host_name, ip_address):\n self.hosts[host_name] = ip_address", "def sync_dns(self,):\n\n for server_name, server_ip in self.get_instances():\n self.dnsmanager.ensure_a_record(server_name, server_ip)", "def insert_host(self, host):\n if host['host'] and host['user'] and host['passw']:\n hosts = Config().hosts\n cred = {'username': host['user'], 'password': host['passw']}\n hosts[host['host']] = cred\n Config().hosts = hosts", "def create_host_list(self):\n # Get first network address and add to list\n net_address = input('What is a network address you want to ping? ')\n self.hosts.append(net_address)\n\n # Find out if user wants to add more network addresses\n while True:\n add_another = input('Add another? (y/n) ')\n print()\n if add_another.lower() == 'n' or add_another.lower() == 'no':\n break\n elif add_another.lower() == 'y' or add_another.lower() == 'yes':\n net_address = input(\"What is a network address you want to ping? \")\n self.hosts.append(net_address)\n else:\n print(\"That is an invalid input.\")\n print()\n os.system('cls')", "def _acquireHosts(self):\n # the tcp socket that receives the ACK\n ackSocket = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM, 0)\n ackSocket.bind(('', Globals.ACK_PORT))\n ackSocket.settimeout(1)\n ackSocket.listen(5)\n \n # UDP BROADCAST\n Globals.Print('Broadcasting for other hosts...')\n self.broadcast('host up')\n \n # WAIT: RESPONSES, TIMEOUT\n self.hosts = []\n while 1:\n try:\n # TCP ACK\n clientsocket, (host, port) = ackSocket.accept()\n value = clientsocket.recv(256)\n if value == 'host up ack':\n self.addHost(host)\n clientsocket.close()\n clientsocket = None\n except:\n break\n \n ackSocket.close()", "def set_all(self, host_names, ip_address):\n for host_name in host_names:\n self.set_one(host_name, ip_address)", "def setup(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request #TCP socket object for the client\n self.server.clients[(self.ip, self.port)] = self\n self.server.peers.append((self.connection)) \n for client in self.server.clients:\n print(\"Connected client: \", client)\n\n #for peer in self.server.peers:\n # print(\"Peers: \", peer)", "def get_hosts(self, target, listener_type):", "def active_failover_detect_hosts(self):\n self.check_that_instance_is_alive()\n # this is the way to detect the master starter...\n lfs = self.get_log_file()\n if lfs.find(\"Just became master\") >= 0:\n self.is_master = True\n else:\n self.is_master = False\n regx = re.compile(r\"Starting resilientsingle on port (\\d*) .*\")\n match = regx.search(lfs)\n if match is None:\n raise Exception(timestamp() + \"Unable to get my host state! 
\" + self.basedir + \" - \" + lfs)\n\n self.frontend_port = match.groups()[0]", "def sniff_hosts(self):\n previous_sniff = self.last_sniff\n hosts = []\n try:\n # reset last_sniff timestamp\n self.last_sniff = time.time()\n try:\n hosts = self.get_es_node_addresses()\n except Exception:\n raise TransportError(\"N/A\", \"Unable to sniff hosts.\" + traceback.format_exc())\n except:\n # keep the previous value on error\n self.last_sniff = previous_sniff\n raise\n\n # we weren't able to get any nodes, maybe using an incompatible\n # transport_schema or host_info_callback blocked all - raise error.\n if not hosts:\n raise TransportError(\"N/A\", \"Unable to sniff hosts - no viable hosts found.\" + traceback.format_exc())\n\n self.set_connections(hosts)", "def select_host(host_list):\n if len(host_list) > 1:\n print('[+] Detected multiple hosts: (Port 62001 will be the first and default port for adb devices)')\n for host_id, host in enumerate(host_list):\n print(f\" > ({host_id+1}) {host}\")\n print('[+] Input the number of host you want to connect (1, 2....)')\n inp = input('>>> ')\n try:\n return [host_list[int(inp) - 1]]\n except Exception:\n raise ADBError('Error input')\n else:\n return host_list[0]", "def list_hosts():\n task_run(\"/bin/hostname -f\",RING_1_dev__allnodes)", "def add_connection(self, ip, port, key):\n\n # Socket declaration\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((ip, port))\n\n # Adding connection to the list\n self.connections[key] = sock", "def enable_host(self, name):\n from soppa.local import aslocal\n self.guest_ip = self.guest_ip()\n self.guest_host_name = name\n # Host (remote) change\n self.file.set_setting('/etc/hosts', '{0} {1}'.format('127.0.0.1', self.guest_host_name))\n # local change\n aslocal()\n self.file.set_setting('/etc/hosts', '{0} {1}'.format(self.guest_ip, name))", "def connect(self, host):\n return False", "def connect_to_master():", "def insert_host_states(hosts):\n IMPL.insert_host_states(hosts)", "def set_host(host_index):\n env.hosts = [public_dns_names[int(host_index)]]\n env.password = [public_pwds[int(host_index)]]", "def hosts_some(self, hosts_some):\n\n self._hosts_some = hosts_some", "def open(self):\n clients.append(self)\n self.logger.info(\"New connection\")", "def _host_in_event(self, ev):\n self._update_nodes()\n\n if not self.nodes:\n return\n\n for node in self.nodes:\n if node.ip in ev.host.ipv4:\n datapath = self.dpset.get(ev.host.port.dpid)\n node.setPortInformation(ev.host.port.dpid, datapath, ev.host.port.port_no, ev.host.port)\n self._install_cdnengine_matching_flow(datapath, node.ip, node.port)\n self.logger.info('New Node connected the network. 
Matching rules were installed ' + node.__str__())", "def connect(self):\n\t\tpass", "def connect(self):\n for mapset in self.tgis_mapsets.keys():\n driver, dbstring = self.tgis_mapsets[mapset]\n conn = self.connections[mapset]\n if conn.is_connected() is False:\n conn.connect(dbstring)\n\n self.connected = True", "def set_hosts(self, host_list: t.List[str]) -> None:\n if isinstance(host_list, str):\n host_list = [host_list.strip()]\n if not isinstance(host_list, list):\n raise TypeError(\"host_list argument must be a list of strings\")\n if not all(isinstance(host, str) for host in host_list):\n raise TypeError(\"host_list argument must be list of strings\")\n # TODO check length\n if self.batch:\n if hasattr(self, \"batch_settings\") and self.batch_settings:\n self.batch_settings.set_hostlist(host_list)\n\n if self.launcher == \"lsf\":\n for db in self.dbnodes:\n db.set_hosts(host_list)\n else:\n for host, db in zip(host_list, self.dbnodes):\n if isinstance(db.run_settings, AprunSettings):\n if not self.batch:\n db.run_settings.set_hostlist([host])\n else:\n db.run_settings.set_hostlist([host])\n\n if db.is_mpmd and hasattr(db.run_settings, \"mpmd\"):\n for i, mpmd_runsettings in enumerate(db.run_settings.mpmd):\n mpmd_runsettings.set_hostlist(host_list[i + 1])", "def update_cluster_hosts(self, hosts):\n self._hosts = hosts\n self._collect_hosts_d = True", "def register_router(self, hostname, expire=-1):", "def remotes():", "def find_connections():\n # print \"External\"\n # print findservices('00:0D:93:19:C8:68')\n # print findservices('bc:f5:ac:84:81:0c')\n # print finddevices()\n # print findservices(gethostaddr())\n # print gethostclass()\n print \"Your address: \", lb.gethostaddr()\n print lb.finddevicename(lb.gethostaddr())\n s = lb.socket()\n #s.bind((\"\", 0)) # RFCOMM port\n #s.bind((\"\", 1)) # RFCOMM port\n s.bind((\"\", 2)) # RFCOMM port\n print \"About to listen\"\n s.listen(1)\n print \"About to advertise\"\n lb.advertise(\"LightBlueService\", s, lb.RFCOMM)\n print \"Advertised at {} and listening on channel {}...\".format(s.getsockname()[0], s.getsockname()[1])\n print \"Waiting to accept\"\n # s.setblocking(1)\n try:\n conn, addr = s.accept()\n except KeyboardInterrupt:\n print \"Closing connection due to keyboard intterupt\"\n s.close()\n raise KeyboardInterrupt\n # Set timeout for 1 second\n # s.settimeout(1.0)\n print \"Connected by\", addr\n return conn, addr, s", "def accept_connections():\n while True:\n client, client_address = server.accept()\n print(\"%s:%s has connected.\" % client_address)\n client.send(bytes(\"Hello honey! 
Please type your name and press Enter.\", \"utf8\")) \n addresses[client] = client_address\n Thread(target=manage_connections, args=(client,)).start()", "def add_endpoints(self, hostip, username=None, password=None):\n if hostip in self.endpoints:\n log.info(\"%s is already added.\", hostip)\n return\n\n username = username or self._ep_username\n password = password or self._ep_password\n\n try:\n with LydianClient(hostip) as client:\n # fetch regular interfaces\n self._add_endpoints(client, hostip)\n\n self._ep_hosts[hostip] = hostip\n\n except Exception as err:\n log.error(\"Error in adding endpoint %s - %r\", hostip, err)", "def setupHostConnection(self, host_ip):\n if host_ip != self.ip and host_ip != '':\n host_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n indicator = host_socket.connect_ex((host_ip, 9090))\n if indicator != 0:\n return False\n else:\n new_host_msg = Message(\"NHST\", self.ip, '\\0')\n host_socket.sendall(new_host_msg.generateByteMessage())\n print('NHST message sent to Host at ' + host_ip)\n area_message = self.parseMessage(host_socket)\n if(area_message.type == 'AREA'):\n print('AREA message received from ' + area_message.origin)\n payload_array = area_message.payload.split(':')\n curr_host_ip = area_message.origin\n host_min_x = int(payload_array[0])\n host_max_x = int(payload_array[1])\n self.x_min = host_max_x\n self.x_max = self.x_min + 50\n if host_max_x > self.curr_x_max:\n self.curr_x_max = host_max_x\n if self.x_min == host_max_x:\n self.l_neighbor = curr_host_ip\n if host_min_x <= self.curr_x_min:\n self.curr_x_min = host_min_x\n self.curr_x_min_ip = curr_host_ip\n new_thread = Thread(target=lambda: self.listenToHost(host_socket))\n new_thread.daemon = True\n new_thread.start()\n new_connection = Connection(host_ip, host_socket, new_thread)\n self.connections.append(new_connection)\n return True\n else:\n print('Invalid message type received from ' + area_message.origin + ' - Host corrupt')\n return False\n return True", "def connect(self, host, port):\n pass", "def connect_to_host_with_dictionary(self, host_details):\n self.connect_to_host(**host_details)", "def host_list(self):\n try:\n scode, hosts = Rest.get('Host')\n except Exception as e:\n Console.error(e.message)\n return\n if len(hosts) == 0:\n print(\"No hosts exist\")\n return\n\n n = 1\n e = {}\n for host in hosts:\n d = {}\n d['Ip'] = str(host['Ip'])\n d['Name'] = str(host['Name'])\n d['Port'] = str(host['Port'])\n d['Swarmmode'] = str(host['Swarmmode'])\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Name', 'Port', 'Swarmmode'])))", "def updateHost(self, *hosts):\n localhost_name = None\n old_hostnames = []\n for old_host in self.hosts.values():\n old_hostnames.append(old_host.name)\n if isinstance(old_host, LocalHost):\n if localhost_name is not None:\n logger.warning('Duplicate localhost found in lab.hosts')\n localhost_name = old_host.name\n for new_host in hosts:\n # Updating localhost\n if (isinstance(new_host, LocalHost) and localhost_name is not None):\n # Check for localhost clash\n if new_host.name != localhost_name:\n logger.warning('Localhost is already present: ' +\n '%s\\n' +\n 'Not updating host %s!', localhost_name, new_host.name)\n continue\n else:\n localhost_name = new_host.name\n # Will an update happen?\n if new_host.name in old_hostnames:\n logger.info('Overwriting host: %s', new_host.name)\n # Will it end up removing the localhost?\n if (new_host.name == localhost_name and\n not isinstance(new_host, LocalHost)):\n localhost_name 
= None\n self.hosts[new_host.name] = new_host\n if localhost_name is None:\n logger.warning('Localhost not yet present')", "def hosts(self, hosts):\n return self._set_list_field(\"hosts\", hosts)", "def inject_hosts_files(self):\n self.log.info(\"Injecting host files\")\n hosts = dict()\n for i in self.all_nodes:\n hosts[i.name] = i.get_public_addr()\n #add the host names to etc/hosts\n orchestrator.inject_hostnames(hosts, delete=self.cluster_name)\n for i in self.all_nodes:\n i.inject_hostnames(hosts, delete=self.cluster_name)\n self.all_nodes[0].run_command(\"service ganglia-monitor restart; service gmetad restart\", silent=True)\n orchestrator.run_command(\"service ganglia-monitor restart; service gmetad restart\", silent=True)", "def set_servers(self, servers):\n kwargs = dict(io_loop = self.io_loop)\n #if self.connect_timeout:\n # kwargs['connect_timeout'] = self.connect_timeout \n if self.dead_retry:\n kwargs['dead_retry'] = self.dead_retry \n self.servers = [_Host(s, self.debuglog, **kwargs) for s in servers]\n self._init_buckets()", "def __addNewClients(self):\n while True:\n client = self.nextPendingConnection()\n if (client == None):\n break\n \n # Add this socket to our list of clients\n self.__clients.append(client);\n \n # When the client disconnects, remove it from our list of clients.\n QObject.connect(client, SIGNAL(\"disconnected()\"), self.__removeClient)\n\n print \"connection from\", self.__clientName(client)", "def connect(self):\n return 1", "def add(self, host, **kwargs):\n self.configs_[0][1].add(host, **kwargs)", "def test_all_servers_connection():\n task_data = dict(const.TEST_TASK)\n task_data[\"client_list\"] = list()\n agents = models.Agent.objects.all()\n for agent in agents:\n task_data[\"client_list\"].append({\"id\": agent.id, \"ip_address\": agent.ip_address})\n message_queue.push_task(task_data)\n logger.info(\"create tasks to test all agents' connection status\")", "def connect(self):\n self.ipv4 = socket.gethostbyname(socket.gethostname())\n self.addr = (self.ipv4, HttpServer.PORT)\n self.server.bind(self.addr)\n print(\"[SETUP] server bound to IPv4 address\", self.ipv4, \"on port\", HttpServer.PORT)\n self.server.listen()\n print(\"[SETUP] server listening for connections\")", "def get_hosts(self, filename):\n\n data = parse_inventory(filename)\n\n for host in data['routers']['hosts']:\n self.router_names.append(str(host))\n for host in data['brokers']['hosts']:\n self.broker_names.append(str(host))", "def show_hosts(self, show_hosts):\n\n self._show_hosts = show_hosts", "def connect_with_host_data(self, host: Host):\n host_obj = self.content.load_host(host.instanceId)\n\n if host_obj.connectionString:\n print_light_grey('Found host data, trying to connect...')\n\n # Has a bounce host.\n if host_obj.connectionString.bounce_host:\n bounce_host = DiscoverHost(self.account_obj, bounce=True).get_bounce()\n\n if not DoConnectAndSave(host_obj, self.account_obj).bounce_regular_connect(bounce_host):\n sys.exit(0)\n else:\n if not DoConnectAndSave(host_obj, self.account_obj).regular_connect():\n sys.exit(0)\n\n print_orange('Found host data is obsolete, trying to find a new path...')\n\n raise HostNotFound", "def hosts(self, value):\n if not isinstance(value, NodeSet):\n raise TypeError(\"Invalid fio host NodeSet: {} ({})\".format(value, type(value)))\n self._hosts = value.copy()", "def __enable_connections(self):\r\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n 
pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self) -> None:", "def set_hosts(hostfile='allhosts'):\n\n remote_servers = []\n\n file = open(hostfile, 'r')\n for line in file.readlines():\n remote_servers.append(line.strip('\\r\\n'))\n\n env.hosts = remote_servers", "def _display_hosts(self):\n if (\n self.check_valid_result_data(\"hosts\", silent=True)\n and self._last_result.hosts\n ):\n nb_markdown(f\"Hosts connecting to {self.url}\", \"bold\")\n display(self._last_result.hosts)\n else:\n nb_markdown(f\"No hosts found connecting to {self.url}\")", "def __init__(self, hosts: List[IPv4Address], loop: asyncio.AbstractEventLoop):\n super().__init__()\n self.hosts = hosts\n self.loop = loop", "def add_host_entries(hosts_file=None):\n from fabric.contrib.files import append\n if hosts_file:\n try:\n hosts = open(hosts_file)\n for line in hosts:\n append(\"/etc/hosts\", line.rstrip(\"\\n\"), use_sudo=True)\n except IOError:\n print \"ERROR: defined hosts file is missing!\"", "def set_host_aliases():\n with open('/tmp/hosts', 'w') as f:\n uname = os.uname()\n f.write(f'{uname.nodename} localhost\\n')\n os.environ['HOSTALIASES'] = '/tmp/hosts'", "def __init__(self, host=\"127.0.0.1\", port=5037):\n self._devices = []", "def __init__(self, host=\"127.0.0.1\", port=5037):\n self._devices = []", "def __init__(self) :\n self.remoteConnections = {}", "def host(self, host) :\n\t\ttry :\n\t\t\tself._host = host\n\t\texcept Exception as e:\n\t\t\traise e", "def set_hostname(self, path, hostname):\n\n f = open(os.path.join(path, 'etc', 'hostname'), 'w')\n f.write(hostname + \"\\n\")\n f.close()\n\n hosts = os.path.join(path, 'etc', 'hosts')\n\n with open(hosts, 'rb') as f:\n reader = csv.reader(f, delimiter=\"\\t\")\n rows = [row for row in reader]\n\n for row in rows:\n if len(row) > 1 and row[0] == '127.0.1.1':\n row[1] = hostname\n break\n\n with open(hosts, 'w') as f:\n for row in rows:\n f.write(\"\\t\".join(row) + \"\\n\")", "def __init__(self):\n self.connections = {}", "def _start(self, host):\n pass", "def add_host_and_groups(self, host_name, group_paths = None):\n if len([h for h in self.hosts if h.name == host_name]) > 0:\n raise ValueError('Failed to add host \\'%s\\'. Host with the same name already exists.' 
% host_name)\n if not group_paths or len(group_paths) == 0:\n group_paths = ['all']\n host = Host(host_name)\n self.hosts.append(host)\n for group_path in group_paths:\n group = self.get_or_add_group(group_path)\n group.hosts.append(host)", "def connect(self, host, port):\n if self._connectedTo is not None:\n raise ValueError(\"Already connected\")\n self._connectedTo = (host, port)", "def add_host(self, ipv4, rem_dpid, rem_port):\n assert(ipv4 is not None)\n assert(rem_dpid is not None)\n assert(rem_port is not None)\n LOG.info(\"Try to add host=%s -> (%s:%d)\" % (ipv4, rem_dpid, rem_port))\n\n ip_ = convert_ipv4_to_int(ipv4)\n self.add_node(ip_)\n self.add_link(ip_, 0, rem_dpid, rem_port)\n self.add_link(rem_dpid, rem_port, ip_, 0)", "def connect(ctx, config):\n log.info('Opening connections...')\n remotes = []\n machs = []\n for name in ctx.config['targets'].iterkeys():\n machs.append(name)\n for t, key in ctx.config['targets'].iteritems():\n t = misc.canonicalize_hostname(t)\n log.debug('connecting to %s', t)\n try:\n if ctx.config['sshkeys'] == 'ignore':\n key = None\n except (AttributeError, KeyError):\n pass\n remotes.append(\n remote.Remote(name=t, host_key=key, keep_alive=True, console=None))\n ctx.cluster = cluster.Cluster()\n\n remotes2 = []\n remotes3 = []\n found = 1\n for host in ctx.config['targets'].iterkeys():\n\tremotes2.append(host)\n remotes3 = sorted_nicely (remotes2)\n if 'roles' in ctx.config:\n for rem, roles in zip(remotes3, ctx.config['roles']):\n assert all(isinstance(role, str) for role in roles), \\\n \"Roles in config must be strings: %r\" % roles\n \t for objs in remotes:\n\t\tif rem == objs.name:\n \t ctx.cluster.add(objs, roles)\n found = 0\n\t\t break;\n\t if found == 1:\n\t\tlog.error('role matching error %s' % rem)\n log.info('roles: %s - %s' % (rem, roles))\n else:\n for rem in remotes:\n ctx.cluster.add(rem, rem.name)", "def dispatch_host(name, data):\n\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)", "def first_connect(self, dbapi_connection, connection_record):", "def connect_to_server(self):\n\t\tself.outside.start()\n\t\tself.outside.register(self.config.server_ip, self.config.server_port)\n\n\t\tself.thin.start()\n\t\tself.thin.register(self.config.server_ip, self.config.server_port)", "def clients():\n pass", "def ssh_connection(hostname, username):\n\n #We testing if the username can to connect to the hostname\n if username == \"company1\":\n if hostname in open(\"./servers_list_company1.txt\", \"r\").read():\n pass\n else:\n return 1\n elif username == \"company2\":\n if hostname in open(\"./servers_list_company2.txt\", \"r\").read():\n pass\n else:\n return 1\n else:\n return 1\n\n #Connexion au serveur (nb, il faut que l'échange de clé ssh est eu lieu)\n try:\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(hostname, username=\"postfix\", timeout=4)\n print \"Connected to %s\" % hostname\n except paramiko.AuthenticationException:\n return 1\n except:\n return 1\n\n return ssh", "def handle_connect(self):\n pass", "def do_servers(self, line):\n\n for i in range(len(self.__servers)):\n print >> self.stdout, '%s: %s' % (i+1, self.__servers[i])", "def accept_in_connections():\r\n while True:\r\n client, client_address = SERVER.accept()\r\n print(\"%s:%s si è collegato.\" % client_address)\r\n\r\n #information about the game\r\n msg_client('Benvenuto!', client)\r\n msg_client('Rispondi più velocemente possibile alle domande', client)\r\n msg_client('Chi 
ha il punteggio più alto alla fine del tempo, vince!', client)\r\n \r\n \r\n #Dictionary for client\r\n address[client] = client_address\r\n #Thread - one for each client\r\n Thread(target=manage_client, args=(client,)).start()", "def hosts_cmd(args):\n r = requete(\"Hosts.Host:get\")\n if not r:\n return\n if len(args) > 0:\n for i in range(0, len(args)):\n for _, host in r['status'].items():\n if (host['MACAddress'].lower() == args[i].lower()\n or host['HostName'].lower() == args[i].lower()\n or host['IPAddress'] == args[i]):\n # pprint.pprint(host)\n json.dump(host, sys.stdout, indent=4)\n else:\n #pprint.pprint(r['status'])\n for _, host in r['status'].items():\n actif = \" \" if host['Active'] else \"*\"\n if mac_parser is None:\n s = \"%-18s %-15s %c %-35s %s\" % (host['MACAddress'], host['InterfaceType'], actif, host['HostName'], host['IPAddress'])\n else:\n s = \"%-18s %-12s %-15s %c %-35s %s\" % (host['MACAddress'], mac_parser.get_manuf(host['MACAddress']), host.get('InterfaceType', \"\"), actif, host['HostName'], host['IPAddress'])\n print(s)", "def connect(self):\n pass", "def init_host(self, host):\n LOG.debug(\"init_host\")" ]
[ "0.66659623", "0.6463537", "0.64295864", "0.63148797", "0.6137244", "0.5938947", "0.59179175", "0.59135944", "0.5890323", "0.58860373", "0.5849516", "0.5825648", "0.5824897", "0.5793935", "0.5786246", "0.57649606", "0.57589364", "0.57583195", "0.57563883", "0.5731571", "0.57298714", "0.5709022", "0.5699717", "0.5698402", "0.5695063", "0.5684929", "0.56804436", "0.5653947", "0.5639448", "0.5630657", "0.56192285", "0.5618884", "0.56134534", "0.5613015", "0.55921715", "0.5579168", "0.5571859", "0.5559513", "0.5552593", "0.55433697", "0.5543173", "0.5531385", "0.5519695", "0.55170393", "0.55022275", "0.5501252", "0.5495049", "0.54798573", "0.5469267", "0.54459697", "0.5435139", "0.54304725", "0.54288477", "0.54286313", "0.54224056", "0.54200923", "0.5419909", "0.5419715", "0.54153943", "0.54126465", "0.5412584", "0.54026896", "0.5396578", "0.53949636", "0.53913224", "0.539028", "0.539028", "0.539028", "0.539028", "0.539028", "0.539028", "0.539028", "0.539028", "0.5388452", "0.53879684", "0.53859055", "0.53840405", "0.5368849", "0.53675395", "0.5366567", "0.5366567", "0.53614825", "0.53588676", "0.53568524", "0.53399277", "0.53387904", "0.533239", "0.53266853", "0.53266335", "0.5326414", "0.53241473", "0.53208125", "0.5318637", "0.5313561", "0.53045315", "0.5298966", "0.52970016", "0.5296687", "0.5295765", "0.5290689", "0.5290362" ]
0.0
-1
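
The document for this record defines a cqlengine-style `setup()` helper that stores the given hosts and opens a session against one of them. The call below is a minimal usage sketch: the contact point and keyspace name are placeholders, and the `from cqlengine import connection` import path is an assumption inferred from the `from cqlengine import models` line inside the function itself.

```python
# Minimal usage sketch for the setup() helper in the record above.
# "127.0.0.1" and "example" are placeholder values; the import path
# is assumed from the `from cqlengine import models` line in setup().
from cqlengine import connection

connection.setup(
    hosts=["127.0.0.1"],         # contact points passed to Cluster()
    default_keyspace="example",  # keyspace assigned to model classes
    retry_connect=True,          # on failure, keep args so the connect can be retried lazily
)
```
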
List all available charts
def list_charts(): charts_root = Path(R".\charm\data\charts") charts = list(charts_root.rglob("*.chart")) return charts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_charts(self, app):\n return self._list(self._path() + '?app_name=' + app, 'charts')", "def charts(self):\n return self.container['charts']", "def list(self, **params):\n\n _, _, account_charts = self.http_client.get(\"/accountcharts\", params=params)\n return account_charts", "def charts(self):\n return self._charts", "def charts(self):\n return self.properties.get('charts',\n EntityCollection(self.context, WorkbookChart,\n ResourcePath(\"charts\", self.resource_path)))", "def charts(self, charts):\n\n self.container['charts'] = charts", "def getCharts(self):\n \n # code from Jerry to strip off irrelevant headings\n results = []\n flag = False\n for line in self.ResultsForCSVfile:\n if flag:\n results.append(line)\n if len(line) == 0:\n flag = True\n # create charts\n charts = {}\n for (eachFrameSize,eachILOAD) in map(None,self.FrameSizeList,self.ILOADlist):\n c = self.CreateRateVsRangeGraph( eachFrameSize, eachILOAD, results )\n t = c.title\n charts[t] = c\n return charts", "def list():\n cmd = 'qri list --format json'\n result, err = shell_exec(cmd)\n if err:\n raise RuntimeError(err)\n datasets = dataset.DatasetList([dataset.Dataset(d) for d in json.loads(result)])\n datasets.sort(key=lambda d: d.human_ref())\n return datasets", "def get_charts(self, period=\"d\", size=\"l\", chart_type=\"c\", ta=\"1\"):\n\n encoded_payload = urlencode(\n {\"ty\": chart_type, \"ta\": ta, \"p\": period, \"s\": size}\n )\n\n sequential_data_scrape(\n scrape.download_chart_image,\n [\n f\"https://finviz.com/chart.ashx?{encoded_payload}&t={row.get('Ticker')}\"\n for row in self.data\n ],\n self._user_agent,\n )", "def show_available_datasets(params: DownloadCommandParameters):\n print(f\"\\nDatasets available in '{params.metadata_file}':\\n\")\n datasets = pd.read_csv(params.metadata_file)[\"dataset\"]\n items = datasets.value_counts()\n print(pd.DataFrame({\"Datasets\": items.index,\n \"Instances\": items.values}))", "def my_charts(page_num=1):\n # Download charts that belong to the current user\n charts = Chart.query.filter_by(owner_id=current_user.id).paginate(page_num)\n return render_template('reports/my_charts.html', charts=charts)", "def chart_finder(self, keyword):\n\n data, _ = self.helm_client.search(keyword)\n return data", "def charts(self, charts):\n\n self._charts = charts", "def available_plots(self):\n return self.visualizer.available_plots()", "def get_weekly_chart_list(self) -> ListModel[Chart]:\n return self.retrieve(\n bind=Chart,\n flatten=\"chart\",\n params=dict(method=\"user.getWeeklyChartList\", user=self.name),\n )", "def test_read_charts(self, chart, charts):\n self.chart = charts\n chart_objects = chart.objects.all()\n if not chart_objects:\n raise AssertionError(\"Could not read charts.\")", "def list_datasets():\n return METADATA.keys()", "def charts(self,\n time_period='day',\n chart_genre='all',\n per_page=None,\n page=None,\n text_format=None,\n type_='songs'):\n endpoint = type_ + '/chart'\n params = {'time_period': time_period,\n 'chart_genre': chart_genre,\n 'per_page': per_page,\n 'page': page,\n 'text_format': text_format or self.response_format}\n return self._make_request(path=endpoint, params_=params, public_api=True)", "async def allseries(self, ctx):\n\n await self.all_series_db.call(ctx)", "def my_charts(request):\n\n logger.debug('called')\n\n context = {}\n\n simulations = request.user.simulations.all().exclude(\n name__icontains=settings.STANDARD_CHART_NAME\n ).select_related(\n 'fight_style',\n 'result',\n 'simulation_type',\n 'wow_class',\n 
'wow_spec',\n 'queue',\n )\n\n context['charts'] = simulations\n\n return render(request, 'general_website/my_charts.html', context=context)", "def get_available_datasets():\n files = [file for file in glob.glob(os.path.join(MODULE_ROOT, \"datasets/*.json\"))]\n datasets = []\n for file in files:\n with open(file, \"r\") as f:\n dataset_info = json.load(f)\n datasets.append(dataset_info)\n return datasets", "def allAxes( mv ):\n if mv is None: return None\n return mv.getAxisList()", "def list_all_datasets(client=None):\n datasets = []\n try:\n datasets_list = list(client.list_datasets())\n if datasets_list:\n for dataset in datasets_list:\n datasets.append(dataset.dataset_id)\n except Exception as error:\n print(\n \"Exception occurred at function {}: {}\".format(\"list_all_datasets\", error)\n )\n finally:\n return datasets", "def allGraphs(date):\n g = getGraph()\n for uri, label, filename in subgraphs(date):\n if not label:\n label = \"(no label provided)\"\n g.parse(filename, format=SUBGRAPH_FORMAT)\n return g", "def available_datasets(self) -> List[str]:\n return sorted(self.__by_name.keys())", "def getDependenciesCharts(self) -> Mapping[str, 'ChartVersionInfo']:\n deps = self.getDependenciesList()\n ret: Dict[str, 'ChartVersionInfo'] = {}\n for dep in deps:\n ret[dep['name']] = self.getDependencyChart(dep['name'])\n return ret", "def get(self):\n graph_plugins = manager.GraphManager.get_graphs()\n graphs = []\n for name, graph_class in graph_plugins:\n graph_plugin = {\n \"name\": name,\n \"display_name\": graph_class.DISPLAY_NAME,\n \"description\": graph_class.DESCRIPTION,\n }\n graphs.append(graph_plugin)\n\n return jsonify(graphs)", "def index():\n return render_template(\"charts.html\")", "def charts(self, time_span=None):\n assert time_span in (None, '6 hrs', '12 hrs', '24 hrs'), time_span\n selector = '#chart option'\n for element in self.doc.cssselect(selector):\n label = element.text.strip()\n chart_id = element.attrib['value']\n hidden_input = self.doc.get_element_by_id('ae-dash-graph-' +\n chart_id)\n url = hidden_input.attrib['value']\n if not url:\n continue\n if time_span is None:\n yield label, url\n elif label.endswith(' (%s)' % time_span):\n yield label.replace(' (%s)' % time_span, ''), url", "def list_datasets(project=None):\n bigquery_client = bigquery.Client(project=project)\n\n for dataset in bigquery_client.list_datasets():\n print(dataset.name)", "def getAllData(client, plotFlag):\n converterDF = ResponseConverterDF()\n\n resultConfirmed = converterDF.responseConversion(\"SELECT * FROM confirmed_cases\", client)\n resultConfirmed.columns = ['Date', 'Confirmed Cases', 'State']\n resultConfirmed[\"Date\"] = pd.to_datetime(resultConfirmed[\"Date\"]).dt.date\n\n resultDeath = converterDF.responseConversion(\"SELECT * FROM death_cases\", client)\n resultDeath.columns = ['Date', 'Death Cases', 'State']\n resultDeath[\"Date\"] = pd.to_datetime(resultDeath[\"Date\"]).dt.date\n\n resultVaccinated = converterDF.responseConversion(\"SELECT * FROM vaccinated_cases\", client)\n resultVaccinated.columns = ['Date', 'State', 'Vaccinated Person']\n resultVaccinated[\"Date\"] = pd.to_datetime(resultVaccinated[\"Date\"]).dt.date\n\n if plotFlag == 1:\n plotBarChart(resultConfirmed, resultDeath, resultVaccinated)\n elif plotFlag == 2:\n showTreeMap(resultConfirmed, resultDeath, resultVaccinated)\n else:\n plotBarChart(resultConfirmed, resultDeath, resultVaccinated)\n showTreeMap(resultConfirmed, resultDeath, resultVaccinated)", "def list_spectrographs(self) -> None:\n for key, 
item in self.spectrographs.items():\n item.summary()\n print(\"\\n\")", "def charts():\n\n global show_gaps\n global timespan\n\n form = ChartForm(\n request.form,\n graph_type=timespans.index(timespan),\n graph_gaps=show_gaps\n )\n\n if request.method == 'POST':\n if form.submit_button.data:\n timespan = timespans[int(form.graph_type.data)]\n show_gaps = form.graph_gaps.data\n else:\n flash('Unknown Event', 'error')\n\n chart = Chart(app)\n data_values1, data_values2, data_values3, data_labels = \\\n chart.get_data(timespan, show_gaps)\n\n if len(data_values3) > 0:\n cb = np.array(data_values3)\n peaks = peakutils.indexes(cb, thres=0.02 / max(cb), min_dist=5)\n\n starts_total = len(peaks)\n starts_per_h = int(round(float(starts_total) / \\\n float(hourtable[timespan]), 0))\n else:\n starts_total = 0\n starts_per_h = 0\n\n return render_template(\n 'charts.html',\n form=form,\n user=current_user,\n values1=data_values1,\n values2=data_values2,\n values3=data_values3,\n labels=data_labels,\n burner_total=starts_total,\n burner_ph=starts_per_h,\n )", "def list(self, all=False):\n return self.client.containers.list(all=all)", "def getAllWidgets(self):\n \n visualisations = Visualisation.objects.filter(dataSource=self)\n widgets = []\n for vis in visualisations:\n widgets.append(vis.getWidget())\n return widgets", "async def getAll():\n return [cluster.export() for cluster in clusters.get_all()]", "def dashboards(self):\r\n return resources.Dashboards(self)", "def load_plots(seriesname):\n LOG.debug(\"Calling load_plots() with the following arguments:\")\n LOG.debug(\"seriesname = %s\"%seriesname)\n\n plots = []\n return plots", "def charting(lim=2020):\r\n for indic in ['FLR ', 'CRE ', 'TISA', 'SSPI', 'US7 ']:\r\n for c in ['A', 'M', 'P', 'T', 'all']:\r\n # TODO: fix charting for SSPI - it returns three values\r\n data = chart_data(indic, '2018-09-01', 12*5, c, lim=lim).set_index('date').sort_index()\r\n y = ['SP1', 'SP2', 'SP5', 'SSPI'] if indic == 'SSPI' else ['Perc.idv', 'Perc.ids']\r\n data.plot(kind='line', y=y)\r\n plt.xticks(range(len(data)), data.index.tolist(), rotation=30)\r\n plt.xlabel(None)\r\n plt.axhline(y=100, color='r', linestyle='-', label='Individual target')\r\n plt.axhline(y=75, color='b', linestyle='-', label='Industry target')\r\n plt.title(centres[c] + ' ' + indic)\r\n plt.savefig('pic/' + str(lim) + c + indic.strip() + '.png')\r\n logging.info('pic/' + str(lim) + c + indic.strip() + '.png saved')", "def __show_all(self):\n print(\"\\nEvents:\\n\")\n self.__show_all_events()\n print(\"\\nMetrics:\\n\")\n self.__show_all_metrics()", "def get_plots(self):\n return list(self.plots.values())", "def domain_data_chart(request):\n current_date = datetime.today()\n months = [i for i in range(1, 13)]\n data = {\n 'series': [],\n 'labels': settings.CHART_MONTHS_LABELS,\n }\n\n domain_count = []\n for month in months:\n domain_count.append(\n Domain.objects.filter(\n acquisition_date__month=month,\n acquisition_date__year=current_date.year,\n ).values('id').count()\n )\n data['series'].append({\n \"name\": _(\"Domain\"),\n \"data\": domain_count,\n })\n\n return JsonResponse(data)", "def index():\n graphs = [\n message_genre_bar_chart(df),\n category_bar_chart(df),\n top_words_bar_chart(df)\n ]\n \n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)", "def 
get_all(self):\n res = self.es.search(index=self.index, doc_type=self.doc_type, body={'query': {'match_all': {}}})\n if not res['hits']['total']:\n return []\n return [Dashboard.from_kibana(hit) for hit in res['hits']['hits']]", "def custom_graphs(self) -> List[Component]:\n graphs = []\n # TODO: Figure this out\n for i, go_data in enumerate(self.config.overview_graphs):\n groupby = go_data.pop('groupby', None)\n agg = go_data.pop('agg', None)\n if groupby and agg:\n data = getattr(self.summary.groupby(groupby), agg)()\n else:\n data = self.summary\n graphs.append(\n dbc.Row(\n dbc.Col(\n dcc.Graph(\n id=f'graph_{i}',\n figure=self.graph(data, go_data.pop('graph_type'), **go_data)\n )\n )\n )\n )\n return graphs", "def alldemos():\n rundemo(24,fig=True)\n rundemo(30,fig=True)\n rundemo(31,fig=True)\n rundemo(33)\n rundemo(34)", "def chart(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"chart\")", "def all_datasets():\n query = db.session.query(Dataset)\n result = [dict(name=q.name, description=q.description, id=q.id) for q in query]\n return result", "def get(self, name, session):\n name = normalize_series_name(name)\n matches = db.shows_by_name(name, session=session)\n\n args = series_list_parser.parse_args()\n begin = args.get('begin')\n latest = args.get('latest')\n\n shows = []\n for match in matches:\n shows.append(series_details(match, begin, latest))\n\n return jsonify(shows)", "def get_dashboards(resource_root):\n return call(resource_root.get, DASHBOARDS_PATH, ApiDashboard, \\\n ret_is_list=True)", "def chartdata():\n chart = billboard.ChartData('hot-100')\n chart_data = []\n for song in chart:\n song_data = (song.title, song.artist)\n chart_data.append(song_data)\n \n return chart_data", "def show_charts_simulator(simulator):\n\tstats_t, stats_s, stats_v, stats_a, stats_j = simulator.stats_t, simulator.stats_s, simulator.stats_v, simulator.stats_a, simulator.stats_j\n\tdatasets = get_datasets(stats_t, stats_s, stats_v, stats_a, stats_j)\n\tplot_datasets(datasets)", "def view_all(options, client):\n if options.show_events:\n return display_events(client.events())\n\n return \"\".join([\n display.DisplayServices().format(client.services()),\n '\\n',\n display.DisplayJobs(options).format(client.jobs())\n ])", "def charts(self,req):\n self.player.overviewing=True", "def all_bugs_chart(request):\n labels = []\n data = []\n\n queryset = Bug.objects.values('title', 'id').order_by('-created').exclude(status='Resolved').annotate(\n bug_votes=Count('votes'))[:5]\n for entry in queryset:\n labels.append(entry['title'])\n data.append(entry['bug_votes'])\n\n return JsonResponse(data={\n 'labels': labels,\n 'data': data,\n })", "def list_datasets(self):\n if self.list_type == \"base\":\n ds = Dataset(f\"{self.pool}/iocage/releases\").get_dependents()\n elif self.list_type == \"template\":\n ds = Dataset(\n f\"{self.pool}/iocage/templates\").get_dependents()\n else:\n ds = Dataset(f\"{self.pool}/iocage/jails\").get_dependents()\n\n ds = list(ds)\n\n if self.list_type in ('all', 'basejail', 'template'):\n if self.quick:\n _all = self.list_all_quick(ds)\n else:\n _all = self.list_all(ds)\n\n return _all\n elif self.list_type == \"uuid\":\n jails = {}\n\n for jail in ds:\n uuid = jail.name.rsplit(\"/\", 1)[-1]\n try:\n jails[uuid] = jail.properties[\"mountpoint\"]\n except KeyError:\n iocage_lib.ioc_common.logit(\n {\n 'level': 'ERROR',\n 'message': f'{jail.name} mountpoint is '\n 'misconfigured. 
Please correct this.'\n },\n _callback=self.callback,\n silent=self.silent\n )\n\n template_datasets = Dataset(\n f'{self.pool}/iocage/templates').get_dependents()\n\n for template in template_datasets:\n uuid = template.name.rsplit(\"/\", 1)[-1]\n jails[uuid] = template.properties['mountpoint']\n\n return jails\n elif self.list_type == \"base\":\n bases = self.list_bases(ds)\n\n return bases", "def make_charts(self):\n\n def _insert_pie_chart(wbook, wsheet, title, cell_pos, series):\n piechart = wbook.add_chart({\"type\": \"pie\"})\n piechart.set_title({\"name\": title})\n piechart.set_style(10)\n piechart.add_series(series)\n wsheet.insert_chart(cell_pos, piechart, {\"x_offset\": 25, \"y_offset\": 10})\n\n def _data_frame_days_to_excel(writer, sheet_name, data_frame_days):\n data_frame_days.to_excel(writer, sheet_name=sheet_name, startrow=1, header=False)\n self._set_workbook_layout(writer.book, (writer.sheets[sheet_name]), data_frame_days)\n\n with pd.ExcelWriter(\"Hive Metrics.xlsx\", engine=\"xlsxwriter\", options={\"strings_to_urls\": False}) as writer:\n workbook = writer.book\n worksheet = workbook.add_worksheet(\"Summary Charts\")\n worksheet.hide_gridlines(2)\n\n _insert_pie_chart(\n workbook,\n worksheet,\n title=\"New vs. Closed Cases\",\n cell_pos=\"D2\",\n series={\n \"name\": \"Open vs. Closed Cases Last 30\",\n \"categories\": \"=Tracking!$B$1:$C$1\",\n \"values\": \"=Tracking!$B$2:$C$2\",\n },\n )\n _insert_pie_chart(\n workbook,\n worksheet,\n title=\"Case Ownership\",\n cell_pos=\"M19\",\n series={\n \"name\": \"Case Ownership Last 30\",\n \"categories\": \"=Tracking!$A$3:$A$9\",\n \"values\": \"=Tracking!$D$3:$D$9\",\n },\n )\n _insert_pie_chart(\n workbook,\n worksheet,\n title=\"Case Resolution\",\n cell_pos=\"D19\",\n series={\n \"name\": \"Case Resolution Last 30\",\n \"categories\": \"=Tracking!$A$10:$A$12\",\n \"values\": \"=Tracking!$E$10:$E$12\",\n },\n )\n _insert_pie_chart(\n workbook,\n worksheet,\n title=\"Case Severities\",\n cell_pos=\"M2\",\n series={\n \"name\": \"Severity Last 30\",\n \"categories\": \"=Tracking!$A$13:$A$15\",\n \"values\": \"=Tracking!$F$13:$F$15\",\n },\n )\n\n _data_frame_days_to_excel(\n writer, sheet_name=\"Cases newer than 30 Days\", data_frame_days=self._data_frame_30days,\n )\n _data_frame_days_to_excel(\n writer, sheet_name=\"Cases older than 60 days\", data_frame_days=self._data_frame_60days,\n )\n _data_frame_days_to_excel(\n writer, sheet_name=\"Cases newer than 90 Days\", data_frame_days=self._data_frame_90days,\n )\n\n self._data_frame_counts.to_excel(writer, sheet_name=\"Tracking\")\n writer.save()", "def webservice_data_chart(request):\n current_date = datetime.today()\n active_type_of_services = TypeOfService.objects.filter(active=True)\n months = [i for i in range(1, 13)]\n data = {\n 'series': [],\n 'labels': settings.CHART_MONTHS_LABELS,\n 'colors': [type_service.color for type_service in active_type_of_services]\n }\n\n for type_service in active_type_of_services:\n services_count = []\n for month in months:\n services_count.append(\n WebService.objects.filter(\n date__month=month,\n date__year=current_date.year,\n type_of_service=type_service\n ).values('id').count()\n )\n data['series'].append({\n \"name\": type_service.name,\n \"data\": services_count,\n })\n\n return JsonResponse(data)", "def daily_reports_chart(env):\n certname = request.args.get('certname')\n result = get_or_abort(\n get_daily_reports_chart,\n db=puppetdb,\n env=env,\n days_number=app.config['DAILY_REPORTS_CHART_DAYS'],\n certname=certname,\n 
)\n return jsonify(result=result)", "def overview_series(request, addon, group, start, end, format):\n date_range = check_series_params_or_404(group, start, end, format)\n check_stats_permission(request, addon)\n\n return fake_app_stats(request, addon, group, start, end, format)\n\n series = get_series(Installed, addon=addon.id, date__range=date_range)\n\n return render_json(request, addon, series)", "def draw_all_plots(self):\n\n plot_names = []\n e = self.find_di_tri(self.lang_found)\n letter_dct = e[1]\n di_dct = e[2]\n tri_dct = e[3]\n\n plot_name = self.lang_found + '_letters'\n self.wykres(letter_dct, 'Wyres liter', 'litera', plot_name, 0)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_digram'\n self.wykres(di_dct, 'Wykres digramów', 'digram', plot_name, 1)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_trigram'\n self.wykres(tri_dct, 'Wykres trigramów', 'trigram', plot_name, 2)\n plot_names.append(plot_name)\n\n for cnt, plt_scn in enumerate(self.plot_scenes):\n pic = QtGui.QPixmap(self.img_dir + '/' + plot_names[cnt] + \".png\")\n plt_scn.setPixmap(pic.scaled(427, 320, Qt.KeepAspectRatio))", "def get_list_chart_queryset(self, result_list):\n return result_list", "def get_available_figures(self):\n return sorted((method[5:], func) \\\n for method, func in self.__class__.__dict__.iteritems() \\\n if method.startswith(\"plot_\") and callable(func))", "def get_all_adapters(self):\n pass", "def result(self):\n\n chart_series = [] # will hold all the series created\n\n # determine the sensor to plot from the sensor selected by the user.\n the_sensor = bmsapp.models.Sensor.objects.get(pk=self.request_params['select_sensor'])\n\n # get the requested averaging interval in hours\n averaging_hours = float(self.request_params['averaging_time'])\n\n # determine the start time for selecting records\n st_ts, end_ts = self.get_ts_range()\n\n # get the database records\n df = self.reading_db.dataframeForOneID(the_sensor.sensor_id, st_ts, end_ts, pytz.timezone(self.timezone))\n\n if not df.empty:\n\n # info needed to create each series (selection list, series name, visible)\n if self.schedule:\n occupied_times = df.ts.apply(self.schedule.is_occupied)\n unoccupied_times = -occupied_times\n\n series_info = [(None, 'All Data', True),\n (occupied_times, 'Occupied Periods', False),\n (unoccupied_times, 'Unoccupied Periods', False)]\n else:\n # no schedule, so just return the 'All Data' series\n series_info = [(None, 'All Data', True)]\n\n for mask, series_name, visibility in series_info:\n if mask is None:\n select_df = df\n else:\n select_df = df[mask]\n\n if averaging_hours:\n select_df = bmsapp.data_util.resample_timeseries(select_df, averaging_hours)\n\n histogram_series = bmsapp.data_util.histogram_from_series(select_df.val)\n\n chart_series.append({'x': [x for x,y in histogram_series],\n 'y': [y for x,y in histogram_series],\n 'type': 'scatter',\n 'mode': 'lines', \n 'name': series_name, \n 'visible': 'true' if visibility else 'legendonly'\n })\n\n opt = self.get_chart_options('plotly')\n opt['data'] = chart_series\n opt['layout']['title'] = the_sensor.title + ' Histogram: ' + self.building.title\n opt['layout']['xaxis']['title'] = the_sensor.unit.label\n opt['layout']['xaxis']['type'] = 'linear'\n opt['layout']['yaxis']['title'] = '% of Readings'\n opt['layout']['yaxis']['rangemode'] = 'tozero'\n\n html = basechart.chart_config.chart_container_html(opt['layout']['title'])\n\n return {'html': html, 'objects': [('plotly', opt)]}", "def has_charts(self):\n return 
self.__charts is not None", "def __show_all_metrics(self):\n for obj in self.metrics_list:\n self.__print_metrics_info(obj.get_name())\n print()", "def _charts(self):\n # lazy instantiation here to avoid creating the charts object unless needed.\n if self.__charts is None:\n self.__charts = Charts(self)\n self.AddObserver(\"StartEvent\", partial(try_callback, self._before_render_event))\n return self.__charts", "def all_features_chart(request):\n labels = []\n data = []\n\n queryset = Feature.objects.values('title').order_by('-created').exclude(status='Implemented').annotate(\n feature_purchases=Sum('purchases'))[:5]\n for entry in queryset:\n labels.append(entry['title'])\n data.append(entry['feature_purchases'])\n\n return JsonResponse(data={\n 'labels': labels,\n 'data': data,\n })", "def spark_list():\n api.list()", "def listAllBuckets(self):\n print self.getAllBuckets()", "def index_figures(): \n # extract data needed for visuals\n # TODO: Below is an example - modify to extract data for your own visuals\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n \n # create visuals\n # TODO: Below is an example - modify to create your own visuals\n graph_one = []\n graph_one.append(\n go.Bar(\n x = genre_names,\n y = genre_counts\n )\n ) \n layout_one = dict(title = 'Distribution of Message Genres',\n yaxis = dict(title = 'Count'),\n xaxis = dict(title = 'Genre')\n )\n \n category_values = df.iloc[:,4:].sum().sort_values(ascending=False).head()\n category_names = list(category_values.index)\n \n graph_two = []\n graph_two.append(\n go.Pie(\n values=category_values,\n labels=category_names\n )\n )\n layout_two = dict(title = 'Top Categories',\n yaxis = dict(title = 'Count'),\n xaxis = dict(title = 'Category')\n )\n \n graphs = []\n graphs.append(dict(data=graph_one, layout=layout_one))\n graphs.append(dict(data=graph_two, layout=layout_two))\n return graphs", "def get_all_data():\n return jsonify(service.get_all_data())", "def datasources(request):\n return render(request, 'vaxcharts/datasources.html')", "def dashboards(self) -> dict:\n return Config.get_dashboards()", "def show_graphs(self):\n self.frequency_plot_graph.show()\n self.resistance_graph.show()\n self.temperature_plot_graph.show()\n self.pressure_plot_graph.show()\n self.humidity_plot_graph.show()\n self.overview_graph.show()\n self.overview_graph.setXRange(-1000, 5000)", "def get_my_graphs(self, tags=None, limit=20, offset=0):\n\t\tquery = {\n\t\t\t'owner_email': self.username,\n\t\t\t'limit': limit,\n\t\t\t'offset': offset\n\t\t}\n\n\t\tif tags is not None:\n\t\t\tquery.update({'tags[]': tags})\n\n\t\treturn self._make_request(\"GET\", '/api/v1/graphs/', url_params=query).json()", "def ls():\n # TODO: listing all availabe containers form sequence\n return", "def show_pie_chart(self):\n\n chart_type_index = self.ui.comboBox_pie_charts.currentIndex()\n if chart_type_index < 1:\n return\n self.get_selected_categories_and_codes()\n if chart_type_index == 1: # Code frequency\n self.piechart_code_frequency()\n if chart_type_index == 2: # Code by characters\n self.piechart_code_volume_by_characters()\n if chart_type_index == 3: # Code by image area\n self.piechart_code_volume_by_area()\n if chart_type_index == 4: # Code by audio/video segments\n self.piechart_code_volume_by_segments()\n self.ui.comboBox_pie_charts.setCurrentIndex(0)", "def avail_categories(self):\n # retrieve categories\n categories = self.show_all_categories()\n # for each category, retrieve packages\n output = {}\n for 
category in categories:\n packages = self.show_category(category)\n output[category] = packages\n\n return output", "def print_charts(dataset, title, weekday=False):\n chart = []\n keys = sorted(dataset.keys())\n mean = numpy.mean(list(dataset.values()))\n median = numpy.median(list(dataset.values()))\n if args.json is False:\n export_string(title)\n\n for key in keys:\n if (dataset[key] >= median * 1.33):\n displayed_key = \"%s (\\033[92m+\\033[0m)\" % (int_to_weekday(key) if weekday else key)\n elif (dataset[key] <= median * 0.66):\n displayed_key = \"%s (\\033[91m-\\033[0m)\" % (int_to_weekday(key) if weekday else key)\n else:\n displayed_key = (int_to_weekday(key) if weekday else key)\n if args.json is False:\n export_string(\"%s - %s\" % (dataset[key], (int_to_weekday(key) if weekday else key)))\n chart.append((displayed_key, dataset[key]))\n\n thresholds = {\n int(mean): Gre, int(mean * 2): Yel, int(mean * 3): Red,\n }\n\n data = hcolor(chart, thresholds)\n\n graph = Pyasciigraph(\n separator_length=4,\n multivalue=False,\n human_readable='si',\n )\n\n if args.json is False:\n for line in graph.graph(title, data):\n if not color_supported:\n ansi_escape = re.compile(r'\\x1B\\[[0-?]*[ -/]*[@-~]')\n line = ansi_escape.sub('', line)\n print(line)\n cprint(\"\")", "def list_engines(self, current=1, size=20):\n data = { 'page': { 'current': current, 'size': size } }\n return self.swiftype_session.request('get', 'engines', json=data)", "def _generate_all_charts(spec, input_data):\n\n def _generate_chart(_, data_q, graph):\n \"\"\"Generates the chart.\n \"\"\"\n\n logs = list()\n\n logging.info(\" Generating the chart '{0}' ...\".\n format(graph.get(\"title\", \"\")))\n logs.append((\"INFO\", \" Generating the chart '{0}' ...\".\n format(graph.get(\"title\", \"\"))))\n\n job_name = graph[\"data\"].keys()[0]\n\n csv_tbl = list()\n res = list()\n\n # Transform the data\n logs.append((\"INFO\", \" Creating the data set for the {0} '{1}'.\".\n format(graph.get(\"type\", \"\"), graph.get(\"title\", \"\"))))\n data = input_data.filter_data(graph, continue_on_error=True)\n if data is None:\n logging.error(\"No data.\")\n return\n\n chart_data = dict()\n chart_tags = dict()\n for job, job_data in data.iteritems():\n if job != job_name:\n continue\n for index, bld in job_data.items():\n for test_name, test in bld.items():\n if chart_data.get(test_name, None) is None:\n chart_data[test_name] = OrderedDict()\n try:\n chart_data[test_name][int(index)] = \\\n test[\"result\"][\"receive-rate\"]\n chart_tags[test_name] = test.get(\"tags\", None)\n except (KeyError, TypeError):\n pass\n\n # Add items to the csv table:\n for tst_name, tst_data in chart_data.items():\n tst_lst = list()\n for bld in builds_dict[job_name]:\n itm = tst_data.get(int(bld), '')\n if not isinstance(itm, str):\n itm = itm.avg\n tst_lst.append(str(itm))\n csv_tbl.append(\"{0},\".format(tst_name) + \",\".join(tst_lst) + '\\n')\n\n # Generate traces:\n traces = list()\n index = 0\n groups = graph.get(\"groups\", None)\n visibility = list()\n\n if groups:\n for group in groups:\n visible = list()\n for tag in group:\n for test_name, test_data in chart_data.items():\n if not test_data:\n logs.append((\"WARNING\",\n \"No data for the test '{0}'\".\n format(test_name)))\n continue\n if tag in chart_tags[test_name]:\n message = \"index: {index}, test: {test}\".format(\n index=index, test=test_name)\n test_name = test_name.split('.')[-1]\n try:\n trace, rslt = _generate_trending_traces(\n test_data,\n job_name=job_name,\n 
build_info=build_info,\n name='-'.join(test_name.split('-')[2:-1]),\n color=COLORS[index])\n except IndexError:\n message = \"Out of colors: {}\".format(message)\n logs.append((\"ERROR\", message))\n logging.error(message)\n index += 1\n continue\n traces.extend(trace)\n visible.extend([True for _ in range(len(trace))])\n res.append(rslt)\n index += 1\n break\n visibility.append(visible)\n else:\n for test_name, test_data in chart_data.items():\n if not test_data:\n logs.append((\"WARNING\", \"No data for the test '{0}'\".\n format(test_name)))\n continue\n message = \"index: {index}, test: {test}\".format(\n index=index, test=test_name)\n test_name = test_name.split('.')[-1]\n try:\n trace, rslt = _generate_trending_traces(\n test_data,\n job_name=job_name,\n build_info=build_info,\n name='-'.join(test_name.split('-')[2:-1]),\n color=COLORS[index])\n except IndexError:\n message = \"Out of colors: {}\".format(message)\n logs.append((\"ERROR\", message))\n logging.error(message)\n index += 1\n continue\n traces.extend(trace)\n res.append(rslt)\n index += 1\n\n if traces:\n # Generate the chart:\n try:\n layout = deepcopy(graph[\"layout\"])\n except KeyError as err:\n logging.error(\"Finished with error: No layout defined\")\n logging.error(repr(err))\n return\n if groups:\n show = list()\n for i in range(len(visibility)):\n visible = list()\n for r in range(len(visibility)):\n for _ in range(len(visibility[r])):\n visible.append(i == r)\n show.append(visible)\n\n buttons = list()\n buttons.append(dict(\n label=\"All\",\n method=\"update\",\n args=[{\"visible\": [True for _ in range(len(show[0]))]}, ]\n ))\n for i in range(len(groups)):\n try:\n label = graph[\"group-names\"][i]\n except (IndexError, KeyError):\n label = \"Group {num}\".format(num=i + 1)\n buttons.append(dict(\n label=label,\n method=\"update\",\n args=[{\"visible\": show[i]}, ]\n ))\n\n layout['updatemenus'] = list([\n dict(\n active=0,\n type=\"dropdown\",\n direction=\"down\",\n xanchor=\"left\",\n yanchor=\"bottom\",\n x=-0.12,\n y=1.0,\n buttons=buttons\n )\n ])\n\n name_file = \"{0}-{1}{2}\".format(spec.cpta[\"output-file\"],\n graph[\"output-file-name\"],\n spec.cpta[\"output-file-type\"])\n\n logs.append((\"INFO\", \" Writing the file '{0}' ...\".\n format(name_file)))\n plpl = plgo.Figure(data=traces, layout=layout)\n try:\n ploff.plot(plpl, show_link=False, auto_open=False,\n filename=name_file)\n except plerr.PlotlyEmptyDataError:\n logs.append((\"WARNING\", \"No data for the plot. 
Skipped.\"))\n\n data_out = {\n \"job_name\": job_name,\n \"csv_table\": csv_tbl,\n \"results\": res,\n \"logs\": logs\n }\n data_q.put(data_out)\n\n builds_dict = dict()\n for job in spec.input[\"builds\"].keys():\n if builds_dict.get(job, None) is None:\n builds_dict[job] = list()\n for build in spec.input[\"builds\"][job]:\n status = build[\"status\"]\n if status != \"failed\" and status != \"not found\" and \\\n status != \"removed\":\n builds_dict[job].append(str(build[\"build\"]))\n\n # Create \"build ID\": \"date\" dict:\n build_info = dict()\n tb_tbl = spec.environment.get(\"testbeds\", None)\n for job_name, job_data in builds_dict.items():\n if build_info.get(job_name, None) is None:\n build_info[job_name] = OrderedDict()\n for build in job_data:\n testbed = \"\"\n tb_ip = input_data.metadata(job_name, build).get(\"testbed\", \"\")\n if tb_ip and tb_tbl:\n testbed = tb_tbl.get(tb_ip, \"\")\n build_info[job_name][build] = (\n input_data.metadata(job_name, build).get(\"generated\", \"\"),\n input_data.metadata(job_name, build).get(\"version\", \"\"),\n testbed\n )\n\n work_queue = multiprocessing.JoinableQueue()\n manager = multiprocessing.Manager()\n data_queue = manager.Queue()\n cpus = multiprocessing.cpu_count()\n\n workers = list()\n for cpu in range(cpus):\n worker = Worker(work_queue,\n data_queue,\n _generate_chart)\n worker.daemon = True\n worker.start()\n workers.append(worker)\n os.system(\"taskset -p -c {0} {1} > /dev/null 2>&1\".\n format(cpu, worker.pid))\n\n for chart in spec.cpta[\"plots\"]:\n work_queue.put((chart, ))\n work_queue.join()\n\n anomaly_classifications = list()\n\n # Create the header:\n csv_tables = dict()\n for job_name in builds_dict.keys():\n if csv_tables.get(job_name, None) is None:\n csv_tables[job_name] = list()\n header = \"Build Number:,\" + \",\".join(builds_dict[job_name]) + '\\n'\n csv_tables[job_name].append(header)\n build_dates = [x[0] for x in build_info[job_name].values()]\n header = \"Build Date:,\" + \",\".join(build_dates) + '\\n'\n csv_tables[job_name].append(header)\n versions = [x[1] for x in build_info[job_name].values()]\n header = \"Version:,\" + \",\".join(versions) + '\\n'\n csv_tables[job_name].append(header)\n\n while not data_queue.empty():\n result = data_queue.get()\n\n anomaly_classifications.extend(result[\"results\"])\n csv_tables[result[\"job_name\"]].extend(result[\"csv_table\"])\n\n for item in result[\"logs\"]:\n if item[0] == \"INFO\":\n logging.info(item[1])\n elif item[0] == \"ERROR\":\n logging.error(item[1])\n elif item[0] == \"DEBUG\":\n logging.debug(item[1])\n elif item[0] == \"CRITICAL\":\n logging.critical(item[1])\n elif item[0] == \"WARNING\":\n logging.warning(item[1])\n\n del data_queue\n\n # Terminate all workers\n for worker in workers:\n worker.terminate()\n worker.join()\n\n # Write the tables:\n for job_name, csv_table in csv_tables.items():\n file_name = spec.cpta[\"output-file\"] + \"-\" + job_name + \"-trending\"\n with open(\"{0}.csv\".format(file_name), 'w') as file_handler:\n file_handler.writelines(csv_table)\n\n txt_table = None\n with open(\"{0}.csv\".format(file_name), 'rb') as csv_file:\n csv_content = csv.reader(csv_file, delimiter=',', quotechar='\"')\n line_nr = 0\n for row in csv_content:\n if txt_table is None:\n txt_table = prettytable.PrettyTable(row)\n else:\n if line_nr > 1:\n for idx, item in enumerate(row):\n try:\n row[idx] = str(round(float(item) / 1000000, 2))\n except ValueError:\n pass\n try:\n txt_table.add_row(row)\n except Exception as err:\n 
logging.warning(\"Error occurred while generating TXT \"\n \"table:\\n{0}\".format(err))\n line_nr += 1\n txt_table.align[\"Build Number:\"] = \"l\"\n with open(\"{0}.txt\".format(file_name), \"w\") as txt_file:\n txt_file.write(str(txt_table))\n\n # Evaluate result:\n if anomaly_classifications:\n result = \"PASS\"\n for classification in anomaly_classifications:\n if classification == \"regression\" or classification == \"outlier\":\n result = \"FAIL\"\n break\n else:\n result = \"FAIL\"\n\n logging.info(\"Partial results: {0}\".format(anomaly_classifications))\n logging.info(\"Result: {0}\".format(result))\n\n return result", "def graphs(self):\n return self.__graphs", "def get_chart_one(request):\r\n json_str = []\r\n \r\n usuarios = Usuario.objects.all()\r\n for usuario in usuarios:\r\n peticiones = Peticion.objects.filter(usuario=usuario)\r\n json_str.append({ \r\n 'name': u'%s %s' % (usuario.persona.nombre,\r\n usuario.persona.apellidos),\r\n 'data': len(peticiones)\r\n }) \r\n json_obj = json.dumps(json_str, sort_keys=True, indent=4)\r\n response = HttpResponse(json_obj, mimetype=\"application/json\") \r\n return response", "def download_all_datasets():\n print(\"Downloading all datasets ...\")\n for dataset in get_available_datasets():\n download_dataset(dataset)", "def clients(self):\n\n try:\n req = requests.get(self.root_url + \"/clients\")\n except requests.exceptions.ConnectionError as e:\n req = None\n print(str(e), file=sys.stderr)\n except Exception as e:\n print(\"Unknown error making a request to the Sensu API\", file=sys.stderr)\n print(str(e), file=sys.stderr)\n\n if req and req.status_code == 200:\n dat = req.json()\n for host in dat:\n self.metrics.append(('sensu_status', host['status'], {'host': host['name'], 'dc': host['dc']}))", "def plot_list(self):\n wrapper = TextWrapper(subsequent_indent = \" \" * 22,\n width = 78)\n for method, func in self.get_available_figures():\n if method != \"list\":\n wrapper.initial_indent = (\"%-20s \" % method).ljust(22)\n print wrapper.fill(func.figure_name)", "def show_data_files(self):\n for idx in self.plot_data:\n self.plot_data[idx].show()", "def create_education_chart(region_list, comparison):\n print('education chart HI')\n print(comparison)\n if comparison == 'field':\n qty_data = create_data_by_field_qty(region_list, 'education')\n qty_chart = {\n 'chartType': 'bar',\n 'chartName': 'Status Pendidikan menurut Jumlah Orang',\n 'dataFields': qty_data,\n 'dataOptions': {\n 'fieldAxis': 'Status Pendidikan',\n 'measureAxis': 'Jumlah Orang',\n 'tooltipStringFormat': ['_', ' ', 'Orang']\n }\n }\n\n dataset_total = sum(qty_data['values'])\n pct_data = create_data_by_field_pct(qty_data, dataset_total)\n pct_chart = {\n 'chartType': 'doughnut',\n 'chartName': 'Status Pendidikan menurut Persentase Orang',\n 'dataFields': pct_data,\n 'dataOptions': {\n 'fieldAxis': 'Status Pendidikan',\n 'measureAxis': 'Persentase Orang',\n 'tooltipStringFormat': ['_', '%']\n } \n }\n\n chart_list = {'chartList': [qty_chart, pct_chart]}\n jsonprint(chart_list)\n return chart_list\n\n elif comparison == 'region':\n (qty_list, label_list) = \\\n create_data_by_region_qty(region_list, 'education')\n\n print(qty_list, label_list)\n\n dataset_total_list = get_dataset_total_list(qty_list)\n pct_list = create_data_by_region_pct(qty_list, \n dataset_total_list)\n\n chart_list = {'chartList': [], 'labelList': label_list}\n for index, chart in enumerate(qty_list):\n pct_list[index]['dataOptions'] = {\n 'tooltipStringFormat': ['_', '%'],\n 'fieldAxis': 'Status 
Pendidikan',\n 'measureAxis': 'Persentase Orang'\n }\n qty_list[index]['dataOptions'] = {\n 'tooltipStringFormat': ['_', ' ', 'Orang'],\n 'fieldAxis': 'Status Pendidikan',\n 'measureAxis': 'Jumlah Orang'\n }\n\n field = pct_list[index]['field']\n pct_list[index]['chartName'] = \\\n \"Persentase Orang dengan Status Pendidikan '\" + field + \\\n \"' menurut Kecamatan\"\n qty_list[index]['chartName'] = \\\n \"Jumlah Orang dengan Status Pendidikan '\" + \\\n field + \"' menurut Kecamatan\"\n\n chart_list['chartList'].append(pct_list[index])\n chart_list['chartList'].append(qty_list[index])\n\n jsonprint(chart_list)\n return chart_list", "def list(self):\n return 'Decks available: \\n{}'.format(\"\\n\".join([\n 'Deck {}: {} ({} cards)'.format(deck['id'], deck['title'], len(deck['cards']))\n for key, deck in self.decks.items()\n ]))", "def get_all(broker: Broker = None) -> list:\n if not broker:\n broker = get_broker()\n stats = []\n packs = broker.get_stats(f\"{Conf.Q_STAT}:*\") or []\n for pack in packs:\n try:\n stats.append(SignedPackage.loads(pack))\n except BadSignature:\n continue\n return stats", "def spark_list(provider):\n api.available(provider)", "def list_history_datasets(self, trans, **kwargs):\n kwargs['show_item_checkboxes'] = 'True'\n return self._history_datasets_grid(trans, **kwargs)", "def get_visualizations( self, dataset ):\n\n return [ 'phyloviz' ]", "def all(self) -> list[dict[str, Any]]:\n return self.client.get(self._url())", "def show_instances():\n return get_instances()", "def contract_data_chart(request):\n current_date = datetime.today()\n months = [i for i in range(1, 13)]\n data = {\n 'series': [],\n 'labels': settings.CHART_MONTHS_LABELS,\n }\n\n contract_count = []\n for month in months:\n contract_count.append(\n Contract.objects.filter(\n start_date__month=month,\n start_date__year=current_date.year,\n ).values('id').count()\n )\n data['series'].append({\n \"name\": _(\"Contract\"),\n \"data\": contract_count,\n })\n\n return JsonResponse(data)", "def read_all_chart_versions(constants: dict) -> dict:\n magma_root = constants['magma_root']\n chart_versions = {}\n for chart_name, chart_fn in charts_fn_map.items():\n with open(f'{magma_root}/{chart_fn}') as chart_f:\n chart_info = yaml.load(chart_f, Loader=yaml.FullLoader)\n chart_name = chart_name.replace('-', '_')\n chart_versions[chart_name] = chart_info['version']\n return chart_versions", "def list_library_datasets(self, trans, **kwargs):\n kwargs['show_item_checkboxes'] = 'True'\n return self._library_datasets_grid(trans, **kwargs)" ]
[ "0.7755798", "0.7449958", "0.71319413", "0.7110359", "0.66801405", "0.6495061", "0.63652325", "0.6342188", "0.6272101", "0.62555903", "0.6250946", "0.6006704", "0.60066587", "0.6001703", "0.597355", "0.59719837", "0.58516073", "0.5850192", "0.5838608", "0.5827216", "0.5812616", "0.5787245", "0.5769391", "0.5727296", "0.56889516", "0.5683649", "0.56771404", "0.5655422", "0.56289655", "0.56041414", "0.5587156", "0.5587053", "0.55829775", "0.5577918", "0.55594313", "0.55554605", "0.5538849", "0.5533471", "0.5510556", "0.5490465", "0.54842556", "0.54836273", "0.5472463", "0.54615957", "0.5458608", "0.5451115", "0.5450028", "0.5435676", "0.5432613", "0.54270846", "0.54247123", "0.5412671", "0.54085684", "0.5405344", "0.53984284", "0.5392044", "0.53893244", "0.5386627", "0.5378175", "0.5364515", "0.53643465", "0.5348499", "0.5339318", "0.5327138", "0.5323371", "0.53184664", "0.5317909", "0.5317119", "0.53163093", "0.53102523", "0.5307569", "0.52983195", "0.52923626", "0.5290947", "0.52844816", "0.52820987", "0.5277186", "0.52748", "0.5271513", "0.52589", "0.5258478", "0.5236189", "0.5224988", "0.52157134", "0.5213016", "0.5211768", "0.52103347", "0.5209355", "0.5206729", "0.5188861", "0.5185907", "0.518479", "0.51846063", "0.51787806", "0.51762867", "0.51739067", "0.51736975", "0.5169622", "0.5166232", "0.5161673" ]
0.8125694
0
r""" Convert a chart Path object to a string path relative to .\charm\data\charts
def strch(chart): charts_root = Path(R".\charm\data\charts") return str(chart.relative_to(charts_root))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data_path(path):\n\n data_path = Path(self.kard.meta.get('data_path', 'data'))\n\n if data_path.is_absolute():\n return str(data_path / path)\n\n return str(self.kard_folder_path / self.kard.name / data_path /\n path)", "def get_path(self):\n raise NotImplementedError(\"This asset does not support absolute paths\")", "def data_path(path: str, createdir: bool = False) -> str:\n path_obj = Path(path)\n if not path_obj.is_absolute():\n if inside_project():\n path_obj = Path(project_data_dir(), path)\n else:\n path_obj = Path(\".scrapy\", path)\n if createdir and not path_obj.exists():\n path_obj.mkdir(parents=True)\n return str(path_obj)", "def completePath(path):\n return os.getcwd() + convertString(path)", "def _path_to_string(path):\n return '.'.join(path)", "def get_realpath(cls, path_str):\n if path_str.startswith('/'):\n return path_str\n return os.path.abspath(os.path.join(cls.apollo_root, path_str))", "def __str__(self):\n return str(self.path.relative_to(os.getcwd()))", "def get_relative_path(self):\n if self.dip or self.sip or self.replica:\n raise PackageError(\n \"Get relative path for sip or replica packages not yet implemented\"\n )\n if self.deleted:\n raise PackageError(\"There are no relative paths for deleted packages\")\n if self.uuid is None:\n raise PackageError(\"Cannot generate a relative path without a package UUID\")\n rel = \"\"\n left_offset = len(self.default_pair_tree)\n right_offset = -len(self.compressed_ext)\n try:\n if self.current_path.endswith(self.compressed_ext):\n rel = self.current_path[left_offset:right_offset]\n else:\n rel = self.current_path[left_offset:]\n except AttributeError:\n raise PackageError(\"Current path doesn't exist for the package\")\n return \"{}/data/METS.{}.xml\".format(rel, self.uuid)", "def graph_path(scope=\"session\"):\n return join(dirname(__file__), pardir, \"new_data\", \"graph\")", "def path(self):\n\t\treturn os.path.join(*self._string_values(limit=4))", "def abspath(self, path):\n return DataSource.abspath(self, self._fullpath(path))", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def get_path(data_path):\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep + data_path", "def _GetRelativeLabelPath(self, label):\n\n if self._AreLabelsPaths():\n return label\n\n return os.path.join(*self.GetLabelComponents(label))", "def to_file_path(self, resourcePath: str) -> PurePath:\n rel = resourcePath.replace('res://', '')\n return self._root.joinpath(rel)", "def _GetRelativeLabelPath(self, label):\n\n if self._AreLabelsPaths():\n return label\n\n path = \"\"\n components = self.GetLabelComponents(label)\n if not components:\n return path\n \n for c in components[:-1]:\n path = os.path.join(path, c + self.suite_extension)\n path = os.path.join(path, components[-1])\n return path", "def path(self):\n path = os.path.join(self.base_dir, self.store().replace(' ', '_'), self.package_name())\n return os.path.abspath(path)", "def path_to_related(self, path):\n # self.path = \"...functional/fixtures/img/logo.png\"\n # path = \"...functional/fixtures/docs/index.md\"\n current = self.dir\n\n while not path.startswith(current.dir.path):\n current = current.dir.parent.dir\n\n remaining = current.relative(self.path)\n\n level = current.relative(path).count(os.sep)\n\n way_back = os.sep.join(['..'] * level) or '.'\n result = \"{0}/{1}\".format(way_back, remaining)\n\n return result", "def path_to_string(path: Path) -> str:\n 
assert_continuous(path)\n\n pieces = [\"M {} {}\".format(path[0].p0[0], path[0].p0[1])]\n for curve in iter(path): # iter cast not strictly necessary\n piece = \"C {} {} {} {} {} {}\".format(\n int(round(curve.c0[0])), int(round(curve.c0[1])),\n int(round(curve.c1[0])), int(round(curve.c1[1])),\n int(round(curve.p1[0])), int(round(curve.p1[1]))\n )\n pieces.append(piece)\n\n return \" \".join(pieces)", "def path(self, name):\n raise NotImplementedError(\"This backend doesn't support absolute paths.\")", "def path(self, name):\n raise NotImplementedError(\"This backend doesn't support absolute paths.\")", "def path_to_str(path):\n if hasattr(path, '__fspath__'):\n path = as_str_any(path.__fspath__())\n return path", "def get_relative_regression_path(cls) -> str:\n # Get the fully-qualified name of the subject (in dotted form)\n fully_qualified_name: str = cls.subject_type().__module__ + '.' + cls.subject_type().__qualname__\n\n # Replace the dots with platform-dependent slashes\n return fully_qualified_name.replace(\".\", os.sep)", "def dataPath(self):\n return ''", "def dag_file_path(self, string):\n if not self.has_dag_field(string):\n return None\n # TODO handle url\n root_dir = self.root_dir()\n if root_dir:\n path = os.path.join(root_dir, self.dag_field(string))\n return os.path.realpath(path)\n return os.path.realpath(self.dag_field(string))", "def _get_as_path(self):\n return self.__as_path", "def __relative_path(self, p4file):\n return self.ctx.depot_path(p4file.depot_path).to_gwt()", "def absolute_physical_path(self) -> str:\n return self._path", "def ruta_archivo(path):\n return os.path.abspath(path)", "def path(self):\n p = self\n\n name = [p.name()]\n offsets = set([p._offset])\n while p.has_parent_key():\n p = p.parent_key()\n if p._offset in offsets:\n name.append(\"[path cycle]\")\n break\n name.append(p.name())\n offsets.add(p._offset)\n return '\\\\'.join(reversed(name))", "def full_path(self):\n return os.path.abspath(self.path)", "def path(self):\n if self.parent and self.parent.category_id:\n return self.parent.path + '/' + self.basename\n return self.basename", "def path_convert(self):\n pub_path = Exp_msg()\n for i in self.path:\n epoint = Cordi()\n (epoint.x, epoint.y) = i\n pub_path.bliss.append(epoint)\n return(pub_path)", "def path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"path\")", "def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str:\r\n path_template = get_template_from_path(path)\r\n path = get_path_from_template(path_template, path_type)\r\n return path", "def path(self) -> str:\n return self.src + \"/\"", "def fileUrl(self) -> str:\n if self.urls is None or len(self.urls) == 0:\n raise InputOutputError('Chart version does not have file urls')\n\n if is_absolute_url(self.urls[0]):\n return self.urls[0]\n return posixpath.join(self.chart.repository.url, self.urls[0])", "def _purepath_to_str(\n self, path: Union[Path, PurePath, str]\n ) -> Union[Path, PurePath, str]:\n if isinstance(path, PurePath):\n path = str(path)\n return path", "def output_path(self):\n\n output_path = stringify(self._output_path)\n if output_path is None:\n with current_context() as ctx:\n output_path_relative = stringify(self.output_path_relative)\n if output_path_relative is not None:\n output_path = join_path(ctx.paths.output, output_path_relative)\n else:\n output_path = ctx.current.project.get_output_path(self.executor.output_type)\n return output_path", "def getPath(self): #$NON-NLS-1$\r", "def _unpack_chart(self, repochart_name, app_name, 
namespace_dir):\n\n shutil.rmtree(self.tmp_dir, ignore_errors=True)\n\n repo_name, chart_name = repochart_name.split(\"/\")\n if self.allowed_repos and repo_name not in self.allowed_repos:\n raise PermissionError(\"Repo %s not in allowed list\" % repo_name)\n\n self.helm_client.pull(repochart_name, chart_dir=self.tmp_dir)\n\n chart_dir = Path(\"%s/%s\" % (self.tmp_dir, chart_name))\n app_dir = Path(\"%s/%s\" % (namespace_dir, app_name))\n if app_dir.exists():\n raise ValueError(\"An app with the same name already exist \"\n \"in the current namespace\")\n shutil.move(chart_dir, app_dir)\n\n return app_dir", "def path_addons_data(self) -> Path:\n return self.path_supervisor / ADDONS_DATA", "def rel_path(self) -> str:\n return os.path.dirname(self._filepath_oracle.filepath())", "def abs_path(self) -> str:\n full_path = '/'.join(folder.name for folder in reversed(self.ancestors))\n return f'/{full_path}/'", "def getPath(self):\n path = '/'.join(self.getPhysicalPath())\n return path", "def pathFromCompetition(competition):\n config = Config()\n ret = Path(config.d['data_path']+'/'+competition+'/')\n if not ret.exists(): raise Exception('Please download the competition data first.')\n return ret", "def path(self):\n return os.path.join(FLOWJS_PATH, self.filename)", "def data_path(self):\n raise NotImplementedError", "def convertString(path):\n if (\"win\" in sys.platform):\n return path.replace(\"/\",\"\\\\\")\n elif (\"linux\" in sys.platform):\n return path.replace(\"\\\\\",\"/\")", "def __fspath__(self):\n return str(self)", "def path(self):\n # type: () -> string_types\n return self._path", "def _get_local_src(self, path: Path) -> Path:\n src = \"\"\n\n if str(path).startswith(\"~\"):\n path = Path(str(path).replace(\"~/\", \"\"))\n\n if self.category == \"global\":\n src = f\"{self.local_base}/global{path}\"\n elif self.category == \"local\":\n src = f\"{self.local_base}/local/{path}\"\n else:\n src = f\"{self.local_base}/custom/{path}\"\n\n return Path(src)", "def getpath(self, path):\n return self._join(path)", "def path_addons_local(self) -> Path:\n return self.path_supervisor / ADDONS_LOCAL", "def get_cottrell_path(self) -> Path:\n return self.figure_data_paths.cottrell_path", "def getFilePathInBackend(self, hostPath):\n return hostPath", "def _path(name: str):\n return os.path.join(ASSET_PATH, name)", "def get_path(self, path):\n return abspath(join(self.origin, *path))", "def __get_path(self):\n return self.path", "def _get_filepath(self) -> str:\n return os.path.join(\n os.sep.join(\n [\n self.period.value,\n 'activities',\n f'activities_{self._dt_string}.json'\n ]\n )\n )", "def get_dataset_path(dataset: str = \"MVTec\") -> str:\n # Initially check if `datasets` directory exists locally and look\n # for the `dataset`. This is useful for local testing.\n path = os.path.join(\"./datasets\", dataset)\n\n # For docker deployment or a CI that runs on server, dataset directory\n # may not necessarily be located in the repo. 
Therefore, check anomalib\n # dataset path environment variable.\n if not os.path.isdir(path):\n path = os.path.join(os.environ[\"ANOMALIB_DATASET_PATH\"], dataset)\n return path", "def path_str(path):\n\toutput = \"PATH: \"\n\tif path:\n\t\tfor i in path:\n\t\t\toutput += str(i.data) + \" -> \"\n\telse:\n\t\toutput += \"Empty\"\n\treturn output", "def component_docker_path(self, name: str) -> str:\n return str(self.component_path(name).parent.absolute())", "def _rrd_path(self, obj):\n obj_type = objtype(obj)\n obj_pk = str(obj.pk).replace(':', '')\n return os.path.join(self.rrd_root, obj_type, obj_pk, '%s.rrd' % self.pk)", "def out_path(dataset, work_dir=consts.WORK_DIR):\r\n return join_path(dataset_path(dataset, work_dir), consts.OUTPUT_DIR)", "def __path(self):\n if self.parent:\n return self.parent.__path() + os.sep + self.__sanitize(self.name)\n return self.__sanitize(self.name)", "def relative(self, path):\n return re.sub(self.path_regex, '', path).lstrip(os.sep)", "def ospath(self, vPath):\n if not vPath.startswith('/'):\n raise OSError(vPath)\n parts = vPath.split('/')\n toppath = self._top_paths[parts[1]]\n return os.path.join(toppath, *parts[2:])", "def get_actual_path(self, path):\n if self._params.path_to_dir[-1] != '/':\n if path:\n path = self._params.path_to_dir + '/' + path\n path = path.replace('//', '/')\n return path", "def get_target_object_path(data_path: str) -> str:\n path_split = data_path.rsplit('.', 1)\n self_targeting = len(path_split) < 2\n if self_targeting:\n return \"\"\n return path_split[0]", "def path(self) -> str:\n return self._path", "def path(self) -> str:\n return self._path", "def path(self) -> str:\n return self._path", "def path(self) -> str:\n return self._path", "def as_pathlib(self):\n return Path(self.absolute)", "def get_png_abs_path() -> pathlib.Path:\n return PathManager._ROOT.joinpath(\n PathManager._TILINGS_GUI, PathManager._RESOURCES, \"img\", \"png\"\n )", "def getAbsolutePath(relPath):\n currDir = os.path.dirname(__file__)\n return os.path.join(currDir, relPath)", "def fake_full_path(self) -> PurePath:\n if self.category:\n # Giza wrote out yaml file artifacts under a directory. e.g. 
steps-foo.yaml becomes\n # steps/foo.rst\n return self.source_path.parent.joinpath(\n PurePath(self.category), self.output_filename\n )\n return self.source_path", "def path(self):\n if self.filename:\n return os.path.join(self.season.path, self.filename)", "def nameToDagPath(name):\n\n pass", "def symbol_to_path(symbol, base_dir= proj_path + '/data/'): \n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def path(self):\r\n raise NotImplementedError()", "def get_path(name: str) -> str:\n return _pooch.fetch(name)", "def get_path(name: str) -> str:\n return _pooch.fetch(name)", "def dataset_path(dataset, work_dir=consts.WORK_DIR):\r\n return join_path(work_dir, consts.DATA_DIR, dataset)", "def get_full_path(self):\n return self.path_display", "def base_path(self):\n return Path(self.path)", "def relativize(path: str):\n return join('.', path)", "def relative_path(self, data=None):\n path_data = data.copy()\n for i in ['prefix', 'source_name']:\n if not i in path_data:\n path_data[i] = getattr(self, i)\n return constants.DEB_FILENAME % path_data", "def get_oc_path(cfg):\n return os.path.join(\n BASE_DATA_DIR,\n \"castp\",\n \"pH\" + str(cfg.pH),\n str(cfg.mut),\n \"oc\" + str(cfg.probe) + \".csv\")", "def getDataPath():\n\treturn \"..\" + os.sep + \"data\" + os.sep", "def path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"path\")", "def get_regression_path(cls) -> str:\n return os.path.join(cls.get_regression_root_path(), cls.get_relative_regression_path())", "def chart(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"chart\")", "def _output_path(name):\n output = Path(\"../Analysis Results/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(f\"{name}.png\")", "def path(self) -> Optional[str]:\n return pulumi.get(self, \"path\")", "def path(self) -> Optional[str]:\n return pulumi.get(self, \"path\")", "def path(self) -> Optional[str]:\n return pulumi.get(self, \"path\")" ]
[ "0.6196704", "0.5926172", "0.5841862", "0.56423295", "0.56181204", "0.5596763", "0.5596409", "0.5583181", "0.5562562", "0.55594707", "0.552106", "0.55162907", "0.55162907", "0.54929805", "0.54887694", "0.54884666", "0.54857177", "0.5473863", "0.5469934", "0.54631054", "0.5450903", "0.5450903", "0.5448774", "0.5429692", "0.54215", "0.54077876", "0.54003716", "0.5392164", "0.5383128", "0.53688675", "0.5358862", "0.5355097", "0.53540593", "0.5352968", "0.53527415", "0.5339607", "0.5332519", "0.53311825", "0.5319138", "0.5318325", "0.5315678", "0.53001344", "0.5282574", "0.5281571", "0.52745384", "0.5270799", "0.5260806", "0.52503324", "0.52382064", "0.5237469", "0.5237424", "0.5233959", "0.5231853", "0.5229738", "0.5227502", "0.5227355", "0.52243704", "0.5219245", "0.521396", "0.52107155", "0.5208198", "0.51974744", "0.5192289", "0.5187721", "0.5184579", "0.5180791", "0.5179922", "0.51792127", "0.5178588", "0.51781976", "0.51763594", "0.51750964", "0.51750964", "0.51750964", "0.51750964", "0.51707876", "0.51620287", "0.51485705", "0.51451683", "0.514235", "0.5136683", "0.5130694", "0.5128814", "0.5128324", "0.5128324", "0.5127883", "0.5126357", "0.5122044", "0.5118399", "0.51180804", "0.51146024", "0.51098645", "0.5100371", "0.5100371", "0.50997895", "0.5098026", "0.50976324", "0.50939167", "0.50939167", "0.50939167" ]
0.7617498
0
Set the map grid cell as obstacle
def set_obstacle(self, pos: tuple): if self.within_map(pos): self.map[round(pos[0]), round(pos[1])] = OBSTACLE return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_obstacle(self, x, y):\n self.BOARD[y][x].traversable = False\n self.board_array[y][x] = 1", "def add_obstacle(self, x, y):\n self.BOARD[y][x].traversable = False\n self.board_array[y][x] = 1", "def update_obstacles(self, new_obs):\n self.obstacles = new_obs", "def set_cell_to_hole(self):\n self.tick = \"H\"\n self.is_hole = True\n self.is_active = False", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def change_cell(self, event):\n try:\n (x, y) = self.get_id_from_coor(event.x, event.y)\n if self._board[x][y]:\n self._board[x][y] = False\n else:\n self._board[x][y] = True\n if self._board[x][y]:\n self.canvas.itemconfig(self.rect[y,x], fill=self._secondary_color)\n else:\n self.canvas.itemconfig(self.rect[y,x], fill=self._primary_color)\n except KeyError:\n pass # tkinter bug", "def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value", "def setCell(self, (xIndex, yIndex)):\n changed = self.grid[xIndex][yIndex] == False\n self.grid[xIndex][yIndex] = True\n if changed:\n self.drawSquare((xIndex, yIndex))", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value;", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid_2048[row][col] = value", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._cells[row][col] = value", "def make_cell_change(self, x, y):\n self.cells[x][y] = 1 if not self.cells[x][y] else 0", "def set_cell(self, point, cell):\n self._grid[point.x][point.y] = cell", "def set_tile(self, row, col, value):\n # replace with your code\n self._cells[row][col] = value", "def click_cell(self, event):\n if (self.world_setable):\n x, y = event.x, event.y\n row = y / self.cell_size\n col = x / self.cell_size\n if ((row in range(self.cell_row)) and\n (col in range(self.cell_col))):\n status_now = not self.world_status.now[row, col]\n if (status_now):\n color = self.color_alive\n else:\n color = self.color_dead\n item_id = self.world[row, col]\n self.canvas.itemconfig(item_id, fill=color)\n self.world_status.now[row, col] = status_now\n self.world_status.next = self.world_status.now.copy()\n self.init_world = self.world_status.now.copy()", "def init_map(self, obstacle_rate=0.9):\n n = self.size()\n\n map_obstacles = [] # np.zeros((n, n)) # 1: obstacle, 0: non-obstacle\n \n for i in range(n):\n # We only need 2 bit to encode 1/0 for each element of NumberArray\n row = NumberArray(2, n)\n for j in range(n):\n if i == j:\n # map_obstacles[i][j] = 0\n row[j] = 0\n elif i > j:\n # map_obstacles[i][j] = map_obstacles[j][i]\n row[j] = map_obstacles[j][i]\n else:\n # map_obstacles[i][j] = 1 if random.random() > 0.9 else 0\n row[j] = 1 if random.random() > obstacle_rate else 0\n map_obstacles.append(row)\n\n self.map_obstacle = map_obstacles", "def set_tile(self, row, col, value):\r\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._grid_tile[row][col] = value", "def __init__(self, grid_height, grid_width, obstacle_list = None, \r\n zombie_list = None, human_list = None):\r\n poc_grid.Grid.__init__(self, grid_height, grid_width)\r\n if obstacle_list != None:\r\n for cell in obstacle_list:\r\n self.set_full(cell[0], cell[1])\r\n if zombie_list != None:\r\n self._zombie_list = list(zombie_list)\r\n else:\r\n self._zombie_list = []\r\n if human_list != None:\r\n 
self._human_list = list(human_list) \r\n else:\r\n self._human_list = []", "def set_tile(self, row, col, value):\r\n self._grid[row][col]=value", "def set_tile(self, row, col, value):\n # replace with your code\n if col < self.grid_height and row < self.grid_width:\n self.board[row][col] = value", "def __init__(self, grid_height, grid_width, obstacle_list = None,\n zombie_list = None, human_list = None):\n poc_grid.Grid.__init__(self, grid_height, grid_width)\n if obstacle_list != None:\n for cell in obstacle_list:\n self.set_full(cell[0], cell[1])\n if zombie_list != None:\n self._zombie_list = list(zombie_list)\n else:\n self._zombie_list = []\n if human_list != None:\n self._human_list = list(human_list) \n else:\n self._human_list = []", "def __init__(self, grid_height, grid_width, obstacle_list = None, \n zombie_list = None, human_list = None):\n poc_grid.Grid.__init__(self, grid_height, grid_width)\n if obstacle_list != None:\n for cell in obstacle_list:\n self.set_full(cell[0], cell[1])\n if zombie_list != None:\n self._zombie_list = list(zombie_list)\n else:\n self._zombie_list = []\n if human_list != None:\n self._human_list = list(human_list) \n else:\n self._human_list = []", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def updateHardObstacles(self):\r\n global_obs = self.calcGlobalObstaclePosition([[10, 20],[10, 0],[10, -20]])\r\n self.globalHardObstaclesList.extend(global_obs)", "def setNeighbors(self):\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. \n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. 
Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)", "def change_cell(self):\n\n x, mu = self.update_position_direction(self.l_edge)\n mu_mean = self.calculate_mean_mu(self.x, x, self.l_edge)\n self.update_estimators(self.l_edge, mu_mean)\n\n if self.next_cell_index == self.grid.Ncells:\n # packet escapes\n self.is_escaped = True\n self.is_active = False\n self.mu = mu\n self.x = self.cell_xr\n\n elif self.next_cell_index == -1:\n\n raise GeometryException(\"No inner boundary in homogeneous sphere\")\n\n else:\n # packet is transported into target cell\n\n self.mu = mu\n\n if self.next_cell_index > self.cell_index:\n # packet is moved one cell to the right\n\n self.x = self.grid.xl[self.next_cell_index]\n\n else:\n # packet is moved one cell to the left\n\n self.x = self.grid.xr[self.next_cell_index]\n\n # reset cell-based properties for easy access\n self.cell_index = self.next_cell_index\n self.cell_chi = self.grid.chi[self.cell_index]\n self.cell_xl = self.grid.xl[self.cell_index]\n self.cell_xr = self.grid.xr[self.cell_index]\n self.cell_dx = self.grid.dx[self.cell_index]\n self.cell_dV = self.grid.dV[self.cell_index]\n\n # recalculate distances\n self.calculate_and_set_propagation_distances()", "def in_cell(self):\n for player in self.players:\n for cell in self.cell_lst:\n if player.x in cell[0] and player.y in cell[1]:\n player.current_cell = cell\n break", "def changeTile (self, posY, posX, tile=\"t\"):\r\n self.grid[posY][posX] = tile", "def set_observed_class(self, cell):\n try:\n if cell.coords in [c.coords for c in self.clses[0]]:\n self.obs = self.clses[0]\n elif cell.coords in [c.coords for c in self.clses[1]]:\n self.obs = self.clses[1]\n elif cell.coords in [c.coords for c in self.clses[2]]:\n self.obs = self.clses[2]\n elif cell.coords in [c.coords for c in self.clses[3]]:\n self.obs = self.clses[3]\n else:\n print self.clses \n raise OutOfBoardError()\n except IndexError:\n print self.clses \n raise OutOfBoardError()", "def set_tile(self, row, col, value):\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n if row >= 0 and row < self.get_grid_height():\n if col >= 0 and col < self.get_grid_width():\n # Only set if the row and column are ok\n self._grid[row][col] = value", "def cut(self,cell):\r\n self.grid[cell[0]][cell[1]] = 1", "def put_cell(self, x, y, num):\n if self.is_empty(x,y):\n self.grid[y][x] = num\n return True\n return False", "def __init__(self, costmap):\n # Copy the map metadata\n self.resolution = costmap.info.resolution\n self.min_x = costmap.info.origin.position.x\n self.min_y = costmap.info.origin.position.y\n self.y_width = costmap.info.height\n self.x_width = costmap.info.width\n self.max_x = self.min_x + self.x_width *self.resolution\n self.max_y = self.min_y + self.y_width *self.resolution\n print self.min_x, self.min_y\n print self.max_x, self.max_y\n print \"Resolution: \", self.resolution\n print self.x_width, self.y_width\n \n\n self.motion = self.get_motion_model()\n \n # Copy the actual map data from the map\n x = 
0\n y = 0\n ox = list()\n oy = list()\n # obstacle map generation\n self.obstacle_map = [[False for _ in range(self.y_width)]\n for _ in range(self.x_width)]\n obstacles = 0\n for value in costmap.data:\n if value >95:\n obstacles += 1\n self.obstacle_map[x][y] = True\n ox.append(float(x)*self.resolution +self.min_x)\n oy.append(float(y)*self.resolution +self.min_y)\n # Update the iterators\n x += 1\n if x == self.x_width:\n x = 0\n y += 1\n print \"Loaded %d obstacles\"%(obstacles)\n if show_animation: # pragma: no cover\n plt.plot(ox, oy, \".k\")\n plt.grid(True)\n \n # plt.axis(\"equal\")", "def set_cells(self, val=None):\t\r\n self._cells = \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def __init__(self):\n self.board = {} # dict of (x,y) to PlacedTile\n self.board[(0,0)] = STARTING_PIECE", "def reset_obstacles(self):\n self.obstacles = np.array([])", "def update_obstacle_location(self):\n\n # find the previous location of the obstacle\n old_y = self.map_obstacle.y\n old_x = self.map_obstacle.x\n\n # remove it from the main graph\n self.main_graph[old_y][old_x].contents.remove(self.map_obstacle)\n\n # get the latest location\n self.map_obstacle.update_location()\n (new_y, new_x) = (self.map_obstacle.y, self.map_obstacle.x)\n\n # add it back into the main graph\n self.main_graph[new_y][new_x].contents.add(self.map_obstacle)\n\n # update the map obstacle (not necessary, but it doesn't hurt)\n self.map_obstacle.y = new_y\n self.map_obstacle.x = new_x", "def set_cell(frame, data):\n\twith data.cell_:\n\t\tdata.cell_[:,0] = [L, 0., 0.]\n\t\tdata.cell_[:,1] = [0., L, 0.]\n\t\tdata.cell_[:,2] = [0., 0., L]\n\t\t#cell origin\n\t\tdata.cell_[:,3] = [0, 0 , 0]\n\t\t#set periodic boundary conditions\n\t\tdata.cell_.pbc = (True, True, True)", "def set_cell(self, x, y, val):\n pass", "def set_cell(self, x, y, val):\n pass", "def set_tile(self, row, col, value):\r\n self._cells[row][col] = value", "def test_set_cell_north(mock_amg):\n\n # change the neighbour to the north.\n # this is not the correct neighbour\n mock_amg.cells[0].north = mock_amg.cells[1]\n assert mock_amg.cells[0].north == mock_amg.cells[1]", "def setTile(self, cell, tile):\n assert isinstance(cell, tuple)\n cellx, celly = cell\n\n if cellx < 0 or cellx > self.map_array.shape[0]-1 or celly < 0 or celly > self.map_array.shape[1]-1:\n return\n\n if self.tile_dict.get((cellx, celly)):\n self.canvas.delete(self.tile_dict[(cellx, celly)])\n\n if tile:\n self.map_array[cellx, celly] = tile.tid\n if tile.tid == 0.0:\n return\n map_posx, map_posy = iso(cellx * self.cell_width, celly * self.cell_height)\n image = self.main.main_tilelist.images[tile.tid]\n self.tile_dict[(cellx, celly)] = self.canvas.create_image(map_posx, map_posy, image=image, anchor=tk.N)", "def __init__(self, map_obstacle, main_graph):\n\n self.map_obstacle = map_obstacle\n self.main_graph = main_graph\n\n self.sight_range = self.calculate_sight_range()\n\n self.top_left_y = None\n self.top_left_x = None\n self.bottom_right_y = None\n self.bottom_right_x = None\n self.height = None\n self.width = None\n self.size = self.calculate_size()\n\n # nodes specific to this threat zone\n self.nodes = []", "def set_tile(self, row, col, value):\n # replace with your code\n pass", "def remove_obstacle(self, x, y):\n self.BOARD[y][x].traversable = True\n self.board_array[y][x] = 0", "def update_poi (POIn, POInm1, new, current_cell_mask):\n row, col = cuda.grid(2)\n\n if row < POIn.shape[0] and col < 
POIn.shape[1]:\n POIn[row,col] = 0 \n if current_cell_mask[row,col] == True:\n POIn[row,col] = POInm1[row,col] + new[row,col]", "def set_sensible_obstacles(self, obstacles):\n self.sensible_obstacles = obstacles", "def test_set_cell_south(mock_amg):\n\n # change the neighbour to the south.\n # this is not the correct neighbour\n mock_amg.cells[4].south = mock_amg.cells[2]\n assert mock_amg.cells[4].south == mock_amg.cells[2]", "def set_grid(self,ug):\n self.grd=ug\n self.set_topology()", "def place_obstacles():\n #Randomly generate different sized rectangles\n #Soem may overlap, which gives more variety in shape of obstacles\n xvals = np.random.randint(0,self.map_dimensions[1],size=self.N_obstacles)\n yvals = np.random.randint(0,self.map_dimensions[0],size=self.N_obstacles)\n lower_left = zip(xvals,yvals)\n rects = []\n for LL in lower_left:\n x = LL[0]\n y = LL[1]\n wmax = self.map_dimensions[1] - x\n w = np.random.randint(0,wmax,size=1)[0]\n hmax = self.map_dimensions[0] - y\n h = np.random.randint(0,hmax,size=1)[0]\n rects += [(x,y,w,h)]\n self.coordinates__obstacles = rects", "def change_cell(self):\n # TODO: assess whether this may partly moved into the base class\n\n x, mu = self.update_position_direction(self.l_edge)\n mu_mean = self.calculate_mean_mu(self.x, x, self.l_edge)\n self.update_estimators(self.l_edge, mu_mean)\n\n if self.next_cell_index == self.grid.Ncells:\n # packet escapes\n self.is_escaped = True\n self.is_active = False\n self.x = self.cell_xr\n\n elif self.next_cell_index == -1:\n # packets gets reflected\n\n self.x = self.cell_xl\n self.mu = -self.mu\n\n self.calculate_and_set_propagation_distances()\n\n else:\n # packet is transported into target cell\n if self.next_cell_index > self.cell_index:\n # packet is moved one cell to the right\n\n self.x = self.grid.xl[self.next_cell_index]\n\n else:\n # packet is moved one cell to the left\n\n self.x = self.grid.xr[self.next_cell_index]\n\n # reset cell-based properties for easy access\n self.cell_index = self.next_cell_index\n self.cell_chi = self.grid.chi[self.cell_index]\n self.cell_xl = self.grid.xl[self.cell_index]\n self.cell_xr = self.grid.xr[self.cell_index]\n self.cell_dx = self.grid.dx[self.cell_index]\n\n # recalculate distances\n self.calculate_and_set_propagation_distances()", "def set_tile(self, row, col, value):\n self._cells[row][col] = value", "def recreate_obstacles(self):\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.obstacles = self.create_obstacles()", "def setPossible(self,possible):\n \n for coord in possible:\n if (coord[1] >=0 and coord[1]<=9) and (coord[0] >=0 and coord[0]<=9): # Validate that position is within bounds\n self.board[coord[1]][coord[0]] = \"? 
\"", "def __init__(self, map_config):\n self.current_obstacles = []\n self.current_goal = None\n self.cfg = map_config", "def new_tile(self):\r\n random_row = random.randrange(0, self._grid_height)\r\n random_col = random.randrange(0, self._grid_width)\r\n random_choice = random.choice([2]*90 + [4] * 10)\r\n \r\n if 0 in [num for elem in self._cells for num in elem]: \r\n if self._cells[random_row][random_col] == 0:\r\n self._cells[random_row][random_col] = random_choice \r\n else:\r\n self.new_tile()\r\n else:\r\n pass", "def _check_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n alive_neighbours = self._get_neighbours(row_number,col_number)\n \n self.to_be_updated[row_number][col_number] = False\n if self.cells[row_number][col_number].get_status():\n if alive_neighbours < 2:\n self.to_be_updated[row_number][col_number] = True\n elif alive_neighbours > 3:\n self.to_be_updated[row_number][col_number] = True\n else:\n if alive_neighbours == 3:\n self.to_be_updated[row_number][col_number] = True", "def set_our_tile(self, x, y, value):\n\t\tif x >= 0 and x < self.w and y >= 0 and y < self.h:\n\t\t\tself.our_tiles[x][y] = value", "def increased_obstacles_map(occupancy_grid):\n\n nb_rows = len(occupancy_grid)\n nb_cols = len(occupancy_grid[0])\n increased_occupancy_grid = np.zeros([nb_rows + 6, nb_cols + 6])\n\n for i in range(nb_rows):\n for j in range(nb_cols):\n\n if occupancy_grid[i, j] == OCCUPIED:\n increased_occupancy_grid[i:i + 7, j:j + 7] = np.ones([7, 7])\n\n final_occupancy_grid = increased_occupancy_grid[3:(LENGTH_case + 3), 3:(WIDTH_case + 3)]\n return final_occupancy_grid", "def random_map(self, world):\n obstacles = []\n if self.cfg[\"obstacle\"][\"octagon\"][\"enabled\"]:\n obstacles += self.__generate_octagon_obstacles(world)\n if self.cfg[\"obstacle\"][\"rectangle\"][\"enabled\"]:\n obstacles += self.__generate_rectangle_obstacles(world)\n\n # update the current obstacles and goal\n self.current_obstacles = obstacles\n self.add_new_goal()\n\n # apply the new obstacles and goal to the world\n self.apply_to_world(world)", "def flagCell(self, row, col):\n self.flagged[row, col] = 1", "def set_tile(self, row, col, value):\r\n self._board[row][col] = value", "def locateRobot(self):\n logging.info(\"Display Carte : {}\".format(self.name))\n for r, row in enumerate(self.map):\n #print(row)\n for c, cell in enumerate(row):\n if (cell == \"X\"):\n logging.info(\"r={} / c={}\".format(r, c))\n self.robot.posX = c\n self.robot.posY = r", "def set_cell(self, cell, val):\n a = b = 0\n try:\n a, b = self.__ret_cell(cell)\n self._grid[a][b] = val\n except IndexError as e:\n self.perror(\"Error: '%s'.\" % e, cell, a, b, 5)\n self.perror(\"Error.\", cell, a, b, 5)\n sys.exit()", "def draw_obstacles(self):\n for obstacle in self.obstacles:\n obstacle.draw(self.window, Colors.BLACK.value)", "def move_cell(self):\n return self.abivars.optcell != 0", "def draw_obstacles():\n for obstacle in obstacles:\n plt.gca().add_patch(obstacle)", "def is_map_obstacle_in_screen_range(self):\n raise NotImplementedError", "def test_set_cell_west(mock_amg):\n\n # change the neighbour to the west.\n # this is not the correct neighbour\n mock_amg.cells[4].west = mock_amg.cells[2]\n assert mock_amg.cells[4].west == mock_amg.cells[2]", "def occupied_cells(self):\n\n for lm in self.landmarks:\n if self.cell_size < 1:\n # expand the range the landmark exists\n lm_x_range = np.arange(lm[0]-self.R, lm[0]+self.R, self.cell_size)\n lm_y_range = 
np.arange(lm[1]-self.R, lm[1]+self.R, self.cell_size)\n\n # loop through expanded ranges and compute grid positions\n for lm_x in lm_x_range:\n for lm_y in lm_y_range:\n\n row, col = self.cell_index([lm_x, lm_y])\n\n # apply cost of occupied cell\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass\n\n else:\n # apply cost of occupied cell\n row, col = self.cell_index(lm)\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass", "def flag_cell(self, index):\n if self.get_game()[index] == FLAG:\n self._game_board = self.replace_character_at_index(index, UNEXPOSED)\n\n elif self.get_game()[index] == UNEXPOSED:\n self._game_board = self.replace_character_at_index(index, FLAG)\n\n return self._game_board", "def emptyCell (self, row, column, gameGrid=None, emptyValue=0):\n if not gameGrid:\n gameGrid = self.gameGrid\n self.addObject(emptyValue, row, column, gameGrid=gameGrid)", "def add_obstacle(self, obstacle_to_add):\n if self.obstacles.size != 0:\n self.obstacles = np.hstack((self.obstacles, obstacle_to_add))\n else:\n self.obstacles = np.array([obstacle_to_add])", "def reset(self):\r\n # replace with your code\r\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\r\n self.new_tile()\r\n self.new_tile()", "def create_room(room):\n global map\n for x in range(room.x1+1, room.x2):\n for y in range(room.y1+1, room.y2):\n map[x][y].blocked = False\n map[x][y].block_sight = False", "def drawCell(self,land,uland,vland,marked):\n from math import sqrt, pow\n #--Tranlate grid point (u,v) to pixel point\n if not self.changed: self.edit()\n #--u/v max/min are grid range of visible map. \n #--wcell is bit width of cell. 512 is bit width of visible map.\n (umin,umax,vmin,vmax,wcell,wmap) = (-28,27,-27,28,9,512)\n if not ((umin <= uland <= umax) and (vmin <= vland <= vmax)):\n return\n #--x0,y0 is bitmap coordinates of top left of cell in visible map.\n (x0,y0) = (4 + wcell*(uland-umin), 4 + wcell*(vmax-vland))\n #--Default to deep\n mapc = [Fmap.DEEP]*(9*9)\n heights = land and land.getHeights()\n if heights:\n #--Land heights are in 65*65 array, starting from bottom left. \n #--Coordinate conversion. 
Subtract one extra from height array because it's edge to edge.\n converter = [(65-2)*px/(wcell-1) for px in range(wcell)]\n for yc in range(wcell):\n ycoff = wcell*yc\n yhoff = (65-1-converter[yc])*65\n for xc in range(wcell):\n height = heights[converter[xc]+yhoff]\n if height >= 0: #--Land\n (r0,g0,b0,r1,g1,b1,scale) = (66,48,33,32,23,16,sqrt(height/3000.0))\n scale = int(scale*10)/10.0 #--Make boundaries sharper.\n r = chr(max(0,int(r0 - r1*scale)) & ~1)\n else: #--Sea\n #--Scale color from shallow to deep color.\n (r0,g0,b0,r1,g1,b1,scale) = (37,55,50,12,19,17,-height/2048.0)\n r = chr(max(0,int(r0 - r1*scale)) | 1)\n g = chr(max(0,int(g0 - g1*scale)))\n b = chr(max(0,int(b0 - b1*scale)))\n mapc[xc+ycoff] = r+g+b\n #--Draw it\n mapd = self.mapd\n for yc in range(wcell):\n ycoff = wcell*yc\n ymoff = wmap*(y0+yc)\n for xc in range(wcell):\n cOld = mapd[x0+xc+ymoff]\n cNew = mapc[xc+ycoff]\n rOld = ord(cOld[0])\n #--New or old is sea.\n if (ord(cNew[0]) & 1) or ((rOld & 1) and\n (-2 < (1.467742*rOld - ord(cOld[1])) < 2) and\n (-2 < (1.338710*rOld - ord(cOld[2])) < 2)):\n mapd[x0+xc+ymoff] = cNew\n if marked:\n self.drawBorder(Fmap.MARKED,x0+2,y0+2,x0+7,y0+7,1)\n pass", "def solve_interior_tile(self, target_row, target_col):\r\n whole_move = ''\r\n # replace with your code\r\n if self._grid[target_row][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA\"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[target_row][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (target_row - zero_row) * 'd'\r\n #path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd'\r\n zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n assert self.lower_row_invariant(target_row, target_col), \"Invarian is False\"\r\n \r\n #current_position = self.current_position(target_row, target_col)\r\n #current_row, current_col = self.current_position(target_row, target_col)\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, target_col)\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (target_row - self.current_position(target_row, target_col)[0]) * 'u'\r\n zero_row = target_row - len(path_up)\r\n if target_col < self.current_position(target_row, target_col)[1]: # Right move\r\n path_side = (self.current_position(target_row, target_col)[1] - target_col) * 'r'\r\n zero_col = target_col + len(path_side)\r\n else: # Left move\r\n path_side = (target_col - self.current_position(target_row, target_col)[1]) * 'l'\r\n zero_col = target_col - len(path_side)\r\n \r\n #path_for_zero = path_up + path_side\r\n # print '------------------------------------------'\r\n # print 'Move to ZERO =', 
path_for_zero\r\n \r\n self.update_puzzle(path_up + path_side)\r\n \r\n # print 'Grid after move:'\r\n # print self\r\n # current_position = self.current_position(target_row, target_col) \r\n # current_row, current_col = current_position\r\n # print 'Updated Target tile position=',current_position\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, target_col)\r\n # print '-----------------------------------------'\r\n \r\n\r\n ###### New moves after moving ZERO tile into target tile\r\n # counter = 0\r\n whole_move += path_up + path_side\r\n while self.current_position(target_row, target_col) != \\\r\n (target_row, target_col) or zero_col != target_col - 1:\r\n # tt_in_home = self.current_position(target_row, target_col) == \\\r\n # (target_row, target_col)\r\n\r\n cyclic_moves = ''\r\n # counter += 1\r\n #current_position = self.current_position(target_row, target_col) \r\n #current_col = self.current_position(target_row, target_col)[1]\r\n # print 'Zero coloumn', zero_col, '== Target coloumn', target_col\r\n # print zero_col == target_col \r\n \r\n #### Case 1 if ZERO located in ther right of\r\n #### target tile (after it)\r\n if zero_col > self.current_position(target_row, target_col)[1]:\r\n # print ' Look in the up puzzle, zero on the right side'\r\n # if self.current_position(target_row, target_col)[1] != target_col:\r\n # # print 'not under target place'\r\n # cyclic_moves = 'dllur'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n if self.current_position(target_row, target_col)[1] == target_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # elif self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # # print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n # cyclic_moves = 'ul'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #### Case 2 if ZERO located under the target tile, and both\r\n #### of them located under the target position of the target tile\r\n elif zero_col == self.current_position(target_row, target_col)[1] and zero_col == target_col:\r\n # print 'Both under the target place'\r\n # print 'TT in home=', tt_in_home\r\n if self.current_position(target_row, target_col) == \\\r\n (target_row, target_col):\r\n cyclic_moves = 'ld'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n else:\r\n cyclic_moves = 'lddru'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n elif zero_col < self.current_position(target_row, target_col)[1]:\r\n # print 'ZERO tile located in the left side'\r\n if self.current_position(target_row, target_col)[1] != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 
'drrul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self.current_position(target_row, target_col)[1] == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n #zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loot counter =',counter\r\n whole_move += cyclic_moves\r\n # if counter > 12:\r\n # break\r\n # print 'Tile is solved with move '+ whole_move\r\n assert self.lower_row_invariant(target_row, target_col-1), \"Invarian is False\"\r\n return whole_move", "async def update(self, robot):\r\n if self.first:\r\n robot.was_turning = False\r\n robot.was_driving = False\r\n\r\n rotation_rad = math.radians(robot.rotation)\r\n rotation_cos = math.cos(rotation_rad)\r\n rotation_sin = math.sin(rotation_rad)\r\n if robot.was_driving:\r\n speed_delta = robot.delta_time * robot.ROBOT_SPEED\r\n\r\n robot.add_odom_position(robot, (rotation_cos * speed_delta, rotation_sin * speed_delta))\r\n robot.grid.setStart(robot.grid_position)\r\n else:\r\n robot.drive_timer = robot.DRIVE_COOLDOWN\r\n if robot.was_turning:\r\n robot.add_odom_rotation(robot, robot.TURN_YAW * robot.delta_time)\r\n\r\n changed = False\r\n if robot.ball is not None:\r\n if robot.prev_ball is not None:\r\n robot.ball_grid = robot.grid.worldToGridCoords(robot.ball)\r\n robot.ball_prev_grid = robot.grid.worldToGridCoords(robot.prev_ball)\r\n changed = robot.ball_grid != robot.ball_prev_grid\r\n else:\r\n changed = True\r\n \r\n if not changed and robot.prev_grid_position != robot.grid_position:\r\n changed = True\r\n\r\n if self.first:\r\n changed = True\r\n self.first = False\r\n\r\n rounded_grid = (round(robot.grid_position[0]), round(robot.grid_position[1]))\r\n if changed:\r\n robot.grid.clearObstacles()\r\n if robot.ball is not None:\r\n grid_points = getGridPoints(robot.ball_grid[0], robot.ball_grid[1], robot)\r\n for point in grid_points:\r\n if robot.grid.coordInBounds(point):\r\n robot.grid.addObstacle(point)\r\n\r\n # Wall obstacles.\r\n for i in range(0, robot.grid.width):\r\n robot.grid.addObstacle((i, 0))\r\n robot.grid.addObstacle((i, robot.grid.height - 1))\r\n for i in range(1, robot.grid.height - 1):\r\n robot.grid.addObstacle((0, i))\r\n robot.grid.addObstacle((robot.grid.width - 1, i))\r\n\r\n goal_to_ball = np.subtract(robot.ball, robot.goal_position)\r\n goal_distance = np.linalg.norm(goal_to_ball)\r\n if goal_distance == 0:\r\n return\r\n goal_direction = np.divide(goal_to_ball, goal_distance)\r\n goal_direction = np.multiply(goal_direction, (robot.RADIUS + robot.BALL_RADIUS) * 1.2)\r\n robot.target_position = np.add(robot.ball, goal_direction)\r\n robot.target_position = robot.grid.worldToGridCoords(robot.target_position)\r\n\r\n if robot.target_position is not None:\r\n robot.grid.clearGoals()\r\n robot.grid.setStart(rounded_grid)\r\n rounded_target = (round(robot.target_position[0]), round(robot.target_position[1]))\r\n robot.grid.addGoal(rounded_target)\r\n astar(robot.grid, heuristic)\r\n\r\n path = robot.grid.getPath()\r\n robot.was_turning = False\r\n if path is not None and len(path) > 1:\r\n robot.next_cell = path[0]\r\n if path[0] == rounded_grid:\r\n robot.next_cell = path[1]\r\n\r\n turn = 
getTurnDirection(rotation_cos, rotation_sin, rounded_grid, robot.next_cell)\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n robot.stop_all_motors()\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n robot.was_driving = False\r\n else:\r\n await robot.drive_wheels(robot.ROBOT_SPEED, robot.ROBOT_SPEED, robot.ROBOT_ACCELERATION, robot.ROBOT_ACCELERATION)\r\n robot.was_driving = True\r\n else:\r\n robot.was_driving = False\r\n\r\n turn = getTurnDirection(rotation_cos, rotation_sin, robot.grid_position, robot.target_position)\r\n robot.stop_all_motors()\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n\r\n robot.stop_all_motors()\r\n distance = grid_distance(robot.grid_position[0], robot.grid_position[1], robot.target_position[0], robot.target_position[1]) * robot.grid.scale\r\n await robot.drive_straight(distance_mm(distance), speed_mmps(robot.HIT_SPEED), should_play_anim = False).wait_for_completed()\r\n robot.add_odom_forward(robot, distance)\r\n\r\n turn = getTurnDirection(rotation_cos, rotation_sin, robot.grid_position, robot.ball_grid)\r\n robot.stop_all_motors()\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n return goto_ball.HitBall()", "def reset(self) -> None:\n self.map = []\n for col in range(self.width):\n self.map.append([])\n for cell in range(self.height):\n if col > 1 and col < self.width - 2:\n if cell == 0:\n # World Barrier - Top Middle\n self.map[col].append(StaticTile('wall_3', self.graphicsLibrary.get('wall_3'), (self.scaleWidth,self.scaleHeight), barrier=True))\n elif cell == self.height - 1:\n # World Barrier - Bottom Middle\n self.map[col].append(StaticTile('wall_12', self.graphicsLibrary.get('wall_12'), (self.scaleWidth,self.scaleHeight), barrier=True))\n else:\n # Playable Map Area\n if (col % 2) != 0 and (cell % 2) == 0:\n # Hard-Barrier Generation\n self.map[col].append(StaticTile('solid', self.graphicsLibrary.get('solid'), (self.scaleWidth,self.scaleHeight), barrier=True))\n elif (col,cell) in self.spawn_buffers:\n # Preserve Potential Spawn Points\n self.map[col].append(StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False))\n elif random.randint(0, 2) == 0:\n # Soft-Barrier Generation\n self.map[col].append(DynamicTile('destructable_new', self.graphicsLibrary.get('destructable_new'), (self.scaleWidth,self.scaleHeight), destructable=\"True\", barrier=True, death_animation=self.animations_library.get('destructable_death')))\n else:\n # Fill Remaining Terrain\n self.map[col].append(StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False))\n else:\n # World Barrier - Side Sections\n if col == 0 or col == self.width - 1:\n # Roof\n right_most_columns = False\n if col == self.width - 1:\n right_most_columns = True\n\n if cell == self.height - 1:\n self.map[col].append(StaticTile('wall_10', self.graphicsLibrary.get('wall_10'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == self.height - 2:\n 
self.map[col].append(StaticTile('wall_1', self.graphicsLibrary.get('wall_1'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 0:\n self.map[col].append(StaticTile('wall_1', self.graphicsLibrary.get('wall_1'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n else:\n self.map[col].append(StaticTile('wall_5', self.graphicsLibrary.get('wall_5'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif col == 1 or col == self.width - 2:\n # Floor \n right_most_columns = False\n if col == self.width - 2:\n right_most_columns = True\n\n if cell == self.height -1:\n self.map[col].append(StaticTile('wall_11', self.graphicsLibrary.get('wall_11'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == self.height - 2:\n self.map[col].append(StaticTile('wall_9', self.graphicsLibrary.get('wall_9'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 0:\n self.map[col].append(StaticTile('wall_2', self.graphicsLibrary.get('wall_2'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 1:\n self.map[col].append(StaticTile('wall_6', self.graphicsLibrary.get('wall_6'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n else:\n self.map[col].append(StaticTile('wall_7', self.graphicsLibrary.get('wall_7'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n self.map[col][cell].place_at(topleft=(self.scaleWidth * col, self.scaleHeight * cell))", "def __init__(self, island_map):\n self.map = island_map\n self.cells = None\n self.array_to_island()\n self.herbivores_on_island = None\n self.carnivores_on_island = None", "def set_at(self,x,y,set=True):\n\t\tif ( not self._validate(x,y )):\n\t\t\treturn\n\n\t\t# set the bit in the grid\n\t\tif set:\n\t\t\tself.Grid[y] = self.Grid[y] | (1 << x)\n\t\telse:\n\t\t\tself.Grid[y] = self.Grid[y] & ~(1 << x)", "def update_grid_pos(self):\n self.grid_pos = self.get_tile_of_position(self.tank.body.position)", "def update_positions(self, grid):\r\n self.grid = grid", "def mark_pos(self, position, marker):\n i, j = self.board[position]\n self.grid[i][j] = marker", "def cozmoBehavior(robot: cozmo.robot.Robot):\r\n\r\n global grid, stopevent\r\n\r\n robot.set_head_angle(cozmo.robot.MIN_HEAD_ANGLE + cozmo.util.degrees(15.0)).wait_for_completed()\r\n\r\n world = World(robot.pose, Vector2.fromCell(grid.getStart()), grid.scale)\r\n world.replan(robot, grid)\r\n\r\n mapThread = threading.Thread(target=updateMapThread, name=\"Map Update\", args=(robot, world, grid))\r\n movementThread = threading.Thread(target=updateMovementThread, name=\"Movement Update\", args=(robot, world))\r\n\r\n mapThread.start()\r\n movementThread.start()\r\n\r\n mapThread.join()\r\n movementThread.join()", "def __cell_is_in_map(self, x, y) -> bool:\n return x >= 0 and y >= 0 and x < self.occupancy_map.info.width and y < self.occupancy_map.info.height", "def update_board(self, coordinate, hit):\n \n if hit:\n self.board_state[coordinate.row_idx][coordinate.col_idx] = \"H\"\n else:\n self.board_state[coordinate.row_idx][coordinate.col_idx] = \"M\"", "def get_obstacles_map(obstacles, placed_pecies):\n \n #create a mask image to draw the obstacles on\n blocks = np.zeros(ARENA_SIZE[::-1], np.uint8)\n\n #get the grid points where the robot needs to placed\n grid = get_grid(ARENA_SIZE)\n\n #draw the obstacles and their safety region on the 
map\n for i in obstacles.keys():\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(obstacles[i][0]/4), i[1]-int(obstacles[i][1]/4)), (i[0]+int(obstacles[i][0]/4), i[1]+int(obstacles[i][1]/4)), 255, -1)\n\n #draw the obstacles and their safety region on the map\n for i in placed_pecies.keys():\n try:\n if not i == grid[5]:\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n else:\n cv2.rectangle(blocks, (int(i[0]-7.4*placed_pecies[i][0]/4), int(i[1]-7.4*placed_pecies[i][1]/4)),\n (int(i[0]+7.4*placed_pecies[i][0]/4), int(i[1]+7.4*placed_pecies[i][1]/4)), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(placed_pecies[i][0]/4), i[1]-int(placed_pecies[i][1]/4)), (i[0]+int(placed_pecies[i][0]/4), i[1]+int(placed_pecies[i][1]/4)), 255, -1)\n except Exception as e:\n print(e)\n\n return cv2.bitwise_not(blocks)", "def cells_on(self,ax):\n self.cells.on(ax,self.use_cell_coordinates_flag)", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def set(self,argument):\n if argument == \"X\" or \"O\":\n self.tile=argument", "def test_set_cell_east(mock_amg):\n\n # change the neighbour to the east.\n # this is not the correct neighbour\n mock_amg.cells[0].east = mock_amg.cells[2]\n assert mock_amg.cells[0].east == mock_amg.cells[2]", "def play_round_Conway_Cell(self):\n for x in self.board:\n for f in x:\n f.live_neighbors = 0\n\n for i in range(1, self.cols - 1):\n for j in range(1, self.rows - 1):\n status = self.board[i][j].status\n assert type(status)==int \n\n for m in range(i - 1, i + 2):\n for n in range(j - 1, j + 2):\n self.board[m][n].live_neighbors += status\n self.board[i][j].live_neighbors -= status", "def toggle_xy(self, x, y):\r\n\t\tself.grid[y, x] = False if self.grid[y,x] else True" ]
[ "0.6904903", "0.6904903", "0.6622904", "0.6531364", "0.6457762", "0.6457762", "0.6451341", "0.6433322", "0.6425923", "0.6391529", "0.637739", "0.63555276", "0.63377327", "0.6329692", "0.62948984", "0.6285746", "0.62847567", "0.625142", "0.62377983", "0.62250537", "0.6216072", "0.6204581", "0.6191628", "0.6181617", "0.61785793", "0.61785793", "0.61564744", "0.61298907", "0.610743", "0.610672", "0.61042804", "0.6101113", "0.6100387", "0.6098113", "0.60854995", "0.607509", "0.60663915", "0.6049053", "0.60400957", "0.6033093", "0.6018525", "0.6009834", "0.5987213", "0.5987213", "0.595558", "0.59477824", "0.594555", "0.5941952", "0.59371084", "0.5930305", "0.59119606", "0.58961827", "0.5885294", "0.5884688", "0.588266", "0.5881705", "0.5873106", "0.58365273", "0.5829704", "0.58035845", "0.5798188", "0.57975113", "0.57953566", "0.5784744", "0.5768271", "0.57654494", "0.57277274", "0.5726322", "0.5721167", "0.5720861", "0.5719416", "0.5711409", "0.57054", "0.570226", "0.56879663", "0.56714684", "0.56653655", "0.56616753", "0.5655954", "0.56503135", "0.5647275", "0.5639507", "0.5637867", "0.5637838", "0.5628691", "0.56214195", "0.56163585", "0.5612243", "0.5609804", "0.56074077", "0.559665", "0.55875844", "0.55858314", "0.5580763", "0.55776376", "0.55776376", "0.55768824", "0.55726045", "0.5570791", "0.55656844" ]
0.7163415
0
Set the map grid cell as free.
def set_free(self, pos: tuple):
    if self.within_map(pos):
        self.map[round(pos[0]), round(pos[1])] = FREE
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\r\n # replace with your code\r\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\r\n self.new_tile()\r\n self.new_tile()", "def reset(self):\r\n # replace with your code\r\n for row in range(0, self._grid_height):\r\n for col in range(0, self._grid_width):\r\n self._grid_tile[row][col] = 0\r\n # at this step, all cells should be available\r\n self.new_tile()\r\n self.new_tile()", "def reset(self):\r\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \r\n for dummy_row in range(self._grid_height) ]\r\n \r\n \r\n self.new_tile()\r\n self.new_tile()", "def reset(self):\n # replace with your code\n dummy_row = self._grid_height\n dummy_col = self._grid_width\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \n for dummy_row in range(self._grid_height)]\n \n self.new_tile()\n self.new_tile()", "def reset(self):\r\n self.grid = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n self.new_tile()\r\n self.new_tile()", "def reset(self):\n # replace with your code\n self._grid = [[0] * self._width for _ in xrange(self._height)]\n self.new_tile()\n self.new_tile()", "def clear(self):\r\n\t\tself.grid.fill(False)", "def reset(self):\n self.tile=\"\"", "def reset(self):\n # replace with your code\n self._grid = [[0 for dummy_column in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n for dummy_num in range(2):\n self.new_tile()", "def setBlank(self, pos):\n self.tiles[-1] = pos", "def set_empty(self, row, col):\n self._cells[row][col] = EMPTY", "def reset(self):\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n self.new_tile()\n self.new_tile()\n #return self._cells", "def set_full(self, row, col):\n self._cells[row][col] = FULL", "def reset(self):\n self._grid = [[0] * self._width for _ in range(self._height)]\n self.new_tile()\n self.new_tile()", "def reset(self):\n self._grid = [[0 for dummy_col in range(self._width)]\n for dummy_row in range(self._height)]\n self.new_tile()\n self.new_tile()", "def clearCell(self, (xIndex, yIndex)):\n changed = self.grid[xIndex][yIndex] == True\n self.grid[xIndex][yIndex] = False\n if changed:\n self.drawSquare((xIndex, yIndex))", "def make_free_cell_list():\r\n for row in range(9):\r\n for col in range(9):\r\n if (application.ui.__getattribute__(f'cell{col+1}{row+1}')).text() == \"\":\r\n lst_free_cells.append(Point(row, col))", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value", "def clear(self):\n self._grid = [[None]]", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid_2048[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value;", "def setCell(self, (xIndex, yIndex)):\n changed = self.grid[xIndex][yIndex] == False\n self.grid[xIndex][yIndex] = True\n if changed:\n self.drawSquare((xIndex, yIndex))", "def clear(self):\n self._cells = [[EMPTY for dummy_col in range(self._grid_width)]\n for dummy_row in range(self._grid_height)]", "def _clear_map(self, default=100):\r\n self.tiles = [\r\n [default\r\n for _ in range(self.height)]\r\n for _ in range(self.width)]\r\n\r\n for (x, y, score) in self.goals:\r\n self.tiles[x][y] = 
score\r\n\r\n for (x,y) in self.walls:\r\n self.tiles[x][y] = np.nan", "def set_tile(self, row, col, value):\r\n self.grid[row][col] = value", "def cell_setLeaf(self, curr):\r\n curr.n_count = 0\r\n return", "def reset(self) -> None:\n self.map = []\n for col in range(self.width):\n self.map.append([])\n for cell in range(self.height):\n if col > 1 and col < self.width - 2:\n if cell == 0:\n # World Barrier - Top Middle\n self.map[col].append(StaticTile('wall_3', self.graphicsLibrary.get('wall_3'), (self.scaleWidth,self.scaleHeight), barrier=True))\n elif cell == self.height - 1:\n # World Barrier - Bottom Middle\n self.map[col].append(StaticTile('wall_12', self.graphicsLibrary.get('wall_12'), (self.scaleWidth,self.scaleHeight), barrier=True))\n else:\n # Playable Map Area\n if (col % 2) != 0 and (cell % 2) == 0:\n # Hard-Barrier Generation\n self.map[col].append(StaticTile('solid', self.graphicsLibrary.get('solid'), (self.scaleWidth,self.scaleHeight), barrier=True))\n elif (col,cell) in self.spawn_buffers:\n # Preserve Potential Spawn Points\n self.map[col].append(StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False))\n elif random.randint(0, 2) == 0:\n # Soft-Barrier Generation\n self.map[col].append(DynamicTile('destructable_new', self.graphicsLibrary.get('destructable_new'), (self.scaleWidth,self.scaleHeight), destructable=\"True\", barrier=True, death_animation=self.animations_library.get('destructable_death')))\n else:\n # Fill Remaining Terrain\n self.map[col].append(StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False))\n else:\n # World Barrier - Side Sections\n if col == 0 or col == self.width - 1:\n # Roof\n right_most_columns = False\n if col == self.width - 1:\n right_most_columns = True\n\n if cell == self.height - 1:\n self.map[col].append(StaticTile('wall_10', self.graphicsLibrary.get('wall_10'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == self.height - 2:\n self.map[col].append(StaticTile('wall_1', self.graphicsLibrary.get('wall_1'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 0:\n self.map[col].append(StaticTile('wall_1', self.graphicsLibrary.get('wall_1'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n else:\n self.map[col].append(StaticTile('wall_5', self.graphicsLibrary.get('wall_5'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif col == 1 or col == self.width - 2:\n # Floor \n right_most_columns = False\n if col == self.width - 2:\n right_most_columns = True\n\n if cell == self.height -1:\n self.map[col].append(StaticTile('wall_11', self.graphicsLibrary.get('wall_11'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == self.height - 2:\n self.map[col].append(StaticTile('wall_9', self.graphicsLibrary.get('wall_9'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 0:\n self.map[col].append(StaticTile('wall_2', self.graphicsLibrary.get('wall_2'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 1:\n self.map[col].append(StaticTile('wall_6', self.graphicsLibrary.get('wall_6'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n else:\n self.map[col].append(StaticTile('wall_7', self.graphicsLibrary.get('wall_7'), 
(self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n self.map[col][cell].place_at(topleft=(self.scaleWidth * col, self.scaleHeight * cell))", "def freeGridSave( self ):\n assert(self.hasSaveMemory)\n assert(not self.notSaved)\n self.notSaved = True", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._cells[row][col] = value", "def m_fixed(self):\n self.mx_free = self.my_free = self.mz_free = False\n return self", "def set_tile(self, row, col, value):\r\n self._grid[row][col]=value", "def dec_gains_of_free_cells(self):\r\n for cell in self.cells:\r\n if not cell.locked:\r\n cell.gain -= 1\r\n cell.yank()", "def make_cell_change(self, x, y):\n self.cells[x][y] = 1 if not self.cells[x][y] else 0", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._grid_tile[row][col] = value", "def reset(self):\n # self.grid = [[0] * self.grid_width] * self.grid_height\n self.grid = []\n for dummy_row in range(self.grid_height):\n new_row = []\n for dummy_col in range(self.grid_width):\n new_row.append(0)\n self.grid.append(new_row)\n self.new_tile()\n self.new_tile()", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._cells[row][col] = value", "def clear_tiles(self):\n for y in range(Settings.SIZE_Y):\n for x in range(Settings.SIZE_X):\n self.__tile_grid[y][x].configure(\n image=self.__marker_images[MarkerType.NONE])", "def put_cell(self, x, y, num):\n if self.is_empty(x,y):\n self.grid[y][x] = num\n return True\n return False", "def reset(self):\n \n #initiate all tiles' value to 0\n self._grid_2048 = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n \n # two new tiles\n self.new_tile()\n self.new_tile()", "def set_cell_to_hole(self):\n self.tick = \"H\"\n self.is_hole = True\n self.is_active = False", "def updateMap(self,map):\n if not self.opened:\n col = int( self.world_rect.left / map.header_data['tilewidth'])\n row = int( self.world_rect.top / map.header_data['tileheight'])\n layerIndex = len(map.layer_data)-1\n while(layerIndex > 0):\n layer = map.layer_data[layerIndex]\n if(layer[row][col] > 1):\n layer[row][col] = 0\n break\n layerIndex -= 1\n for g in self.groups():\n g.remove(self)", "def set_tile(self, row, col, value):\n self._grid[row][col] = value", "def clear(self):\n board.change_grid(self.x, self.y, 0)", "def reset(self):\n width = len(self.cell)\n height = len(self.cell[0])\n self.cell = [ [EMPTY for r in range(height)] for c in range(width) ]", "def initialize(self):\r\n for cell in self.free_cell_list:\r\n cell.unlock()\r\n self.add_cell(cell)\r\n self.free_cell_list.clear()", "def cut(self,cell):\r\n self.grid[cell[0]][cell[1]] = 1", "def reset(self):\r\n # creating the grid with the values all initialized to zero\r\n \r\n self._grid = [[ 0 for dummy_col in range(self._width)]\r\n for dummy_row in range(self._height)]\r\n # introducing the two initial tiles\r\n self.new_tile()\r\n self.new_tile()\r\n #for testing purposes\r\n #print self.grid\r\n #print self\r", "def setTile(self, cell, tile):\n assert isinstance(cell, tuple)\n cellx, celly = cell\n\n if cellx < 0 or cellx > self.map_array.shape[0]-1 or celly < 0 or celly > self.map_array.shape[1]-1:\n return\n\n if self.tile_dict.get((cellx, celly)):\n self.canvas.delete(self.tile_dict[(cellx, celly)])\n\n if tile:\n self.map_array[cellx, celly] = tile.tid\n 
if tile.tid == 0.0:\n return\n map_posx, map_posy = iso(cellx * self.cell_width, celly * self.cell_height)\n image = self.main.main_tilelist.images[tile.tid]\n self.tile_dict[(cellx, celly)] = self.canvas.create_image(map_posx, map_posy, image=image, anchor=tk.N)", "def delete_value(loc):\r\n (application.ui.__getattribute__(f'cell{loc.column+1}{loc.row+1}')).setText(\"\")\r\n sudoku_grid[loc.row, loc.column] = 0\r\n global cnt_free_cells\r\n cnt_free_cells += 1", "def set_tile(self, row, col, value):\n if row >= 0 and row < self.get_grid_height():\n if col >= 0 and col < self.get_grid_width():\n # Only set if the row and column are ok\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n pass", "def mark_mine(self, cell):\n if cell in self.cells:\n self.cells.remove(cell)\n self.count=self.count-1", "def set_cells(self, val=None):\t\r\n self._cells = \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def set_tile(self, row, col, value):\r\n del self.board[row][col]\r\n self.board[row].insert(col,value)\r\n return self.board", "def reset(self):\r\n\r\n self._board = [[0 for x in range(self._grid_width)]\r\n for y in range(self._grid_height)]\r\n self.new_tile()", "def gc(self):\n self._complete_grid = None", "def reset(self):\r\n self.grid = np.array([[' '] * self.width for row in range(self.height)])\r\n self.num_checkers = 0", "def delete_grid(self):\n\n\t\tself.a_grid = None\t\t# Deletes the object from memory", "def reset_map(self):\n self.x = None\n self.X = None\n self.y = None\n self.Y = None\n self.data = None\n self.sampling = None\n self.size = None", "def new_tile(self):\n while True:\n random_row = random.randrange(self._grid_height)\n random_column = random.randrange(self._grid_width)\n if self._grid[random_row][random_column] == 0:\n self._grid[random_row][random_column] = random.choice([2] * 9 + [4])\n break", "def set_grid(self,ug):\n self.grd=ug\n self.set_topology()", "def cells_off(self):\n self.plotter.cells_off(self.ax)\n self.fig.canvas.draw()", "def toggle_zero_grid(self, x):\r\n self.konfig.zero.set_grid(x)\r\n self.zeroGraf.toggle_grid(x)", "def set_tile(self, row, col, value):\r\n self._cells[row][col] = value", "def new_tile(self):\r\n random_row = random.randrange(0, self._grid_height)\r\n random_col = random.randrange(0, self._grid_width)\r\n random_choice = random.choice([2]*90 + [4] * 10)\r\n \r\n if 0 in [num for elem in self._cells for num in elem]: \r\n if self._cells[random_row][random_col] == 0:\r\n self._cells[random_row][random_col] = random_choice \r\n else:\r\n self.new_tile()\r\n else:\r\n pass", "def new_tile(self):\n col = random.choice(range(self.grid_width))\n row = random.choice(range(self.grid_height))\n if self.grid[row][col] == 0:\n if random.random() >= 0.9:\n self.grid[row][col] = 4\n else:\n self.grid[row][col] = 2\n else:\n self.new_tile()", "def set_tile(self, point, glyph=\".\"):\n self.matrix[point.y][point.x] = glyph", "def clear_cell(self, x, y):\n r = self.rect_area(x, y)\n background = pygame.Surface((75, 75)) # creates a white surface\n background.fill((255, 255, 255))\n self.screen.blit(background, (x * 80 + 3, 80 + y * 80 + 3)) # draw\n pygame.display.update(r) # update screen to showcase changes", "def new_tile(self):\n # Getting the list of positions of empty tiles\n indices_list = [(i, j) for i, l in enumerate(self._grid)\n for j in xrange(len(l)) if not l[j]]\n \n # Filling the the empty tile with a 2 or a 
4\n if indices_list:\n self.set_tile(*choice(indices_list),\n value = 2 if random() <.9 else 4)", "def set_cell(self, x, y, val):\n pass", "def set_cell(self, x, y, val):\n pass", "def cells_off(self,ax):\n self.cells.off(ax)", "def mark_safe(self, cell):\n if cell in self.cells:\n self.cells.remove(cell)", "def set_tile(self, row, col, value):\n self._cells[row][col] = value", "def check(self):\n return self.tile==\"\"", "def set_cell(frame, data):\n\twith data.cell_:\n\t\tdata.cell_[:,0] = [L, 0., 0.]\n\t\tdata.cell_[:,1] = [0., L, 0.]\n\t\tdata.cell_[:,2] = [0., 0., L]\n\t\t#cell origin\n\t\tdata.cell_[:,3] = [0, 0 , 0]\n\t\t#set periodic boundary conditions\n\t\tdata.cell_.pbc = (True, True, True)", "def set_tile(self, row, col, value):\n # replace with your code\n if col < self.grid_height and row < self.grid_width:\n self.board[row][col] = value", "def my_free(self):\n self.imy_free = self.jmy_free = True\n return self", "def mark_mine(self, cell):\n if cell in self.cells:\n self.count -= 1\n self.cells.remove(cell)", "def fill_grid(self, gx, gy, color=Color['white']):\n area = [gx * self.px, gy * self.py, self.px, self.py]\n pygame.draw.rect(self.display, color, area)", "def emptyCell (self, row, column, gameGrid=None, emptyValue=0):\n if not gameGrid:\n gameGrid = self.gameGrid\n self.addObject(emptyValue, row, column, gameGrid=gameGrid)", "def set_cell(self, point, cell):\n self._grid[point.x][point.y] = cell", "def mark_safe(self, cell):\n \n if cell in self.cells:\n self.cells.discard(cell)", "def flagCell(self, row, col):\n self.flagged[row, col] = 1", "def clear(self):\n if libt.map_is_in_fov(self.handler.fov_map, self.x, self.y):\n libt.console_put_char(self.handler.game_map, self.x, self.y, \n \" \", libt.BKGND_NONE)", "def cleanTileAtPosition(self, pos):\n self.tiles[pos] = 'clean'", "def i_free(self):\n self.imx_free = self.imy_free = self.imz_free = True\n return self", "def mark_safe(self, cell):\n #if cell in self.cells, else do nothing\n if cell in self.cells:\n #remove the cell since known\n self.cells.discard(cell)", "def fill_grid_np(self):\n\n self.grid_np = [None for i in range(GRID_HEIGHT*GRID_HEIGHT*MAX_CELL_SIZE)]\n grid = self.grid_np\n # cell_size = self.cell_size\n for obj in self.levels[self.curient_level].objects:\n obj.position_grid[X], obj.position_grid[Y] = get_grid_xy(obj.position_np, ZOMBIE_SIZE)\n x, y = obj.position_grid[X], obj.position_grid[Y]\n grid[y*GRID_WIDTH + x] = obj\n # if cell_size[y*GRID_WIDTH + x] < MAX_CELL_SIZE:\n # cell_size[y*GRID_WIDTH + x] += 1", "def mark_safe(self, cell):\n if cell in self.cells:\n self.cells.remove(cell)\n #raise NotImplementedError", "def new_tile(self):\n zero_list = []\n zero_cell = ()\n # self._cells = [[0 for col in range(self._grid_width)] for row in range(self._grid_height)]\n for row in range(self._grid_height):\n for col in range(self._grid_width):\n if self._cells[row][col] == 0:\n zero_cell = (row, col)\n zero_list.append(zero_cell)\n if len(zero_list) > 0:\n chance = random.randrange(0,10)\n cell_idx = random.randrange(len(zero_list))\n if chance == 9:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 4\n else:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 2\n else:\n print(\"You lost! 
Better luck next time!\")", "def reset(self):\n for rows in range(self.height):\n for col in range(self.width):\n self.slots[rows][col] = ' '", "def mark_mine(self, cell):\n \n if cell in self.cells:\n self.cells.discard(cell)\n self.count -= 1", "def set_white(self):\n self.fill=Cell.EMPTY_COLOR_BG\n self.draw()", "def destroy(self):\r\n self.city_map.get_tile_at_position(self.position).car = None", "def update(self, display:pygame.display) -> None:\n for colNum, col in enumerate(self.map):\n for rowNum, tile in enumerate(col):\n if tile.graphicsLive:\n tile.update()\n display.blit(tile.image, tile.rect.topleft)\n if tile.state == 'dead':\n self.map[colNum][rowNum] = StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False)\n self.map[colNum][rowNum].place_at(topleft=(self.scaleWidth * colNum, self.scaleHeight * rowNum))" ]
[ "0.6971801", "0.68636346", "0.68111646", "0.67715645", "0.65903765", "0.6576617", "0.65482694", "0.6525577", "0.6496585", "0.6484593", "0.6477412", "0.64727926", "0.6461644", "0.64541066", "0.6445413", "0.6372728", "0.6361504", "0.6346274", "0.6346274", "0.63288206", "0.6315538", "0.6276604", "0.62759566", "0.62712836", "0.6249723", "0.623785", "0.6231626", "0.6210841", "0.61967844", "0.61951005", "0.6183963", "0.6175984", "0.6172686", "0.61616486", "0.6157891", "0.61498445", "0.61464995", "0.61188555", "0.61188555", "0.61069036", "0.60935766", "0.6060452", "0.6034447", "0.60296273", "0.6020418", "0.60172665", "0.6016108", "0.6009717", "0.5995501", "0.59575397", "0.5946807", "0.59435904", "0.5938531", "0.593013", "0.5926076", "0.5906731", "0.59030694", "0.5896629", "0.5892193", "0.5888403", "0.58836395", "0.5873902", "0.58707905", "0.58603555", "0.5858114", "0.58580095", "0.58524245", "0.58471626", "0.5831299", "0.58141226", "0.5805512", "0.58037204", "0.57926375", "0.57501847", "0.57501847", "0.574876", "0.5746619", "0.57447886", "0.57433975", "0.57416433", "0.57412493", "0.5737951", "0.5734863", "0.5732191", "0.57299477", "0.57294023", "0.57093185", "0.5707732", "0.5707655", "0.5703327", "0.5701275", "0.56993294", "0.56983966", "0.5693251", "0.5681629", "0.5679703", "0.5673905", "0.56660104", "0.56624454", "0.56606877" ]
0.6799247
3
Check if the specified grid cell is occupied.
def is_obstacle(self, pos: tuple):
    if self.within_map(pos):
        return self.map[round(pos[0]), round(pos[1])] == OBSTACLE
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_occupied(self, col, row):\n if self.board[row - 1][col - 1] == EMPTY:\n return False\n else:\n return True", "def _is_occupied(\n grid: List[List[str]], row: int, col: int, dx: int, dy: int) -> bool:\n while 0 <= (row + dy) < len(grid) and 0 <= (col + dx) < len(grid[0]):\n row += dy\n col += dx\n if grid[row][col] == 'L':\n return False\n if grid[row][col] == '#':\n return True\n return False", "def __cell_is_occupied(self, x, y) -> bool:\n return self.occupancy_map.data[self.__get_cell_index(x, y)] != 0", "def is_unoccupied(self, row, col):\n return self.maze[row][col] is EMPTY", "def occupied(self, (xIndex, yIndex)):\n return xIndex < 0 or yIndex < 0 or \\\n xIndex >= self.xN or yIndex >= self.yN or \\\n self.grid[xIndex][yIndex]", "def is_in_the_grid(self, row: int, col: int) -> bool:\n return 0 <= row < self.n_row and 0 <= col < self.n_col", "def is_occupied(self, p):\r\n return 0 <= p[0] < self.width and 0 <= p[1] < self.height and self.grid[p[1]][p[0]] == '#'", "def testEmptyCell(self, row, column, gameGrid=None, emptyValue=0):\n if not gameGrid:\n gameGrid = self.gameGrid\n row = self.limitValue(row, 0, self.rows-1)\n column = self.limitValue(column, 0, self.columns-1)\n if gameGrid.getItem(row, column) == emptyValue:\n return True\n else:\n return False", "def check_tile_availability(self, row, col):\n return self.board[row][col] == 0", "def test_cell_existence(board: list, i: int, j: int) -> bool:\n return not (i < 0 or i > len(board)-1 or j < 0 or j > len(board)-1)", "def see_occupant(self, x, y, dx, dy):\r\n if dx == 0 and dy == 0: # Makes looping easier\r\n return False\r\n x += dx\r\n y += dy\r\n while 0 <= x < self.width and 0 <= y < self.height:\r\n if self.grid[y][x] == '#':\r\n return True\r\n if self.grid[y][x] == 'L':\r\n return False\r\n x += dx\r\n y += dy\r\n return False", "def isComplete(grid):\n for row in range(0,9):\n for col in range(0,9):\n if grid[row][col]==0:\n return False\n return True", "def _checkCells(self):\r\n if(self.startCell.isEmpty()):\r\n raise IllegalMoveException(\"No pawn in start cell\")\r\n if(self.endCell.isOccupied()):\r\n raise IllegalMoveException(\"Targeted cell is already occupied\")\r\n return True", "def tileOccupied(self, i, j):\n if self.tiles[i][j] == 1 or i == 0 or i == self.size[0] - 1 or j == 0 or j == self.size[1] - 1:\n return True\n for prop in self.props:\n if prop.i == i and prop.j == j:\n return True\n return False", "def checkAvailable(self, x, y):\n return 0 <= x < self.rows and 0 <= y < self.cols and not self.gridBusy[x][y]", "def check_grid(grid: List):\n for row in range(9):\n for col in range(9):\n if grid[row][col] == 0:\n return False\n return True", "def cellIsEmpty(self, x, y):\n\n\n #If x and y are out of bounds, we return False\n if x < 0 or y < 0:\n return False\n\n #Else we get the cell that interests us\n else:\n cell = self._get_grille()[y][x]\n\n\n if cell.element is None:\n print(\"Cell element is None\")\n return True\n \n #If it is a box, depart, arrivee, wall or spikes. 
The cell is not empty\n elif cell.element.name == \"box\" or cell.element.name == \"depart\" or cell.element.name == \"arrivee\" or cell.element.name == \"spikes\" or cell.element.name == \"mur\":\n print(\"Cell element : \"+cell.element.name)\n return False\n \n #Else, it means there is nothing in the cell or an element the box can cross\n else:\n return True", "def __cell_is_in_map(self, x, y) -> bool:\n return x >= 0 and y >= 0 and x < self.occupancy_map.info.width and y < self.occupancy_map.info.height", "def xy_occupied(xy, board):\n return True if board[xy[0]][xy[1]] else False", "def test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer):\n if brickheight>wallheight or bricklength>walllength:\n return False\n elif over(brickheight,bricklength,row,column,walllength,wallheight):\n return False\n else:\n for x in range(column,column+bricklength):\n for y in range(row,row+brickheight):\n if (x,y) in occupied:\n return False \n break\n else:\n return True", "def game_over(self) -> bool:\n for row in range(9):\n for col in range(9):\n if self._grid_sol[row][col] != self.get_cell(row, col):\n return False\n return True", "def is_empty(self, row, col):\n return self._cells[row][col] != FULL", "def inBoard(self, row, col):\n return 0 <= row < self.rows and 0 <= col < self.cols", "def in_grid(self, tile):\n return 0 <= tile[0] < self.gs[0] and 0 <= tile[1] < self.gs[1]", "def has_cells(self):\n return len(self._cells) > 0", "def is_empty(self, x, y):\n if x in range(self.nx) and y in range(self.ny):\n if self.grid[y][x] == ' ':\n return True\n return False", "def cell_is_usable(cell):\n\tmaxcolors = tile_size[0] * tile_size[1]\n\tcolors = cell.getcolors(maxcolors)\n\t\n\tis_usable = False\n\t\n\tfor color in colors:\t\n\t\tif len(color[1]) == 3:\n\t\t\tif sum(color[1]) == 765 and color[0] < maxcolors/2:\n\t\t\t\t# less than a half are white\n\t\t\t\tis_usable = True\n\t\telse:\n\t\t\tif color[1][3] == 255 and color[0] >= 0:\n\t\t\t\tis_usable = True\n\t\t\t\tbreak\t\n\treturn is_usable", "def checkEmpty(grid):\n for x in range(len(grid.board)):\n for y in range(len(grid.board[0])):\n if grid.board[x][y] == 0:\n return True\n return False", "def _check_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n alive_neighbours = self._get_neighbours(row_number,col_number)\n \n self.to_be_updated[row_number][col_number] = False\n if self.cells[row_number][col_number].get_status():\n if alive_neighbours < 2:\n self.to_be_updated[row_number][col_number] = True\n elif alive_neighbours > 3:\n self.to_be_updated[row_number][col_number] = True\n else:\n if alive_neighbours == 3:\n self.to_be_updated[row_number][col_number] = True", "def is_empty(self, row, column):\n\n return self.board[row][column] == placeholder", "def check_grid_full(self):\n for row in self.game_state:\n for e in row:\n if e is None:\n return False\n return True", "def is_occupied(self, pos):\n return any([p == pos for p in self._workers.values()])", "def check_valid_placement(n: int, row: int, col: int, grid: List) -> bool:\n if SudokuGrid.in_square(n, row, col, grid) or \\\n SudokuGrid.in_row(n, row, col, grid) or \\\n SudokuGrid.in_col(n, row, col, grid):\n return True\n return False", "def check_won (grid):\r\n for i in range (4):\r\n for j in range (4):\r\n if grid[i][j] >= 32:\r\n return True\r\n return False", "def board_tiles_availability(self):\n for row in range(GameData.rows):\n for col in range(GameData.columns):\n if self.board[row][col] == 
0:\n return False\n # Game is draw, no more moves left!\n return True", "def check_won(grid):\r\n for i in range(len(grid)):\r\n for j in range(len(grid[i])):\r\n if grid[i][j] >= 32:\r\n return True \r\n return False", "def check_won (grid):\r\n for i in range(4): \r\n for j in range(4):\r\n if grid[i][j] >= 32:\r\n return True\r\n return False", "def check_empty_neighbours(self, cell):\n\t\tneighbours = self.get_neighbours(cell)\n\t\tflag = True\n\t\tfor neighbour in neighbours:\n\t\t\tif neighbour.state != 0:\n\t\t\t\tflag = False\n\t\treturn flag", "def exist_adjacent_cell(board: list, cell_index: tuple) -> bool:\n #Each time the result = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\n possible_cells_direction = list(filter(lambda x: x[0] != 0 or x[1] != 0, list(product(range(-1, 2), range(-1, 2)))))\n\n for coord_couple in possible_cells_direction:\n i = cell_index[0] + coord_couple[0]\n j = cell_index[1] + coord_couple[1]\n\n if not test_cell_existence(board, i, j):\n continue\n\n # If a cell isn't empty\n if board[i][j] != 0:\n return True\n return False", "def is_unoccupied(self) -> bool:\n return self.piece == Piece() # Piece() creates an \"empty-piece\"", "def in_col(n: int, row: int, col: int, grid: List) -> bool:\n for x in range(9):\n if x!= row and n == grid[x][col]:\n return True\n return False", "def is_cell_on_board(cell, board_shape): # TODO: Remove\n return (0, 0) <= cell < board_shape", "def valid_coverage_cell(self, row, column):\n\n if (row < self.cov_grid.shape[0] and\n row >= 0) and \\\n (column < self.cov_grid.shape[1] and\n column >= 0):\n return True\n else:\n return False", "def check_won (grid):\r\n w=False\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]>=32:\r\n w=True\r\n break\r\n return w", "def check_bounds(self, row: int, col: int) -> bool:\n return 0 <= row < self.row and 0 <= col < self.col", "def _empty_cell(self, i_row, i_col):\n return self._board[i_row][i_col] == \" \"", "def is_filled(self, x, y):\n if not (0 <= x and x < self.map_size[0]\n and 0 <= y and y < self.map_size[1]):\n return False\n\n # Is there something else than Floor?\n return (len(self.map.get_cell_nodes(x, y)) > 1)", "def is_cell_valid(board, r, c):\n return is_cell_row_valid(board, r, c) or is_cell_col_valid(board, r, c)", "def checkWithinBound(rowWithinBound,colWithinBound):\n if(rowWithinBound == 0 and colWithinBound == 0):\n return True\n else:\n return False", "def isMine(self, row, col):\n return self.board[row, col] == 1", "def _check_if_position_on_board(coord: tuple, board_size: int):\n in_row = coord[0] in range(board_size)\n in_col = coord[1] in range(board_size)\n return in_row and in_col", "def on_board(hexe):\n\n cube = axial_to_cube(hexe)\n\n # check each bound\n for axis in cube:\n if abs(axis) > BOARD_BOUND:\n return False\n return True", "def in_row(n: int, row: int, col: int, grid: List) -> bool:\n for x in range(9):\n if x != col and n == grid[row][x]:\n return True\n return False", "def _is_valid_land(x, y, grid):\n return (x >= 0) and (x < len(grid)) and (y >= 0) and (y < len(grid[0])) and grid[x][y]", "def surroundingIsEmpty(board, row, column):\n\n neighboursList = list(BoardUtils.neighborsList(board, 1, row, column))\n for element in neighboursList:\n if not BoardUtils.isEmpty(board, element[0], element[1]):\n return False\n return True", "def is_complete(self):\n for i in range(9):\n if len(self.rows[i]) != 0 or len(self.columns[i]) != 0 or len(self.groups[i]) != 0:\n return False\n\n for row in 
self.board:\n for col in row:\n if col == self.empty_cell_flag:\n return False\n\n return True", "def can_position_fleet(self, starting_cell, board):\n are_rows_available = starting_cell.row + self.total_rows_req <= board.rows\n are_columns_available = starting_cell.column + self.total_column_req <= board.columns\n if are_columns_available and are_rows_available:\n return True\n return False", "def is_on_board(x: int, y: int) -> bool:\n return x >= 0 and x < BOARDWIDTH and y < BOARDHEIGHT", "def is_possible_grid(self,row,col,user_value):\n start_row = row - (row % 3)\n start_col = col - (col % 3)\n for x in range(3):\n for y in range(3):\n if self.arr[x+start_row][y+start_col] == user_value:\n logging.debug(f\"is_posssible_grid(): (False) x: {x} y: {y} s_row: {start_row} s_col: {start_col} arr[x+start_row][y+start_col]: {self.arr[x+start_row][y+start_col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_grid(): (True) x: {x} y: {y} s_row: {start_row} s_col: {start_col} arr[x+start_row][y+start_col]: {self.arr[x+start_row][y+start_col]} != {user_value}\")\n return True", "def checkwin(self):\n w = self.getWidth()\n h = self.getHeight()\n numOccupiedCell = 0 # counter for the number of occupied cells (use to detect a terminal condition of the game)\n for r in range(h):\n for c in range(w):\n if self.cell[c][r] == EMPTY:\n continue # this cell can't be part of winning segment\n # if we reach this point, the cell is occupied by a player stone\n numOccupiedCell = numOccupiedCell+1\n for dr,dc in [(1,0),(0,1),(1,1),(-1,1)]: # direction of search\n for i in range(NWIN):\n # test if there exists a segment of NWIN uniformly coloured cells\n if not(r+i*dr>=0) or not(r+i*dr<h) or not(c+i*dc<w) or not(self.cell[c+i*dc][r+i*dr] == self.cell[c][r]):\n break # segment broken\n else:\n # Python remark: notice that the else is attached to the for loop \"for i...\"\n # This block is executed if and only if 'i' arrives at the end of its range \n return self.cell[c][r] , True # found a winning segment \n return EMPTY, numOccupiedCell==w*h", "def valid_entry(grid, val, row, col):\n if grid[row][col] != 'x':\n return False\n\n # check subgrid\n if val in get_subgrid_values(grid, row, col):\n return False\n\n # check same row\n for c in grid[row]:\n if c == val:\n return False\n\n # check same col\n for r in range(len(grid)):\n if grid[r][col] == val:\n return False\n\n return True", "def full_board( self ):\n\n for x in self.__grid:\n if isinstance(x, int):\n return False\n else:\n continue\n\n return True", "def isvalid(board, num, cell):\r\n x, y = cell\r\n row = board[x]\r\n col = transpose(board)[y]\r\n\r\n # check row if num already present\r\n if any(row[i] == num for i in range(9)):\r\n return False\r\n # check col if num already present\r\n if any(col[i] == num for i in range(9)):\r\n return False\r\n \r\n # get start position of box\r\n Xbox = (x//3) * 3\r\n Ybox = (y//3) * 3\r\n for i in range(Xbox, Xbox+3):\r\n for j in range(Ybox, Ybox+3):\r\n if board[i][j] == num:\r\n return False\r\n \r\n return True", "def valid_move(x, y):\r\n if [x, y] in empty_cells(board):\r\n return True\r\n else:\r\n return False", "def check(self,a,x,y):\r\n return not self.exitsinrow(self.rows,x,a) and not self.existsincol(self.rows,y,a) and \\\r\n not self.exitsinblock(self.rows, x - x % 3, y - y % 3,a)", "def check_won (grid):\r\n p=0\r\n for k in range(len(grid)):\r\n for g in range(len(grid[k])): \r\n if grid[k][g]>=32:\r\n p+=1\r\n else:\r\n ()\r\n if p>0:\r\n return True\r\n else:\r\n return 
False", "def at_least_one_cell_is_empty(cell_list):\n for cell in cell_list:\n if self.environment.grid.out_of_bounds(cell) or self.environment.grid.is_cell_empty(cell):\n return True\n return False", "def __check_row(self, x: int, y: int) -> bool:\n return not any([self.__maze[x, y + i] for i in (-1, 0, 1)])", "def search_cell(self, row, col):\n isFound = self.board.search_cell(row, col)\n\n if isFound:\n return True\n\n return False", "def check_local_square(grid, num, i, j):\n assert i < len(grid), 'Row is out of grid!'\n assert j < len(grid[0]), 'Column is out of grid!' \n\n MINI_GRID_SIZE = 3\n\n top_left_row = MINI_GRID_SIZE * (i // MINI_GRID_SIZE)\n top_left_col = MINI_GRID_SIZE * (j // MINI_GRID_SIZE)\n\n found = False\n for row in range(top_left_row, top_left_row + MINI_GRID_SIZE):\n for col in range(top_left_col, top_left_col + MINI_GRID_SIZE):\n if grid[row][col] == num and (row, col) != (i, j):\n found = True \n return found", "def is_inacessible(cell):\n adj, count = num_adj_buildings(cell)\n return adj == count", "def occupied_cells(self):\n\n for lm in self.landmarks:\n if self.cell_size < 1:\n # expand the range the landmark exists\n lm_x_range = np.arange(lm[0]-self.R, lm[0]+self.R, self.cell_size)\n lm_y_range = np.arange(lm[1]-self.R, lm[1]+self.R, self.cell_size)\n\n # loop through expanded ranges and compute grid positions\n for lm_x in lm_x_range:\n for lm_y in lm_y_range:\n\n row, col = self.cell_index([lm_x, lm_y])\n\n # apply cost of occupied cell\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass\n\n else:\n # apply cost of occupied cell\n row, col = self.cell_index(lm)\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass", "def check_position_is_legal(grid, num, i, j):\n args = (grid, num, i, j)\n return (not check_row(*args)) and (not check_col(*args)) and (not check_local_square(*args))", "def occupiedNeighbor(self, xi, yi):\n\n xmax = self.mapData.og.info.width\n ymax = self.mapData.og.info.height\n\n if self.mapData.sampled:\n # Fails on an occupied cell\n assert self.mapData.mapArrayS[xi, yi] < 50\n for x in range(max(xi - 1, 0), min(xi + 1, xmax)):\n for y in range(max(yi - 1, 0), min(yi + 1, ymax)):\n if self.mapData.mapArrayS[x,y] > 50:\n return True\n return False\n else:\n # Fails on an occupied cell\n assert self.mapData.mapArray[xi, yi] < 50\n for x in range(max(xi - 1, 0), min(xi + 1, xmax)):\n for y in range(max(yi - 1, 0), min(yi + 1, ymax)):\n if self.mapData.mapArray[x,y] > 50:\n return True\n return False", "def _cell_in_boundary(self, i_row, i_col):\n return ((i_row, i_col) == self._tl_cell or\n (i_row, i_col) == self._tr_cell or\n (i_row, i_col) == self._bl_cell or\n (i_row, i_col) == self._br_cell or\n (i_row, i_col) in self._ls_cells or\n (i_row, i_col) in self._rs_cells or\n (i_row, i_col) in self._ts_cells or\n (i_row, i_col) in self._bs_cells)", "def _is_in_grid(self, atom_coordinate):\n return c_is_in_grid(atom_coordinate, self._origin_crd, self._uper_most_corner_crd)", "def is_valid_room(self, x, y):\r\n return 0 <= x < self.__nx and 0 <= y < self.__ny", "def valid_tile(self, i, j):\n if (i >= 0 and i < self.rows) and (j >= 0 and j < self.cols):\n return True\n return False", "def can_add_to(self, row, col):\r\n return (0<= row and row < self.height) and \\\r\n (0<= col and col < self.width) and \\\r\n (self.grid[row][col] == ' ')", "def __check_col(self, x: int, y: int) -> bool:\n return not any([self.__maze[x + i, y] for i in (-1, 0, 1)])", "def no_conflicts(self, row, col, number):\n\n\t\t# 1st Check if 
number is present in the given column\n\t\tfor i in range(9):\n\t\t\tif self.grid[row][i] == number:\n\t\t\t\treturn False\n\n\t\t# 2nd Check if number is present in the given row\n\t\tfor i in range(9):\n\t\t\tif self.grid[i][col] == number:\n\t\t\t\treturn False\n\n\t\t# 3rd Check if number is present in the 3x3 local grid\n\t\tx0 = (col // 3) * 3\n\t\ty0 = (row // 3) * 3\n\t\tfor i in range(3):\n\t\t\tfor j in range(3):\n\t\t\t\tif self.grid[y0 + i][x0 + j] == number:\n\t\t\t\t\treturn False\n\n\t\t# return true if pass all 3 checks\n\t\treturn True", "def check_grid(self) -> None:\n if not len(self.grid) == 81:\n raise ValueError(\"Grid does not have 81 elements. Aborting\")", "def check_if_board_full(self, board):\n for i in range(self.height // 80):\n for j in range(self.width // 80):\n if board[(j, i)] == 0:\n return False\n elif j == self.width // 80:\n break\n else:\n pass\n print(\"Board full! :(\")\n return True", "def check_if_solved(self):\n for cell in self.board.values():\n if not cell.value:\n return False\n return True", "def is_empty_grid(tv_grid):\n return not (tv_grid and (tv_grid.get(constants.HEIGHT) != 0 or\n tv_grid.get(constants.WIDTH) != 0 or\n tv_grid.get(constants.POSITION) != 0 or\n tv_grid.get(constants.DEVICE_IDS)))", "def is_occupied(self):\n return self.occupied", "def _count_seen_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n count += 1 if _is_occupied(grid, row, col, dx, dy) else 0\n return count", "def is_valid_position(self, somerow, somecol):\n valid_row = 0 <= somerow <= (self.size-1)\n valid_col = 0 <= somecol <= (self.size-1)\n #need to use self.size - 1 because while we're thinking of an 8x8 chess board, the computer is thinking of a 0x7 chess board\n return valid_row and valid_col", "def check_col(grid, num, i, j):\n assert i < len(grid), 'Row is out of grid!'\n assert j < len(grid[0]), 'Column is out of grid!' 
\n\n found = False\n for pos, row in enumerate(grid):\n if row[j] == num and pos != i:\n found = True\n return found", "def game_tie(self):\n\n shape = self.board.shape\n if np.count_nonzero(self.board) == (shape[0] * shape[1]):\n # The board is full\n player = 0\n return True\n else:\n return False", "def available(self, position):\n if position is not None:\n x, y = position\n return self.grid[x][y] == 0", "def is_in_chessboard(row_or_col):\n\n nonzero, = row_or_col.nonzero()\n\n # compute the approximate number of crossed squares\n squares = 0\n for i, j in zip(nonzero, nonzero[1:]):\n if j - i >= min_square_dim:\n squares += 1\n\n return squares >= 8", "def is_alive(self, cell: Position) -> bool:\n return cell in self._state", "def check_if_valid(self, row, col, number):\n # Checks if all numbers in row occurs only once\n for i in range(len(self.grid[row])):\n if self.grid[row][i] == number and col != i:\n return False\n\n # Checks if all numbers in column occurs only once\n for i in range(len(self.grid)):\n if self.grid[i][col] == number and row != i:\n return False\n\n # Defines the 3x3 grid that needs to be checked\n square = [(row // 3) * 3, (col//3) * 3]\n \n # Checks if all numbers in the 3x3 square occurs only once\n for i in range(square[0] , square[0] + 3):\n for j in range(square[1], square[1] + 3):\n if number == self.grid[i][j] and i != row and j != col:\n return False\n return True", "def check_row(grid, num, i, j):\n assert i < len(grid), 'Row is out of grid!'\n assert j < len(grid[0]), 'Column is out of grid!'\n\n found = False\n for col in range(len(grid[i])):\n if grid[i][col] == num and col != j:\n found = True\n return found", "def is_cell(self, c, d):\n if (c[0] == self.nrows-1 and d == NORTH) or \\\n (c[1] == self.ncols-1 and d == EAST) or \\\n (c[0] == 0 and d == SOUTH) or \\\n (c[1] == 0 and d == WEST): return False\n else: return True", "def cell_status(self, pos):\n if pos in self._coordinates:\n if pos in self._hit_coors:\n return True\n return False\n return None", "def valid(grid, num, pos):\n # Check row\n for i in range(LEN_GRID):\n if grid[pos[0]][i] == num and pos[1] != i:\n return False\n\n for i in range(LEN_GRID):\n if grid[pos[0]][i] == num and pos[1] != i:\n return False\n\n # Check column\n for i in range(LEN_GRID):\n if grid[i][pos[1]] == num and pos[0] != i:\n return False\n\n # Check box\n box_x = pos[1] // 3\n box_y = pos[0] // 3\n\n for i in range(box_y*3, box_y*3 + 3):\n for j in range(box_x * 3, box_x*3 + 3):\n if grid[i][j] == num and (i, j) != pos:\n return False\n return True", "def test_is_occupied(self):\n self.assertFalse(self.test_square.is_occupied())\n\n self.test_square.piece = Piece(ChessColor.BLACK)\n self.assertTrue(self.test_square.is_occupied())", "def is_valid_position(self, x, y):\n if (x > self.n_cols-1 or y > self.n_rows-1) or (x < 0 or y < 0):\n return False\n\n elif self.grid[x][y] == 3:\n return False\n\n return True", "def is_game_over(self):\n if max([max(row) for row in self.grid]) == 2 ** (self.grid_size ** 2):\n raise GameException('Congrats, You won !')\n\n # If there is a zero then the game is not over\n for row in self.grid:\n if 0 in row:\n return False\n\n # Check if two consecutive number (vertically or horizontally) are\n # equal. 
In this case the game is not over.\n for i in range(self.grid_size):\n for j in range(self.grid_size):\n # horizontal check\n if (i < self.grid_size - 1 and\n self.grid[i][j] == self.grid[i + 1][j]):\n return False\n # vertical check\n if (j < self.grid_size - 1 and\n self.grid[i][j] == self.grid[i][j + 1]):\n return False\n\n return True" ]
[ "0.8251217", "0.7962079", "0.7710803", "0.7681166", "0.74803954", "0.727737", "0.7273842", "0.72004473", "0.71587", "0.71319115", "0.7091942", "0.7075044", "0.7071857", "0.69793457", "0.6965492", "0.69624925", "0.692557", "0.69222736", "0.6915808", "0.6904662", "0.6853385", "0.6853264", "0.68390924", "0.6816907", "0.6808662", "0.67863977", "0.6775677", "0.6761628", "0.674396", "0.67248213", "0.6723868", "0.6718746", "0.67121154", "0.6708621", "0.67085624", "0.6705626", "0.66797197", "0.66371334", "0.66268176", "0.66079795", "0.66022223", "0.6597042", "0.65939134", "0.6587256", "0.6577573", "0.65586895", "0.6553747", "0.65457886", "0.6511253", "0.65096223", "0.65030545", "0.64971066", "0.64962906", "0.6461958", "0.64402676", "0.64329123", "0.6431734", "0.64251983", "0.64237267", "0.64185166", "0.63847065", "0.63833296", "0.63827", "0.6381311", "0.6366128", "0.63653857", "0.63637006", "0.63514054", "0.63473123", "0.6344215", "0.6335192", "0.6333451", "0.633149", "0.6330625", "0.6324489", "0.63226146", "0.63219976", "0.6309514", "0.629946", "0.6297013", "0.62907517", "0.6287651", "0.6282219", "0.62702066", "0.62636197", "0.6258573", "0.6257035", "0.6251414", "0.62453693", "0.624099", "0.62399364", "0.6235876", "0.6224346", "0.62209415", "0.6202987", "0.6200156", "0.61938995", "0.6188017", "0.6184908", "0.61730903", "0.6160874" ]
0.0
-1
Check if the specified grid cell is free.
def is_free(self, pos: tuple):
    if self.within_map(pos):
        return self.map[round(pos[0]), round(pos[1])] == FREE
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkAvailable(self, x, y):\n return 0 <= x < self.rows and 0 <= y < self.cols and not self.gridBusy[x][y]", "def check_free(self, arr):\n cell_location = self.cartesian_to_cell(arr)\n cell = self.occ_matrix[cell_location[0], cell_location[1]]\n return cell == 0", "def isFree(point):\n global grid\n for i in point:\n if i < 0:\n return False\n try:\n value = grid[point[0]][point[1]][point[2]]\n # print value\n except:\n print \"point \", point, \"lies outside of grid\"\n value = False\n\n return value", "def is_free(self) -> bool:\n return self.places < self.total", "def is_free(self):\n return self._size > 0", "def _check_occupied(self, col, row):\n if self.board[row - 1][col - 1] == EMPTY:\n return False\n else:\n return True", "def isfree(col,queens):\n if col in queens:\n return False\n elif any([ abs(col-col1)==len(queens)-index for index,col1 in enumerate(queens)]):\n #c[r]==c[j]; r-j==c[r]-c[j]; r-j==c[j]-c[r]\n # col is the colomn to check; len(queens) just be the row index of col, dont subtract 1\n return False\n else:\n return True", "def is_free(self) -> tuple:\n if self.running_procs >= self.procs_no:\n return (False, None)\n if self.gpus:\n for gpu in self.gpus:\n if self.gpu_running_procs[gpu] < self.per_gpu[gpu]:\n return (True, gpu)\n return (False, None)\n return (True, None)", "def isComplete(grid):\n for row in range(0,9):\n for col in range(0,9):\n if grid[row][col]==0:\n return False\n return True", "def checkFree(self, x, y):\n for i in range(self.numPieces):\n new_x = x + self.pos[self.rotation][i][0]\n new_y = y + self.pos[self.rotation][i][1]\n if not self.checkAvailable(new_x, new_y):\n return self.colors['busy']\n return self.colors['free']", "def check_grid(grid: List):\n for row in range(9):\n for col in range(9):\n if grid[row][col] == 0:\n return False\n return True", "def _is_occupied(\n grid: List[List[str]], row: int, col: int, dx: int, dy: int) -> bool:\n while 0 <= (row + dy) < len(grid) and 0 <= (col + dx) < len(grid[0]):\n row += dy\n col += dx\n if grid[row][col] == 'L':\n return False\n if grid[row][col] == '#':\n return True\n return False", "def is_unoccupied(self, row, col):\n return self.maze[row][col] is EMPTY", "def check_position_free(self, pos=None):\n if pos is None:\n pos = self.draw.position\n return self.board.board[pos] == 0", "def check_free_space(environment, target_xy, fovea):\n temp_image = check_target_position(environment, target_xy, fovea)\n if np.array_equal(temp_image, np.zeros(temp_image.shape)):\n return True\n else:\n return False", "def check_tile_availability(self, row, col):\n return self.board[row][col] == 0", "def dec_gains_of_free_cells(self):\r\n for cell in self.cells:\r\n if not cell.locked:\r\n cell.gain -= 1\r\n cell.yank()", "def available(self, position):\n if position is not None:\n x, y = position\n return self.grid[x][y] == 0", "def has_cells(self):\n return len(self._cells) > 0", "def __cell_is_occupied(self, x, y) -> bool:\n return self.occupancy_map.data[self.__get_cell_index(x, y)] != 0", "def is_all_free(self):\n return self.pool_size == self.pool.qsize()", "def freePoint(self, X, Y):\n if X < 0 or Y < 0 or X > GSIZE or Y > GSIZE:\n return False\n if not self.allowSelfAvoidOnly:\n return True\n if self.segs == []:\n return True\n if self.segs[0].getStartPoint() == (X, Y):\n return False\n for seg in self.segs:\n if seg.getEndPoint() == (X, Y):\n return False\n return True", "def __FreeTiles(self, grid, log=False):\n\n x_pos, _ = np.where(grid == 0)\n return len(x_pos)", "def 
check_grid_full(self):\n for row in self.game_state:\n for e in row:\n if e is None:\n return False\n return True", "def check_all_cells_used(view):\n with pytest.raises(NoUnusedCellsError):\n view._available_cell()", "def in_grid(self, tile):\n return 0 <= tile[0] < self.gs[0] and 0 <= tile[1] < self.gs[1]", "def cell_is_usable(cell):\n\tmaxcolors = tile_size[0] * tile_size[1]\n\tcolors = cell.getcolors(maxcolors)\n\t\n\tis_usable = False\n\t\n\tfor color in colors:\t\n\t\tif len(color[1]) == 3:\n\t\t\tif sum(color[1]) == 765 and color[0] < maxcolors/2:\n\t\t\t\t# less than a half are white\n\t\t\t\tis_usable = True\n\t\telse:\n\t\t\tif color[1][3] == 255 and color[0] >= 0:\n\t\t\t\tis_usable = True\n\t\t\t\tbreak\t\n\treturn is_usable", "def is_free(self, degree, dart):\r\n return self.alphas[degree][dart] == dart", "def is_in_the_grid(self, row: int, col: int) -> bool:\n return 0 <= row < self.n_row and 0 <= col < self.n_col", "def is_occupied(self, p):\r\n return 0 <= p[0] < self.width and 0 <= p[1] < self.height and self.grid[p[1]][p[0]] == '#'", "def IsFree(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsFree(*args)", "def is_empty_grid(tv_grid):\n return not (tv_grid and (tv_grid.get(constants.HEIGHT) != 0 or\n tv_grid.get(constants.WIDTH) != 0 or\n tv_grid.get(constants.POSITION) != 0 or\n tv_grid.get(constants.DEVICE_IDS)))", "def testEmptyCell(self, row, column, gameGrid=None, emptyValue=0):\n if not gameGrid:\n gameGrid = self.gameGrid\n row = self.limitValue(row, 0, self.rows-1)\n column = self.limitValue(column, 0, self.columns-1)\n if gameGrid.getItem(row, column) == emptyValue:\n return True\n else:\n return False", "def is_empty(self, row, col):\n return self._cells[row][col] != FULL", "def _space_has_degrees_of_freedom(self) -> bool:\n return True", "def check_position_is_legal(grid, num, i, j):\n args = (grid, num, i, j)\n return (not check_row(*args)) and (not check_col(*args)) and (not check_local_square(*args))", "def is_empty(self, x, y):\n if x in range(self.nx) and y in range(self.ny):\n if self.grid[y][x] == ' ':\n return True\n return False", "def occupied(self, (xIndex, yIndex)):\n return xIndex < 0 or yIndex < 0 or \\\n xIndex >= self.xN or yIndex >= self.yN or \\\n self.grid[xIndex][yIndex]", "def is_complete(self):\n for i in range(9):\n if len(self.rows[i]) != 0 or len(self.columns[i]) != 0 or len(self.groups[i]) != 0:\n return False\n\n for row in self.board:\n for col in row:\n if col == self.empty_cell_flag:\n return False\n\n return True", "def cellIsEmpty(self, x, y):\n\n\n #If x and y are out of bounds, we return False\n if x < 0 or y < 0:\n return False\n\n #Else we get the cell that interests us\n else:\n cell = self._get_grille()[y][x]\n\n\n if cell.element is None:\n print(\"Cell element is None\")\n return True\n \n #If it is a box, depart, arrivee, wall or spikes. 
The cell is not empty\n elif cell.element.name == \"box\" or cell.element.name == \"depart\" or cell.element.name == \"arrivee\" or cell.element.name == \"spikes\" or cell.element.name == \"mur\":\n print(\"Cell element : \"+cell.element.name)\n return False\n \n #Else, it means there is nothing in the cell or an element the box can cross\n else:\n return True", "def check_valid_placement(n: int, row: int, col: int, grid: List) -> bool:\n if SudokuGrid.in_square(n, row, col, grid) or \\\n SudokuGrid.in_row(n, row, col, grid) or \\\n SudokuGrid.in_col(n, row, col, grid):\n return True\n return False", "def check_empty_neighbours(self, cell):\n\t\tneighbours = self.get_neighbours(cell)\n\t\tflag = True\n\t\tfor neighbour in neighbours:\n\t\t\tif neighbour.state != 0:\n\t\t\t\tflag = False\n\t\treturn flag", "def _check_free_block():\n \n # populate the block data fields\n status = _prepare_blocks()\n if(status != STATUS['OK']) :\n # some metadata file reading error\n return status\n \n # find which blocks are already consumed, \n # per the block level metadata\n _prepare_consumed_block()\n \n \n # TODO : remove debug statements\n # print consumedBlocks\n \n freeStart = blocks[0]['freeStart']\n freeEnd = blocks[0]['freeEnd']\n blknum = 0\n for fblkcnt in xrange(freeStart, freeEnd + 1, 1) :\n for fblk in blocks[fblkcnt] :\n # if free block is zero, then, it should not be in consumed blocks\n # else it definitely should be\n print fblkcnt, \" - \", fblk, \" - \", blknum\n if (fblk == 0 and (blknum in consumedBlocks)):\n # free blocks says it's free, but consumed block says it's not\n return STATUS['F_NFB']\n elif(fblk == 1 and not(blknum in consumedBlocks)) : \n # free block says it's not free, but consumed block says it is\n return STATUS['F_MFB']\n \n blknum += 1\n # consistency all ok\n return STATUS['OK']", "def set_free(self, pos: tuple):\n if self.within_map(pos):\n self.map[round(pos[0]), round(pos[1])] = FREE\n return True\n else:\n return False", "def is_inacessible(cell):\n adj, count = num_adj_buildings(cell)\n return adj == count", "def is_unoccupied(self) -> bool:\n return self.piece == Piece() # Piece() creates an \"empty-piece\"", "def is_free(self):\n\n try:\n return self.call(method='domainIsFree', args=[self.domainname])\n except DomainOccupiedError:\n return False", "def valid_coverage_cell(self, row, column):\n\n if (row < self.cov_grid.shape[0] and\n row >= 0) and \\\n (column < self.cov_grid.shape[1] and\n column >= 0):\n return True\n else:\n return False", "def checkEmpty(grid):\n for x in range(len(grid.board)):\n for y in range(len(grid.board[0])):\n if grid.board[x][y] == 0:\n return True\n return False", "def _is_valid(self):\n for cell in self._cells_iterable():\n if cell not in self._valid_elements:\n return False\n return True", "def guard_occupy_transition(self):\n if not self.get_free_positions:\n return True", "def is_valid(gr, pos, num):\n \n row = pos[0]\n col = pos[1]\n \n for i in range(0, 9):\n # test row\n if(i != col and gr[row][i] == num):\n return False\n # test col\n if(i != row and gr[i][col] == num):\n return False\n\n # test 3x3 square\n small_row = floor(row / 3) * 3\n small_col = floor(col / 3) * 3\n\n for i in range(small_row, small_row + 3):\n for j in range(small_col, small_col + 3):\n if((i != row and j != col) and gr[i][j] == num):\n return False\n \n return True", "def tileOccupied(self, i, j):\n if self.tiles[i][j] == 1 or i == 0 or i == self.size[0] - 1 or j == 0 or j == self.size[1] - 1:\n return True\n for prop in self.props:\n if 
prop.i == i and prop.j == j:\n return True\n return False", "def check_grid(self) -> None:\n if not len(self.grid) == 81:\n raise ValueError(\"Grid does not have 81 elements. Aborting\")", "def _check_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n alive_neighbours = self._get_neighbours(row_number,col_number)\n \n self.to_be_updated[row_number][col_number] = False\n if self.cells[row_number][col_number].get_status():\n if alive_neighbours < 2:\n self.to_be_updated[row_number][col_number] = True\n elif alive_neighbours > 3:\n self.to_be_updated[row_number][col_number] = True\n else:\n if alive_neighbours == 3:\n self.to_be_updated[row_number][col_number] = True", "def is_enough_space(self) -> bool:\n return self._free_space() > self.minimum_disk", "def board_tiles_availability(self):\n for row in range(GameData.rows):\n for col in range(GameData.columns):\n if self.board[row][col] == 0:\n return False\n # Game is draw, no more moves left!\n return True", "def is_all_free(self):\n return self.pool_size == self.sem._value", "def is_port_free(port):\n return _is_port_free(port)", "def free(self):\n\n return not self.moving and not self.queue.get(0) and not self.anims.get(0)", "def is_empty(self, row, column):\n\n return self.board[row][column] == placeholder", "def test(self, grid, flag):\n x = self.x+SPEED_X[flag]\n y = self.y+SPEED_Y[flag]\n return 0 <= x < self.n and 0 <= y < self.n and grid[y][x] == 1", "def valid_entry(grid, val, row, col):\n if grid[row][col] != 'x':\n return False\n\n # check subgrid\n if val in get_subgrid_values(grid, row, col):\n return False\n\n # check same row\n for c in grid[row]:\n if c == val:\n return False\n\n # check same col\n for r in range(len(grid)):\n if grid[r][col] == val:\n return False\n\n return True", "def can_position_fleet(self, starting_cell, board):\n are_rows_available = starting_cell.row + self.total_rows_req <= board.rows\n are_columns_available = starting_cell.column + self.total_column_req <= board.columns\n if are_columns_available and are_rows_available:\n return True\n return False", "def _is_full(self):\n if self.allocated_spaces == self.capacity:\n return True\n elif self.allocated_spaces < self.capacity:\n return False", "def is_valid_room(self, x, y):\r\n return 0 <= x < self.__nx and 0 <= y < self.__ny", "def isvalid(board, num, cell):\r\n x, y = cell\r\n row = board[x]\r\n col = transpose(board)[y]\r\n\r\n # check row if num already present\r\n if any(row[i] == num for i in range(9)):\r\n return False\r\n # check col if num already present\r\n if any(col[i] == num for i in range(9)):\r\n return False\r\n \r\n # get start position of box\r\n Xbox = (x//3) * 3\r\n Ybox = (y//3) * 3\r\n for i in range(Xbox, Xbox+3):\r\n for j in range(Ybox, Ybox+3):\r\n if board[i][j] == num:\r\n return False\r\n \r\n return True", "def is_full(self):\n return len(self.__occupied_slots__) >= self.__size__", "def is_valid_position(self, x, y):\n if (x > self.n_cols-1 or y > self.n_rows-1) or (x < 0 or y < 0):\n return False\n\n elif self.grid[x][y] == 3:\n return False\n\n return True", "def does_usage_charges_grid_have_no_records(self):\n return self.is_element_present(self.usage_charges_grid_no_record_found_message_locator)", "def check_end(case, no_case, grid):\n\n\t#TODO : Récupérer toutes les positions vides -> les tester avec le joueur courant ->si combinaison possible \n\t# False sinon True et continue !\n\n\tfree_cases = []\n\tfor key, value in grid.items():\n\t\tif value == ' + 
':\n\t\t\tfree_cases.append(key)\n\n\tif len(free_cases) == 0:\n\t\treturn True\n\n\tfor elem in free_cases:\n\t\tstatus, useless2 = check_changes(elem, grid, case, no_case)\n\t\tif status == True:\n\t\t\treturn False\n\n\treturn True\n\n\n\tprint(occupied_cases)\n\tprint(free_cases)", "def valid_column(self, col: int) -> bool:\n\n return self.check_bounds(0, col) and self.grid[0][col] == \" \"", "def checkCollumn(self, y):\n used = []\n for x in range(len(self.board)):\n cur = self.board[x][y]\n if cur not in used:\n if cur!= 0:\n used += [cur]\n else:\n return False\n return True", "def _empty_cell(self, i_row, i_col):\n return self._board[i_row][i_col] == \" \"", "def see_occupant(self, x, y, dx, dy):\r\n if dx == 0 and dy == 0: # Makes looping easier\r\n return False\r\n x += dx\r\n y += dy\r\n while 0 <= x < self.width and 0 <= y < self.height:\r\n if self.grid[y][x] == '#':\r\n return True\r\n if self.grid[y][x] == 'L':\r\n return False\r\n x += dx\r\n y += dy\r\n return False", "def valid(grid, num, pos):\n # Check row\n for i in range(LEN_GRID):\n if grid[pos[0]][i] == num and pos[1] != i:\n return False\n\n for i in range(LEN_GRID):\n if grid[pos[0]][i] == num and pos[1] != i:\n return False\n\n # Check column\n for i in range(LEN_GRID):\n if grid[i][pos[1]] == num and pos[0] != i:\n return False\n\n # Check box\n box_x = pos[1] // 3\n box_y = pos[0] // 3\n\n for i in range(box_y*3, box_y*3 + 3):\n for j in range(box_x * 3, box_x*3 + 3):\n if grid[i][j] == num and (i, j) != pos:\n return False\n return True", "def check_cells_fit(cell_no, min_cell_distance, space_range=[[0,10],[0,10],None]):\n\n dim1, dim2, dim3 = space_range\n full_dim = 1.\n for dim in [dim1, dim2, dim3]:\n if dim != None:\n dim = dim[1]-dim[0]\n full_dim = full_dim*dim\n\n return full_dim / min_cell_distance >= cell_no", "def check(self):\n return self.tile==\"\"", "def is_valid(self,row,col) -> bool:\n if(row >=0 and col>=0 and row<self.row and col<self.col and self.array[row][col]==-1 ):\n return True\n return False", "def make_free_cell_list():\r\n for row in range(9):\r\n for col in range(9):\r\n if (application.ui.__getattribute__(f'cell{col+1}{row+1}')).text() == \"\":\r\n lst_free_cells.append(Point(row, col))", "def is_possible_grid(self,row,col,user_value):\n start_row = row - (row % 3)\n start_col = col - (col % 3)\n for x in range(3):\n for y in range(3):\n if self.arr[x+start_row][y+start_col] == user_value:\n logging.debug(f\"is_posssible_grid(): (False) x: {x} y: {y} s_row: {start_row} s_col: {start_col} arr[x+start_row][y+start_col]: {self.arr[x+start_row][y+start_col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_grid(): (True) x: {x} y: {y} s_row: {start_row} s_col: {start_col} arr[x+start_row][y+start_col]: {self.arr[x+start_row][y+start_col]} != {user_value}\")\n return True", "def in_col(n: int, row: int, col: int, grid: List) -> bool:\n for x in range(9):\n if x!= row and n == grid[x][col]:\n return True\n return False", "def XCAFDoc_ShapeTool_IsFree(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsFree(*args)", "def check(self,a,x,y):\r\n return not self.exitsinrow(self.rows,x,a) and not self.existsincol(self.rows,y,a) and \\\r\n not self.exitsinblock(self.rows, x - x % 3, y - y % 3,a)", "def is_cell_safe(self, cell, board):\n # look at a cell and the cell's revealed neighbors\n # if any neighbors say there's 1 mine nearby, and that neighbor has neighbors which\n # contain a flag, it's safe to click here\n # TODO: this really needs to only check neighbors' neighbors that 
border the original cell.\n # right now more cells are considered than should be.\n safe = False\n neighbors = ms.Minesweeper.get_neighbors(cell.row, cell.col, board)\n revealed_neighbors = [n for n in neighbors if n.revealed or n.flagged]\n for n in revealed_neighbors:\n if n.neighbors > 0:\n n_neighbors = ms.Minesweeper.get_neighbors(n.row, n.col, board)\n flagged_n_neighbors = [n for n in n_neighbors if n.flagged]\n if len(flagged_n_neighbors) > 0:\n safe = True\n return safe", "def is_valid_position(self, current_piece, offset_x = 0, offset_y = 0):\n for x in range(pieces.Piece.TEMPLATE_WIDTH):\n pos_x = current_piece.get_pos_x() + x + offset_x\n for y in range(pieces.Piece.TEMPLATE_HEIGHT):\n pos_y = current_piece.get_pos_y() + y + offset_y\n if current_piece.get_template()[y][x]:\n if not utils.contains(0, 0, game_config.BOARD_BOX_COUNT_X, game_config.BOARD_BOX_COUNT_Y, pos_x, pos_y, 1, 1):\n return False\n if self.board.get_cell(pos_x, pos_y):\n return False\n return True", "def check_done(grid):\r\n for row in grid:\r\n for el in row:\r\n if not el.collapsed:\r\n return False\r\n else:\r\n return True", "def check_col(grid, num, i, j):\n assert i < len(grid), 'Row is out of grid!'\n assert j < len(grid[0]), 'Column is out of grid!' \n\n found = False\n for pos, row in enumerate(grid):\n if row[j] == num and pos != i:\n found = True\n return found", "def valid(self, col, rank):\n return rank <= 8 and rank > 0 and self.getColIdx(col) < 8 and self.getColIdx(col) >= 0", "def valid(self, col, rank):\n return rank <= 8 and rank > 0 and self.getColIdx(col) < 8 and self.getColIdx(col) >= 0", "def check_won(grid):\r\n for i in range(len(grid)):\r\n for j in range(len(grid[i])):\r\n if grid[i][j] >= 32:\r\n return True \r\n return False", "def check(self):\n for row in self.grid:\n for i in range(1, 10):\n if row.count(i) != 1:\n return False\n\n for col in range(9):\n lst = [row[col] for row in self.grid]\n for i in range(1, 10):\n if lst.count(i) != 1:\n return False\n \n for i in range(3):\n for j in range(3):\n lst = [row[j* 3:(j*3) + 3] for row in self.grid[i * 3:(i*3) + 3]] \n flat_list = []\n for k in lst:\n for number in k:\n flat_list.append(number)\n \n for check_number in range(1, 10):\n if flat_list.count(check_number) != 1:\n return False\n return True", "def valid(point):\n index = offset(point)\n if tiles[index] == 0:\n return False\n\n index = offset(point + 19)\n\n if tiles[index] == 0:\n return False\n\n return point.x % 20 == 0 or point.y % 20 == 0", "def not_empty(entry):\n gt_boxes = entry['boxes']\n return gt_boxes.shape[0] > 0", "def isFull(self) -> bool:\n return self.count == self.capacity", "def isFull(self) -> bool:\n return self.count == self.capacity", "def is_occupied(self, pos):\n return any([p == pos for p in self._workers.values()])", "def test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer):\n if brickheight>wallheight or bricklength>walllength:\n return False\n elif over(brickheight,bricklength,row,column,walllength,wallheight):\n return False\n else:\n for x in range(column,column+bricklength):\n for y in range(row,row+brickheight):\n if (x,y) in occupied:\n return False \n break\n else:\n return True", "def check_won (grid):\r\n w=False\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]>=32:\r\n w=True\r\n break\r\n return w", "def valid_tile(self, i, j):\n if (i >= 0 and i < self.rows) and (j >= 0 and j < self.cols):\n return True\n return False" ]
[ "0.719573", "0.71511495", "0.7138081", "0.7021418", "0.69615394", "0.6675433", "0.6662159", "0.6597767", "0.65975684", "0.65579695", "0.6411737", "0.6411211", "0.6347452", "0.6343232", "0.62552905", "0.6222179", "0.61862236", "0.61669767", "0.6163536", "0.61213267", "0.6112138", "0.6106918", "0.6098223", "0.60859567", "0.60799795", "0.6077502", "0.60739994", "0.6034673", "0.60228145", "0.600994", "0.59986323", "0.5949167", "0.5936495", "0.5931221", "0.59302944", "0.5927537", "0.5921759", "0.58522946", "0.5850694", "0.5845824", "0.5839763", "0.58308476", "0.582017", "0.5815735", "0.57984", "0.57932717", "0.5791093", "0.5786253", "0.5782171", "0.5760909", "0.5759526", "0.5752117", "0.5748661", "0.5744775", "0.57396513", "0.57301474", "0.57243025", "0.57070255", "0.56938636", "0.56915265", "0.5688945", "0.56709635", "0.56666803", "0.56539303", "0.565211", "0.5644993", "0.564227", "0.56399626", "0.56246614", "0.5624195", "0.5612748", "0.56126475", "0.5606322", "0.56060314", "0.55843556", "0.5574981", "0.5565074", "0.5563041", "0.5560818", "0.5544892", "0.5539642", "0.55363435", "0.5534285", "0.55295646", "0.55219257", "0.55212545", "0.5519402", "0.5517239", "0.5508978", "0.5508978", "0.5505496", "0.55009264", "0.55006987", "0.54934686", "0.5492608", "0.5492608", "0.5492175", "0.549201", "0.5491601", "0.5487963" ]
0.6975477
4
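
For reference, a minimal, self-contained sketch of the occupancy-check pattern documented in the row above (query "Check if the specified grid cell is free."). The numpy-backed grid, the value of the FREE constant, and the within_map bounds test are illustrative assumptions not taken from the dataset; only the body of is_free mirrors the row's document field.

import numpy as np

FREE = 0  # assumed encoding of a free cell; the dataset row does not define it


class GridMap:
    # Hypothetical stand-in for the class that owns is_free in the dataset row.

    def __init__(self, rows: int, cols: int):
        self.map = np.full((rows, cols), FREE)

    def within_map(self, pos: tuple) -> bool:
        # Assumed bounds check: the rounded position must lie inside the array.
        r, c = round(pos[0]), round(pos[1])
        return 0 <= r < self.map.shape[0] and 0 <= c < self.map.shape[1]

    def is_free(self, pos: tuple):
        # Same logic as the document field in the row above.
        if self.within_map(pos):
            return self.map[round(pos[0]), round(pos[1])] == FREE
        else:
            return False


if __name__ == "__main__":
    grid = GridMap(5, 5)
    grid.map[2, 3] = 1                   # mark one cell as occupied
    print(grid.is_free((2.2, 3.1)))      # False: rounds onto the occupied cell
    print(grid.is_free((0, 0)))          # True
    print(grid.is_free((9, 9)))          # False: outside the map bounds
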
Get neighbouring vertices of a location that are within map bounds.
def get_neighbours(self, pos: tuple):
    x, y = pos[0], pos[1]
    neighbours = [(x + 1, y), (x + 1, y + 1), (x, y + 1), (x - 1, y + 1),
                  (x - 1, y), (x - 1, y - 1), (x, y - 1), (x + 1, y - 1)]
    return {k: self.move_cost(pos, k) for k in neighbours if self.within_map(k)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_neighbour(self, loc):\n y_lim, x_lim = np.shape(self.map)\n y, x = loc\n neighbour_cords = [(y - 1, x), (y + 1, x), (y, x - 1), (y, x + 1)]\n neighbour_cells = []\n for cords in neighbour_cords:\n curr_y, curr_x = cords\n if curr_y < 0 or curr_y >= y_lim:\n pass\n elif curr_x < 0 or curr_x >= x_lim:\n pass\n else:\n neighbour_cells.append(self.map[cords])\n\n return neighbour_cells", "def _valid_neighbors(location, some_num):\n xloc, yloc = location\n vector = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n ret_v = []\n for vect in vector:\n xpos = xloc + vect[0]\n ypos = yloc + vect[1]\n if xpos <= 0 or ypos <= 0:\n continue\n if xpos > some_num or ypos > some_num:\n continue\n ret_v.append((xpos, ypos))\n return ret_v", "def get_near(self,map):\n near_cells = []\n for i in range(self.x-1, self.x+2):\n for j in range(self.y-1, self.y+2):\n if(i>=0 and i<map.size and j>=0 and j<map.size): near_cells.append(map.search(i,j))\n return near_cells", "def get_neighbors(loc):\n dim = len(loc)\n offsets = product((-1, 0, 1), repeat=dim)\n neighbors = set()\n for offset in offsets:\n if offset == (0,) * dim:\n continue\n neighbors.add(tuple(a + b for a, b in zip(loc, offset)))\n return neighbors", "def get_neighbours(lat, long):\n # ns = north east, ew = east west (ratio between 1 feet and degree) \n # its different on diferent places on earth (sphere)!!\n ns = 0.0025\n ew = 0.0025\n walk = []\n for i in range(-2, 3):\n for j in range(-2, 3):\n thiscell = CellId.from_lat_lng(LatLng.from_degrees(lat + ns*i, long + ew*j)).parent(S2_CELL_LEVEL)\n if abs(i * j) < 4:\n walk.append(thiscell.id())\n return sorted(walk)", "def get_neighbours(self, row, col):\n neighbour_location_diffs = [(-1, -1),\n ( 0, -1),\n ( 1, -1),\n ( 1, 0),\n ( 1, 1),\n ( 0, 1),\n (-1, 1),\n (-1, 0)]\n neighbours = []\n for diff in neighbour_location_diffs:\n if (row + diff[0] >= 0 and\n row + diff[0] < self.height and\n col + diff[1] >= 0 and\n col + diff[1] < self.width):\n neighbours.append(self.cells[row + diff[0]][col + diff[1]])\n return neighbours", "def get_neighbours(self):\n return self.points_to.keys()", "def get_neighbors(self, x, y):\n\n if not self.has_vertex(x, y): return []\n neighbors = [(x + 1, y), (x, y - 1), (x - 1, y), (x, y + 1)]\n return [(x, y) for (x, y) in neighbors if self.has_vertex(x, y)]", "def get_neighbours(self, coords, filter = None, size = 1):\n\t\tif filter is None:\n\t\t\tfilter = lambda pos: True\n\t\tgrid = self.map\n\t\tresult = [pos for pos in grid.env_keys(coords, size) if filter(pos)]\n\t\tresult.sort(key = lambda pos: get_distance_2(pos, coords))\n\t\treturn result", "def get_neighbour_vertices(self, cur: Union[str, int]) -> list:\n\t\tvertices = [edge[0] if edge[1] == cur else edge[1] for edge in self.get_neighbour_edges(cur)]\n\t\treturn vertices", "def _get_neighbours(point):\n # Pull coords out of point.\n x = point[0]\n y = point[1]\n z = point[2]\n return ((x-1, y, z), (x+1, y, z), (x, y-1, z), (x, y+1, z), (x, y, z-1), (x, y, z+1))", "def get_all_vertices(self):\r\n for vertex in self.__neighbours.keys():\r\n yield vertex", "def _get_neighbors(self, p, min_x, max_x, min_y, max_y):\n x, y = p\n x0 = min_x if x == min_x else x - 1\n x1 = max_x + 1 if x == max_x else x + 2\n y0 = min_y if y == min_y else y - 1\n y1 = max_y + 1 if y == max_y else y + 2\n \n return [(x, y) for x in xrange(x0, x1) for y in xrange(y0, y1) if (x, y) != p]", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, 
self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours", "def get_neighbours(self):\n shape=self.cubeshape[1:]\n neighboursx=np.arange(self.xpos-(self.blocksize-1)/2,(self.xpos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursx=[x if (x>=0) & (x<=shape[1]-1) else np.nan for x in neighboursx ]\n neighboursy=np.arange(self.ypos-(self.blocksize-1)/2,(self.ypos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursy=[y if (y>=0) & (y<=shape[0]-1) else np.nan for y in neighboursy ]\n keys=[np.ravel_multi_index([y,x], shape) if np.all(np.isfinite(np.asarray([y,x]))) else np.nan for y in neighboursy for x in neighboursx]\n\n return keys", "def get_neighbors(self):\n return list(map(self.game.square, [self.position - self.game.rules[\"row_len\"], self.position + 1, self.position + self.game.rules[\"row_len\"], self.position - 1]))", "def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #assign each of neighbours corrds\n #top left to top rigt\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # left to right (center)\n left = (row, col - 1)\n # the (row, col) cordinates passed into this function are situated here\n right = (row, col + 1)\n \n #bottom-left to bottom-right\n bottom_left = (row +1, col -1)\n bottom_center = (row +1, col)\n bottom_right = (row +1, col +1)\n \n return [top_left, top_center, top_right,\n left , right ,\n bottom_left, bottom_center, bottom_right]", "def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #Assign each of the neighbours\n # Top-left to the top-right\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # Left to right\n left = (row, col - 1)\n # The '(row, col)' coordinates passed to this\n # function are situated here\n right = (row, col + 1)\n \n # Bottom-left to bottom-right\n bottom_left = (row + 1, col - 1)\n bottom_center = (row + 1, col)\n bottom_right = (row + 1, col + 1)\n \n return [top_left, top_center, top_right,\n left, right,\n bottom_left, bottom_center, bottom_right]", "def neighbors(self):\n return self.mesh.neighbors()", "def get_neighbours(pos):\n neighbours = {tuple(sum(x) for x in zip(pos, offset)) for offset in relative_positions}\n return neighbours", "def get_neighbors(self, vertex_id):\n # TODO\n return self.vertices[vertex_id]", "def get_neighbors(self, row, col):\n neighbors = set()\n for d in [-1,1]:\n if row+d >= 0 and row+d < self._height and \\\n (row+d,col) in self._empty_spaces:\n neighbors.add((row+d,col))\n if col+d >= 0 and col+d < self._width and \\\n (row,col+d) in self._empty_spaces:\n neighbors.add((row,col+d))\n return neighbors", "def get_neighbours(coords):\n\n dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\n (-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),\n (-1,2),(0,2),(1,2),(0,0)]\n neighbours = []\n for dx, dy in dxdy:\n neighbour_coords = coords[0] + dx, coords[1] + dy\n if not (0 <= neighbour_coords[0] < nx and\n 0 <= neighbour_coords[1] < ny):\n # We're off the grid: no neighbours here.\n continue\n neighbour_cell = cells[neighbour_coords]\n if neighbour_cell is not None:\n # This cell is occupied: store this index of the contained point.\n neighbours.append(neighbour_cell)\n return neighbours", "def get_neighbors(self, vertex_id):\n return self.vertices[vertex_id]", "def get_neighbors(self, vertex_id):\n return 
self.vertices[vertex_id]", "def get_neighbors(self, vertex_id):\n return self.vertices[vertex_id]", "def get_neighbors(self, vertex_id):\n return self.vertices[vertex_id]", "def get_neighbors(self, vertex_id):\n return self.vertices[vertex_id]", "def get_neighbors(self, vertex_id):\n return self.vertices[vertex_id]", "def get_neighbors(self, vertex_id):\n pass # TODO", "def get_neighbours(self, coords):\n\n\t dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\n\t (-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),\n\t (-1,2),(0,2),(1,2),(0,0)]\n\t neighbours = []\n\t for dx, dy in dxdy:\n\t neighbour_coords = coords[0] + dx, coords[1] + dy\n\t if not (0 <= neighbour_coords[0] < self.nx and\n\t 0 <= neighbour_coords[1] < self.ny):\n\t # We're off the grid: no neighbours here.\n\t continue\n\t neighbour_cell = self.cells[neighbour_coords]\n\t if neighbour_cell is not None:\n\t # This cell is occupied: store this index of the contained point.\n\t neighbours.append(neighbour_cell)\n\t return neighbours", "def get_neighbors(grid, x, y):\n out = []\n if x > 0:\n out.append(grid[x-1, y])\n if y > 0:\n out.append(grid[x, y-1])\n if y < grid.shape[1] - 1:\n out.append(grid[x, y+1])\n if x < grid.shape[0] - 1:\n out.append(grid[x+1, y])\n return out", "def neighbor(self, start):\n x = start[0] + random.uniform(-20, 20)\n y = start[1] + random.uniform(-20, 20)\n x = max(min(x, xbounds[1]), xbounds[0])\n y = max(min(y, ybounds[1]), ybounds[0])\n return [x,y]", "def get_neighbours(self, cell: Position) -> Iterable[Position]:\n x, y = cell\n\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n ]", "def get_neighbors(self, vertex_id):\n # TODO\n\n return self.vertices[vertex_id]", "def neighbors_in(self, vertex):\n return list(self.neighbor_in_iterator(vertex))", "def get_neighbours_8(x, y):\n return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1), \\\n (x - 1, y), (x + 1, y), \\\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]", "def neighbors(position, cave, x_min=None, x_max=None, y_min=None, y_max=None):\n x, y = position\n if x_min is None:\n x_min = min(cave.keys(), key=lambda pos: pos[0])[0]\n if x_max is None:\n x_max = max(cave.keys(), key=lambda pos: pos[0])[0]\n if y_min is None:\n y_min = min(cave.keys(), key=lambda pos: pos[1])[1]\n if y_max is None:\n y_max = max(cave.keys(), key=lambda pos: pos[1])[1]\n new_positions = [(x, y-1), (x-1, y), (x+1, y), (x, y+1)]\n new_positions = [(x, y) for x, y in new_positions\n if x_min <= x <= x_max and y_min <= y <= y_max]\n return new_positions", "def neighbours((u,v)):\r\n return ((u,v+1), (u+1,v), (u,v-1), (u-1,v))", "def neighbours(pos):\r\n\t\tnbs = []\r\n\t\tfor direction in directions:\r\n\t\t\tnb = add(pos, direction)\r\n\t\t\tif is_inside(nb):\r\n\t\t\t\tnbs.append(nb)\r\n\t\treturn nbs", "def get_neighbours(self, grid):\n\t\tfor diff in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n\t\t\tres = Vector((self.row, self.col)) + diff\n\t\t\tif res[0] >= 0 and res[1] >= 0 and res[0] < len(grid) and res[1] < len(grid[0]):\n\t\t\t\tyield grid[res[0]][res[1]]", "def get_neighbourhood(self, source: int) -> Iterable[GraphEdge]:\n return filter(\n lambda e: e.fst == source,\n self.__edges\n )", "def locations_adjacent_to(loc):\n return [(loc[0] + direction[0], loc[1] + direction[1]) for direction in [(0,-1),(0,1),(-1,0),(1,0)]]", "def neighbours(x, y):\n n = []\n for c in ((y-1, x-1), (y-1, x), (y-1, x+1), (y, x-1), (y, x+1), (y+1, x-1), (y+1, x), (y+1, x+1)):\n 
n.append(c)\n return n", "def iter_neighbors(x: int, y: int) -> t.Generator[COORDINATE, None, None]:\n yield x - 1, y\n yield x + 1, y\n yield x, y - 1\n yield x, y + 1", "def get_neighbors(self, pos):\r\n neighbors = []\r\n if pos[0] + 1 < self.size:\r\n neighbors.append((pos[0] + 1, pos[1]))\r\n if pos[0] - 1 >= 0:\r\n neighbors.append((pos[0] - 1, pos[1]))\r\n if pos[1] + 1 < self.size:\r\n neighbors.append((pos[0], pos[1] + 1))\r\n if pos[1] - 1 >= 0:\r\n neighbors.append((pos[0], pos[1] - 1))\r\n return neighbors", "def list_neighbors(current_row, current_col, grid_size):\n neighbors = []\n for row_offset, col_offset in [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1)]:\n new_row = current_row + row_offset\n new_col = current_col + col_offset\n if (new_row >= 0 and new_row < grid_size and new_col >= 0\n and new_col < grid_size):\n neighbors.append((new_row, new_col))\n return neighbors", "def get_voxel_neighbors(self, voxel):\n\n x, y, z = np.unravel_index(voxel, self.x_y_z)\n\n valid_x = []\n valid_y = []\n valid_z = []\n if x - 1 >= 0:\n valid_x.append(x - 1)\n if y - 1 >= 0:\n valid_y.append(y - 1)\n if z - 1 >= 0:\n valid_z.append(z - 1)\n\n valid_x.append(x)\n valid_y.append(y)\n valid_z.append(z)\n\n if x + 1 < self.x_y_z[0]:\n valid_x.append(x + 1)\n if y + 1 < self.x_y_z[1]:\n valid_y.append(y + 1)\n if z + 1 < self.x_y_z[2]:\n valid_z.append(z + 1)\n\n valid_neighbor_indices = cartesian((valid_x, valid_y, valid_z))\n\n ravel_indices = np.ravel_multi_index((valid_neighbor_indices[:, 0],\n valid_neighbor_indices[:, 1],\n valid_neighbor_indices[:, 2]), self.x_y_z)\n voxel_n = np.ravel_multi_index([self.voxel_x, self.voxel_y, self.voxel_z], self.x_y_z)\n return [x for x in ravel_indices if x in np.unique(voxel_n)]", "def get_neighbour_squares_idx(self, pos):\n if pos:\n possible_values = {0, 1, 2}\n col_variation = zip( [pos[0], pos[0]], possible_values - {pos[1]} )\n row_variation = zip( possible_values - {pos[0]}, [pos[1], pos[1]] )\n return list(col_variation), list(row_variation)", "def get_neighbors(point):\n pt = point.copy()\n output= [point.copy() for i in range(4)]\n output[0:2] = map(Point.setY, output[0:2], [pt.getY()+ i for i in range(-1,2,2)])\n output[2:4]= map(Point.setX, output[2:4], [pt.getX()+ i for i in range(-1,2,2)])\n return output", "def get_voxel_neighbors(self, voxel):\n\n x, y, z = np.unravel_index(voxel, self.x_y_z)\n\n valid_x = []\n valid_y = []\n valid_z = []\n if x - 1 >= 0:\n valid_x.append(x - 1)\n if y - 1 >= 0:\n valid_y.append(y - 1)\n if z - 1 >= 0:\n valid_z.append(z - 1)\n\n valid_x.append(x)\n valid_y.append(y)\n valid_z.append(z)\n\n if x + 1 < self.x_y_z[0]:\n valid_x.append(x + 1)\n if y + 1 < self.x_y_z[1]:\n valid_y.append(y + 1)\n if z + 1 < self.x_y_z[2]:\n valid_z.append(z + 1)\n\n valid_neighbor_indices = cartesian((valid_x, valid_y, valid_z))\n\n ravel_indices = np.ravel_multi_index((valid_neighbor_indices[:, 0],\n valid_neighbor_indices[:, 1],\n valid_neighbor_indices[:, 2]), self.x_y_z)\n\n return [x for x in ravel_indices if x in np.unique(self.voxel_n)]", "def _get_neighbors(size, point):\n i, j = point\n\n neighbors = [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]\n _valid_neighbor = lambda neighbor: all(0 <= x < size for x in neighbor)\n neighbors = list(filter(_valid_neighbor, neighbors))\n \n return neighbors", "def get_neighbors(start_square, visited=[]):\n neighbors = []\n\n # loop over possible x values\n for i in [start_square.x - 1, start_square.x, start_square.x + 1]:\n\n # drop neighbors 
outside of our region of interest\n if i < 0 or i > MAX_X:\n continue\n\n # loop over possible y values\n for j in [start_square.y - 1, start_square.y, start_square.y + 1]:\n\n # drop neighbors outside of our region of interest\n if j < 0 or j > MAX_Y:\n continue\n\n # Ignore ourself\n if i == start_square.x and j == start_square.y:\n continue\n\n # Ignore corner pieces\n if i == start_square.x - 1 and j != start_square.y:\n continue\n if i == start_square.x + 1 and j != start_square.y:\n continue\n\n # Deal with barriers\n found = False\n for square in visited:\n if square.pos == [i, j]:\n found = True\n break\n if found:\n continue\n\n neighbors.append(Square(i, j))\n\n return neighbors", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.neighbors", "def get_neighbours(self):\n return self._neighbours", "def get_neighbours(self, name: Any) -> set:\n if name in self._vertices:\n v = self._vertices[name]\n return {(neighbour.name, v.neighbours[neighbour]) for neighbour in v.neighbours}\n else:\n raise ValueError", "def get_near_positions(self, position: tuple):\n\n return ((x, y) for x, y in (\n (position[0], position[1] + 1),\n (position[0], position[1] - 1),\n (position[0] + 1, position[1]),\n (position[0] - 1, position[1])\n ) if 0 <= x < self._map_height and 0 <= y < self._map_width)", "def get_neighbours(self):\n return self.neighbours", "def neighbors(self, u):\r\n return filter(lambda v: self.getCapacity((u,v)) > 0, self.adjacent[u])", "def query_region(self, point):\n result = []\n indexes = []\n for didx, dpoint in enumerate(self.data):\n if dpoint != point:\n if self.l2_distance(dpoint, point) <= self.eps:\n result.append(dpoint)\n indexes.append(didx)\n return result, indexes", "def get_neighbors(y, x, H, W):\n neighbors = []\n\n for i in (y-1, y, y+1):\n for j in (x-1, x, x+1):\n if i >= 0 and i < H and j >= 0 and j < W:\n if (i == y and j == x):\n continue\n neighbors.append((i, j))\n\n return neighbors", "def get_neighbors(y, x, H, W):\n neighbors = []\n\n for i in (y-1, y, y+1):\n for j in (x-1, x, x+1):\n if i >= 0 and i < H and j >= 0 and j < W:\n if (i == y and j == x):\n continue\n neighbors.append((i, j))\n\n return neighbors", "def get_neighbours(self, item: Any) -> set:\n if item in self._vertices:\n v = self._vertices[item]\n return {neighbour.item for neighbour in v.neighbours}\n else:\n raise ValueError", "def getSearchSpaceCoords(self):", "def get_neighbors(self, vertex_id):\n pass # TODO\n if vertex_id in self.vertices:\n # print(f' self.vertices[vertex_id] >> {self.vertices[vertex_id]} ')\n return self.vertices[vertex_id]\n else:\n # print(f' ERROR: vertex {vertex.id} does not exist') \n raise ValueError(\"get_neighbor cannot locate vertex {vertex_id}\")", "def vertices(self):\n return self.pointlist", "def get_neighbours(self, cell):\n\t\tx,y = cell.find_id()\n\t\tlength = self.space.shape[1]\n\t\twidth = self.space.shape[0]\n\t\tif (length == 0 or width == 0 or x < 0 or x >= length or y < 0 or y >= width):\n\t\t\treturn []\n\t\tneighs = [(i,j) for i in range(y-1,y+2) if 0<=i<width for j in range(x-1,x+2) if 0<=j<length]\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tneighbours.append(self.space[neigh[0],neigh[1]])\n\t\treturn neighbours", "def vertices(self):\r\n return self.adjacent.keys()", "def getNeighbours(seg,meta,inversedIndex):\n return np.unique(np.fromiter( (inversedIndex[x] for x in 
np.concatenate([meta.loc[seg]['ins'],meta.loc[seg]['outs']])),dtype=np.int))", "def get_neighbours(self):\n return []", "def neighbours(self):\n return [x.node for x in self.edges]", "def neighbors8(point):\n x, y = point\n return ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1),\n (x + 1, y + 1), (x - 1, y - 1), (x + 1, y - 1), (x - 1, y + 1))", "def get_neighbourhood(indices, map_shape):\n if isinstance(map_shape, int):\n nx = 1\n size = map_shape\n elif len(map_shape) == 2:\n nx = map_shape[1]\n size = map_shape[0] * map_shape[1]\n else:\n print(\"Check your `map_shape` value.\")\n return\n extended = list(indices)\n for s in extended:\n susjedi = np.unique(\n np.array([s-2*nx,\n s-nx-1, s-nx, s-nx+1,\n s-2, s-1, s, s+1, s+2,\n s+nx-1, s+nx, s+nx+1,\n s+2*nx]))\n susjedi_cor = susjedi[(susjedi >= 0) & (susjedi < size)]\n extended = extended + list(susjedi_cor)\n return np.sort(np.unique(extended))", "def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours", "def neighbors_of(\n self, vertex, color: T.Optional[TriColor] = None\n ) -> T.Collection[T.Tuple['Vertex', int]]:\n\n neighbors = [\n (edge.sink, edge.weight or edge.sink.weight)\n for edge in vertex.out_edges\n if color is None or edge.sink.color == color\n ]\n return neighbors", "def get_outer_boundary_of_voronoi(self):\n edge = [edge for edge in self.edges if not edge.nxt][0]\n # next(obj for obj in objs if obj.val==5)\n first_vertex = edge.origin\n outer_boundary = []\n while (not edge.get_destination() == first_vertex):\n if(edge.get_destination().is_infinity()):\n edge = edge.twin.nxt\n else:\n outer_boundary.append(edge)\n edge = edge.nxt\n outer_boundary.append(edge)\n return outer_boundary", "def get_candidate_locations(cur_location, radius, row_num, col_num):\n cur_y, cur_x = cur_location\n delta = int(radius)\n max_x = cur_x + delta if cur_x + delta < col_num else col_num - 1\n min_x = cur_x - delta if cur_x - delta >= 0 else 0\n max_y = cur_y + delta if cur_y + delta < row_num else row_num - 1\n min_y = cur_y - delta if cur_y - delta >= 0 else 0\n candidates = []\n for x in range(min_x, max_x + 1):\n for y in range(min_y, max_y + 1):\n if distance(cur_x, cur_y, x, y) < radius:\n candidates.append((y, x))\n return candidates", "def neighbour_pixels(x, y):\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y ), (x, y ), (x + 1, y ),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)\n ]", "def get_neighbors(self):\n \n return self.adjacent.keys()", "def neighbours2((u,v)):\r\n\r\n return ((u-1, v+1), (u,v+1), (u+1,v+1), \r\n (u-1,v), (u+1,v),\r\n (u-1,v-1), (u,v-1), (u+1,v-1))", "def get_valid_neighbors(self, x, y):\n\t\tx_1, x_2 = max(x-1, 0), min(x+1, self.width-1)\n\t\ty_1, y_2 = max(y-1, 0), min(y+1, self.height-1)\n\t\treturn [(n, m) for n in range(x_1, x_2+1) \n\t\t for m in range(y_1, y_2+1) if x != n or y != m]", "def neighborhood((y, x), (height, width)):\n return [(yt, xt) for xt in [x + 1, x, x - 1]\n for yt in [y + 1, y, y - 1]\n if 0 <= xt < width and 0 <= yt < height\n and (xt, yt) != (x, y)]", "def neighbors_out(self, vertex):\n return list(self.neighbor_out_iterator(vertex))", "def findImmediateNeighbours(self):\n immediateNeighbours = []\n\n if self.xCoordinate - 1 > 0:\n 
immediateNeighbours.append(PixelPosition(self.xCoordinate - 1, self.yCoordinate))\n\n if self.xCoordinate + 1 < 395:\n immediateNeighbours.append(PixelPosition(self.xCoordinate + 1, self.yCoordinate))\n\n if self.yCoordinate + 1 < 500:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate + 1))\n\n if self.yCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate - 1))\n\n return immediateNeighbours", "def get_neighbors(self, vertex_id):\n if vertex_id in self.vertices:\n return self.vertices[vertex_id]\n else:\n return None", "def get_neighbors(self, vertex_id):\n if vertex_id in self.vertices:\n return self.vertices[vertex_id]\n else:\n return None", "def rs_evaluate_verts(road_shape):\n locs = []\n for i in [0, 2, 4, 6]:\n locs.append(road_shape.matrix_world @ road_shape.data.vertices[i].co)\n return locs", "def neighbors(self):\n return self.graph.neighbors(self.id)", "def get_neighbours(self, vertex):\n output = []\n \n if vertex in self.adjacency_list:\n for neighbour in self.adjacency_list[vertex]:\n output.append([neighbour.vertex.value, neighbour.weight])\n \n return output", "def get_neighbours_circular(lat, lng):\n origin = CellId.from_lat_lng(LatLng.from_degrees(lat, lng)).parent(S2_CELL_LEVEL)\n neighbors = {origin.id()}\n\n edge_neighbors = origin.get_edge_neighbors()\n surrounding_neighbors = [\n edge_neighbors[0], # North neighbor\n edge_neighbors[0].get_edge_neighbors()[1], # North-east neighbor\n edge_neighbors[1], # East neighbor\n edge_neighbors[2].get_edge_neighbors()[1], # South-east neighbor\n edge_neighbors[2], # South neighbor\n edge_neighbors[2].get_edge_neighbors()[3], # South-west neighbor\n edge_neighbors[3], # West neighbor\n edge_neighbors[0].get_edge_neighbors()[3], # North-west neighbor\n ]\n\n for cell in surrounding_neighbors:\n neighbors.add(cell.id())\n for cell2 in cell.get_edge_neighbors():\n neighbors.add(cell2.id())\n\n return neighbors", "def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)", "def get_neighbors(self):\n step_size = self.step_size\n return [\n SearchProblem(x, y, step_size, self.function)\n for x, y in (\n (self.x - step_size, self.y - step_size),\n (self.x - step_size, self.y),\n (self.x - step_size, self.y + step_size),\n (self.x, self.y - step_size),\n (self.x, self.y + step_size),\n (self.x + step_size, self.y - step_size),\n (self.x + step_size, self.y),\n (self.x + step_size, self.y + step_size),\n )\n ]", "def _get_row_col_neighbors(self, row_col: int) -> range:\n # inclusive start\n start = max(row_col - 1, 0)\n # exclusive limit\n limit = min(row_col + 2, len(self.puzzle))\n r = range(start, limit)\n return r", "def neighbors(self):\n return self._neighbors", "def get_neighbours(self, vertex):\r\n if not self.is_vertex_in_graph(vertex):\r\n raise GraphException(f\"The vertex {vertex} does not exist in the graph.\")\r\n for neighbour in self.__neighbours[vertex]:\r\n yield neighbour", "def get_outer_vertices(feature):\n return [\n point\n for part in Geometry.get_multipolygon(feature)\n for point in part[0][0:-1]\n ]", "def get_neighbour_edges(self, cur: Union[str, int]) -> list:\n\t\treturn [edge for edge in self.edges if cur in edge]" ]
[ "0.7313774", "0.6526953", "0.6480245", "0.6299469", "0.6191116", "0.6134625", "0.609304", "0.6080118", "0.60728604", "0.60651284", "0.606193", "0.6058646", "0.6022795", "0.60064405", "0.6004463", "0.6003909", "0.5977814", "0.59745324", "0.59657377", "0.594686", "0.59358627", "0.5923606", "0.59105736", "0.5900691", "0.5900691", "0.5900691", "0.5900691", "0.5900691", "0.5900691", "0.58990824", "0.5878734", "0.58784854", "0.5868995", "0.585206", "0.58307767", "0.5812599", "0.5790932", "0.5770803", "0.57617766", "0.5757514", "0.57518923", "0.57463866", "0.57445437", "0.5739542", "0.57294106", "0.5721134", "0.5720503", "0.57190686", "0.57012707", "0.5700886", "0.5700043", "0.5695137", "0.5685762", "0.5675441", "0.5675441", "0.5675441", "0.5675441", "0.56642574", "0.56600815", "0.56558335", "0.56392384", "0.5634038", "0.5630855", "0.5624628", "0.5624628", "0.56159204", "0.5594404", "0.5593292", "0.55820495", "0.5571249", "0.5560053", "0.5557107", "0.5553174", "0.5539355", "0.5536846", "0.5534062", "0.5527978", "0.55218524", "0.5520377", "0.5515605", "0.54976887", "0.5495508", "0.54904026", "0.54877573", "0.5483944", "0.5482544", "0.5482187", "0.5470235", "0.5470235", "0.5468144", "0.54597515", "0.54551816", "0.5452906", "0.5446789", "0.5442787", "0.54397607", "0.54367524", "0.5434927", "0.5431396", "0.54305947" ]
document_score: 0.5916195
document_rank: 22
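
The row ends with the per-negative retrieval scores followed by the positive document's own score and rank. As a minimal sketch of how those trailing fields fit together (assuming the row is loaded as a Python dict, that the negatives list and its score list are index-aligned, and that the scalar score and rank fields arrive as strings as they do for the scores above), the negatives can be paired with their scores and ranked. The dict below is a cut-down stand-in with only a few entries; the pairings are illustrative and not guaranteed to match the real ordering of the row.

# Cut-down stand-in for a row like the one above; field names and value
# types are assumptions based on the dump, not the actual data.
record = {
    "negatives": [
        "def get_neighbors(self):\n    return self.neighbors",
        "def get_neighbours(self):\n    return []",
        "def vertices(self):\n    return self.pointlist",
    ],
    "negative_scores": ["0.7313774", "0.5878734", "0.5630855"],
    "document_score": "0.5916195",
    "document_rank": "22",
}

# Pair each negative snippet with its score; the two lists are assumed to be
# index-aligned, so zip() keeps each snippet with the score computed for it.
scored = [
    (float(score), snippet)
    for score, snippet in zip(record["negative_scores"], record["negatives"])
]

# Hardest negatives first, i.e. the ones the retriever scored highest.
scored.sort(key=lambda pair: pair[0], reverse=True)

for score, snippet in scored:
    first_line = snippet.splitlines()[0]
    print(f"{score:.7f}  {first_line}")

# The positive document's score and rank are stored as strings in this dump,
# so convert them before comparing against the negative scores.
doc_score = float(record["document_score"])
doc_rank = int(record["document_rank"])
harder_than_positive = sum(score > doc_score for score, _ in scored)
print(f"rank={doc_rank}, negatives scoring above the positive: {harder_than_positive}")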