Dataset columns:
  query            string    (length 9 to 3.4k)
  document         string    (length 9 to 87.4k)
  metadata         dict
  negatives        sequence  (4 to 101 items)
  negative_scores  sequence  (4 to 101 items)
  document_score   string    (length 3 to 10)
  document_rank    string    (102 distinct values)
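Each row pairs a natural-language query with the code snippet that answers it (document), a list of mined hard-negative snippets with their similarity scores, and the score and rank of the positive document among them. The sketch below shows one way to load and inspect such rows; it assumes the data has been exported as JSON Lines and that the Hugging Face datasets library is used, neither of which is stated in the dump itself, and the file name is a placeholder.

from datasets import load_dataset

# Hypothetical file name; substitute the actual export of this dataset.
rows = load_dataset("json", data_files="code_search_triplets.jsonl", split="train")

row = rows[0]
print(row["query"])                  # natural-language description of the code
print(row["document"])               # the positive (matching) code snippet
print(len(row["negatives"]))         # 4 to 101 hard-negative snippets
print(row["negative_scores"][:3])    # similarity scores aligned with 'negatives'
print(row["document_score"], row["document_rank"])  # score and rank of the positive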
Sets the 'reindex_data' value in the REST API to 0 to clear it. Splunk then automatically restarts the input.
def clear_checkbox(session_key, stanza):
    url = f'https://localhost:8089/servicesNS/nobody/TA-strava-for-splunk/data/inputs/strava_api/{stanza}'
    headers = {'Authorization': f'Splunk {session_key}'}
    payload = 'reindex_data=0'
    helper.send_http_request(url, "POST", headers=headers, payload=payload, verify=False, use_proxy=False)
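The helper object here is not defined in the snippet; it is presumably Splunk Add-on Builder's modular-input helper. For readers without that scaffolding, an equivalent request made with the requests library might look like the sketch below; the endpoint, stanza name, and function name are illustrative assumptions rather than part of the dataset row.

import requests

def clear_checkbox_standalone(session_key, stanza):
    # Same REST call issued directly with 'requests' (a sketch, not the add-on's code).
    url = ('https://localhost:8089/servicesNS/nobody/TA-strava-for-splunk'
           f'/data/inputs/strava_api/{stanza}')
    resp = requests.post(
        url,
        headers={'Authorization': f'Splunk {session_key}'},
        data={'reindex_data': 0},
        verify=False,  # mirrors verify=False in the original helper call
    )
    resp.raise_for_status()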
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()", "def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()", "def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()", "async def reset_data(self) -> None:\n self.waf_retry = 0\n await super().reset_data()", "def reindex(self, using=None):\n self.clear(using=using)\n self.update(using=using)", "def reset_data(self):\n self.data = []", "def reset_data(self):\n self.data = None", "def reset_index(self):\n self.increments = 0", "def start_reindex(self):\n self.reindex_button.click() # lint-amnesty, pylint: disable=no-member", "def reset(self):\n self._idx = 0", "def reset(self):\n self._data = []", "def reset(self):\n self.reset_count += 1\n self._init_data()", "def reindex(self):", "def reindex(self):", "def reset(self):\r\n\t\tself.index = 0", "def reset(self):\n # Attempt to reset data loader\n self.data_loader_iter = iter(self.data_loader)\n self.num_batches = 0\n\n # Make sure calibrator will check the cache again when reset.\n self.cache_contents = None", "def reset(self):\n self._current_index = 0", "def reset(self):\n self.data = self._defaults", "def clear_index(self):\n self.index = None", "def reset(self):\n self.data = {}\n self.is_bound = False\n self._errors = None", "def reset_data(self):\n try:\n self._data = self._schema({})\n except vol.Invalid as ex:\n _LOGGER.error(\"Can't reset %s: %s\",\n self._file, humanize_error(self._data, ex))", "def reindex(self):\n raise NotImplementedError()", "def reset(self):\n for k in self.data_keys:\n setattr(self, k, [])\n self.size = 0", "def clear_index(self):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"POST\", \"/1/indexes/%s/clear\" % self.url_index_name, self.client.timeout)", "def clear(self):\r\n self._state[\"data\"].clear()\r\n self._state[\"session\"].request_rerun()", "def clear(self):\r\n self._state[\"data\"].clear()\r\n self._state[\"session\"].request_rerun()", "def reset(self):\n self.temp_data.clear()", "def reset(self):\n requests.put('{}/reset'.format(self._get_url()))", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def reset(self):\n self._total_value = 0.0\n self._count = 0", "def reset_elasticsearch_endpoint():\n reset_elasticsearch()\n resp = Response(response=json.dumps({\"success\": True}),\n status=200,\n mimetype=\"application/json\")\n return resp", "def restart(self):\n self.idx = 0", "def reset(self):\n self.rows = deepcopy(self.empty_rows)\n self._update_max_row_info()", "def _reset(self):\n self._values = {}", "def clear(self):\n self.solr.delete_query(\"%s:%s\"\n % (self.index_uuid_field, self.index_uuid))\n self.solr.commit()", "def unindex_later(self):\n return", "def clearValue(self):\n self.data = []", "def resetDataRef(self, is_train):\n self.data_ref = 0", "def reset(self):\n self.complete_misses = 0\n return", "def clear_data(self):\n if isinstance(self.data, DataManager):\n self.data._update_keys(clear=True)\n else:\n self.data = {}", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n 
self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\n self.table[:, :] = 0\n self.counts[:] = 0\n self.names = []\n self.hashesperid.resize(0)\n self.dirty = True", "def clear_data(cls):\n cls.__data.clear()\n cls.__counters.clear()", "def _reinit_indexes(self):\n print('Reinitializing indexes...')\n for identity in self.groundtruth_metadata.keys():\n self.groundtruth_metadata[identity]['index'] = 0\n print('Indexes reinitialized!')", "def restart(self):\n self.dominated_set = []\n self.np = 0\n self.rank = None\n self.crowding_distance = 0", "def reset(self):\n\n self.results = {}", "def reset(self):\n self.liidx = 0\n self.clidx = 0", "def _reset(self):\n self._set(\"_n_init_features\", None)\n self._set(\"_n_output_features\", None)\n self._set(\"_n_intervals\", None)\n self._set(\"_mapper\", {})\n self._set(\"_cpp_preprocessor\", None)\n self._set(\"_fitted\", False)", "def reset(self):\n self.accumulation = None", "def reset(self):\n self.accumulation = None", "def reset(self):\n self.fuzz_complete = False\n self.mutant_index = 0\n self.value = self.original_value", "def reset(self):\n for i in range(0, len(self.__counts)):\n self.__counts[i] = 0\n self.__overflow = 0\n self.__total_count = 0\n self.__total_values = 0\n self.__min = None\n self.__max = None", "def wipe_index(self, index):\n url = f'{self.host}{index}/_delete_by_query?conflicts=proceed'\n data = {'query': {'match_all': {}}}\n resp = requests.post(url, json=data)\n self.flush(index)\n return resp.json()", "def reset(self):\n \n pass", "def reset(self):\n self.values.clear()\n\n self.on_reset()", "def reset_index(self):\n self.df = self.df.reset_index()", "def reset (self):\n self.counter = 0", "def reset(self):\n self.counter = 0", "def reset(self):", "def reset(self):", "def reset(self):", "def reset(self):", "def clear():\r\n CURRENT_REQUEST_CONFIGURATION.data = {}", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self, *args, **kwargs):\n pass", "def reset(self) -> None:", "def reset(self) -> None:", "def reset(self) -> None:", "def reset(self):\n ...", "def reset(self):\n ...", "def reset(self):\n self.values = None\n self.keys = None\n self.mask = None", "def reset_values(self):\n\n self.values = []", "def reset(self):\n if self.getValidationState() != 'not_synchronized':\n self.drift()\n self.setPartialData(None)\n self.setTemporaryData(None)", "def Reset(self):\n self._results = []", "def clear(self, request):\n requests.put('{}/clear'.format(self._get_url()), data=request.json())", "def reset(self, *args, **kwargs):\n ...", "def _data_reset(self):\n conn = self.get_connection()\n\n elements = {\n **self.domain.registry.aggregates,\n **self.domain.registry.entities,\n **self.domain.registry.views,\n }\n for _, element_record in elements.items():\n provider = current_domain.providers[element_record.cls.meta_.provider]\n repo = self.domain.repository_for(element_record.cls)\n\n model_cls = repo._model\n if provider.conn_info[\n \"DATABASE\"\n ] == Database.ELASTICSEARCH.value and 
conn.indices.exists(\n model_cls._index._name\n ):\n conn.delete_by_query(\n refresh=True,\n index=model_cls._index._name,\n body={\"query\": {\"match_all\": {}}},\n )", "def reset(self):\n self.cardinality = 0\n self.sax_character = 0\n self.wildcardbits = 0", "def reset(self):\n solr = self._clone()\n solr.q = '*:*'\n solr.params = {\n 'fq': [],\n 'start': 0,\n 'rows': 10,\n 'sort': '',\n 'wt': 'json',\n 'facet': 'false',\n 'facet.field': [],\n 'facet.query': [],\n }\n return solr", "def clearRows(self):\n self.data['rows'] = []", "def reset(self):\n\n self._begin = 0\n self._end = 0\n self._size = 0", "def reset(self, **kwargs):\n # '_reset' depends on the 'backend'\n self._reset(**kwargs)", "def reset(self):\n self._clusters = {}\n self._clusters_val = {}\n self._centroids = {}\n self.store()", "def clear(self) -> None:\n self.data = {} # defaultdict fails (T282865)\n self.size = 0", "def deindex(self):\n self.deindex_value(self.proxy_get())", "def reset(self):\n self._buffer.fill(0)", "def reset(self):\n\n self._problem.reset()\n self._termination_criterion.reset()\n\n self._tabu_list = TabuList(self._list_size)\n\n if self.data is not None:\n self.data = []", "def reset(self):\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError" ]
[ "0.6248837", "0.6248837", "0.6248837", "0.6241557", "0.62066907", "0.6205658", "0.6196166", "0.61584055", "0.6134224", "0.61318415", "0.610773", "0.6060318", "0.60403645", "0.60403645", "0.5998498", "0.5993323", "0.5940291", "0.5935767", "0.5879322", "0.5871808", "0.5868277", "0.5859847", "0.5856358", "0.5812885", "0.5762974", "0.5762974", "0.5760629", "0.5750743", "0.570962", "0.570962", "0.570962", "0.56637084", "0.5661645", "0.5637273", "0.5631555", "0.5628728", "0.5628276", "0.56184936", "0.56059676", "0.56037426", "0.5598718", "0.5586603", "0.55525815", "0.55525815", "0.55525815", "0.55525815", "0.55297464", "0.5520556", "0.5519053", "0.55017775", "0.5493583", "0.54924285", "0.54921794", "0.54915303", "0.54915303", "0.5487106", "0.5482161", "0.5481188", "0.54779065", "0.54768157", "0.5475496", "0.5471666", "0.5464318", "0.5462074", "0.5462074", "0.5462074", "0.5462074", "0.54605633", "0.5452037", "0.5452037", "0.5452037", "0.5452037", "0.5452037", "0.5452037", "0.5452037", "0.5452037", "0.5449693", "0.5446391", "0.5446391", "0.5446391", "0.54336923", "0.54336923", "0.54311615", "0.54258054", "0.54072255", "0.5406645", "0.54016733", "0.5393218", "0.5384004", "0.5382462", "0.5377367", "0.5371233", "0.53675616", "0.53634673", "0.53580797", "0.5357751", "0.5355989", "0.5354565", "0.5345116", "0.53442156", "0.53442156" ]
0.0
-1
Gets all activities, 30 per page as per Strava's default.
def get_activities(ts_activity, access_token):
    params = {'after': ts_activity, 'access_token': access_token}
    url = "https://www.strava.com/api/v3/activities"
    response = return_json(url, "GET", parameters=params)
    return response
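As a usage sketch (the token and timestamp below are placeholders, and return_json is assumed to hand back the decoded JSON list from Strava):

# Fetch every activity that started after the given epoch timestamp and
# walk the returned list (30 items per page by Strava's default).
ts_last_seen = 1672531200  # e.g. 2023-01-01T00:00:00Z
activities = get_activities(ts_last_seen, "<strava access token>")
for activity in activities or []:
    print(activity["id"], activity["type"], activity["start_date"])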
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_page_of_activities_return_all(self, StravaTokens1, Activity1, Activity2, Activity3):\n self.mock_get.return_value = Mock(ok=True)\n self.mock_get.return_value.json.return_value = [Activity1, Activity2, Activity3]\n strava_tokens = StravaTokens1\n response = get_page_of_activities(None, 1, strava_tokens)\n assert self.mock_get.called_with(\"https://www.strava.com/api/v3/activities?access_token=\"\n + strava_tokens[\"access_token\"]\n + \"&per_page=200&page=1\")\n assert response.ok is True\n assert response.json() == [Activity1, Activity2, Activity3]", "def get_activities():\n pass", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def __ui_list_all_activities(self):\n activities_list = self.__activity_service.service_get_list_of_activities()\n if len(activities_list) == 0:\n print(\"The list of activities is empty!\\n\")\n else:\n for activity in activities_list:\n print(activity)\n print(\"\")", "def get_activities(self, activity_ids=None, max_records=50):\r\n return self.connection.get_all_activities(self, activity_ids, max_records)", "def get_activities(self, user_id=None, group_id=None, app_id=None,\n activity_id=None, start_index=0, count=0):\n raise NotImplementedError()", "def activities(self):\n if \"activities\" in self._prop_dict:\n return ActivitiesCollectionPage(self._prop_dict[\"activities\"])\n else:\n return None", "def all_activity(self):\n\t\tself.db = DB()\n\t\tactivity_all = self.db.select_all_from(\"activity\")\n\t\ttmpl = lookup.get_template(\"activity.html\")\n\t\treturn (tmpl.render(activity=activity_all))", "def get_activities(cls):\n objs = cls.objects\n return objs", "def fetch_activities(access_token):\n\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n # Fetch list of athlete's activities\n activities = []\n page = 1\n while True:\n params = {\"per_page\": MAX_ACTIVITIES_PER_PAGE, \"page\": page}\n r = requests.get(API_URL + \"/athlete/activities\", headers=headers, params=params)\n new_activities = r.json()\n\n if \"errors\" in new_activities:\n raise AuthError(new_activities[\"message\"])\n activities.extend(new_activities)\n\n # Continue fetching activities if necessary\n if len(new_activities) == MAX_ACTIVITIES_PER_PAGE:\n page += 1\n else:\n break\n\n return activities", "def activities(self):\r\n return v3.Activities(self)", "def get_activities(self, type=None):\n return flattrclient._get_query_dict(type=type)", "def get_activity_list(self):\n return self._request_activity_list(self.athlete)", "def get_all_activities_list(self):\n self.__load_activities_from_file_into_memory()\n return super().get_all_activities_list()", "def collect_activities(self, user_id, 
release, params=None):\n params = params or {}\n filter_params = {'user_id': user_id, 'release': release}\n filter_params.update(params)\n activities = []\n while True:\n resp = requests.get(self.url, filter_params)\n content = json.loads(resp.content)\n activities.extend(content['activity'])\n filter_params['start_record'] += self.page_size\n if len(content['activity']) == 0:\n break\n return activities", "def test_api_get_all_activities(self):\n # create a bucket\n res = self.register_login_get_token()\n self.assertEqual(res.status_code, 201)\n\n # create a activity\n res = self.client().post('/bucketlist/1/activities',\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.activity)\n self.assertEqual(res.status_code, 201)\n\n # get activities\n res = self.client().get('/bucketlist/1/activities',\n headers=dict(\n Authorization=\"Bearer \" + self.access_token))\n self.assertEqual(res.status_code, 200)", "def get(self, request):\n activities = (\n activitystreams.streams[\"local\"]\n .get_activity_stream(request.user)\n .filter(\n Q(comment__isnull=False)\n | Q(review__isnull=False)\n | Q(quotation__isnull=False)\n | Q(mention_books__isnull=False)\n )\n )\n\n large_activities = Paginator(\n activities.filter(mention_books__isnull=True)\n .exclude(content=None, quotation__quote=None)\n .exclude(content=\"\"),\n 6,\n )\n small_activities = Paginator(\n activities.filter(\n Q(mention_books__isnull=False) | Q(content=None) | Q(content=\"\")\n ),\n 4,\n )\n\n page = request.GET.get(\"page\")\n data = {\n \"large_activities\": large_activities.get_page(page),\n \"small_activities\": small_activities.get_page(page),\n }\n return TemplateResponse(request, \"discover/discover.html\", data)", "def activities(self):\r\n return resources.Activities(self)", "def activities(self):\n return self._activities", "def activities(self):\r\n return activities.Activities(self)", "def get_all(self, start_at, limit, order=None):", "def show_activities(self): \n database = Database('data/database.db')\n activities = database.read_activities()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name] for item in activities],\n pageTitle = \"Activités\",\n tableTitle = \"Liste de toutes les activités\",\n ths = [\"Numéro\", \"Nom\"]\n )", "def test_get_activities(self):\n pass", "def activities_list(self):\n self.__load_activities_from_file_into_memory()\n return self._activities_list", "def getUserActivities(context, request):\n mmdb = MADMaxDB(context.db)\n query = {}\n query['actor.username'] = request.actor['username']\n query['verb'] = 'post'\n chash = request.params.get('context', None)\n if chash:\n query['contexts.hash'] = chash\n\n is_head = request.method == 'HEAD'\n activities = mmdb.activity.search(query, sort=\"_id\", keep_private_fields=False, flatten=1, count=is_head, **searchParams(request))\n\n handler = JSONResourceRoot(activities, stats=is_head)\n return handler.buildResponse()", "def get_sorted_activities(self):\n return helpers.get_sorted_activities(self)", "def by_activity(cls,site_id=0,activity=None):\n return meta.DBSession.query(Activity).filter_by(site_id=site_id,activity=activity).all()", "def requests(self,max_r=1,p=\"snippet,contentDetails,id\"):\n request = self.youtube.activities().list(part=p,channelId=self.__channel_id,maxResults=max_r)\n try:\n response = request.execute()\n except httplib2.error.ServerNotFoundError:\n pass\n else:\n return response", "def projects_activity_json():\n 
limit = request.args.get('limit') or 10\n q = request.args.get('q') or None\n if q and len(q) < 3:\n q = None\n return jsonify(activities=get_event_activities(None, limit, q))", "def fetch(self):\n\n entries = []\n for activity in self.activities[\"entries\"]:\n entries.append(\n [\n element\n for element in [activity[\"title\"], activity[\"content\"][0][\"value\"]]\n ]\n )\n\n return entries[0 : self.max_entries]", "def query_nine_a(self, table_name_activities):\n query = (\n \"SELECT YEAR(start_date_time) as Year, MONTH(start_date_time) as Month, COUNT(*) AS ActivityCount \"\n \"FROM %s \"\n \"GROUP BY YEAR(start_date_time), MONTH(start_date_time) \"\n \"ORDER BY ActivityCount DESC \"\n \"LIMIT 1 \"\n )\n\n self.cursor.execute(query % table_name_activities)\n rows = self.cursor.fetchall()\n print(tabulate(rows, headers=self.cursor.column_names))\n return rows", "def get_all(self, start=0, count=-1, sort='', query='', view=''):\n return self._client.get_all(start, count, sort=sort, query=query, view=view)", "def getAllActivityLog(self):\n url=self._v2BaseURL + \"/api/v2/activity/activityLog\"\n headers = {'Content-Type': \"application/json\", 'Accept': \"application/json\",\"icSessionID\":self._v2icSessionID}\n infapy.log.info(\"GetAllActivityLog URL - \" + url)\n infapy.log.info(\"API Headers: \" + str(headers))\n infapy.log.info(\"Body: \" + \"This API requires no body\")\n # The below format is for post\n # bodyV3={\"username\": userName,\"password\": password}\n # r3 = re.post(url=urlV3, json=bodyV3, headers=headers)\n try:\n response = re.get(url=url, headers=headers)\n infapy.log.debug(str(response.json()))\n except Exception as e:\n infapy.log.exception(e)\n raise\n infapy.log.info(\"Fetched the all the Activity log from IICS\")\n data = response.json()\n return data", "def get(self):\n return [a._to_dict() for a in Activity().get_all()]", "async def get_your_puzzle_activity(self, limit: int = None) -> 'Response':\n headers = {\n 'Content-Type': 'application/x-ndjson',\n }\n parameters = {\n 'max': limit if limit is not None else 'null'\n }\n response = await self._client.request_stream(method=RequestMethods.GET,\n url=USERS_MY_PUZZLE_ACTIVITY_URL,\n headers=headers,\n params=parameters)\n return response", "def index(self):\n\n\t\tself.db = DB()\n\t\tactivityTuple = self.db.select_all_from(\"activity\")[1]\n\t\ttmpl = lookup.get_template(\"index.html\")\n\t\treturn (tmpl.render(activity=activityTuple))", "def load_exported_activities() -> List[DiscoveredActivities]:\n activities = []\n activities.extend(discover_actions(\"chaosgcp.gke.nodepool.actions\"))\n activities.extend(discover_probes(\"chaosgcp.gke.nodepool.probes\"))\n activities.extend(discover_actions(\"chaosgcp.sql.actions\"))\n activities.extend(discover_probes(\"chaosgcp.sql.probes\"))\n activities.extend(discover_probes(\"chaosgcp.storage.probes\"))\n activities.extend(discover_actions(\"chaosgcp.cloudbuild.actions\"))\n activities.extend(discover_probes(\"chaosgcp.cloudbuild.probes\"))\n activities.extend(discover_actions(\"chaosgcp.cloudrun.actions\"))\n activities.extend(discover_probes(\"chaosgcp.cloudrun.probes\"))\n activities.extend(discover_probes(\"chaosgcp.monitoring.probes\"))\n activities.extend(discover_probes(\"chaosgcp.cloudlogging.probes\"))\n activities.extend(discover_probes(\"chaosgcp.artifact.probes\"))\n activities.extend(discover_actions(\"chaosgcp.lb.actions\"))\n return activities", "def _get_allpages(self, url:str, paramsdict:Dict[str,str]):\n r1 = self._get_dict_from_url(url, paramsdict)\n r 
= [r1]\n #display(r)\n if 'total_pages' in r1:\n # print('more than one page')\n for next_page in range(2, r1['total_pages']+1):\n # print(f\"load page {next_page} \")\n r.append(self._get_dict_from_url(url, {**paramsdict, 'page':next_page}))\n # print(len(r))\n # print([len(rx['results']) for rx in r])\n results = [entry for rx in r for entry in rx['results'] ]\n\n return results", "def get_next_activities(self, n=None):\n\n if n > self.available():\n # !!! This is not quite as specified (see method docs) !!!\n raise IllegalState('not enough elements available in this list')\n else:\n next_list = []\n x = 0\n while x < n:\n try:\n next_list.append(next(self))\n except Exception: # Need to specify exceptions here!\n raise OperationFailed()\n x = x + 1\n return next_list", "def get_activities_dictionary(self):\r\n activities_dict_list = list()\r\n activities = self.get_specific_node_list('activity')\r\n for activity in activities:\r\n activities_dict = dict()\r\n activity_name = None\r\n category = None\r\n for key, val in activity.attrib.iteritems():\r\n if \"}name\" in key:\r\n activity_name = val.split(\".\")[-1]\r\n break\r\n if activity_name:\r\n intent_filter_node = self.get_specific_node_list('intent-filter', root_node=activity)\r\n if len(intent_filter_node) == 1:\r\n categories_nodes = self.get_specific_node_list('category', root_node=intent_filter_node[0])\r\n category = self.get_category_value(categories_nodes)\r\n else:\r\n category = None\r\n activities_dict[\"name\"] = activity_name\r\n activities_dict[\"category\"] = category\r\n activities_dict_list.append(activities_dict)\r\n return activities_dict_list", "def activities(self, activities):\n \n self._activities = activities", "def test_list_activity_occurrences(self):\n pass", "def club_activity(self):\n\n return APIRequest.objects.filter(user=self).order_by('-created_datetime')[:15]", "def test_get_page_of_activities_return_after_date(self, StravaTokens1, Activity1, Activity2, Activity3):\n self.mock_get.return_value = Mock(ok=True)\n self.mock_get.return_value.json.return_value = [Activity1, Activity2, Activity3]\n strava_tokens = StravaTokens1\n response = get_page_of_activities(\"2018-02-16T14:52:54Z\", 1, strava_tokens)\n assert self.mock_get.called_with(\"https://www.strava.com/api/v3/activities?access_token=\"\n + strava_tokens[\"access_token\"]\n + \"&after=2018-02-16T14:52:54Z&per_page=200&page=1\")\n assert response.ok is True\n assert response.json() == [Activity1, Activity2, Activity3]", "def get_activities_by_session_id(self, session_id):\n return self._db.get_all(\"\"\"\n SELECT * FROM activity_log\n WHERE session_id = ?\"\"\", (session_id, ))", "def __load_exported_activities() -> List[DiscoveredActivities]:\n activities = []\n activities.extend(discover_actions(\"chaosazure.machine.actions\"))\n activities.extend(discover_probes(\"chaosazure.machine.probes\"))\n activities.extend(discover_actions(\"chaosazure.aks.actions\"))\n activities.extend(discover_actions(\"chaosazure.vmss.actions\"))\n activities.extend(discover_actions(\"chaosazure.webapp.actions\"))\n activities.extend(discover_probes(\"chaosazure.webapp.probes\"))\n activities.extend(discover_actions(\"chaosazure.postgresql_flexible.actions\"))\n activities.extend(discover_probes(\"chaosazure.postgresql_flexible.probes\"))\n activities.extend(discover_actions(\"chaosazure.postgresql.actions\"))\n activities.extend(discover_probes(\"chaosazure.postgresql.probes\"))\n activities.extend(discover_actions(\"chaosazure.redis.actions\"))\n 
activities.extend(discover_probes(\"chaosazure.redis.probes\"))\n activities.extend(discover_actions(\"chaosazure.eventhub.actions\"))\n activities.extend(discover_probes(\"chaosazure.eventhub.probes\"))\n return activities", "def list(self, context, filters, marker, limit, sort,\n latest, list_all_artifacts=False):\n session = api.get_session()\n return api.get_all(context=context, session=session, filters=filters,\n marker=marker, limit=limit, sort=sort,\n latest=latest,\n list_all_artifacts=list_all_artifacts)", "def activity_logs(self) -> api.ActivityLogs:\n return self._get_model(model=api.ActivityLogs)", "def get_all(cls, request, page=None, limit=None):\n session = get_session(request)\n\n query = session.query(cls)\n\n if limit:\n query = query.limit(limit)\n\n if page and limit:\n offset = (page - 1) * limit\n query = query.offset(offset)\n\n return query", "def _get_list(self, url, params=None, method=\"GET\"):\n request_args = {'method': method, 'url': url}\n if params is not None:\n request_args['params'] = params\n\n return self._paginated_generator(request_args)", "def get_all(self, start=0, count=-1, filter='', query='', sort=''):\n return self._client.get_all(start, count, filter=filter, sort=sort, query=query)", "def get_all(self, start=0, count=-1, filter='', sort=''):\n return self._client.get_all(start=start, count=count, filter=filter, sort=sort)", "def activities(self, activities):\n\n self._activities = activities", "def get_list(self, *args, **kwargs):\r\n request_params = {\r\n 'headers': {\r\n 'User-Agent':'Google-Bot'\r\n },\r\n 'params': {\r\n 'page':kwargs.get('page', self.page),\r\n 'per_page':kwargs.get('per_page', self.per_page)\r\n }\r\n }\r\n if kwargs.get('proxy', None):\r\n request_params['proxies'] = kwargs['proxies']\r\n\r\n response = getattr(requests, 'get')('{api_endpoint}'.format(**kwargs), **request_params)\r\n return response.json()", "def _paginatedRequest(allPages, *args):\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n data.extend(newData['results'])\n currentPage = currentPage + 1\n if newData['page'] == newData['last_page']:\n break\n return data", "def get_activity_stream(token, activity, types, series_type='time', resolution='high'):\n types = ','.join(types)\n params = {'access_token': token}\n url = f'https://www.strava.com/api/v3/activities/{activity}/streams/{types}&series_type={series_type}&resolution={resolution}&key_by_type='\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response", "def fetch_all_scans(self, page=1, limit=50, href=None, scan_status_enum=None, scan_date=None):\n params = {}\n if scan_status_enum:\n params['scanStatusEnum'] = scan_status_enum\n if scan_date:\n params['scanDate'] = scan_date\n # If href (calling another page gives an href tag for next page in line)\n if href:\n return super().request('GET', '/api/network' + href, params=params)\n # First call\n return super().request('GET', f'/api/network/scans?_page={page}&_limit={limit}', params=params)", "def event_activity_csv(event_id):\n limit = request.args.get('limit') or 50\n q = request.args.get('q') or None\n if q and len(q) < 3:\n q = None\n csvstream = gen_csv(get_event_activities(event_id, limit, q))\n headers = {'Content-Disposition': 'attachment; filename=activity_list.csv'}\n return Response(stream_with_context(csvstream),\n mimetype='text/csv', headers=headers)", "def test_user_activities(self):\n url, parsed = 
self.prepare_urls(\n 'v1:activity-user-activities', subdomain=self.company.subdomain)\n \n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n \n content = json.loads(response.content)\n self.assertTrue(content.has_key('count'))\n self.assertTrue(content.has_key('next'))\n self.assertTrue(content.has_key('previous'))\n self.assertTrue(content.has_key('results'))", "def list(self, *, per_page: int = 10) -> Iterator[GenerativeDesignExecution]:\n return self._paginator.paginate(page_fetcher=self._fetch_page,\n collection_builder=self._build_collection_elements,\n per_page=per_page)", "def list(limit, export):\n GetArticles.get_all_articles(limit, export)", "def get(category, page=1, per_page=5):\r\n\r\n count = n_count(category)\r\n\r\n sql = text('''SELECT app.id, app.name, app.short_name, app.description,\r\n app.info, app.created, app.category_id, \"user\".fullname AS owner,\r\n featured.app_id as featured\r\n FROM \"user\", task, app\r\n LEFT OUTER JOIN category ON app.category_id=category.id\r\n LEFT OUTER JOIN featured ON app.id=featured.app_id\r\n WHERE\r\n category.short_name=:category\r\n AND app.hidden=0\r\n AND \"user\".id=app.owner_id\r\n AND app.info LIKE('%task_presenter%')\r\n AND task.app_id=app.id\r\n GROUP BY app.id, \"user\".id, featured.app_id ORDER BY app.name\r\n OFFSET :offset\r\n LIMIT :limit;''')\r\n\r\n offset = (page - 1) * per_page\r\n results = db.engine.execute(sql, category=category, limit=per_page, offset=offset)\r\n apps = []\r\n for row in results:\r\n app = dict(id=row.id,\r\n name=row.name, short_name=row.short_name,\r\n created=row.created,\r\n description=row.description,\r\n owner=row.owner,\r\n featured=row.featured,\r\n last_activity=pretty_date(last_activity(row.id)),\r\n last_activity_raw=last_activity(row.id),\r\n overall_progress=overall_progress(row.id),\r\n info=dict(json.loads(row.info)))\r\n apps.append(app)\r\n return apps, count", "def get_activities_response(self, user_id=None, group_id=None, app_id=None,\n activity_id=None, start_index=0, count=0,\n etag=None, min_id=None, cache=None,\n fetch_replies=False, fetch_likes=False,\n fetch_shares=False, fetch_events=False):\n if activity_id:\n # Sometimes Facebook requires post ids in USERID_POSTID format; sometimes\n # it doesn't accept that format. 
I can't tell which is which yet, so try\n # them all.\n ids_to_try = [activity_id]\n if '_' in activity_id:\n user_id_prefix, activity_id = activity_id.split('_', 1)\n ids_to_try.insert(0, activity_id)\n if user_id:\n ids_to_try.append('%s_%s' % (user_id, activity_id))\n\n for id in ids_to_try:\n try:\n posts = [json.loads(self.urlopen(API_OBJECT_URL % id).read())]\n break\n except urllib2.URLError, e:\n logging.warning(\"Couldn't fetch object %s: %s\", id, e)\n else:\n posts = []\n\n if posts == [False]: # FB returns false for \"not found\"\n posts = []\n\n else:\n url = API_SELF_POSTS_URL if group_id == source.SELF else API_HOME_URL\n url = url % (user_id if user_id else 'me', start_index)\n if count:\n url = util.add_query_params(url, {'limit': count})\n headers = {'If-None-Match': etag} if etag else {}\n try:\n resp = self.urlopen(url, headers=headers)\n etag = resp.info().get('ETag')\n posts = json.loads(resp.read()).get('data', [])\n except urllib2.HTTPError, e:\n if e.code == 304: # Not Modified, from a matching ETag\n posts = []\n else:\n raise\n\n activities = [self.post_to_activity(p) for p in posts]\n response = self._make_activities_base_response(activities)\n response['etag'] = etag\n return response", "def test_get_page_of_activities():\n \n # get activities on page that won't have activities\n r = get_page_of_activities(None, 50)\n assert r.status_code == 200\n assert r.json() is None\n\n # get values only after certain time\n r = get_page_of_activities(\"2020-12-27T19:45:05Z\", 1)\n assert r.status_code == 200\n # check that all necessary objects are included\n r_json = r.json()\n assert r_json is not None\n assert \"id\" in r_json\n assert \"type\" in r_json\n assert \"start_date\" in r_json\n # check that we only got objects that occured after last_activity_date param\n for x in r_json:\n assert datetime.strptime(x[\"start_date\"], \"%Y-%m-%dT%H:%M:%SZ\") < datetime.strptime(\"2020-12-27T19:45:05Z\", \"%Y-%m-%dT%H:%M:%SZ\")\n \n # check that returns all activities on page\n r_all_none = get_page_of_activities(None, 1)\n assert r_all_none.status_code == 200\n r_all_none_json = r_all_none.json()\n assert r_all_none_json is not None\n assert \"id\" in r_all_none_json\n assert \"type\" in r_all_none_json\n assert \"start_date\" in r_all_none_json\n # should return all activities because I have none prior to 2020\n r_all_date = get_page_of_activities(\"2019-12-22T19:41:05Z\", 1)\n assert r_all_date.status_code == 200\n r_all_date_json = r_all_date.json()\n assert r_all_date_json is not None\n assert \"id\" in r_all_date_json\n assert \"type\" in r_all_date_json\n assert \"start_date\" in r_all_date_json\n\n # test to see if both methods to return all activities on page worked\n assert len(r_all_date_json) == len(r_all_none_json)\n\n # try to get activities from future is error\n r = get_page_of_activities(\"2025-12-22T19:41:05Z\", 1)\n assert r.status_code != 200", "def _get_user_last_activities(self, user_id: int, activity_types: list, number: int, offset: int = 0):\n query = db.session.query(Activity). \\\n filter(Activity.user_id == user_id,\n func.date(Activity.datetime) >= self.SEASON.start_date,\n func.date(Activity.datetime) <= self.SEASON.end_date,\n Activity.type.in_(activity_types))\n count = query. \\\n count()\n items = query. \\\n order_by(Activity.datetime.desc()). \\\n limit(number). \\\n offset(offset). 
\\\n all()\n return [count, items]", "def get(self, base_url, observable, limit, credentials):\n\n url = url_join(base_url, self.filter(observable)) + f'&$top={limit}'\n\n response = get_data(url, credentials)\n\n return [\n self.sighting(observable, x) for x in response.get('value', [])\n ]", "def _get_pages(self,url,params,section):\n if self.verbose:\n print('Get Pages for {}'.format(url))\n print(params)\n page = 1\n maxPage = 1\n \n all_results = []\n this_batch = []\n while page <= maxPage: \n \n params['page']=page\n resp = self._get(url=url,params=params)\n maxPage = int(resp.headers.get('X-Total-Page-Count',0))\n try:\n results=resp.json()\n except:\n results=None\n if isinstance(results,(list,dict)):\n if 'errors' in results:\n print(results['errors'])\n return results\n \n this_batch = results[section]\n all_results.extend(this_batch)\n\n page+=1\n else:\n if self.verbose:\n print(\"PROBLEM\")\n return results\n\n return all_results", "def list():\n\n page_limit = app.config['PAGINATION_LIMIT']\n page = request.args.get('page') if 'page' in request.args else 1\n per_page = request.args.get('per_page') if 'per_page' in request.args else page_limit\n\n # TODO: Can be done in much more elegant way\n try:\n page = int(page)\n except:\n page = 1\n\n try:\n per_page = int(per_page)\n except:\n per_page = page_limit\n if per_page > page_limit:\n per_page = page_limit\n\n # Get all rows and order by published datetime and paginate by page count and per_page\n posts = YTSearch.query.order_by(desc(YTSearch.published_at)) \\\n .paginate(page, per_page, error_out=True)\n\n # Get JSON data from list of objects\n result = [i.serialize() for i in posts.items]\n return jsonify({'data': result, 'has_next': posts.has_next, 'next_page': posts.next_num,\n 'has_prev': posts.has_prev, 'prev_page': posts.prev_num, 'length': len(result)}), 200", "def get(self):\n args = pagination_arguments.parse_args(request)\n page = args.get('page', 1)\n per_page = args.get('per_page', 10)\n\n scenarios_query = Scenario.query\n scenarios_page = scenarios_query.paginate(page, per_page, error_out=False)\n\n return scenarios_page", "def get_incidents(self) -> tuple[list[Any], Any, Any | None]:\n timestamp = None\n fetch_limit = arg_to_number(self.fetch_limit)\n fetch_time = self.fetch_time\n if not fetch_limit or not fetch_time:\n raise DemistoException('Missing parameter - fetch limit or fetch time')\n last_run = demisto.getLastRun()\n if last_run and last_run.get('timestamp'):\n timestamp = last_run.get('timestamp', '')\n last_fetched_ids = last_run.get('last_fetched_ids', [])\n else:\n if last_fetch := arg_to_datetime(fetch_time, required=True):\n # convert to ISO 8601 format and add Z suffix\n timestamp = last_fetch.strftime(DATE_FORMAT)\n last_fetched_ids = []\n\n page_size = '100'\n # set the until argument to prevent duplicates\n until = get_now_time()\n response = self.list_incidents_request(page_size, '0', until, timestamp)\n if not response.get('items'):\n return [], last_fetched_ids, timestamp\n\n page_number = response.get('totalPages', 1) - 1\n total = 0\n total_items: list[dict] = []\n while total < fetch_limit and page_number >= 0:\n try:\n response = self.list_incidents_request(page_size, page_number, until, timestamp)\n except HTTPError as e:\n if e.response is not None and e.response.status_code == 429:\n raise DemistoException(\n 'Too many requests, try later or reduce the number of Fetch Limit parameter.'\n ) from e\n raise e\n\n items = response.get('items', [])\n new_items = 
remove_duplicates_for_fetch(items, last_fetched_ids)\n # items order is from old to new , add new items at the start of list to maintain order\n total_items = new_items + total_items\n total += len(new_items)\n page_number -= 1\n\n # bring the last 'fetch_limit' items, as order is reversed\n total_items = total_items[len(total_items) - fetch_limit:]\n return total_items, last_fetched_ids, timestamp", "def __get_all_pages(endpoint, query_params=None, log_msg=\"\"):\n query_params = query_params or {}\n resources = []\n page_num = 1\n while True:\n params = {\"results-per-page\": 100, \"page\": page_num}\n params.update(query_params)\n response = HttpClientFactory.get(CloudFoundryConfigurationProvider.get()).request(\n method=HttpMethod.GET,\n path=endpoint,\n params=params,\n msg=\"{} page {}\".format(log_msg, page_num),\n )\n resources.extend(response[\"resources\"])\n if page_num == response[\"total_pages\"]:\n break\n page_num += 1\n return resources", "def query_three(self, table_name_activities):\n\n query = (\n \"SELECT user_id, COUNT(*) as Count \"\n \"FROM %s \"\n \"GROUP BY user_id \"\n \"ORDER BY Count DESC \"\n \"LIMIT 10\"\n )\n\n self.cursor.execute(query % table_name_activities)\n rows = self.cursor.fetchall()\n print(tabulate(rows, headers=self.cursor.column_names))\n return rows", "def get_activities(self, filenames):\n return [self.get_activity(f) for f in filenames]", "def get(self):\n query = Campaign.query\n return paginate(Campaign.__tablename__, query, self.schema), HTTPStatus.OK", "def get_start_activities():\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n dictio = lh.get_handler_for_process_and_session(process, session).get_start_activities()\n for entry in dictio:\n dictio[entry] = int(dictio[entry])\n list_act = sorted([(x, y) for x, y in dictio.items()], key=lambda x: x[1], reverse=True)\n return jsonify({\"startActivities\": list_act})\n return jsonify({\"startActivities\": []})", "def get_visits(visit_container):\r\n return visit_container.visits.all()", "def get_all_cities(request, no_of_cities=8):\n cities = City.objects.annotate(visit_count=Count('logs')).order_by('-visit_count')[:no_of_cities]\n serializer = AllCitiesSerializer(cities, many=True)\n return Response(serializer.data)", "def _get_assets_for_page(request, course_key, current_page, page_size, sort):\r\n start = current_page * page_size\r\n\r\n return contentstore().get_all_content_for_course(\r\n course_key, start=start, maxresults=page_size, sort=sort\r\n )", "def test_get_activity(self):\n pass", "def test_get_activity(self):\n pass", "def get(self):\n args = pagination_arguments.parse_args(request)\n page = args.get('page', 1)\n per_page = args.get('per_page', 10)\n\n entries_query = Entry.query\n entries_page = entries_query.paginate(page, per_page, error_out=False)\n\n return entries_page", "def list(self, request, *args, **kwargs):\n\n queryset = self.filter_queryset(self.get_queryset())\n\n page = request.query_params.get('page', 1)\n paginator = Paginator(queryset, 8)\n\n try:\n queryset = paginator.page(page)\n\n except PageNotAnInteger:\n queryset = paginator.page(1)\n\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n\n page = int(page)\n\n serializer = self.get_serializer(queryset, many=True)\n return 
Response({'items': serializer.data, 'page': page, 'pages': paginator.num_pages})", "def project_activity_json(project_id):\n limit = request.args.get('limit') or 10\n project = Project.query.filter_by(id=project_id).first_or_404()\n query = Activity.query.filter_by(project_id=project.id).order_by(\n Activity.id.desc()).limit(limit).all()\n activities = [a.data for a in query]\n return jsonify(project=project.data, activities=activities)", "def show_stories(self, limit=None):\n return self._get_page('showstories').json()[:limit]", "def get_all(self):\n total_expense_reports = []\n get_count = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'RECORDID',\n 'WHENCREATED',\n 'WHENPOSTED',\n 'TOTALENTERED',\n 'STATE',\n 'TOTALDUE',\n 'DESCRIPTION',\n 'CURRENCY',\n 'BASECURR',\n 'MEMO'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n expense_reports = self.format_and_send_request(data)['data']['EEXPENSES']\n total_expense_reports = total_expense_reports + expense_reports\n offset = offset + pagesize\n return total_expense_reports", "def list(self, page=0, results=200):\n\n query_string = urlencode(OrderedDict(size=results,position=page))\n url = self._api_url + '?' + query_string\n\n result = self._client.get(url)\n\n return result.json()", "def _request_activity_list(self, athlete):\n response = self._get_request(self._athlete_endpoint(athlete))\n response_buffer = StringIO(response.text)\n\n activity_list = pd.read_csv(\n filepath_or_buffer=response_buffer,\n parse_dates={'datetime': ['date', 'time']},\n sep=',\\s*',\n engine='python'\n )\n activity_list.rename(columns=lambda x: x.lower(), inplace=True)\n activity_list.rename(\n columns=lambda x: '_' + x if x[0].isdigit() else x, inplace=True)\n\n activity_list['has_hr'] = activity_list.average_heart_rate.map(bool)\n activity_list['has_spd'] = activity_list.average_speed.map(bool)\n activity_list['has_pwr'] = activity_list.average_power.map(bool)\n activity_list['has_cad'] = activity_list.average_heart_rate.map(bool)\n activity_list['data'] = pd.Series(dtype=np.dtype(\"object\"))\n return activity_list", "def pg_get_activities(self, duration_mode: int = 1) -> List[RunningProcess]:\n if self.pg_num_version >= 130000:\n qs = queries.get(\"get_pg_activity_post_130000\")\n elif self.pg_num_version >= 110000:\n qs = queries.get(\"get_pg_activity_post_110000\")\n elif self.pg_num_version >= 100000:\n qs = queries.get(\"get_pg_activity_post_100000\")\n elif self.pg_num_version >= 90600:\n qs = queries.get(\"get_pg_activity_post_090600\")\n elif self.pg_num_version >= 90200:\n qs = queries.get(\"get_pg_activity_post_090200\")\n else:\n qs = queries.get(\"get_pg_activity_oldest\")\n\n duration_column = self.get_duration_column(duration_mode)\n query = sql.SQL(qs).format(\n dbname_filter=self.dbname_filter,\n duration_column=sql.Identifier(duration_column),\n min_duration=sql.Literal(self.min_duration),\n )\n\n return pg.fetchall(\n self.pg_conn,\n query,\n {\n \"min_duration\": self.min_duration,\n \"dbname_filter\": self.filters.dbname,\n },\n mkrow=partial(RunningProcess.from_bytes, self.server_encoding),\n text_as_bytes=True,\n )", "def event_activity_json(event_id):\n limit = request.args.get('limit') or 50\n q = 
request.args.get('q') or None\n if q and len(q) < 3:\n q = None\n return jsonify(activities=get_event_activities(event_id, limit, q))", "def read_all_pages(self, url):\n\n result = []\n next_token = ''\n token_param = '&startToken=' if '?' in url else '?startToken='\n\n while True:\n paginated_url = url + token_param + next_token\n response = self.http_client.get(paginated_url)\n if response.status_code != 200:\n raise BackendException(\"Pagination failed with status=%s on \"\n \"URL=%s\" % (response.status_code, url))\n\n parsed = response.json()\n if 'data' in parsed and len(parsed['data']) > 0:\n result.extend(parsed['data'])\n else:\n break\n\n # Do not make another HTTP request if everything is here already\n if len(result) >= parsed['count']:\n break\n\n if 'nextToken' not in parsed:\n break\n next_token = parsed['nextToken']\n\n return result", "def list(self, *,\n page: Optional[int] = None,\n per_page: int = 100) -> Iterator[ResourceType]:\n return self._paginator.paginate(page_fetcher=self._fetch_page,\n collection_builder=self._build_collection_elements,\n page=page,\n per_page=per_page)", "def get(self):\r\n return get_all()", "def top_stories(self, limit=None):\n return self._get_page('topstories').json()[:limit]", "def job_stories(self, limit=None):\n return self._get_page('jobstories').json()[:limit]", "def paging_results(self):\n\n return 30", "def get_carton_activity_by_filter(self, **kwargs):\n\n all_params = ['filter', 'page', 'limit', 'sort']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_by_filter\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/beta/cartonActivity/search'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'filter' in params:\n query_params['filter'] = params['filter']\n if 'page' in params:\n query_params['page'] = params['page']\n if 'limit' in params:\n query_params['limit'] = params['limit']\n if 'sort' in params:\n query_params['sort'] = params['sort']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[CartonActivity]',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_all_access_entries(self) -> QuerySet:\n return self.model.objects.all().order_by(\"created_at\")" ]
[ "0.72672695", "0.70619905", "0.704842", "0.704842", "0.704842", "0.704842", "0.7015815", "0.6921878", "0.6793498", "0.6755012", "0.67519844", "0.6731834", "0.6626525", "0.65563905", "0.6493611", "0.6484105", "0.6461981", "0.64476633", "0.642003", "0.638269", "0.6301335", "0.62308687", "0.62190247", "0.6199472", "0.6166813", "0.61327535", "0.6124084", "0.6045493", "0.60328436", "0.60289794", "0.5902464", "0.5843677", "0.5811307", "0.57349634", "0.569893", "0.5636268", "0.5604174", "0.55736685", "0.5573655", "0.55494493", "0.55465186", "0.5540592", "0.5524166", "0.5522033", "0.55092037", "0.55027753", "0.5492773", "0.5455964", "0.545548", "0.54422706", "0.5400829", "0.53990114", "0.5392246", "0.5386624", "0.5382877", "0.5381136", "0.536238", "0.5357624", "0.5354798", "0.5353921", "0.53486216", "0.53435636", "0.53356004", "0.53272647", "0.53169924", "0.5311122", "0.5309226", "0.5307726", "0.5305741", "0.53041226", "0.5303192", "0.53012383", "0.5286348", "0.5278397", "0.5275902", "0.5274185", "0.5272743", "0.5271084", "0.52678627", "0.5246586", "0.5236967", "0.5229773", "0.5229773", "0.5225573", "0.52167714", "0.52013975", "0.5197398", "0.5190837", "0.5181955", "0.5179518", "0.5168405", "0.51627177", "0.51478934", "0.51434374", "0.5135171", "0.5129724", "0.5122937", "0.5122934", "0.51221263", "0.5121045" ]
0.69375056
7
Gets the activity stream for a given activity ID.
def get_activity_stream(token, activity, types, series_type='time', resolution='high'):
    types = ','.join(types)
    params = {'access_token': token}
    url = f'https://www.strava.com/api/v3/activities/{activity}/streams/{types}&series_type={series_type}&resolution={resolution}&key_by_type='
    response = return_json(url, "GET", parameters=params, timeout=10)
    return response
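An illustrative call follows; the token and activity id are placeholders, and the response shape assumes Strava's list-of-streams format when key_by_type is left unset.

# Request the time, distance and heartrate streams for one activity.
streams = get_activity_stream(
    "<strava access token>",
    1234567890,
    ["time", "distance", "heartrate"],
)
for stream in streams or []:
    print(stream.get("type"), len(stream.get("data", [])))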
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def activity(self, activity_id):\r\n return activities.Activity(self, activity_id)", "def get(self, id):\n activity = Activity().get(id)\n if not activity:\n abort(404, \"Activity not found\")\n return activity", "def activity(self, activity_id):\r\n return resources.Activity(self, activity_id)", "def get(self, id_stream):\n\n session = current_app.session\n\n stream = session.query(StreamDao).filter(StreamDao.id == id_stream).first()\n\n if stream is None:\n return None, 204\n\n return stream, 200", "def get_activities_by_session_id(self, session_id):\n return self._db.get_all(\"\"\"\n SELECT * FROM activity_log\n WHERE session_id = ?\"\"\", (session_id, ))", "def get_activity_with_id(cls, motion_id):\n obj = cls.objects(motion_id=motion_id).first()\n return obj", "def get_stream(self, video_id):\n stream = {}\n allowed_formats = ['ism', 'mpd']\n url = self.config['links']['vimondRestAPI'] + 'api/tve_web/asset/{0}/play.json'.format(video_id)\n params = {'protocol': 'VUDASH'}\n headers = {'Authorization': 'Bearer {0}'.format(self.get_credentials().get('vimond_token'))}\n data_dict = self.make_request(url, 'get', params=params, headers=headers)['playback']\n stream['drm_protected'] = data_dict['drmProtected']\n\n if isinstance(data_dict['items']['item'], list):\n for i in data_dict['items']['item']:\n if i['mediaFormat'] in allowed_formats:\n stream['mpd_url'] = i['url']\n if stream['drm_protected']:\n stream['license_url'] = i['license']['@uri']\n stream['drm_type'] = i['license']['@name']\n break\n else:\n stream['mpd_url'] = data_dict['items']['item']['url']\n if stream['drm_protected']:\n stream['license_url'] = data_dict['items']['item']['license']['@uri']\n stream['drm_type'] = data_dict['items']['item']['license']['@name']\n\n live_stream_offset = self.parse_stream_offset(video_id)\n if live_stream_offset:\n stream['mpd_url'] = '{0}?t={1}'.format(stream['mpd_url'], live_stream_offset)\n\n return stream", "def info_by_stream_id(stream_id):\n binding = {'stream_id': stream_id}\n url = 'https://sitestream.twitter.com/2b/site/c/01_225167_334389048B872A533002B34D73F8C29FD09EFC50/info.json'\n url = url.format(**binding)\n return _TwitterRequest('GET',\n url,\n 'streaming:c',\n 'get-c-stream-id-info',\n binding)", "def commentStream(id):\n stream = core.single(schema.commentStreams, id)\n if stream:\n return stream[\"_id\"]\n else:\n return None", "def find_activity(self, searched_activity_id):\n self.__load_activities_from_file_into_memory()\n return super().find_activity(searched_activity_id)", "def get_performed_activity_by_id(session, id:int):\n performed_activity = session.query(Performed_Activity).filter_by(id=id).first()\n return performed_activity", "def list_streams(self, session_id):\n endpoint = self.endpoints.get_stream_url(session_id)\n\n response = requests.get(\n endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout\n )\n\n if response.status_code == 200:\n return StreamList(response.json())\n elif response.status_code == 400:\n raise GetStreamError('Invalid request. This response may indicate that data in your request data is invalid JSON. 
Or it may indicate that you do not pass in a session ID or you passed in an invalid stream ID.')\n elif response.status_code == 403:\n raise AuthError('You passed in an invalid OpenTok API key or JWT token.')\n else:\n raise RequestError('An unexpected error occurred', response.status_code)", "def get_stream_id(self) -> str:\n return self.id", "def search_with_activitystream(query):\n request = requests.Request(\n method=\"GET\",\n url=settings.ACTIVITY_STREAM_API_URL,\n data=query).prepare()\n\n auth = Sender(\n {\n 'id': settings.ACTIVITY_STREAM_API_ACCESS_KEY,\n 'key': settings.ACTIVITY_STREAM_API_SECRET_KEY,\n 'algorithm': 'sha256'\n },\n settings.ACTIVITY_STREAM_API_URL,\n \"GET\",\n content=query,\n content_type='application/json',\n ).request_header\n\n # Note that the X-Forwarded-* items are overridden by Gov PaaS values\n # in production, and thus the value of ACTIVITY_STREAM_API_IP_WHITELIST\n # in production is irrelivant. It is included here to allow the app to\n # run locally or outside of Gov PaaS.\n request.headers.update({\n 'X-Forwarded-Proto': 'https',\n 'X-Forwarded-For': settings.ACTIVITY_STREAM_API_IP_WHITELIST,\n 'Authorization': auth,\n 'Content-Type': 'application/json'\n })\n\n return requests.Session().send(request)", "def create_activity(activity_id):\n\n # get the activity\n activity = Activity.query.filter_by(id=activity_id).first()\n\n if activity:\n # now get a valid token for the associated user\n access_token = refresh_access_token(user_id=activity.user_id)\n if access_token is None:\n # an error must have occurred\n current_app.logger.error('Cannot save activity {} to Strava as unable to refresh token'.format(activity_id))\n # let the app continue on as error has been logged\n return 200\n\n url = 'https://www.strava.com/api/v3/activities'\n headers = {'Authorization': 'Bearer {}'.format(access_token)}\n\n data = construct_strava_activity_data(activity)\n response = requests.post(url, headers=headers, data=data)\n strava_athlete = StravaAthlete.query.filter_by(user_id=activity.user_id).first()\n log_strava_event(strava_athlete.athlete_id, \"Activity\")\n\n # check the response, if there has been an error then need to log this\n if response.status_code != 200:\n current_app.logger.error('Strava Status code: {}'.format(response.status_code))\n current_app.logger.error('Strava Response: {}'.format(response.json))\n return response.status_code\n # log an error if the activity doesn't exist but allow app to continue on\n current_app.logger.error('Activity {} does not exist'.format(activity_id))\n return 200", "def test_api_get_activity_by_id(self):\n # create a bucket\n res = self.register_login_get_token()\n self.assertEqual(res.status_code, 201)\n\n # create a activity\n res = self.client().post('/bucketlist/1/activities',\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.activity)\n self.assertEqual(res.status_code, 201)\n # get activity created\n activity_created = json.loads(res.data.decode())\n # get activity by its ID\n res = self.client().get('/bucketlist/1/activities/{}'.format(activity_created['id']),\n headers=dict(\n Authorization=\"Bearer \" + self.access_token))\n self.assertEqual(res.status_code, 200)\n self.assertIn('Shop in', str(res.data))", "def get(self, stream):\n\n return self._streams[stream]", "def by_activity(cls,site_id=0,activity=None):\n return meta.DBSession.query(Activity).filter_by(site_id=site_id,activity=activity).all()", "def get_stream_id(self) -> str:", "def get_activity():\n try:\n activity = 
Activity.objects.filter(active=1).latest('id')\n except Activity.DoesNotExist:\n activity = None\n return activity", "def getactivity(self) -> Optional[ba.Activity]:\n stats = self._stats()\n if stats is not None:\n return stats.getactivity()\n return None", "def get_carton_activity_by_id(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_by_id`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "async def get_stream(self) -> dict:\n\n data = await self._http.get_streams(channels=[self.name])\n\n try:\n return data[0]\n except IndexError:\n pass", "def stream(self):\n\t\tdata = self._client.get(\"streams\", self.name)['stream']\n\t\tif data is not None:\n\t\t\tdata.pop('channel', None)\n\t\treturn data", "def getStream(self,name):\n if (name in self._streams):\n return self._streams[name]\n return None", "def getactivity(self) -> Optional[ba.Activity]:\n if self._activity is None:\n return None\n return self._activity()", "def activity_type(self, type_id):\r\n return activities.ActivityType(self, type_id)", "def get(self, ds_id):\n try:\n query_parameters = request.args\n logging.debug(f\" query params - {query_parameters}\")\n top, skip, expand_code, selects = parse_args(query_parameters)\n datastream_entity = Datastreams.filter_by_id(ds_id, expand_code, selects)\n response = jsonify(datastream_entity)\n response.status_code = 200\n except Exception as e:\n logging.warning(e)\n result = {\"message\": \"error\"}\n response = jsonify(result)\n finally:\n return response", "def get_activities(ts_activity, access_token):\n params = {'after': ts_activity, 'access_token': access_token}\n url = \"https://www.strava.com/api/v3/activities\"\n response = return_json(url, \"GET\", parameters=params)\n return response", "def stream_for_actor(self, actor):\n return self.filter(\n actor_content_type = ContentType.objects.get_for_model(actor),\n actor_object_id = actor.pk,\n ).order_by('-timestamp')", "def get(self, id):\n try:\n query_parameters = request.args\n logging.debug(f\" query params - 
{str(query_parameters)}\")\n obs = Observations.find_observation_by_observation_id(id)\n\n top, skip, expand_code, selects = parse_args(query_parameters)\n if obs:\n ds_list = Datastreams.filter_by_id(\n obs.datastream_id, expand_code, selects\n )\n response = jsonify(ds_list)\n\n else:\n response = jsonify({\"message\": \"No Observations with given Id found\"})\n response.status_code = 200\n except Exception as e:\n logging.warning(e)\n response = jsonify({\"message\": \"error\"})\n response.status_code = 400\n return response\n\n finally:\n return response", "def get_stream(self):\n self.lock.acquire()\n stream=self.stream\n self.lock.release()\n return stream", "def get_repo_activity(configs, repo_id):\n print(\"--Getting staging repo activity...\")\n url = \"/\".join([configs[\"nexus\"][\"repoBaseURL\"], repo_id, \"activity\"])\n basic_auth = HTTPBasicAuth(configs[\"nexus\"][\"username\"], configs[\"passwords\"][\"nexus\"])\n response = requests.get(url, auth=basic_auth)\n\n if response.status_code == 200:\n return response.text\n else:\n raise Exception(\"----Failed at getting repo activity. Status code: \" +\n str(response.status_code) + \" Response content: \" + response.text)", "def createCommentStream(id):\n db = core.connect()\n theShift = db[id]\n commentStream = stream.create({\n \"meta\": \"comments\",\n \"objectRef\": ref(id),\n \"createdBy\": theShift[\"createdBy\"]\n })\n return commentStream[\"_id\"]", "def get_activities(self, activity_ids=None, max_records=50):\r\n return self.connection.get_all_activities(self, activity_ids, max_records)", "def get(self, id_reader=None):\n\n session = current_app.session\n\n if id_reader:\n streams = session.query(StreamDao).join(CategoryDao)\\\n .join(ThemeDao).join(ReaderDao)\\\n .filter(ReaderDao.id == id_reader).all()\n\n elif request.args.get('_categories'):\n streams = session.query(StreamDao).join(CategoryDao)\\\n .filter(CategoryDao.id.in_(request.args['_categories'])).all()\n\n elif request.args.get('_themes'):\n streams = session.query(StreamDao).join(CategoryDao)\\\n .join(ThemeDao)\\\n .filter(CategoryDao.id.in_(request.args['_themes'])).all()\n \n else:\n streams = session.query(StreamDao).all()\n\n if len(streams) is 0:\n return None, 204\n\n return streams, 200", "async def get_stream(self) -> dict:\n return await self.channel.get_stream()", "def get_video_stream(yt, resolution):\n global adaptive\n\n resolution_itag = {'360p':134, '480p':135, '720p':136}\n progressive_streams = yt.streams.filter(progressive=True)\n video_stream = progressive_streams.get_by_resolution(resolution)\n\n if video_stream is not None:\n return video_stream\n else:\n adaptive_streams = yt.streams.filter(adaptive=True, type='video')\n video_itag = resolution_itag[resolution]\n video_stream = adaptive_streams.get_by_itag(video_itag)\n adaptive = True\n return video_stream", "def asset_activity(self, asset_id):\n response = self._client.get('workbenches/assets/%(asset_id)s/activity',\n path_params={'asset_id': asset_id})\n return AssetActivityList.from_json(response.text)", "def get(self, id):\n\n return self.client.get(\"external-task/{0}\".format(id))", "def _get_stream(\n session: \"Session\", url_tail: str, params: Optional[Dict[str, Any]] = None\n) -> Any:\n response = _get(session, url_tail, params, stream=True)\n response.raw.decode_content = True\n return response.raw", "def get(self, session_id):\n if session_id is None:\n raise ValueError('session_id is required and was not provided')\n\n response, _, headers = 
self._client.request_with_headers('GET', 'sessions/%s' % session_id)\n return SessionResponse(response, headers)", "def sms_get_campaign_info(self, id):\n if not id:\n return self.__handle_error(\"Empty campaign id\")\n\n logger.info(\"Function call: sms_get_campaign_info from: {}\".format(id, ))\n return self.__handle_result(self.__send_request('/sms/campaigns/info/{}'.format(id, )))", "def add_stream(self, id: str, *, log_stream_name: typing.Optional[str]=None) -> \"LogStream\":\n props = StreamOptions(log_stream_name=log_stream_name)\n\n return jsii.invoke(self, \"addStream\", [id, props])", "def add_stream(self, id: str, *, log_stream_name: typing.Optional[str]=None) -> \"LogStream\":\n props = StreamOptions(log_stream_name=log_stream_name)\n\n return jsii.invoke(self, \"addStream\", [id, props])", "def event_activity_csv(event_id):\n limit = request.args.get('limit') or 50\n q = request.args.get('q') or None\n if q and len(q) < 3:\n q = None\n csvstream = gen_csv(get_event_activities(event_id, limit, q))\n headers = {'Content-Disposition': 'attachment; filename=activity_list.csv'}\n return Response(stream_with_context(csvstream),\n mimetype='text/csv', headers=headers)", "def get(self, id):\n try:\n query_parameters = request.args\n logging.debug(f\" query params - {query_parameters}\")\n top, skip, expand_code, selects = parse_args(query_parameters)\n\n ds_list = Datastreams.filter_by_sensor_id(\n id, top, skip, expand_code, selects\n )\n response = jsonify(ds_list)\n response.status_code = 200\n except Exception as e:\n logging.warning(e)\n response = jsonify({\"message\": \"error\"})\n response.status_code = 400\n return response\n\n finally:\n return response", "def get_activity_feed(context, term):\n if not term:\n raise ValueError('You have to provide a search term!')\n url = '{}{}'.format(context.test_url, term)\n response = requests.get(url, timeout=context.request_timeout)\n context.response = response\n logging.debug('Request URL: %s', response.request.url)\n logging.debug('Request headers:\\n%s', pformat(response.request.headers))\n logging.debug('Response headers:\\n%s', pformat(response.headers))\n logging.debug('Response content:\\n%s', pformat(response.json()))", "def get(self, id):\n try:\n query_parameters = request.args\n logging.debug(f\" query params - {query_parameters}\")\n top, skip, expand_code, selects = parse_args(query_parameters)\n\n ds_list = Datastreams.filter_by_thing_id(\n id, top, skip, expand_code, selects\n )\n response = jsonify(ds_list)\n response.status_code = 200\n except Exception as e:\n logging.warning(e)\n response = jsonify({\"message\": \"error\"})\n response.status_code = 400\n return response\n\n finally:\n return response", "def fetch_story(self, movie_id):\n movie = tmdbsimple.Movies(movie_id)\n request = movie.info()\n\n return movie.overview", "def put(self, id):\n activity = Activity().get(id)\n if not activity:\n abort(404, \"Activity not found\")\n\n return activity._update(request.json)", "def get_stream(self, channel_name):\n self.stream = json.loads(Stream().channel(channel_name).text)", "def get_activity(self, filename):\n return self._request_activity_data(self.athlete, filename)", "def add_stream(self, id: str, *, log_stream_name: typing.Optional[str]=None) -> \"LogStream\":\n ...", "def get_session(self, session_id):\n return self._cache[session_id]", "def get_activity_output(arn: Optional[pulumi.Input[Optional[str]]] = None,\n name: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) 
-> pulumi.Output[GetActivityResult]:\n ...", "def stream(self):\n return streams.Stream(self)", "def get_activities():\n pass", "def is_device_streaming(self, device_id):\n e = ctypes.POINTER(rs_error)()\n lrs.rs_get_device.restype = ctypes.POINTER(rs_device)\n dev = lrs.rs_get_device(self.ctx, device_id, ctypes.byref(e))\n _check_error(e)\n is_streaming = lrs.rs_is_device_streaming(dev, ctypes.byref(e))\n _check_error(e)\n return is_streaming", "def get_api_stream(url, params=None, headers=None):\n\n logging.debug(\"-> get_api_stream()\")\n logging.debug(\"Request url: %s\" % url)\n\n result = requests.get(url, params=params, headers=headers)\n\n logging.debug(\"Response content: %s\" % result.content)\n logging.debug(\"<- get_api_stream()\")\n\n return result", "def stream(self):\r\n return streams.Stream(self)", "def stream(self):\r\n return streams.Stream(self)", "def get_activity(arn: Optional[str] = None,\n name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetActivityResult:\n __args__ = dict()\n __args__['arn'] = arn\n __args__['name'] = name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('aws:sfn/getActivity:getActivity', __args__, opts=opts, typ=GetActivityResult).value\n\n return AwaitableGetActivityResult(\n arn=pulumi.get(__ret__, 'arn'),\n creation_date=pulumi.get(__ret__, 'creation_date'),\n id=pulumi.get(__ret__, 'id'),\n name=pulumi.get(__ret__, 'name'))", "def source(self):\n return self._client.group.stream", "def __init__(self, stream_id):\n self.stream_id = stream_id\n self._stream = None", "def get_share(self, activity_user_id, activity_id, share_id):\n return None", "def activity(self):\n return self._activity", "def new_datastream(self, device_id: str, ogc_obs_property: OGCObservedProperty):\n if device_id not in self._active_microphones:\n logging.error(\"Device \" + device_id + \" is not active.\")\n return False\n\n if ogc_obs_property not in self.get_ogc_config().get_observed_properties():\n ogc_obs_property = self._ogc_config.add_observed_property(ogc_obs_property)\n\n try:\n datastream_id = self._resource_catalog[device_id][ogc_obs_property.get_name()]\n except KeyError:\n device_coordinates = self._active_microphones[device_id][\"coordinates\"]\n device_name = self._active_microphones[device_id][\"name\"]\n device_description = self._active_microphones[device_id][\"description\"]\n\n datastream_id = self._new_datastream_slm(\n ogc_obs_property, device_id, device_name, device_coordinates, device_description)\n\n return datastream_id", "def create_stream_feeder(context=None):\n return StreamFeeder(context)", "def get_video(self, video_id):\n return self._videos.get(video_id, None)", "def add_stream(agentIdPath): # noqa: E501\n return multichain_client.createstream(agentIdPath)", "def _init_stream(self):\n stream = tweepy.Stream(self.auth, self)\n\n try:\n print('Trying to create stream...')\n # Cannot follow based on screen name, get ids\n self.trolling_ids = [\n str(self.twitter_api.get_user(screen_name=screen_name).id)\n for screen_name in SCREEN_NAMES_TO_FOLLOW\n ]\n\n stream.filter(follow=self.trolling_ids)\n\n except Exception as e:\n print('*****************************************************')\n print('**** Stream error, init_stream. Trying again... 
****')\n print('*****************************************************')\n print(e)\n\n # Try again to create the stream\n time.sleep(30)\n self._init_stream()", "def get_camera_streaming(cam_id, w, h, fps):\n capture = cv2.VideoCapture(cam_id)\n capture.set(cv2.CAP_PROP_FRAME_WIDTH, w)\n capture.set(cv2.CAP_PROP_FRAME_HEIGHT, h)\n capture.set(cv2.CAP_PROP_FPS, fps)\n if not capture:\n print(\"Failed to initialize camera\")\n sys.exit(1)\n return capture", "def streams(self, config: Mapping[str, Any]) -> List[Stream]:\n api_key = config[\"api_key\"]\n start_date = config[\"start_date\"]\n return [\n Campaigns(api_key=api_key),\n Events(api_key=api_key, start_date=start_date),\n GlobalExclusions(api_key=api_key, start_date=start_date),\n Lists(api_key=api_key),\n Metrics(api_key=api_key),\n Flows(api_key=api_key, start_date=start_date),\n ]", "def get_duplicate_carton_activity_by_id(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_duplicate_carton_activity_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_duplicate_carton_activity_by_id`\")\n\n resource_path = '/beta/cartonActivity/duplicate/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get(self, *, recording_id):\n\n response = openvidu().get_recording(recording_id)\n\n if response.status_code == 200:\n recording = response.json()\n elif response.status_code == 404:\n abort(NotFound, query=f\"Recording `{recording_id}` does not exist\")\n elif response.status_code == 501:\n abort(NotImplemented, query=\"OpenVidu Server recording module is disabled\")\n else:\n abort(response)\n\n if recording[\"url\"] is None:\n abort(\n Conflict,\n query=\"The recording has not finished\",\n )\n request = openvidu().request.get(recording[\"url\"], stream=True)\n\n # Response does not accept `headers=request.headers` so we create them ourself\n headers = {}\n for header in request.headers:\n headers[header] = request.headers[header]\n\n return Response(\n stream_with_context(request.iter_content(chunk_size=2048)),\n headers=headers,\n )", "def Stream():\r\n \r\n config = config_create()\r\n CONSUMER_KEY = config.get('Auth', 'CONSUMER_KEY') \r\n CONSUMER_SECRET = config.get('Auth', 
'CONSUMER_SECRET')\r\n ACCESS_KEY = config.get('Auth', 'ACCESS_KEY')\r\n ACCESS_SECRET = config.get('Auth', 'ACCESS_SECRET')\r\n searchterm = config.get('Filter','search')\r\n name = multiprocessing.current_process().name\r\n \"\"\"Function that will manage doing the twitter stream\"\"\"\r\n stream = MyStreamer(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET)\r\n stream.statuses.filter(track= searchterm)", "def activity(self, activity):\n if activity is None:\n raise ValueError(\"Invalid value for `activity`, must not be `None`\") # noqa: E501\n\n self._activity = activity", "def get_streams(namespace_url):\n streams = requests.get(namespace_url + '/Streams', headers=headers)\n return streams.json()", "def get_story(self, id):\n url = self.base_url + 'stories/{story}'\n\n req = requests.get(headers=self.headers, url=url.format(story=id))\n\n return req.json()", "def take(self, id, activity):\n\t\tif id not in self.linkfrom:\n\t\t\tself.linkfrom.append(id)\n\t\tself.activations[id] = activity", "def take(self, id, activity):\n\t\tif id not in self.linkfrom:\n\t\t\tself.linkfrom.append(id)\n\t\tself.activations[id] = activity", "def get_session(self, session_id):\n return Session(self.session_cache, self.sid, session_id, self._secret)", "def source(self):\n return self._group.stream", "def get(self, campaign_id):\n campaign = Campaign.query.filter_by(public_id=campaign_id).first()\n if campaign:\n return send_file(campaign.mailer_file)\n else:\n api.abort(404, message='Campaign does not exist', success=False)", "def get_single_media(media_id):\n return query_single(media_id, Media, media_schema)", "def _read_activity(session_path: Path):\n # Read activity file\n df_act = pd.read_csv(\n session_path / ACTIVITY_FILE,\n names=ACTIVITY_FILE_COLUMNS,\n usecols=[\n \"subject\",\n \"session_number\",\n \"start_time\",\n \"end_time\",\n \"gesture_scenario\",\n \"task_id\",\n ],\n header=None,\n engine=\"c\",\n )\n # Timestamps as additional datetime columns\n df_act[\"start_time_dt\"] = pd.to_datetime(df_act[\"start_time\"], unit=\"ms\")\n df_act[\"end_time_dt\"] = pd.to_datetime(df_act[\"end_time\"], unit=\"ms\")\n\n return df_act", "def get_movie_by_id(movie_id):\n search_url = 'https://api.themoviedb.org/3/movie/%s?api_key=%s' %(movie_id, api_key)\n print(search_url)\n with urllib.request.urlopen(search_url) as url:\n get_movie_data = url.read()\n get_movie_result = json.loads(get_movie_data)\n\n movie_idnum = get_movie_result.get('id')\n movie_name = get_movie_result.get('original_title')\n movie_overview = get_movie_result.get('overview')\n movie_backdrop = get_movie_result.get('backdrop_path')\n movie_average = get_movie_result.get('vote_average')\n movie_count = get_movie_result.get('vote_count')\n\n movie_object = Movie(movie_idnum, movie_name, movie_overview, movie_backdrop, movie_average, movie_count)\n\n return movie_object", "def export_download(self, file_id, stream=True, chunk_size=1024):\n response = self._client.get('workbenches/export/%(file_id)s/download',\n path_params={'file_id': file_id},\n stream=stream)\n return response.iter_content(chunk_size=chunk_size)", "def get_movie(self, id: int) -> Movie:\n raise NotImplementedError", "def getbyid(self, id):\n\n return esd.retrieve(id)", "async def stream_source(self):\n return self._stream_source", "def get_comment(self, comment_id, activity_id=None, activity_author_id=None):\n url = API_OBJECT_URL % comment_id\n return self.comment_to_object(json.loads(self.urlopen(url).read()),\n post_author_id=activity_author_id)", "def 
_get_stream_handler(self, stream_id, ourport, theirport):\n t = (stream_id, ourport, theirport)\n return self._handlers[t]", "def subscribers(id):\n return core.query(schema.streamBySubscribers, id)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'StreamingImage':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = StreamingImageArgs.__new__(StreamingImageArgs)\n\n __props__.__dict__[\"description\"] = None\n __props__.__dict__[\"ec2_image_id\"] = None\n __props__.__dict__[\"encryption_configuration\"] = None\n __props__.__dict__[\"eula_ids\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"owner\"] = None\n __props__.__dict__[\"platform\"] = None\n __props__.__dict__[\"streaming_image_id\"] = None\n __props__.__dict__[\"studio_id\"] = None\n __props__.__dict__[\"tags\"] = None\n return StreamingImage(resource_name, opts=opts, __props__=__props__)", "def send_stream(self, stream_id, get_result=False):\n\n stream = self.streams.get(stream_id)\n if not stream:\n raise TGException(\"Stream with ID {} was not configured\".format(stream_id))\n\n port = stream['iface']\n # Verify that there is no ports already used by another iperf instances\n if port and port in self.used_ifaces:\n raise TGException(\"There is an another iperf on port {}.\".format(port))\n\n if port and port in self.namespaces:\n stream['prefix'] = self.namespace_prefix.format(self.namespaces[port])\n\n if port:\n self.used_ifaces.add(port)\n\n cmd = stream.get('iperf_cmd')\n prefix = stream.get('prefix')\n iid = self._lhost.ui.iperf.start(prefix=prefix, command=cmd)\n stream['instance_id'] = iid\n\n if get_result:\n cmd_time = cmd.get('time', 10)\n time.sleep(int(cmd_time))\n\n # make sure we stopped correctly\n return self.stop_stream(stream_id, ignore_inactive=True)", "def get(id=None):\n return requests.get(\"/{}\".format(id))", "def get_displayed_streamer_by_twitch_id(client_id, twitch_id,\n\t\tprev_time=None, prev_match_id=None, next_time=None, next_match_id=None,\n\t\tpage_limit=None, now=None):\n\turl_by_id = common_db._get_twitch_url_by_id(twitch_id)\n\tdef _filter_adder(query):\n\t\treturn query.filter(User.url_by_id == url_by_id)\n\treturn _get_displayed_streamer_by_filter(client_id, _filter_adder,\n\t\t\tprev_time, prev_match_id, next_time, next_match_id, page_limit, now)", "def get_activity_object(activity_name, settings, logger, conn, token, activity_task):\n full_path = \"activity.\" + activity_name + \".\" + activity_name\n f = eval(full_path)\n # Create the object\n activity_object = f(settings, logger, conn, token, activity_task)\n return activity_object" ]
[ "0.67917997", "0.6699494", "0.65698034", "0.6276299", "0.59187925", "0.5873506", "0.58734196", "0.56921214", "0.56341624", "0.5584019", "0.54834574", "0.54745483", "0.5373628", "0.5367137", "0.53547525", "0.53142124", "0.52937365", "0.5270165", "0.52317965", "0.5202036", "0.51997656", "0.5185928", "0.51661634", "0.5136037", "0.51170474", "0.5114894", "0.5083489", "0.50492406", "0.5048543", "0.503841", "0.5010766", "0.49804652", "0.4962453", "0.4935476", "0.48991364", "0.48656893", "0.48599964", "0.48486766", "0.4846226", "0.48098493", "0.48097622", "0.47866493", "0.47602808", "0.47525206", "0.47525206", "0.47273278", "0.47266388", "0.47199932", "0.47144154", "0.47110784", "0.4698093", "0.46872184", "0.46823445", "0.46667612", "0.46628475", "0.4646143", "0.46357512", "0.46309856", "0.46266285", "0.46260992", "0.46149737", "0.46149737", "0.4594182", "0.45725644", "0.45692194", "0.4568665", "0.4565127", "0.45439622", "0.45415047", "0.4500618", "0.44983798", "0.44979155", "0.4489168", "0.44873747", "0.4479276", "0.4473663", "0.44726485", "0.44671834", "0.44666007", "0.4465179", "0.44565102", "0.44565102", "0.44484058", "0.44465333", "0.44442123", "0.44336903", "0.4431847", "0.44297984", "0.44272712", "0.4423439", "0.4412688", "0.44084206", "0.43965775", "0.4396457", "0.4393644", "0.43686166", "0.4363938", "0.4362895", "0.4355096", "0.4354735" ]
0.59823203
4
Gets details on currently logged in athlete.
def get_athlete(token): url = "https://www.strava.com/api/v3/athlete" params = {'access_token': token} response = return_json(url, "GET", parameters=params, timeout=10) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_athlete(response):\n name = response['athlete']['firstname'] + \" \" + response['athlete']['lastname']\n athlete = {\n 'id': response['athlete']['id'],\n 'name': name,\n 'access_token': response['access_token'],\n 'refresh_token': response['refresh_token'],\n 'expires_at': response['expires_at'],\n 'ts_activity': 0}\n return athlete", "def fetch_profile(access_token):\n\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n # Fetch athlete profile\n r = requests.get(API_URL + \"/athlete\", headers=headers)\n profile = r.json()\n if \"errors\" in profile:\n raise AuthError(profile[\"message\"])\n\n return {\n \"firstName\": profile[\"firstname\"],\n \"lastName\": profile[\"lastname\"],\n \"imgUrl\": profile[\"profile\"],\n \"profileUrl\": \"https://www.strava.com/athletes/{}\".format(profile[\"id\"]),\n }", "def get_account_details(self):\n pass", "def user_info(self):\n return self.auth.get_user_by_session()", "def getInfo(self):\n self.name, self.description = achievements[self.id]", "def getAccidental(self):\n return self.accidental", "def get_teacher(self) -> str :\n return self.teacher", "def display_accounts_details():\n return Credentials.display_credentials()", "def get_user_details(self, response):\n\n log.info(str(response) + \"-\" * 80)\n log.info(str(dir(self)) + \"-\" * 80)\n\n return response", "def get_teacher():\n\n rows = db.engine.execute(f\"SELECT * FROM teacher_login WHERE loginid = {g.user.loginid}\")\n res = []\n for row in rows:\n res.append(dict(row))\n return jsonify(res)", "def details(self):\n logging.info(self.user)", "def getDBAthletesUsingAPI():\n athletes_response = api_requester.getAthletes()\n\n # Parse response into Athlete db objects\n athletes_to_return = list()\n for athlete in athletes_response.json():\n athletes_to_return.append(\n getAthleteObjectFromJSON(athlete))\n\n return athletes_to_return", "def user_info(self):\n \n return self.auth.get_user_by_session()", "def get (self):\n\n logged_in, db_user = ADayThere.logged_in_user ()\n if not logged_in:\n self.response.status = 401\n return\n\n res = self.__build_response (db_user)\n self.response.write (json.dumps (res))", "def account_info(request):\r\n user = request.user\r\n\r\n return _api_response(request, user.safe_data())", "def get_user_info(self) -> str:\n return self._searcher.get_user_info()", "def _athlete_endpoint(self, athlete):\n return '{host}{athlete}'.format(\n host=self.host,\n athlete=quote_plus(athlete)\n )", "def get_adventure_detail(request):\n if request.is_ajax():\n user = request.user\n game_saved = user.game_saved\n adventure_id = game_saved.adventure_saved\n task_num = game_saved.task_saved\n adventure = Adventure.objects.get(adventure_id=adventure_id)\n Adventures_info = adventures_info.objects.get(adventure_name=adventure)\n task = Task.objects.get(adventure_name=adventure, task_number=task_num)\n\n\n alist =[\n {\n \"name\" : str(adventure.adventure_name),\n \"items\" : str(Adventures_info.items_needed),\n \"expenses\" : str(Adventures_info.expenses),\n \"locations\" : Adventures_info.locations,\n \"mapaddress\" : str(task.google_map),\n \"theme_character_url\" : str(adventure.theme_character_url)\n }\n\n ]\n\n return JsonResponse(alist, safe=False)\n else:\n raise PermissionDenied()", "def get(self):\r\n return get_user(request)", "def fetch_stats(access_token, athlete_id):\n\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n # Fetch athlete stats\n r = requests.get(API_URL + \"/athletes/{}/stats\".format(athlete_id), headers=headers)\n 
stats = r.json()\n if \"errors\" in stats:\n raise AuthError(stats[\"message\"])\n\n return {\n \"recentRuns\": stats[\"recent_run_totals\"],\n \"yearRuns\": stats[\"ytd_run_totals\"],\n \"allRuns\": stats[\"all_run_totals\"],\n }", "def get_self_account_details(self):\n return self.mrr_obj.get('/whoami')", "def get():\n return prepare_response(get_user_info())", "def fusion_api_get_active_user(self):\n return self.loginsession.get_active_user()", "def get(self):\n return self.context.as_dict(self.user)", "def get_account_info(self):\n resp = requests.get(\n self.URL + 'info/',\n headers={'Authorization': 'Token ' + self.api_key}\n )\n\n return self.__handle_response(resp)", "def get_profile_details(self):\n cursor = self.__connection.cursor()\n cursor.execute(\n \"select first_name, last_name, purchased_products from neutron_buyer where buyer_id=%s\",\n (self.__buyer_id,)\n )\n result = cursor.fetchone()\n if result:\n return result\n raise IDNotFoundException", "def get_user_details():\n rv = query_db('select * from user')\n return rv[0] if rv else None", "def get_amenity(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n return jsonify(amenity.to_dict())", "def get_amenity(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n return jsonify(amenity.to_dict())", "def user_data(self, access_token, *args, **kwargs):\n headers = {'Authorization': 'Bearer %s' % access_token}\n try:\n resp = requests.get(ASANA_USER_DETAILS_URL,\n headers=headers)\n resp.raise_for_status()\n return resp.json()['data']\n except ValueError:\n return None", "def current_user_info():\n\n return current_user", "def deauthorize_athlete(athlete_id):\n athlete = StravaAthlete.query.filter_by(athlete_id=athlete_id).first()\n if athlete:\n athlete.is_active = 0\n athlete.last_updated = datetime.utcnow()\n db.session.commit()\n log_strava_event(athlete_id, \"Deauthorize\")\n return True\n\n current_app.logger.error('Athlete {} does not exist'.format(athlete_id))\n return False", "def get_user_details(self, response):\n name = response.get(\"name\")\n return {\n \"username\": str(response.get(\"account_id\")),\n \"email\": response.get(\"email\"),\n \"fullname\": name.get(\"display_name\"),\n \"first_name\": name.get(\"given_name\"),\n \"last_name\": name.get(\"surname\"),\n }", "def apartaments(self, soup):\n logging.info('Getting hotel apartaments information.')\n apartaments = []\n if soup.select_one('table.hprt-table') is None:\n logging.error('Cant apartaments information.')\n return apartaments\n else:\n apartament_name = ''\n for apart in soup.select_one('table.hprt-table').findAll('tr')[1:]:\n apartament = {}\n try:\n logging.info('Getting apartaments name.')\n apartament['name'] = apartament_name = apart.select_one(\n 'span.hprt-roomtype-icon-link').text.strip()\n except AttributeError:\n logging.error('Cant apartaments name.')\n apartament['name'] = apartament_name\n try:\n logging.info('Getting apartaments price.')\n apartament['price'] = int(apart.select_one(\n 'div.bui-price-display__value.prco-inline-block-maker-helper.prco-font16-helper'\n ).text.strip()[:-5].replace(\" \", \"\"))\n except Exception:\n logging.error('Cant apartaments price.')\n continue\n try:\n logging.info('Getting apartaments capacity.')\n apartament['capacity'] = apart.select_one(\n 'div.c-occupancy-icons.hprt-occupancy-occupancy-info'\n ).select_one('span.bui-u-sr-only').text.strip().split(':')[1].strip()\n except AttributeError:\n logging.error('Cant 
apartaments capacity.')\n continue\n apartaments.append(apartament)\n\n return apartaments", "def get_user_details(self, response):\n return {\n \"username\": response.get(\"username\"),\n \"email\": response.get(\"email\"),\n \"fullname\": response.get(\"username\"),\n }", "def get_uname_and_avurl(target_id, their_anon_status):\n\ttarget_id = str(target_id)\n\tif their_anon_status:\n\t\treturn get_target_username(target_id), None\n\telse:\n\t\treturn get_single_user_credentials(target_id,as_list=False)", "def one_amenity(a_id):\n the_amenity = storage.get(Amenity, a_id)\n if the_amenity is not None:\n return jsonify(the_amenity.to_dict())\n abort(404)", "def getAnalysts(context):\n mtool = getToolByName(context, 'portal_membership')\n pairs = []\n analysts = mtool.searchForMembers(roles = ['Manager', 'LabManager', 'Analyst'])\n for member in analysts:\n uid = member.getId()\n fullname = member.getProperty('fullname')\n if fullname is None:\n fullname = uid\n pairs.append((uid, fullname))\n pairs.sort(lambda x, y: cmp(x[1], y[1]))\n return DisplayList(pairs)", "def get_account_details(account_id, writer, key):\n query = iroha.query(\n \"GetAccountDetail\", account_id=account_id, writer=writer, key=key\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = json.loads(response.account_detail_response.detail)\n pprint(data)", "def get_tutee(self):\n return self.individual_session.session.tutee.get_full_name()", "def user_info(self):\n response = self.query('user_info')\n return response", "def get_amenity_obj(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity:\n return jsonify(amenity.to_dict())\n else:\n abort(404)", "def get_user_details(self, response):\n\n return {\n 'email': response.get('email'),\n 'id': response.get('id'),\n 'full_name': response.get('name')\n }", "def who():\n cleanup()\n return {'available': userlist(), 'eta': data['etas'], 'etd': data['etds'], 'lastlocation': data['lastlocation'], 'ceitloch': ceitloch(), 'reminder': data['reminder']}", "def display_profile(self):\n print(f\"Id: {self._id}\")\n print(f\"username: {self.username}\")\n print(f\"name: {self.name}\")\n print(f\"contact: {self.contact}\")\n print(f\"address: {self.address}\")", "def me():\n return current_user.get()", "def get(self, id):\n adm = Administration()\n lp = adm.get_learnprofile_by_id(id)\n return lp", "def show_amenity_with_id(amenity_id):\n\n data = storage.get(Amenity, amenity_id)\n if data is None:\n abort(404)\n return jsonify(data.to_dict())", "def teacher_dashboard(self, request, activity, session):\n if request.method == \"GET\" and request.GET.get(\"studentid\"):\n return self.student_summary(request.GET.get(\"studentid\"), request, activity)\n\n else:\n return self.course_summary(request, activity)", "def auth(self):\n return self.user.get('current')", "def get_teacher(teacher_account_id):\n query = 'SELECT * FROM teacher JOIN person ON teacher.teacher_account_id=person.account_id WHERE teacher.teacher_account_id=%s;'\n args = (teacher_account_id,)\n return database.connection.get_data(query, args)", "def aantalArmen(self):\n return self._aantalArmen.get_waarde()", "def aantalArmen(self):\n return self._aantalArmen.get_waarde()", "def get_user_profile(self):\n return self.request('get', 'id/users')", "def getAnonymizedUserData(self):\n\t\turl = \"https://habitica.com/api/v3/user/anonymized\"\n\t\treturn(getUrl(url, self.credentials))", "def get_specific_amenity(amenity_id):\n data = storage.all('Amenity')\n name = 'Amenity.' 
+ amenity_id\n amenity = [v.to_dict() for k, v in data.items() if k == name]\n if len(amenity) != 1:\n abort(404)\n return jsonify(amenity[0])", "def profile(self, name=\"johndoe\"):\r\n url = \"/account/%s\" % name\r\n return self.app.get(url, follow_redirects=True)", "def get(self, id):\n adm = Administration()\n prof = adm.get_profile_by_id(id)\n return prof", "def get(self, id):\n adm = Administration()\n prof = adm.get_profile_by_id(id)\n return prof", "def get_user() -> str:\n user = (current_user\n if current_user.has_role('tagger')\n else g.demo_user)\n user_json: str = jsonify(email=user.email,\n active=user.active,\n confirmed_at=user.confirmed_at,\n auth_token=current_user.get_auth_token(),\n jwt=create_jwt_for_user(current_user),\n roles=[role.name for role in user.roles])\n return user_json", "def adventureslist(request):\n user = request.user\n context = get_adventure_info()\n\n return render(request, 'coreapp/adventureslist.html',context)", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def get_logged_info():\n user = current_identity\n return make_response(dumps({\"status\": True, \"user\": user}), 200)", "def user(self):\n return self.getattr('user')", "def current_user(self):\n user_dict = self.auth.get_user_by_session()\n return self.auth.store.user_model.get_by_id(user_dict['user_id'])", "def get_details(self):", "def get_actor_from_lti(self, homepage, user_id):\n return {\n \"objectType\": \"Agent\",\n \"account\": {\"name\": user_id, \"homePage\": homepage},\n }", "def display_accounts_details():\n return Records.display_records()", "def describe_user(self):\n print(\"\\n Name: \" + self.f_name.title() + ' ' + self.l_name.title())\n print(\"Age: \" + str(self.age)) \n print(\"Birthplace: \" + self.birthplace.title())", "def get_all_access():\n\t# Get the email from the user making the request\n\temail = get_jwt_identity()\n\treturn get_all_access_helper(email)", "def get_user_profile(self):\n return self.user.profile", "def test_4_getautor(self):\n self.app.getAutor(1)", "async def get_self(self):\n if not \".ROBLOSECURITY\" in self.request.cookies:\n raise NotAuthenticated(\"You must be authenticated to preform that action.\")\n r = await self.request.request(url=\"https://www.roblox.com/my/profile\", method=\"GET\")\n data = r.json()\n return User(self.request, data[\"UserId\"], data[\"Username\"])", "def getInfo(self):\n request = self._connection.get('bookmarklet')\n userdata = self._userinfo_regex.search(request.text)\n if userdata is None: userdata = self._userinfo_regex_2.search(request.text)\n if userdata is None: raise errors.DiaspyError('cannot find user data')\n userdata = userdata.group(1)\n return json.loads(userdata)", "def get(self, id):\n adm = Administration()\n pers = adm.get_person_by_id(id)\n return pers", "def me(self):\n response = self._connection.session.get(self.url + \"/me\")\n return self._raise_or_return_json(response)", "def getUserDetails(self,name):\n raise BorkedGetUserDetails", "def __extract_athletes(self):\n for ath in self.athletes:\n if dl.get_squad_id(ath) not in self.data_engine:\n # Athlete has no squad. 
Just skip over it.\n continue\n\n team_criteria = \\\n self.data_engine[dl.get_squad_id(ath)][\"team_criteria\"]\n\n if not team_criteria:\n # Probably already generated a team for athlete[\"squad_id\"]\n continue\n\n if athlete_match(ath, make_athlete_criteria(team_criteria)):\n self.__update_team_criteria(team_criteria, ath)\n yield ath", "def get_current(self):\n auth_token = session.get(\"auth_token\")\n print(auth_token)\n if not auth_token:\n return None\n user = db.user.find_one({\"auth_token\":auth_token})\n\n return user", "def get_trip(request, trip_id):\n try:\n trip = Trip.objects.get(pk=trip_id)\n if request.user not in trip.users.all():\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n except Trip.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n serializer = TripSerializer(trip)\n return Response(serializer.data)", "def author(self):\r\n return self.user", "def fusion_api_get_login_details(self, api=None, headers=None):\n return self.logindetails.get(api=api, headers=headers)", "def on_get(self, req, resp):\n data = req.context['auth']\n tenant = dict(id=data.get('domain_id', None), name=data.get('domain_name', None))\n role = dict(name=data.get('roles')[0])\n user = dict(id=data.get('user_id', None), name=data.get('user_name', None), tenant=tenant, role=role)\n data = dict(user=user)\n req.context['result'] = dict(session=data)\n resp.status = HTTP_200", "def getUser(self):\n user = users.get_current_user()\n if not user:\n self.redirect(users.create_login_url(self.request.uri))\n else:\n return user", "async def get_user_account(self):\n uri = \"/v3/spot/assets\"\n success, error = await self.request(\"GET\", uri, auth=True)\n return success, error", "def userinfo(self):\n return self._userinfo", "def getUser(self):\n current_user = self.user\n return current_user", "def getUser(self):\n current_user = self.user\n return current_user", "def getUser(self):\n current_user = self.user\n return current_user", "def show_all_amenities():\n\n amenities = storage.all(Amenity).values()\n new_list = []\n for amenity in amenities:\n new_list.append(amenity.to_dict())\n return jsonify(new_list)", "def get_user_details(self, response):\n token = response.get('access_token')\n headers = {\"Authorization\": \"Bearer %s\" % token}\n endpoint = self.USER_INFO_URL\n response = requests.get(endpoint, headers=headers)\n return {'email': response.json()['email'] or '',\n # We'll need sub, the unique ID, for get_user_id.\n 'sub': response.json()['sub']}", "def profile():\n\n if not session.get('oauth_token'):\n return redirect(url_for('login'))\n tokenString = \"bearer {0}\".format(session['oauth_token']['access_token'])\n headers = {\"Authorization\": tokenString}\n profileInfo = {'access_token': session['oauth_token']['access_token']}\n\n # get user summary\n userinfourl = '{}/userinfo'.format(baseUAAurl)\n userinfo = json.loads(requests.get(\n userinfourl, headers=headers, verify=False).text)\n session['userinfo'] = userinfo\n profileInfo['userinfo'] = json.dumps(session['userinfo'])\n\n # Method 1 : get user roles by orgs and space\n usersummaryurl = '{0}/v2/users/{1}/summary'.format(\n baseAPIurl, userinfo['user_id'])\n usersummary = json.loads(requests.get(\n usersummaryurl, headers=headers, verify=False).text)\n\n if usersummary.get('entity'):\n spaceWiseUserRoles = getSpaceWiseUserRoles(usersummary['entity'])\n else:\n # Method 2 : get user roles by orgs and space\n spaceWiseUserRoles = {}\n spaceurl = baseAPIurl + '/v2/spaces'\n spaceresponse = 
requests.get(spaceurl, headers=headers, verify=False)\n space_json_data = json.loads(spaceresponse.text)\n for spaceresource in space_json_data['resources']:\n entity = spaceresource['entity']\n spaceGuid = spaceresource['metadata']['guid']\n\n # get all auditors\n auditorurl = baseAPIurl + entity['auditors_url']\n auditorresponse = json.loads(requests.get(\n auditorurl, headers=headers, verify=False).text)\n if isInThisRole(auditorresponse, userinfo['user_name']):\n spaceWiseUserRoles[spaceGuid] = {\n 'role': 'auditor',\n 'name': spaceresource['entity']['name']\n }\n\n # get all developers\n devurl = baseAPIurl + entity['developers_url']\n devresponse = json.loads(requests.get(\n devurl, headers=headers, verify=False).text)\n if isInThisRole(devresponse, userinfo['user_name']):\n spaceWiseUserRoles[spaceGuid] = {\n 'role': 'developer',\n 'name': spaceresource['entity']['name']\n }\n\n # get all managers\n managerurl = baseAPIurl + entity['managers_url']\n managerresponse = json.loads(requests.get(\n managerurl, headers=headers, verify=False).text)\n if isInThisRole(managerresponse, userinfo['user_name']):\n spaceWiseUserRoles[spaceGuid] = {\n 'role': 'manager',\n 'name': spaceresource['entity']['name']\n }\n\n profileInfo['spaceWiseUserRoles'] = json.dumps(spaceWiseUserRoles)\n session['spaceWiseUserRoles'] = spaceWiseUserRoles\n\n # get user apps from all spaces\n url = '{}/v2/apps'.format(baseAPIurl)\n response = requests.get(url, headers=headers, verify=False)\n appsData = json.loads(response.text)\n appsUrls = {}\n\n # user accessible app url\n for resource in appsData['resources']:\n routes_url = baseAPIurl + \\\n resource['entity']['routes_url']\n routes_url_response = json.loads(requests.get(\n routes_url, headers=headers, verify=False).text)\n for app in routes_url_response['resources']:\n hostname = app['entity']['host']\n appsUrls[hostname] = {\n 'url': 'http://{}.local.pcfdev.io'.format(hostname),\n 'space_guid': app['entity']['space_guid'],\n 'userRole': getSpaceRole(spaceWiseUserRoles, app['entity'][\n 'space_guid'], userinfo['user_name'])}\n profileInfo['apps'] = appsUrls\n\n organization_guid = getOrganizationId(session, appsData)\n profileInfo['org_id'] = organization_guid\n profileInfo['org_users'] = json.dumps(getOrganizationUsers(\n session, organization_guid))\n return render_template('profile.html', data=profileInfo)", "def details(self, identifier):\n return self.client.request_with_method(Methods.GET % (self.name, identifier,))", "def get_personal_info(self):\n self.get(\"INFO\",\"GetPersonalInfo\")\n response = self.send()\n return response", "def user_data(self, access_token, *args, **kwargs):\n return self.get_json(\n \"https://api.dropboxapi.com/2/users/get_current_account\",\n headers={\"Authorization\": f\"Bearer {access_token}\"},\n method=\"POST\",\n )", "def get_amenities():\n list_amenities = []\n for amenity in storage.all('Amenity').values():\n list_amenities.append(amenity.to_dict())\n return jsonify(list_amenities)", "def getProfile(self):\n # GET /profile\n debugMain('getProfile')\n return self._genericGet('/profile')", "def get_my_details(self, request):\n try:\n user = request.user\n staff = user.aro.staff\n polling_districts = []\n\n for pd in user.aro.polling_districts.all():\n polling_districts.append(pd.polling_district)\n\n polling_division = polling_districts[0].polling_division\n administrative_district = polling_division.administrative_district\n electoral_district = administrative_district.electoral_district\n election = 
request.user.aro.election\n polling_stations = []\n\n for pd in polling_districts:\n ps = pd.polling_stations.all()\n polling_stations.append(*ps.all())\n\n counting_centre = polling_division.counting_centres.filter(\n election=election)\n\n user_data = UserSerializer(user).data\n staff_data = StaffsSerializer(staff).data\n polling_districts_data = PollingDistrictSerializer(\n polling_districts, many=True).data\n polling_division_data = PollingDivisionSerializer(\n polling_division).data\n administrative_district_data = AdministrativeDistrictSerializer(\n administrative_district).data\n electoral_district_data = ElectoralDistrictSerializer(\n electoral_district).data\n election_data = ElectionSerializer(election).data\n polling_stations_data = PollingStationSerializer(\n polling_stations, many=True).data\n counting_centre_data = CountingCentreSerializer(\n counting_centre, many=True).data\n\n my_details = {\n \"profile\": dict(user_data, **staff_data),\n \"polling_districts\": polling_districts_data,\n \"polling_division\": polling_division_data,\n \"administrative_district\": administrative_district_data,\n \"electoral_district\": electoral_district_data,\n \"election\": election_data,\n \"polling_stations\": polling_stations_data,\n \"counting_centre\": counting_centre_data\n }\n\n return Response(my_details, status=status.HTTP_200_OK)\n except Exception as e:\n return Response(e,status=status.HTTP_500_INTERNAL_SERVER_ERROR)" ]
[ "0.62125313", "0.603639", "0.5979062", "0.5663854", "0.5658526", "0.5654593", "0.55721027", "0.5450002", "0.5400566", "0.5396792", "0.5345879", "0.5323987", "0.53042555", "0.52950203", "0.5274707", "0.5259616", "0.52581507", "0.5236326", "0.52285975", "0.52084494", "0.519947", "0.51918435", "0.5183672", "0.5168598", "0.51325107", "0.51316017", "0.5130489", "0.51264215", "0.51264215", "0.5110099", "0.51045215", "0.5092955", "0.50860196", "0.50836986", "0.5072682", "0.5062801", "0.50522965", "0.502766", "0.5024268", "0.50219643", "0.50203377", "0.5019557", "0.5017974", "0.50174004", "0.5013306", "0.500411", "0.49948075", "0.49916175", "0.4991443", "0.49874014", "0.4986926", "0.49823433", "0.49823433", "0.49821702", "0.49782914", "0.49763888", "0.4969356", "0.49641934", "0.49641934", "0.4956311", "0.49393356", "0.49385363", "0.49385363", "0.49385363", "0.49379024", "0.4930725", "0.4929198", "0.4925481", "0.4925083", "0.49245316", "0.49208963", "0.4913093", "0.49121726", "0.4910019", "0.4903531", "0.4893144", "0.48889765", "0.48851824", "0.4885083", "0.4878382", "0.48774257", "0.48755905", "0.48732737", "0.48705903", "0.48642185", "0.48616967", "0.486139", "0.48582557", "0.48560876", "0.48560876", "0.48560876", "0.48554754", "0.48458734", "0.4838129", "0.48376057", "0.4825904", "0.48184344", "0.48138502", "0.48123592", "0.48118073" ]
0.636169
0
Converts Strava datetime to epoch timestamp
def get_epoch(timestamp): timestamp_dt = datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%SZ") epoch = calendar.timegm(timestamp_dt.timetuple()) return epoch
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def epoch2datetime(t):\n return datetime.fromtimestamp(t/1000.0)", "def datetime_to_epoch(datetime):\n return datetime.astype('int64') // 1e9", "def epoch(value):\n if isinstance(value, datetime.datetime):\n return int(calendar.timegm(value.timetuple())*1000)\n return '' #fails silently for non-datetime objects", "def datetime2epoch(dt):\n return int(mktime(dt.timetuple())*1000)", "def datetime_to_epoch(datetime_obj):\n return int(datetime_obj.strftime(\"%s\")) * 1000", "def convert_to_epoch(event_time_date) -> int:\n pattern = '%Y-%m-%d %H:%M'\n return int(time.mktime(time.strptime(event_time_date, pattern)))#to epoch value", "def epoch():\n return datetime2epoch(datetime.now())", "def date_to_epoch(date):\n epoch_time = date.strftime('%s')\n return int(epoch_time)", "def to_epoch(datetime_obj):\n if sys.version_info[0:2] < (3, 3):\n import calendar\n\n return (\n calendar.timegm(datetime_obj.timetuple())\n + datetime_obj.microsecond / 1000000\n )\n else:\n return datetime_obj.timestamp()", "def epoch2datetime(epoch):\n return datetime.fromtimestamp(epoch)", "def str_to_epoch(dt: Union[str, datetime]) -> int:\n return int(str_to_datetime(dt).timestamp())", "def datetime_to_epoch(indate):\n origin = datetime.datetime(1970, 1, 1)\n if indate.tzinfo:\n origin = pytz.timezone('UTC').localize(origin)\n return (indate - origin).total_seconds()", "def datetime_to_epoch(indate):\n origin = datetime.datetime(1970, 1, 1)\n if indate.tzinfo:\n origin = pytz.timezone('UTC').localize(origin)\n return (indate - origin).total_seconds()", "def epoch2time(time):\n\tvalue = datetime.datetime.fromtimestamp(time)\n\tNormal = value.strftime('%Y-%m-%d %H:%M:%S')\n\tprint(normal)\n\treturn normal", "def parse_time(s):\n\n dt = dateutil.parser.parse(s)\n# epoch_time = int((dt - datetime(1970, 1, 1, tzinfo=timezone.utc)).total_seconds())\n epoch_time = int(dt.replace(tzinfo=timezone.utc).timestamp())\n\n return epoch_time", "def chrome_timestamp_to_epoch(chrome_timestamp):\n return (chrome_timestamp / 1000000) - 11644473600", "def convert_timestamp_to_epoch(cls, timestamp, tsformat):\n return time.mktime(time.strptime(timestamp, tsformat))", "def epoch_to_dt(epoch):\n if type(epoch) in (str, unicode):\n epoch = float(epoch)\n return datetime.fromtimestamp(epoch)", "async def toEpochTime(self, ctx, *, timeStr:str):\n\t\t_, time = (search_dates(\n\t\t\ttimeStr.upper(), settings={'RETURN_AS_TIMEZONE_AWARE': True})[0])\n\t\tawait ctx.send(f\"`{int(time.timestamp())}` is the timestamp for `{time.strftime('%c in timezone %Z')}`\\nThe basic timestamp would look like this: <t:{int(time.timestamp())}:F>\")", "def datetime_to_timestamp(dt):\n\n epoch = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)\n return (dt - epoch).total_seconds()", "def epoch_time(when):\n if not when: return 0\n epoch = datetime.utcfromtimestamp(0)\n delta = when - epoch\n return int(delta.total_seconds())", "def convert_datetime(\n date: float | np.ndarray,\n epoch: str | tuple | list | np.datetime64 = _unix_epoch\n ):\n # convert epoch to datetime variables\n if isinstance(epoch, (tuple, list)):\n epoch = np.datetime64(datetime.datetime(*epoch))\n elif isinstance(epoch, str):\n epoch = np.datetime64(parse(epoch))\n # convert to delta time\n return (date - epoch) / np.timedelta64(1, 's')", "def convert_datetime(\n date: float | np.ndarray,\n epoch: str | tuple | list | np.datetime64 = _unix_epoch\n ):\n # convert epoch to datetime variables\n if isinstance(epoch, (tuple, list)):\n epoch = np.datetime64(datetime.datetime(*epoch))\n elif 
isinstance(epoch, str):\n epoch = np.datetime64(parse(epoch))\n # convert to delta time\n return (date - epoch) / np.timedelta64(1, 's')", "def IsoTimestampToEpoch(timestamp):\n try:\n dt = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')\n except ValueError:\n dt = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')\n return calendar.timegm(dt.timetuple()) + dt.microsecond / 1e6", "def ts_to_epoch_seconds(t) -> float:\n return t.astype(int) / 1e9", "def _hx_time_to_epoch(self, timestr: str) -> int: # pragma: no cover\n\n time_obj = datetime.datetime.strptime(timestr, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n return int(time_obj.strftime(\"%s\"))", "def epoch_seconds(date):\n td = date - epoch\n return td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)", "def epoch_seconds(date):\n td = date - epoch\n return td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)", "def convert_epoch_to_date(epoch_time):\n local_time = datetime.datetime.fromtimestamp(epoch_time).strftime(TIME_FORMAT_YSI) \n\n return local_time", "def epoch_time_now():\n return int(time.time())", "def epoch_seconds(date):\r\n td = date - epoch\r\n return td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)", "def UTCstr2epoch(datestr=epoch2UTCstr(), fmat=\"%Y-%m-%d %H:%M:%S\"):\n return timegm(strptime(datestr, fmat))", "def convertDateToTimeStamp(datetime_object):\n\tpattern = '%Y-%m-%d %H:%M:%S'\n\tepoch = int(time.mktime(time.strptime(str(datetime_object), '%Y-%m-%d %H:%M:%S')))\n\treturn epoch", "def _datetime_to_timestamp(self, dt):\n return time.mktime(dt.timetuple())", "def dt_epoch_msecs(value):\n return long(calendar.timegm(value.timetuple())) * 1000", "def localstr2epoch(datestr=epoch2UTCstr(), fmat=\"%Y-%m-%d %H:%M:%S\"):\n return mktime(strptime(datestr, fmat))", "def epoch_to_date(epoch_time):\n epoch_time = float(epoch_time_standardization(epoch_time))\n return datetime.datetime.fromtimestamp(epoch_time)", "def datetime_to_timestamp(dt):\n return calendar.timegm(dt.timetuple()) * 1000", "def epoch():\n\treturn time.time()", "def epochnow():\n return time.time()", "def datetime_to_timestamp(value):\n if not isinstance(value, datetime.datetime):\n raise ValueError(\n 'Expecting datetime object, got %s instead' % type(value).__name__)\n if value.tzinfo is not None:\n raise ValueError('Only UTC datetime is supported')\n dt = value - EPOCH\n return dt.microseconds + 1000 * 1000 * (dt.seconds + 24 * 3600 * dt.days)", "def convert_epoch(aepoch):\n\tprint \"time given: \" + aepoch\n\tepoch = time.strftime(\"%a %d %b %Y %H:%M:%S +0000\", time.gmtime(float(aepoch)))\n\tprint \"converted time: \" + epoch", "def rfc2822_to_epoch(datestr):\n return mktime_tz(parsedate_tz(datestr))", "def seconds_since_epoch(date_time, epoch=None):\n return microseconds_since_epoch(date_time) / 10.0**6", "def datetimeToUnix(date):\n return int(time.mktime(date.timetuple())*1000)", "def ConvertTransactionDateToEpochGM(date_input):\n try:\n temp = timegm(time.strptime(date_input, '%m%d%Y'))\n return temp\n except:\n return -1", "def twitter_created_at_to_epoch(created_at):\n try:\n return int(time.mktime(time.strptime(\n created_at,\"%a %b %d %H:%M:%S +0000 %Y\")))\n except ValueError as e:\n print(f\"something went wrong: {e}\")\n return None", "def timestamp_to_unix(timestamp):\n return timestamp / 1e6", "def epoch_to_str(epoch: int) -> str:\n return datetime_to_str(datetime.fromtimestamp(epoch, tz=timezone.utc))", "def date_to_timestamp(date):\n return 
int(calendar.timegm(date.utctimetuple()))", "def to_avro(date_time: datetime.datetime) -> float:\n if date_time.tzinfo:\n ts = (date_time - utils.epoch).total_seconds()\n else:\n ts = (date_time - utils.epoch_naive).total_seconds()\n\n return ts * 1000", "def timestamp(self, t):\n if isinstance(t, datetime):\n t = time.mktime(t.timetuple())\n return t - 631065600", "def convert_epoch_to_timestamp(cls, timestamp, tsformat):\n return time.strftime(tsformat, time.gmtime(timestamp))", "def datetime_to_float(t, epoch=None):\n if epoch is None:\n epoch = np.datetime64(0, \"s\")\n return (t - epoch) / np.timedelta64(1, \"s\")", "def epoch2UTCstr(timestamp=time(), fmat=\"%Y-%m-%d %H:%M:%S\"):\n return strftime(fmat, gmtime(timestamp))", "def convert_to_epoch(logvalues):\n\n # Example List: ['Aug', '1', '07:32:18', 'ip-172-31-29-11', ...]\n d = datetime.today()\n current_year = d.year\n\n log_month = logvalues[0]\n log_day = logvalues[1]\n log_time = logvalues[2]\n\n log_time_hh = log_time.split(':')[0]\n log_time_mm = log_time.split(':')[1]\n log_time_ss = log_time.split(':')[2]\n\n epoch_ts = time.mktime(current_year, log_month,\n log_day, log_time_hh,\n log_time_mm, log_time_ss)\n return epoch_ts", "def datetime_to_epoch_microseconds(obj: \"datetime\") -> float:\n td = datetime_to_epoch_timedelta(obj)\n return (td.days * 86400 + td.seconds) * 10**6 + td.microseconds", "def np_dt_epoch_msec(value):\n return value.astype(long) / 1000", "def stringTimeToUnix_NEW(st):\n y, m, d, h, n, s, ms = stringTimeToTuple(st)\n epochSecs = mktime(map(int ,(y, m, d, h, n, s, 0, 0, 0)))-timezone\n #print \"seconds is %f\\n\" % (epochSecs + int(ms)/1000.0)\n return epochSecs + int(ms)/1000.0", "def to_timestamp(dt):\n return time.mktime(dt.timetuple())", "def to_unix(cls, timestamp):\n if not isinstance(timestamp, datetime.datetime):\n raise TypeError(\"Time.milliseconds expects a datetime object\")\n return timestamp.timestamp()", "def datetime_to_timestamp(datetime_value: datetime) -> int:\n # Use calendar.timegm because the time.mktime assumes that the input is in your\n # local timezone.\n return calendar.timegm(datetime_value.timetuple())", "def timestamp_to_datetime(value):\n if not isinstance(value, (int, long, float)):\n raise ValueError(\n 'Expecting a number, got %s instead' % type(value).__name__)\n return EPOCH + datetime.timedelta(microseconds=value)", "def get_datetime(epoch):\n\n t = time.gmtime(epoch)\n dt = datetime.datetime(*t[:6])\n\n return dt", "def datetime2timestamp(value, millis=False):\n if millis:\n return int(time.mktime(value.timetuple()))\n else:\n return int(round((time.mktime(value.timetuple()) +\n value.microsecond / 1E6) * 1000))", "def convert_date_to_epoch(date_value, time_value, date_parameter_code=DATE_YY):\n\n return int(time.mktime(\n time.strptime(str(int(date_value))+str(int(time_value)), \n DATE_FORMAT[date_parameter_code]+TIME_FORMAT)))", "def convert_time(t):\n return datetime.fromtimestamp(t / 1e7 - 11644473600)", "def to_stamp(datetime_):\r\n try:\r\n return datetime_.timestamp()\r\n except AttributeError:\r\n return time.mktime(datetime_.timetuple()) + datetime_.microsecond / 1e6", "def forge_timestamp(value) -> int:\n assert isinstance(value, str)\n dt = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n return calendar.timegm(dt.utctimetuple())", "def epoch_time_standardization(epoch_time):\n epoch_time_string = str(epoch_time)\n # if the given epoch time appears to include milliseconds (or some other level of precision)...\n # and does not have a decimal in it, add a 
decimal point\n if len(epoch_time_string) > 10 and '.' not in epoch_time_string:\n epoch_time = f'{epoch_time_string[:10]}.{epoch_time_string[10:]}'\n return epoch_time", "def sbetime2unixtime(value):\n if not isinstance(value, int):\n raise InstrumentParameterException(\"value not a int\")\n\n return SBE_EPOCH + value", "def unix_time_nanos(dt):\n return timedelta_to_micros(dt - epoch)", "def _epoch_utc_to_datetime(epoch_utc):\n return datetime.fromtimestamp(epoch_utc)", "def to_unix_time(timestamp):\n date = datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S')\n return int(time.mktime(date.timetuple()))*1000", "def _get_timestamp(self, timestamp):\n return int(timestamp * 1e6)", "def checktime(dt=None):\n if dt is None:\n epoch_time = str(int(time()))\n else:\n epoch_time = str(int(mktime(dt.timetuple())))\n return epoch_time + \"000\"", "def dateToTimeStamp(dt):\n timestamp = dt.timestamp()\n timestamp = int(timestamp)\n return timestamp", "def get_epoch_time(utc_datetime=None):\n if not utc_datetime:\n utc_datetime = datetime.datetime.utcnow()\n return math.ceil((utc_datetime - EPOCH_START).total_seconds())", "def testEpochDate(self):\n golang_epoch = golang_time.GolangTimeEpoch()\n self.assertEqual(golang_epoch.year, 1)\n self.assertEqual(golang_epoch.month, 1)\n self.assertEqual(golang_epoch.day_of_month, 1)", "def datetime2ts(dt):\n return int(time.mktime(dt.timetuple()))", "def normalize_timestamp(timestamp_series):\n # convert datetime strings into milliseconds from epoch\n times = pd.to_datetime(timestamp_series, format='%Y-%m-%d %H:%M:%S').astype(np.int64) // int(1e6)\n return times", "def human_to_timestamp(time: str) -> int:\n try:\n return int(time)\n except ValueError:\n pass\n time = dateparser.parse(time)\n if time is None:\n raise ValueError\n return int(time.timestamp() * 1e6)", "def time_ISO8601_to_Influx_epoch(timeString):\n timeEpoch = int(time.mktime(time.strptime(timeString[:19], '%Y-%m-%dT%H:%M:%S')))\n # #print(\"timeEpoch without timezone correction: \", timeEpoch)\n # timeZoneOffset_HHMM = time.strftime(\"%z\", time.gmtime(time.time()))\n # #print(\"timezoneOffset_HHMM: \",timeZoneOffset_HHMM)\n # timeZoneOffset_s = (int(timeZoneOffset_HHMM[1:3]) * 60 + int(timeZoneOffset_HHMM[3:5])) * 60\n # print(\"timeZoneOffset_s: \", timeZoneOffset_s)\n # # timeEpoch = timeEpoch + timeZoneOffset_s\n # # print(\"timeEpoch with timezone correction: \", timeEpoch)\n decimalSeconds = timeString[19:].strip('.Z')\n #print(decimalSeconds)\n decimalSeconds = '{:0<9}'.format(decimalSeconds)\n #print(decimalSeconds)\n timeInflux = str(timeEpoch) + decimalSeconds\n #print(\"time_ISO8601_to_Influx_epoch(timeString): \", time_ISO8601_to_Influx_epoch(datestring))\n return int(timeInflux)", "def epoch_to_format(epoch, format='%Y-%m-%dT%H:%M:%SZ'):\n\n return datetime.fromtimestamp(int(epoch[:10]), tz=timezone.utc).strftime(format)", "def datetime2UnixTime(dt):\n\n # UTC unix timestamp\n unix_timestamp = (dt - datetime(1970, 1, 1)).total_seconds()\n\n return unix_timestamp", "def datetimeToUnixSec(date):\n return int(time.mktime(date.timetuple()))", "def unix_to_timestamp(unix):\n return int(round(unix * 1e6))", "def _current_epoch_secs():\n now = datetime.datetime.utcnow()\n epoch = datetime.datetime(1970, 1, 1)\n return (now - epoch).total_seconds()", "def convert_to_bson_timestamp(ts):\n lowpart = int(ts)\n return Timestamp(lowpart, 1)", "def convert_to_bson_timestamp(ts):\n lowpart = int(ts)\n return Timestamp(lowpart, 1)", "def datetime_utc_epoch_start() -> datetime:\n return 
timestamp_to_datetime(0)", "def convert_timestamp(data):\n try:\n return datetime.datetime.fromtimestamp(float(data))\n except ValueError:\n return datetime.datetime.fromisoformat(data.decode(\"utf-8\"))", "def get_epoch(year, month='01'):\n pattern = '%Y.%m.%d %H:%M:%S'\n return int(time.mktime(time.strptime(str(year) + '.' + str(month) + '.01 00:00:00', pattern)))", "def to_timestamp(value):\n if not isinstance(value, datetime.date):\n return None\n\n return time.mktime(value.timetuple())", "def epoch2localstr(timestamp=time(), fmat=\"%Y-%m-%d %H:%M:%S\"):\n return strftime(fmat, localtime(timestamp))", "def conv_unix(evt_time):\n st = time.strptime(evt_time, '%Y-%m-%d %H:%M')\n return time.mktime(st)", "def datetime_to_utc(timestamp):\n\n epoch = datetime.utcfromtimestamp(0)\n delta = timestamp-epoch\n\n return long(delta.total_seconds() * 1000)", "def epoch_to_format(format, epoch):\n\n return datetime.fromtimestamp(int(epoch[:10]), tz=timezone.utc).strftime(format)", "def _create_timestamp():\n return (datetime.utcnow() - datetime(1970,1,1)).total_seconds()", "def human_readable_time_from_epoch_time(epoch_time: int, utc_time: bool = False):\n result = datetime.fromtimestamp(epoch_time / 1e6).isoformat() if epoch_time else None\n if result:\n result += 'Z' if utc_time else ''\n return result" ]
[ "0.82143974", "0.8190802", "0.78351754", "0.7794575", "0.76652604", "0.7584979", "0.7570103", "0.7562643", "0.75359434", "0.7468649", "0.7423047", "0.73089755", "0.73089755", "0.724977", "0.71811444", "0.71131325", "0.71113986", "0.7073459", "0.7016049", "0.7004898", "0.6987215", "0.6981752", "0.6981752", "0.69765353", "0.69177485", "0.6891509", "0.6837414", "0.6837414", "0.68334335", "0.6832361", "0.68207043", "0.67531127", "0.6752107", "0.67361003", "0.67325974", "0.67192245", "0.6714412", "0.668908", "0.66845286", "0.66679966", "0.66459465", "0.66433334", "0.6554191", "0.65466136", "0.6530486", "0.6517804", "0.65161866", "0.65028256", "0.6488692", "0.6476108", "0.64753586", "0.64747703", "0.6470106", "0.6460709", "0.64428115", "0.640697", "0.640333", "0.640093", "0.64001125", "0.6386552", "0.63700306", "0.63695174", "0.6369163", "0.6366944", "0.6366276", "0.6339569", "0.6322222", "0.63084745", "0.62975436", "0.6276302", "0.62686664", "0.624479", "0.62184566", "0.62169784", "0.6205942", "0.6199923", "0.61844987", "0.61764765", "0.61630887", "0.61402375", "0.61325055", "0.6130532", "0.61172736", "0.6110181", "0.6096772", "0.60778165", "0.6070894", "0.60688305", "0.6060309", "0.6060309", "0.6041632", "0.6033455", "0.6027596", "0.6024865", "0.6023315", "0.6018699", "0.60077876", "0.5982937", "0.59777546", "0.59770185" ]
0.746134
10
Get or refresh access token from Strava API.
def get_token(client_id, client_secret, token, renewal): url = "https://www.strava.com/api/v3/oauth/token" if renewal: payload = { 'client_id': client_id, 'client_secret': client_secret, 'refresh_token': token, 'grant_type': 'refresh_token'} message = "Successfully refreshed Strava token." else: payload = { 'client_id': client_id, 'client_secret': client_secret, 'code': token, 'grant_type': 'authorization_code'} message = "Successfully authenticated with Strava using access code." response = return_json(url, "POST", payload=payload) helper.log_info(message) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh_token(self):\n # basic function to get an access token\n api_response = requests.get(\n self.api_config.get_api_url() + \"authentication/g?username=\" + self.api_config.get_api_username() + \"&password=\" + self.api_config.get_api_password())\n\n if api_response.status_code >= 200:\n self.API_TOKEN = api_response.content.decode()\n\n return self.API_TOKEN\n else:\n return None", "def get_access_token(self, renew=False):\n if self.access_token is None or renew:\n headers = {} # don't use json here, juse urlencode.\n url = self._url_for_op('token')\n data = urllib.urlencode({'grant_type': 'client_credentials',\n 'client_id':self.CLIENT_ID,\n 'client_secret':self.CLIENT_SECRET})\n req = urllib2.Request(url, data, headers)\n try:\n response = urllib2.urlopen(req).read()\n response = json.loads(response)\n except urllib2.HTTPError as e:\n raise ApiError(e.reason)\n except Exception, e:\n raise ApiError(e)\n self.access_token = response['access_token']\n return self.access_token", "def _get_token(self):\n if self._access_token is None or self._is_expired():\n self._refresh_token()\n return self._access_token", "def get(self):\n\n user = context_property.request_user\n Log.info(\"Refresh access token for %i\" % user.id)\n\n return {\n \"accessToken\" : create_access_token(user.id)\n }, 200", "def get_access_token(self):\n\n token_work = time.time() < self.expires\n\n if token_work:\n # No need update token\n return self.access_token\n\n data = {\n 'client_id': self.client_id,\n 'grant_type': 'implicit'\n }\n\n response = requests.post('https://api.moltin.com/oauth/access_token', data=data)\n raise_response_errors(response)\n\n response_json = response.json()\n\n self.access_token = response_json['access_token']\n self.expires = response_json['expires']\n\n logger.debug('elasticpathh access token was updated')\n\n return self.access_token", "def get_token():\n\turl = SPOTIFY_ACCOUNT_HOST + 'token'\n\tcurrent_refresh_token = config.get('spotify_credentials', 'refresh_token')\n\tbody = {'grant_type': 'refresh_token', 'refresh_token': current_refresh_token}\n\tauth_header = 'Basic ' + b64encode('{0}:{1}'.format(SPOTIFY_CLIENT_ID, \n\t\tSPOTIFY_CLIENT_SECRET))\n\theaders = {'Authorization': auth_header}\n\n\tresponse = requests.post(url, headers=headers, data=body).json()\n\tif response.has_key('refresh_token'):\n\t\tlogging.debug('Received new refresh token')\n\t\tconfig.set('spotify_credentials', 'refresh_token', \n\t\t\tresponse['refresh_token'])\n\treturn response['access_token']", "def refreshAccessToken(self):\r\n\r\n assert hasattr(self.oauthToken, \"getRefreshToken\")\r\n\r\n #turn the response into json\r\n\r\n response = self._oauth.refreshAccessToken(self.oauthToken)\r\n responseBody = json.loads(response['Body'])\r\n\r\n try:\r\n oauthToken = token.Token(responseBody)\r\n except TypeError:\r\n print (\"Bad response when refreshing the token \" + str(responseBody))\r\n sys.exit()\r\n\r\n return oauthToken", "def refresh(self):\n self._request_token(grant_type='client_credentials')", "def getAccessToken(self):\r\n\r\n #lets see if we have an oauth code\r\n if self.oauthToken is None:\r\n self.oauthToken = self.createAccessToken\r\n\r\n if self.oauthToken.isExpired(): #check to see if its expired if so refresh it\r\n self.oauthToken = self.refreshAccessToken()\r\n\r\n return self.oauthToken #return out access token\r", "def _refresh_access_token(self) -> None:\n response = httpx.post(\n f\"{self._base_url}/oauth2/token\",\n proxies=self._proxies,\n data={\n \"grant_type\": 
\"client_credentials\",\n \"client_id\": self._api_key,\n \"client_secret\": self._api_secret,\n },\n )\n response.raise_for_status()\n token = response.json()[\"access_token\"]\n c = httpx.Client()\n c.close()\n self._authorization_headers = {\"Authorization\": f\"Bearer {token}\"}", "def refreshAccessToken(self):\n params = {\"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refreshToken}\n for i in [self.CLIENT_ID, self.CLIENT_SECRET]:\n params[i] = self.conf[i]\n data = urllib.parse.urlencode(params).encode(\"utf-8\")\n request = urllib.request.Request(self.conf[self.TOKEN_ENDPOINT])\n request.add_header(\"Content-Type\", \"application/x-www-form-urlencoded; charset=utf-8\")\n f = urllib.request.urlopen(request, data)\n root = json.loads(f.read().decode(\"utf-8\"))\n self.accessToken = root[self.ACCESS_TOKEN]\n self.__saveCacheTokens()\n return self.accessToken", "def get_access_token(self, refresh=False):\n return self._token_man.get_access_token(refresh)", "def refresh_access_token(self):\n parameters = {'client_id': self.CLIENT_ID,\n 'auth_code': self.auth_code,\n 'client_secret': self.CLIENT_SECRET,\n 'grant_type': 'authorization_code'}\n url = self.ACCESS_TOKEN_URL % parameters\n data = self._get_refresh_data()\n logging.info('url: %s, data: %s', url, data)\n\n try:\n # empty data to trigger a post\n req = urllib2.Request(url, data)\n req.add_header('Content-Type', 'application/x-www-form-urlencoded')\n result = urllib2.urlopen(req)\n result = json.load(result)\n logging.info('result: %s', result)\n except urllib2.HTTPError, err:\n result = json.load(err)\n logging.info(result)\n raise err\n\n self.access_token = result['access_token']\n self.expires = int(time.time() + result['expires_in'])\n self.refresh_token = result.get('refresh_token', None)", "def get_token(self, legs=2):\n if legs == 2:\n\n headers = {}\n\n headers.update({ 'Content-Type' : 'application/x-www-form-urlencoded' })\n\n data = {}\n\n data.update({'client_id' : self.clientId})\n data.update({'client_secret' : self.clientSecret})\n data.update({'grant_type' : 'client_credentials'})\n data.update({'scope' : self.scopes})\n\n resp = self.http.post(self.webAddress, headers=headers, data=data)\n\n if resp.status_code == 200:\n cont = resp.json()\n return (cont['access_token'], cont['expires_in'])\n\n raise ConnectionError(\"Request failed with code {}\".format(resp.status_code) +\n \" and message : {}\".format(resp.content) +\n \" during authentication.\")\n else:\n raise NotImplementedError(\"3-legged authentication has not been implemented.\")", "def obtain_access_token():\n\tpost_data = {'grant_type': 'client_credentials',\n\t\t\t\t 'client_id': conos_config['client_id'],\n\t\t\t\t 'client_secret': conos_config['client_secret']}\n\n\ttry:\n\t\tresponse = requests.post(url=conos_config['sts_url'], data=post_data, timeout=60) # 60 seconds\n\t\tif response.ok:\n\t\t\treturn 'Bearer ' + response.json()['access_token']\n\t\telse:\n\t\t\tprint('\\nERROR: Can not obtain access token')\n\t\t\tprint('\\nResponse error: ', response.json())\n\t\t\tresponse.raise_for_status()\n\texcept requests.exceptions.RequestException as e:\n\t\t# All exceptions that Requests explicitly raises inherit from requests.exceptions.RequestException\n\t\tprint(\"Root cause: \", e)\n\t\tsys.exit(1)", "def refresh():\n current_user = get_jwt_identity()\n ret = {\n 'access_token': create_access_token(identity=current_user)\n }\n return jsonify(ret), 200", "async def solicit_token(url, scope):\n rc = RestClient(url, \"\")\n result = await 
rc.request(\"GET\", f\"/token?scope={scope}\")\n print(result[\"access\"])", "def get_token():\n\theaders = {\n\t\t'Authorization': 'Basic ' + (base64.b64encode((client_id + ':' + client_secret).encode(\"utf-8\"))).decode(\"utf-8\")}\n\toptions = {\n\t\t'grant_type': 'client_credentials',\n\t\t'json': True,\n\t}\n\n\tresponse = requests.post(\n\t\t'https://accounts.spotify.com/api/token',\n\t\theaders=headers,\n\t\tdata=options\n\t)\n\tif response.status_code == 200:\n\t\tcontent = json.loads(response.content.decode('utf-8'))\n\t\taccess_token = content.get('access_token', None)\n\t\treturn access_token\n\telse:\n\t\treturn None", "async def _fetch_access_token(session: ClientSession) -> dict:\n LOGGER.debug('fetching access token...')\n password = config.get('WFWX_SECRET')\n user = config.get('WFWX_USER')\n auth_url = config.get('WFWX_AUTH_URL')\n async with session.get(auth_url, auth=BasicAuth(login=user, password=password)) as response:\n return await response.json()", "def _refresh_token(self):\n token_url = self._base_url + '/api/oauth2/token'\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': self._client_id,\n 'client_secret': self._client_secret\n }\n headers = {'accept': 'application/json'}\n response = requests.post(token_url,proxies = self._proxy,params= params,headers = headers)\n logging.debug(response.text)\n parsed = response.json()\n self._access_token = parsed['access_token']\n self._refresh_token = parsed['refresh_token']\n expires_in = parsed['expires_in']\n ## Keep a buffer of 120 seconds to refresh token before expiry\n self._expires_at = datetime.now() + timedelta(seconds=(expires_in - 120))\n\n logging.debug('access_token %s expires at %s', self._access_token, self._expires_at)\n\n return", "def get_access_token(self):\n if self.token.is_expired():\n logging.debug('Requesting a new access token')\n self.token.load_from_json(json=self.__get_token_data__())\n else:\n logging.debug('Access token still valid')\n\n return self.token.access_token", "def get_access_token(self):\n payload = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'resource': self.resource\n }\n res = requests.post(self.auth_url, data=payload)\n data = res.json()\n if res.status_code == 200:\n return data['access_token'], res\n\n return False, res", "def refresh_credentials():\n global auth_token\n auth_token = get_oauth_token()", "def access_token(self):\n if self.has_expired():\n self.update()\n\n return self.token['access_token']", "def refresh_token(self):\n token = json.loads(get_metadata(\n 'instance/service-accounts/%s/token' % self.service_account,\n ))\n seconds = token['expires_in'] - 60\n self._expiration_time = (\n datetime.datetime.now() + datetime.timedelta(seconds=seconds)\n )\n self._token = token['access_token']", "def refresh_access_token(self):\n self._access_token = self.generate_access_token()", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': 
self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "async def async_get_access_token(self):\n if not self._oauth_session.valid_token:\n await self._oauth_session.async_ensure_token_valid()\n\n return self._oauth_session.token[\"access_token\"]", "def refresh_token():\n current_user = get_jwt_identity()\n if current_user is None:\n return abort(401)\n response = deepcopy(AUTH_OKAY)\n response['payload']['access_token'] = create_access_token(\n identity=current_user,\n expires_delta=EXPIRY_DURATION\n )\n response['payload']['expires_in'] = EXPIRY_DURATION.seconds\n response['payload']['not_before'] = int(time() + EXPIRY_DURATION.seconds)\n return jsonify(response['payload']), response['status_code']", "def refresh_access_token(self):\n if self.client_secret is None:\n raise Exception(\"client_secret must be set to execute \"\n \"refresh_access_token.\")\n if self.refresh_token is None:\n raise Exception(\"refresh_token must be set to execute \"\n \"refresh_access_token.\")\n params = {'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'grant_type': 'refresh_token',\n 'refresh_token': self.refresh_token}\n result = self._send_request(REFRESH_URL, params=params, method='POST',\n data_field=None)\n self.access_token = result['access_token']\n return self.access_token", "async def get_access_token(self):\n async with self._access_token_lock:\n if (not self._access_token\n or (not self._access_token_checked\n and not await self.check_access_token(\n self._access_token))):\n await self.receive_new_access_token()\n return self._access_token", "def get_access_token(client_id, refresh_token):\n h = {\n \"content-type\": 'application/x-www-form-urlencoded'\n }\n\n d = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": refresh_token,\n \"client_id\": client_id\n }\n\n r = requests.post(\"https://api.tdameritrade.com/v1/oauth2/token\", data=d, headers=h)\n\n return json.loads(r.text)[\"access_token\"]", "def _get_access_token(self):\n\n self._access_token = None\n if not self._refresh_token:\n raise ValueError(\"Refresh Token not set\")\n\n doc = minidom.Document()\n root = doc.createElement('tokenAuthRequest')\n doc.appendChild(root)\n aki = doc.createElement('accessKeyId')\n aki.appendChild(doc.createTextNode(self.publicAccessKey))\n root.appendChild(aki)\n pak = doc.createElement('privateAccessKey')\n pak.appendChild(doc.createTextNode(self.privateAccessKey))\n root.appendChild(pak)\n rt = doc.createElement('refreshToken')\n rt.appendChild(doc.createTextNode(self._refresh_token))\n root.appendChild(rt)\n data = doc.toprettyxml()\n\n resp = requests.post(BASE_URL + \"authorization\", data=data, headers=self._default_headers, verify=False)\n if resp.status_code >= 300:\n raise Exception(\"Failed to claim access token: {}\".format(resp))\n\n vals = etree_to_dict(ET.XML(resp.content.decode('utf-8')))\n\n self._access_token = resp.headers.get('Location', None)\n if not self._access_token:\n raise ValueError(\"Unable to get access token\")\n\n self._user_id = os.path.basename(vals.get('authorization').get('user'))\n\n # Always set the expiry 30 minutes from now so we dont have to deal with parsing timezones\n # self._access_token_expiry = dateutil_parser.parse(vals.get('authorization').get('expiration'))\n self._access_token_expiry = 
datetime.datetime.utcnow() + datetime.timedelta(minutes=30)", "def get_access_token(*args, **kwargs):\n return get_access_token_async(*args, **kwargs).get_result()", "def refresh():\n print(\"refresh request\")\n old_token = request.get_data()\n new_token = guard.refresh_jwt_token(old_token)\n ret = {'access_token': new_token}\n return ret, 200", "def get_access_token(self, token_url):\n # type: (str) -> str\n\n payload = {\n \"grant_type\" : \"client_credentials\",\n \"client_id\" : self.client_id,\n \"client_secret\" : self.client_secret,\n \"scope\" : self.client_scope,\n }\n headers = {\n \"accept\" : \"application/json\",\n }\n resp = requests.post(f\"{self.base_url}/{token_url}\", data=payload, headers=headers)\n try:\n if (resp.ok):\n return resp.json().get('access_token')\n except (ValueError):\n self.__log.error (\"Error obtaining access token with credentials\")", "def get_access_token(self, request) -> str or Exception:\n pass", "def refresh_token():\n json_request = request.json\n refresh_token = json_request.get('refresh_token')\n if not refresh_token:\n return msg.errors.bad_request(\n 'You should provide refresh token for this call')\n refresh_token_obj = RefreshToken.valid_token(refresh_token)\n if not refresh_token_obj:\n return msg.errors.unauthorized('Provided refresh token is not valid')\n access_token = generate_token(refresh_token_obj.user_id)\n return msg.success(\n message='New access token generated',\n access_token=access_token)", "def refresh_access_token(user_id):\n # Access tokens expire six hours after they are created, so they must be refreshed in order\n # for an application to maintain access to a user’s resources.\n # Every time you get a new access token, we return a new refresh token as well.\n # If you need to make a request, we recommend checking to see if the\n # short-lived access token has expired. 
If it has expired,\n # request a new short-lived access token with the last received refresh token\n\n # refresh_token is A OAuth2Token object (a dict too).\n # Need a key of 'access_token' set to the value\n # refresh_token(url, refresh_token=None, body='', auth=None, headers=None, **kwargs)\n\n # POST https://www.strava.com/oauth/token\n # client_id, client_secret, grant_type='refresh_token', refresh_token\n\n # retrieve the user's tokens from the database\n # if it has expired, then get a new one and update the database\n # otherwise, use the current one\n # return the token\n strava_athlete = StravaAthlete.query.filter_by(user_id=user_id).first()\n my_token = {'refresh_token': strava_athlete.refresh_token,\n 'access_token': strava_athlete.access_token,\n 'expires_at': strava_athlete.access_token_expires_at,\n 'expires_in': strava_athlete.access_token_expires_in}\n\n my_config = app_config[os.getenv('FLASK_CONFIG')]()\n\n oauth_session = OAuth2Session(my_config.STRAVA_CLIENT_ID,\n my_config.STRAVA_CLIENT_SECRET,\n authorization_endpoint=my_config.STRAVA_CLIENT_DOMAIN + '/oauth/authorize',\n token_endpoint=my_config.STRAVA_CLIENT_DOMAIN + '/oauth/token',\n token=my_token,\n grant_type='refresh_token')\n new_token = oauth_session.refresh_token(\n url=my_config.STRAVA_CLIENT_DOMAIN + '/oauth/token',\n client_id=my_config.STRAVA_CLIENT_ID,\n client_secret=my_config.STRAVA_CLIENT_SECRET)\n\n # save it to the database assuming there is no error\n if new_token is not None:\n strava_athlete.access_token = new_token['access_token']\n strava_athlete.access_token_expires_at = int(new_token['expires_at'])\n strava_athlete.access_token_expires_in = int(new_token['expires_in'])\n strava_athlete.refresh_token = new_token['refresh_token']\n strava_athlete.last_updated = datetime.utcnow()\n db.session.commit()\n return new_token['access_token']\n\n # must be an error to log this\n current_app.logger.error('Failed to refresh the token')\n return None", "def RefreshToken():\n params = {}\n params['client_id'] = Constants.USER['CLIENT_ID']\n params['client_secret'] = Constants.USER['CLIENT_SECRET']\n params['refresh_token'] = Constants.AUTH['REFRESH']\n params['grant_type'] = 'refresh_token'\n\n data = urllib.urlencode(params)\n\n headers = {\n 'User-Agent': 'LogoCert Client',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': 'text/html, */*',\n }\n\n request_url = Constants.OAUTH_TOKEN\n\n request = urllib2.Request(request_url, data, headers)\n res = urllib2.urlopen(request)\n response = res.read()\n return json.loads(response)", "def _get_access_token(self, url):\n if self.access_token:\n return self.access_token\n data = \"client_id=%s&client_secret=%s&grant_type=password&username=%s&password=%s&scope=write\" %\\\n (self.client_id, self.client_secret, self.username, self.password)\n\n parsed = urlparse(url)\n path = urlunparse(ParseResult(parsed.scheme, parsed.netloc, \"/oauth2/access_token\", None, None, None))\n\n auth_resp = urlopen(Request(path, data), timeout=10)\n if auth_resp.getcode() != 200:\n self.logger.error(\"Error with client credentials\")\n return self.access_token\n auth_resp_data = json.loads(auth_resp.read())\n\n if \"access_token\" in auth_resp_data:\n self.access_token = auth_resp_data[\"access_token\"]\n else:\n self.logger.error(\"Error with client credentials\")\n return self.access_token", "def get_token(self):\n self.token = self._session.fetch_token(\n token_url=CLOUD_URLS[\"get_token\"][1],\n client_id=self._client_id,\n client_secret=self._client_secret\n 
)", "def access_token(self):\n return self.access_token_str", "def get_access_token():\n\n scopes = [\n 'https://www.googleapis.com/auth/cloud-platform', 'email', 'profile'\n ]\n\n credentials, _ = default()\n credentials = auth.delegated_credentials(credentials, scopes=scopes)\n\n request = req.Request()\n credentials.refresh(request)\n access_token = credentials.token\n\n return access_token", "def access_token(global_config, existing_user, id_api):\n yield id_api.get_access_token_for_user(existing_user.email, existing_user.password)", "def get_sts_token(current_refresh_token, url=None):\n\n if url is None:\n url = auth_url\n\n if not current_refresh_token: # First time through, send password\n data = {'username': user, 'password': password, 'client_id': clientid, 'grant_type': 'password', 'takeExclusiveSignOnControl': True,\n 'scope': scope}\n print(\"Sending authentication request with password to\", url, \"...\")\n else: # Use the given refresh token\n data = {'username': user, 'client_id': clientid, 'refresh_token': current_refresh_token, 'grant_type': 'refresh_token'}\n print(\"Sending authentication request with refresh token to\", url, \"...\")\n if client_secret != '':\n data['client_secret'] = client_secret;\n \n try:\n # Request with auth for https protocol \n r = requests.post(url,\n headers={'Accept': 'application/json'},\n data=data,\n auth=(clientid, client_secret),\n verify=cert_file,\r\n proxies={\r\n 'http':'http://'+proxy_hostname+':'+proxy_port,\r\n 'https':'http://'+proxy_hostname+':'+proxy_port\r\n },\n allow_redirects=False)\n\n except requests.exceptions.RequestException as e:\n print('Refinitiv Data Platform authentication exception failure:', e)\n return None, None, None\n\n if r.status_code == 200:\n auth_json = r.json()\n print(\"Refinitiv Data Platform Authentication succeeded. RECEIVED:\")\n print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))\n\n return auth_json['access_token'], auth_json['refresh_token'], auth_json['expires_in']\n elif r.status_code == 301 or r.status_code == 302 or r.status_code == 307 or r.status_code == 308:\n # Perform URL redirect\n print('Refinitiv Data Platform authentication HTTP code:', r.status_code, r.reason)\n new_host = r.headers['Location']\n if new_host is not None:\n print('Perform URL redirect to ', new_host)\n return get_sts_token(current_refresh_token, new_host)\n return None, None, None\n elif r.status_code == 400 or r.status_code == 401:\n # Retry with username and password\n print('Refinitiv Data Platform authentication HTTP code:', r.status_code, r.reason)\n if current_refresh_token:\n # Refresh token may have expired. 
Try using our password.\n print('Retry with username and password')\n return get_sts_token(None)\n return None, None, None\n elif r.status_code == 403 or r.status_code == 451:\n # Stop retrying with the request\n print('Refinitiv Data Platform authentication HTTP code:', r.status_code, r.reason)\n print('Stop retrying with the request')\n return None, None, None\n else:\n # Retry the request to Refinitiv Data Platform \n print('Refinitiv Data Platform authentication HTTP code:', r.status_code, r.reason)\n print('Retry the request to Refinitiv Data Platform')\n return get_sts_token(current_refresh_token)", "def refresh_token(self, iam_client_id, iam_client_secret, refresh_token):\n\n data = HTTPHeaderDict()\n data.add('client_id', iam_client_id)\n data.add('client_secret', iam_client_secret)\n data.add('grant_type', 'refresh_token')\n data.add('refresh_token', refresh_token)\n \n self.log.info(\"refresh_token. data: %s\" % data)\n\n response = requests.post(self.token_endpoint, data=data, verify=True)\n\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as e:\n # Whoops it wasn't a 200\n self.log.error(\"refresh_token() Error: %s \" %str(e))\n self.log.error(\"http error:\" + response.status_code)\n return response.status_code\n\n result = json.loads(response.content)\n return result[\"access_token\"]", "def refresh(self):\n self._request_token(grant_type='password', username=self._username,\n password=self._password)", "def request_access_token(self, *args, **kwargs):\n response = super().request_access_token(*args, **kwargs)\n if \"access_token\" not in response:\n response[\"access_token\"] = response[\"id_token\"]\n return response", "def refresh_token(self):\n url = 'https://www.yikyak.com/api/auth/token/refresh'\n token = self._request('POST', url)\n self.session.headers.update({'x-access-token': token})", "def get_access_token():\n\n account = get_account()\n\n account.EnsureCredentials(dbus_interface=GOA_ACCOUNT)\n access_token, _ = account.GetAccessToken(dbus_interface=GOA_ACCOUNT_OAUTH2)\n return str(access_token)", "def accessToken(self):\n if self.isExpired:\n self.refresh()\n\n return self._accessToken", "def _request_access_token(self):\n resp = requests.get(self.TOKEN_URL_FORMAT.format(\n self.creds().consumer_key(), self.creds().app_secret())\n )\n status = resp.status_code\n\n # If the token request fails, try to use the configured app id\n # and secret. This probably won't work, but the docs say that it\n # should. 
for more info, see:\n # https://developers.facebook.com/docs/facebook-login/access-tokens\n token = \"%s|%s\" % (self.creds().consumer_key(),\n self.creds().app_secret())\n if status == 200:\n token = resp.text.split('access_token=')[1]\n else:\n self.logger.error(\n \"Facebook token request failed with status %d\" % status\n )\n return token", "def test_access_token_get(self):\n client = oauth.Client(self.consumer, None)\n resp, content = client.request(self._uri('request_token'), \"GET\")\n\n self.assertEqual(int(resp['status']), 200)", "def _GetAccessToken(self):\n\n # Encoding client authorization \n pair = \"{client_key}:{client_secret}\".format(client_key=self.client_key, client_secret=self.client_secret)\n authorization = 'MUthRmpVa1JUaVlxbDVUTElUYVFnOlRENmpYMTdGbmhPSzNodWdqWUZqVDU0YzVjWGNQeko3'\n\n # Getting the access token\n access_token_headers = { \"Authorization\": \"Basic {authorization}\".format(authorization=authorization) }\n request_endpoint = \"/oauth/token?grant_type=authorization_code&code={code}&redirect_uri=https://80a3bb863001.ngrok.io\".format(code=self.code)\n print(request_endpoint)\n self.conn.request(\"POST\", request_endpoint, headers=access_token_headers)\n res = self.conn.getresponse()\n response = json.loads(res.read().decode(\"utf-8\"))\n\n try:\n return response[\"access_token\"]\n except KeyError:\n print(\"Request for access token failed for the following reason: {reason}\".format(reason=response[\"reason\"]))", "def get_token(self):\n oauth_provider = UserSocialAuth.objects.get(provider='drchrono')\n access_token = oauth_provider.extra_data['access_token']\n return access_token", "def getAccessToken( refresh_token):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=ApiJWTAuthentication.expirationTime_Access),\n 'refresh_token': refresh_token\n }\n jwttoken= jwt.encode(\n payload,\n ApiJWTAuthentication.secretKey_access,\n algorithm='HS256'\n )\n token=jwttoken.decode('utf-8')\n return {\"message\": \"success\", \"access_token\": token}\n except Exception as e:\n return {\"message\": \"exception\",\"Exception\": str(e)}", "def Access(self):\n if datetime.now() < self.access_exp:\n pass\n elif datetime.now() > self.access_exp and datetime.now() < self.refresh_exp:\n grant = 'refresh_token'\n self._postRequest(grant=grant)\n elif datetime.now() > self.refresh_exp:\n grant = 'authorization_code'\n self._getURLcode()\n self._postRequest(grant=grant)", "def create_access_token(oauth):\n #create parameters for API authorization\n\tredirect_uri = 'oob'\n\tparams = {'client_secret': oauth.client_secret,\n\t\t\t 'redirect_uri': redirect_uri,\n\t\t\t 'response_type': 'code'}\n\t#store the access code\n\turl = oauth.get_authorize_url(**params)\n\n\t#open a web browser to get access token and then store it via manual input\n\twebbrowser.open(url)\n\tcode = input('Enter code: ')\n\t#create credentials item\n\tstart_time = time.time()\n\t#create dictionary to hold credentials and store beginning time\n\tcredentials = {'token_time': start_time}\n\n\t#NEED TO ADD IN 'REFRESH TOKEN' FUNCTION HERE SOMEWHERE\n\t#\n\t\n\t#create parameters\n\tdata = {'code': code,\n\t\t\t'redirect_uri': redirect_uri,\n\t\t\t'grant_type': 'authorization_code'}\n\t#build the headers\n\theaders = oauth_headers(oauth)\n\t#create the raw access token\n\traw_access = oauth.get_raw_access_token(data=data, headers=headers)\n\t#parse the raw access token and add to credentials variable\n\tcredentials.update(access_parse(raw_access))\n\n\t#parse access token from 
credentials\n\taccess_token = credentials['access_token']\n\t#return access token\n\treturn access_token", "def request_access_token():\n\n # For Private application authentication, you must specifiy\n # grant_type=client_credentials and the service scope. For the \n # Content API, scope=contentapi\n post_data = {\"grant_type\": APP_CONFIG['GRANT_TYPE'],\n \"scope\": APP_CONFIG['SCOPE']}\n post_data_string = json.dumps(post_data)\n\n # Construct authentication string:\n # 1. Concatenate the client id, a colon character \":\", and the client secret into a single string\n # 2. URL encode the string from step 1\n # 3. Base64 encode the string from step 2\n authstr = to_native_string(\n b64encode(('%s:%s' % (APP_CONFIG['CLIENT_ID'], APP_CONFIG['CLIENT_SECRET'])).encode('utf-8'))).strip()\n\n # Construct an Authorization header with the value of 'Basic <base64 encoded auth string>'\n headers = {\n \"Content-Type\": \"application/json;charset=UTF-8\",\n \"Accept\": \"application/json\",\n \"Authorization\": \"Basic \" + authstr\n }\n\n r = s.post(APP_CONFIG['OAUTH_TOKEN_URL'], data=post_data_string, headers=headers, verify=(app.config['SSLVERIFY'] == 'True'))\n\n if r.status_code in (400,500):\n\n # Handle known error\n result = r.json() \n return jsonify(result)\n\n elif r.status_code == 200:\n\n result = r.json() \n access_token = result['access_token']\n token_type = result['token_type']\n timestamp = result.get('timestamp', None)\n expires_in = result.get('expires_in', None)\n token_expiry = None\n if expires_in is not None:\n token_expiry = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')\n token_expiry = token_expiry + datetime.timedelta(seconds=expires_in)\n token_expiry = token_expiry.isoformat()\n\n html = '<pre>';\n html += '<h3>Successfully retrieved access token!</h3>' \n html += '<pre>';\n html += 'access_token : ' + access_token\n html += '<pre>';\n html += 'token_type : ' + token_type\n html += '<pre>';\n html += 'expires_in (sec) : ' + str(expires_in)\n html += '<pre>';\n html += 'token_expiry : ' + token_expiry\n html += '<pre>';\n html += 'timestamp : ' + timestamp\n\n html += '<pre>';\n html += '<h3>Query Content API with Access Token</h3>'\n html += '<pre>';\n html += '<a href=\"/query-collection-myhuman?access_token='+access_token+'\">Query Collection: myhuman</a>'\n\n return html\n\n else:\n # Handle unknown error\n return (r.text, r.status_code, r.headers.items())", "def re_authenticate(self):\n url = URLS['token']\n data = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token,\n \"client_id\": self.client_id,\n \"client_secret\": self.client_secret\n }\n r = requests.post(url, data=data)\n r.raise_for_status()\n j = r.json()\n self.access_token = j['access_token']\n self.refresh_token = j['refresh_token']\n self._set_token_expiration_time(expires_in=j['expires_in'])\n return r", "async def _a_get_sp_token(self, resource: str) -> str:\n sp_token = self.oauth_tokens.get(resource)\n if sp_token and self._is_oauth_token_valid(sp_token):\n return sp_token[\"access_token\"]\n\n self.log.info(\"Existing Service Principal token is expired, or going to expire soon. 
Refreshing...\")\n try:\n async for attempt in self._a_get_retry_object():\n with attempt:\n async with self._session.post(\n resource,\n auth=HTTPBasicAuth(self.databricks_conn.login, self.databricks_conn.password),\n data=\"grant_type=client_credentials&scope=all-apis\",\n headers={\n **self.user_agent_header,\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n },\n timeout=self.token_timeout_seconds,\n ) as resp:\n resp.raise_for_status()\n jsn = await resp.json()\n jsn[\"expires_on\"] = int(time.time() + jsn[\"expires_in\"])\n\n self._is_oauth_token_valid(jsn)\n self.oauth_tokens[resource] = jsn\n break\n except RetryError:\n raise AirflowException(f\"API requests to Databricks failed {self.retry_limit} times. Giving up.\")\n except requests_exceptions.HTTPError as e:\n raise AirflowException(f\"Response: {e.response.content}, Status Code: {e.response.status_code}\")\n\n return jsn[\"access_token\"]", "def get_token(): \n \n # Token url\n token_endpoint = \"https://api.signicat.io/oauth/connect/token\"\n # Setting the grant type to client_credentials\n data = {'grant_type':'client_credentials', 'scope':'identify'}\n # Posting to token url with HTTP basic authentication\n token = requests.post(token_endpoint, data=data,allow_redirects=True, auth=(config.CLIENT_ID, config.CLIENT_SECRET))\n # Converting json string to json\n token_json = json.loads(token.text)\n \n # Returning the access_token\n return token_json['access_token']", "def post(self):\n current_user_id = get_jwt_identity()\n new_token = create_access_token(identity=current_user_id)\n response, status = {\n 'message': 'Access token was successfully refreshed',\n 'access_token': new_token\n }, 200\n return Response(dumps(response), status=status, mimetype='application/json')", "def refresh():\n current_user = get_jwt_identity()\n\n user = get_user_by_username(current_user)\n\n if not user:\n return make_response(CONST_LOGIN_MSG, 401, {\n 'WWW-Authenticate': f'Basic realm=\"{CONST_REALM_MSG}\"'})\n\n if user.is_admin:\n claims = {'is_admin': True}\n else:\n claims = {'is_admin': False}\n\n now = datetime.datetime.now(datetime.timezone.utc)\n access_expires = (now + jwt_config.access_expires).timestamp()\n refresh_expires = (now + jwt_config.refresh_expires).timestamp()\n\n response = {\n 'access_token': create_access_token(identity=current_user,\n user_claims=claims),\n 'access_expires': access_expires,\n 'refresh_expires': refresh_expires,\n 'refresh_token': create_refresh_token(identity=current_user),\n 'user': get_user_details(user)\n\n }\n return jsonify(response), 200", "def get_token(url, data):\n try:\n resp = requests.post(url, data)\n return resp.json()['access_token']\n except(KeyError, requests.exceptions.RequestException):\n return ''", "def get_access_token(self):\n access_token = self._auth_provider._get_auth_value()\n return access_token", "def get_access_token(self, path='/oauth/token', data={}):\n if data.keys():\n data.update(self.data)\n else:\n data = self.data.copy()\n data.update({\n 'grant_type': 'password',\n 'email': self.env.get('TESLA_EMAIL'),\n 'password': self.env.get('TESLA_PASSWORD')\n })\n try:\n req = requests.post(url='%s%s' % (self.url, path), data=data)\n # print(req.status_code)\n # print(req.content)\n self.token.update(req.json())\n except:\n raise 'invalid credentials'\n return self.token", "def refresh_token(self):\n now = timezone.now()\n limit = now - timedelta(days=20)\n # TODO: use expires_in from response data?\n print(self.token_refresh_date)\n print(limit)\n if 
self.token_refresh_date < limit:\n url = '{}refresh_access_token'.format(conf.INSTAGRAM_API)\n params = {\n 'grant_type': 'ig_refresh_token',\n 'access_token': self.token\n }\n response = requests.get(url, params=params)\n data = response.json()\n else:\n print('no need to get a fresch token yet')\n return\n if response.status_code == 200 and data:\n self.token = data.get('access_token')\n self.token_refresh_date = now\n self.token_ok = True\n self.save()\n elif settings.DEBUG:\n self.token_ok = False\n self.save()\n print('could not refresh token')\n return", "def getAccessToken():\n print(\"Getting access token...\")\n request = \"https://id.twitch.tv/oauth2/token?client_id=\" + client_id + \"&client_secret=\" + client_secret + \"&grant_type=client_credentials\"\n response = requests.post(request)\n try:\n response.raise_for_status() # Check status code\n jsonResponse = response.json()\n access_token = jsonResponse.get(\"access_token\")\n print(\"Got access token:\", access_token)\n return access_token\n except requests.exceptions.HTTPError as e:\n print(\"Failed on getAccessToken\")\n print(e)", "def _get_access_token(self) -> dict:\n demisto.debug('CDL - Fetching access token')\n try:\n oproxy_response = self._http_request('POST',\n '/cdl-token',\n json_data={'token': get_encrypted(self.refresh_token, self.enc_key)},\n timeout=(60 * 3, 60 * 3),\n retries=3,\n backoff_factor=10,\n status_list_to_retry=[400])\n except DemistoException as e:\n if re.match(BAD_REQUEST_REGEX, str(e)):\n demisto.error('The request to retrieve the access token has failed with 400 status code.')\n demisto.setIntegrationContext(self._cache_failure_times(demisto.getIntegrationContext()))\n raise e\n\n self.reset_failure_times()\n return oproxy_response", "def _refresh_access_token(self):\n # force https so that we don't send around tokens unsecurely\n url = 'https://{}/api/token/refresh'.format(urlparse(self.base_url).netloc)\n \n # paranoid: check again that we only send the token to https\n if urlparse(url).scheme != \"https\":\n msg = 'This should not happen, please file a bug report.'\n raise Exception(msg)\n\n if not self.jwt_refresh_token:\n raise FDSNUnauthorizedException(\"Unauthorized, authentication \"\n \"required.\", )\n\n # convert to json\n data = json.dumps({\"refresh\": self.jwt_refresh_token})\n # encode\n data = bytes(data, \"utf-8\")\n headers = {\"Content-Type\": \"application/json\"}\n html = urllib_request.Request(url, data=data, headers=headers)\n # decode('utf-8')\n try:\n result = urllib_request.urlopen(html).read().decode(\"utf-8\")\n dic = json.loads(result)\n self.jwt_access_token = dic['access']\n\n if self.debug:\n print('Got temporary access/refresh: {}/{}'.format(self.jwt_access_token, self.jwt_refresh_token))\n \n return\n except:\n raise FDSNUnauthorizedException(\"Unauthorized, authentication \"\n \"expired. 
Please set your credentials again.\", )", "def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")", "def update_access_token(self):\n self.token = util.prompt_for_user_token(self._username, scope,\n client_id=const.CLIENT_ID,\n client_secret=const.CLIENT_SECRET,\n redirect_uri=const.REDIRECT_URL)\n self._client = spotipy.Spotify(auth=self.token)", "def get_access_token(self,\n client_id=settings.OPENHUMANS_CLIENT_ID,\n client_secret=settings.OPENHUMANS_CLIENT_SECRET):\n # Also refresh if nearly expired (less than 60s remaining).\n delta = timedelta(seconds=60)\n if arrow.get(self.token_expires) - delta < arrow.now():\n self._refresh_tokens(client_id=client_id,\n client_secret=client_secret)\n return self.access_token", "def accessToken(self):\n if session.token and 'expires' in session.token:\n expires = session.token['expires']\n # reuse token until expiration\n if expires == 0 or expires > time.time():\n return session.token['access_token']\n\n code = request.vars.code\n\n if code:\n data = dict(client_id=self.env.client_id,\n client_secret=self.env.client_secret,\n redirect_uri=session.redirect_uri,\n code=code,\n grant_type='authorization_code'\n )\n\n open_url = None\n opener = self.__build_url_opener(self.env.token_url)\n try:\n open_url = opener.open(self.env.token_url, urlencode(data),\n self.socket_timeout)\n except urllib2.HTTPError, e:\n tmp = e.read()\n raise Exception(tmp)\n finally:\n if session.code:\n del session.code\n if session.redirect_uri:\n del session.redirect_uri\n\n if open_url:\n try:\n data = open_url.read()\n resp_type = open_url.info().gettype()\n #: try json style first\n if not resp_type or resp_type[:16] == 'application/json':\n try:\n tokendata = json.loads(data)\n session.token = tokendata\n except Exception, e:\n raise Exception(\"Cannot parse oauth server response %s %s\" % (data, e))\n #: try with x-www-form-encoded\n else:\n tokendata = cgi.parse_qs(data)\n session.token = \\\n dict([(k, v[-1]) for k, v in tokendata.items()])\n #: we failed parsing\n if not tokendata:\n raise Exception(\"Cannot parse oauth server response %s\" % data)\n #: set expiration\n if 'expires_in' in session.token:\n exps = 'expires_in'\n elif 'expires' in session.token:\n exps = 'expires'\n else:\n exps = None\n session.token['expires'] = exps and \\\n int(session.token[exps]) + \\\n time.time()\n finally:\n opener.close()\n return session.token['access_token']\n\n session.token = None\n return None", "def access_token(self):\n access_token = self.session.get('component_access_token')\n if access_token:\n if not self.expires_at:\n # user provided access_token, just return it\n return access_token\n\n timestamp = time.time()\n if self.expires_at - timestamp > 60:\n return access_token\n\n self.fetch_access_token()\n return self.session.get('component_access_token')", "def access_token(self):\n access = import_string(api_settings.ACCESS_TOKEN_CLASS)()\n\n # Use instantiation time of refresh token as relative timestamp for\n # access token \"exp\" claim. 
This ensures that both a refresh and\n # access token expire relative to the same time if they are created as\n # a pair.\n access.set_exp(from_time=self.current_time)\n\n no_copy = self.no_copy_claims\n for claim, value in self.payload.items():\n if claim in no_copy:\n continue\n access[claim] = value\n\n access.set_issuer()\n access.set_audience()\n\n # in order to encode token with new claims\n return str(access)", "def step_impl(context):\n fields = {\n 'grant_type': 'refresh_token',\n 'refresh_token': context.oauth.refresh_token,\n 'scope': context.vendor_config['auth']['scope'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def _request_token(self):\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret\n }\n\n response = self._http_request(\n method='POST',\n headers={'Content-Type': 'application/x-www-form-urlencoded'},\n full_url=self.auth_url,\n data=params\n )\n access_token = response.get('access_token')\n auth_header = {'Authorization': f'Bearer {access_token}'}\n return auth_header", "def exchange_token(self, code):\n access_token_url = OAUTH_ROOT + '/access_token'\n params = {\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'redirect_uri': self.redirect_uri,\n 'code': code,\n }\n resp = requests.get(access_token_url, params=params)\n if not resp.ok:\n raise MixcloudOauthError(\"Could not get access token.\")\n return resp.json()['access_token']", "def _get_auth_value(self):\n if not self._access_token:\n try:\n # get the local access token using gcloud\n cmd = ['gcloud', 'auth', 'print-access-token']\n if self._user_email:\n cmd.append(self._user_email)\n\n self._logger.debug(f\"get gcloud_access_token {cmd}\")\n p = Popen(cmd, stdout=PIPE, stderr=PIPE)\n gcloud_access_token, stderr = p.communicate()\n gcloud_access_token = gcloud_access_token.decode(\"utf-8\").rstrip()\n assert len(gcloud_access_token) > 0, f'get gcloud_access_token MUST have an access token {stderr}'\n self._logger.debug(f\"gcloud_access_token {gcloud_access_token}\")\n # authenticate to terra, ask for fence/accesstoken\n headers = {'Authorization': f'Bearer {gcloud_access_token}'}\n r = requests.get(self._terra_auth_url, headers=headers)\n assert r.status_code == 200, f'MUST respond with 200 {self._terra_auth_url} {r.text}'\n self._logger.debug(r.text)\n terra_access_token = r.json()\n assert len(terra_access_token['token']) > 0, 'MUST have an access token'\n assert len(terra_access_token['expires_at']) > 0, 'MUST have an expires_at '\n\n expires_at = datetime.fromisoformat(terra_access_token['expires_at'])\n now = datetime.now()\n assert expires_at > now, 'expires_at MUST be in the future'\n\n self._access_token = terra_access_token['token']\n\n if self._logger.level == logging.DEBUG:\n self._logger.debug(f'Terra access token expires in {str(expires_at - now)}')\n self._logger.debug(self._access_token)\n # add padding\n self._logger.debug(base64.b64decode(self._access_token.split('.')[1] + \"===\"))\n\n except Exception as e:\n raise AnVILAuthError(\n \"Failed to authenticate to {}\\n{}\".format(self._terra_auth_url, str(e))\n )\n\n return \"Bearer \" + self._access_token", "def refresh_token(self, path='/oauth/token', data={}):\n if data.keys():\n data.update(self.data)\n else:\n data = self.data.copy()\n data.update({\n 'grant_type': 'refresh_token',\n 'refresh_token': self.token.get('refresh_token')\n })\n try:\n 
self.token.update(requests.post(url='%s%s' % (self.url, path), data=data).json())\n except:\n raise 'unknown issue'\n return self.token", "def step_impl(context):\n fields = {\n 'grant_type': 'refresh_token',\n 'refresh_token': context.oauth.refresh_token,\n 'scope': context.vendor_config['versioned_auth']['scope'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)", "def get_agol_token():\n params = {\n 'client_id': app.config['ESRI_APP_CLIENT_ID'],\n 'client_secret': app.config['ESRI_APP_CLIENT_SECRET'],\n 'grant_type': \"client_credentials\"\n }\n request = requests.get(\n 'https://www.arcgis.com/sharing/oauth2/token',\n params=params\n )\n token = request.json()\n print(\"AGOL token acquired: {0}\".format(token))\n return token", "def _refresh_access_token(self):\n url = self._get_url(subpath=\"auth\", route=\"refresh\")\n refresh_token = get_refresh_token()\n payload = {\"refresh_token\": refresh_token}\n response = self.session.post(url, json=payload)\n response.raise_for_status()\n access_token = response.json()[\"access_token\"]\n set_process_execution_user_token(access_token)\n self.session.headers[\"authorization\"] = f\"Bearer {access_token}\"", "async def token(request: Request):\n return get_token()", "def refresh():\n print(\"refresh request\")\n old_token = flask.request.get_data()\n new_token = guard.refresh_jwt_token(old_token)\n ret = {'access_token': new_token}\n return ret, 200", "def psirt_get_token():\n creds = json.load(open('creds.json'))\n psirt_headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n psirt_payload = {\n 'client_id': creds['CLIENT_ID'],\n 'client_secret': creds['CLIENT_SECRET'],\n 'grant_type': 'client_credentials'\n }\n url = 'https://cloudsso.cisco.com/as/token.oauth2'\n response = requests.post(url=url, data=psirt_payload, headers=psirt_headers).json()\n logger.debug('access_token_check = ' + response['access_token'])\n return response['access_token']", "def get_access_token(credentials={}):\n client_id = credentials['client_id']\n client_secret = credentials['client_secret']\n\n if client_id == None or client_secret == None:\n return None\n\n # POST request for token\n response = requests.post('https://auth.domain.com.au/v1/connect/token', \n data = {'client_id':client_id,\n \"client_secret\":client_secret,\n \"grant_type\":\"client_credentials\",\n \"scope\":\"api_listings_read api_listings_write\",\n \"Content-Type\":\"text/json\"})\n token=response.json()\n expire = datetime.now() + timedelta(seconds=token['expires_in'])\n print (f'token expires at {expire}')\n\n access_token = {}\n access_token['access_token'] = token['access_token']\n access_token['expire_at'] = expire\n\n return access_token", "def get_access(access_token='',expire_time=0):\r\n #Get a new access token if it expires or is five minutes away from exp#iration\r\n if (expire_time==0) or (len(access_token)==0) or (time.time()-expire_time>=-300):\r\n\r\n #API needed to authorize account with refresh token\r\n auth_url = 'https://api.tdameritrade.com/v1/oauth2/token'\r\n\r\n #Data needed for token\r\n data = {'grant_type':'refresh_token',\r\n 'refresh_token':TDAuth_Info.refresh_token,\r\n 'client_id':TDAuth_Info.client_id}\r\n\r\n #Post the data to get the token\r\n auth_reply_json = requests.post(url=auth_url,data=data)\r\n auth_reply=auth_reply_json.json()\r\n\r\n #Now use the token to get account information\r\n access_token = auth_reply['access_token']\r\n 
expire_time=time.time()+auth_reply['expires_in']\r\n \r\n return (access_token,expire_time)", "def _requestSwiftToken(self):\n oauth_access_token = self.accessTokenManager.token\n c, r = http._get(\n self.auth_package.HUBIC_API+'account/credentials/',\n headers={\n 'Authorization': 'Bearer '+oauth_access_token\n }\n )\n result = json.loads(r.read())\n c.close()\n\n if r.status != 200:\n try:\n err =result\n err['code'] = r.status\n except Exception as e:\n err = {}\n\n raise Exception(\"Unable to get swift token, \"\n \"(%s)\"%str(err))\n\n self._endpoint = result['endpoint']\n self._token = result['token']\n self._expire = datetime.strptime( result['expires'][:-6], \"%Y-%m-%dT%H:%M:%S\" ) - timedelta(seconds=10)", "def refresh_token(self):\n # type: () -> Token\n token = self._request(\n self._client.refresh_token,\n self._token_endpoint,\n self.token[\"refresh_token\"],\n )\n self.set_token(token)\n return token", "def auth(self):\n return self.api(self.token)", "def get_auth_token():\n \n form_fields = {\n \"client_id\": client_id,\n \"client_secret\":client_secret,\n \"code\": code,\n \"redirect_uri\": \"http://www.stackprinter.com\"\n }\n form_data = urllib.urlencode(form_fields)\n results = __gae_fetch(url = 'https://stackexchange.com/oauth/access_token',\n method = urlfetch.POST, \n payload = form_data,\n headers={'Content-Type': 'application/x-www-form-urlencoded'})\n response = results.content\n return response", "def get_token(self):\n\t\tself.client.post('/api/v1/auth/signup', data=json.dumps(self.signup_user), content_type='application/json')\n\t\tresponse = self.client.post('/api/v1/auth/login', data=json.dumps(self.login_user), content_type='application/json')\n\t\tresp = json.loads(response.data.decode())\n\t\treturn 'Bearer ' + resp['access_token']", "def authorize_access_token(self, request, **kwargs):\n params = self.retrieve_access_token_params(request)\n params.update(kwargs)\n return self.fetch_access_token(**params)", "def refreshAccessToken(self, token):\r\n header = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded'}\r\n url = self._config['OAUTH2ENDPOINT']['huddleAccessTokenServer']\r\n\r\n body = {\"grant_type\": \"refresh_token\",\r\n \"client_id\": self._config['OAUTH2']['clientID'],\r\n \"refresh_token\": token.getRefreshToken()\r\n }\r\n\r\n return self._adapter.postRequest(url, header, parse.urlencode(body))", "def _fetch_access_token(self, url, data):\n logger.info('Fetching component access token')\n res = self._http.post(\n url=url,\n data=data\n )\n try:\n res.raise_for_status()\n except requests.RequestException as reqe:\n raise WeChatClientException(\n errcode=None,\n errmsg=None,\n client=self,\n request=reqe.request,\n response=reqe.response\n )\n result = res.json()\n if 'errcode' in result and result['errcode'] != 0:\n raise WeChatClientException(\n result['errcode'],\n result['errmsg'],\n client=self,\n request=res.request,\n response=res\n )\n\n expires_in = 7200\n if 'expires_in' in result:\n expires_in = result['expires_in']\n self.session.set(\n 'component_access_token',\n result['component_access_token'],\n expires_in\n )\n self.expires_at = int(time.time()) + expires_in\n return result" ]
[ "0.75720465", "0.7227093", "0.7181543", "0.713469", "0.7129814", "0.7058425", "0.7026847", "0.6986984", "0.6963477", "0.6925943", "0.6916204", "0.68907243", "0.6879203", "0.6810618", "0.67986274", "0.67908436", "0.67369515", "0.6724379", "0.67114365", "0.67013663", "0.6646587", "0.6622682", "0.6611903", "0.6597061", "0.6596593", "0.6592237", "0.6585486", "0.6585486", "0.65625304", "0.65593886", "0.65556526", "0.65421355", "0.65206116", "0.6517814", "0.650875", "0.6506074", "0.6501458", "0.6494883", "0.64843184", "0.64770126", "0.6444629", "0.64343613", "0.642658", "0.6420756", "0.64083534", "0.63887817", "0.63546413", "0.6351095", "0.63386196", "0.63303965", "0.63264024", "0.6322746", "0.6319251", "0.6314548", "0.63138866", "0.63128626", "0.63090605", "0.63080966", "0.6304899", "0.63028497", "0.6301507", "0.6296675", "0.6295325", "0.62951314", "0.62804514", "0.62751555", "0.6271771", "0.6268819", "0.6259845", "0.62316024", "0.6229261", "0.6217072", "0.6215387", "0.62032855", "0.61980176", "0.6177143", "0.61647815", "0.61591136", "0.61570054", "0.6156265", "0.6151859", "0.6132709", "0.6129443", "0.6124436", "0.6120354", "0.61173034", "0.6112854", "0.61113626", "0.61077076", "0.61058825", "0.6098795", "0.60983723", "0.609212", "0.6091597", "0.6088179", "0.608735", "0.6084248", "0.608141", "0.60786235", "0.60769004" ]
0.7200718
2
Stores athlete's id, first name, last name, weight and ftp into strava_athlete KV Store collection.
def kvstore_save_athlete(session_key, athlete_id, firstname, lastname, weight, ftp): # pylint: disable=too-many-arguments url = 'https://localhost:8089/servicesNS/nobody/TA-strava-for-splunk/storage/collections/data/strava_athlete/batch_save' headers = {'Content-Type': 'application/json', 'Authorization': f'Splunk {session_key}'} payload = [{"_key": athlete_id, "id": athlete_id, "firstname": firstname, "lastname": lastname, "fullname": firstname + " " + lastname, "weight": weight, "ftp": ftp}] helper.send_http_request(url, "POST", headers=headers, payload=payload, verify=False, use_proxy=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_athlete(response):\n name = response['athlete']['firstname'] + \" \" + response['athlete']['lastname']\n athlete = {\n 'id': response['athlete']['id'],\n 'name': name,\n 'access_token': response['access_token'],\n 'refresh_token': response['refresh_token'],\n 'expires_at': response['expires_at'],\n 'ts_activity': 0}\n return athlete", "def save(self):\n for t in self.ace_types:\n self.api.api_request(\"PUT\", self.url + t, data={t: self[t]})", "def log_strava_event(athlete_id, action):\n strava_event = StravaEvent(athlete_id=athlete_id, action=action, timestamp=datetime.utcnow())\n db.session.add(strava_event)\n db.session.commit()", "def store_in_db(offers):\n with open(OFFERS_FILE, 'w', encoding='utf8') as f:\n json.dump(offers, f, ensure_ascii=False, indent=4)", "def store_triples(self, triples):\n cursor = self.db.cursor()\n cursor.executemany(\"INSERT INTO triples VALUES (?, ?, ?)\", triples)\n self.db.commit()", "def store_elasticsearch(self, item):\n self.datastore.create(\n index=\"dminer-alphabay-{date}\".format(\n date=datetime.datetime.strptime(item[\"timestamp\"], \"%Y:%m:%d %H:%M:%S\").date().strftime(\"%Y-%m-%d\")\n ),\n doc_type= \"alphabay_listing\",\n body=item\n )", "def save(self):\n store = datastore.DataStore()\n store.connect()\n store.setup()\n store.put(self.as_doc())", "def save_credentials(self):\n Stores.account_store.append(self.register_stores())", "def store_all_to_database(self, session):\n\n description = 'Established in 1974, JSM is a family-owned provider of quality apartments. We offer a variety of units from studios to five bedrooms with every location benefitting from our award winning amenities, responsive 24 hour maintenance, and friendly property management staff. JSM Development began in Champaign, IL, and manages roughly 1,500 apartments and 450,000 sq/ft of commercial space. JSM has been a major contributor to the development of Campustown in Champaign and the East Campus area in Urbana at the University of Illinois. 
These popular locations are now home to major national retailers such as Urban Outfitters, Chipotle, Panera, Cold Stone Creamery, and Noodles & Co.'\n\n # Insert a JSM company instance into the database\n current_company = Company(\n name='JSM',\n baseurl='https://apartments.jsmliving.com/',\n description = description\n )\n session.add(current_company)\n\n # Iterate over the apartments, storing each in the database\n for apartment in self.apartment_data:\n logging.info(\"Inserting %s to database\", apartment['name'])\n new_apartment = Apartment(\n company=current_company,\n url=apartment['url'],\n name=apartment['name'],\n bedrooms=apartment['bedrooms'],\n bathrooms=apartment['bathrooms'],\n price=apartment['price'],\n leasing_period=apartment['leasing_period'],\n description=apartment['description'],\n address=apartment['address'],\n lat=apartment['lat'],\n lng=apartment['lng']\n )\n session.add(new_apartment)\n\n # Insert images for the given apartment\n for index, image_url in enumerate(apartment['image_urls']):\n new_image = Image(\n url=image_url,\n apartment_id=new_apartment.id,\n type=0,\n image_index=index\n )\n session.add(new_image)\n\n # Connect images to apartment\n new_apartment.images.append(new_image)\n\n # Insert floorplan image, if it exists\n if apartment['floorplan_url'] != 0:\n new_floorplan_image = Image(\n url=apartment['floorplan_url'],\n apartment_id=new_apartment.id,\n type=1,\n image_index=len(apartment['image_urls'])\n )\n session.add(new_floorplan_image)\n\n # Connect images to apartment\n new_apartment.images.append(new_floorplan_image)\n\n # Insert amenities for the given apartment\n for amenity in apartment['amenities']:\n new_amenity = Amenity(\n apartment_id=new_apartment.id,\n amenity=amenity\n )\n session.add(new_amenity)\n\n # Connect amenity to apartment\n new_apartment.amenities.append(new_amenity)\n\n # Write all queries to the database\n session.commit()", "def save(self):\n self.lock()\n\n trader = self.strategy.trader()\n\n for trade in self.trades:\n t_data = trade.dumps()\n ops_data = [operation.dumps() for operation in trade.operations]\n\n # store per trade\n Database.inst().store_user_trade((trader.name, trader.account.name, self.instrument.market_id,\n self.strategy.identifier, trade.id, trade.trade_type, t_data, ops_data))\n\n # dumps of regions\n trader_data = {}\n regions_data = [region.dumps() for region in self.regions]\n\n Database.inst().store_user_trader((trader.name, trader.account.name, self.instrument.market_id,\n self.strategy.identifier, self.activity, trader_data, regions_data))\n\n self.unlock()", "def get_athlete(token):\n url = \"https://www.strava.com/api/v3/athlete\"\n params = {'access_token': token}\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response", "def Store(self):\n\n if FLAGS.verbose or FLAGS.verbose_writes:\n print 'Writing track:'\n for key in sorted(self.persistant):\n print ' %s = %s' %(key, self.persistant[key])\n\n if not self.persistant:\n return\n \n try:\n self.db.WriteOneRow('tracks', 'id', self.persistant)\n except MySQLdb.Error, (errno, errstr):\n if errno != 1064:\n raise TrackException(self.db, 'Could not store track %s: %s \"%s\"'\n %(self.persistant['id'], errno, errstr))\n except sql.FormatException, e:\n raise e\n except Exception, e:\n raise TrackException(self.db, 'Could not store track: %s: \"%s\" (%s)'\n %(self.persistant['id'], e, type(e)))", "def persist_db(database, tweets):\n log.debug(\"{} tweets to db\".format(len(tweets)))\n\n for tweet in tweets:\n 
tweet['_id'] = tweet['id_str']\n database.update(tweets)", "def save_in_db(self):\n self.sql_database.table_name = self.table_db\n self.sql_database.db_name = self.db\n if self.sql_database.insert_item(text_path=self.path, word_first=self.word_1.get(),\n word_second=self.word_2.get(),\n word_third=self.word_3.get(), word_fourth=self.word_4.get(),\n word_fifth=self.word_5.get()):\n msg.showinfo(message=\"Done\")", "def _persist(self):\n trunk.set(self.uuid, self.json)", "def save_favorited_trail(hike_id, user_id):\n\n trail = Trail(hike_id = hike_id, user_id = user_id)\n\n db.session.add(trail)\n db.session.commit()\n\n return (trail)", "def store_tweet(tweet, keyword):\n\tglobal _docs_to_store\n\tdoc = {'tweet': tweet, 'keyword': keyword, 'timestamp': int(time.time())}\n\t_docs_to_store.append(doc)\n\tif len(_docs_to_store) == UPDATE_CHUNK:\n\t\tcloudant.update(_docs_to_store)\n\t\t_docs_to_store = []", "def add_to_db(ark_obj):\n session = Session()\n session.add(ark_obj)\n session.commit()\n session.close()", "def _athlete_endpoint(self, athlete):\n return '{host}{athlete}'.format(\n host=self.host,\n athlete=quote_plus(athlete)\n )", "def save_aliment(self, aliment_name):\n aliment = Aliment.objects.get(name=aliment_name)\n self.aliments_pref.add(aliment)", "def save(self, db):\n db.googleResults.insert_one(\n {\n \"searchQuery\": self.search_query,\n \"title\": self.title,\n \"link\": self.link,\n \"subtext\": self.subtext,\n \"searchterms\" : self.searchterms, # array\n \"queryTime\": datetime.datetime.now(),\n \"details\": self.link_scripts\n }\n )", "def save_data(db, dict_key, url, data_to_store):\n if dict_key not in db:\n db[dict_key] = []\n data = db[dict_key]\n data.append({\n 'url': url,\n 'data': data_to_store,\n })\n db[dict_key] = data", "def save(self, db):\n db.query(\n \"INSERT INTO fellows (name, accomodation)\\\n VALUES(:name, :accomodation)\",\n name=self.name, accomodation=self.wants_accomodation\n )", "def getDBAthletesUsingAPI():\n athletes_response = api_requester.getAthletes()\n\n # Parse response into Athlete db objects\n athletes_to_return = list()\n for athlete in athletes_response.json():\n athletes_to_return.append(\n getAthleteObjectFromJSON(athlete))\n\n return athletes_to_return", "def save(self):\n response = settings.database.put_item(Item=self.to_dict())\n raise_for_response(response)", "def save(cls):\n playerdata = getAttributes(cls)\n Data.object_dump(playerdata, \"savedata.dat\")\n del playerdata", "def store_offers(offers):\n reducer((serialize, store_in_db), offers)", "def saveTeachersData():\n with open(\"TeacherData.txt\",\"wb\") as teacherData:\n pickle.dump(teacherEntities,teacherData)", "def _save_tally_share(\n self, guardian_id: GuardianId, guardians_tally_share: DecryptionShare\n ) -> None:\n self._tally_shares[guardian_id] = guardians_tally_share", "def store_trades(self, trades):\n trades_file = self.current_trades_path()\n fo = trades_file.open(\"wb\")\n LOGGER.info(f\"storing {len(trades)} trades on disk for league {self.league_id}\")\n pickle.dump(trades, fo)", "def store_if_new(self, act_list):\n self.create_connection()\n c = self.get_db_cursor()\n for act in act_list:\n strava_id = act.get_strava_id()\n ride_data = (strava_id, act.get_athlete(), act.get_name(),\n act.get_gmt_date(), act.get_elapsed_time(), act.get_distance(),\n act.get_elevation(), act.get_ride_type(), act.get_trainer_ride())\n sql = 'INSERT INTO rides VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) 
'\n sql += ' WHERE NOT EXISTS(SELECT id FROM rides WHERE rides.id = %s' % strava_id\n c.execute(sql, ride_data)\n self.commit_and_close()", "def persist_trie(self, trie):\n pickle.dump(trie, open(self.persist, \"wb\"))", "def save_db(self) -> None:", "def put_amenity(amenity_id):\n\n data = storage.get(Amenity, amenity_id)\n\n if not data:\n abort(404)\n if not request.is_json:\n abort(400, description=\"Not a JSON\")\n\n my_req = request.get_json()\n\n for k, v in my_req.items():\n if k != \"id\" and k != \"created_at\" and k != \"updated_at\":\n setattr(data, k, v)\n\n storage.save()\n return data.to_dict(), 200", "def deauthorize_athlete(athlete_id):\n athlete = StravaAthlete.query.filter_by(athlete_id=athlete_id).first()\n if athlete:\n athlete.is_active = 0\n athlete.last_updated = datetime.utcnow()\n db.session.commit()\n log_strava_event(athlete_id, \"Deauthorize\")\n return True\n\n current_app.logger.error('Athlete {} does not exist'.format(athlete_id))\n return False", "def save(self, store):\n self.db.query(f\"\"\"\n INSERT INTO {self.table} (id, name)\n VALUES (:id, :name)\n ON DUPLICATE KEY UPDATE name = :name\n \"\"\", **vars(store))\n\n if not store.id:\n store.id = self.get(name=store.name).id\n return store", "def addAnimalsToDb(petsInfo):\n \n for pet in petsInfo['petfinder']['pets']['pet']: \n \n #Parsing the json file to get individual information\n \n animal = pet['animal']['$t'] \n name = pet['name']['$t']\n pet_id = pet['id']['$t']\n desc = pet['description']['$t']\n age = pet['age']['$t']\n breeds = pet['breeds']['breed']\n breed = \"\"\n # because some pets have multiple breed stored in a list\n try: \n breed = breeds['$t']\n except TypeError:\n for x in breeds:\n breed += x['$t'] + \", \"\n \n status = pet['status']['$t']\n sex = pet['sex']['$t']\n size = pet['size']['$t']\n mix = pet['mix']['$t']\n match = \"Yes\"\n features = pet['options']['option']\n feature = \"\"\n # because some pets have multiple breed stored in a list\n try:\n feature = features['$t']\n except TypeError: \n for x in features:\n feature += x['$t'] + \", \"\n photo = pet['media']['photos']['photo'][2]['$t']\n if petExist(animal, pet_id): \n firstSeen = Animal.objects.get(pk = pet_id).firstSeen\n pet = Animal(animal = animal, petId = pet_id, petName = name, \n petDescription = desc, petAge = age, \n petBreed = breed, petStatus = status, \n petSex = sex, petSize = size, \n petMix = mix, petFeatures = feature, \n lastSeen = timezone.now(), \n firstSeen = firstSeen,match = match, petPhoto = photo) \n \n pet.save()\n \n# if the pet doesn't exist, add the pet. 
\n else: \n pet = Animal(animal = animal, petId = pet_id, petName = name, \n petDescription = desc, petAge = age, \n petBreed = breed, petStatus = status, \n petSex = sex, petSize = size, \n petMix = mix, petFeatures = feature, \n lastSeen = timezone.now(), \n firstSeen = timezone.now(), match = match, petPhoto = photo) \n \n pet.save()\n updateTwitterStatus(animal, name, pet_id)\n\n print(\"A new %s has been added.\", animal)\n \n #pprint.pprint(petsInfo) \n print(\"Pet information added to database.\")", "def save(self):\n if not os.path.exists(self.dictionary_save_path) and self.dictionary_save_path != \"\":\n os.makedirs(self.dictionary_save_path)\n with open(self.dictionary_save_path + \"dictionary\", 'w') as f:\n f.write(\"{}\\n\".format(len(self.dictionary)))\n\n for word in self.dictionary:\n f.write(\"{}\\t{}\\n\".format(self.dictionary[word]['id'], word))", "def save(self, db):\n db.query(\n \"INSERT INTO staff (name) VALUES(:name)\",\n name=self.name\n )", "def __extract_athletes(self):\n for ath in self.athletes:\n if dl.get_squad_id(ath) not in self.data_engine:\n # Athlete has no squad. Just skip over it.\n continue\n\n team_criteria = \\\n self.data_engine[dl.get_squad_id(ath)][\"team_criteria\"]\n\n if not team_criteria:\n # Probably already generated a team for athlete[\"squad_id\"]\n continue\n\n if athlete_match(ath, make_athlete_criteria(team_criteria)):\n self.__update_team_criteria(team_criteria, ath)\n yield ath", "def save(self):\n users = User.getall()\n users[self.username] = dict(self)\n return self.db().put(self.udb, users)", "def store(self, key, value):\n pass", "def save(self, data):\n data['id'] = self.id\n\n self.db.append(data)", "def storeQuiz():\n a1 = request.form['a1']\n a2 = request.form['a2']\n new_entry = Entry(a1 , a2)\n db.create_all()\n db.session.add(new_entry)\n db.session.commit()\n temp ={}\n temp['status']=(type(new_entry)==Entry)\n return jsonify(temp)", "def store(self):\n\n pass", "def create_ta_list(ta_list):\n with open(ta_list, \"r\") as ta_file:\n user_list = ta_file.readlines()\n add_to_db(\"ta_list\", user_list[1:])\n add_to_online_db(\"online_ta\", user_list[1:])\n add_to_rating_db(\"ta_rating\", user_list[1:])", "def store_data(self, data):\n self.data = data\n # HERE\n the_main_dict = {**self.user_data(), **self.entities_data(), **self.extract_relevant(), **self.locate(),\n **self.calculate_days(), **self.clean_user_desc()}\n # The below is the reason that the table creation must be written in alphabetical order. 
This is simpler than\n # writing the complex joins that would otherwise be needed.\n my_keys_list = sorted(the_main_dict.keys())\n my_items = list(map(lambda x: str(the_main_dict[x]).replace(\"'\", ''), my_keys_list))\n try:\n # Unpacks the items into an insert statement for the SQLite table\n self.conn.execute(\"INSERT INTO {0} VALUES('{1}','{2}','{3}','{4}','{5}','{6}','{7}','{8}','{9}',\"\n \"'{10}','{11}','{12}','{13}','{14}','{15}','{16}','{17}','{18}','{19}','{20}',\"\n \"'{21}','{22}','{23}','{24}','{25}','{26}','{27}','{28}')\".format(self.table, *my_items))\n self.limiting += 1\n return 0\n except sqlite3.IntegrityError:\n return 1", "def post_amenity2(place_id, amenity_id):\n place = storage.get(Place, place_id)\n if place is None:\n abort(404)\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n if amenity in place.amenities:\n return (jsonify(amenity.to_dict()), 200)\n place.amenities.append(obj)\n storage.save()\n return (jsonify(amenity.to_dict(), 201))", "def post_amenity_obj():\n dic = {}\n dic = request.get_json(silent=True)\n if dic is None:\n abort(400, \"Not a JSON\")\n if \"name\" not in dic.keys():\n abort(400, \"Missing name\")\n new_ame = amenity.Amenity()\n for k, v in dic.items():\n setattr(new_ame, k, v)\n storage.new(new_ame)\n storage.save()\n return jsonify(new_ame.to_dict()), 201", "def store(self, key, obj):\n attrs = self.load_attrs()\n attrs[key] = obj\n self.store_attrs(attrs)", "def store_attrs(self, attrs):\n self.get_attr().SetObject(dumps(attrs), False)", "def save_agea(self, data, suffix=''):\n self.title = data.get('title', self.title)\n self.question = data.get('question', self.question)\n self.raw_question = data.get('raw_question',self.raw_question)\n self.raw_solution= data.get('raw_solution',self.raw_solution)\n self.max_attempts = data.get('max_attempts', self.max_attempts)\n # Validate points before saving\n points = data.get('points', self.points)\n # Check that we are an int\n try:\n points = int(points)\n except ValueError:\n raise JsonHandlerError(400, '\"Score to be graded out of\" must be an integer')\n\n # Check that we are positive\n if points < 0:\n raise JsonHandlerError(400, '\"Score to be graded out of\" must be a positive integer')\n self.points = points\n\n # Validate weight before saving\n \n weight = data.get('weight', self.weight)\n # Check that weight is a float.\n if weight:\n try:\n weight = float(weight)\n except ValueError:\n raise JsonHandlerError(400, 'Weight must be a decimal number')\n # Check that we are positive\n if weight < 0:\n raise JsonHandlerError(\n 400, 'Weight must be a positive decimal number'\n )\n self.weight = weight \n submission = self.get_question()\n if submission:\n uploaded_submission = submission.get(\"question\").get(\"filename\", None)\n if uploaded_submission:\n question = self._question_storage_path(self.raw_question['sha1'], self.raw_question['filename'])\n question = os.path.join(IMAGEDIFF_ROOT, question)\n actual=total_marks(question)\n if actual < points:\n raise JsonHandlerError(400, '\"Score to be graded out of\" should be less than equal to the maximum attainable score for the question paper you uploaded')\n \n self.save()\n log.info(self)\n \n #self.weight = data.get('weight', self.max_score())", "def save_storage(store_set):\n with open('storage.txt', 'w') as file_save:\n for elem in store_set:\n file_save.write(elem + \" \")", "def save():", "def store(self, key, headers, value):", "def store(self, key: object, value: object):\n 
self._user_data.update({key: value})", "def _encode_and_store_(self, latitude, longitude, ID):\n hash = geohash.encode(latitude=latitude, longitude=longitude)\n self.storage[hash] = ID\n self.points_by_id[ID] = (latitude, longitude)", "def add_record(self, data):\n if self.current_trip is None:\n print \"no trip to add data\"\n return\n self.current_trip.store_data(data)", "def save_data(self, record):\n self.dbm.addRecord(record)", "def store(self, filename):", "def save(self):\n if self._mode == 'dict':\n self._mode = 'shelve'\n self._shelve_mode = 'c'\n\n for key, value in self._dict.items():\n ckey = copy.copy(key)\n cvalue = copy.copy(value)\n self.add(ckey, cvalue, 'shelve', check=False)\n\n self._dict.clear()\n\n if self._mode == 'dict':\n self._mode = 'dict'\n self._shelve_mode = 'r'", "def put(self, id, data):\n assert isinstance(data, dict)\n self._shelf[str(id)] = data", "def __store(self):\n # connection strings are accessed directly by dbo\n dbo = dbo.connect()\n dbo.save(self.__to_dict())\n # not supre important to call but a nice idea\n dbo.destroy()", "def save(self):\n if not self.id:\n self.id = uuid4()\n DataStore.add_instance(self)", "def save_book(self):\n db.session.add(self)\n db.session.commit()", "def store_classifier(conn, classifier_id, classifier_name, classifier_type, classifier):\n # Serialize the classifier.\n classifier = cPickle.dumps(classifier)\n # Create the table if it does not exist.\n _create_table(conn, TABLE_NAME_CLASSIFIERS, TABLE_COLS_CLASSIFIERS)\n # Store classifier in the database.\n table_entry = (classifier_id, classifier_name, classifier_type, classifier)\n _store_entry_in_table(conn, TABLE_NAME_CLASSIFIERS, table_entry)", "def store(self, item):\n cursor = self.conn.cursor()\n # Store the item\n if item:\n cursor.execute(*self._build_insert(item, 'items'))\n for file_ in item.files:\n cursor.execute(\"\"\"insert into files (filename, item_id)\n values (?, ?)\"\"\", (file_, item.kg_id))\n self.conn.commit()\n self.logger.info(\"Succesfully stored item %d\" % item.kg_id)", "def save_data(self):\n db.session.add(self)\n db.session.commit( )", "def features_to_db(track_uri):\n data = spotify.audio_features(track_uri)[0]\n audio_features = AudioFeatures(**data)\n DB.session.add(audio_features)\n # id, uri, danceability, energy, key, loudness, mode,\n # speechiness, acousticness, instrumentalness,\n # liveness, valence, tempo, type", "def create_hike(name, area_name, city_name, state_name, country_name, geoloc, difficulty_level, miles, features, avg_rating):\n\n hike = Hike(\n name=name,\n area_name=area_name,\n city_name=city_name,\n state_name=state_name,\n country_name=country_name,\n geoloc=geoloc,\n difficulty_level=difficulty_level,\n miles=miles,\n features=features,\n avg_rating=avg_rating\n )\n\n db.session.add(hike)\n db.session.commit()\n\n return hike", "async def store(self, api_name: str, schema: Dict, ttl_seconds: int):\n raise NotImplementedError()", "def store_anchors(base_url, anchors, path=\"logs/\"):\n\n url_filename = url_to_filename(base_url)\n filename = f\"{path}ANCHORS-{url_filename}.txt\"\n\n if os.path.isfile(filename):\n with open(filename, \"rb\") as fp:\n all_anchors = pickle.load(fp)\n all_anchors.append(anchors)\n else:\n all_anchors = anchors\n\n with open(filename, \"wb\") as fp:\n pickle.dump(all_anchors, fp)", "def _save_api_feature_data(self, dataset_id):\n dataset = Dataset.objects.get(id=dataset_id)\n json_data = Facade.prepare_dataset_data(dataset) # normalize data and convert it to json\n 
dataset.normalized_feature_JSON = json_data\n dataset.save() # save normalized data in models", "def put_amenity(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n body_request = request.get_json()\n if not body_request:\n abort(Response(\"Not a JSON\", 400))\n for key, value in body_request.items():\n if key not in [\"id\", \"created_at\", \"updated_at\"]:\n setattr(amenity, key, value)\n storage.save()\n return(jsonify(amenity.to_dict()), 200)", "def put(self, key): # TODO check and use key if given\n update_properties = ['name', 'description', 'avatar_url', 'locations','key']\n new_data = _.pick(request.json, update_properties)\n #new_data['added_by'] = ndb.Key(urlsafe=new_data['added_by'])\n # get trip key (if new)\n\n print \"UPDATE TRIP\"\n print new_data\n print \"key\"\n print new_data.get('key')\n\n if new_data.get(\"key\",\"new\") == \"new\" or new_data.get(\"key\") == \"add\" :\n trip_key = model.Trip.create_or_update(urlsafe=True, parent=auth.current_user_key(), name=new_data['name'])\n new_data['key'] = trip_key.urlsafe();\n else:\n trip_key = ndb.Key(urlsafe=new_data.get(\"key\"))\n\n\n # prepare data\n loc_keys = []\n for loc in new_data['locations']:\n user_keys = []\n for user in loc['fellow_travelers']:\n if user.get(\"new\",True):\n user_keys.append(model.FellowTraveler.create_or_update(urlsafe=True,\n parent=auth.current_user_key(),\n added_by=auth.current_user_key(),\n **user))\n else:\n user_keys.append(ndb.Key(urlsafe=user.get(\"key\",False)))\n loc['fellow_travelers'] = user_keys\n start_date = datetime.datetime.strptime(loc['start_datetime'][0:19],\"%Y-%m-%dT%H:%M:%S\") if loc.get('start_datetime',False) else None\n end_date = datetime.datetime.strptime(loc['end_datetime'][0:19],\"%Y-%m-%dT%H:%M:%S\") if loc.get('end_datetime',False) else None\n print \"Dates are:\"\n print start_date\n print end_date\n loc['start_datetime'] = start_date\n loc['end_datetime'] = end_date\n loc['trip'] = trip_key\n if loc['geo']:\n loc['geo'] = ndb.GeoPt(lat=loc['geo']['lat'],lon=loc['geo']['lng'])\n print \"SAVE EXPENSES\"\n loc['expenses'] = [{\"amount\":float(e.get('amount',0)),\n \"note\":e.get('note',\"\"),\n \"type\":ndb.Key(urlsafe=e['type']) if e.get('type',False) else None} for e in loc['expenses']]\n print loc['expenses']\n if 'trans_start' in loc:\n loc['trans_start']['waypoints'] = [ndb.GeoPt(lat=m.get('lat'),lon=m.get('lng')) for m in loc['trans_start'].get('waypoints',[])]\n if 'trans_end' in loc:\n loc['trans_end']['waypoints'] = [ndb.GeoPt(lat=m.get('lat'),lon=m.get('lng')) for m in loc['trans_end'].get('waypoints',[])]\n #loc['trans_end']['waypoints'] = [ndb.GetPt(lat=m.lat,lon=m.lng) for m in loc.trans_end.get('waypoints',[])[])]\n if loc.get(\"new\",True):\n print \"Save/update location\"\n loc_keys.append(model.Location.create_or_update(urlsafe=True,\\\n parent=auth.current_user_key(), **loc))\n else:\n print \"Don't save/update location (not changes)\"\n loc_keys.append(ndb.Key(urlsafe=loc.get(\"key\",False)))\n new_data['locations'] = loc_keys\n key = model.Trip.create_or_update(urlsafe=True, parent=auth.current_user_key(), **new_data)\n properties = model.Trip.get_public_properties()\n return key.get().to_dict(include=properties)", "def save(self, db):\n db.query(\n \"INSERT INTO rooms (name, type) VALUES(:name, :type)\",\n name=self.name, type='L'\n )", "def load_anvl_file_into_db(batch_file, engine=engine, verbose=True):\n if not db_exists():\n create_db(engine)\n\n arks = load_anvl_as_dict(batch_file)\n session = 
Session()\n\n for i, a in enumerate(arks):\n if verbose:\n sys.stdout.write(f\"\\r{i}\")\n sys.stdout.flush()\n\n ark_obj = Ark(\n ark=a.get(\"ark\", \"\"),\n target=a.get(\"_target\", \"\"),\n profile=a.get(\"_profile\", \"\"),\n status=a.get(\"_status\", \"\"),\n owner=a.get(\"_owner\", \"\"),\n ownergroup=a.get(\"_ownergroup\", \"\"),\n created=int(a.get(\"_created\", 0)),\n updated=int(a.get(\"_updated\", 0)),\n export=not a.get(\"_export\") == \"no\",\n dc_creator=a.get(\"dc.creator\", \"\"),\n dc_title=a.get(\"dc.title\", \"\"),\n dc_type=a.get(\"dc.type\", \"\"),\n dc_date=a.get(\"dc.date\", \"\"),\n dc_publisher=a.get(\"dc.publisher\", \"\"),\n erc_when=a.get(\"erc.when\", \"\"),\n erc_what=a.get(\"erc.what\", \"\"),\n erc_who=a.get(\"erc.who\", \"\"),\n replaceable=a.get(\"iastate.replaceable\") == \"True\" or input_is_replaceable(a[\"dc.title\"])\n )\n\n session.add(ark_obj)\n\n session.commit()\n session.close()", "def save_attributes(self):\n Credentials.credentials_list.append(self)", "def grant_teacher(account_id, availability):\n query = 'INSERT INTO teacher VALUES( %s, %s);'\n args = (account_id, availability)\n database.connection.save_data(query, args)", "def _store_agenda_items(self, agenda_dict, agenda_saved):\n pass", "def post_amenity():\n\n if not request.is_json:\n abort(400, description=\"Not a JSON\")\n data = request.get_json()\n\n if \"name\" not in data:\n abort(400, description=\"Missing name\")\n\n obj = Amenity(**data)\n storage.new(obj)\n storage.save()\n return obj.to_dict(), 201", "def store_user(profile, attrs={}):\n db = connection()\n doc_ref = db.document(f'users/{profile.username}')\n profile_dict = {\n 'userid': profile.userid,\n 'full_name': profile.full_name,\n 'is_verified': profile.is_verified,\n 'biography': profile.biography,\n 'followees': profile.followees,\n 'followers': profile.followers,\n 'mediacount': profile.mediacount,\n 'stored_at': datetime.now()\n }\n\n valid_attr_keys = ['is_artist', 'is_gallery',\n 'price_max', 'price_min', 'price_avg']\n valid_attrs = {k: attrs.get(k) for k in valid_attr_keys}\n\n return doc_ref.set({**profile_dict, **valid_attrs})", "def _store(self):\n database.mongo_store_object_by_label(self, self.label)", "def save(self, handler, name):", "def save_shelf(self, shelf_name, data):\r\n shelf_path = os.path.join(self.full_dir, shelf_name)\r\n with shelve.open(shelf_path, 'c') as shelf:\r\n shelf['data'] = data", "def store(self, idCust, idBook, flag, id):\n allR=self.__loadFromFile()\n\n rt=Rent( idBook,idCust, flag, id)\n if rt in allR:\n raise RepositoryExceptionRent(\"\\n Duplicated id \\n\".upper())\n\n\n allR.append(rt)\n self.__storeToFile(allR)", "def save(self):\n self.db.commit()", "def create_amenity_place(place_id, amenity_id):\n place = storage.get(Place, place_id)\n if place is None:\n abort(404, description=\"Not Found\")\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404, description=\"Not Found\")\n\n if amenity in place.amenities:\n return jsonify(amenity.to_dict())\n\n place.amenities.append(amenity)\n storage.new(place)\n storage.save()\n return jsonify(amenity.to_dict())", "def save(self, db):\n db.query(\n \"INSERT INTO rooms (name, type) VALUES(:name, :type)\",\n name=self.name, type='O'\n )", "def save(self):\n self.wallet.storage.put(\n \"slp_data_version\", None\n ) # clear key of other older formats.\n data = {\n \"validity\": self.validity,\n \"token_quantities\": {\n k: [[v0, v1] for v0, v1 in v.items()]\n for k, v in self.token_quantities.items()\n },\n 
\"txo_byaddr\": {\n k.to_storage_string(): list(v) for k, v in self.txo_byaddr.items()\n },\n \"version\": self.DATA_VERSION,\n }\n self.wallet.storage.put(\"slp\", data)", "def put(self):\n return super(TenderAwardDocumentResource, self).put()", "def put_amenity_obj(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n if not request.get_json():\n return make_response(jsonify({'error': 'Not a JSON'}), 400)\n for key, value in request.get_json().items():\n setattr(amenity, key, value)\n amenity.save()\n return jsonify(amenity.to_dict())", "def save(self):\n\n toStore = {\n key: obj.to_dict()\n for key, obj in FileStorage.__objects.items()\n }\n with open(FileStorage.__file_path, 'wt') as file:\n json.dump(toStore, file)", "def example_data():\n\n # In case this is run more than once, empty out existing data\n User.query.delete()\n Location.query.delete()\n\n # Add sample users and locations\n\n trinity = User(fname='Trinity', email='[email protected]',\n username='questionit', password='l0lagent')\n neo = User(fname='Neo', email='[email protected]',\n username='neo', password='l0lagent')\n\n tacorea = Location(yelp_id='tacorea-san-francisco', name='Tacorea',\n latitude='37.7749', longitude='122.3392',\n address='809 Bush St, San Francisco, CA 94108',\n yelp_url='[email protected]', pic='pic')\n\n db.session.add_all([trinity, neo, tacorea])\n db.session.commit()", "def save(self):\n return api.put([self])", "def store_all(self, store):\n for uid in self._status.list():\n distribution = self._status.get(uid)\n name = self._status.get_name(uid)\n\n # Store data\n store.store(uid, {'name': name, 'distribution': distribution})", "def amazon_accounts():\n import json\n from security_monkey.datastore import Account, AccountType\n from os.path import dirname, join\n\n data_file = join(dirname(dirname(__file__)), \"data\", \"aws_accounts.json\")\n data = json.load(open(data_file, 'r'))\n\n app.logger.info('Adding / updating Amazon owned accounts')\n try:\n account_type_result = AccountType.query.filter(AccountType.name == 'AWS').first()\n if not account_type_result:\n account_type_result = AccountType(name='AWS')\n db.session.add(account_type_result)\n db.session.commit()\n db.session.refresh(account_type_result)\n\n for group, info in data.items():\n for aws_account in info['accounts']:\n acct_name = \"{group} ({region})\".format(group=group, region=aws_account['region'])\n account = Account.query.filter(Account.identifier == aws_account['account_id']).first()\n if not account:\n app.logger.debug(' Adding account {0}'.format(acct_name))\n account = Account()\n else:\n app.logger.debug(' Updating account {0}'.format(acct_name))\n\n account.identifier = aws_account['account_id']\n account.account_type_id = account_type_result.id\n account.active = False\n account.third_party = True\n account.name = acct_name\n account.notes = info['url']\n\n db.session.add(account)\n\n db.session.commit()\n app.logger.info('Finished adding Amazon owned accounts')\n except Exception as e:\n app.logger.exception(\"An error occured while adding accounts\")\n store_exception(\"manager-amazon-accounts\", None, e)", "def save(self, a_s):\n\n # General action data.\n a_s = {\n 'description': a_s.description,\n 'num_neurons': a_s.N,\n 'tau_m': (a_s.taum/ms).tolist(),\n 'tau_pre': (a_s.taum/ms).tolist(),\n 'tau_post': (a_s.taupost/ms).tolist(),\n 'tau_c': (a_s.tauc/ms).tolist(),\n 'tau_dop': (a_s.tauDop/ms).tolist(),\n 'tau_e': (a_s.taue/ms).tolist(),\n 'Ee': (a_s.Ee/mV).tolist(),\n 
'vt': (a_s.vt/mV).tolist(),\n 'vr': (a_s.vr/mV).tolist(),\n 'El': (a_s.El/mV).tolist(),\n 'F': (a_s.F/Hz).tolist(),\n 'gmax': a_s.gmax,\n 'dA_pre': a_s.dApre,\n 'dA_post': a_s.dApost,\n 'duration': (a_s.sim_time/ms).tolist(),\n 'frame_length': (a_s.frame_length/ms).tolist(),\n 'dop_boost': a_s.dopBoost,\n 'reward_distance': a_s.reward_distance,\n 'speed_factor': (a_s.SPEED_FACTOR/second).tolist(),\n 'dragonfly_start': a_s.dragonfly_start,\n 'animation_id': a_s.animation_id,\n 'pattern_recognition_id': a_s.pattern_recognition_id,\n 'weights': (a_s.saved_weights).tolist(),\n 'training': a_s.training\n }\n\n # Save general data.\n _id = self.collection.insert(a_s)\n\n return _id", "def secretstore():\n pass", "def save(self):\n self.data['items'] = self._items\n if self.storage_id:\n storage.set_shop_data([self.storage_id, 'cart'], self.data)" ]
[ "0.65488034", "0.5803945", "0.56478375", "0.52747154", "0.5271561", "0.52378386", "0.51092994", "0.50849545", "0.50378954", "0.5033445", "0.4992036", "0.49815983", "0.4975764", "0.4963135", "0.49445814", "0.48963758", "0.48822185", "0.48742172", "0.48721516", "0.48705444", "0.4864023", "0.48592743", "0.48569056", "0.48561573", "0.48419502", "0.48218003", "0.48206583", "0.48117527", "0.48082826", "0.48065695", "0.47999182", "0.47938186", "0.47591045", "0.475408", "0.47526893", "0.4750747", "0.4743468", "0.47338915", "0.47123381", "0.47109085", "0.47093937", "0.4709079", "0.47080243", "0.4707619", "0.47066143", "0.47012725", "0.46828043", "0.46779388", "0.46646285", "0.46610966", "0.46607593", "0.46590889", "0.46414366", "0.46395046", "0.46382558", "0.4634015", "0.46315953", "0.46280354", "0.46276698", "0.4620834", "0.46192718", "0.46131673", "0.4609567", "0.46071872", "0.46051297", "0.4600795", "0.45991737", "0.45948252", "0.4592922", "0.45810702", "0.45808515", "0.45775557", "0.45712838", "0.45697632", "0.45694426", "0.4566512", "0.4563048", "0.45626205", "0.45621282", "0.455997", "0.45552027", "0.45513046", "0.45506537", "0.45472184", "0.45436028", "0.45432556", "0.4532025", "0.45317858", "0.45303905", "0.45294935", "0.45234403", "0.45220798", "0.4519528", "0.45175827", "0.45141205", "0.4507378", "0.45071143", "0.45022583", "0.4497223", "0.44952294" ]
0.8156723
0
Gets raw JSON data, parses it into events and writes those to Splunk.
def parse_data(data, activity_id, activity_start_date): data_dict = {} final_dict = {} for i in data: data_dict[i['type']] = i['data'] counter = 1 nrange = len(data_dict['time']) for item in range(1, nrange + 1): final_dict[item] = {} for key, value in data_dict.items(): counter = 1 for i in value: final_dict[counter][key] = i final_dict[counter]['activity_id'] = activity_id if 'time' in key: final_dict[counter]['time'] = final_dict[counter]['time'] + activity_start_date final_dict[counter]['time'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(final_dict[counter]['time'])) if 'latlng' in key: final_dict[counter]['lat'] = final_dict[counter]['latlng'][0] final_dict[counter]['lon'] = final_dict[counter]['latlng'][1] final_dict[counter].pop('latlng') counter += 1 result_list = [value for key, value in final_dict.items()] for event in result_list: write_to_splunk(index=helper.get_output_index(), sourcetype='strava:activities:stream', data=json.dumps(event)) helper.log_info(f'Added activity stream {activity_id} for {athlete_id}.') return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_data(self, raw_data):\n data = json.loads(raw_data)\n\n tweet = None\n includes = {}\n errors = []\n matching_rules = []\n\n if \"data\" in data:\n tweet = Tweet(data[\"data\"])\n self.on_tweet(tweet)\n if \"includes\" in data:\n includes = self._process_includes(data[\"includes\"])\n self.on_includes(includes)\n if \"errors\" in data:\n errors = data[\"errors\"]\n self.on_errors(errors)\n if \"matching_rules\" in data:\n matching_rules = [\n StreamRule(id=rule[\"id\"], tag=rule[\"tag\"])\n for rule in data[\"matching_rules\"]\n ]\n self.on_matching_rules(matching_rules)\n\n self.on_response(\n StreamResponse(tweet, includes, errors, matching_rules)\n )", "def post(self):\n json_body = self.request.body\n if not json_body:\n # TODO(davidbyttow): Log error?\n return\n\n json_body = unicode(json_body, 'utf8')\n logging.info('Incoming: ' + json_body)\n\n context, events = robot_abstract.ParseJSONBody(json_body)\n for event in events:\n try:\n self._robot.HandleEvent(event, context)\n except:\n logging.error(traceback.format_exc())\n\n json_response = robot_abstract.SerializeContext(context,\n self._robot.version)\n logging.info('Outgoing: ' + json_response)\n\n # Build the response.\n self.response.headers['Content-Type'] = 'application/json; charset=utf-8'\n self.response.out.write(json_response.encode('utf-8'))", "def read_json_from_server():\n s = connect_to_eeg_server(enable_raw_output=True)\n loop = 50\n while loop > 0:\n loop -= 1\n buf = s.recv(1024)\n records = cleanup_raw_data(buf)\n for record in records:\n jres = json.loads(record)\n print(repr(jres))\n s.close()", "def post(self):\n required_keys = [\"event_name\", \"timestamp\"]\n\n if request.headers.get('Content-Encoding', '') == 'gzip':\n try:\n data = gzip.decompress(request.data)\n events = json.loads(data)\n except JSONDecodeError as e:\n log.info(f\"failed to decode compressed event data: {e.msg}\")\n abort(http_client.BAD_REQUEST, \"failed to decode compressed event data\")\n else:\n events = request.json\n\n verify_log_request(events, required_keys)\n\n # The event log API should enforce the player_id to the current player, unless\n # the user has role \"service\" in which case it should only set the player_id if\n # it's not passed in the event.\n player_id = current_user[\"player_id\"]\n is_service = \"service\" in current_user[\"roles\"]\n\n for event in events:\n if is_service:\n event.setdefault(\"player_id\", player_id)\n else:\n event[\"player_id\"] = player_id # Always override!\n eventlogger.info(\"eventlog\", extra={\"extra\": event})\n\n if request.headers.get(\"Accept\") == \"application/json\":\n return jsonify(status=\"OK\"), http_client.CREATED\n else:\n return \"OK\", http_client.CREATED", "def handle_dataevent(bot, event):\n event.reply(event.tojson())", "def ingest_json_file(request):\n path = save_file(request) \n try:\n with open(path, encoding='utf-8') as f:\n data = json.loads(f.read())\n except Exception as e:\n log.error(log.exc(e))\n return None\n return data", "def make_event_with_raw_data(self, raw_data, priority=None):\n # TODO: use priority? 
or log warning if someone tries to use it?\n try:\n ev = SensedEvent.from_json(raw_data)\n networks.util.process_remote_event(ev, relay_uri=self.remote_path)\n return ev\n except ValueError as e:\n log.error(\"Failed to decode SensedEvent from: %s\" % raw_data)\n raise e", "def setUp(self):\n self.output = StringIO.StringIO()\n self.formatter = json_out.Json(None, self.output)\n self.event_object = JsonTestEvent()", "def apigw_event():\n with open(\"events/event.json\") as json_file:\n return json.load(json_file)", "def _events_json(fname, overwrite=False):\n new_data = {\n \"sample\": {\"Description\": \"The event onset time in number of sampling points.\"},\n \"value\": {\n \"Description\": (\n \"The event code (also known as trigger code or event ID) \"\n \"associated with the event.\"\n )\n },\n \"trial_type\": {\"Description\": \"The type, category, or name of the event.\"},\n }\n\n # make sure to append any JSON fields added by the user\n fname = Path(fname)\n if fname.exists():\n orig_data = json.loads(\n fname.read_text(encoding=\"utf-8\"), object_pairs_hook=OrderedDict\n )\n new_data = {**orig_data, **new_data}\n\n _write_json(fname, new_data, overwrite)", "def handler(context, event):\n\n if _ensure_str(event.trigger.kind) != 'http' or _invoked_by_cron(event):\n body = event.body.decode('utf-8')\n context.logger.info('Received event body: {0}'.format(body))\n\n # serialized record\n serialized_record = json.dumps({\n 'body': body,\n 'headers': {\n _ensure_str(header): _ensure_str(value)\n for header, value in event.headers.items()\n },\n 'timestamp': datetime.datetime.utcnow().isoformat(),\n })\n\n # store in log file\n with open(events_log_file_path, 'a') as events_log_file:\n events_log_file.write(serialized_record + ', ')\n\n else:\n\n # read the log file\n try:\n with open(events_log_file_path, 'r') as events_log_file:\n events_log_file_contents = events_log_file.read()\n except IOError:\n events_log_file_contents = ''\n\n # make this valid JSON by removing last two chars (, ) and enclosing in [ ]\n encoded_event_log = '[' + events_log_file_contents[:-2] + ']'\n\n context.logger.info('Returning events: {0}'.format(encoded_event_log))\n\n # return json.loads(encoded_event_log)\n return encoded_event_log", "def handle_json(self, source, data):\n method, args = json.loads(data)\n try:\n result = self.call(source, method, *args)\n except Exception as exc:\n result = str(exc)\n\n return json.dumps(result)", "def handle_request(self):\n try:\n content_type = self.headers.get('content-type')\n\n if content_type != 'application/json':\n self.write_empty_response(400)\n return\n\n content_len = int(self.headers.get('content-length', 0))\n\n # If content was provided, then parse it\n if content_len > 0:\n message = json.loads(self.rfile.read(content_len))\n else:\n self.write_empty_response(400)\n return\n\n helper.log_info(f'Incoming POST from {self.client_address[0]}: {message}')\n\n aspect_type = message['aspect_type']\n object_id = message['object_id']\n object_type = message['object_type']\n # make owner_id a str to avoid issues with athlete_checkpoint dict\n owner_id = str(message['owner_id'])\n\n athlete_checkpoint = helper.get_check_point(\"webhook_updates\") or {}\n\n # We only care about activity updates. 
New activities are pulled in automatically as strava_api input restarts.\n if aspect_type == 'update' and object_type == 'activity':\n if owner_id not in athlete_checkpoint:\n athlete_checkpoint[owner_id] = []\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n else:\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n helper.log_debug(f'webhooks_updates checkpoint: {helper.get_check_point(\"webhook_updates\")}')\n\n # Send data to Splunk\n data = json.dumps(message)\n event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n ew.write_event(event)\n\n # Strava API expects a 200 response\n self.write_empty_response(200)\n\n # Restart strava_api inputs to pull in the data unless it's a delete, as the input doesn't do anything with that anyway.\n if aspect_type != 'delete':\n self.restart_input('strava_api', self.SESSION_KEY)\n helper.log_info(f'Reloading Strava API input to retrieve updated activity {object_id} for athlete {owner_id}.')\n\n except Exception as ex:\n helper.log_error(f'Something went wrong in handle request: {ex}')", "def collect_events(helper, ew): # pylint: disable=no-self-argument,invalid-name,too-many-statements\n\n class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):\n \"\"\"Handles incoming requests from the browser\"\"\"\n\n SESSION_KEY = helper.context_meta['session_key']\n SSL_VERIFY = False\n\n def handle_request(self):\n \"\"\"Parses incoming POST, saves as checkpoint and sends data to Splunk\"\"\"\n try:\n content_type = self.headers.get('content-type')\n\n if content_type != 'application/json':\n self.write_empty_response(400)\n return\n\n content_len = int(self.headers.get('content-length', 0))\n\n # If content was provided, then parse it\n if content_len > 0:\n message = json.loads(self.rfile.read(content_len))\n else:\n self.write_empty_response(400)\n return\n\n helper.log_info(f'Incoming POST from {self.client_address[0]}: {message}')\n\n aspect_type = message['aspect_type']\n object_id = message['object_id']\n object_type = message['object_type']\n # make owner_id a str to avoid issues with athlete_checkpoint dict\n owner_id = str(message['owner_id'])\n\n athlete_checkpoint = helper.get_check_point(\"webhook_updates\") or {}\n\n # We only care about activity updates. 
New activities are pulled in automatically as strava_api input restarts.\n if aspect_type == 'update' and object_type == 'activity':\n if owner_id not in athlete_checkpoint:\n athlete_checkpoint[owner_id] = []\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n else:\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n helper.log_debug(f'webhooks_updates checkpoint: {helper.get_check_point(\"webhook_updates\")}')\n\n # Send data to Splunk\n data = json.dumps(message)\n event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n ew.write_event(event)\n\n # Strava API expects a 200 response\n self.write_empty_response(200)\n\n # Restart strava_api inputs to pull in the data unless it's a delete, as the input doesn't do anything with that anyway.\n if aspect_type != 'delete':\n self.restart_input('strava_api', self.SESSION_KEY)\n helper.log_info(f'Reloading Strava API input to retrieve updated activity {object_id} for athlete {owner_id}.')\n\n except Exception as ex:\n helper.log_error(f'Something went wrong in handle request: {ex}')\n\n def do_GET(self): # pylint: disable=invalid-name\n \"\"\"Responds to incoming GET request from Strava with challenge token\"\"\"\n parsed_url = urlparse(self.path)\n parsed_query = parse_qs(parsed_url.query)\n\n helper.log_info(f'Incoming request from {self.client_address[0]} - {self.path}')\n\n # Strava webhook expects a reply with the hub.challenge parameter\n challenge = parsed_query['hub.challenge'][0]\n request_verify_token = parsed_query['hub.verify_token'][0]\n\n # Respond with hub.challenge parameter if verify_token is correct\n if request_verify_token == verify_token:\n self.write_response(200, {\"hub.challenge\": challenge})\n else:\n self.write_empty_response(400)\n\n def do_POST(self): # pylint: disable=invalid-name\n \"\"\"Used for incoming POST request\"\"\"\n self.handle_request()\n\n def restart_input(self, modinput, session_key):\n \"\"\"Restarts modinput, used to trigger the Strava Activities input to pull in update.\"\"\"\n rest_url = f'https://localhost:8089/services/data/inputs/{modinput}/_reload'\n headers = {'Authorization': f'Splunk {session_key}'}\n\n response = requests.get(rest_url, headers=headers, verify=self.SSL_VERIFY)\n try:\n response.raise_for_status()\n except Exception as ex:\n helper.log_error(f'Something went wrong in input function: {ex}')\n\n def write_response(self, status_code, json_body):\n \"\"\"Craft response header with status code and json_body\"\"\"\n self.send_response(status_code)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n self.write_json(json_body)\n\n def write_empty_response(self, status_code):\n \"\"\"Craft empty response with status code.\"\"\"\n self.send_response(status_code)\n self.end_headers()\n\n def write_json(self, json_dict):\n \"\"\"Write json_dict to string and encode it.\"\"\"\n content = json.dumps(json_dict)\n\n if isinstance(content, unicode):\n content = content.encode('utf-8')\n\n self.wfile.write(content)\n\n def create_webhook(client_id, client_secret, verify_token, callback_url):\n \"\"\"Creates webhook, raises error if one already exists\"\"\"\n url = 'https://www.strava.com/api/v3/push_subscriptions'\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'verify_token': verify_token,\n 'callback_url': callback_url}\n 
response = helper.send_http_request(url, \"POST\", payload=payload, use_proxy=False)\n\n try:\n response.raise_for_status()\n except Exception:\n if 'already exists' in response.text:\n webhook_details = get_webhook(client_id, client_secret)\n helper.log_info(webhook_details)\n if 'GET to callback URL does not return 200' in response.text:\n helper.log_error(f'Error: Strava can\\'t reach {callback_url}')\n if 'not verifiable' in response.text:\n helper.log_error(f'Error: Strava can\\'t verify {callback_url}. URL incorrect or server not using public CA certificate.')\n else:\n helper.log_error(f'{response.status_code} Error: {response.text}')\n else:\n response = response.json()\n helper.log_info(f\"Webhook created successfully: ID {response['id']}\")\n\n def get_webhook(client_id, client_secret):\n \"\"\"Gets webhook details\"\"\"\n url = 'https://www.strava.com/api/v3/push_subscriptions'\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret}\n response = helper.send_http_request(url, \"GET\", payload=payload, use_proxy=False)\n\n try:\n response.raise_for_status()\n except Exception as ex:\n helper.log_error(f'Something went wrong: {ex}')\n return False\n else:\n return response.json()\n\n # Get global arguments\n port = int(helper.get_arg('port'))\n verify_token = helper.get_arg('verify_token')\n cert_file = helper.get_arg('cert_file')\n callback_url = helper.get_arg('callback_url')\n key_file = helper.get_arg('key_file')\n client_id = helper.get_global_setting('client_id')\n client_secret = helper.get_global_setting('client_secret')\n\n # Setup HTTP Server instance\n try:\n httpd = HTTPServer(('', port), SimpleHTTPRequestHandler)\n sslctx = ssl.SSLContext()\n sslctx.check_hostname = False\n sslctx.load_cert_chain(certfile=cert_file, keyfile=key_file)\n httpd.socket = sslctx.wrap_socket(httpd.socket, server_side=True)\n except Exception as err:\n helper.log_error(err)\n raise\n\n helper.log_info(f'Starting HTTPS web server on port {port}.')\n thread = Thread(target=httpd.serve_forever)\n thread.start()\n\n # Get webhook details. 
If it doesn't exist, create it.\n get_webhook = get_webhook(client_id, client_secret)\n if get_webhook:\n helper.log_info(f'Existing webhook: {get_webhook}')\n else:\n create_webhook(client_id, client_secret, verify_token, callback_url)", "def filter_data(self, json_data):\n\n\t\tdata = json_data['data']\n\t\tlocal_time_convertor = time_convertor.TimeConvertor()\n\n\n\t\tfor event_data in data:\n\t\t\t# go through each event and save data\n\n\t\t\t# first need to get data for all avalible sites\n\t\t\tevent_h2h_odds = []\n\t\t\tevent_site_names = []\n\t\t\tfor i, sites_data in enumerate(event_data['sites']):\n\t\t\t\tif len(sites_data['odds']['h2h']) > 2:\n\t\t\t\t\t# if more the 3 odds values (draw odds given) only take win loss odds\n\t\t\t\t\tevent_h2h_odds.append([sites_data['odds']['h2h'][0], sites_data['odds']['h2h'][1]])\n\t\t\t\telse:\n\t\t\t\t\tevent_h2h_odds.append(sites_data['odds']['h2h'])\n\t\t\t\tevent_site_names.append(sites_data['site_nice'])\n\t\t\t\n\t\t\t# append event data\n\t\t\tself.teams.append(event_data['teams'])\n\t\t\tself.h2h_odds.append(event_h2h_odds)\n\t\t\tself.betting_sites.append(event_site_names)\n\n\t\t\tlocal_time_convertor.convert_to_AEST(event_data['commence_time'])\n\t\t\tself.start_time['string format'].append(local_time_convertor.local_time_string)\n\t\t\tself.start_time['datetime format'].append(local_time_convertor.local_time)\n\n\t\t# debug helper code\n\t\t# print(self.teams)\n\t\t# print(self.betting_sites)\n\t\t# print(self.h2h_odds)", "def handle_my_custom_event(json, methods=['GET', 'POST']):\n data = dict(json)\n if \"name\" in data:\n db = DataBase()\n db.save_message(data[\"name\"], data[\"message\"])\n\n socketio.emit('message response', json)", "def logjson(self, data):\n\n if not self.enabled:\n return\n\n dumps = json.dumps(\n data,\n indent=4,\n default=lambda obj: str(type(obj)),\n ensure_ascii=False\n )\n\n try:\n print(f\"```json\\n{dumps}\\n```\", file=self.fp)\n except UnicodeEncodeError:\n print(\"(encoding error occured here.)\", file=self.fp)", "def receive(self):\n\t\tjsoned_data = self.ser.readline()\n\t\tif not jsoned_data:\n\t\t\treturn None\n\t\ttry:\n\t\t\tunjsoned_data = json.loads(jsoned_data)\n\t\t\treturn unjsoned_data\n\t\texcept ValueError as err:\n\t\t\tself.logger.warn('ValueError: {}'.format(err))\n\t\t\tself.logger.warn('received: {}'.format(jsoned_data))\n\t\t\treturn None\t\t\t\t\t\n\t\t\n\t\t#TESTING STUFF\n\t\t#return {\"x\":50,\"y\":50,\"z\":50,\"lat\":33.5,\"lon\":87.5,\"temp\":25,\"time\":datetime.datetime.now()}\n\t\t#return \"This is your string\"", "def _get_events_from_json_output(json_events, event_labels=None):\n result = []\n for json_object in json_events:\n if json_object:\n try:\n event_dict = json.loads(json_object)\n except ValueError as err:\n logger.info(\n \"Failed to parse event log line; skipping. 
Err: {!r}\".format(err))\n continue\n if event_labels is not None:\n filtered_dict = {\n \"system_timestamp\": _get_datetime(event_dict[\"system_timestamp\"]),\n \"raw_log_line\": event_dict[\"raw_log_line\"]\n }\n for event_label in event_labels:\n if event_label in event_dict:\n filtered_dict[event_label] = event_dict[event_label]\n result.append(filtered_dict)\n else:\n event_dict[\"system_timestamp\"] = _get_datetime(\n event_dict[\"system_timestamp\"])\n result.append(event_dict)\n return result", "def on_data(self, data):\n\n t = json.loads(data)\n\n\n if 'extended_tweet' in t:\n text = t['extended_tweet']['full_text']\n else:\n text = t['text']\n\n\n is_tweet_reply = t['in_reply_to_status_id'] == None\n is_quote = t['is_quote_status'] == False\n\n if 'RT' not in t['text'] and is_tweet_reply and is_quote:\n\n tweet = {'text': text, 'username' : t['user']['screen_name'],\n 'number_of_followers' : t['user']['followers_count'],\n 'location' : t['user']['location'], 'number_of_friends' : t['user']['friends_count'], 'retweet_count' :\n t['retweet_count']}\n\n\n logging.critical('\\n\\n\\nNEW TWEET INCOMING: ' + tweet['text']) \n \n \n load_tweet_into_mongo(tweet)\n logging.critical('\\n\\n\\nSUCCESSFULLY DUMPED INTO MONGO!')", "def main(event, context):\n\n logger.info(f\"Event data is: {event}\")\n try:\n # Incoming event is already byte encoded\n client.append_message(stream_name=\"LocalDataStream\", data=event)\n except Exception as e:\n logger.error(f\"Error appending: {e}\")\n return", "def event_push_datapackage():\n key = request.headers.get('key')\n if not key or key != current_app.config['SECRET_API']:\n return jsonify(status='Error', errors=['Invalid API key'])\n data = request.get_json(force=True)\n results = import_event_package(data)\n if 'errors' in results:\n return jsonify(status='Error', errors=results['errors'])\n return jsonify(status='Complete', results=results)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def post_read():\n mangos = flask_wms.read_data() # from flask_wms\n return jsonify(mangos)", "def getDataParsed():\r\n serialConsole.flush()\r\n rawData = serialConsole.readline().decode(\"utf-8\").rstrip()\r\n parsedJson = json.loads(rawData)\r\n return parsedJson", "def run(self):\n # read inputs\n indent = int(self.tcex.playbook.read(self.args.indent))\n byte_json_data = self.tcex.playbook.read(self.args.json_data)\n\n json_string = byte_json_data.decode()\n json_data = json.loads(json_string)\n\n try:\n # 1. each json_data['alerts'] is an identifier\n for alerts in json_data['alerts']:\n # 2. for each, 'items', add key:identifier name,\n identifier_name = alerts.get(\"name\") \n for item in alerts.items():\n for item in alerts['items']:\n item['source_identifier'] = identifier_name\n self.all_items.append({'key': item['id'], 'value': item})\n\n except Exception:\n self.tcex.exit(1, 'Failed parsing JSON data.')\n\n # set the App exit message\n self.exit_message = 'Firework Alert Ingested.'", "def _read_data(self) -> None:\n raw_data = self.__mmap[:].decode('ascii').rstrip('\\0')\n self.__data = json.loads(raw_data)", "def json_events(request):\n if request.method == 'GET':\n ttrss_url = request.GET['feed']\n\n # need xml for this. 
\n university_url = 'http://events.uchicago.edu/widgets/rss.php?key=47866f880d62a4f4517a44381f4a990d&id=48'\n\n n = datetime.datetime.now()\n return JsonResponse(\n {\n 'events': flatten_events(get_events(university_url, ttrss_url, n, n + relativedelta(years=1), False))\n }\n )", "def json_to_stream(data):\n json_stream = JsonStream()\n json.dump(data, json_stream)\n return json_stream", "async def emit(self, data):\n if type(data) is not str:\n serialized_data = json.dumps(data)\n else:\n serialized_data = data\n try:\n self.write(f\"data: {serialized_data}\\n\\n\")\n await self.flush()\n except StreamClosedError:\n app_log.warning(\"Stream closed while handling %s\", self.request.uri)\n # raise Finish to halt the handler\n raise Finish()", "def get_raw_data():\n params = request.args\n result = None\n\n def set_result(x):\n nonlocal result # This is ugly, ew, gotta fix this\n result = x\n\n pipeline_zoo.get_json_from_tweets(set_result).feed_data((params, None))\n return jsonify(result)", "async def run_collector(url: str, session: ClientSession):\n try:\n response = await get_records_from_api(url, session)\n event_data = json.dumps(response[0], ensure_ascii=False)\n log.info(f'Record to stream: {event_data}')\n return event_data\n except Exception as err:\n log.info('Unable to proceed: Error: ', err)\n raise err", "def on_data(self, data):\n try:\n # parse as json\n raw_data = json.loads(data)\n\n # extract the relevant data\n if \"text\" in raw_data:\n user = raw_data[\"user\"][\"screen_name\"]\n created_at = parser.parse(raw_data[\"created_at\"])\n tweet = raw_data[\"text\"]\n retweet_count = raw_data[\"retweet_count\"]\n id_str = raw_data[\"id_str\"]\n\n # insert data just collected into MySQL my_database\n populate_table(user, created_at, tweet, retweet_count, id_str)\n print(f\"Tweet colleted at: {created_at}\")\n\n except Error as e:\n print(e)", "def ingest(self):\n datetime_retrieved = datetime.now()\n prefix = self.prefix_template.format(**self.feed, year=datetime_retrieved.strftime('%Y'), month=datetime_retrieved.strftime('%m'))\n fp = self.generate_fp(\n template='{feedname}_{datetime_retrieved}',\n feedname=self.feed['feedname'],\n datetime_retrieved=datetime_retrieved\n )\n\n url_to_request = self.url_dict[(self.feed['state'],self.feed['feedname'])]\n try:\n r = requests.get(url_to_request)\n if r.status_code == 200:\n data_to_write = r.content\n self.s3helper.write_bytes(data_to_write, self.bucket, key=prefix+fp)\n self.print_func('Raw data ingested from {} to {} at {} UTC'.format(url_to_request, prefix+fp, datetime_retrieved))\n else:\n self.print_func('Received status code {} from {} feed.'.format(r.status_code,self.feed['feedname']))\n self.print_func('Skip triggering ingestion of {} to sandbox.'.format(self.feed['feedname']))\n self.print_func('Skip triggering ingestion of {} to Socrata.'.format(self.feed['feedname']))\n return\n except BaseException as e:\n data_to_write = f'The feed at {datetime_retrieved.isoformat()}.'.encode('utf-8')\n fp += '__FEED_NOT_RETRIEVED'\n self.s3helper.write_bytes(data_to_write, self.bucket, key=prefix+fp)\n self.print_func('We could not ingest data from {} at {} UTC'.format(url_to_request, datetime_retrieved))\n raise e\n\n # trigger semi-parse ingest\n if self.feed['pipedtosandbox'] == True:\n self.print_func('Trigger {} for {}'.format(self.lambda_to_trigger, self.feed['feedname']))\n lambda_client = self.s3helper.session.client('lambda')\n data_to_send = {'feed': self.feed, 'bucket': self.bucket, 'key': prefix+fp}\n response = 
lambda_client.invoke(\n FunctionName=self.lambda_to_trigger,\n InvocationType='Event',\n LogType='Tail',\n ClientContext='',\n Payload=json.dumps(data_to_send).encode('utf-8')\n )\n self.print_func(response)\n else:\n self.print_func('Skip triggering ingestion of {} to sandbox.'.format(self.feed['feedname']))\n\n # trigger ingest to socrata\n if self.feed['pipedtosocrata'] == True:\n self.print_func('Trigger {} for {}'.format(self.socrata_lambda_to_trigger, self.feed['feedname']))\n lambda_client = self.s3helper.session.client('lambda')\n data_to_send = {'feed': self.feed, 'bucket': self.bucket, 'key': prefix+fp}\n response = lambda_client.invoke(\n FunctionName=self.socrata_lambda_to_trigger,\n InvocationType='Event',\n LogType='Tail',\n ClientContext='',\n Payload=json.dumps(data_to_send).encode('utf-8')\n )\n self.print_func(response)\n else:\n self.print_func('Skip triggering ingestion of {} to Socrata.'.format(self.feed['feedname']))", "def event_json_to_csv(self, outfileName, data):\n event_raw = data.split('\\n')\n try:\n result = '\\nAPI ERROR! - ' + json.loads(event_raw[0])['error'] + '\\n'\n print result\n return\n except KeyError:\n pass\n\n '''remove the lost line, which is a newline'''\n event_raw.pop()\n\n event_list = []\n jsonfile = outfileName[:-4] + '.json'\n with open(jsonfile,'w') as j:\n j.write('[')\n i = 0\n event_count = len(event_raw)\n for event in event_raw:\n j.write(event)\n i += 1\n if i != event_count:\n j.write(',')\n else:\n j.write(']')\n event_json = json.loads(event)\n event_list.append(event_json)\n print 'JSON saved to ' + j.name\n j.close()\n\n subkeys = get_sub_keys(event_list)\n\n #open the file\n f = open(outfileName, 'w')\n writer = UnicodeWriter(f)\n\n #write the file header\n f.write(codecs.BOM_UTF8)\n\n #writer the top row\n header = [u'event']\n for key in subkeys:\n header.append(key)\n writer.writerow(header)\n\n #write all the data rows\n for event in event_list:\n line = []\n #get the event name\n try:\n line.append(event[u'event'])\n except KeyError:\n line.append(\"\")\n #get each property value\n for subkey in subkeys:\n try:\n line.append(unicode(event[u'properties'][subkey]))\n except KeyError:\n line.append(\"\")\n #write the line\n writer.writerow(line)\n\n print 'CSV saved to ' + f.name\n f.close()", "def get_data(self, start, end):\n data = self.r.lrange('stream', start, end)\n for i in range(len(data)):\n data[i] = data[i].decode()\n data[i] = json.loads(data[i])\n return data", "def segment2datadog(source):\n print(f\"Received request on /api/{source}\")\n\n signature = request.headers.get(\"x-signature\", \"\")\n\n if not check_signature(signature=signature, data=request.data):\n abort(403, \"Signature not valid.\")\n\n content = request.get_json()\n event_type = content[\"type\"]\n\n if event_type not in ALLOWED_EVENTS:\n return jsonify({\"source\": source, \"data\": content})\n\n event = content[\"event\"]\n emit(source=source, event=event, event_type=event_type)\n\n return jsonify({\"source\": source, \"data\": content})", "def sendjson(self, data):\n\n import json\n\n self.sendraw(json.dumps(data))", "def on_data(self,data):\n\n try:\n raw_data = json.loads(data)\n\n if 'text' in raw_data:\n\n created_at = raw_data['created_at']\n username = raw_data['user']['screen_name']\n location = raw_data['user']['location']\n followers_count = raw_data['user']['followers_count']\n tweet_id = raw_data['id']\n\n if 'extended_tweet' in raw_data:\n tweet = raw_data['extended_tweet']['full_text']\n else:\n tweet = raw_data['text']\n\n 
connect(created_at, username, tweet, location, followers_count, tweet_id)\n print(f'Tweet collected at: {str(created_at)}')\n\n except Error as e:\n print(e)", "def post_events(ruleset_name):\n message = json.loads(request.stream.read().decode('utf-8'))\n result = host.post(ruleset_name, message)\n return jsonify(result)", "def datagramReceived(self, data):\n try:\n obj = json.loads(data)\n except ValueError, e:\n log.err(e, 'Invalid JSON in stream: %r' % data)\n return\n\n if u'text' in obj:\n obj = Status.fromDict(obj)\n else:\n log.msg('Unsupported object %r' % obj)\n return\n\n self.callback(obj)", "def write_to_splunk(**kwargs):\n event = helper.new_event(**kwargs)\n ew.write_event(event)", "def _serialize_event_data_as_json(event_data):\n return json.dumps(event_data)", "def storm():\n sourcetype = 'generic_single_line'\n source = 'webhook'\n\n post_data = flask.request.data\n if not post_data:\n post_data = flask.request.form.keys()[0]\n\n event_params = {\n 'event_text': post_data, 'sourcetype': sourcetype, 'source': source}\n\n return _send_log(event_params)", "def data_parser(data, write_date, write_name, write_id):\n parsed_events = []\n for event in data:\n # Initial values\n event_coordinates = (None, None)\n event_date = None\n event_name = None\n event_id = None\n parsed_data = {}\n\n # Retrieving latitude and longitude if possible\n if event.get(\"venue\"):\n event_coordinates = (event.get(\"venue\")[\"lat\"],\n event.get(\"venue\")[\"lon\"])\n if event_coordinates is (0, 0):\n event_coordinates = (None, None)\n\n # Retrieving date\n if write_date:\n event_date = event.get(\"time\")\n\n # Retrieving description\n if write_name:\n event_name = event.get(\"name\")\n # Remove semicolons, they would produce interpretation errors when\n # read from file\n event_name = event_name.replace(';', '')\n\n if write_id:\n event_id = event.get(\"id\")\n\n # Save data\n parsed_data[\"coordinates\"] = event_coordinates\n parsed_data[\"date\"] = event_date\n parsed_data[\"name\"] = event_name\n parsed_data[\"id\"] = event_id\n parsed_events.append(parsed_data)\n\n return parsed_events", "def callback(ch, method, properties, body):\n record = json.loads(body.decode()) # decode binary string to dict\n pprint(record)", "def update_events(request):\n events_data = request.data\n events_manager.deserialize_event(events_data)\n # print(events_manager.serialize_events())\n events_manager.apply()\n return JsonResponse({'nodes': []})", "def write_file (data):\n\n req_text= data.text\n json_parsed=json.loads(req_text)\n return json_parsed", "def write_file (data):\n\n req_text= data.text\n json_parsed=json.loads(req_text)\n return json_parsed", "def wrap_get_shift_data():\n json_input = request.get_json()\n\n if not json_input:\n return\n # Search releases by\n if \"ts_start\" in json_input and \"ts_end\" in json_input:\n user_id = None\n try:\n user_id = json_input[\"user_id\"]\n except KeyError:\n pass\n\n ts_start = json_input[\"ts_start\"]\n ts_end = json_input[\"ts_end\"]\n\n releases_list = get_monitoring_releases(ts_start=ts_start, ts_end=ts_end, user_id=user_id, exclude_routine=True)\n else:\n var_checker(\"NO SHIFTS\", \"no shifts\", True)\n\n if \"user_id\" in json_input:\n releases_data = group_by_date(releases_list)\n else:\n releases_data = group_by_alert(releases_list)\n\n return jsonify(releases_data)", "def handle_events(self, events):\n for event in events:\n event_type = event['type']\n if event_type == types.SO_CHANGE:\n for key in event['data']:\n self.data[key] = 
event['data'][key]\n self.on_change(key)\n\n elif event_type == types.SO_REMOVE:\n key = event['data']\n assert key in self.data, (key, self.data.keys())\n del self.data[key]\n self.on_delete(key)\n\n elif event_type == types.SO_SEND_MESSAGE:\n self.on_message(event['data'])\n else:\n assert False, event", "def insert(self, events):\r\n url = '{0}/{1}'.format(self.get_url(), 'events')\r\n\r\n return http.Request('POST', url, events), parsers.parse_json", "def testReadAndWriteSerializedEventSource(self):\n test_path_spec = fake_path_spec.FakePathSpec(location='/opt/plaso.txt')\n\n expected_event_source = event_sources.EventSource(path_spec=test_path_spec)\n\n json_string = (\n json_serializer.JSONAttributeContainerSerializer.WriteSerialized(\n expected_event_source))\n\n self.assertIsNotNone(json_string)\n\n event_source = (\n json_serializer.JSONAttributeContainerSerializer.ReadSerialized(\n json_string))\n\n self.assertIsNotNone(event_source)\n self.assertIsInstance(event_source, event_sources.EventSource)\n\n expected_event_source_dict = {\n 'path_spec': test_path_spec.comparable,\n }\n\n event_source_dict = event_source.CopyToDict()\n path_spec = event_source_dict.get('path_spec', None)\n if path_spec:\n event_source_dict['path_spec'] = path_spec.comparable\n\n self.assertEqual(\n sorted(event_source_dict.items()),\n sorted(expected_event_source_dict.items()))", "def load_json(path):\n events = []\n try:\n with open(path, 'r') as fd:\n data = fd.read()\n except IOError as e:\n print \"I/O error({0}): {1}\".format(e.errno, e.strerror)\n return []\n except:\n raise\n\n jsondata = json.loads(data)\n if 'events' not in jsondata:\n return []\n for e in jsondata['events']:\n event = Event(occasion=e['occasion'],\n invited_count=e['invited_count'],\n year=e['year'],\n month=e['month'],\n day=e['day'],\n cancelled=e['cancelled'] if 'cancelled' in e else False)\n events.append(event)\n \n return events", "def _publish(self, data):\n json_data = json.dumps(data)\n self._udp_socket.sendto(json_data, (self._hsflowd_addr, self._hsflowd_port))", "def recvjson(self):\n\n import json\n\n data = self.recvraw()\n return json.loads(data)", "def get_event():\n json_data = request.args or {}\n return make_response(jsonify({ \"data\" : Event.get_events(json_data)}))", "async def receive(request):\n data = await request.json()\n\n auth_header = request.headers.get('authorization')\n if auth_header:\n auth_parts = auth_header.split(' ')\n\n token = os.getenv('AUTH_TOKEN', None)\n if token:\n if token != auth_parts[1]:\n return web.Response(status=401)\n\n if 'data' in data.keys():\n db = data.get('db', None)\n for item in data.get('data'):\n lines = line_protocol.make_lines(item)\n resp = await save(lines, db_name=db)\n resp.close()\n return web.Response(status=201)\n\n points = data.get('points')\n if points:\n # convert ints to floats\n for idx, item in enumerate(points):\n fields = item.get('fields')\n if fields:\n for k, v in fields.items():\n if k != 'time':\n if isinstance(v, int):\n data['points'][idx]['fields'][k] = float(v)\n\n lines = line_protocol.make_lines(data)\n resp = await save(lines)\n resp.close()\n return web.Response(status=201)", "def parse_to_json(self, data):\n try:\n feed_format = self.feed['format']\n if type(data) == dict:\n out = data\n elif feed_format == 'xml':\n xmldict = xmltodict.parse(data)\n out = json.loads(json.dumps(xmldict))\n elif feed_format in ['json', 'geojson']:\n out = json.loads(data)\n else:\n out = data\n return out\n except BaseException as e:\n 
self.print_func('ERROR WITH FEED')\n self.print_func('FEED: {}'.format(self.feed))\n self.print_func('DATA: {}'.format(data))\n self.print_func(traceback.format_exc())\n raise e", "def on_data(self, data):\r\n try:\r\n\r\n global conn\r\n\r\n # load the tweet JSON, get pure text\r\n full_tweet = json.loads(data)\r\n tweet_text = full_tweet['text']\r\n\r\n # print the tweet plus a separator\r\n print (\"------------------------------------------\")\r\n print(tweet_text + '\\n')\r\n\r\n # send it to spark\r\n conn.send(str.encode(tweet_text + '\\n'))\r\n except:\r\n # handle errors\r\n e = sys.exc_info()[0]\r\n print(\"Error: %s\" % e)\r\n return True", "def handle_datachan(bot, event):\n event.reply(event.chan.data.tojson())", "def write(self, record):\n # Make Splunk ready payload data and append it to self._buffers list.\n self._buffer.append({\n 'index': self._index,\n 'sourcetype': 'json',\n 'event': record\n })\n\n # If the records count in self._buffer is more than allowed by\n # self._buffer_size, send those records to Splunk.\n if len(self._buffer) >= self._buffer_size:\n self._flush()", "def events(self):\n for line_num, line in enumerate(self.file_handler):\n if not line:\n break\n # process line input to dictionary\n data = json.loads(line)\n # add id information\n data['id'] = line_num\n # update timestamp history\n timestamp = self._get_timestamp(data)\n self.last_two_timestamps = [self.last_two_timestamps[-1], timestamp]\n self.event_timestamps[line_num] = timestamp\n\n self.alarms.append(0) # add field for alarms\n self.users.append(data['user']) # add field for user\n self.anomalies.append(data.get('is_anomaly', 0)) # add field for anomalies\n if 'is_anomaly' in data:\n del data['is_anomaly'] # remove anomaly information from data for contestants\n\n # return line id and serialized JSON as string representing one event\n str_dump = json.dumps(data)\n logger.info(self._get_inner_time() + ' > ' + str_dump)\n yield line_num, str_dump", "def data_received(self, data):\n print('S> data received ['+str(len(data))+']: '+str(data))\n self.deserializer.append(data)\n if self.deserializer.ready():\n msg = self.deserializer.deserialize()\n status = TSDBStatus.OK # until proven otherwise.\n response = TSDBOp_Return(status, None) # until proven otherwise.\n try:\n op = TSDBOp.from_json(msg)\n except TypeError as e:\n print(e)\n response = TSDBOp_Return(TSDBStatus.INVALID_OPERATION, None)\n if status is TSDBStatus.OK:\n if isinstance(op, TSDBOp_InsertTS):\n response = self._insert_ts(op)\n elif isinstance(op, TSDBOp_UpsertMeta):\n response = self._upsert_meta(op)\n elif isinstance(op, TSDBOp_Select):\n response = self._select(op)\n elif isinstance(op, TSDBOp_AugmentedSelect):\n response = self._augmented_select(op)\n elif isinstance(op, TSDBOp_AddTrigger):\n response = self._add_trigger(op)\n elif isinstance(op, TSDBOp_RemoveTrigger):\n response = self._remove_trigger(op)\n elif isinstance(op, TSDBOp_DeleteTS):\n print('running delete')\n response = self._delete_ts(op)\n else:\n response = TSDBOp_Return(TSDBStatus.UNKNOWN_ERROR,\n op['op'])\n\n self.conn.write(serialize(response.to_json()))\n # print(\"close\")\n self.conn.close()", "def test_to_json(event_args):\n\n event = HealthEvent()\n event.populate(**event_args)\n now = datetime.utcnow()\n event.metric_data[0][\"Timestamp\"] = now\n json_event = event.to_json()\n event_dictionary = json.loads(json_event)\n\n assert event_dictionary[\"Source\"] == event_args.get(\"source\")\n assert event_dictionary[\"ComponentType\"] == 
event_args.get(\"component_type\")\n assert event_dictionary[\"EventType\"] == event_args.get(\"event_type\")\n assert event_dictionary[\"Environment\"] == event_args.get(\"environment\")\n assert event_dictionary[\"Service\"] == event_args.get(\"service\")\n assert event_dictionary[\"Healthy\"] == event_args.get(\"healthy\")\n assert event_dictionary[\"Resource\"][\"Name\"] == event_args.get(\"resource_name\")\n # this assert also checks that field_1 in the sourcedata is still snake case\n assert event_dictionary[\"SourceData\"] == event_args.get(\"source_data\")\n\n timestamp = event_dictionary.get(\"MetricData\")[0][\"Timestamp\"]\n assert datetime.strptime(timestamp, \"%Y-%m-%d %H:%M:%S.%f\") == now", "def ingest_json_body(request):\n # log.debug(request.body)\n try:\n data = json.loads(str(request.body, encoding='utf-8'))\n except Exception as e:\n log.error(log.exc(e))\n return None\n return data", "def consume_raw_event(\n self, event_name: str, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject\n ) -> None:", "def on_message(self, message):\n obj = json_decode(message)\n self.writing_logs(obj)\n return", "def _json_probe(srcfile):\n return json.loads(__run(srcfile))", "def _load_json(self, kind, source, **kwargs):\n if source is None:\n raise exceptions.invalid_json_map[kind](f\"Cannot load {kind} - no data source specified.\")\n\n # Decode the json string and deserialize to objects.\n try:\n data = load_json(source, **kwargs)\n except FileNotFoundError as e:\n raise exceptions.file_not_found_map[kind](e)\n\n except jsonlib.decoder.JSONDecodeError as e:\n raise exceptions.invalid_json_map[kind](e)\n\n return data", "def _json(self, data):\n if len(data) == 0:\n return \"\"\n if self.meta:\n data['meta_history'] = [{'prog': __prog__,\n 'release': __release__,\n 'author': __author__,\n 'date': __now__},]\n return json.dumps(data) + \"\\n\"", "def _readin_JSON(file):\n\tdef object_decoder(obj):\n\t\t\"\"\"This function is used to properly load the JSON elements into the corresponding classes.\"\"\"\n\t\tif 'logfile' in obj:\n\t\t\treturn logfile(obj['logfile']['name'], obj['logfile']['lines'], obj['logfile']['type'], obj['logfile']['content'], obj['logfile']['sources'])\n\t\tif 'logfile_entry' in obj:\n\t\t\tif len(obj['logfile_entry']['timestamp']['datetime']) >= 20 :\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S.%f\")\n\t\t\telif obj['logfile_entry']['timestamp']['datetime'][-6:-5] != '+':\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S\")\n\t\t\telse:\n\t\t\t\tunformatted_date = obj['logfile_entry']['timestamp']['datetime']\n\t\t\t\tunformatted_date = unformatted_date[:-3]+unformatted_date[-2:]\n\t\t\t\t# once again, related to missing features in Python 3.6\n\t\t\t\tdate = datetime.datetime.strptime(unformatted_date,\"%Y-%m-%dT%H:%M:%S.%f%z\")\n\t\t\treturn logfile_entry(obj['logfile_entry']['id'], file, obj['logfile_entry']['message'], obj['logfile_entry']['structured_data'], date,obj['logfile_entry']['hostname'],obj['logfile_entry']['source'])\n\t\treturn obj\n\n\tfp = open(file,'r')\n\tlf = json.load(fp, object_hook=object_decoder)\n\tfp.close()\n\treturn lf", "def save_vuln_json(self, data):\n self.helper.store_json_content(data, \"snyk-feed/vulnerability-data.json\")", "def write(self):\n self.json_o.write()", "def read_handler(socket, buf):\n while True:\n message = socket.recv(BUFFER_SIZE)\n if not message:\n break\n 
logging.debug(\"receiving data : %s\", message)\n\n try:\n message = json.loads(message)\n\n # handle callback functions\n if recv_data_callback is not None:\n event = message[\"event\"]\n data = message[\"data\"]\n recv_data_callback(event, data)\n except ValueError:\n logging.error(\"message must be json serialized\")\n\n buf.appendleft(message)\n socket.close()", "def webhook_process_json(user, application, json_dict, init_es, tool, scan_name, user_host, to_name,hook_log):\n hook_log = WebhookLog.objects.get(id=hook_log)\n hook_log.file_upload_event = True\n hook_log.file_upload_datetime = timezone.now()\n hook_log.save()\n process_json(user, application, json_dict, init_es, tool, scan_name, user_host, to_name,hook_log=hook_log)\n info_debug_log(event='Webhooks - json process',status='success')", "async def write_json(self, data) -> None:\n print(f\"Sending: {data}\")\n await self.write(json.dumps(data, separators=(\",\", \":\")))", "def process_raw_trace(raw_trace):\n trace = trace_events_pb2.Trace()\n trace.ParseFromString(raw_trace)\n return ''.join(trace_events_json.TraceEventsJsonStream(trace))", "def post(self):\n response = \"Null\"\n\n data = request.json\n self.logger.info(\"########## Events API Called\")\n self.logger.info(data)\n\n if data.get('type') == \"INSERT\":\n name = data.get('name')\n date = data.get('event_date')\n event_type = data.get('event_type')\n location = data.get('event_location')\n\n print(name + \" \" + date + \" \" + event_type + \" \" + location + \" \\n\\n\\n\\n\")\n\n if name is None or date is None or event_type is None or location is None:\n response = \"Couldnt perfomr action: Missing data\"\n else:\n response = EVENTS.insert_event(name, date, event_type, location)\n\n return jsonify({\n 'events': response\n }), 201", "def save_json(data):\n data = json.dumps(data)\n\n with MEDTADATA_FILE.open('w') as outfile:\n outfile.write(data)", "def _json_decoder_hook(obj):\n if \"starttime\" in obj:\n obj[\"starttime\"] = datetime.strptime(obj[\"starttime\"], \"%Y-%m-%dT%H:%M:%SZ\")\n if \"endtime\" in obj:\n obj[\"endtime\"] = datetime.strptime(obj[\"endtime\"], \"%Y-%m-%dT%H:%M:%SZ\")\n return obj", "def on_data(self, data):\n status = json.loads(data)\n # increase the counter\n self.counter += 1\n\n retweet, rt_user, tweet_text, created_time = organize_tweet(status) \n\n if status['user']['id_str'] in infos.twitterids:\n\n who = status['user']['id_str']\n\n try:\n replied_to = status['in_reply_to_screen_name']\n except:\n replied_to = 'NULL'\n \n else:\n \n who = status['user']['screen_name']\n \n try:\n replied_to = infos.twitterids[status['in_reply_to_user_id_str']]\n except:\n replied_to = 'NULL'\n \n tweet = {\n \n 'id': status['user']['id_str'], #status.user.id_str,\n 'who': who,\n 'replied_to': replied_to,\n 'retweeted': retweet, #status['retweeted'], #status.retweeted,\n 'retweeted_from': rt_user,\n 'text': tweet_text,\n 'timestamp' : created_time\n }\n\n #write to mongoDB here\n collection.insert_one(tweet)\n print(f'New tweet arrived: {tweet[\"text\"]}')\n\n\n # check if we have enough tweets collected\n if self.max_tweets == self.counter:\n # reset the counter\n self.counter=0\n # return False to stop the listener\n return False", "def on_data(self, data):\n\n t = json.loads(data) \n tweet = {\n 'text': t['text'],\n 'username': t['user']['screen_name'],\n 'followers_count': t['user']['followers_count']\n }\n\n logging.critical(f'\\n\\n\\nTWEET INCOMING: {tweet[\"text\"]}\\n\\n\\n')\n tweet_collection.insert({'username' : 
tweet['username'],'followers_count' : tweet['followers_count'], 'text' : tweet['text']})", "def test_streamBufferedEvents(self):\n events = (\n dict(eventID=u\"1\", eventText=u\"A\"),\n dict(eventID=u\"2\", eventText=u\"B\"),\n dict(eventID=u\"3\", eventText=u\"C\"),\n dict(eventID=u\"4\", eventText=u\"D\"),\n )\n\n resource = self.eventSourceResource()\n resource.addEvents(events)\n\n response = self.render(resource)\n\n # Each result from read() is another event\n for i in range(len(events)):\n result = yield response.stream.read()\n self.assertEquals(\n result,\n textAsEvent(\n text=events[i][\"eventText\"],\n eventID=events[i][\"eventID\"]\n )\n )", "def post(self):\n required_keys = [\"event_name\", \"timestamp\"]\n\n verify_log_request(request, required_keys)\n\n args = request.json\n\n # The event log API should enforce the player_id to the current player, unless\n # the user has role \"service\" in which case it should only set the player_id if\n # it's not passed in the event.\n player_id = current_user[\"player_id\"]\n is_service = \"service\" in current_user[\"roles\"]\n\n for event in args:\n if is_service:\n event.setdefault(\"player_id\", player_id)\n else:\n event[\"player_id\"] = player_id # Always override!\n eventlogger.info(\"eventlog\", extra={\"extra\": event})\n\n if request.headers.get(\"Accept\") == \"application/json\":\n return jsonify(status=\"OK\"), http_client.CREATED\n else:\n return \"OK\", http_client.CREATED", "def get():\n return jsonify({'events': 'Events API'}), 200", "async def server_event_trigger(self, event):\n event_data = event[\"event_data\"]\n await self.send_json(event_data)", "def get_json_data(request):\n\n # First we need to write request logs\n record_logs(request)\n if request.method == \"GET\":\n return json.loads(request.GET[\"data\"])\n else:\n return json.loads(request.POST[\"data\"])", "def json_dump(data):\n if OLD_SUGAR_SYSTEM is True:\n return json.write(data)\n else:\n _io = StringIO()\n jdump(data, _io)\n return _io.getvalue()", "def peek_astarte(test_cfg: TestCfg):\n server_data = {}\n cprint(\"\\nReading data stored on the server.\", color=\"cyan\", flush=True)\n json_res = get_server_interface(test_cfg, test_cfg.interface_device_prop)\n server_data[test_cfg.interface_device_prop] = json_res.get(\"data\", {}).get(\"sensor-id\", {})\n parse_received_data(server_data[test_cfg.interface_device_prop])\n\n json_res = get_server_interface(test_cfg, test_cfg.interface_server_prop)\n server_data[test_cfg.interface_server_prop] = json_res.get(\"data\", {}).get(\"sensor-id\", {})\n parse_received_data(server_data[test_cfg.interface_server_prop])\n return server_data", "def getLatestData(self):\n jsonText = self.session.get(self.jsonURL).text\n\n # Somehow, the output I am getting has some garbage at the beginning.\n # So, skipping all text before first instance of \"{\".\n jsonText = jsonText[jsonText.find(\"{\"):]\n latestData = json.loads(jsonText)\n return latestData", "def process_data(self, data):\n # Decode the incoming data.\n try:\n message = json.loads(data.decode('utf-8'))\n except ValueError:\n self.disconnect(\"Bad response received\")\n logger.warning(\"Cannot parse incoming message, discarding.\")\n return\n\n self.process_incoming_response(message)", "def prepare_agent_input_data(event, context):\n print \"Preparing input data for the agent\"\n write_data_to_file(json.dumps(event), '/tmp/event.json')\n write_data_to_file('{ \"logStreamName\": \"%s\" }' % context.log_stream_name, '/tmp/context.json')", "def __init__(self):\n 
super(JsonTestEvent, self).__init__()\n self.timestamp = timelib_test.CopyStringToTimestamp(\n '2012-06-27 18:17:01+00:00')\n self.hostname = u'ubuntu'\n self.display_name = u'OS: /var/log/syslog.1'\n self.inode = 12345678\n self.text = (\n u'Reporter <CRON> PID: |8442| (pam_unix(cron:session): session\\n '\n u'closed for user root)')\n self.username = u'root'\n\n os_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_OS, location=u'/cases/image.dd')\n self.pathspec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, inode=15, location=u'/var/log/syslog.1',\n parent=os_path_spec)", "def process(self, snyk_data):\n json_data = self.get_vuln_data()\n json_data = self.extract_info(snyk_data, json_data)\n self.save_vuln_json(json_data)", "def _rest_call(self, data, action):\n path = '/wm/staticflowentrypusher/json'\n headers = {\n 'Content-type': 'application/json',\n 'Accept': 'application/json',\n }\n body = json.dumps(data)\n conn = httplib.HTTPConnection(self.host, self.port)\n conn.request(action, path, body, headers)\n response = conn.getresponse()\n ret = (response.status, response.reason, response.read())\n conn.close()\n return ret", "def lambda_handler(event, context):\n # EOL char append function\n encode_data = lambda x: \"{data}{eol}\".format(data=json.dumps(x), eol=chr(10)).encode(\"UTF-8\")\n \n # Punk API call\n try:\n logger.debug(\"Requesting api: {api}\".format(api=os.environ[\"API_URL\"]))\n request = r.get(os.environ[\"API_URL\"])\n except Exception as e:\n logger.error(\"An error occured while requesting api: {api}\".format(api=os.environ[\"API_URL\"]))\n raise e\n \n # Send records to kinesis stream\n logger.debug(\"Sending data to stream: {stream}\".format(stream=os.environ[\"STREAM_NAME\"]))\n for data in request.json():\n client.put_record(\n StreamName=os.environ[\"STREAM_NAME\"],\n Data=encode_data(data),\n PartitionKey=\"key\"\n )\n\n return {\n 'statusCode': request.status_code,\n 'body': data\n }", "def updated_data():\n with open(UPDATED_JSON, 'r') as f:\n updated_data = json.load(f)\n return updated_data", "def runs_json(request, instrument, ipts): \n logger.debug(\"Fecthing runs as jsons for ipts = %s\"%ipts)\n info_dict = get_ipts_runs_as_json(instrument, ipts)\n response = HttpResponse(json.dumps(info_dict), content_type=\"application/json\")\n return response" ]
[ "0.5950303", "0.58063334", "0.5770824", "0.5747445", "0.56922185", "0.5671852", "0.5653861", "0.56337476", "0.5627374", "0.5618672", "0.553584", "0.55230635", "0.54872155", "0.5470855", "0.54696006", "0.5466001", "0.54565036", "0.5411228", "0.53876317", "0.53832644", "0.53710985", "0.5355568", "0.5348323", "0.5348323", "0.53339344", "0.5327865", "0.5311301", "0.5307277", "0.5304746", "0.5298316", "0.5297478", "0.5288468", "0.5286846", "0.5278388", "0.5271889", "0.5254496", "0.524362", "0.5240846", "0.52372205", "0.52302265", "0.52284163", "0.52236104", "0.52198786", "0.52022654", "0.5200515", "0.51758975", "0.51721793", "0.5170541", "0.5166759", "0.5166759", "0.5166297", "0.5149927", "0.5133078", "0.51318115", "0.51242226", "0.51228017", "0.51110584", "0.51094586", "0.5109113", "0.51089925", "0.5106254", "0.51061505", "0.50947976", "0.50803137", "0.5077654", "0.50740075", "0.506988", "0.5063749", "0.5047774", "0.504394", "0.5032386", "0.5016885", "0.5002358", "0.49987078", "0.4985952", "0.49696645", "0.49632025", "0.4962338", "0.4957381", "0.49571568", "0.49548316", "0.49542916", "0.4945556", "0.4943959", "0.49423552", "0.493791", "0.4937553", "0.49327457", "0.49295244", "0.4922996", "0.49159485", "0.49141753", "0.4906768", "0.49049413", "0.4902674", "0.49018365", "0.4898821", "0.48983243", "0.48965383", "0.4892766" ]
0.5168414
48
Gets JSON from URL and parses it for potential error messages.
def return_json(url, method, **kwargs):
    response = helper.send_http_request(url, method, use_proxy=False, **kwargs)
    try:
        response.raise_for_status()
    except requests.HTTPError as ex:
        # status code 429 means we hit Strava's API limit, wait till next 15 minute mark (+5 seconds) and try again
        if ex.response.status_code == 429:
            # Get the 15m/24h API limits for this user
            api_usage_15m = response.headers['X-RateLimit-Usage'].split(",")[0]
            api_usage_24h = response.headers['X-RateLimit-Usage'].split(",")[1]
            api_limit_15m = response.headers['X-RateLimit-Limit'].split(",")[0]
            api_limit_24h = response.headers['X-RateLimit-Limit'].split(",")[1]
            timestamp_now = int(time.time())
            modulus_time = timestamp_now % 900
            sleepy_time = 0 if modulus_time == 0 else (900 - modulus_time + 5)
            helper.log_warning(f'Strava API rate limit hit. Used {api_usage_15m}/15min (limit {api_limit_15m}), {api_usage_24h}/24h (limit {api_limit_24h}). Sleeping for {sleepy_time} seconds.')
            time.sleep(sleepy_time)
            response = return_json(url, method, **kwargs)
            helper.log_debug(f'429 detail: {response}')
            return response
        if ex.response.status_code in (400, 401):
            helper.log_error(f'{ex.response.status_code} Error: Strava API credentials invalid or session expired. Make sure Client ID & Client Secret have been added to the Configuration -> Add-On Parameters tab and your access code is valid.')
            sys.exit(1)
        if ex.response.status_code == 404:
            helper.log_warning(f'404 Error: no stream data for url {url}, can happen for manually added activities.')
            return False
        if ex.response.status_code == 500:
            helper.log_warning(f'500 Error: no data received from Strava API for url {url}, it might be corrupt or invalid. Skipping activity.')
            return False
        # In case there's any other error than the ones described above, log the error and exit.
        helper.log_error(f'Error: {ex}')
        sys.exit(1)
    # Must have been a 200 status code
    return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _process_url(self, url):\n response = requests.get(url, timeout=self.TIMEOUT)\n try:\n ret = response.json()\n except JSONDecodeError:\n self.log.exception(\"JSONDecodeError, response: %r, response.text: %r\", response, response.text)\n ret = {\"error\": \"The api broke.\"}\n return ret", "def _get_json(self, url):\n try:\n resp = urllib.urlopen(url)\n except:\n resp = urllib.request.urlopen(url)\n json_string = resp.read()\n parsed_json = json.loads(json_string.decode('utf-8'))\n return parsed_json", "def get_json(url):\n\n parsed_result = []\n try:\n response = urllib2.urlopen(url)\n result = response.read()\n response.close()\n if len(result) != 0:\n parsed_result = json.loads(result)\n except HTTPError as exc:\n print \"ERROR:\"\n print \" REST GET URL: %s\" % url\n # NOTE: exc.fp contains the object with the response payload\n error_payload = json.loads(exc.fp.read())\n print \" REST Error Code: %s\" % (error_payload['code'])\n print \" REST Error Summary: %s\" % (error_payload['summary'])\n print \" REST Error Description: %s\" % (error_payload['formattedDescription'])\n print \" HTTP Error Code: %s\" % exc.code\n print \" HTTP Error Reason: %s\" % exc.reason\n except URLError as exc:\n print \"ERROR:\"\n print \" REST GET URL: %s\" % url\n print \" URL Error Reason: %s\" % exc.reason\n return parsed_result", "def load_json(url):\n with urllib.request.urlopen(url) as u:\n data = json.loads(u.read().decode())\n return data", "def fetchJson(url):", "def load_json_url(url):\n response = urllib.request.urlopen(url).read()\n response_str = str(response, 'UTF-8')\n return json.loads(response_str)", "def get_json(url):\n f = urllib2.urlopen(url) #opens url\n response_text = f.read() #reads through url\n response_data = json.loads(response_text) #converts data to json\n results = response_data[\"results\"]\n return results", "def get_json(url):\n f = urllib.request.urlopen(url)\n response_text = f.read().decode('utf-8')\n response_data = json.loads(response_text)\n\n return response_data", "def get_json_from_url(url: str):\n return requests.get(url).json()", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def get(self, url):\n return json.loads(self.as_source.urlopen(url).read())", "def get_json(url):\n f = urllib.request.urlopen(url)\n response_text = f.read().decode('utf-8')\n response_data = json.loads(response_text)\n pprint(response_data)", "def get_json(self, url):\n if self.json is None:\n try:\n try:\n self.json = requests.get(url).json()\n except requests.exceptions.SSLError:\n LOGGER.warning(\"SSL error, using http instead of https (press ^C to abort)\")\n time.sleep(1)\n url = url.replace('https', 'http', 1)\n self.json = requests.get(url).json()\n except json.decoder.JSONDecodeError as e:\n LOGGER.error(\"Failed to decode JSON data in response from server.\")\n LOGGER.error(\"JSON error encountered: \" + str(e))\n LOGGER.error(\"This issue might be caused by server-side issues, or by to unusual activity in your \"\n \"network (as determined by CloudFlare). 
Please visit https://plugins.getnikola.com/ in \"\n \"a browser.\")\n sys.exit(2)\n\n return self.json", "def get_json(url):\n f = urllib.request.urlopen(url)\n response_text = f.read().decode('utf-8')\n response_data = json.loads(response_text)\n # pprint(response_data)\n return response_data", "def getResponse( self, url ):\n\n try:\n res = urllib2.urlopen( url ).read()\n except urllib2.HTTPError, e:\n print(e.code)\n except urllib2.URLError, e:\n print(e.args)\n return json.loads(res)", "def request_json(url):\n return json.loads(requests.get(url).content.decode('utf-8'))", "def lookup_json(url):\n session=requests.Session()\n json_res=get_api_result(session,url)\n try:\n content=json.loads(json_res)\n return content\n except:\n raise NoContent", "def get_json(url):\n f = urlopen(url, 1)\n response_text = f.read()\n response_data = json.loads(str(response_text, \"utf-8\"))\n return response_data", "def get_json(self, url):\n json_response = self.testapp.get(url)\n self.assertEqual(json_response.status_int, 200)\n return self._parse_json_response(json_response, expect_errors=False)", "def getResponse(url):\n response = urllib.request.urlopen(url)\n data = response.read().decode('utf-8')\n resp = json.loads(data)\n if 'error' in resp:\n console('Error: {}'.format(resp['error']['msg']))\n input('Press Enter to Close')\n sys.exit()\n return resp", "def get_json(url):\n f = urlopen(url)\n response_text = f.read()\n response_data = json.loads(str(response_text, \"utf-8\"))\n #pprint(response_data)\n return response_data", "def fetch_json(url):\n\n request = urllib.request.Request(url)\n result = urllib.request.urlopen(request)\n result_text = result.read()\n js = json.loads(result_text)\n return js", "def get_json(url):\n r = requests.get(url)\n return r.json()", "def get_json_from_url(url):\r\n content = get_url(url)\r\n js = json.loads(content)\r\n return js", "def _get_json(self, url: str) -> dict:\n r = self._req_get(url)\n return r.json() if r else None", "def _handle_api_call(self, url):\n response = urlopen(url)\n url_response = response.read()\n json_response = loads(url_response)\n \n if not json_response:\n raise ValueError('Error getting data from the api, no return was given.')\n elif \"Error Message\" in json_response:\n raise ValueError(json_response[\"Error Message\"])\n elif \"Information\" in json_response and self.treat_info_as_error:\n raise ValueError(json_response[\"Information\"])\n \n return json_response", "def get_json(url):\n headers = {\n 'accept': \"application/json\",\n 'cache-control': \"no-cache\",\n 'postman-token': \"cce2e0c1-c598-842b-f15f-a1fe8b3e31e2\"\n }\n\n response = requests.request(\"GET\", url, headers=headers)\n return json.loads(response.text)", "def Access_URL(url): \n r = requests.get(url) \n json = r.json() \n return json", "def get_json_from_url(url):\n content = get_url(url)\n js = json.loads(content)\n return js", "def getData(url):\n data = ''\n request = urllib2.Request(url, headers={\"Accept\": \"application/json\"})\n try:\n data = json.loads(urllib2.urlopen(request).read())\n except urllib2.HTTPError, e:\n raise Exception(\"HTTP error: %d\" % e.code)\n except urllib2.URLError, e:\n raise Exception(\"Network error: %s\" % e.reason.args[1])\n\n return data", "def json_response(url):\n headers = {'User-Agent': 'Mozilla/5.0'}\n\n session = requests.Session()\n\n # Get the page\n res = session.get(url, headers=headers)\n # Load into json\n try:\n return json.loads(res.text)\n except json.decoder.JSONDecodeError:\n logger.error(res)", "def 
read_url(url):\n try:\n response = requests.get(url)\n except requests.ConnectionError:\n content = '{\"error\": \"Bad Connection\"}'\n except MissingSchema: # The url does not exist\n content = '{\"error\": \"Bad Url\"}'\n else:\n if response.status_code == 200:\n content = response.text\n else:\n content = '{\"error\": \"' + response.reason + '\"}'\n\n return content", "def json_api_call(url):\n response = requests.get(url)\n return response.json()", "def fromurl(cls, url: str):\n return cls.parse_obj(requests.get(url).json())", "def _get(url, *, verbose=False): \n r = get_from_api(url, verbose=verbose)\n return json.loads(r.content)", "def get_json(url, allow_empty=False):\n try:\n response = requests.get(url)\n json = response.json()\n\n except ValueError:\n if not allow_empty:\n raise\n json = {}\n\n return json", "def getJson(self,url):\n r = req.get(str(url),\"GET\")\n jsonResponse = json.loads(r.text)\n return jsonResponse", "def get_data(url):\n response = get(url, timeout=10)\n \n if response.status_code >= 400:\n raise RuntimeError(f'Request failed: { response.text }')\n \n return response.json()", "def FetchUrlJson(*args, **kwargs):\n fh = FetchUrl(*args, **kwargs)\n # The first line of the response should always be: )]}'\n s = fh.readline()\n if s and s.rstrip() != \")]}'\":\n raise GOBError(200, 'Unexpected json output: %s' % s)\n s = fh.read()\n if not s:\n return None\n return json.loads(s)", "def _request_data(self, url):\n connection = httplib.HTTPConnection(self.url)\n connection.request(\"GET\", url)\n response = connection.getresponse()\n\n if response.status != 200:\n raise Exception(response.reason)\n\n data = response.read()\n response.close()\n\n return json.loads(data)", "def _request_get(self, url):\n try:\n r = requests.get(url)\n except Exception:\n raise Exception('Cannot connect')\n if (r.status_code != 200):\n raise Exception('%d %s' % (r.status_code, r.text))\n if (not r.text) or (not r.text.strip()):\n raise Exception('Empty answer')\n try:\n response = json.loads(r.text)\n except Exception:\n raise Exception('Cannot parse response')\n return response", "def make_request(url):\r\n\r\n req = urllib2.Request(url)\r\n response = urllib2.urlopen(req)\r\n data = json.loads(response.read())\r\n response.close()\r\n\r\n return data", "def get_response(url):\n resp = requests.get(url)\n if resp.status_code == 200:\n return resp.json()\n\n raise Exception(f\"Failed to fetch: {url}\")", "def request_json_from_url(url, params={}):\n params[\"format\"] = \"json\"\n r = requests.get(url=url, params=params, headers=get_headers())\n r.raise_for_status()\n return r.json()", "def fetch_url(self, url: str) -> Union[Dict, None]:\n\n try:\n req = requests.get(url)\n req.raise_for_status()\n res = req.json()\n except (requests.HTTPError, json.JSONDecodeError) as e:\n logging.warning(f'{self.__class__.__name__} failed to retrieve/parse {url}')\n # logging.debug(e)\n return\n\n # safe-check for empty response from server\n if not res:\n logging.warning(f\"{self.__class__.__name__} empty response from {url}\")\n return\n\n return res", "def get_json_data(url):\n\n r = requests.get(url)\n try:\n return r.json()\n except json.JSONDecodeError:\n # Catch the Unexpected UTF-8 BOM error\n r.encoding='utf-8-sig'\n return r.json()", "def getjson(url, **kwargs):\n json = fetch_resource(url, **kwargs)\n return simplejson.loads(json)", "def _get_json(self, url, file=None):\n r = requests.get(url)\n # If status is not OK, raise error.\n if not r.ok:\n r.raise_for_status()\n # Otherwise load 
JSON.\n data = json.loads(r.text)\n # Optionally save JSON to disk.\n if file is not None:\n with open(file, 'w') as f:\n json.dump(data, f)\n return data", "def _get_json(self, url, payload):\n if self.settings.requests:\n r = self.settings.requests.get(url, params=payload, headers=HEADERS)\n return self._process_result(r.json())\n else:\n payload = self.settings.urllib.urlencode(payload)\n r = self.settings.urllib2.Request(url + \"?\" + payload)\n r.add_header('Accept', HEADERS['Accept'])\n try:\n data = self.settings.urllib2.urlopen(r)\n except self.settings.urllib2.HTTPError:\n raise\n return self._process_result(self.settings.json.load(data))", "def get_response(request_url):\n response = requests.get(request_url)\n return json.loads(response.text)", "def _getJason(self, url, use_session = False):\n print ('Retrieving Jason for %s' % url)\n if use_session:\n r = session.get(url)\n else:\n r = requests.get(url)\n data = json.loads(r.text)\n return data", "def get_json(self, url, *, timeout, headers):", "def get_json():\n response = requests.get(JSON_URL)\n if response.status_code != 200:\n raise Exception(\"Could not not load json file!\")\n return response.json()", "def get_request(query_url):\n\n stream = urlopen(query_url)\n result = json.loads(stream.read().decode())\n return result", "def fetch_url(url):\n logger.info(\"Resolving \" + url)\n try:\n resp = requests.get(url, timeout=1.5)\n resp.raise_for_status()\n return {\n \"resolved_url\": resp.url,\n \"raw_content\": resp.text\n }\n except Exception as e:\n logger.error('Error fetching %s' % url, e)\n return {\n \"resolved_url\": url,\n \"raw_content\": \"\",\n \"url_error\": str(e)\n }", "def _get_json_response(self, url, data, headers):\n if data:\n data = json.dumps(data)\n req = urllib2.Request(url, data, headers)\n response = urllib2.urlopen(req)\n raw_response = response.read()\n return raw_response", "def make_request(self, url):\n try:\n response = requests.get(url)\n if response.status_code != 200:\n return None\n return response.json()\n except requests.ConnectionError:\n return None", "def get_json(self, url, params=None, headers=None, timeout=10):\r\n headers = headers or self.headers\r\n try:\r\n return self.request(url=url, method='GET', params=params, extra_headers=headers, timeout=timeout).json()\r\n except ValueError:\r\n return None\r\n except requests.exceptions.ProxyError:\r\n return None\r\n except requests.RequestException as error:\r\n print(error)\r\n if self._debug:\r\n logging.exception(\r\n ''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))\r\n return None", "def __fetch_from_url(url: str) -> Any:\n song_information: Any = None\n try:\n # Send the request and load the returned contents.\n req = request.Request(url, headers={\n 'User-Agent': Config.Config.get_user_agent()\n })\n response = request.urlopen(req)\n contents: str = response.read().decode('utf-8')\n except (HTTPError, TimeoutError) as ex:\n Logger.Logger.log_error(str(ex))\n Logger.Logger.log_error('Request failed for URL: ' + url)\n return\n # Parse the response from the endpoint as a JSON encoded string\n data: Any = json.loads(contents)\n # Check if response contains at least one result, otherwise return \"None\".\n if data['resultCount'] > 0:\n song_information = data\n return song_information", "def get_location():\n try:\n return json.load(urllib.request.urlopen(URL))\n except urllib.error.URLError:\n return None", "def _remoteloadjson(path: str) -> JSONType:\n return 
json.loads(request.urlopen(path).read())", "def _query_tmdb_api(url):\n contents = {}\n try:\n response = urllib2.urlopen(url)\n contents = json.loads(response.read()) # Convert JSON to dict\n except urllib2.HTTPError as e:\n if e.code == 401:\n # Check that API key is valid in auth fails\n print(\"Unauthorized access - ensure you have set the correct TMDB api_key set in secret.py\")\n elif e.code == 404:\n print(\"File not found for url: \" + url)\n else:\n print(e)\n except:\n print(\"An unknown exception has ocurred\")\n finally:\n return contents", "def get(self, url):\n headers = {\"Authorization\": \"Bearer \" + self.token}\n full_url = self.api_url + starts_slash(url)\n logging.info(\"GET url: \" + str(full_url))\n logging.info(\"GET header: \" + str(headers))\n try:\n result = requests.get(full_url, headers=headers).json()\n except json.decoder.JSONDecodeError:\n result = \"error parsing JSON response\"\n logging.info(\"GET result: \" + str(result))\n return result", "def _api_request(date: str, api_url: str) -> Dict[str, str]:\n try:\n data = json.loads(urlopen(\n Request(f\"{api_url}?at={quote(date)}\")\n ).read().decode('utf-8'))\n except HTTPError as e:\n data = json.loads(e.file.read().decode('utf-8'))\n if \"message\" in data:\n raise ValidationError(data[\"message\"])\n else:\n raise ValidationError(f\"Service unavailable ({e}\")\n return data", "def retrieve_json(url):\n\n logger.info('retrieving: %s' % url)\n j = _read_cache(url)\n if j is not None:\n logger.debug('cached version found: %s' % url)\n return j\n j = json.loads(urllib2.urlopen(url).read())\n logger.info('caching: %s' % url)\n _write_cache(url, j)\n\n return j", "def get_json(url, data) -> dict:\n headers = {\n # 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36',\n }\n # logging.debug('User-Agent: ' + headers['User-Agent'])\n logging.debug('url: ' + url)\n logging.debug('data: ' + repr(data))\n r = requests.post(url.strip(), data=data, headers=headers)\n r.encoding = 'utf8'\n print('[Status Code: %s]' % r.status_code)\n if r.status_code != 200:\n raise Exception('Error in get Json!')\n return r.json()", "def cached_json_get(url):\n return requests.get(url).json()", "def __GetJson(self, url, auth, responseProcessor = None):\n\n conn = self.__GetConnection()\n conn.request(\"GET\", url, \"\", self.__MakeHeaders(auth))\n response = conn.getresponse()\n if (responseProcessor != None):\n if (responseProcessor(response) == False):\n return None\n\n self.__CheckResponse(response)\n data = response.read()\n return cjson.decode(data)", "def get_data():\n try:\n response = requests.get(uri)\n json_data = response.json()\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)\n return json_data", "def __exec_request(self, URL) -> Any:\n headers = {\n \"X-ELS-APIKey\": self.config['apikey'],\n \"Accept\": 'application/json'\n }\n\n request = requests.get(\n URL,\n headers=headers\n )\n self._status_code = request.status_code\n\n if request.status_code == 200:\n return json.loads(request.text, strict=False)\n else:\n return \"failed\"", "def get_json_dict(url: str) -> dict:\n r = requests.get(url)\n jtext = r.text\n ca_d = json.loads(jtext)\n return ca_d", "def load_json(url, memberscrape=False):\n # TODO: Add error handling if request fails (e.g. 
if repo was not found)\n if memberscrape:\n r = requests.get(url, auth=(username, api_token))\n jsonData = json.loads(r.text)\n return jsonData\n else:\n page = 1\n jsonList = []\n page_not_empty = True\n while page_not_empty:\n r = requests.get(url + \"&page=\" + str(page), auth=(username,\n api_token))\n jsonData = json.loads(r.text)\n if jsonData == []:\n page_not_empty = False\n else:\n jsonList.extend(jsonData)\n page += 1\n return jsonList", "def fetch_json(uri):\n data = requests.get(uri)\n # Raise an exception if the fetch failed.\n data.raise_for_status()\n return data.json()", "def _get_api_request(url):\n req = requests.get(url)\n\n if not req.status_code == 200:\n print(\"Error getting API request:\", url)\n print(\"Status code:\", req.status_code)\n print(\"Error:\", req.text)\n exit(200)\n\n data = None\n try:\n data = req.json()\n except JSONDecodeError:\n print(\"WarcraftLogs did not return proper JSON, it is likely down for maintenance.\")\n print(\"Request response:\", req.text)\n exit(300)\n\n return data", "def request_url_json_dict_from_url(url, params={}):\n params[\"format\"] = \"json\"\n r = requests.get(url=url, params=params, headers=get_headers())\n r.raise_for_status()\n return {url: r.json()}", "def get_response(url, params):\n resp = requests.get(url=url, params=params)\n data = json.loads(resp.text)\n return data", "def fetch(url):\n content = requests.get(url).text\n if \"Error\" in content:\n raise ValueError(f\"Cannot read from: {url}\")\n return content", "def get_json(url=PL_GALLERY_JSON_URL):\n r = requests.get(url)\n # Fail fast, always should return 200, even if empty\n r.raise_for_status()\n return r.json()", "async def jsonreq(self, ctx, url):\n\n allowedDomains = await self.config.guild(ctx.guild).allowedDomains()\n urldomain = urlparse(url).netloc\n\n # Domain validation\n if self.checkDomain(allowedDomains, urldomain) == False:\n return await ctx.send(\"Oops! This domain isn't approved on this server....\")\n\n # Json request\n try:\n reqcontent = self.makeJsonRequest(url)\n except:\n return await ctx.send(\"Oops! 
An error occured with the request....\")\n else:\n # Max 2000 character limit on messages\n await ctx.send(\"```json\\n\"+str(reqcontent[:1986])+\"```\")", "def _fetch_remote_json(service_url, params={}, use_http_post=False):\n request_url, response = _fetch_remote(service_url, params, use_http_post)\n return (request_url, json.load(response))", "def _http_get(self, url, params={}):\n if not self.token:\n self.get_token()\n headers = {'Authorization': self.token, 'Accept': 'application/json; indent=4'}\n url = self.server + '/api2' + url\n try:\n r = requests.get(url=url, headers=headers, params=params)\n except requests.exceptions.RequestException as e:\n return check_failed(e)\n # raise ClientHttpError(None, e)\n if r.status_code != 200:\n return check_failed(r.status_code)\n # return ClientHttpError(r.status_code, json.loads(r.text)['error_msg'])\n try:\n data = json.loads(r.text)\n except:\n data = r.text\n # TODO: check data\n return data", "def _download_url(self,url_to_download: str) -> dict:\r\n response = None\r\n r_obj = None\r\n\r\n try:\r\n response = urllib.request.urlopen(url_to_download)\r\n json_results = response.read()\r\n r_obj = json.loads(json_results)\r\n\r\n except urllib.error.HTTPError as e:\r\n if self.errors < 1:\r\n self.errors += 1\r\n print('Failed to download contents of URL')\r\n print('Status code: {}'.format(e.code))\r\n if e.code == 400:\r\n print('request to server cannot be understood due to invalid syntax')\r\n elif e.code == 401:\r\n print('invalid apikey, please try again with a new apikey')\r\n elif e.code == 403:\r\n print('possible invalid apikey or access is forbidden')\r\n elif e.code == 404:\r\n print('cannot find the url, so unable to connect')\r\n elif e.code == 408:\r\n print('client unable to produce a request within server timeout limit')\r\n elif e.code == 410:\r\n print('server no longer exists')\r\n elif e.code == 414:\r\n print('the request url is longer than what the server is willing to interpret')\r\n elif e.code == 422:\r\n print('correct request entity and syntax, but unable to process the contained instructions')\r\n elif e.code == 502:\r\n print('bad gatway from server so unable to fulfill request')\r\n elif e.code == 503:\r\n print('server unable to connect due to possible maintenance or is overloaded with requests')\r\n elif e.code == 504:\r\n print('reuqest to server took too long, so gateway timed out')\r\n elif e.code == 505:\r\n print('server does not support or refuses to support the request message')\r\n elif 399 < e.code < 452:\r\n print('a client error')\r\n elif 499 < e.code < 512:\r\n print('a server error')\r\n\r\n finally:\r\n if response != None:\r\n response.close()\r\n \r\n return r_obj", "def get_json(self, url, params, timeout=5, retries=3, data=None):\n return self.request('GET', url, params, timeout=timeout, retries=retries, data=data)", "def getData(eUrl, eToken):\n if eUrl == '' or eToken == '':\n print(\"Enviroment is not setup\")\n exit(1)\n\n with urllib.request.urlopen(eUrl + eToken) as url:\n data = json.loads(url.read().decode())\n\n return(data)", "def get_url_data(self, url):\n # print \"opening: \" + url\n request = urllib2.Request(url)\n base64string = '%s:%s' % (self.username, self.key)\n request.add_header(\"Authorization\", \"ApiKey %s\" % base64string)\n response = urllib2.urlopen(request)\n data = json.loads(response.read())\n return data", "def _load_json(self, url):\n try:\n browser = self._browser\n browser.get(url)\n except:\n print(browser.current_url)\n\n try:\n robot_check = 
browser.find_element_by_xpath(\"//div[@class='content center']\")\n if 'I am not a robot' in robot_check.text:\n self._recaptcha(browser)\n except:\n pass\n\n jtext = browser.find_element_by_xpath(\"//script[@id='__NEXT_DATA__']\")\\\n .get_attribute(\"innerHTML\")\n \n # convert text to JSON\n jdict = json.loads(jtext) \n return jdict", "def getJsonFromApi(urlAPI: AnyStr) -> Any:\n\n if validaURL(urlAPI):\n return requestToApi(urlAPI)\n else:\n print(f\"La url \\\"{urlAPI}\\\" no es válida.\")\n return None", "def get(self, url=\"\", query={}):\r\n qs = urllib.urlencode(query)\r\n if qs:\r\n qs = \"?%s\" % qs\r\n \r\n url = \"%s%s%s\" % (self.base_url, url, qs)\r\n log.debug(\"GET %s\" % (url))\r\n \r\n self.__connection.connect()\r\n request = self.__connection.request(\"GET\", url, None, self.__headers)\r\n response = self.__connection.getresponse()\r\n data = response.read()\r\n self.__connection.close()\r\n \r\n log.debug(\"GET %s status %d\" % (url,response.status))\r\n result = {}\r\n \r\n # Check the return status\r\n if response.status == 200:\r\n log.debug(\"%s\" % data)\r\n parser = DetailsToDict()\r\n parseString(data, parser)\r\n return parser.data\r\n \r\n elif response.status == 204:\r\n raise EmptyResponseWarning(\"%d %s @ https://%s%s\" % (response.status, response.reason, self.host, url))\r\n \r\n elif response.status == 404:\r\n log.debug(\"%s returned 404 status\" % url)\r\n raise HTTPException(\"%d %s @ https://%s%s\" % (response.status, response.reason, self.host, url))\r\n \r\n elif response.status >= 400:\r\n _result = simplejson.loads(data)\r\n log.debug(\"OUTPUT %s\" % _result)\r\n raise HTTPException(\"%d %s @ https://%s%s\" % (response.status, response.reason, self.host, url))\r\n \r\n return result", "async def fetch(self, session, url):\n async with session.get(url) as response:\n if response.status != 200:\n response.raise_for_status()\n response = await response.text()\n return json.loads(response)", "def _get(self, url):\n response = requests.get(url)\n if response.status_code == requests.codes.ok:\n return response\n else:\n raise HTTPError", "def fetch(url, verbose=False):\n\n resp = requests.get(url)\n if verbose:\n print(resp.json())\n if resp.status_code == 200:\n\n resp=resp.json()\n return resp\n else:\n return None", "def get_json(url, params=None, retries=0, ratelimit=None):\n result = {}\n if not params:\n params = {}\n # apply rate limiting if needed\n rate_limiter(ratelimit)\n try:\n response = requests.get(url, params=params, timeout=20)\n if response and response.content and response.status_code == 200:\n result = json.loads(response.content.decode('utf-8', 'replace'))\n if \"results\" in result:\n result = result[\"results\"]\n elif \"result\" in result:\n result = result[\"result\"]\n elif response.status_code in (429, 503, 504):\n raise Exception('Read timed out')\n except Exception as exc:\n result = None\n if \"Read timed out\" in str(exc) and retries < 5 and not ratelimit:\n # retry on connection error or http server limiting\n monitor = xbmc.Monitor()\n if not monitor.waitForAbort(2):\n result = get_json(url, params, retries + 1)\n del monitor\n else:\n log_exception(__name__, exc)\n # return result\n return result", "def get_from(url):\r\n try:\r\n with current_app.app_context():\r\n r = requests.get(url, timeout=current_app.config[\"TIMEOUT\"])\r\n if r.status_code == 200:\r\n return r.json()\r\n return None\r\n except:\r\n return None", "def _request(self, url, values=None):\n\n url += \"?{}\".format(urllib.urlencode(values))\n\n 
request = urllib2.Request(url)\n\n try:\n connection = urllib2.urlopen(request)\n except urllib2.HTTPError, e:\n return {'status': 'Failed', 'message': str(e.reason)}\n except urllib2.URLError, e:\n return {'status': 'Failed', 'message': str(e.reason)}\n except httplib.HTTPException, e:\n return {'status': 'Failed', 'message': str(e.reason)}\n except Exception as exception:\n return {'status': 'Failed', 'message': str(exception)}\n\n response = connection.read()\n connection.close()\n\n\n try:\n result = json.loads(response.decode())\n except ValueError as exception:\n return {'status': 'Failed', 'message': str(exception)}\n\n return result", "def _do_request(self, url, method='GET', body=None):\n response, content = self.request(url, method=method, body=body, headers=self.headers)\n if int(response['status']) != 200:\n raise GPAPIError(response['status'], 'ERROR IN REQUEST')\n json = simplejson.loads(content)\n return json", "def _get(self, endpoint):\n res = self._request(\"get\", endpoint)\n if not res.content:\n return {}\n try:\n res = res.json()\n except ValueError:\n raise ValueError(\"Cannot parse {} as JSON\".format(res))\n if \"error\" in res:\n raise AirthingsError(res[\"error\"])\n return res", "def req(url):\n headers = {'Accept': 'application/json'}\n timeout = 10\n r = requests.get(url, headers=headers, timeout=timeout)\n response_json = r.text\n return response_json", "def _get(self, url, **queryparams):\n url = urljoin(self.base_url, url)\n if len(queryparams):\n url += '?' + urlencode(queryparams)\n try:\n r = self._make_request(**dict(\n method='GET',\n url=url,\n auth=self.auth,\n timeout=self.timeout,\n hooks=self.request_hooks,\n headers=self.request_headers\n ))\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if r.status_code >= 400:\n _raise_response_error(r)\n return r.json()" ]
[ "0.79039305", "0.7806594", "0.77826935", "0.74478185", "0.7402855", "0.73680866", "0.7356292", "0.73440784", "0.73038524", "0.729605", "0.729605", "0.729605", "0.729605", "0.72762716", "0.7274411", "0.7236561", "0.7228294", "0.7226799", "0.7206968", "0.71908146", "0.7171582", "0.7113547", "0.710801", "0.70939356", "0.703857", "0.7023408", "0.7021735", "0.7018842", "0.6994597", "0.699206", "0.6974263", "0.697225", "0.6967956", "0.6918491", "0.686823", "0.68426496", "0.68371105", "0.68369365", "0.6832404", "0.6777087", "0.6754401", "0.67199963", "0.6711265", "0.6694099", "0.66776323", "0.66681075", "0.66645825", "0.66441417", "0.66042525", "0.6604117", "0.65854293", "0.6550297", "0.649044", "0.6489615", "0.6472959", "0.6442219", "0.6320404", "0.6317872", "0.6307393", "0.63040453", "0.62957335", "0.6295385", "0.62768507", "0.62510014", "0.6228413", "0.6212684", "0.6211485", "0.62031704", "0.6199033", "0.61731595", "0.6165832", "0.6163066", "0.61559683", "0.61368", "0.6110805", "0.61010945", "0.60975087", "0.60943866", "0.6087597", "0.6082894", "0.605008", "0.602545", "0.6020476", "0.6016676", "0.60084563", "0.59746903", "0.59733003", "0.5962754", "0.5957849", "0.5925888", "0.59079975", "0.5899247", "0.58937293", "0.5889336", "0.5887688", "0.5883861", "0.58537406", "0.5850925", "0.58415437", "0.582635", "0.58201927" ]
0.0
-1
Creates dict with athlete details, including token expiry.
def set_athlete(response): name = response['athlete']['firstname'] + " " + response['athlete']['lastname'] athlete = { 'id': response['athlete']['id'], 'name': name, 'access_token': response['access_token'], 'refresh_token': response['refresh_token'], 'expires_at': response['expires_at'], 'ts_activity': 0} return athlete
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_athlete(token):\n url = \"https://www.strava.com/api/v3/athlete\"\n params = {'access_token': token}\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response", "def asdict(self):\n return {\n \"access_token\": self.access_token,\n \"audience\": self.audience,\n \"token_type\": self.token_type,\n \"expires_in\": self.expires_in,\n \"expires_at\": self.expires_at,\n }", "def alpaca_create(self, keyname = \"ALPACA_API_KEY\", secret = \"ALPACA_SECRET_KEY\"):\n aak = os.getenv(keyname)\n ask = os.getenv(secret)\n if type(aak) is not str | type(aak) is not str:\n raise Exception(\"Could not load API or Secret Key\")\n #try to create object regardless \n alpaca = tradeapi.REST(\n aak,\n ask,\n api_version=\"v2\"\n )\n self.alpaca_api = alpaca\n return alpaca", "def getDBAthletesUsingAPI():\n athletes_response = api_requester.getAthletes()\n\n # Parse response into Athlete db objects\n athletes_to_return = list()\n for athlete in athletes_response.json():\n athletes_to_return.append(\n getAthleteObjectFromJSON(athlete))\n\n return athletes_to_return", "def get_sso_data(self):\n return {\n 'access_token': self.access_token,\n 'refresh_token': self.refresh_token,\n 'expires_in': (\n self.access_token_expires - datetime.utcnow()\n ).total_seconds()\n }", "def fetch_profile(access_token):\n\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n # Fetch athlete profile\n r = requests.get(API_URL + \"/athlete\", headers=headers)\n profile = r.json()\n if \"errors\" in profile:\n raise AuthError(profile[\"message\"])\n\n return {\n \"firstName\": profile[\"firstname\"],\n \"lastName\": profile[\"lastname\"],\n \"imgUrl\": profile[\"profile\"],\n \"profileUrl\": \"https://www.strava.com/athletes/{}\".format(profile[\"id\"]),\n }", "def create_access_token(\n data: tp.Mapping[str, tp.Any],\n *,\n expires_delta: tp.Optional[timedelta] = None\n) -> str:\n to_encode = data.copy()\n expires_delta = expires_delta or DEFAULT_EXPIRES_DELTA\n expires = datetime.utcnow() + expires_delta\n to_encode.update({\"exp\": expires, \"sub\": ACCESS_TOKEN_SUBJECT})\n return jwt.encode(\n to_encode,\n config.SECRET_KEY,\n algorithm=ALGORITHM,\n json_encoder=JSONEncoderUUID\n )", "def get_token(self):\r\n token = {'id': self.catalog['access']['token']['id'],\r\n 'expires': self.catalog['access']['token']['expires'], }\r\n try:\r\n token['user_id'] = self.catalog['access']['user']['id']\r\n token['tenant_id'] = (\r\n self.catalog['access']['token']['tenant']['id'])\r\n except Exception:\r\n # just leave the tenant and user out if it doesn't exist\r\n pass\r\n return token", "def _create_auth_headers(self):\n auth_headers = {**self.get_headers()}\n auth_headers['Authorization'] = 'Bearer ' + self.get_access_token()\n return auth_headers", "def get_access(access_token='',expire_time=0):\r\n #Get a new access token if it expires or is five minutes away from exp#iration\r\n if (expire_time==0) or (len(access_token)==0) or (time.time()-expire_time>=-300):\r\n\r\n #API needed to authorize account with refresh token\r\n auth_url = 'https://api.tdameritrade.com/v1/oauth2/token'\r\n\r\n #Data needed for token\r\n data = {'grant_type':'refresh_token',\r\n 'refresh_token':TDAuth_Info.refresh_token,\r\n 'client_id':TDAuth_Info.client_id}\r\n\r\n #Post the data to get the token\r\n auth_reply_json = requests.post(url=auth_url,data=data)\r\n auth_reply=auth_reply_json.json()\r\n\r\n #Now use the token to get account information\r\n access_token = auth_reply['access_token']\r\n 
expire_time=time.time()+auth_reply['expires_in']\r\n \r\n return (access_token,expire_time)", "def test_to_dict_amenity(self):\n format = \"%Y-%m-%dT%H:%M:%S.%f\"\n holi = Amenity()\n d = holi.to_dict()\n self.assertIsInstance(d, dict)\n for keys in d:\n self.assertTrue(keys, d)\n self.assertTrue('__class__' in d)\n self.assertEqual(d[\"__class__\"], \"Amenity\")\n self.assertIsInstance(d[\"created_at\"], str)\n self.assertIsInstance(d[\"updated_at\"], str)\n self.assertEqual(d[\"created_at\"], holi.created_at.strftime(format))\n self.assertEqual(d[\"updated_at\"], holi.updated_at.strftime(format))", "def _generate_voter_in_dict(id: bytes, timestamp: int, prep: 'Prep') -> dict:\n voter_in_dict = {\n \"id\": '0x' + bytes.hex(id),\n \"timestamp\": timestamp,\n \"address\": str(prep.address),\n \"name\": prep.name,\n \"amount\": prep.delegated\n }\n return voter_in_dict", "def post_amenity_obj():\n dic = {}\n dic = request.get_json(silent=True)\n if dic is None:\n abort(400, \"Not a JSON\")\n if \"name\" not in dic.keys():\n abort(400, \"Missing name\")\n new_ame = amenity.Amenity()\n for k, v in dic.items():\n setattr(new_ame, k, v)\n storage.new(new_ame)\n storage.save()\n return jsonify(new_ame.to_dict()), 201", "def set_auth(self):\n timestamp = str(int(time.time()))\n unique = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(16))\n hashstr = sha1((self.callerid + timestamp +\n self.privatekey + unique).encode('utf8')).hexdigest()\n logger.debug(\"Time from api {}\".format(timestamp))\n\n return {\"callerId\": self.callerid,\n \"time\": timestamp,\n \"unique\": unique,\n \"hash\": hashstr\n }", "def generate_pair(cls, user: User) -> Dict[str, str]:\n if not isinstance(user, User):\n raise PermissionDenied()\n\n refresh_token = RefreshToken.objects.create(user=user)\n access_payload = refresh_token.get_payload_by_token()\n access_payload['type'] = 'access'\n access_token = jwt_encode(access_payload)\n\n return {\n 'access_token': access_token,\n 'refresh_token': refresh_token.token,\n }", "def create_access_token(self):\n\t\t# Wraper for also caching invalid results\n #def getMetadataRofs(path):\n #\ttry:\n # \treturn self.client.metadata(path)\n # except Exception, e:\n # log.write('Exception at getMetadataRofs for path '+ path + '\\n')\n # pprint(e, log)\n # return False\n\n\t\ttry:\n\t\t\trequest_token = self.session.obtain_request_token()\n\t\t\turl = self.session.build_authorize_url(request_token)\n\t\t\tprint url\n\t\t\traw_input()\n\t\t\taccess_token = self.session.obtain_access_token(request_token)\n\t\t\tself.client = client.DropboxClient(self.session)\n\t\t\t\n\t\t\t# Build cache for metadata querying\n\n\t\t\t# Wraper for also caching invalid results\n\t\t\tdef getMetadataRofs(path):\n\t\t\t\ttry:\n\t\t\t\t\treturn self.client.metadata(path)\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tlogger.error('Exception at getMetadataRofs for path '+ path + '\\n')\n\t\t logger.debug(sys.exc_info()[0])\n\t\t\t\t\treturn False\n\n\t\t\tself.cache_metadata = Cache(getMetadataRofs)\n\t\t\tself.cache_files = {}\n\n\t\texcept Exception, e:\n\t\t\tlogger.error('Exception %s at create_access_token' % (sys.exc_info()[0]))\n\t\t\tlogger.debug(pformat(sys.exc_info()))", "def create_temporary_access_token(self, api_token: str) -> dict:\n query = \"\"\"\n mutation CreateToken {\n createMyProfileTemporaryReadAccessToken(input: {}) {\n temporaryReadAccessToken {\n token\n expiresAt\n }\n }\n }\n \"\"\"\n\n path = jmespath.compile(\n \"\"\"\n 
data.createMyProfileTemporaryReadAccessToken.temporaryReadAccessToken.{\n token: token\n expires_at: expiresAt\n }\n \"\"\"\n )\n data = self.do_query(query, api_token=api_token)\n parsed_data = path.search(data)\n self.contains_keys(parsed_data, [\"token\", \"expires_at\"])\n parsed_data[\"expires_at\"] = parse_datetime(parsed_data[\"expires_at\"])\n return parsed_data", "def create_amenity():\n new_amenity_dict = request.get_json(silent=True)\n if new_amenity_dict is None:\n return jsonify({\"error\": \"Not a JSON\"}), 400\n if 'name' not in request.json:\n return jsonify({\"error\": \"Missing name\"}), 400\n new_amenity = Amenity(**new_amenity_dict)\n storage.new(new_amenity)\n storage.save()\n return jsonify(new_amenity.to_dict()), 201", "def get_initial_author_dict():\n adict = {}\n try:\n ah = run_sql(\"select aterm,hitlist from rnkAUTHORDATA\")\n for (a, h) in ah:\n adict[a] = deserialize_via_marshal(h)\n return adict\n except:\n register_exception(prefix=\"could not read rnkAUTHORDATA\", alert_admin=True)\n return {}", "def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:\n to_encode = data.copy()\n if expires_delta:\n expire = datetime.utcnow() + expires_delta\n else:\n expire = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n\n to_encode.update({\"exp\": expire})\n return jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)", "def fetch_stats(access_token, athlete_id):\n\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n # Fetch athlete stats\n r = requests.get(API_URL + \"/athletes/{}/stats\".format(athlete_id), headers=headers)\n stats = r.json()\n if \"errors\" in stats:\n raise AuthError(stats[\"message\"])\n\n return {\n \"recentRuns\": stats[\"recent_run_totals\"],\n \"yearRuns\": stats[\"ytd_run_totals\"],\n \"allRuns\": stats[\"all_run_totals\"],\n }", "def create_amenity():\n my_dict = request.get_json()\n if my_dict is None:\n abort(400, \"Not a JSON\")\n elif \"name\" not in my_dict:\n abort(400, \"Missing name\")\n new_amenity = Amenity(**my_dict)\n new_amenity.save()\n return jsonify(new_amenity.to_dict()), 201", "def generate_oauth_headers(access_token: str) -> dict:\n return {'Authorization': 'Bearer ' + access_token}", "def create_amenity():\n amenity_json = request.get_json()\n if amenity_json is None:\n abort(400, 'Not a JSON')\n if amenity_json.get('name') is None:\n abort(400, \"Missing name\")\n amenity = Amenity(**amenity_json)\n storage.new(amenity)\n storage.save()\n return jsonify(amenity.to_dict()), 201", "def get_agol_token():\n params = {\n 'client_id': app.config['ESRI_APP_CLIENT_ID'],\n 'client_secret': app.config['ESRI_APP_CLIENT_SECRET'],\n 'grant_type': \"client_credentials\"\n }\n request = requests.get(\n 'https://www.arcgis.com/sharing/oauth2/token',\n params=params\n )\n token = request.json()\n print(\"AGOL token acquired: {0}\".format(token))\n return token", "def _standard_token(self):\n return {\n 'iss': 'https://iam-test.idc.eu/',\n 'jti': '098cb343-c45e-490d-8aa0-ce1873cdc5f8',\n 'iat': int(time.time()) - 2000000,\n 'sub': CLIENT_ID,\n 'exp': int(time.time()) + 200000\n }", "def amenity_ret():\n ame_list = []\n all_objs = storage.all(\"Amenity\")\n for obj in all_objs.values():\n ame_list.append(obj.to_dict())\n return jsonify(ame_list)", "def __extract_athletes(self):\n for ath in self.athletes:\n if dl.get_squad_id(ath) not in self.data_engine:\n # Athlete has no squad. 
Just skip over it.\n continue\n\n team_criteria = \\\n self.data_engine[dl.get_squad_id(ath)][\"team_criteria\"]\n\n if not team_criteria:\n # Probably already generated a team for athlete[\"squad_id\"]\n continue\n\n if athlete_match(ath, make_athlete_criteria(team_criteria)):\n self.__update_team_criteria(team_criteria, ath)\n yield ath", "def create_enrollment(context: dict) -> dict:\n enrollment = Enrollment()\n\n for attr in context.keys():\n setattr(enrollment, attr, context[attr])\n\n enrollment.save()\n return enrollment.asdict()", "def to_dictionary(apartment_obj):\n if isinstance(apartment_obj, Apartment):\n return {\"expenses\": apartment_obj.expenses}\n\n raise Exception(\"apartment_obj is not of type Apartment\")", "def generate_new_token(self):\n self.access_token = random_auth_key()", "def make_secure_oauth_cookie(response):\n return {\n \"paypal_session\": response,\n \"expiry\": get_expiry_time(response),\n }", "def post(self):\n current_user = get_jwt_identity()\n return {\n # Mark the token as un-fresh since we used the refresh token to regenerate this\n \"accessToken\": create_access_token(identity=current_user, fresh=False),\n \"userId\": current_user\n }", "def generate_access_token(self):\n return gen_api_key(length=self.token_length)", "async def _a_get_aad_headers(self) -> dict:\n headers = {}\n if \"azure_resource_id\" in self.databricks_conn.extra_dejson:\n mgmt_token = await self._a_get_aad_token(AZURE_MANAGEMENT_ENDPOINT)\n headers[\"X-Databricks-Azure-Workspace-Resource-Id\"] = self.databricks_conn.extra_dejson[\n \"azure_resource_id\"\n ]\n headers[\"X-Databricks-Azure-SP-Management-Token\"] = mgmt_token\n return headers", "def create_auth_token():\n data = get_request_data(request)\n address = data.get(\"address\")\n expiration = int(data.get(\"expiration\"))\n\n pk = get_provider_private_key(use_universal_key=True)\n token = jwt.encode({\"exp\": expiration, \"address\": address}, pk, algorithm=\"HS256\")\n token = token.decode(\"utf-8\") if isinstance(token, bytes) else token\n\n valid, message = is_token_valid(token, address)\n if not valid:\n if message == \"Token is deleted.\":\n force_restore_token(token)\n else:\n return jsonify(error=message), 400\n\n return jsonify(token=token)", "def authorization(self):\n return {'auth-token': '{token}'.format(token=self.token)}", "def authorization(self):\n return {'auth-token': '{token}'.format(token=self.token)}", "def _amenities():\n if request.method == \"GET\":\n all_amenities = []\n for key in storage.all(\"Amenity\").values():\n all_amenities.append(key.to_dict())\n return jsonify(all_amenities)\n\n if request.method == 'POST':\n if not request.is_json:\n return \"Not a JSON\", 400\n\n all_amenities = Amenity(**request.get_json())\n if \"name\" not in all_amenities.to_dict().keys():\n return \"Missing name\", 400\n\n all_amenities.save()\n return all_amenities.to_dict(), 201", "def _authenticate(self):\n url = self.endpoint + \"/tokens\"\n h = httplib2.Http()\n response, rawcontent = h.request(\n url, \n method=\"POST\",\n headers={ \"Content-Type\":\"application/json\" },\n body=json.dumps(self.credentials()))\n content = json.loads(rawcontent)\n self.token = content['access']['token']['id']\n #TODO: this needs to convert the ISO8601 string to a timestamp\n self.expiration = content['access']['token']['expires']\n self.catalog = content['access']['serviceCatalog']", "def return_amenities():\n amenities = list(storage.all(Amenity).values())\n amenity_list = []\n for amenity in amenities:\n 
amenity_list.append(amenity.to_dict())\n return jsonify(amenity_list)", "def __init__(self, authenticator, access_token, expires_in, scope):\n super(ImplicitAuthorizer, self).__init__(authenticator)\n self._expiration_timestamp = time.time() + expires_in\n self.access_token = access_token\n self.scopes = set(scope.split(' '))", "def post_amenity():\n\n if not request.is_json:\n abort(400, description=\"Not a JSON\")\n data = request.get_json()\n\n if \"name\" not in data:\n abort(400, description=\"Missing name\")\n\n obj = Amenity(**data)\n storage.new(obj)\n storage.save()\n return obj.to_dict(), 201", "def access_token(self):\n access = import_string(api_settings.ACCESS_TOKEN_CLASS)()\n\n # Use instantiation time of refresh token as relative timestamp for\n # access token \"exp\" claim. This ensures that both a refresh and\n # access token expire relative to the same time if they are created as\n # a pair.\n access.set_exp(from_time=self.current_time)\n\n no_copy = self.no_copy_claims\n for claim, value in self.payload.items():\n if claim in no_copy:\n continue\n access[claim] = value\n\n access.set_issuer()\n access.set_audience()\n\n # in order to encode token with new claims\n return str(access)", "def get_userinfo_from_access_token(self) -> dict:\n pass", "def get_amenities():\n list_amenities = []\n for amenity in storage.all('Amenity').values():\n list_amenities.append(amenity.to_dict())\n return jsonify(list_amenities)", "def get_amenities():\n amenities_dict_list = [amenity.to_dict() for amenity in\n storage.all(\"Amenity\").values()]\n return jsonify(amenities_dict_list)", "def get_apartment_amenities(self, soup, apartment_dict):\n\n amenities_list_container = soup.find('div', class_='amenities')\n amenities_list = []\n for spantag in amenities_list_container.find_all('span', class_='amenity'):\n amenities_list.append(spantag.text.strip())\n apartment_dict['amenities'] = amenities_list", "def _athlete_endpoint(self, athlete):\n return '{host}{athlete}'.format(\n host=self.host,\n athlete=quote_plus(athlete)\n )", "def test_authtoken_get(self):\n specialdachs = self.fixtures.specialdachs\n oakley = self.fixtures.oakley\n scope = ['id']\n dachsadv = models.AuthClient(\n title=\"Dachshund Adventures\",\n organization=specialdachs,\n confidential=True,\n website=\"http://dachsadv.com\",\n )\n auth_token = models.AuthToken(auth_client=dachsadv, user=oakley, scope=scope)\n token = auth_token.token\n db.session.add(dachsadv, auth_token)\n result = models.AuthToken.get(token)\n self.assertIsInstance(result, models.AuthToken)\n self.assertEqual(result.auth_client, dachsadv)", "def create_random_transaction() -> dict:\n cust_dtls = _get_cust_dtls()\n source_cust = _random_customer(cust_dtls)\n print(f\"Source Customer: {source_cust}\")\n print(f\"Source Customer Type: {type(source_cust)}\")\n rand_amt = _random_amount(int(source_cust[1]))\n print(f\"Random Amt: {type(rand_amt)}\")\n target_cust = _random_customer(cust_dtls)\n print(f\"Target Customer: {target_cust}\")\n rand_event = _random_event()\n print(f\"Event: {str(rand_event)}\")\n\n return {\n # 'activityTimestampUTC': datetime.utcnow().timestamp(),\n 'eventName': rand_event,\n 'sourceAcct': source_cust[0],\n 'targetAcct': target_cust[0],\n 'amount': rand_amt,\n 'customerSpendType': int(source_cust[1]),\n 'currency': 'USD'\n }", "def get_amenities():\n amenities = []\n for amenity in storage.all(Amenity).values():\n amenities.append(amenity.to_dict())\n return jsonify(amenities)", "def test_create_o_auth_access_token(self):\n 
pass", "def look_up_a_token():\n try:\n data = request.get_json(force=True)\n except Exception:\n data = None\n if data:\n tok = data['token']\n else:\n tok = request.headers.get('TOK_ID')\n request.data\n\n try:\n creation_time = int(round(datetime.timestamp(tokens[tok]), 0))\n issue_time = tokens[tok].isoformat()\n except Exception:\n _now = datetime.now(UTC)\n creation_time = int(round(datetime.timestamp(_now)))\n issue_time = _now.isoformat()\n tokens[tok] = _now\n expire_time = datetime.fromtimestamp(creation_time + 2764790)\n\n return jsonify({\n \"data\": {\n \"accessor\": \"8609694a-cdbc-db9b-d345-e782dbb562ed\",\n \"creation_time\": creation_time,\n \"creation_ttl\": 2764800,\n \"display_name\": \"fooname\",\n \"entity_id\": \"7d2e3179-f69b-450c-7179-ac8ee8bd8ca9\",\n \"expire_time\": expire_time.isoformat(),\n \"explicit_max_ttl\": 0,\n \"id\": tok,\n \"identity_policies\": [\n \"dev-group-policy\"\n ],\n \"issue_time\": issue_time,\n \"meta\": {\n \"username\": \"tesla\"\n },\n \"num_uses\": 0,\n \"orphan\": True,\n \"path\": \"auth/kubernetes/login\",\n \"policies\": [\n \"default\"\n ],\n \"renewable\": True,\n \"ttl\": 2764790\n }\n })", "def get_auth_token_teacher():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def createEvent(self, aid, time, bz, location):\n\n event = Event()\n\n a_id = EventId()\n a_id.setHashed(aid)\n admin = User.getById(a_id)\n event.admin = admin\n\n date = EventDatetime()\n date.fromString(time)\n event.datetime = date\n\n event.description = bz\n\n event.location = location\n\n event.create()\n\n return event.getAsDict()", "def create(self, request):\n return ObtainAuthToken().post(request)", "def create_asa(self):\n self.asa_id = blockchain_utils.create_algorand_standard_asset(client=self.client,\n creator_private_key=self.app_creator_pk,\n unit_name=self.asa_unit_name,\n asset_name=self.asa_asset_name,\n total=1,\n decimals=0,\n manager_address=self.app_creator_address,\n reserve_address=self.app_creator_address,\n freeze_address=self.app_creator_address,\n clawback_address=self.app_creator_address,\n default_frozen=True)", "def create(self):\n id_access_secretkey = uuid.uuid4()\n id_webuser = Base.logged_id_webuser or None\n keys = Token().generate_secretkey(config.PACKAGE_NAME)\n\n with Database() as db:\n db.insert(Table(id_access_secretkey, id_webuser, config.PACKAGE_NAME,\n keys['randomkey'], keys['secretkey']))\n db.commit()\n\n return {\n 'secretkey': keys['secretkey'],\n 'message': 'access secretkey successfully created'\n }", "def __init__(self,\n app_creator_pk: str,\n app_creator_address: str,\n asa_unit_name: str,\n asa_asset_name: str,\n app_duration: int,\n teal_version: int = 3):\n self.app_creator_pk = app_creator_pk\n self.app_creator_address = app_creator_address\n self.asa_unit_name = asa_unit_name\n self.asa_asset_name = asa_asset_name\n self.app_duration = app_duration\n self.teal_version = teal_version\n\n self.client = developer_credentials.get_client()\n self.approval_program_code = approval_program()\n self.clear_program_code = clear_program()\n\n self.app_id = -1\n self.asa_id = -1\n self.asa_delegate_authority_address = ''\n self.algo_delegate_authority_address = ''", "def activate():\n try:\n body = request.get_json()\n\n activate_token = body[\"activate_token\"]\n password = body[\"password\"]\n\n if len(password) < 3 or len(password) > 50:\n return bad_request()\n\n if not models.token_exists(activate_token):\n\n return bad_request()\n\n 
student_hash = models.create_hash(password)\n models.save_hash(student_hash, activate_token)\n\n except KeyError:\n return bad_request()\n except Exception as e:\n print(e)\n return server_error()\n\n return created()", "def create_bearer_token(self):\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n\n data = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token,\n \"client_id\": self.client_id,\n \"client_secret\": self.client_secret,\n }\n\n r = requests.post(self.token_endpoint, headers=headers, data=data)\n\n if r.status_code == 200:\n logging.info(\"Successfully obtained bearer token\")\n self.bearer_token = r.json()[\"access_token\"]\n else:\n logging.warning(\"HTTP Error {}\".format(r.status_code))", "def get_time():\n return {\n 'timestamp': datetime.now()+ timedelta(hours=-1)\n }", "def amazon_authorization(access_token):\n b = BytesIO()\n # verify that the access token belongs to us\n c = pycurl.Curl()\n c.setopt(pycurl.URL, \"https://api.amazon.com/auth/o2/tokeninfo?access_token=\" + urllib.quote_plus(access_token))\n c.setopt(pycurl.SSL_VERIFYPEER, 1)\n c.setopt(pycurl.WRITEFUNCTION, b.write)\n c.perform()\n d = json.loads(b.getvalue())\n if d['aud'] != config.YOUR_CLIENT_ID:\n # the access token does not belong to us\n raise BaseException(\"Invalid Token\")\n\n # exchange the access token for user profile\n b = BytesIO()\n c = pycurl.Curl()\n c.setopt(pycurl.URL, \"https://api.amazon.com/user/profile\")\n c.setopt(pycurl.HTTPHEADER, [\"Authorization: bearer \" + access_token])\n c.setopt(pycurl.SSL_VERIFYPEER, 1)\n c.setopt(pycurl.WRITEFUNCTION, b.write)\n c.perform()\n d = json.loads(b.getvalue())\n return d", "def _add_sponsor(data: dict) -> dict:\n user = create_user()\n user['name'] = data['agency']\n user['type'] = 32\n if 'agency_class' in data:\n user['tag'] = data['agency_class']\n return user", "def post_amenity():\n the_json = request.get_json()\n if not the_json:\n abort(400, 'Not a JSON')\n if 'name' not in the_json:\n abort(400, 'Missing name')\n new_amenity = Amenity(**request.get_json())\n storage.new(new_amenity)\n storage.save()\n return jsonify(new_amenity.to_dict()), 201", "def _get_access_token_with_backoff_strategy(self) -> dict:\n self._backoff_strategy(demisto.getIntegrationContext())\n return self._get_access_token()", "def build_response_dict(self):\n return {\n \"release\": self.settings['bookstore'][\"release\"],\n \"features\": self.settings['bookstore'][\"features\"],\n }", "def state_attributes(self):\n attrs = {\"access_token\": self.access_tokens[-1]}\n\n if self.model:\n attrs[\"model_name\"] = self.model\n\n if self.brand:\n attrs[\"brand\"] = self.brand\n\n if self.motion_detection_enabled:\n attrs[\"motion_detection\"] = self.motion_detection_enabled\n\n if self.supports_doorbell_chime:\n attrs[\"doorbell_chime\"] = self.supports_doorbell_chime\n\n return attrs", "def create_account(self, account_tree):\n\n # Couple this object to the account object in order\n # to access the request_xml methods and other account info\n account_data = dict()\n account_data['client'] = self\n\n for param in account_tree.iter('CardData'):\n name = param.get('name',\"NA\")\n if name != \"NA\":\n account_data[name] = param.text\n\n for summary_element in account_tree.iter('AccountSummaryData'):\n key = 'value' if 'value' in summary_element.attrib else 'formattedValue'\n name = summary_element.get('name',\"NA\")\n if name != \"NA\":\n account_data[name] = summary_element.attrib[key]\n\n # Extract the loyalty 
programmes from the XML\n # for element in account_tree.findall('LoyaltyData/RewardsData/param'):\n # name = element.attrib['label']\n # value = element.attrib['formattedValue'].replace(',', '')\n # loyalty_programme = LoyaltyProgramme(name, value)\n # self.loyalty_programmes.append(loyalty_programme)\n\n\n return CardAccount(account_data)", "def create(self, request):\n\n return ObtainAuthToken().post(request)", "def get_antags(self):\n antags = []\n for obj in self.antagobjs.group_by(AntagObjective.mindkey):\n antag = {'key': obj.mindkey, 'name': obj.mindname, 'role': obj.special_role}\n antags.append(antag)\n return antags", "def generate_auth_dict_ws(self,\n nonce: int):\n return {\n \"algo\": \"HS256\",\n \"pKey\": str(self.api_key),\n \"nonce\": str(nonce),\n \"signature\": hmac.new(self.secret_key.encode('utf-8'),\n str(nonce).encode('utf-8'),\n hashlib.sha256).hexdigest()\n }", "def test_create_application_credential_expires(self):\n expires_at = timeutils.utcnow() + datetime.timedelta(hours=1)\n\n app_cred = self.create_application_credential(expires_at=expires_at)\n\n expires_str = expires_at.isoformat()\n self.assertEqual(expires_str, app_cred['expires_at'])", "def __init__(self):\n #print (\"Object created\")\n self.apikey='acc_4fc1a435b3188b5'\n self.secret = 'f49c4be14a048d5de7e7f6c564b52022'\n self.fileToIdMap = {}", "def all_amenities():\n amenities_list = []\n for amenity in storage.all(Amenity).values():\n amenities_list.append(amenity.to_dict())\n return jsonify(amenities_list)", "def build_access_token_expired():\n return do_build_access_token(tenant_id='intility_tenant_id', expired=True)", "def get_access_token(credentials={}):\n client_id = credentials['client_id']\n client_secret = credentials['client_secret']\n\n if client_id == None or client_secret == None:\n return None\n\n # POST request for token\n response = requests.post('https://auth.domain.com.au/v1/connect/token', \n data = {'client_id':client_id,\n \"client_secret\":client_secret,\n \"grant_type\":\"client_credentials\",\n \"scope\":\"api_listings_read api_listings_write\",\n \"Content-Type\":\"text/json\"})\n token=response.json()\n expire = datetime.now() + timedelta(seconds=token['expires_in'])\n print (f'token expires at {expire}')\n\n access_token = {}\n access_token['access_token'] = token['access_token']\n access_token['expire_at'] = expire\n\n return access_token", "def _get_aad_headers(self) -> dict:\n headers = {}\n if \"azure_resource_id\" in self.databricks_conn.extra_dejson:\n mgmt_token = self._get_aad_token(AZURE_MANAGEMENT_ENDPOINT)\n headers[\"X-Databricks-Azure-Workspace-Resource-Id\"] = self.databricks_conn.extra_dejson[\n \"azure_resource_id\"\n ]\n headers[\"X-Databricks-Azure-SP-Management-Token\"] = mgmt_token\n return headers", "def describe_account_attributes():\n pass", "def __init__(self):\n\n self._authorization = None\n self._last_used = datetime.utcnow() - timedelta(hours=10)\n\n self._resource_owner_key = None\n self._resource_owner_secret = None\n\n self._consumer_key = etrade_config.oauth_consumer_key\n self._consumer_secret = etrade_config.oath_consumer_secret\n\n self._auth_file_path = etrade_config.auth_file_path\n self._user_name = etrade_config.user_name\n self._user_pwd = etrade_config.user_pwd", "def get_access_token():\n\n # Request the access token using app's id and secret\n response = requests.post('https://api.yelp.com/oauth2/token',\n data={\n 'grand_type': 'client_credentials',\n 'client_id': os.environ['YELP_APP_ID'],\n 'client_secret': 
os.environ['YELP_APP_SECRET']\n })\n\n return response.json()['access_token']", "def get_azure_access_token_mock() -> dict:\n return {\n 'access_token': 'my-access-token',\n 'expires_in': 3595,\n 'refresh_token': 'my-refresh-token',\n }", "def generate_aead(hsm, args):\n key = get_oath_k(args)\n # Enabled flags 00010000 = YSM_HMAC_SHA1_GENERATE\n flags = struct.pack(\"< I\", 0x10000)\n hsm.load_secret(key + flags)\n nonce = hsm.get_nonce().nonce\n aead = hsm.generate_aead(nonce, args.key_handle)\n if args.debug:\n print \"AEAD: %s (%s)\" % (aead.data.encode('hex'), aead)\n return nonce, aead", "def __init__(self):\n self.aeropuertos = {}", "def build_dictionary(self):\n return BertTokenizerDictionaryAgent(self.opt)", "def seat_profile(first, last, **passenger_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in passenger_info.items():\n profile[key] = value\n return profile", "def who():\n cleanup()\n return {'available': userlist(), 'eta': data['etas'], 'etd': data['etds'], 'lastlocation': data['lastlocation'], 'ceitloch': ceitloch(), 'reminder': data['reminder']}", "def generate_auth_token(self, expiration):\n ser = Serializer(current_app.config['SECRET_KEY'],\n expires_in=expiration)\n return ser.dumps({'id': self.id}).decode('utf-8')", "def create(self, atomic_desc, atomic_numbers=[]):\n return self.acronym, []", "def account(self):\n provider_name = self.provider_entry.get_text()\n provider = Provider.get_by_name(provider_name)\n\n # Create a new provider if we don't find one\n if not provider:\n provider_image = self.provider_image.image\n provider_website = self.provider_website_entry.get_text()\n provider = Provider.create(provider_name, provider_website, None,\n provider_image)\n # Update the provider image if it changed\n elif provider and self.provider_image.image != provider.image:\n provider.update(image=self.provider_image.image)\n\n account = {\n \"username\": self.account_name_entry.get_text(),\n \"provider\": provider\n }\n if not self.props.is_edit:\n # remove spaces\n token = self.token_entry.get_text()\n account[\"token\"] = \"\".join(token.split())\n return account", "def create_authorization_header(self, **kwargs):\n return {\"Authorization\": \"Bearer {}\".format(self.create_jwt(**kwargs))}", "def from_dictionary(dictionary):\n if \"expenses\" in dictionary and sorted(dictionary[\"expenses\"]) == sorted(EXPENSE_TYPES):\n apartment_temp = Apartment()\n apartment_temp.expenses = dictionary[\"expenses\"]\n return apartment_temp\n\n raise Exception(\"dictionary is not compatible with Apartment\")", "def agencia(request):\n return {'agencia':Agencia.get_activa(request)}", "def create_access_token(oauth):\n #create parameters for API authorization\n\tredirect_uri = 'oob'\n\tparams = {'client_secret': oauth.client_secret,\n\t\t\t 'redirect_uri': redirect_uri,\n\t\t\t 'response_type': 'code'}\n\t#store the access code\n\turl = oauth.get_authorize_url(**params)\n\n\t#open a web browser to get access token and then store it via manual input\n\twebbrowser.open(url)\n\tcode = input('Enter code: ')\n\t#create credentials item\n\tstart_time = time.time()\n\t#create dictionary to hold credentials and store beginning time\n\tcredentials = {'token_time': start_time}\n\n\t#NEED TO ADD IN 'REFRESH TOKEN' FUNCTION HERE SOMEWHERE\n\t#\n\t\n\t#create parameters\n\tdata = {'code': code,\n\t\t\t'redirect_uri': redirect_uri,\n\t\t\t'grant_type': 'authorization_code'}\n\t#build the headers\n\theaders = oauth_headers(oauth)\n\t#create the raw 
access token\n\traw_access = oauth.get_raw_access_token(data=data, headers=headers)\n\t#parse the raw access token and add to credentials variable\n\tcredentials.update(access_parse(raw_access))\n\n\t#parse access token from credentials\n\taccess_token = credentials['access_token']\n\t#return access token\n\treturn access_token", "def post_amenity():\n body = request.get_json()\n if not body:\n abort(400, \"Not a JSON\")\n if body.get(\"name\") is None:\n abort(400, \"Missing name\")\n amenity = Amenity(**body)\n amenity.save()\n return jsonify(amenity.to_dict()), 201", "def getAircraftCodeDict():\n table = 'aircraft'\n connection = openConnection()\n curs = connection.cursor()\n sqlcmd = \"SELECT * FROM \" + str(table)\n d = {}\n \n curs.execute(sqlcmd)\n for row in curs.fetchall():\n aircraft = airlineClasses.Aircraft()\n aircraft.aircraftCode = row[0]\n aircraft.name = row[1]\n d[aircraft.aircraftCode] = aircraft\n \n curs.close()\n connection.close()\n return d", "def get_bearer_token(self):\n key = quote(self.api_key)\n secret = quote(self.api_secret)\n bearer_token = base64.b64encode(\"{}:{}\".format(key,\n secret).encode(\"utf8\"))\n\n post_headers = {\n \"Authorization\": \"Basic {0}\".format(bearer_token.decode(\"utf8\")),\n \"Content-Type\": \"application/x-www-form-urlencoded;charset=UTF-8\",\n }\n response = requests.post(\n url=\"https://api.twitter.com/oauth2/token\",\n data={\"grant_type\": \"client_credentials\"},\n headers=post_headers,\n )\n token_info = response.json()\n self.bearer_token = token_info", "def create(self):\n\n self._calculate_hp()\n self.race.alterAbilities()\n self.race.racialAbilities()", "def create_amenity_place(place_id, amenity_id):\n place = storage.get(Place, place_id)\n if place is None:\n abort(404, description=\"Not Found\")\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404, description=\"Not Found\")\n\n if amenity in place.amenities:\n return jsonify(amenity.to_dict())\n\n place.amenities.append(amenity)\n storage.new(place)\n storage.save()\n return jsonify(amenity.to_dict())" ]
[ "0.6095435", "0.5521886", "0.5451787", "0.5388297", "0.5352944", "0.5180581", "0.5166357", "0.5155647", "0.5131322", "0.51014733", "0.50938517", "0.50851166", "0.5080323", "0.5078669", "0.505904", "0.50572944", "0.5016414", "0.50079054", "0.49948767", "0.49728522", "0.49649662", "0.49618575", "0.49447498", "0.49316362", "0.49138573", "0.4899521", "0.48984912", "0.48825777", "0.48758575", "0.4871646", "0.48689035", "0.48679847", "0.48625776", "0.48622802", "0.4851304", "0.48476398", "0.4836382", "0.4836382", "0.4834808", "0.4825441", "0.4819236", "0.48172885", "0.48131356", "0.4808197", "0.47988257", "0.47941643", "0.47912034", "0.47830147", "0.47821403", "0.47804388", "0.47664455", "0.47659153", "0.47361293", "0.47345778", "0.47332984", "0.47234866", "0.47105393", "0.46994874", "0.46919277", "0.46842426", "0.46801972", "0.4677983", "0.46771953", "0.46726924", "0.4670116", "0.46650854", "0.4657511", "0.464551", "0.4644783", "0.46442285", "0.46431398", "0.46375942", "0.4637531", "0.4633765", "0.46336836", "0.46310377", "0.46273187", "0.46256506", "0.46239597", "0.4620655", "0.4615205", "0.46088567", "0.46069074", "0.46065524", "0.4605996", "0.46032828", "0.46005312", "0.46002027", "0.4592612", "0.4590222", "0.4588358", "0.45807976", "0.4580668", "0.4577914", "0.4577176", "0.45726183", "0.45705187", "0.4568624", "0.4567396", "0.456493" ]
0.7620931
0
Writes activity to Splunk index.
def write_to_splunk(**kwargs): event = helper.new_event(**kwargs) ew.write_event(event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, data):\n activities = [json.loads(activity['Json']) for activity in data]\n\n for i in range(len(activities)):\n activities[i]['created_at'] = to_datetime(activities[i]['created_at'])\n\n with Elastic(index='wink', doc_type='activity') as elastic:\n elastic.upload(activities, 'created_at')\n\n Log.info(\"Successfully uploaded wink activity data into elasticsearch.\")", "def write(self, record):\n # Make Splunk ready payload data and append it to self._buffers list.\n self._buffer.append({\n 'index': self._index,\n 'sourcetype': 'json',\n 'event': record\n })\n\n # If the records count in self._buffer is more than allowed by\n # self._buffer_size, send those records to Splunk.\n if len(self._buffer) >= self._buffer_size:\n self._flush()", "def write_index(self):\n self.Lock = True\n self.file_out.seek(self.index_offset)\n for identifier, offset in self.index.items():\n self._write_identifier(identifier)\n self._write_offset(offset)", "def write(self, host, index):\n msg = []\n operation = \"WRITE\"\n if not self.create_uid(host, index):\n return False\n url = \"%s%s%s\" % (\"http://\", host, \"/api/put\")\n payload = {\"metric\": METRIC_NAME, \"timestamp\": TIMESTAMP_MILLIS(), \\\n \"value\": METRIC_VAL, \"tags\":{TAGK: \"%s.%d\" % (TAGV, index)}}\n headers = {\"content-type\": \"application/json\"}\n try:\n response = requests.post(url, data=json.dumps(payload), headers=headers)\n if response.status_code == 204:\n LOGGER.debug(\"Value 1 inserted to metric %s\", METRIC_NAME)\n self.process_resp([], operation, \"1\", index)\n return True\n response_dict = json.loads(response.text)\n msg.append(response_dict[\"error\"][\"message\"])\n LOGGER.warning(\"Unable to write 1, error message is %s\", \\\n response_dict[\"error\"][\"message\"])\n self.process_resp(msg, operation, \"0\", index)\n return False\n except requests.exceptions.ConnectionError as ex_message:\n LOGGER.warning(\"Unable to write 1, error message is %s\", str(ex_message))\n self.process_resp([str(ex_message)], operation, \"0\", index)\n return False", "def save_index(self):\n vsn_objs = [dict(Id = v['id'], Name = v['name']) for v in self.versions]\n self.backend.write_json(dict(\n Versions = vsn_objs,\n Channels = [], # This is unused.\n ApiVersion = 0,\n ), self.index_path())", "def write(self, index, data):\n isNotFirstCmd = False\n # Write Opcode\n self.__ser_wr_trans(RG_WR, isNotFirstCmd)\n isNotFirstCmd = True\n # Write Address\n self.__ser_wr_trans(index, isNotFirstCmd)\n # Write Data\n self.__ser_wr_trans(data, isNotFirstCmd)", "def write(cls, activity_type, actor, target, data):\n\n activity_log = ActivityLog.objects.create(\n activity_type=activity_type, actor=actor, target=target, data=data\n )\n cls.notify(activity_log)", "def write_activityMessage(self, value):\n # PROTECTED REGION ID(SdpMasterLeafNode.activityMessage_write) ENABLED START #\n self.update_attr_map(\"activityMessage\", value)\n # PROTECTED REGION END # // SdpMasterLeafNode.activityMessage_write", "def _write_shard(filename, dataset, indices):\n with tf.io.TFRecordWriter(filename) as writer:\n for j in indices:\n writer.write(dataset[j])", "def _write_shard(filename, dataset, indices):\n with tf.python_io.TFRecordWriter(filename) as writer:\n for j in indices:\n writer.write(dataset[j])", "def _flush(self):\n buffer_len = len(self._buffer)\n\n if buffer_len == 0:\n _log.info('No pending records to index; URI: %s; index: %s',\n self._uri, self._index)\n return\n\n _log.info('Indexing %d records; URI: %s; index: %s ...',\n buffer_len, 
self._uri, self._index)\n\n headers = {'Authorization': 'Splunk ' + self._token}\n\n try:\n response = self._session.post(self._uri,\n headers=headers,\n data=json.dumps(self._buffer),\n verify=self._ca_cert)\n\n log_data = ('URI: {}; index: {}; response status: {}; '\n 'response content: {}'\n .format(self._uri, self._index,\n response.status_code, response.text))\n\n if response.status_code != 200:\n _log.error('Failed to index %d records; HTTP status '\n 'code indicates error; %s',\n buffer_len, log_data)\n return\n\n try:\n j = response.json()\n except Exception as e:\n _log.error('Failed to get JSON from response; %s; '\n 'error: %s; %s', log_data, type(e).__name__, e)\n return\n\n if j['code'] != 0:\n _log.error('Failed to index %d records; Splunk status '\n 'code in JSON indicates error; %s',\n buffer_len, log_data)\n return\n\n _log.info('Indexed %d records; %s', buffer_len, log_data)\n del self._buffer[:]\n\n except requests.ConnectionError as e:\n _log.error('Failed to index %d records; connection error; '\n 'URI: %s; index: %s; error: %s: %s; ',\n buffer_len, self._uri, self._index,\n type(e).__name__, e)\n\n except Exception as e:\n _log.error('Failed to index %d records; unexpected error; '\n 'URI: %s; index: %s; error: %s: %s',\n buffer_len, self._uri, self._index,\n type(e).__name__, e)", "def write(self, segment, result):\n pass", "def write(self, batch):\n time.sleep(self.WRITE_DELAY)", "def write_info_to_file(self):\n\n self.info.write_mission_info()\n\n self.logger.info(\"Mission instance write succeeded.\")", "def sendIndex(self):\n self.updateIndex()\n outpkg = json.dumps(self.serverindex)\n self.send(outpkg)", "def push_write(self, s):\n ...", "def save(self):\n self.index.saveIndex(c.index_path('hnsw.index'))\n joblib.dump(self.ys, \"%s.ys\" % self.index_file_prefix)", "def _write(self, location, data):\n self._connector.write(location=location, data=data)", "def write_activityMessage(self, value):\n self.update_attr_map(\"activityMessage\", value)", "def write(self):", "def write(self):", "def save_index(self, fn):\n utils.save_obj(self.tweetTerms, \"TweetTerm_%s\" % (self.counterOfTweetTermsFiles))\n self.computeTfIdf(self.counterOfTweets)\n self.deleteSingleEntities()\n inv_dict = {'inverted_idx': self.inverted_idx, 'posting': self.postingFiles}\n utils.save_obj(inv_dict, fn)", "def write(self):\n\t\traise NotImplementedError('%s: No write function implemented!' 
% self.name)", "def write(self, file):\n pos = file.tell()\n pickle.dump((self.index, self.meta, self.info), file)\n file.seek(0)\n\n # update the header with the position of the content index.\n file.write(struct.pack('<Q', pos))", "def _write_stream(self):\n enrich_df = self._process_stream()\n df_writer = enrich_df \\\n .writeStream \\\n .queryName(\"Agro Data Writer\") \\\n .foreachBatch(db_utils.foreach_batch_function) \\\n .option(\"checkpointLocation\", \"chk-point-dir\") \\\n .trigger(processingTime=\"1 minute\") \\\n .start()\n\n df_writer.awaitTermination()", "def write_index(self, file_name):\n self.df_index.to_csv(file_name, sep='\\t')", "def save(self) -> None:\n try:\n js = json.loads(\n self.reset_index().to_json(orient=\"records\", date_format=\"iso\")\n )\n\n with open(self._fp, \"w\") as f:\n f.writelines(json.dumps(js, indent=4))\n logger.debug(f\"Saved index to {self._fp}\")\n except Exception as e:\n logger.error(f\"Could not update database -- {e}\")", "def write_to_index(self,write_dict):\n self.__mode = self.WRITE_MODE\n if not self.__storage:\n self.__load_index()\n try:\n for key,value in write_dict.iteritems():\n self.__storage[key]=value\n except Exception,e:\n print e\n self.__storage = None\n return False\n\n self.__close_storage()\n return True", "def write_log(self):\n with open(self.trav_stat_file, 'a') as stat_file:\n travel_writer = csv.writer(stat_file)\n # Every row starts with the start and destnation\n row = [self.start, self.dest]\n # This uses a static list so that the order is fixed\n for state in [\"waiting\", \"riding\", \"transferring\"]:\n state_total = sum(self.time_record[state])\n row.append(state_total)\n travel_writer.writerow(row)", "def _write_index(self):\n # Make sure to only write non-default objects to the index.\n self.store.write_object(\n object=[obj.to_dict() for ns in self.index.values() for obj in ns.values() if not obj.is_default],\n object_id=self.identifier\n )", "def write(self):\n pass", "def write(self):\n pass", "def write(self, s):\n ...", "def create_sync_entry(ts, coll, idx):\n sync_log = connection.ElasticLogs()\n sync_log.ts = ts\n sync_log.coll = unicode(coll)\n sync_log.idx = unicode(idx)\n sync_log.save()\n return True", "def write_run(run):\n r=Run(run)\n r.write_all()", "def write(self, content):\n ...", "def _writeBuffer(self, dataset, datasetName, buf, idxName, sparse=False):\n # compute end index\n if type(buf) is list:\n end = self.idxs[idxName] + len(buf)\n else:\n end = self.idxs[idxName] + buf.shape[0]", "def put(self,item,index=None):\n self.inbound.put((item,index))", "def export(self, buffer: IO[str], ind: str = '') -> None:\n buffer.write(ind + self.as_keyvalue())", "def update_index(signum):\n cdx = redis_cli.zrange('ipfs:cdxj', 0, -1)\n cdx = ''.join(cdx)\n buff = BytesIO(cdx)\n\n # Add New Index\n res = ipfs_api.add(CustomNameStream(buff, 'index.cdxj'))\n print('Updating Index: ' + str(res))\n\n # Register with IPNS\n res = ipfs_api.name_publish(res['Hash'])\n print res", "def write(self, out):", "def write(self, buf: AnyWritableBuf, /) -> int | None:", "def write_main_index(self):\n\n for miEntry in self.mainIndex:\n self.db_file.write(miEntry.get_representation())", "def write(self, value: int, /) -> None:", "def save(self):\n logging.debug(\"environment save entered\")\n filename = \"index.json\"\n content_dict = {}\n for fpname in self.footprints:\n # for now, just using the patteern ${footprint_name}-metadata for the name \n content_dict[fpname] = fpname\n content = json.dumps(content_dict)\n 
index = cf.store_object(self.container, filename, content) \n return True", "def log(self, obj, action):\n action_dict = {'time': time.time(),\n 'action': action}\n self.log_data[obj.get_obj_id()]['actions'].append(action_dict)", "def write():\n pass", "def add_workspace_to_index(self, ctx, params):\n # ctx is the context object\n #BEGIN add_workspace_to_index\n #END add_workspace_to_index\n pass", "def write(self):\n raise NotImplementedError", "def commit(self, state):\n # TODO: User optimistic concurrency control via\n # \"version_type=external_gte\"\n return self.client.index(\n index=self.index,\n id=self.document_id,\n body=state\n )", "def WriteFlowLogEntry(self, entry: rdf_flow_objects.FlowLogEntry) -> None:\n key = (entry.client_id, entry.flow_id)\n\n if key not in self.flows:\n raise db.UnknownFlowError(entry.client_id, entry.flow_id)\n\n entry = entry.Copy()\n entry.timestamp = rdfvalue.RDFDatetime.Now()\n\n self.flow_log_entries.setdefault(key, []).append(entry)", "def index_batch(self,batch):\n pass", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write_sub_index(self):\n for sie in self.subIndex:\n self.db_file.write(sie.get_representation())", "def write(self, buffer):\n utils.print_for_unimplemented_functions(SPI.write.__name__)\n telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_SPI)", "def write(self, notification):", "def report_action(self, action_name):\n last_index = self.configuration.results.shape[0] - 1\n self.configuration.results.loc[last_index, 'action'] = action_name", "async def send_to_elastic(self, data, index='wallarm'):\n self.es.index(body=data, index=index)\n return print('Sent successfully')", "def _save_chromosome_at_index(self, index, file_name):\n how_to_open = 'w' if index == 0 else 'a'\n with open(file_name, how_to_open) as out_file:\n for category in self.population[index].get_genes():\n out_file.write(''.join(category) + '\\t')\n out_file.write(\n '\\n{}\\n'.format(self.population[index].get_fitness())\n )", "def log_activity(self, log_entry):\n # open log file in \"append mode\"\n with open(self.log_filename, mode='a') as log_file:\n writer = csv.DictWriter(log_file, fieldnames=LogEntry.ENTRY_ORDER)\n # add a row to the log: the attributes of log_entry, in fieldnames order\n writer.writerow(log_entry.__dict__)", "def write_to_file_ann(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")", "async def exec_write(self, query, *args):", "def _OpenWrite(self):\n if self._last_stream_numbers['event'] == 1:\n self._WriteStorageMetadata()", "def last_write_out(self):\n end_name = self.comp_name\n self.write_out_part_counter += 1\n end_name += \"/streaming/p\" + str(self.write_out_part_counter)\n end_streaming_content_object = Content(end_name, \"sdo:endstreaming\")\n self.cs.add_content_object(end_streaming_content_object)\n print(\"[last_write_out] Last entry in content store:\", self.cs.get_container()[-1].content.name,\n self.cs.get_container()[-1].content.content)", "def save_metrics(self):\n self.data_stats.write.format(\"org.apache.spark.sql.cassandra\").mode(\"append\").options(table=self.cassandra_stats_table, keyspace=self.cassandra_keyspace).save()\n print (\"Saved data successfully\")", "def store_elasticsearch(self, item):\n 
self.datastore.create(\n index=\"dminer-dreammarket-{date}\".format(\n date=datetime.datetime.strptime(item[\"timestamp\"], \"%Y:%m:%d %H:%M:%S\").date().strftime(\"%Y-%m-%d\")\n ),\n doc_type= \"dreammarket_listing\",\n body=item\n )", "def aws_write(vl, flusher):\n flusher.add_metric(vl)", "def stax(self, addr):\n\n self.mem_if.write(addr, self.reg.accum, index=self.reg.idx)", "def write(self, fname):\n pass", "def post_activities():\n pass", "def post(self):\n try:\n id = self.request.get('batch_id')\n batch = Batch.get_by_id(long(id))\n except ValueError:\n # Flush tasks with bad batch ids\n self.response.set_status(200)\n return\n\n if not batch:\n logging.warning(\"Batch not found %s.\" % id)\n return\n\n alltweets = Tweet.from_batch(batch)\n tweets_by_topic = Topic.link_topics(alltweets)\n ontopic = set()\n topic_activity = {}\n for topic, tweets in tweets_by_topic.items():\n key = str(topic.key())\n topic_activity[key] = len(tweets)\n ontopic.update(tweets)\n taskqueue.Task(\n url='/tasks/truncate', params={'key': key}\n ).add('truncate')\n\n taskqueue.add(\n url='/tasks/activity',\n params={\n 'values': simplejson.dumps(topic_activity),\n 'batchsize': len(alltweets)\n }\n )\n\n batch.delete()\n db.put(ontopic)", "async def write(self, key: str, item: ResponseOrKey):", "def _commit_to_index( env_dict ):\n from indexer.solr import adapter as adapter_file\n\n adapter = adapter_file.adapter(env_dict)\n adapter.commit()\n if env_dict[\"indexer\"][\"optimise\"]:\n adapter.optimise(maxSegments=1)", "def write(self, id, data):\n raise NotImplementedError", "def register_write(self):\n self._writes_since_check += 1", "def entities_write_index(schema):\n schema = model.get(schema)\n return schema_index(schema, SETTINGS.INDEX_WRITE)", "def write_data():", "def write_search_index(self, search_index):\n self.logger.info('writing search index')\n with tempfile.NamedTemporaryFile(mode='w', dir=str(self.output_path),\n encoding='utf-8',\n delete=False) as index:\n try:\n json.dump(search_index, index,\n check_circular=False, separators=(',', ':'))\n except BaseException:\n index.delete = True\n raise\n else:\n os.fchmod(index.file.fileno(), 0o664)\n os.replace(index.name, str(self.output_path / 'packages.json'))", "def WRR(self, meta, pkt):\n # flowID is just sport field\n flowID = pkt.sport\n\n if flowID not in self.istate.flow_last_rank.keys():\n rank = self.istate.max_rank + 1\n self.istate.num_active_flows += 1\n self.istate.flow_cnt[flowID] = 1\n else:\n weight = self.istate.flow_weight[flowID]\n if (self.istate.flow_cnt[flowID] == weight):\n rank = self.istate.flow_last_rank[flowID] + self.istate.num_active_flows\n self.istate.flow_cnt[flowID] = 1\n else:\n rank = self.istate.flow_last_rank[flowID]\n self.istate.flow_cnt[flowID] += 1\n\n if rank > self.istate.max_rank:\n self.istate.max_rank = rank\n self.istate.flow_last_rank[flowID] = rank\n meta.ranks[0] = rank\n meta.leaf_node = 0\n yield self.wait_clock()", "def _write_status(self, status, cls=MySQLStatus):", "def update_activity():\n pass", "def write_event(self, event):\n self.events_written.append(event)", "def write_entries(self, entries):\n for entry in entries:\n self.write(entry)", "def enable_activity_log(self):\n self.add_payload('createActivityLog', 'true')", "def store_action_log(self, filename):\n t = self.get_current_timeindex()\n camera_obs = self.get_camera_observation(t)\n self._action_log[\"final_object_pose\"] = {\n \"t\": t,\n \"pose\": camera_obs.object_pose,\n }\n\n with open(filename, \"wb\") as fh:\n 
pickle.dump(self._action_log, fh)", "def track_activity(func):\n @wraps(func)\n def f(*args, **kwargs):\n if g.user is None: return\n entry = Action()\n entry.user_id = g.user.id\n entry.path = request.path\n entry.verb = request.method\n db.session.add(entry)\n db.session.commit()\n\n return func(*args, **kwargs)\n return f", "def _write(self, data):\n self._writer.write(data)", "def write( data ):", "def store_elasticsearch(self, item):\n self.datastore.create(\n index=\"dminer-alphabay-{date}\".format(\n date=datetime.datetime.strptime(item[\"timestamp\"], \"%Y:%m:%d %H:%M:%S\").date().strftime(\"%Y-%m-%d\")\n ),\n doc_type= \"alphabay_listing\",\n body=item\n )", "def put(self, task):\n self.async_vis.get_indices_ls.append(task.id)\n self.model.put(task)", "def write_strand(self, strand):\n if strand['channel'] != self._current_channel \\\n or self._strand_counter == self._reads_per_file:\n self._start_new_file(strand)\n fname = self._write_strand(strand)\n self._index.write('{}\\t{}\\t{}\\t{}\\n'.format(strand['channel'],\n strand['read_attrs']['read_number'],\n self._current_file, fname))\n return", "def save_activity(self, new_activity):\n self.__load_activities_from_file_into_memory()\n super().save_activity(new_activity)\n self.__save_activities_from_memory_to_file()", "def write(data):", "def write_push(self, segment, index):\n self._write_line(VMWriter._PUSH.format(segment, index))", "def write(self, trollpacket):\n\t\tpacket = self.parse_to_dref(trollpacket.name, trollpacket.value)\n\t\tself.xp_write.send(packet)", "def write_to_log(self, message, severity=\"INFO\", src=None):\n if src is None:\n src = self._ioc_name\n msg_time = datetime.datetime.now()\n IsisLogger.executor.submit(\n self._queued_write_to_log, message, severity, src, self.ioc_log_host, self.ioc_log_port, msg_time)", "def flush(self, index):\n url = f'{self.host}{index}/_flush'\n resp = requests.post(url)\n return resp.json()", "def index(self):\n\n\t\tself.db = DB()\n\t\tactivityTuple = self.db.select_all_from(\"activity\")[1]\n\t\ttmpl = lookup.get_template(\"index.html\")\n\t\treturn (tmpl.render(activity=activityTuple))", "def write(self, val: int, idx: Optional[int] = None) -> None:\n if not idx:\n idx = self.ip\n\n self.memory[idx] = val" ]
[ "0.6238934", "0.5661036", "0.56116706", "0.55407304", "0.54127765", "0.5401947", "0.5389567", "0.5381006", "0.53470606", "0.53407896", "0.5288701", "0.5285223", "0.5240927", "0.5197521", "0.5191341", "0.51774335", "0.5176707", "0.51560175", "0.5123677", "0.50669056", "0.50669056", "0.50389946", "0.5025154", "0.5023109", "0.5019835", "0.501346", "0.50076485", "0.5005588", "0.50030744", "0.50013804", "0.49995986", "0.49995986", "0.4990415", "0.49837893", "0.49505594", "0.49054536", "0.4904026", "0.4893266", "0.4888715", "0.48853448", "0.48841196", "0.48836654", "0.48792145", "0.48682675", "0.48624635", "0.4861608", "0.4859948", "0.48599312", "0.4857775", "0.48557952", "0.48424053", "0.4834797", "0.48188624", "0.48188624", "0.48154914", "0.48075897", "0.4804069", "0.4797107", "0.47938088", "0.47869134", "0.47836968", "0.477943", "0.4772516", "0.47704926", "0.47641653", "0.47640163", "0.47609586", "0.4760056", "0.47529805", "0.47472474", "0.47440794", "0.47398132", "0.47338122", "0.47254112", "0.47203085", "0.47137788", "0.47120413", "0.4709017", "0.47088584", "0.47061738", "0.47008634", "0.47005567", "0.46999246", "0.46988228", "0.46969965", "0.4696074", "0.4692133", "0.46888626", "0.46873298", "0.46864653", "0.46846575", "0.46816558", "0.4680329", "0.46762142", "0.46730143", "0.4671764", "0.46663997", "0.4665586", "0.46633852", "0.46589997" ]
0.5662735
1
function to find the least common multiple (LCM)
def lcm(*values):
    values = set([abs(int(v)) for v in values])
    if values and 0 not in values:
        n = n0 = max(values)
        values.remove(n)
        while any(n % m for m in values):
            n += n0
        return n
    return 0
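A few example calls, added for illustration only (they use nothing beyond the lcm function defined above):

lcm(4, 6)       # -> 12
lcm(3, 5, 7)    # -> 105
lcm(8, 0)       # -> 0 (any zero argument short-circuits to 0)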
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pohyb(seznam_tahu, seznam_ovoce, tah,radky, sloupce):\n\n x= seznam_tahu [len(seznam_tahu)-1][0] # [x,y] souradnice noveho tahu\n y= seznam_tahu [len(seznam_tahu)-1][1]\n\n if tah == \"s\": #sever\n y -= 1\n elif tah == \"j\": #jih\n y += 1\n elif tah == \"v\": #vychod\n x += 1\n elif tah == \"z\": #zapad\n x -= 1\n else:\n print(\"Zadal jsi spatne pismeno.\" )\n return()\n\n if x<0 or x>sloupce-1 or y<0 or y>radky-1: #tah mimo pole\n print(\"Tah neni mozny, je mimo hraci pole. Opakuj tah.\")\n elif [x,y] in seznam_tahu: #jiz obsazene policko hadem\n print(\"Tah neni mozny, had uz na nem je. Opakuj tah.\")\n elif [x,y] in seznam_ovoce: #policko s ovocem, vola se funkce snez\n snez (seznam_ovoce, seznam_tahu,[x,y],radky, sloupce)\n else:\n seznam_tahu.append([x,y]) #tah na volne policko, prida se tah a odebere posledni bod\n seznam_tahu.pop(0)", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def exo2():", "def hra(kolo):\r\n\r\n kolo = 0\r\n vitaz = None #zaciname bez vitaza\r\n\r\n global values\r\n\r\n while not vitaz: #dokym nemame vitaza\r\n kolo += 1 #posun kola\r\n if kolo % 2 == 1: #ak sme v neparnom kole aktivny hrac je W(biely)\r\n aktivny_hrac = \"W\"\r\n else: #ak sme v parnom kole aktivny hrac je B(cierny)\r\n aktivny_hrac = \"B\"\r\n\r\n print('Kolo: ' + str(kolo) + '\\n'\r\n + 'Na ťahu: ' + str(aktivny_hrac))\r\n\r\n vykresli_pole(values)\r\n\r\n if aktivny_hrac == \"W\": #ak je W aktivny hrac zavola funkciu pre tah hraca\r\n print('Zadajte súradnice - číslo riadku a stĺpec bez medzery (napr. 1A):')\r\n tah = hrac_input()\r\n\r\n if aktivny_hrac == \"B\": #ak je B aktivny hrac zavola funkciu pre tah pocitaca\r\n tah = hra_pocitac()\r\n\r\n if (prehladavanie_hex_W(\"W\")): #kontrola ci nemame vitaza\r\n vitaz = prehladavanie_hex_W(\"W\")\r\n elif (prehladavanie_hex_B(\"B\")):\r\n vitaz = prehladavanie_hex_B(\"B\")\r\n\r\n if vitaz in [\"W\", \"B\"]: #ak mame vitaza, koniec hry, vypise vitaza\r\n vykresli_pole(values)\r\n print(vitaz + \" \" + \"VYHRAL HRU\")\r\n print('\\n')\r\n print('Ak chcete hrať znovu stlačte 0 a ENTER')\r\n x = input()\r\n if x == \"0\":\r\n values = [[\" \" for i in range(pocet_stlpcov)] for j in range(pocet_riadkov)]\r\n return hra(0)", "def bloqueio_2(tab,jog):\r\n jog*=-1\r\n return vitoria_1(tab,jog)", "def lateral_vazio_8(tab, jog):\r\n for x in [2,4,6,8]:\r\n if eh_posicao_livre(tab,x):\r\n return x", "def podziel(self):\n def fraktal(dlugosc, alpha, poziom):\n \"\"\"Metoda wyznaczajaca fraktal.\n\n Metoda ta przyjmuje dlugosc, kat oraz poziom drzewa.\n Na bazie podanych parametrow wylicza fraktal z podanych w zadaniu wzorow.\n Zwraca liste zawierajaca punkX oraz punktY fraktalu.\n \"\"\"\n#obliczanie punktow punktu Abis dla kazdego poziomu galezi\n x = float(self.p2[0] + self.dlugosc * cos(alpha))\n y = float(self.p2[1] + self.dlugosc * sin(alpha))\n return [round(x), round(y)]\n\n#petla przechodzaca po wszystkich poziomach drzewa\n while self.tmp <= self.poziom:\n#obliczanie grubosci, dlugosci galezi oraz kolorowanie jej\n self.grubosc = float((2 * self.grubosc + 1) / 3)\n self.dlugosc = float((2 * self.dlugosc) / 3)\n self.kolor += 6\n\n #sprawdzenie czy kolor nie wyszedl po za skale maksymalnej wartosci\n if self.kolor > 255:\n self.kolor = 255\n\n#rozbicie obliczen na poziom 1 i wyzej\n#Abis jest to punkt prawy dla kazdej galezi\n#B jest to punkt srodkowy dla kazdej galezi\n#C jest to punkt srodkowy dla kazdej galezi\n\n#obliczenia dla pierwszego 
poziomu\n if self.tmp < 2:\n#obliczenie fraktalu, prawa galaz dla kazdej galezi\n#podstawienie obliczonych wartosci z punktu Abis do pozostalych wedlug podanych wzorow\n Abis = fraktal(self.dlugosc, self.alpha, self.poziom)\n B = [round(self.p2[0]), round(Abis[1])]\n C = [round(-Abis[0] + 2 * self.p2[0]), round(Abis[1])]\n\n#zwiekszenie poziomu drzewa o jeden\n self.tmp += 1\n\n#tutaj nastepuje zwrocenie obiektow typu Branch z nowo obliczonymi wartosciami\n return [Branch(self.p2, Abis, self.dlugosc, self.grubosc, self.kolor, self.alpha, self.tmp),\n Branch(self.p2, B, self.dlugosc, self.grubosc, self.kolor, self.alpha, self.tmp),\n Branch(self.p2, C, self.dlugosc, self.grubosc, self.kolor, self.alpha, self.tmp)]\n#obliczenia poziomow wyzej niz pierwszy\n else:\n#obliczanie kata dla punktu prawego\n self.zetprim = randint(-1, 1) * randint(1, self.s)\n self.beta = self.alpha + self.zetprim\n\n#obliczanie kata dla punktu srodkowego\n self.zetbis = randint(-1, 1) * randint(1, self.s)\n self.gamma = self.alpha + self.zetbis\n\n#obliczanie kata dla punktu lewego\n self.zetter = randint(-1, 1) * randint(1, self.s)\n self.teta = self.alpha + self.zetter\n\n#obliczenie fraktalu, prawa galaz dla kazdej galezi\n#podstawienie obliczonych wartosci z punktu Abis do pozostalych wedlug podanych wzorow\n Abis = fraktal(self.dlugosc, self.beta, self.poziom)\n B = [round(self.p2[0]), round(Abis[1])]\n C = [round(-Abis[0] + 2 * self.p2[0]), round(Abis[1])]\n\n#zwiekszenie poziomu drzewa o jeden\n self.tmp += 1\n\n#tutaj nastepuje zwrocenie obiektow typu Branch z nowo obliczonymi wartosciami\n return [Branch(self.p2, Abis, self.dlugosc, self.grubosc, self.kolor, self.beta, self.tmp),\n Branch(self.p2, B, self.dlugosc, self.grubosc, self.kolor, self.gamma, self.tmp),\n Branch(self.p2, C, self.dlugosc, self.grubosc, self.kolor, self.teta, self.tmp)]", "def mezclar_bolsa(self):", "def main():\r\n\r\n print(\"Berikut adalah daftar naga yang tersedia.\")\r\n for naga in daftar_naga:\r\n naga.hp_sementara = naga.hp_maks\r\n print(naga)\r\n\r\n indeks_naga: int = int(input(\"Tolong masukkan indeks dari naga pilihan Anda: \"))\r\n while indeks_naga < 0 or indeks_naga >= len(daftar_naga):\r\n indeks_naga = int(input(\"Maaf, input Anda tidak sah! Tolong masukkan indeks dari naga pilihan Anda: \"))\r\n\r\n naga_pilihan: Naga = daftar_naga[indeks_naga]\r\n naga_musuh: Naga = daftar_naga[random.randint(0, len(daftar_naga) - 1)]\r\n print(naga_pilihan)\r\n print(naga_musuh)\r\n giliran: int = 0 # nilai semula\r\n while naga_pilihan.hp_sementara >= 0 and naga_musuh.hp_sementara >= 0:\r\n giliran += 1\r\n # Giliran Anda adalah ketika nilai 'giliran' itu ganjil dan giliran musuh adalah ketika nilai 'giliran'\r\n # itu genap\r\n if giliran % 2 == 1:\r\n print(naga_pilihan.serang(naga_musuh))\r\n else:\r\n print(naga_musuh.serang(naga_pilihan))\r\n\r\n if naga_musuh.hp_sementara < 0:\r\n print(\"Anda menang!!!\")\r\n break\r\n if naga_pilihan.hp_sementara < 0:\r\n print(\"Anda kalah!!!\")\r\n break\r\n\r\n print(\"Tekan Y untuk ya.\")\r\n print(\"Tekan tombol apapun yang lainnya untuk tidak.\")\r\n tanya: str = input(\"Apakah Anda mau bertarung lagi? \")\r\n if tanya == \"Y\":\r\n main()\r\n else:\r\n sys.exit()", "def sth():", "def tah_hraca(pole, cislo_policka):\n symbol = \"X\"\n list_pole = list(pole)\n while True:\n try:\n cislo_policka = int(input('\\nNa ktore policko chces umiestnit svoje \\'X\\'? Zadaj hodnotu 0 - 19: '))\n break\n except ValueError:\n print('Pozor! 
Musis zadat CISLO.')\n continue\n if cislo_policka in range(0, 20):\n if list_pole[cislo_policka] == \"-\":\n print('Ok. Policko je volne. Tvoj tah:')\n pole = tah(pole, cislo_policka, symbol) \n return pole\n else:\n print('Pozor! Toto policko je uz obsadene.')\n return tah_hraca(pole, cislo_policka)\n else:\n print('Pozor! Toto policko neexistuje.')\n return tah_hraca(pole, cislo_policka)", "def f(inicio,obj):\n return g(inicio,obj)+h(inicio,obj)", "def canto_oposto_6(tab, jog):\r\n jog*=-1\r\n if obter_linha(tab,1)[0]==jog and eh_posicao_livre(tab,9):\r\n return 9\r\n if obter_linha(tab,1)[2]==jog and eh_posicao_livre(tab,7):\r\n return 7\r\n if obter_linha(tab,3)[0]==jog and eh_posicao_livre(tab,3):\r\n return 3\r\n if obter_linha(tab,3)[2]==jog and eh_posicao_livre(tab,1):\r\n return 1", "def piku():\n pass", "def ikkuna(nimi, x_data, y_data, syote, funktio):\n nimi = ik.luo_ali_ikkuna(\"Spektri\")\n kirjasto[nimi] = nimi\n piirtoalue, kuvaaja = ik.luo_kuvaaja(nimi, valitse_datapiste, 1000, 650)\n kirjasto[\"kuvaaja\"] = kuvaaja\n lisaa = kuvaaja.add_subplot()\n lisaa.plot(x_data, y_data)\n lisaa.set_xlabel('Energia')\n lisaa.set_ylabel('Intensiteetti')\n piirtoalue.draw()\n ik.luo_nappi(nimi, syote, funktio)\n ik.luo_nappi(nimi, \"Tallenna\", tallentaja)\n kirjasto[\"pisteet\"] = []\n ik.kaynnista()", "def g():", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def tah_hraca(pole, str_policka):\n symbol = \"X\"\n list_pole = list(pole)\n str_policka = input_cislo_policka()\n\n try:\n cislo_policka = int(str_policka)\n print(cislo_policka)\n if cislo_policka in range(0, 20):\n if list_pole[cislo_policka] == \"-\":\n print('Ok. Policko je volne. Tvoj tah:')\n pole = tah(cislo_policka, symbol, pole) \n return pole\n else:\n print('Pozor! Toto policko je uz obsadene.')\n return tah_hraca(pole, str_policka)\n except ValueError:\n print('Pozor! Musis zadat CISLO.')\n return tah_hraca(pole, str_policka)\n\n else:\n print('Pozor! 
Toto policko neexistuje.')\n return tah_hraca(pole, str_policka)", "def lineaarinen():\n x = []\n y = []\n if not kirjasto[\"korjaus\"]:\n try:\n for erottaja in kirjasto[\"lineaariset_arvot\"]:\n x_arvo, y_arvo = erottaja\n x.append(x_arvo)\n y.append(y_arvo)\n kirjasto[\"lineaariset_arvot\"] = []\n kirjasto[\"pisteet\"] = []\n if x and x[0] != x[1] and y[0] != y[1]:\n kk = (y[1]-y[0])/(x[1]-x[0])\n intensiteetti_korjaus = []\n for j in kirjasto[\"kineettiset_energiat\"]:\n y_korjaava = kk * (j - x[0]) + y[0]\n intensiteetti_korjaus.append(y_korjaava)\n for k, l in enumerate(kirjasto[\"intensiteetit\"]):\n korjaus = l - intensiteetti_korjaus[k]\n kirjasto[\"korjaus\"].append(korjaus)\n else:\n ik.avaa_viesti_ikkuna(\"Error\", \"Korjauspisteiden valinnassa tapahtui virhe\")\n return\n except IndexError:\n ik.avaa_viesti_ikkuna(\"Error\", \"Korjauspisteitä ei ole valittu\")\n else:\n ikkuna(\"korjattu_spektri\", kirjasto[\"kineettiset_energiat\"], kirjasto[\"korjaus\"], \"Integroi\", integrointi)\n else:\n ikkuna(\"korjattu_spektri\", kirjasto[\"kineettiset_energiat\"], kirjasto[\"korjaus\"], \"Integroi\", integrointi)", "def canto_vazio_7(tab, jog):\r\n for x in [1,3,7,9]:\r\n if eh_posicao_livre(tab,x):\r\n return x", "def vytvor_hru (seznam_tahu, seznam_ovoce, radky, sloupce):\n hriste = hraci_pole(radky, sloupce)\n vloz(hriste,seznam_ovoce, \"O\")\n vloz(hriste, seznam_tahu, \"X\")\n return(hriste)", "def bloqueio_de_bifurcacao_4(tab,jog): \r\n if len(bifurcacao_3(tab,-1*jog)) == 1 :\r\n return bifurcacao_3(tab,-1*jog)[0]\r\n else:\r\n for i in range(1,4):\r\n if obter_coluna(tab,i).count(jog)==1:\r\n col = obter_coluna(tab,i)\r\n for j in range(3):\r\n if col[j]==0:\r\n pos1=3*j+i\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n \r\n if obter_linha(tab,i).count(jog)==1:\r\n linha = obter_linha(tab,i)\r\n for j in range(3):\r\n if linha[j]==0:\r\n pos1=j+1+3*(i-1)\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n \r\n if i < 3 and obter_diagonal(tab,i).count(jog)==1:\r\n diagonal = obter_diagonal(tab,i)\r\n for j in range(3):\r\n if i==1:\r\n if diagonal[j]==0:\r\n pos1=4*j+i\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n else:\r\n if diagonal[j]==0:\r\n pos1=7-2*j\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1", "def abw(og, fg):\n\n oe = sg2plato(og)\n re = real_extract(og, fg)\n return (oe - re) / (2.0665 - 0.010665 * oe)", "def pyramida(zakladna, orientacia, centrovanie):\n nova_pyramida = []\n if orientacia not in [\"normalna\", 'obratena']:\n print(\"Pyramida moze byt iba [normalna] alebo [obratena]\")\n return False\n\n if centrovanie != \"center\" and centrovanie != \"vlavo\":\n print(\"Centrovanie pyramidy moze byt iba [center] alebo [vlavo]\")\n return False\n\n if centrovanie == \"center\":\n if orientacia == \"normalna\":\n\n cislo_riadka = -1\n for i in range(1, zakladna + 1, 2): #pocet hviezdiciek rastie po 2\n #print(f\"{'*' * i:^{zakladna}}\")\n cislo_riadka +=1\n riadok = []\n for j in range(cislo_riadka,zakladna//2): #vyska pyramidy = polovica zakladne\n riadok.append(\" \") #kolky riadok, tolko medzier vlavo\n for j in range(0, i):\n riadok.append(\"*\")\n for j in range(cislo_riadka,zakladna//2): # aj v pravo\n riadok.append(\" \")\n nova_pyramida.append(riadok)\n else:\n cislo_riadka = -1\n for i in range(zakladna, 0, -2): 
#pocet hviezdiciek\n #print(f\"{'*' * i:^{zakladna}}\")\n cislo_riadka +=1\n riadok = []\n for j in range(0,cislo_riadka):\n riadok.append(\" \")\n for j in range(0,i):\n riadok.append(\"*\")\n for j in range(0,cislo_riadka):\n riadok.append(\" \")\n nova_pyramida.append(riadok)\n else:\n if orientacia == \"normalna\":\n for i in range(zakladna):\n #print(f\"{'*' * (i + 1)}\")\n riadok = []\n for j in range(0,i):\n riadok.append(\"*\")\n nova_pyramida.append(riadok)\n else:\n for i in range(zakladna):\n riadok = []\n #print(f\"{'*' * (zakladna - i)}\")\n for j in range(zakladna, i, -1):\n riadok.append(\"*\")\n nova_pyramida.append(riadok)\n return nova_pyramida", "def zmiana_glosnosci(utwor, procent = 0):\r\n if(-1 <= procent <= 1):\r\n #ile razy mamy pomnozyc amplitude naszego dzwieku\r\n mnoznik = 0\r\n if( procent < 0 ):\r\n mnoznik = 1 + procent\r\n else:\r\n # obliczamy najwyzsza amplitude w danym utworze i ona bedzie \r\n # wyznaczac jak bardzo mozemy podglosnic\r\n maks_ampli = 0\r\n maks_ampli = max(abs(utwor))\r\n mnoznik = 32767/maks_ampli # maksymalny mnoznik\r\n # mnoznik minimalnie moze osiagnac wartosc 1, to co powyzej \r\n # (mnoznik-1) mnozymy o procent zglosnienia\r\n # i dodajemy do podstawy (czyli 1)\r\n mnoznik = 1 + (mnoznik - 1)*procent\r\n glosniej = mnoznik * utwor\r\n #glosniej = np.array(glosniej, dtype=np.int16)\r\n glosniej = glosniej.astype(np.int16) \r\n return glosniej\r\n else:\r\n print(\"Podaj procent z zakresu -1 do 1\")", "def lin_o_func(self):\n return self.hx", "def jogo_do_galo(str1, str2):\r\n if not (str1 in ['X','O'] and type(str1)==str) or not (str2 in ['basico','normal','perfeito'] and type(str2)==str):\r\n raise ValueError('jogo_do_galo: algum dos argumentos e invalido')\r\n else:\r\n print(\"Bem-vindo ao JOGO DO GALO.\\nO jogador joga com '{}'.\".format(str1))\r\n tab = ((0,0,0),(0,0,0),(0,0,0))\r\n if str1=='X':\r\n jog = 1\r\n pos = escolher_posicao_manual(tab)\r\n tab = marcar_posicao(tab,jog,pos)\r\n print(tabuleiro_str(tab))\r\n else:\r\n jog = -1\r\n while len(obter_posicoes_livres(tab))!=0:\r\n print('Turno do computador ({}):'.format(str2))\r\n pos = escolher_posicao_auto(tab, -1*jog, str2)\r\n tab = marcar_posicao(tab,-1*jog,pos)\r\n print(tabuleiro_str(tab))\r\n if jogador_ganhador(tab) in [-1,1] or len(obter_posicoes_livres(tab))==0:\r\n break\r\n pos = escolher_posicao_manual(tab) \r\n tab = marcar_posicao(tab,jog,pos)\r\n print(tabuleiro_str(tab)) \r\n if jogador_ganhador(tab) in [-1,1] or len(obter_posicoes_livres(tab))==0:\r\n break\r\n \r\n if jogador_ganhador(tab) == 1:\r\n return 'X'\r\n elif jogador_ganhador(tab) == -1:\r\n return 'O' \r\n else:\r\n return 'EMPATE'", "def vitoria_1(tab,jog):\r\n for i in range(1,4):\r\n win = [(0,jog,jog), (jog,0,jog), (jog,jog,0)]\r\n coluna = obter_coluna(tab, i)\r\n linha = obter_linha(tab, i) \r\n if coluna in win:\r\n return i+3*win.index(coluna)\r\n elif linha in win:\r\n return 3*i-2+win.index(linha) \r\n if i!=3:\r\n diagonal = obter_diagonal(tab, i)\r\n if diagonal in win:\r\n if i==1:\r\n return i+4*win.index(diagonal)\r\n\r\n else:\r\n return 7-2*win.index(diagonal)", "def tah_pocitaca(pole, cislo_policka, mozne_policka1, mozne_policka2):\n symbol = \"O\"\n strategia_pocitaca(pole)\n if mozne_policka1:\n cislo_policka = choice(mozne_policka1)\n elif mozne_policka2:\n cislo_policka = choice(mozne_policka2)\n else:\n cislo_policka = cislo_policka\n \n if cislo_policka == False:\n cislo_policka = randint(0,20)\n return tah_pocitaca(pole, cislo_policka, mozne_policka1, 
mozne_policka2)\n elif list_pole[cislo_policka] == \"-\":\n print(f\"\\nPocitac si vybral policko: {cislo_policka}. Tah pocitaca:\")\n pole = tah(pole, cislo_policka, symbol) \n return pole\n else:\n return tah_pocitaca(pole, cislo_policka, mozne_policka1, mozne_policka2)", "def pingjiazhibiao(result):\n import math\n list_ed_normal = []\n list_es_normal = []\n list_ed_true = []\n list_es_true = []\n # these definations are for statistic\n ed_pred_all, es_pred_all,ed_true_all,es_true_all,ed_match,es_match,ed_normal,es_normal,ed_nomiss,es_nomiss= 0,0,0,0,0,0,0,0,0,0\n total_error_ed,total_error_es = 0,0\n sample_missimg_num = 0\n a4cdDict = {}\n a4csDict = {}\n for i in range(-5,7):\n a4cdDict[i] = 0\n a4csDict[i] = 0\n for i in result:\n pred = i[0]\n ed_pred = pred[0]\n es_pred = pred[1]\n if ed_pred == [] or es_pred == []:\n sample_missimg_num += 1\n true = i[1]\n ed_true = true[0]\n es_true = true[1]\n\n # avoid many to one\n ed_pred.sort()\n es_pred.sort()\n deleteAmong10frames(ed_pred)\n deleteAmong10frames(es_pred)\n \n for j in ed_pred:\n ed_pred_all += 1\n for t in ed_true:\n if math.fabs(j - t) < 6:\n ed_normal += 1\n total_error_ed += math.fabs(t - j)\n a4cdDict[j-t]+=1\n break\n # all - normal = FP\n # normal is TP\n a4cdDict[6] = ed_pred_all-ed_normal\n\n for j in es_pred:\n es_pred_all += 1\n for t in es_true:\n if math.fabs(j - t) < 6:\n es_normal += 1\n total_error_es += math.fabs(t - j)\n a4csDict[j-t]+=1\n break\n a4csDict[6] = es_pred_all-es_normal\n for j in ed_true:\n ed_true_all += 1\n for t in ed_pred:\n if math.fabs(t - j) < 6:\n ed_nomiss += 1\n break\n\n for j in es_true:\n es_true_all += 1\n for t in es_pred:\n if math.fabs(t - j) < 6:\n es_nomiss += 1\n break\n # aFD precision recall \n ed_result = total_error_ed / ed_normal,(ed_normal / ed_pred_all),(ed_nomiss / ed_true_all)\n es_result = total_error_es / es_normal,(es_normal / es_pred_all),(es_nomiss / es_true_all)\n return ed_result,a4cdDict, es_result,a4csDict, sample_missimg_num / len(result)", "def Wygrana():\r\n for x in range (0, ROZMIAR_PLANSZY):\r\n for y in range (0, ROZMIAR_PLANSZY):\r\n for kierunek in (\"poziom\", \"pion\", \"skos prawy\", \"skos lewy\"):\r\n iksy, kolka = SprawdzLinie ((x, y), kierunek)\r\n if iksy == 5:\r\n return X\r\n if kolka == 5:\r\n return O\r\n return False", "def energy_PhotonFormula(wave=1.0,energy=0.00):\n global r,c,h\n print(\"Enerji var ise lutfen giriniz.\")\n if energy != 0:\n energy = energy\n else:\n energy=h*(c/wave)\n getit =str(input(\"Dalga boyunu istiyorsaniz d,enerji istiyorsaniz bos birakin.\"))\n if getit == 'd':\n return ('%.2E' % Decimal(str(energy/(h*c))))\n elif getit ==\"\":\n ('%.2E' % Decimal(str(energy)))\n print(\"Yanlis girdi.Yeniden dene.\")\n return energy_PhotonFormula(wave)", "def cal_ja(Y,t,voltage_clamp_func,voltage_clamp_params):\n v = voltage_clamp_func(t,voltage_clamp_params)\n m = Y[0]\n \n tfa = 1.\n ki = 0.001 # (mM)\n \n cao = 2.5 # Davidson (mM)\n \" To do: make cai variable as an input like voltage \"\n cai = 1.e-4 # (mM) Roughly values (100 nM) from Intracellular calcium regulation among subpopulations of rat dorsal root ganglion neurons by Lu, Zhang, Gold 2007\n \n celsius = 37.\n \n def alpha(v):\n return 15.69*(81.5 - v)/(np.exp((-1.0*v+81.5)/10.0)-1.0)\n def beta(v):\n return 0.29*np.exp(-v/10.86)\n def KTF(celsius):\n return ((25./293.15)*(celsius + 273.15))\n def efun(z):\n return np.array([1 - i/2 if i < 1e-4 else i/(np.exp(i)-1) for i in z])\n def calc_ghk(v, cai, cao): \n f = KTF(celsius)/2\n nu = v/f\n return -f*(1. 
- (cai/cao)*np.exp(nu))*efun(nu)\n\n a = alpha(v)\n b = beta(v)\n tau = 1./(tfa*(a + b))\n minf = a/(a+b)\n dm = (minf - m)/tau\n \n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]", "def ohodnotP(row, col, znak, prevx, prevy, pocet_ciest, hlbka, mx): # vpravo\r\n\r\n susedia = getSusedia_ohodnot(row, col, znak)\r\n\r\n if (values[row][col] != \"W\" and col != 5):\r\n hlbka += 1\r\n\r\n if col == 5:\r\n if values[row][col] == \"W\" and hlbka != 0:\r\n hlbka -= 1\r\n dlzkyP.append(hlbka)\r\n\r\n if (col != 5 and hlbka < mx):\r\n for sused in susedia:\r\n if (sused[1] == col + 1 or (sused[1] == col and (sused[0] != prevx or sused[1] != prevy))):\r\n if sused[1] == 5:\r\n pocet_ciest += 1\r\n pocet_ciest += ohodnotP(sused[0], sused[1], znak, row, col, 0, hlbka, mx)\r\n if (values[sused[0]][sused[1]] == \"W\") and col == 4: # nema zmysel sem umiestnovat - radsej inde\r\n pocet_ciest = 0\r\n return pocet_ciest", "def triangulos_equilateros(opcion,b):\n x1 =[0,0]\n x = b/2\n y = ((b**2)-(x**2))**0.5\n y = int(y)\n x2 =[round(x),y]\n x3 =[b-1,0]\n if opcion == 1:\n lado1_x,lado1_y = dda_algrithm(x1[0],x1[1],x2[0],x2[1])\n lado2_x,lado2_y = dda_algrithm(x1[0],x1[1],x3[0],x3[1])\n lado3_x,lado3_y = dda_algrithm(x2[0],x2[1],x3[0],x3[1])\n return [lado1_x,lado1_y,lado2_x,lado2_y,lado3_x,lado3_y]\n else:\n lado1_x,lado1_y = bresenham_algrithm(x1[0],x1[1],x2[0],x2[1])\n lado2_x,lado2_y = bresenham_algrithm(x1[0],x1[1],x3[0],x3[1])\n lado3_x,lado3_y = bresenham_algrithm(x2[0],x2[1],x3[0],x3[1])\n return [lado1_x,lado1_y,lado2_x,lado2_y,lado3_x,lado3_y]", "def func1(y, j, h, add_u = 0):\n y_temp = y[j] + add_u\n N = xsize\n k = np.zeros(xsize)\n for i in range(xsize):\n k[i] = -(1/4.)*(1./h)*(y_temp[(i+1)%N]**2-y_temp[(i-1)%N]**2) - 0.5/(h**3)*(y_temp[(i+2)%N]-2*y_temp[(i+1)%N]+2*y_temp[(i-1)%N]-y_temp[(i-2)%N])\n return k", "def piskvorky1D():\n print(HLASKA_UVOD)\n \n symbol_hrace = input('Vyber si a zadej svuj herni symbol, \"x\" nebo \"o\"?: ') # vyber herniho symbolu hrace\n while symbol_hrace not in ('x', 'o'):\n symbol_hrace = input('Spatne, zadej znovu, \"x\" nebo \"o\"?: ')\n \n if symbol_hrace == 'x': # nastaveni herniho symbolu pocitace\n symbol_pocitace = 'o'\n else:\n symbol_pocitace = 'x'\n\n herni_pole = DELKA_HERNIHO_POLE * '-'\n print(herni_pole)\n \n kolo = 1\n while True:\n for tahne in (tah_hrace, tah_pocitace):\n herni_pole = tahne(herni_pole, symbol_pocitace, symbol_hrace)\n print('{}. 
kolo: {}'.format(kolo, herni_pole))\n stav = vyhodnot(herni_pole) # promenna, kde je ulozeno aktualni vyhodnoceni herniho pole\n if stav in KDO_VYHRAL:\n print(KDO_VYHRAL[stav])\n return\n kolo += 1", "def handle_jiang_gan_hua(event):\n gan_hua_list = gan_hua_expert.query_similiar_from_dict(event.message.text, 10)\n gan_hua = gan_hua_list[random.randint(0, 9)][0]\n line_bot.replyMessage(event.reply_token, gan_hua)", "def Zhu_op(Top, rho_op):\n Pop = rho_op * c.kb * Top/c.mu #gas pressure\n kap_z = np.zeros(len(Top))\n \n for i in range(len(Top)):\n xlp = np.log10(Pop[i])\n xlt = np.log10(Top[i])\n xlop = - 30.\n if (xlt < 3.+0.03*(xlp+4.)): # metal grain\n xlop=-1.27692+0.73846*xlt\n elif (xlt < 3.08+0.028084*(xlp+4)): # metal grain evap\n xlop=129.88071-42.98075*xlt+(142.996475-129.88071)*0.1*(xlp+4)\n elif (xlt < 3.28+xlp/4.*0.12): #water\n xlop=-15.0125+4.0625*xlt\n elif (xlt < 3.41+0.03328*xlp/4.): # water evap\n xlop=58.9294-18.4808*xlt+(61.6346-58.9294)*xlp/4.\n elif (xlt < 3.76+(xlp-4)/2.*0.03): #molecular\n xlop=-12.002+2.90477*xlt+(xlp-4)/4.*(13.9953-12.002)\n elif (xlt < 4.07+(xlp-4)/2.*0.08): #bound free,free free\n xlop=-39.4077+10.1935*xlt+(xlp-4)/2.*(40.1719-39.4077)\n elif (xlt < 5.3715+(xlp-6)/2.*0.5594):\n xlop=17.5935-3.3647*xlt+(xlp-6)/2.*(17.5935-15.7376)\n else:\n xlop = -0.48\n if ((xlop < 3.586*xlt-16.85) and (xlt < 4.)): xlop = 3.586*xlt - 16.85\n if (xlt < 2.9): xlop = -1.27692 + 0.73846*xlt\n\n kap_z[i] = 10.**xlop\n return kap_z", "def piskvorky(pole):\n\n print('Ahoj. Toto je hra 1D piskvorky. Pocitac hra so symbolmi \\'O\\', ty hras so symbolmi \\'X\\'.') \n while \"-\" in pole:\n if vyhodnot(pole) == False:\n break\n else:\n pole = tah_hraca(pole, cislo_policka)\n if vyhodnot(pole) == False:\n break\n else:\n pole = tah_pocitaca(pole, cislo_policka, mozne_policka1, mozne_policka2)\n if \"-\" not in pole:\n vyhodnot(pole)\n return print(\"Dakujem za hru.\")", "def _iou(self, bb_test,bb_gt):\n xx1 = np.maximum(bb_test[0], bb_gt[0])\n yy1 = np.maximum(bb_test[1], bb_gt[1])\n xx2 = np.minimum(bb_test[2], bb_gt[2])\n yy2 = np.minimum(bb_test[3], bb_gt[3])\n w = np.maximum(0., xx2 - xx1)\n h = np.maximum(0., yy2 - yy1)\n wh = w * h\n o = wh / ((bb_test[2]-bb_test[0])*(bb_test[3]-bb_test[1])\n + (bb_gt[2]-bb_gt[0])*(bb_gt[3]-bb_gt[1]) - wh)\n return(o)", "def leeHoja(hoja):\r\n return [renglon(hoja, r) for r in range(hoja.nrows)]", "def escolher_posicao_auto(tab, jog, str1):\r\n if not eh_tabuleiro(tab) or not (jog in [-1,1] and type(jog)==int) or not (str1 in ['basico','normal','perfeito'] and type(str1)==str):\r\n raise ValueError('escolher_posicao_auto: algum dos argumentos e invalido') \r\n else:\r\n \r\n #vitoria_1: tabuleiro X inteiro -> posicao\r\n \r\n def vitoria_1(tab,jog):\r\n \"\"\"\r\n vitoria_1 recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e se o jogador tiver duas\r\n das suas pecas em linha e uma posicao livre entao retorna essa posicao livre.\r\n \"\"\"\r\n for i in range(1,4):\r\n win = [(0,jog,jog), (jog,0,jog), (jog,jog,0)]\r\n coluna = obter_coluna(tab, i)\r\n linha = obter_linha(tab, i) \r\n if coluna in win:\r\n return i+3*win.index(coluna)\r\n elif linha in win:\r\n return 3*i-2+win.index(linha) \r\n if i!=3:\r\n diagonal = obter_diagonal(tab, i)\r\n if diagonal in win:\r\n if i==1:\r\n return i+4*win.index(diagonal)\r\n\r\n else:\r\n return 7-2*win.index(diagonal)\r\n \r\n #bloqueio_2: tabuleiro X inteiro -> posicao \r\n \r\n def bloqueio_2(tab,jog):\r\n \"\"\"\r\n bloqueio_2 
recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e se o adversario tiver duas\r\n das suas pecas em linha e uma posicao livre entao retorna essa posicao livre.\r\n \"\"\"\r\n jog*=-1\r\n return vitoria_1(tab,jog) \r\n \r\n #bifurcacao_3: tabuleiro X inteiro -> lista de posicoes\r\n \r\n def bifurcacao_3(tab, jog):\r\n \"\"\"\r\n bifurcacao_3 recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e se o jogador tiver duas\r\n linhas/colunas/diagonais que se intersectam, onde cada uma contem uma das\r\n suas pecas entao retorna uma lista com todas as posicoes de intersecao \r\n (criando duas formas de vencer na jogada seguinte).\r\n \"\"\"\r\n pos = []\r\n for i in range(1,4):\r\n for j in range(1,4):\r\n if obter_coluna(tab,i).count(jog)==1 and obter_linha(tab,j).count(jog)==1 and eh_posicao_livre(tab, i+3*j-3):\r\n pos+=[i+3*j-3]\r\n for k in range(1,3):\r\n if k==1:\r\n if obter_coluna(tab,i).count(jog)==1 and obter_diagonal(tab,1).count(jog)==1 and eh_posicao_livre(tab, 1+4*(i-1)):\r\n pos+=[1+4*(i-1)]\r\n if obter_linha(tab,j).count(jog)==1 and obter_diagonal(tab,1).count(jog)==1 and eh_posicao_livre(tab, 1+4*(i-1)):\r\n pos+=[1+4*(i-1)]\r\n if obter_diagonal(tab,1).count(jog)==1 and obter_diagonal(tab,2).count(jog)==1 and eh_posicao_livre(tab, 5):\r\n pos+=[5]\r\n if k==2:\r\n if obter_coluna(tab,i).count(jog)==1 and obter_diagonal(tab,1).count(jog)==1 and eh_posicao_livre(tab, 7-2*(i-1)):\r\n pos+=[7-2*(i-1)]\r\n if obter_linha(tab,j).count(jog)==1 and obter_diagonal(tab,1).count(jog)==1 and eh_posicao_livre(tab, 3+2*(i-1)):\r\n pos+=[3+2*(i-1)] \r\n return pos\r\n \r\n #bloqueio_de_bifurcacao_4: tabuleiro X inteiro -> posicao\r\n \r\n def bloqueio_de_bifurcacao_4(tab,jog): \r\n \"\"\"\r\n bloqueio_de_bifurcacao_4 recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e se o adversario tiver apenas uma bifurcacao\r\n entao retorna a posicao de bloqueio dessa bifurcacao, caso contrario, retorna a posicao\r\n em que se cria um dois em linha para forcar o oponente a defender, desde que a defesa nao\r\n resulte na criacao de uma bifurcacao para o oponente.\r\n \"\"\" \r\n if len(bifurcacao_3(tab,-1*jog)) == 1 :\r\n return bifurcacao_3(tab,-1*jog)[0]\r\n else:\r\n for i in range(1,4):\r\n if obter_coluna(tab,i).count(jog)==1:\r\n col = obter_coluna(tab,i)\r\n for j in range(3):\r\n if col[j]==0:\r\n pos1=3*j+i\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n \r\n if obter_linha(tab,i).count(jog)==1:\r\n linha = obter_linha(tab,i)\r\n for j in range(3):\r\n if linha[j]==0:\r\n pos1=j+1+3*(i-1)\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n \r\n if i < 3 and obter_diagonal(tab,i).count(jog)==1:\r\n diagonal = obter_diagonal(tab,i)\r\n for j in range(3):\r\n if i==1:\r\n if diagonal[j]==0:\r\n pos1=4*j+i\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n else:\r\n if diagonal[j]==0:\r\n pos1=7-2*j\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1 \r\n \r\n #centro_5: tabuleiro X inteiro -> posicao\r\n \r\n def centro_5(tab, jog):\r\n \"\"\"\r\n centro_5 recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e devolve a posicao\r\n 
central (5) no caso da mesma estar livre.\r\n \"\"\"\r\n if eh_posicao_livre(tab, 5):\r\n return 5\r\n \r\n #canto_oposto_6: tabuleiro X inteiro -> posicao\r\n \r\n def canto_oposto_6(tab, jog):\r\n \"\"\"\r\n canto_oposto_6 recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e se o adversario estiver num\r\n canto e se o canto diagonalmente oposto for uma posicao livre entao\r\n retorna a posicao desse canto oposto.\r\n \"\"\"\r\n jog*=-1\r\n if obter_linha(tab,1)[0]==jog and eh_posicao_livre(tab,9):\r\n return 9\r\n if obter_linha(tab,1)[2]==jog and eh_posicao_livre(tab,7):\r\n return 7\r\n if obter_linha(tab,3)[0]==jog and eh_posicao_livre(tab,3):\r\n return 3\r\n if obter_linha(tab,3)[2]==jog and eh_posicao_livre(tab,1):\r\n return 1 \r\n \r\n #canto_vazio_7: tabuleiro X inteiro -> posicao\r\n \r\n def canto_vazio_7(tab, jog):\r\n \"\"\"\r\n canto_vazio_7 recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e se um canto for uma posicao\r\n livre entao devolve a posicao correspondente a esse canto.\r\n \"\"\"\r\n for x in [1,3,7,9]:\r\n if eh_posicao_livre(tab,x):\r\n return x \r\n \r\n #lateral_vazio_8: tabuleiro X inteiro -> posicao\r\n \r\n def lateral_vazio_8(tab, jog):\r\n \"\"\"\r\n lateral_vazio_8 recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e se uma posicao lateral\r\n (que nem e o centro, nem um canto) for livre, entao retorna a posicao\r\n correspondente a essa posicao lateral.\r\n \"\"\"\r\n for x in [2,4,6,8]:\r\n if eh_posicao_livre(tab,x):\r\n return x \r\n \r\n if str1=='basico':\r\n for i in range(2):\r\n if i==0: res = centro_5(tab, jog)\r\n if i==1: res = canto_vazio_7(tab, jog)\r\n if res!=None:\r\n return res\r\n return lateral_vazio_8(tab, jog) \r\n \r\n \r\n elif str1=='normal':\r\n for i in range(5):\r\n if i==0: res = vitoria_1(tab,jog)\r\n if i==1: res = bloqueio_2(tab,jog)\r\n if i==2: res = centro_5(tab, jog)\r\n if i==3: res = canto_oposto_6(tab, jog)\r\n if i==4: res = canto_vazio_7(tab, jog)\r\n if res!=None:\r\n return res\r\n return lateral_vazio_8(tab, jog)\r\n \r\n \r\n elif str1=='perfeito':\r\n for i in range(7):\r\n if i==0: res = vitoria_1(tab,jog)\r\n if i==1: res = bloqueio_2(tab,jog)\r\n if i==2: \r\n res = bifurcacao_3(tab, jog)\r\n if res!=[]:\r\n res = bifurcacao_3(tab, jog)[0]\r\n else:\r\n res=None\r\n if i==3: \r\n res = bloqueio_de_bifurcacao_4(tab,jog)\r\n if res!=[]:\r\n res = bloqueio_de_bifurcacao_4(tab,jog)\r\n else:\r\n res=None \r\n if i==4: res = centro_5(tab, jog)\r\n if i==5: res = canto_oposto_6(tab, jog)\r\n if i==6: res = canto_vazio_7(tab, jog)\r\n if res!=None:\r\n return res\r\n return lateral_vazio_8(tab, jog)", "def hilfe(self):\n sZweieck_hilfe(3)", "def ohodnotL(row, col, znak, prevx, prevy, pocet_ciest, hlbka, mx): # vlavo\r\n\r\n susedia = getSusedia_ohodnot(row, col, znak)\r\n\r\n if (values[row][col] != \"W\" and col != 0):\r\n hlbka += 1\r\n\r\n if col == 0:\r\n if values[row][col] == \"W\" and hlbka != 0:\r\n hlbka -= 1\r\n dlzkyL.append(hlbka)\r\n\r\n if (col != 0 and hlbka < mx):\r\n for sused in susedia:\r\n if (sused[1] == col - 1 or (sused[1] == col and (sused[0] != prevx or sused[1] != prevy))):\r\n if sused[1] == 0:\r\n pocet_ciest += 1\r\n pocet_ciest += ohodnotL(sused[0], sused[1], znak, row, col, 0, hlbka, mx)\r\n if (values[sused[0]][sused[1]] == \"W\") and col == 1: # nema zmysel sem umiestnovat - radsej inde\r\n 
pocet_ciest = 0\r\n return pocet_ciest", "def ohodnotH_B(row, col, znak, prevx, prevy, pocet_ciest, hlbka, mx): # hore\r\n\r\n susedia = getSusedia_ohodnot(row, col, znak)\r\n\r\n if (values[row][col] != \"B\" and row != 0):\r\n hlbka += 1\r\n\r\n if row == 0:\r\n if values[row][col] == \"B\" and hlbka != 0:\r\n hlbka -= 1\r\n dlzkyHB.append(hlbka)\r\n\r\n if (row != 0 and hlbka < mx):\r\n for sused in susedia:\r\n if (sused[0] == row - 1 or (sused[0] == row and (sused[0] != prevx or sused[1] != prevy))):\r\n if sused[0] == 0:\r\n pocet_ciest += 1\r\n pocet_ciest += ohodnotH_B(sused[0], sused[1], znak, row, col, 0, hlbka, mx)\r\n return pocet_ciest", "def piskvorky(pole):\n\n print('Ahoj. Toto je hra 1D piskvorky. Pocitac hra so symbolmi \\'O\\', ty hras so symbolmi \\'X\\'.') \n while \"-\" in pole:\n if vyhodnot(pole) == False:\n break\n else:\n pole = tah_hraca(pole, str_policka)\n if vyhodnot(pole) == False:\n break\n else:\n pole = tah_pocitaca(pole, symbol=\"O\")\n if \"-\" not in pole:\n vyhodnot(pole)\n return print(\"Dakujem za hru.\")", "def batalhar(i, j):\n if (i.medabot.ataque > j.medabot.defesa or j.medabot.ataque > i.medabot.defesa) and i.medabot.ataque - j.medabot.defesa != j.medabot.ataque - i.medabot.defesa:\n if i.medabot.ataque - j.medabot.defesa > j.medabot.ataque - i.medabot.defesa:\n troca_peca(i, j)\n return i\n else:\n troca_peca(j, i)\n return j\n elif i.habilidade != j.habilidade:\n if i.habilidade > j.habilidade:\n troca_peca(i, j)\n return i\n else:\n troca_peca(j, i)\n return j\n else:\n if i.ID < j.ID:\n troca_peca(i, j)\n return i\n else:\n troca_peca(j, i)\n return j", "def homozygotie(self):\n if self.allele[1] == 0.0:\n self.homozygote = True", "def ganhou():\n global JOGADOR\n return ((TAB[6] == JOGADOR and TAB[7] == JOGADOR and TAB[8] == JOGADOR) or # Linha horizontal baixa\n (TAB[3] == JOGADOR and TAB[4] == JOGADOR and TAB[5] == JOGADOR) or # Linha horizontal meio\n (TAB[0] == JOGADOR and TAB[1] == JOGADOR and TAB[2] == JOGADOR) or # Linha horizontal alta\n (TAB[6] == JOGADOR and TAB[3] == JOGADOR and TAB[0] == JOGADOR) or # Linha vertical Esquerda\n (TAB[7] == JOGADOR and TAB[4] == JOGADOR and TAB[1] == JOGADOR) or # Linha vertical central\n (TAB[8] == JOGADOR and TAB[5] == JOGADOR and TAB[2] == JOGADOR) or # Linha vertical direita\n (TAB[6] == JOGADOR and TAB[4] == JOGADOR and TAB[2] == JOGADOR) or # diagonal\n (TAB[8] == JOGADOR and TAB[4] == JOGADOR and TAB[0] == JOGADOR)) # diagonal", "def vyhodnot(herni_pole):\n if 'xxx' in herni_pole: # vyhral hrac s x\n return 'x' \n elif 'ooo' in herni_pole: # vyhral hrac s o\n return 'o'\n elif '-' not in herni_pole: # remiza\n return '!'\n else:\n return '-' # hra jeste neskoncila", "def triangulos_rectangulos(opcion,b,h):\n x1 =[0,0]\n x2 =[b-1,h-1]\n x3 =[b-1,0]\n if opcion == 1:\n lado1_x,lado1_y = dda_algrithm(x1[0],x1[1],x2[0],x2[1])\n lado2_x,lado2_y = dda_algrithm(x1[0],x1[1],x3[0],x3[1])\n lado3_x,lado3_y = dda_algrithm(x2[0],x2[1],x3[0],x3[1])\n return [lado1_x,lado1_y,lado2_x,lado2_y,lado3_x,lado3_y]\n else:\n lado1_x,lado1_y = bresenham_algrithm(x1[0],x1[1],x2[0],x2[1])\n lado2_x,lado2_y = bresenham_algrithm(x1[0],x1[1],x3[0],x3[1])\n lado3_x,lado3_y = bresenham_algrithm(x2[0],x2[1],x3[0],x3[1])\n return [lado1_x,lado1_y,lado2_x,lado2_y,lado3_x,lado3_y]", "def tah_hrace(herni_pole, symbol_pocitace, symbol_hrace):\n \n while True:\n try:\n pozice = int(input('Zadej ciselnou pozici tveho tahu (0-{}) '.format(len(herni_pole) - 1)))\n except ValueError:\n print('Nezadal jsi cislici.')\n continue\n\n 
if pozice < 0:\n print('Zadej kladne cislo')\n elif pozice > len(herni_pole) - 1:\n print('Hrajes mimo herni pole, to ma jenom {} pozic!'.format(len(herni_pole)))\n elif herni_pole[pozice] != '-':\n print('Hrajes na obsazene policko. ')\n else:\n break\n\n return tah(herni_pole, pozice, symbol_hrace)", "def smhm_behroozi(logMstar, z, Mstar00 = 10.72, Mstar0a = 0.55,\n M10 = 12.35, M1a = 0.28, beta0 = 0.44,\n abeta = 0.18, delta0 = 0.57, adelta = 0.17,\n gamma0 = 1.56, agamma = 2.51 ):\n\n # Warning\n if np.any(logMstar<8.8):\n print \"Warning: LogMstar value less than Behroozi et al. 2010 Figure 2 range [8.8, 11.6].\"\n elif np.any(logMstar>11.6):\n print \"Warning: LogMstar value greater than Behroozi et al. 2010 Figure 2 range [8.8, 11.6].\"\n \n #scale factor\n a = 1/(1+z)\n \n #Equation 21 of Behroozi et al\n logM1 = M10 + M1a*(a-1)\n logMstar0 = Mstar00 + Mstar0a*(a-1)\n beta = beta0 + abeta*(a-1)\n delta = delta0 + adelta*(a-1)\n gamma = gamma0 + agamma*(a-1) \n MstarbyMstar0 = 10**( logMstar - logMstar0 ) \n return logM1 + beta*np.log10( MstarbyMstar0 ) +\\\n (MstarbyMstar0**delta)/(1+(MstarbyMstar0)**-gamma) - 0.5", "def hilfe(self):\n toto_hilfe(3)", "def integrointi():\n if kirjasto[\"pisteet\"]:\n try:\n indeksi_1, indeksi_2 = etsi_indeksit(\n kirjasto[\"kineettiset_energiat\"], kirjasto[\"pisteet\"][0], kirjasto[\"pisteet\"][1]\n )\n intesiteetti_kayra = kirjasto[\"korjaus\"][indeksi_1:indeksi_2]\n energia_kayra = kirjasto[\"kineettiset_energiat\"][indeksi_1:indeksi_2]\n integraali = np.trapz(intesiteetti_kayra, energia_kayra)\n ik.avaa_viesti_ikkuna(\"integraali\", integraali)\n kirjasto[\"pisteet\"] = []\n intesiteetti_kayra = []\n energia_kayra = []\n return\n except IndexError:\n ik.avaa_viesti_ikkuna(\"Error\", \"Integrointivälin valinnassa tapahtui virhe\")\n kirjasto[\"pisteet\"] = []\n intesiteetti_kayra = []\n energia_kayra = []\n else:\n ik.avaa_viesti_ikkuna(\"Error\", \"Integrointiväliä ei ole valittu\")", "def elongate(self,DNA, Pol, Hel):\n Helicase = Hel\n PolymeraseIII = Pol\n if self.ATP_molecules >= 100 and (Helicase.position - PolymeraseIII.position) < 3000: #genug ATP, Abstand klein genug\n Helicase.position += 100 \n self.ATP_molecules -= 100\n if self.Nucleotide >= 200 and (Helicase.position - PolymeraseIII.position) > 1500: #genug Nucleotide (>=200)\n PolymeraseIII.position += 100\n self.Nucleotide -= 200\n elif self.Nucleotide > 1 and (Helicase.position - PolymeraseIII.position) > 1500: #nicht genug Nucleotide (1-199)\n PolymeraseIII.position += self.Nucleotide/2\n Helicase.position = Helicase.position -100 +self.Nucleotide/2\n self.ATP_molecules =self.ATP_molecules+100-self.Nucleotide/2\n self.Nucleotide -= 2*(self.Nucleotide/2)\n \n elif self.ATP_molecules >= 0 and (Helicase.position - PolymeraseIII.position) < 3000: #nicht genug ATP, Abstand klein genug\n Helicase.position += self.ATP_molecules\n if self.Nucleotide >= 200 and (Helicase.position - PolymeraseIII.position) > 1500: #genug Nucleotide\n PolymeraseIII.position += 100\n self.Nucleotide -= 200\n elif self.Nucleotide > 1 and (Helicase.position - PolymeraseIII.position) > 1500: #nicht genug Nucleotide\n PolymeraseIII.position += self.Nucleotide/2\n Helicase.position = Helicase.position -self.ATP_molecules +self.Nucleotide/2\n self.ATP_molecules -=self.Nucleotide/2\n self.Nucleotide -= 2*(self.Nucleotide/2)\n self.ATP_molecules -= self.ATP_molecules\n\n if Helicase.position > self.DNA.length:\n self.ATP_molecules=self.ATP_molecules+(Helicase.position -self.DNA.length)\n Helicase.position = 
self.DNA.length\n\n if Helicase.position >= self.DNA.length:\n Helicase.bound =False\n #print ('ATP:',self.ATP_molecules,'NT:',self.Nucleotide)\n return Helicase, PolymeraseIII", "def img2heliovec(bxImg,byImg,bzImg,lon,lat,lonc,latc,pAng):\n a11 = -np.sin(latc)*np.sin(pAng)*np.sin(lon - lonc) + np.cos(pAng)*np.cos(lon - lonc)\n a12 = np.sin(latc)*np.cos(pAng)*np.sin(lon - lonc) + np.sin(pAng)*np.cos(lon - lonc)\n a13 = -np.cos(latc)*np.sin(lon - lonc)\n a21 = -np.sin(lat)*(np.sin(latc)*np.sin(pAng)*np.cos(lon - lonc) + np.cos(pAng)*np.sin(lon - lonc)) - np.cos(lat)*np.cos(latc)*np.sin(pAng)\n a22 = np.sin(lat)*(np.sin(latc)*np.cos(pAng)*np.cos(lon - lonc) - np.sin(pAng)*np.sin(lon - lonc)) + np.cos(lat)*np.cos(latc)*np.cos(pAng)\n a23 = -np.cos(latc)*np.sin(lat)*np.cos(lon - lonc) + np.sin(latc)*np.cos(lat)\n a31 = np.cos(lat)*(np.sin(latc)*np.sin(pAng)*np.cos(lon - lonc) + np.cos(pAng)*np.sin(lon - lonc)) - np.sin(lat)*np.cos(latc)*np.sin(pAng)\n a32 = -np.cos(lat)*(np.sin(latc)*np.cos(pAng)*np.cos(lon - lonc) - np.sin(pAng)*np.sin(lon - lonc)) + np.sin(lat)*np.cos(latc)*np.cos(pAng)\n a33 = np.cos(lat)*np.cos(latc)*np.cos(lon - lonc) + np.sin(lat)*np.sin(latc)\n\n bxHelio = a11 * bxImg + a12 * byImg + a13 * bzImg\n byHelio = a21 * bxImg + a22 * byImg + a23 * bzImg\n bzHelio = a31 * bxImg + a32 * byImg + a33 * bzImg\n\n return bxHelio,byHelio,bzHelio", "def jogador_ganhador(tab):\r\n if not eh_tabuleiro(tab):\r\n raise ValueError('jogador_ganhador: o argumento e invalido') \r\n else: \r\n for i in range(1,4):\r\n coluna = obter_coluna(tab, i)\r\n if sum(coluna)==3:\r\n return 1\r\n elif sum(coluna)==-3:\r\n return -1\r\n linha = obter_linha(tab, i)\r\n if sum(linha)==3:\r\n return 1\r\n elif sum(linha)==-3:\r\n return -1 \r\n if i<3:\r\n diagonal = obter_diagonal(tab, i)\r\n if sum(diagonal)==3:\r\n return 1\r\n elif sum(diagonal)==-3:\r\n return -1 \r\n return 0", "def curve_no_hillcoef(ph, pka):\n# return ph - pka\n return 1/(10**(pka-ph)+1)", "def kto_wygral():\n for x in range(0, ROZMIAR_PLANSZY):\n for y in range(0, ROZMIAR_PLANSZY):\n for kierunek in (\"poziom\", \"pion\", \"skos prawy\", \"skos lewy\"):\n iksy, kolka = sprawdz_linie((x, y), kierunek)\n if iksy == ile_do_wygranej:\n return X\n if kolka == ile_do_wygranej:\n return O\n return False", "def aestrella(inicio,obj):\n nodos_abiertos=[inicio]\n nodos_cerrados=[]\n lista1=[]\n for cel in nodos_abiertos:\n lista1.append(cel.costo)\n m=min(lista1)\n for j in nodos_abiertos:\n j.set_gscore(g(inicio,j))\n j.set_hscore(h(j,obj))\n j.set_fscore(f(inicio,obj))\n if j.fscore==m:\n if j==obj:\n print'terminado'\n nodos_cerrados.append(j)\n else:\n nodos_abiertos.append(j)\n for k in j.vecinos:\n if k in nodos_cerrados :\n gk=k.gscore\n gk1=k.get_gscore()\n if gk1<=gk:\n k.set_gscore=gk1\n j=k\n else:\n pass\n elif k in nodos_abiertos:\n gk=k.gscore\n gk1=k.get_gscore\n if gk1<=gk:\n k.set_gscore=gk1\n j=k\n else:\n pass\n \n else:\n nodos_abiertos.append(k)\n k.set_gscore()\n else:\n pass\n ruta=[] \n for u in nodos_cerrados:\n lnc=len(nodos_cerrados)\n for v in range(lnc):\n ruta.insert(v,nodos_cerrados[lnc-v])\n return ruta", "def exercise_b2_70():\r\n pass", "def reemplaza_tildes(palabra):", "def real_extract(og, fg):\n\n oe = sg2plato(og)\n ae = sg2plato(fg)\n q = 0.22 + 0.001 * oe\n return (q * oe + ae) / (1 + q)", "def psi(a):", "def etap2rhog ( self ) :\n from GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays \n pre_etap = self.make_selection (\n ## the unique tag \n 'PreEtapRhoG' ,\n ## algorithm type to be 
used\n DaVinci__N3BodyDecays ,\n ## input selections \n [ self.pions () , self.gamma () ] ,\n ##\n DecayDescriptor = \" eta_prime -> pi+ pi- gamma\" ,\n ##\n DaughtersCuts = { 'gamma' : self['GammaCut'] } ,\n ## \n Combination12Cut = \"\"\" ( AM < 950 * MeV ) &\n ( ACHI2DOCA(1,2) < 12 ) \n \"\"\" , \n CombinationCut = \"\"\"\n ( APT > %s ) & \n in_range ( 500 * MeV , AM12 , 950 * MeV ) & \n ( ADAMASS ( 'eta_prime' ) < 100 * MeV ) \n \"\"\" % ( 0.9 * self['ETAP_PT'] ),\n ##\n MotherCut = \"\"\"\n ( PT > %s ) &\n ( chi2vx < 9 )\n \"\"\" % self['ETAP_PT'] \n )\n ## \n from GaudiConfUtils.ConfigurableGenerators import Pi0Veto__Tagger\n ## \n return self.make_selection (\n 'Etap2rhogamma' ,\n Pi0Veto__Tagger ,\n [ pre_etap ] ,\n MassWindow = 25 * MeV ,\n MassChi2 = -1 ,\n ExtraInfoIndex = 25017 ## unique ! \n )", "def verteileKarten(anzahlSpieler):\n pass", "def rotacija_pravouglog_trougla_oko_hipotenuze(s2, s1):\r\n c = math.sqrt(s2 * s2 + s1 * s1)\r\n povrsina_trougla= (s2 * s1) / 2\r\n hc = (2 * povrsina_trougla) / c\r\n H1 = math.sqrt(s1 * s1 - hc * hc)\r\n H2 = math.sqrt(s2 * s2 - hc * hc)\r\n pi= 3.14\r\n povrsina = hc * pi * (s1 + s2)\r\n zapremina = (hc * hc * pi * (H1 + H2)) / 3\r\n return povrsina, zapremina", "def sinh(a):", "def egim_hesapla(x1, y1, x2, y2):\n\tsonuc = (y2 - y1) / (x2 - x1)\n\tprint float(sonuc)", "def yin_yang():\n turtle.down() # met la plume en mode tracé (si ce n'était déjà le cas)\n turtle.width(2) # grosseur du tracé de 2 points\n # dessine le yin externe\n turtle.color(\"black\", \"black\") # le tracé et le remplissage seront en noir\n turtle.begin_fill() # la ou les formes suivantes seront remplies\n turtle.circle(-100, 180) # demi cercle intérieur tournant vers la droite\n turtle.circle(-200, -180) # demi cercle extérieur, en marche arrière\n turtle.circle(-100, -180) # demi cercle intérieur qui complète le yin\n turtle.end_fill() # remplissage\n # dessine le yang interne\n turtle.color(\"white\") # couleur blanche\n turtle.up() # on ne trace pas ce qui suit\n # déplace la tortue au bon endroit\n turtle.right(90)\n turtle.forward(80)\n turtle.left(90)\n # tracé du disque yang (blanc) interne au yin\n turtle.down()\n turtle.begin_fill()\n turtle.circle(-20)\n turtle.end_fill()\n # se replace au centre\n turtle.up()\n turtle.left(90)\n turtle.forward(80)\n turtle.right(90)\n # dessine le yang externe\n turtle.down()\n turtle.color(\"black\", \"white\") # contour noir, remplissage blanc\n turtle.begin_fill()\n turtle.circle(-100, 180)\n turtle.circle(-200, -180)\n turtle.circle(-100, -180)\n turtle.end_fill()\n # tracé du disque yin (noir) interne au yang\n\n turtle.color(\"black\")\n # déplace la tortue au bon endroit\n turtle.up()\n turtle.right(90)\n turtle.forward(80)\n turtle.left(90)\n turtle.down()\n # trace le disque\n turtle.begin_fill()\n turtle.circle(-20)\n turtle.end_fill()\n # se replace au centre\n turtle.up()\n turtle.left(90)\n turtle.forward(80)\n turtle.right(90)\n turtle.down()\n turtle.hideturtle()\n return", "def winkel(self, *args, **kwargs):\n\t\t\n if kwargs.get('h'):\n print(\"\\nWinkel der sphärischen Geraden mit einer anderen sphärischen\")\n print(\"Geraden im gemeinsamen Schnittpunkt\\n\")\t\t\n print(\"Aufruf sgerade . 
winkel( sgerade1 )\\n\")\t\t \n print(\" sgerade sphärische Gerade\\n\")\n return\n\t\t\t\t\n try:\t\t\t\t\n if len(args) != 1:\n raise AglaError('sphärische Gerade angeben')\n gg = args[0]\t\t \n if not isinstance(gg, sGerade):\t\t\t \n raise AglaError(\"sphärische Gerade angeben\")\n except AglaError as e:\n print('agla:', str(e))\n return\t\n\t\t\n if gg == self:\t\t\n return 0\t\t\t\n P1 = self.pol\t\t\t\n P2 = gg.pol\n wi1 = P1[0].e.winkel(P2[0].e)\n wi2 = P1[0].e.winkel(P2[1].e)\n return wi1, wi2", "def hairness_do_stuff(h, y, c):\n\n print('sup hairness!')\n\n return None", "def needleman_wunsch1(x,y,lodict=None,gop=-2.5, gep=-1.75, local=False):\n n,m = len(x),len(y)\n dp = np.zeros((n+1,m+1))\n pointers = np.zeros((n+1,m+1),np.int32)\n for i in range(1,n+1):\n dp[i,0] = dp[i-1,0]+(gep if i>1 else gop)\n pointers[i,0]=1\n for j in range(1,m+1):\n dp[0,j] = dp[0,j-1]+(gep if j>1 else gop)\n pointers[0,j]=2\n for i in range(1,n+1):\n for j in range(1,m+1):\n if not lodict:\n if x[i-1] == y[j-1]:\n match = dp[i-1,j-1]+1\n else:\n match = dp[i-1,j-1]-1\n else:\n match = dp[i-1,j-1]+lodict[x[i-1],y[j-1]]\n insert = dp[i-1,j]+(gep if pointers[i-1,j]==1 else gop)\n delet = dp[i,j-1]+(gep if pointers[i,j-1]==2 else gop)\n max_score = max([match,insert,delet])\n dp[i,j] = max_score\n pointers[i,j] = [match,insert,delet].index(max_score)\n alg = []\n i,j = n,m\n while(i>0 or j>0):\n pt = pointers[i,j]\n if pt==0:\n i-=1\n j-=1\n alg = [[x[i],y[j]]]+alg\n if pt==1:\n i-=1\n alg = [[x[i],'-']]+alg\n if pt==2:\n j-=1\n alg = [['-',y[j]]]+alg\n return dp[-1,-1], alg", "def exercise_b2_52():\r\n pass", "def hilfe(self):\n datenreihe_hilfe(3)", "def compute_thermo(E,dos,TT):\n if (len(dos)<3):\n print (\"Not enough points in the phonon DOS!\")\n return None\n \n ZPE = 0.5*dos_integral(E,dos,1)\n modes = dos_integral(E,dos)\n \n EvibT = np.zeros(len(TT))\n SvibT = np.zeros(len(TT))\n CvibT = np.zeros(len(TT))\n FvibT = np.zeros(len(TT))\n for i in range(0,len(TT)):\n h = 0.5*(E[2]-E[0])\n arg = K_BOLTZMANN_RY*TT[i]\n arg2 = 2.0 * arg\n Evib = 0.0\n Svib = 0.0\n Cvib = 0.0\n for j in range(0,len(dos)-3,3):\n\n Evib += 3.0*E[j]/tanh(E[j]/(arg2))*dos[j]+\\\n 3.0*E[j+1]/tanh(E[j+1]/(arg2))*dos[j+1]+\\\n 2.0*E[j+2]/tanh(E[j+2]/(arg2))*dos[j+2]\n \n Svib += 3.0*(E[j]/arg2/tanh(E[j]/arg2)-log(2.0*sinh(E[j]/arg2)))*dos[j]+\\\n 3.0*(E[j+1]/arg2/tanh(E[j+1]/arg2)-log(2.0*sinh(E[j+1]/arg2)))*dos[j+1]+\\\n 2.0*(E[j+2]/arg2/tanh(E[j+2]/arg2)-log(2.0*sinh(E[j+2]/arg2)))*dos[j+2]\n\n try: # avoid overflow error for arg very small\n Cvib += 3.0*pow(E[j]/arg,2)/( 4.0*pow(sinh(E[j]/(arg2)),2) )*dos[j]+\\\n 3.0*pow(E[j+1]/arg,2)/( 4.0*pow(sinh(E[j+1]/(arg2)),2) )*dos[j+1]+\\\n 2.0*pow(E[j+2]/arg,2)/( 4.0*pow(sinh(E[j+2]/(arg2)),2) )*dos[j+2]\n except:\n Cvib += 0.0\n\n EvibT[i] = h*0.5*Evib*3.0/8.0 # h is the integration step, 0.5 comes from the equation for E,\n # the factor 3.0/8.0 comes from the Simpson 3/8 rule\n SvibT[i] = h*K_BOLTZMANN_RY*Svib*3.0/8.0\n CvibT[i] = h*K_BOLTZMANN_RY*Cvib*3.0/8.0\n FvibT = EvibT - SvibT * TT\n\n print ()\n return TT, EvibT, SvibT, CvibT, FvibT, ZPE, modes", "def func2(y, j, h, add_u = 0):\n y_temp = y[j] + add_u\n N = xsize\n k = np.zeros(xsize)\n for i in range(xsize):\n k[i] = -(1/4.)*(1./h)*(y_temp[(i+1)%N]**2-y_temp[(i-1)%N]**2)\n return k", "def distcalc(z,h=0.70,omegalambda=0.7,omegam=0.3,omegak=0.0):\n\n H0 = 100 * h # this is in units of km/s/Mpc\n\n H0freq = H0 * constants.kilo/(constants.mega * constants.parsec) # this is H0 is units of Hz\n \n hubbletime = 1.0/H0freq # 
in seconds\n hubbletimeyr = hubbletime / constants.year\n\n #hubble distance\n dh = constants.c / H0freq # in meters\n\n #now i can calculate the comoving distance (line of sight) using hogg eqn 15\n dc = dh * integrate.quad(dcintegrand,0,z,(omegalambda,omegam,omegak))[0]\n\n #now i can find the transverse comoving distance using hogg eqn 16\n if omegak == 0:\n dm = dc\n elif omegak > 0:\n dm = dh/np.sqrt(omegak) * np.sinh(dc * np.sqrt(omegak) / dh)\n else:\n dm = dh/np.sqrt(abs(omegak)) * np.sin(dc * np.sqrt(abs(omegak)) / dh)\n\n\n #now i will calculate the angular diameter distance (hogg eqn 18)\n da = dm/(1+z)\n \n #now i will calculate scale in kpc/arcsec, since this is commonly used\n scale = da * constants.arcsec / (constants.kilo * constants.parsec)\n\n #now i will calculate the luminosity distance (hog eqn 21)\n dl = (1+z)*dm\n \n #now i will calculate lookback time and \n #time from the begining of the universe to that redshift using hogg eqn 30\n \n tlookback = hubbletimeyr * integrate.quad(timeintegrand,0,z,(omegalambda,omegam,omegak))[0]\n \n tz = hubbletimeyr * integrate.quad(timeintegrand,z,np.inf,(omegalambda,omegam,omegak))[0]\n \n #all sky co-moving volume out to redshift z (hogg eqn 30)\n if omegak == 0:\n vc = 4 * np.pi * dm**3 / 3\n elif omegak > 0:\n vc = ( 4 * np.pi * dh**3 / (2 * omegak) ) * ( dm * np.sqrt(1 + omegak * dm**2 / dh**2) / dh - \n np.arcsinh( np.sqrt(omegak) * dm / dh ) / np.sqrt(omegak) )\n else:\n vc = ( 4 * np.pi * dh**3 / (2 * omegak) ) * ( dm * np.sqrt(1 + omegak * dm**2 / dh**2) / dh - \n np.arcsin( np.sqrt(abs(omegak)) * dm / dh ) / np.sqrt(abs(omegak)) )\n\n #for output, i will make a dictionary\n output = dict(dh=dh, dc=dc, dm=dm, da=da, scale=scale, dl=dl, tlookback = tlookback, tz=tz, vc=vc)\n\n return output", "def overpotential2(x, doh):\n # | - overpotential2\n dooh = ooh_oh_scaling(doh)\n dg14 = [doh, x, -x + 2.46, -dooh + 4.92]\n m = max(dg14)\n return(m - 1.23)\n #return doh*do\n #__|", "def overpotential2(x, doh):\n # | - overpotential2\n dooh = ooh_oh_scaling(doh)\n dg14 = [doh, x, -x + 2.46, -dooh + 4.92]\n m = max(dg14)\n return(m - 1.23)\n #return doh*do\n #__|", "def func3(y, j, h, add_u = 0):\n y_temp = y[j] + add_u\n N = xsize\n k = np.zeros(xsize)\n for i in range(xsize):\n k[i] = -(1/4.)*(1./h)*(y_temp[(i+1)%N]**2-y_temp[(i-1)%N]**2) + (1/2.)*(1./h**2)*(y_temp[(i+1)%N]-2*y_temp[i%N]+y_temp[(i-1)%N])\n return k", "def agregar_bolsa(self, letra, cantidad):", "def bifurcacao_3(tab, jog):\r\n pos = []\r\n for i in range(1,4):\r\n for j in range(1,4):\r\n if obter_coluna(tab,i).count(jog)==1 and obter_linha(tab,j).count(jog)==1 and eh_posicao_livre(tab, i+3*j-3):\r\n pos+=[i+3*j-3]\r\n for k in range(1,3):\r\n if k==1:\r\n if obter_coluna(tab,i).count(jog)==1 and obter_diagonal(tab,1).count(jog)==1 and eh_posicao_livre(tab, 1+4*(i-1)):\r\n pos+=[1+4*(i-1)]\r\n if obter_linha(tab,j).count(jog)==1 and obter_diagonal(tab,1).count(jog)==1 and eh_posicao_livre(tab, 1+4*(i-1)):\r\n pos+=[1+4*(i-1)]\r\n if obter_diagonal(tab,1).count(jog)==1 and obter_diagonal(tab,2).count(jog)==1 and eh_posicao_livre(tab, 5):\r\n pos+=[5]\r\n if k==2:\r\n if obter_coluna(tab,i).count(jog)==1 and obter_diagonal(tab,1).count(jog)==1 and eh_posicao_livre(tab, 7-2*(i-1)):\r\n pos+=[7-2*(i-1)]\r\n if obter_linha(tab,j).count(jog)==1 and obter_diagonal(tab,1).count(jog)==1 and eh_posicao_livre(tab, 3+2*(i-1)):\r\n pos+=[3+2*(i-1)] \r\n return pos", "def desplazamientox(tiempo,velocidad):\r\n #se realiza un operacion para encontrar el el desplzamiento 
horizaontal\r\n x=tiempo*velocidad\r\n #se regresa el valor de x\r\n return x", "def hilfe(self):\n sKreis_hilfe(3)", "def tulosta_energia(merkkijono, liike_energia=0):\n print \"%s saa liike-energian %.3f J.\" % (merkkijono.title(), liike_energia)", "def merkkaa_miina(kentta):\n while True:\n print(\"Voit merkata tyhjän paikan x:llä tai poistaa merkkauksen syöttämällä merkatun paikan koordinaatit uudestaan.\")\n print(\"Merkataan ruutu x:llä\")\n merkattava_ruutu = input(\"- Syötä koordinaatit välilyönnillä erotettuna: \").split()\n print(\"------------------------------------------------\")\n if len(merkattava_ruutu) == 0:\n print(\">>> Syötä koordinaatit kokonaislukuina välilyönnillä erotettuna toisistaan!\")\n tulosta_kentta(kentta, miinat)\n continue\n elif merkattava_ruutu[0] == \"q\":\n return \"q\"\n elif len(merkattava_ruutu) != 2:\n print(\">>> Syötä kaksi koordinaattia kokonaislukuina välilyönnillä erotettuna toisistaan!\")\n tulosta_kentta(kentta, miinat)\n continue\n try:\n miinan_leveys = int(merkattava_ruutu[0])\n miinan_korkeus = int(merkattava_ruutu[1])\n if miinan_leveys >= len(kentta[0]) or miinan_korkeus >= len(kentta) or miinan_leveys < 0 or miinan_korkeus <0:\n print(\">>> Syöttämäsi koordinaatit ovat kentän ulkopuolella. Yritä uudestaan.\")\n tulosta_kentta(kentta, miinat)\n continue\n except ValueError:\n print(\">>> Anna koordinaatit kokonaislukuina!\")\n tulosta_kentta(kentta, miinat)\n else:\n if kentta[miinan_korkeus][miinan_leveys] == \"-\":\n kentta[miinan_korkeus][miinan_leveys] = \"x\"\n tulosta_kentta(kentta, miinat)\n elif kentta[miinan_korkeus][miinan_leveys] == \"x\":\n kentta[miinan_korkeus][miinan_leveys] = \"-\"\n tulosta_kentta(kentta, miinat)\n else:\n print(\">>> Et voi merkata avattua ruutua!\")\n tulosta_kentta(kentta, miinat)\n return miinan_leveys, miinan_korkeus", "def hemianopsia_hard(hemi='left',dprime_fnc=dprime_basic):\n def hemianopsia_fnc(distance):\n if (hemi == 'left' and distance[0] < 0) or (hemi == 'right' and distance[0] > 0):\n return SMALL_FLOAT\n else:\n return dprime_fnc(distance)\n return hemianopsia_fnc", "def GetHelix(helix):\r\n pass", "def get_problem():\n\n # Rename this and/or move to optim package?\n problem = beluga.optim.Problem('Hannibal_HPAdemo')\n problem.mode='analytical' #Other options: 'numerical', 'dae'\n\n #Define independent variables\n problem.independent('t', 's')\n\n #~~~~~!!!\n #problem.quantity('terrain3','(-0.3*exp(-0.5*((x-2.7)^2+1.5*(y-2.1)^2))+2.6*exp(-0.55*(0.87*(x-6.7)^2+(y-2.2)^2))+2.1*exp(-0.27*(0.2*(x-5.5)^2+(y-7.2)^2))+1.6*(cos(0.8*y))^2*(sin(0.796*x))^2)')\n\n # Define equations of motion\n problem.state('x','V*cos(hdg)','m') \\\n .state('y','V*sin(hdg)','m') \\\n\n # Define controls\n problem.control('hdg','rad')\n\n # Define Cost Functional\n problem.cost['path'] = Expression('(1-w)+w*V*conv*elev*terrain(x,y)', 's')\n\n #Define constraints\n problem.constraints().initial('x-x_0','m') \\\n .initial('y-y_0','m') \\\n .terminal('x-x_f','m') \\\n .terminal('y-y_f','m')\n\n #Define constants\n problem.constant('w',0.9,'1') #Initial Terrain weighting factor\n problem.constant('conv',1,'s/m^2') #Integral conversion factor\n problem.constant('V',1,'m/s') #Vehicle speed\n problem.constant('elev',1,'m') #Initial Elevation\n\n #Unit scaling\n problem.scale.unit('m',1) \\\n .unit('s',1) \\\n .unit('rad',1)\n\n #Configure solver\n #problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose = True, cached = False, number_arcs=8)\n problem.bvp_solver 
= algorithms.SingleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=15, verbose = True, cached = False)\n\n #Initial Guess (Classic test example [4.9,0.4])\n problem.guess.setup('auto',start=[9.0,0.5], costate_guess=[0.0,-0.1]) #City A\n #problem.guess.setup('auto',start=[4.9,0.4], costate_guess=[0.1,-0.1]) #City A\n\n #Add Continuation Steps (Classic test example [7.2,8.5]) [8, 4.5]\n problem.steps.add_step(strategy='HPA',hweight=0.9) \\\n .terminal('x', 3.0, 10) \\\n .terminal('y', 9.5, 10) \\\n # .const('w', 0.9, 2, confined=True)\n\n #problem.steps.add_step(strategy='manual').num_cases(10) \\\n # .terminal('x', 3.0) \\\n # .terminal('y', 9.5) \\\n\n #problem.steps.add_step().num_cases(30) \\\n # .const('w',0.99) #Final Terrain weighting factor\n\n\n return problem" ]
[ "0.73375344", "0.6515627", "0.6515627", "0.6515627", "0.6515627", "0.6515627", "0.63205355", "0.63011354", "0.62365353", "0.60986763", "0.60985714", "0.60890335", "0.5958645", "0.5890988", "0.5886411", "0.58524626", "0.58322126", "0.58077747", "0.57773924", "0.5761866", "0.57223475", "0.57223475", "0.57223475", "0.57223475", "0.57223475", "0.57223475", "0.571887", "0.5718639", "0.5711012", "0.5664288", "0.5662177", "0.56599605", "0.5657947", "0.5654217", "0.564221", "0.56408644", "0.5627254", "0.56222415", "0.5599161", "0.55892324", "0.5571564", "0.5557062", "0.55567455", "0.5545736", "0.5545149", "0.55443573", "0.55431116", "0.5532864", "0.55116194", "0.5491878", "0.54826957", "0.5479709", "0.5479051", "0.54721814", "0.5468445", "0.546656", "0.54627055", "0.54588836", "0.5456865", "0.5452711", "0.5442361", "0.54378456", "0.5423311", "0.5390309", "0.53875065", "0.5381221", "0.5380506", "0.53647196", "0.5359925", "0.5350779", "0.53476423", "0.5346019", "0.53295934", "0.53265136", "0.5325549", "0.5322232", "0.53104806", "0.5309377", "0.530894", "0.5307278", "0.53039426", "0.52840525", "0.5282044", "0.5281686", "0.5280491", "0.5274712", "0.52640957", "0.52507687", "0.5249924", "0.5247031", "0.5247031", "0.52334213", "0.52303374", "0.5229328", "0.5228804", "0.52195823", "0.52157277", "0.5213443", "0.52132964", "0.5213189", "0.52105117" ]
0.0
-1
Takes the queryset of the object. Exports the data of that queryset as of the given date_time to Excel.
def export_to_excel(self, worksheet, row_start, col_start, queryset, date_time=timezone.now()):
	if queryset:
		[row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)
		for q in queryset:
			# object_excel_write converts the history object as of date_time into the excel sheet
			[row_write, col_write] = q.object_excel_write(worksheet, row_write, col_write, date_time=date_time)
	else:
		worksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')  # "No data available"
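A minimal usage sketch of the calling convention this method relies on: every element of the queryset must expose an object_excel_write(worksheet, row, col, date_time=...) method that writes itself and returns the next free cell position. All names below (DemoRow, the file name, the sample rows) are assumptions for illustration only, not part of the source dataset.

# Hedged sketch: DemoRow stands in for a Django model instance; only the
# object_excel_write protocol matters here. Requires the xlsxwriter package.
import xlsxwriter
from datetime import datetime

class DemoRow:
    def __init__(self, label):
        self.label = label

    def object_excel_write(self, worksheet, row, col, date_time=None):
        # Write one row (label plus reporting timestamp) and return the next
        # free cell position, as export_to_excel() expects.
        worksheet.write_string(row, col, self.label)
        worksheet.write_string(row, col + 1, date_time.isoformat() if date_time else '')
        return [row + 1, col]

workbook = xlsxwriter.Workbook('demo_export.xlsx')
worksheet = workbook.add_worksheet('Demo')
row_write, col_write = 1, 0
for item in [DemoRow('first'), DemoRow('second')]:  # stands in for a queryset
    [row_write, col_write] = item.object_excel_write(
        worksheet, row_write, col_write, date_time=datetime(2020, 1, 1))
workbook.close()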
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_ho_dan_as_excel_action(fields=None, exclude=None, header=True):\n def export_as_excel(modeladmin, request, queryset):\n opts = modeladmin.model._meta\n field_names = [\"name\", \"status\", \"location\", \"tinh\",\n \"xa\", \"huyen\", \"phone\", \"cuuho\", \"update_time\", \"note\"]\n display_names = [\"Tên hộ dân\", \"Tình trạng\", \"Vị trí\", \"Tỉnh\", \"Xã\",\n \"Huyện\", \"Sdt\", \"hỗ trợ\", \"Thời gian cuối cùng cập nhật\", \"Ghi chú\"]\n file_name = \"Danh_sach_ho_dan\"\n\n output = io.BytesIO()\n\n workbook = xlsxwriter.Workbook(output, {'in_memory': True})\n worksheet = workbook.add_worksheet()\n row = 0\n if header:\n write_a_row(worksheet, row, display_names)\n row += 1\n for obj in queryset:\n arr = []\n for field in field_names:\n if field == \"status\" and obj.status:\n arr.append(obj.status.name)\n elif field == \"update_time\":\n utc_time = getattr(obj, field)\n local_datetime = utc_to_local(utc_time)\n arr.append(local_datetime.strftime(\"%d/%m/%Y %H:%M:%S\"))\n else:\n arr.append(str(getattr(obj, field) or \"\"))\n write_a_row(worksheet, row, arr)\n row += 1\n\n workbook.close()\n\n output.seek(0)\n\n response = HttpResponse(output.read(\n ), content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\")\n response['Content-Disposition'] = f\"attachment; filename={file_name}.xlsx\"\n\n output.close()\n\n return response\n\n export_as_excel.short_description = \"Xuất file excel\"\n return export_as_excel", "def export(self, queryset=None):\n self.queryset = queryset or self.queryset\n exported_datetime = get_utcnow()\n filename = self.get_filename(exported_datetime)\n path = os.path.join(self.export_folder, filename)\n with open(path, 'w') as f:\n csv_writer = csv.DictWriter(\n f, fieldnames=self.field_names, delimiter=self.delimiter)\n csv_writer.writeheader()\n for model_obj in self.queryset:\n object_helper = self.object_history_helper_cls(\n model_obj=model_obj, create=True)\n objects = object_helper.get_not_exported()\n for obj in objects:\n row = self.prepare_row(\n model_obj=model_obj,\n exported_datetime=exported_datetime,\n export_change_type=obj.export_change_type)\n csv_writer.writerow(row)\n object_helper.update_as_exported(\n objects=objects, exported_datetime=exported_datetime)\n file_history_updater = self.file_history_updater_cls(\n path=path,\n delimiter=self.delimiter,\n model=self.model_cls._meta.label_lower,\n filename=filename)\n file_history_updater.update()\n return path", "def export_any_queryset(request, queryset, filename, excluded_fields=[], included_fields=[], csv_field_delimiter = \";\"):\n\n name, extension = os.path.splitext(filename)\n file_format = extension[1:]\n\n output = None\n if file_format == 'csv':\n content_type = 'text/csv'\n output = io.StringIO()\n writer = csv.writer(output, delimiter=csv_field_delimiter, quoting=csv.QUOTE_MINIMAL)\n exporter = SpreadsheetQuerysetExporter(writer, file_format=file_format)\n exporter.export_queryset(queryset)\n elif file_format == 'xlsx':\n content_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n #content_type = 'application/vnd.ms-excel'\n output = io.BytesIO()\n with open_xlsx_file(output) as writer:\n # # Write Spreadsheet\n # writer.write_headers_from_strings(\n # ['Cliente', 'Commessa', 'Progetto', 'Attività', ] +\n # ['Totale', ],\n # )\n # writer.apply_autofit()\n exporter = SpreadsheetQuerysetExporter(writer, file_format=file_format)\n exporter.export_queryset(queryset, excluded_fields=excluded_fields, 
included_fields=included_fields)\n writer.apply_autofit()\n assert writer.is_closed()\n else:\n raise Exception('Wrong export file format \"%s\"' % file_format)\n\n # send \"output\" object to stream with mimetype and filename\n assert output is not None\n output.seek(0)\n # response = HttpResponse(\n # output.read(),\n response = StreamingHttpResponse(\n output,\n content_type=content_type,\n )\n #response['Content-Disposition'] = 'inline; filename=\"%s\"' % filename\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n\n return response", "def export(self,**kwargs):\n \n # import pdb;pdb.set_trace()\n \n # provide for case where recs are set extenally\n if not self.recs:\n self.select_recs(**kwargs)\n if self.recs:\n if self.export_file_name:\n filename = self.export_file_name\n else:\n filename = \"{table_name}_report_{datetime}.csv\".format(\n table_name = self.table.display_name,\n datetime = date_to_string(local_datetime_now(),'iso_datetime'),\n ).replace(' ','_').lower()\n \n if not self.export_fields:\n # include all fields by default\n self.export_fields = self._set_default_list_fields(include_all=True).copy()\n\n self.set_list_fields(self.export_fields)\n \n \n if self.export_template:\n result = render_template(self.export_template, data=self)\n else:\n # add a descriptive title row\n if self.export_title:\n result = self.export_title.strip() + '\\n'\n else:\n result = \"Export of table {} as of {}\\n\".format(self.table.table_name,excel_date_and_time_string(local_datetime_now()))\n \n result += ','.join([x['label'] for x in self.export_fields]) + '\\n'\n for rec in self.recs:\n rec_row = []\n for field in self.export_fields:\n data = rec.__getattribute__(field['name'])\n if field['type'].upper() == \"DATE\":\n data = local_date_string(data)\n elif field['type'].upper() == \"DATETIME\":\n data = excel_date_and_time_string(data)\n else:\n # just text\n data = str(data).strip()\n \n # replace double quotes with double-double quotes\n data = data.replace('\"','\"\"') #double up on double quotes\n \n if \",\" in data:\n # if any commas, wrap in quotes\n data = '\"' + data + '\"'\n \n #replace returns\n data = data.replace('\\r\\n',' -crnl- ')\n data = data.replace('\\n',' -nl- ')\n data = data.replace('\\r',' -rtn- ')\n\n rec_row.append(data)\n \n result += ','.join([str(x) for x in rec_row]) + '\\n'\n \n return DataStreamer(result,filename,'text/csv').send()\n \n self.result_text = \"No records selected\"\n self.success = False\n \n flash(self.result_text)\n return self.list(**kwargs)", "def export_to_excel(self, workbook, tailan_queryset):\n\t\t# workbook argumentdaa avna\n\t\tif tailan_queryset:\n\t\t\t#[row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)\n\t\t\t\n\t\t\tworksheet = workbook.add_worksheet(u'Гүний худаг')\n\t\t\tqueryset = Hudag.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Hudag.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.gunii_hudags:\n\t\t\t\t\tqueryset = tailan.gunii_hudags.hudags.all()\n\t\t\t\t\t[row_write, col_write] = Hudag.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэршүүлэх байгууламж')\n\t\t\tqueryset = Ts_baiguulamj.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 
1\n\t\t\t[row_write, col_write] = Ts_baiguulamj.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsevershuuleh:\n\t\t\t\t\tqueryset = tailan.tsevershuuleh.tsevershuuleh.all()\n\t\t\t\t\t[row_write, col_write] = Ts_baiguulamj.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэрлэх байгууламж')\n\t\t\tqueryset = Ts_baiguulamj.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ts_baiguulamj.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tseverleh:\n\t\t\t\t\tqueryset = tailan.tseverleh.tseverleh.all()\n\t\t\t\t\t[row_write, col_write] = Ts_baiguulamj.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Усан сан')\n\t\t\tqueryset = UsanSan.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsanSan.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.usansan:\n\t\t\t\t\tqueryset = tailan.usansan.usan_sans.all()\n\t\t\t\t\t[row_write, col_write] = UsanSan.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны насос станц')\n\t\t\tqueryset = NasosStants.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = NasosStants.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsever_nasos_stants:\n\t\t\t\t\tqueryset = tailan.tsever_nasos_stants.nasos_stantss.all()\n\t\t\t\t\t[row_write, col_write] = NasosStants.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны насос станц')\n\t\t\tqueryset = NasosStants.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = NasosStants.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_nasos_stants:\n\t\t\t\t\tqueryset = tailan.bohir_nasos_stants.nasos_stantss.all()\n\t\t\t\t\t[row_write, col_write] = NasosStants.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Лаборатори')\n\t\t\tqueryset = Lab.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Lab.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.lab:\n\t\t\t\t\tqueryset = tailan.lab.labs.all()\n\t\t\t\t\t[row_write, col_write] = Lab.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, 
date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны шугам')\n\t\t\tqueryset = Sh_suljee.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Sh_suljee.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsever_usnii_shugam:\n\t\t\t\t\tqueryset = tailan.tsever_usnii_shugam.sh_suljees.all()\n\t\t\t\t\t[row_write, col_write] = Sh_suljee.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны шугам')\n\t\t\tqueryset = Sh_suljee.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Sh_suljee.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_usnii_shugam:\n\t\t\t\t\tqueryset = tailan.bohir_usnii_shugam.sh_suljees.all()\n\t\t\t\t\t[row_write, col_write] = Sh_suljee.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'АХББ')\n\t\t\tqueryset = ABB.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = ABB.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.abb:\n\t\t\t\t\tqueryset = tailan.abb.abbs.all()\n\t\t\t\t\t[row_write, col_write] = ABB.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ус, дулаан дамжуулах төв')\n\t\t\tqueryset = UsDamjuulahBair.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsDamjuulahBair.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.us_damjuulah_tov:\n\t\t\t\t\tqueryset = tailan.us_damjuulah_tov.usDamjuulahBair.all()\n\t\t\t\t\t[row_write, col_write] = UsDamjuulahBair.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ус түгээх байр')\n\t\t\tqueryset = UsTugeehBair.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsTugeehBair.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.us_tugeeh:\n\t\t\t\t\tqueryset = tailan.us_tugeeh.us_tugeeh_bairs.all()\n\t\t\t\t\t[row_write, col_write] = UsTugeehBair.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны машин')\n\t\t\tqueryset = WaterCar.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = WaterCar.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.water_car:\n\t\t\t\t\tqueryset = 
tailan.water_car.water_cars.all()\n\t\t\t\t\t[row_write, col_write] = WaterCar.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны машин')\n\t\t\tqueryset = BohirCar.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = BohirCar.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_car:\n\t\t\t\t\tqueryset = tailan.bohir_car.bohir_cars.all()\n\t\t\t\t\t[row_write, col_write] = BohirCar.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ажилчдын судалгаа')\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ajiltan.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.ajiltans:\n\t\t\t\t\tqueryset = tailan.ajiltans.ajiltans.all()\n\t\t\t\t\t[row_write, col_write] = Ajiltan.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\t\n\t\telse:\n\t\t\tworksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')", "def export_outstanding_fires(request, region_id, queryset):\n #regions = Region.objects.filter(id=region_id) if region_id else Region.objects.all()\n regions = Region.objects.filter(id=region_id) if region_id else Region.objects.filter(dbca=True)\n region_name = regions[0].name if region_id else 'All-Regions'\n\n rpt_date = datetime.now()\n filename = 'outstanding_fires_{}_{}.xls'.format(region_name, rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n for region in regions:\n outstanding_fires(book, region, queryset, rpt_date)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export_to_csv(self, request, queryset):\n fields = self.get_table_fields()\n field_names = [field.name for field in fields]\n field_verbose_names = [field.verbose_name.encode(\n 'utf-8'\n ) for field in fields]\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; \\\nfilename=%s.csv' % unicode(self.model._meta).replace('.', '_')\n\n writer = csv.writer(response)\n writer.writerow(field_verbose_names)\n for obj in queryset:\n writer.writerow([unicode(getattr(obj, field)).encode(\n \"utf-8\",\n \"replace\"\n ) for field in field_names])\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.ministerial.get_excel_sheet(rpt_date, book)\n self.ministerial_auth.get_excel_sheet(rpt_date, book)\n self.ministerial_268.get_excel_sheet(rpt_date, book)\n self.quarterly.get_excel_sheet(rpt_date, book)\n self.by_tenure.get_excel_sheet(rpt_date, book)\n self.by_cause.get_excel_sheet(rpt_date, book)\n self.region_by_tenure.get_excel_sheet(rpt_date, book)\n self.indicator.get_excel_sheet(rpt_date, book)\n 
self.by_cause_10YrAverage.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 1')\n book.save(response)\n\n return response", "def export_repayment_csv(request):\n import csv\n from django.utils.encoding import smart_str\n # response = HttpResponse(content_type='text/csv')\n # response['Content-Disposition'] = 'attachment; filename=Repayment_report.csv'\n from_date = request.GET.get('from_date')\n to_date = request.GET.get('to_date')\n search = request.GET.get('search_value') or ''\n search_query = Q()\n if search:\n search_query = Q(user__user__username__icontains=search) | \\\n Q(user__user__first_name__icontains=search) | \\\n Q(project__title__icontains=search) | \\\n Q(amount__icontains=search) | \\\n Q(user__user__last_name__icontains=search) | \\\n Q(user__user__email__icontains=search)\n if from_date and to_date:\n import datetime\n import pytz\n date1 = datetime.datetime.strptime(from_date, '%Y-%m-%d').date()\n date2 = datetime.datetime.strptime(to_date, '%Y-%m-%d').date()\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00,\n created_at__range=[\n datetime.datetime(date1.year, date1.month, date1.day, 8, 15,\n 12, 0, pytz.UTC),\n datetime.datetime(date2.year, date2.month, date2.day, 8, 15,\n 12, 0, pytz.UTC)]).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").filter(search_query).iterator()\n else:\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00).filter(search_query).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").iterator()\n # writer = csv.writer(response, csv.excel)\n # response.write(u'\\ufeff'.encode('utf8')) # BOM (optional...Excel needs it to open UTF-8 file properly)\n\n def stream():\n buffer_ = StringIO()\n writer = csv.writer(buffer_)\n writer.writerow([\n smart_str(u\"FIRST NAME\"),\n smart_str(u\"LAST NAME\"),\n smart_str(u\"USERNAME\"),\n smart_str(u\"EMAIL\"),\n smart_str(u\"DATE\"),\n smart_str(u\"NAME OF PROJECT\"),\n smart_str(u\"DONATION AMOUNT\"),\n smart_str(u\"REPAYMENT AMOUNT\"),\n\n ])\n\n for payment in repayments:\n writer.writerow([\n smart_str(payment.user.user.first_name),\n smart_str(payment.user.user.last_name),\n smart_str(payment.user.user.username),\n smart_str(payment.user.user.email),\n smart_str(payment.created_at),\n smart_str(payment.project.title),\n smart_str(round(\n Payment.objects.filter(user=payment.user).filter(project=payment.project).aggregate(Sum('amount'))[\n 'amount__sum'] or 0, 2)),\n smart_str(round(payment.amount, 2)),\n ])\n buffer_.seek(0)\n data = buffer_.read()\n buffer_.seek(0)\n buffer_.truncate()\n yield data\n\n # Create the streaming response object with the appropriate CSV header.\n response = StreamingHttpResponse(stream(), content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"Repayment_report.csv\"'\n return response", "def get_export_data(self, file_format, queryset, *args, **kwargs):\n request = kwargs.pop(\"request\")\n resource_class = self.get_export_resource_class()\n data = resource_class(**self.get_export_resource_kwargs(request)).export(queryset, *args, **kwargs)\n export_data = file_format.export_data(data)\n return export_data", "def export_any_dataset(request, *fields, queryset, filename, csv_field_delimiter = \";\"):\n\n name, extension = os.path.splitext(filename)\n file_format = extension[1:]\n headers, rows = render_queryset_as_data(*fields, queryset=queryset)\n\n output = None\n if file_format == 'csv':\n content_type = 'text/csv'\n output = io.StringIO()\n writer = 
csv.writer(output, delimiter=csv_field_delimiter, quoting=csv.QUOTE_MINIMAL)\n\n writer.writerow(headers)\n for row in rows:\n writer.writerow(row)\n\n elif file_format == 'xlsx':\n content_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n #content_type = 'application/vnd.ms-excel'\n output = io.BytesIO()\n with open_xlsx_file(output) as writer:\n\n writer.write_headers_from_strings(headers)\n for row in rows:\n writer.writerow(row)\n writer.apply_autofit()\n\n assert writer.is_closed()\n else:\n raise Exception('Wrong export file format \"%s\"' % file_format)\n\n # send \"output\" object to stream with mimetype and filename\n assert output is not None\n output.seek(0)\n # response = HttpResponse(\n # output.read(),\n response = StreamingHttpResponse(\n output,\n content_type=content_type,\n )\n #response['Content-Disposition'] = 'inline; filename=\"%s\"' % filename\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n\n return response", "def export_as_csv(modeladmin, request, queryset):\n if not request.user.is_staff:\n raise PermissionDenied\n opts = modeladmin.model._meta\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % unicode(opts).replace('.', '_')\n writer = csv.writer(response, delimiter=';')\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n values = []\n for field in field_names:\n value = (getattr(obj, field))\n if callable(value):\n try:\n value = value() or ''\n except:\n value = 'Error retrieving value'\n if value is None:\n value = ''\n values.append(unicode(value).encode('utf-8'))\n writer.writerow(values)\n #writer.writerow([getattr(obj, field) for field in field_names])\n return response", "def dump_to_file_format(queryset, file_format, data_zip):\n # create a temporary output stream (temp file)\n if file_format == 'xls':\n # it seems that an excel file needs to be written on a BytesIo even if on the xlwt they write exactly\n # the opposite (I was about to become fool)\n output = io.BytesIO()\n else:\n output = io.StringIO()\n\n # get queryset model\n model = queryset.model\n # the export code depends on the file format\n if file_format == 'csv':\n # create an instance of csv writer that writes on the stream 'output' opened above\n csv_writer = csv.writer(output, dialect='excel', delimiter=';')\n\n # there are some things that may be different from a model to another\n\n # for example, I also want to write in the project csv the username of the members\n if model == Projet:\n csv_writer.writerow(['ID', 'NAME', 'MEMBERS'])\n for project in queryset:\n # build a comma separated list with all the users and the tasks that are in the project\n members = ', '.join([member.username for member in project.members.all()])\n csv_writer.writerow([project.id, project.name, members])\n # if the model is User, only export non confidential fields\n if model == User:\n csv_writer.writerow(['USERNAME', 'NAME', 'SURNAME', 'E-MAIL'])\n for user in queryset:\n csv_writer.writerow([user.username, user.first_name, user.last_name, user.email])\n # for the other models that's what is going to happen\n else:\n # get all the field names and write them as headers\n field_names = [field.name for field in model._meta.fields]\n csv_writer.writerow(field.upper() for field in field_names)\n # for each instance in the queryset\n for obj in queryset:\n # 
\"\"\"general backup code\"\"\"\n # csv_writer.writerow([getattr(obj, field) for field in field_names])\n\n row = [] # create an empty row list\n # for each field of the model\n for field in field_names:\n # get the field value\n field_value = getattr(obj, field)\n # this is to control the format of the date that will be written in the csv\n if isinstance(field_value, datetime.datetime):\n field_value = field_value.strftime(\"%m/%d/%Y, %H:%M:%S\")\n row.append(field_value) # append the field value to the end of the row list\n\n csv_writer.writerow(row)\n\n # the .json and .xml formats are generated with the django serializers utilities\n elif file_format == 'json' or file_format == 'xml':\n # if the model is User, only export non confidential fields\n if model == User:\n json_xml = serializers.serialize(file_format, queryset, use_natural_foreign_keys=True,\n fields=('username', 'first_name', 'last_name', 'email'))\n else:\n # use_natural_foreign_keys=True means that the foreign keys won't be written as just numbers\n json_xml = serializers.serialize(file_format, queryset, use_natural_foreign_keys=True)\n\n output.write(json_xml)\n\n elif file_format == 'xls':\n wb = xlwt.Workbook(encoding='utf-8') # create excel workbook\n ws = wb.add_sheet(model._meta.model.__name__) # create sheet\n\n # Sheet header, first row\n row_num = 0\n font_style = xlwt.XFStyle()\n font_style.font.bold = True\n\n '''This code is pretty similar to the code to export in .csv, but in excel each cell (row and column) \n must written separately'''\n # get all the field names and write them as headers\n # if User only confidential data\n if model == User:\n field_names = ['username', 'first_name', 'last_name', 'email']\n else:\n field_names = [field.name for field in model._meta.fields]\n for col_num in range(len(field_names)):\n ws.write(row_num, col_num, field_names[col_num].upper(), font_style)\n\n # add a column for the members of the project\n # (otherwise it won't be done automatically because it's ManytoMany)\n if model == Projet:\n ws.write(row_num, col_num + 1, 'MEMBERS', font_style)\n\n # Sheet body, remaining rows\n font_style = xlwt.XFStyle()\n\n # for each instance in the queryset\n for obj in queryset:\n row_num += 1\n # for each field of the model\n for col_num in range(len(field_names)):\n # get the field value\n field_value = getattr(obj, field_names[col_num])\n # this is to control the format of the date that will be written in the csv\n if isinstance(field_value, datetime.datetime):\n field_value = field_value.strftime(\"%m/%d/%Y, %H:%M:%S\")\n ws.write(row_num, col_num, field_value.__str__(), font_style)\n\n # add the column with the members of the project\n if model == Projet:\n members = ', '.join([member.username for member in obj.members.all()])\n ws.write(row_num, col_num + 1, members, font_style)\n\n # save the excel file on the output stream\n wb.save(output)\n\n # generates the name of the output file depending on the model and the file format\n file_name = model._meta.model.__name__.lower() + '_data.' 
+ file_format\n # add the file to the zip archive and close the output stream\n data_zip.writestr(file_name, output.getvalue())\n output.close()", "def uploader_actividad(df,to_model):\n\tengine = create_engine(\"mssql+pyodbc://sa:[email protected]:1433/vpcanales?driver=SQL+Server+Native+Client+11.0\")\n\n\tfecha = df.loc[0,'Fecha']\n\tprint(fecha.month)\n\tprint(fecha.year)\n\n\tif to_model.__name__==\"Activacion\":\n\n\t\tActivacion.objects.filter(fecha_actividad__month=fecha.month ,\n\t\t\tfecha_actividad__year=fecha.year).delete()\n\n\t\tfor row in df.itertuples():\n\t\t\tconnection = engine.raw_connection()\n\t\t\tcursor=connection.cursor()\n\t\t\t#Se ejecuta el SP por cada registro del dataframe\n\t\t\tstring=\"\"\"exec sp_insert_into_activacion\n\t\t\t@fecha_actividad='{0}',\n\t\t\t@plataforma='{1}',\n\t\t\t@tecnologia='{2}',\n\t\t\t@terminal='{3}',\n\t\t\t@cantidad='{4}',\n\t\t\t@codigo_plan='{5}',\n\t\t\t@mes={6},\n\t\t\t@ano={7},\n\t\t\t@codigo_agente='{8}'\n\t\t\t \"\"\".format(row[2],\n\t\t\trow[5],\n\t\t\trow[6],\n\t\t\trow[7],\n\t\t\trow[-2],\n\t\t\trow[4],\n\t\t\trow[2].month,\n\t\t\trow[2].year,\n\t\t\trow[3])\n\t\t\tcursor.execute(string).commit()\n\n\t\tresults = Activacion.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(suma = Sum('cantidad'))\n\n\n\t\tresults.update(Activacion.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(count = Count('cantidad')))\n\n\t\tprint(\"*************************\",results)\n\n\t\treturn results\n\n\n\n\telse:\n\n\t\tAlta.objects.filter(fecha_actividad__month=fecha.month ,\n\t\t\tfecha_actividad__year=fecha.year).delete()\n\n\t\tfor row in df.itertuples():\n\t\t\tconnection = engine.raw_connection()\n\t\t\tcursor=connection.cursor()\n\t\t\t#Se ejecuta el SP por cada registro del dataframe\n\t\t\tstring=\"\"\"exec sp_insert_into_alta\n\t @fecha_actividad='{0}',\n\t @plataforma='{1}',\n\t @tecnologia='{2}',\n\t @terminal='{3}',\n\t @cantidad='{4}',\n\t @codigo_plan='{5}',\n\t @mes={6},\n\t @ano={7},\n\t @codigo_agente='{8}' \"\"\".format(row[2],\n\t row[5],\n\t row[6],\n\t row[7],\n\t row[-2],\n\t row[4],\n\t row[2].month,\n\t row[2].year,\n\t row[3])\n\t\t\tcursor.execute(string).commit()\n\n\n\t\tresults = Alta.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(suma = Sum('cantidad'))\n\n\n\t\tresults.update(Alta.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(count = Count('cantidad')))\n\n\t\tprint(\"*************************\",results)\n\n\n\n\t\treturn results", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'quarterly_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def download_queryset(self, queryset, export_format):\n dataset = StockItemResource().export(queryset=queryset)\n\n filedata = dataset.export(export_format)\n\n filename = 'InvenTree_StockItems_{date}.{fmt}'.format(\n date=datetime.now().strftime(\"%d-%b-%Y\"),\n fmt=export_format\n )\n\n return DownloadFile(filedata, filename)", "def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. 
There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True", "def export(request):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n filename = 'export-inscripcions-tallers-%s.csv' % date.today().strftime(\"%y-%m-%d\")\n\n regtaller_list = TallerRegistration.objects.all()\n\n table = ExportTallerRegistrationTable(regtaller_list)\n table.order_by = request.GET.get(\"sort\",'last_name')\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n writer = csv.writer(response)\n # Write headers to CSV file\n headers = []\n for column in table.columns:\n headers.append(column.header.encode('utf8'))\n writer.writerow(headers)\n\n # Write data to CSV file\n for obj in table.rows:\n row = []\n for value in obj:\n if isinstance(value, basestring):\n row.append(value.encode('utf8'))\n else:\n row.append(value)\n writer.writerow(row)\n\n # Return CSV file to browser as download\n return response", "def export_xlsx(request):\n import openpyxl\n try:\n from openpyxl.cell import get_column_letter\n except ImportError:\n from openpyxl.utils import get_column_letter\n\n from_date = request.GET.get('from_date')\n to_date = request.GET.get('to_date')\n search = request.GET.get('search_value') or ''\n search_query = Q()\n if search:\n search_query = Q(user__user__username__icontains=search) | \\\n Q(user__user__first_name__icontains=search) | \\\n Q(project__title__icontains=search) | \\\n Q(amount__icontains=search) | \\\n Q(user__user__last_name__icontains=search) | \\\n Q(user__user__email__icontains=search)\n if from_date and to_date:\n import datetime\n import 
pytz\n date1 = datetime.datetime.strptime(from_date, '%Y-%m-%d').date()\n date2 = datetime.datetime.strptime(to_date, '%Y-%m-%d').date()\n payments = Payment.objects.filter(\n created_at__range=[datetime.datetime(date1.year, date1.month, date1.day, 8, 15, 12, 0, pytz.UTC),\n datetime.datetime(date2.year, date2.month, date2.day, 8, 15, 12, 0,\n pytz.UTC)]).order_by('-created_at').filter(search_query)\\\n .select_related(\"user\", \"project\", \"admin_reinvestment\", \"user_reinvestment\", \"tip\", \"user__user\").iterator()\n else:\n payments = Payment.objects.filter(search_query).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"admin_reinvestment\", \"user_reinvestment\", \"tip\", \"user__user\")\\\n .iterator()\n response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = 'attachment; filename=RE-volv.xlsx'\n wb = openpyxl.Workbook()\n ws = wb.get_active_sheet()\n ws.title = \"RE-volv\"\n\n row_num = 0\n\n columns = [\n (u\"FIRST NAME\", 30),\n (u\"LAST NAME\", 30),\n (u\"USERNAME\", 30),\n (u\"EMAIL\", 30),\n (u\"DATE\", 30),\n (u\"NAME OF PROJECT\", 30),\n (u\"DONATION TO SOLAR SEED FUND\", 30),\n (u\"REINVESTMENT IN SOLAR SEED FUND\", 20),\n (u\"ADMIN REINVESTMENT IN SOLAR SEED FUND\", 20),\n (u\"DONATION TO OPERATION\", 20),\n (u\"TOTAL DONATIONS\", 20),\n ]\n\n for col_num in xrange(len(columns)):\n c = ws.cell(row=row_num + 1, column=col_num + 1)\n c.value = columns[col_num][0]\n ws.column_dimensions[get_column_letter(col_num + 1)].width = columns[col_num][1]\n\n for payment in payments:\n if payment.admin_reinvestment:\n admin_reinvestment = round(payment.amount, 2)\n else:\n admin_reinvestment = 0\n\n if payment.user_reinvestment:\n user_reinvestment = round(payment.user_reinvestment.amount, 2)\n else:\n user_reinvestment = 0\n\n if payment.admin_reinvestment or payment.user_reinvestment:\n donation_amount = 0\n else:\n donation_amount = payment.amount\n\n if payment.tip:\n tip = round(payment.tip.amount, 2)\n else:\n tip = 0\n\n if payment.tip and payment.amount:\n total = round(payment.tip.amount + payment.amount, 2)\n if payment.tip and not payment.amount:\n total = round(payment.tip.amount, 2)\n if payment.amount and not payment.tip:\n total = round(payment.amount, 2)\n if not payment.amount and not payment.tip:\n total = 0\n if AnonymousUserDonation.objects.filter(payment_id=payment.id):\n email = AnonymousUserDonation.objects.get(payment_id=payment.id).email\n else:\n email = payment.user.user.email\n\n row_num += 1\n row = [\n payment.user.user.first_name,\n payment.user.user.last_name,\n payment.user.user.username,\n email,\n payment.created_at,\n payment.project.title,\n donation_amount,\n user_reinvestment,\n admin_reinvestment,\n tip,\n total,\n ]\n for col_num in xrange(len(row)):\n c = ws.cell(row=row_num + 1, column=col_num + 1)\n c.value = row[col_num]\n\n wb.save(response)\n payments.close()\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_10yr_average_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export_data(self):\r\n if len(app.entry6.get()) != 0:\r\n\r\n if app.var.get() == 'xls':\r\n\r\n wb = Workbook()\r\n sheet = 
wb.add_sheet('Sheet1')\r\n self.columns = ['id', 'Name', 'Section', 'Dept.', 'Gpa', 'MP1', 'MP2', 'MP3', 'MT', 'FINAL']\r\n style = xlwt.easyxf('font: bold 1')\r\n for col in range(10):\r\n sheet.write(0, col, self.columns[col], style)\r\n index=0\r\n for row in range(1,162):\r\n sheet.write(row, 1, open_data.sort_list[index])\r\n index += 1\r\n index1 = -1\r\n for row in range(1,162):\r\n index1 += 1\r\n index2=0\r\n for col in range(10):\r\n if col == 1 or index2 == 1:\r\n index2 += 1\r\n continue\r\n if index2 == 0:\r\n sheet.write(row, col, int(open_data.student[open_data.sort_list[index1]][index2]))\r\n index2 += 1\r\n continue\r\n sheet.write(row, col, open_data.student[open_data.sort_list[index1]][index2])\r\n index2 += 1\r\n file_name=app.entry6.get()\r\n if '.xls' not in file_name:\r\n wb.save(file_name+'.xls')\r\n else:\r\n wb.save(file_name)\r\n\r\n elif app.var.get() == 'txt':\r\n\r\n file_name = app.entry6.get()\r\n if '.txt' not in file_name:\r\n file_name = file_name + '.txt'\r\n file = open(file_name, 'w')\r\n index2 = 0\r\n for key in open_data.student:\r\n for index in range(10):\r\n if index == 0:\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n continue\r\n if index == 1:\r\n try:\r\n self.split_names = open_data.sort_list[index2].split(' ')\r\n file.write(self.split_names[0])\r\n file.write(', ')\r\n file.write(self.split_names[1])\r\n file.write(', ')\r\n index2 += 1\r\n except UnicodeEncodeError:\r\n index2 += 1\r\n pass\r\n continue\r\n if index >= 5 and index <= 9:\r\n if open_data.student[key][index] != '':\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n else:\r\n file.write('\\n')\r\n break\r\n if index == 9:\r\n file.write('\\n')\r\n continue\r\n try:\r\n file.write(str(open_data.student[key][index]))\r\n file.write(', ')\r\n except UnicodeEncodeError:\r\n pass\r\n file.close()\r\n\r\n\r\n\r\n elif app.var.get() == 'csv':\r\n app.info.configure(text=\"INFO: Type not Supported\")\r\n # The program does not support saving in 'csv' type. 
If the user selects 'csv' file type, 'Info' Label\r\n # shows the message: 'INFO: Type not Supported'.\r\n\r\n else:\r\n app.info.configure(text='INFO: Type not chosen!')\r\n # Also, If the user presses on 'Export Data' button, with a file name provided, but without choosing a\r\n # file type, 'Info' Label shows the message: 'INFO: Type not chosen'.\r\n\r\n else:\r\n app.info.configure(text=\"INFO: Please provide the name of the file.\")\r\n # Also, if the user presses 'Export Data' button without giving a file name, 'Info' Label shows the message:\r\n # 'INFO: Please provide the name of the file.'\r", "def action_date_ret(self):\n for wh in self.browse():\n if not wh.date_ret:\n self.write([wh.id],\n {'date_ret': time.strftime('%Y-%m-%d')})\n return True", "def print_xlsx(self):\n if self.date_from and self.date_to:\n if self.date_from > self.date_to:\n raise ValidationError(\"Date From must be less than Date To\")\n\n # active_record = self._context['id']\n # record = self.env['room.accommodation'].browse(active_record)\n data = {\n 'date_from': self.date_from,\n 'date_to': self.date_to,\n 'guest_id': self.guest_id.id,\n 'model_id': self.id,\n 'check_out': self.check_out,\n 'date_today': fields.Datetime.now()\n }\n\n print(\"XLSX Wizard data : \", data)\n\n return {\n 'type': 'ir.actions.report',\n 'data': {\n 'model': 'accommodation.reporting',\n 'options': json.dumps(data, default=date_utils.json_default),\n 'output_format': 'xlsx',\n 'report_name': 'Accommodation Report'\n },\n 'report_type': 'xlsx'\n }", "def make_csv(user_id, fobj):\n data = show_history(user_id)\n report = csv.writer(fobj)\n report.writerow([\n 'Status',\n 'Date',\n 'Amount',\n 'From Curr',\n 'To Curr',\n 'To Address',\n ])\n for row in data:\n report.writerow([\n row.exchange_status.capitalize(),\n row.created_at.strftime('%Y-%m-%d %H:%I:%M'),\n row.amount,\n row.from_curr,\n row.to_curr,\n row.address_out\n ])", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def download_data(request, file_format, exp_p=False, exp_m=False, exp_t=False, exp_j=False, exp_s=False,\n querysets=None):\n\n # set the response so that the browser will understand that the user is receiving a zip file to download\n response = HttpResponse(content_type='application/zip')\n response['Content-Disposition'] = 'attachment; filename=\"data.zip\"'\n\n # create the zip archive by using the python library ZipFile\n data_zip = ZipFile(response, 'w')\n\n file_format = file_format.lower() # it may be helpful\n\n \"\"\" ONLY the data that refers to the projects of which the AUTHENTICATED USER is MEMBER will be exported\"\"\"\n user = request.user\n # models queryset to be used to generate to export the database\n projects_queryset = user.projets.all() # only projects that the user has access to\n projects_members_queryset = User.objects.filter(\n projets__in=projects_queryset).distinct() # infos about project members\n tasks_queryset = Task.objects.filter(projet__in=projects_queryset) # all the tasks in these projects\n journals_queryset = Journal.objects.filter(task__in=tasks_queryset) # all the journals in these tasks\n status_queryset = Status.objects.all()\n\n def dump_to_file_format(queryset, 
file_format, data_zip):\n \"\"\" Subfunction used not to repeat the same code for the export process\n\n :param queryset: a generic queryset of a model\n :param file_format:\n :param data_zip: a zip archive\n\n \"\"\"\n # create a temporary output stream (temp file)\n if file_format == 'xls':\n # it seems that an excel file needs to be written on a BytesIo even if on the xlwt they write exactly\n # the opposite (I was about to become fool)\n output = io.BytesIO()\n else:\n output = io.StringIO()\n\n # get queryset model\n model = queryset.model\n # the export code depends on the file format\n if file_format == 'csv':\n # create an instance of csv writer that writes on the stream 'output' opened above\n csv_writer = csv.writer(output, dialect='excel', delimiter=';')\n\n # there are some things that may be different from a model to another\n\n # for example, I also want to write in the project csv the username of the members\n if model == Projet:\n csv_writer.writerow(['ID', 'NAME', 'MEMBERS'])\n for project in queryset:\n # build a comma separated list with all the users and the tasks that are in the project\n members = ', '.join([member.username for member in project.members.all()])\n csv_writer.writerow([project.id, project.name, members])\n # if the model is User, only export non confidential fields\n if model == User:\n csv_writer.writerow(['USERNAME', 'NAME', 'SURNAME', 'E-MAIL'])\n for user in queryset:\n csv_writer.writerow([user.username, user.first_name, user.last_name, user.email])\n # for the other models that's what is going to happen\n else:\n # get all the field names and write them as headers\n field_names = [field.name for field in model._meta.fields]\n csv_writer.writerow(field.upper() for field in field_names)\n # for each instance in the queryset\n for obj in queryset:\n # \"\"\"general backup code\"\"\"\n # csv_writer.writerow([getattr(obj, field) for field in field_names])\n\n row = [] # create an empty row list\n # for each field of the model\n for field in field_names:\n # get the field value\n field_value = getattr(obj, field)\n # this is to control the format of the date that will be written in the csv\n if isinstance(field_value, datetime.datetime):\n field_value = field_value.strftime(\"%m/%d/%Y, %H:%M:%S\")\n row.append(field_value) # append the field value to the end of the row list\n\n csv_writer.writerow(row)\n\n # the .json and .xml formats are generated with the django serializers utilities\n elif file_format == 'json' or file_format == 'xml':\n # if the model is User, only export non confidential fields\n if model == User:\n json_xml = serializers.serialize(file_format, queryset, use_natural_foreign_keys=True,\n fields=('username', 'first_name', 'last_name', 'email'))\n else:\n # use_natural_foreign_keys=True means that the foreign keys won't be written as just numbers\n json_xml = serializers.serialize(file_format, queryset, use_natural_foreign_keys=True)\n\n output.write(json_xml)\n\n elif file_format == 'xls':\n wb = xlwt.Workbook(encoding='utf-8') # create excel workbook\n ws = wb.add_sheet(model._meta.model.__name__) # create sheet\n\n # Sheet header, first row\n row_num = 0\n font_style = xlwt.XFStyle()\n font_style.font.bold = True\n\n '''This code is pretty similar to the code to export in .csv, but in excel each cell (row and column) \n must written separately'''\n # get all the field names and write them as headers\n # if User only confidential data\n if model == User:\n field_names = ['username', 'first_name', 'last_name', 'email']\n else:\n 
field_names = [field.name for field in model._meta.fields]\n for col_num in range(len(field_names)):\n ws.write(row_num, col_num, field_names[col_num].upper(), font_style)\n\n # add a column for the members of the project\n # (otherwise it won't be done automatically because it's ManytoMany)\n if model == Projet:\n ws.write(row_num, col_num + 1, 'MEMBERS', font_style)\n\n # Sheet body, remaining rows\n font_style = xlwt.XFStyle()\n\n # for each instance in the queryset\n for obj in queryset:\n row_num += 1\n # for each field of the model\n for col_num in range(len(field_names)):\n # get the field value\n field_value = getattr(obj, field_names[col_num])\n # this is to control the format of the date that will be written in the csv\n if isinstance(field_value, datetime.datetime):\n field_value = field_value.strftime(\"%m/%d/%Y, %H:%M:%S\")\n ws.write(row_num, col_num, field_value.__str__(), font_style)\n\n # add the column with the members of the project\n if model == Projet:\n members = ', '.join([member.username for member in obj.members.all()])\n ws.write(row_num, col_num + 1, members, font_style)\n\n # save the excel file on the output stream\n wb.save(output)\n\n # generates the name of the output file depending on the model and the file format\n file_name = model._meta.model.__name__.lower() + '_data.' + file_format\n # add the file to the zip archive and close the output stream\n data_zip.writestr(file_name, output.getvalue())\n output.close()\n\n '''\n uses the function defined above the export the data\n '''\n if exp_p:\n dump_to_file_format(projects_queryset, file_format, data_zip)\n if exp_m:\n dump_to_file_format(projects_members_queryset, file_format, data_zip)\n if exp_t:\n dump_to_file_format(tasks_queryset, file_format, data_zip)\n if exp_j:\n dump_to_file_format(journals_queryset, file_format, data_zip)\n if exp_s:\n dump_to_file_format(status_queryset, file_format, data_zip)\n\n # it is also possible to pass whatever list of querysets to this function\n if not querysets is None:\n for queryset in querysets:\n dump_to_file_format(queryset, file_format, data_zip)\n\n # closes the zip file\n data_zip.close()\n\n # finally send the zip file as a the HTTP response\n return response", "def export_as_csv(modeladmin, request, queryset):\n if not request.user.is_staff:\n raise PermissionDenied\n opts = modeladmin.model._meta\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % unicode(opts).replace('.', '_')\n writer = csv.writer(response)\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n writer.writerow([getattr(obj, field) for field in field_names])\n return response", "def export(request):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n filename = 'export-inscrits%s.csv' % date.today().strftime(\"%y-%m-%d\")\n\n person_list = Person.objects.all()\n\n table = ExportPersonTable(person_list)\n table.order_by = request.GET.get(\"sort\",'last_name')\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n writer = csv.writer(response)\n # Write headers to CSV file\n headers = []\n for column in table.columns:\n headers.append(column.header.encode('utf8'))\n writer.writerow(headers)\n\n # Write data to CSV file\n for obj in table.rows:\n row = []\n for value in obj:\n 
row.append(value.encode('utf8'))\n writer.writerow(row)\n\n # Return CSV file to browser as download\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_tenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\r\n self.prices[\"returns\"] = self.returns\r\n self.prices.columns = ['prices', 'returns']\r\n self.prices = self.prices.dropna()\r\n \r\n name = QFileDialog.getSaveFileName(None, 'Save File', filter='*.xlsx')\r\n if(name[0] == ''):\r\n # if name empty\r\n pass\r\n else:\r\n self.prices.to_excel(name[0])", "def export_as_csv(modeladmin, request, queryset):\n opts = modeladmin.model._meta\n field_names = set([field.name for field in opts.fields])\n if fields:\n fieldset = set(fields)\n field_names = field_names & fieldset\n elif exclude:\n excludeset = set(exclude)\n field_names = field_names - excludeset\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % str(opts).replace('.', '_')\n\n writer = csv.DictWriter(response, fields)\n writer.writeheader()\n\n for obj in queryset:\n writer.writerow(dict(zip(fields, [getattr(obj, field) for field in fields])))\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_indicator_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_268_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n rpt_date = datetime.now()\n filename = 'bushfire_regionbytenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def on_Output_CWA_excel_Now_button_clicked(self):\n # self.lineEdit.setText(result['Cname'])\n # self.lineEdit_2.setText(result['Sclass'])\n # self.lineEdit_3.setText(result['ClassTime'])\n # self.lineEdit_4.setText(result['Tno'])\n # self.lineEdit_6.setText(result['Date'])\n Result = CWA_Message_Query(self.lineEdit_2.text(),self.lineEdit_3.text(), self.lineEdit_6.text(), self.lineEdit.text())\n 
Create_Cwa_excel_table(self.lineEdit_2.text(),self.lineEdit.text(),Result)", "def queryset_to_csv(self):\n csv_data = []\n custom_fields = []\n\n # Start with the column headers\n headers = self.queryset.model.csv_headers.copy()\n\n # Add custom field headers, if any\n if hasattr(self.queryset.model, 'get_custom_fields'):\n for custom_field in self.queryset.model().get_custom_fields():\n headers.append(custom_field.name)\n custom_fields.append(custom_field.name)\n\n csv_data.append(','.join(headers))\n\n # Iterate through the queryset appending each object\n for obj in self.queryset:\n data = obj.to_csv()\n\n for custom_field in custom_fields:\n data += (obj.cf.get(custom_field, ''),)\n\n csv_data.append(csv_format(data))\n\n return '\\n'.join(csv_data)", "def ortra_export(request):\n export_fields = OrderedDict(ORTRA_EXPORT_FIELDS)\n export = OpenXMLExport('Exportation')\n export.write_line(export_fields.keys(), bold=True) # Headers\n # Data\n query_keys = [f for f in export_fields.values() if f is not None]\n query = Student.objects.filter(Q(klass__name__contains='ASAFE') |\n Q(klass__name__contains='ASEFE') |\n Q(klass__name__contains='ASSCFE'),\n archived=False).order_by('klass__name',\n 'last_name',\n 'first_name')\n\n for line in query.values(*query_keys):\n values = []\n for field in query_keys:\n if field == 'gender':\n values.append(('Madame', 'Monsieur')[line[field] == 'M'])\n else:\n values.append(line[field])\n export.write_line(values)\n\n return export.get_http_response('ortra_export')", "def test_export_with_str_datetime(self):\n self._test_export_stream(Users)", "def download_bank_details(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"bank_details.csv\"'\n\n writer = csv.writer(response)\n \n writer.writerow([\n 's/n',\n 'account_number',\n 'account_name',\n 'recipient_code',\n 'bank_name',\n 'student_name',\n 'date_added'\n ])\n \n count = 0\n for bank in StudentBankDetail.objects.filter(month=batch_date):\n count +=1\n writer.writerow([\n count,\n str(bank.account_number),\n str(bank.account_name),\n str(bank.recipient_code),\n str(bank.bank.bank_name),\n str(bank.student.name),\n datetime.strftime(bank.date_added, '%d-%m-%Y')\n ])\n \n\n\n return response", "def export_table (self,_w):\n try:\n _data = \"\"\n maxRow = _w.rowCount()\n maxColumn = _w.columnCount()\n for hc in range(0,maxColumn):\n try: _hci = str(_w.horizontalHeaderItem(hc).text())\n except:_hci=\"None\";pass\n if hc == (maxColumn-1) :_data += _hci\n elif hc < maxColumn:_data += \"%s,\" % _hci\n _data += \"\\n\"\n for r in range(0, maxRow):\n for c in range(0, maxColumn):\n _d = str(_w.item(r, c).text())\n if c == (maxColumn-1):_data += _d\n elif c < maxColumn:_data += \"%s,\" % _d\n _data += \"\\n\"\n options = QFileDialog.Options()\n saved_file, _ = QFileDialog.getSaveFileName(self, \"Save Table to file \", \"data\", \"Plain Text (*.txt);;CSV (*.csv);;All Files (*)\", options=options)\n _file = open(saved_file, 'w')\n _file.write(_data)\n _file.close()\n except FileNotFoundError:pass", "def save_entries(self):\n with open(self.file_name, \"w\") as file:\n file.write('date,name,minutes,note\\n')\n for entry in self.entries:\n writer = csv.writer(file)\n writer.writerow([entry.date, entry.name, entry.minutes, entry.note])", "def get_serialized_data(self):\n results = []\n fields = self.get_model_obj()._meta.fields\n\n user_dict = {}\n object_dict = {}\n related_dict = {}\n\n object_list = self.get_queryset().distinct().values()\n\n 
href = u'<a href=\"{}\">{}</a>'\n type_choices = (('+', u'Created'), ('~', u'Changed'), ('-', u'Deleted'))\n for index, item in enumerate(object_list):\n data = {\"DT_RowId\": item.get('history_id'), 0: '',\n 1: '', 2: '', 3: '-', 4: '-', 5: '-', 6: '-'}\n\n # 0 - Date\n # 1 - Who\n # 2 - Object (if not specific)\n # 3 - Action\n # 4 - Field\n # 5 - Previous\n # 6 - Current\n data[0] = item.get('history_date').strftime(\"%m/%d/%y %H:%M\")\n\n try:\n data[1] = user_dict[item.get('history_user_id')]\n except KeyError:\n link = u''\n try:\n user = User.objects.get(id=item.get('history_user_id'))\n link = href.format(user.profile.get_absolute_url(),user.get_full_name())\n except ObjectDoesNotExist:\n link = u\"Administrator*\"\n user_dict[item.get('history_user_id')] = link\n data[1] = user_dict[item.get('history_user_id')]\n\n try:\n data[2] = object_dict[item.get('id')]\n except KeyError:\n link, data_obj = u'', u''\n try:\n data_obj = self.get_object(id=item.get('id'))\n name = data_obj.__unicode__()\n name = name if len(name) < 32 else name[0:32] + u\" ...\"\n link = href.format(data_obj.get_absolute_url(), name)\n except ObjectDoesNotExist:\n link = \"Deleted\"\n except AttributeError:\n link = data_obj.__unicode__()\n object_dict[item.get('id')] = link\n data[2] = object_dict[item.get('id')]\n\n data[3] = next((y for x, y in type_choices if x == item.get('history_type')), u\"-\")\n\n\n try:\n previous_item = object_list[index - 1]\n except AssertionError:\n previous_item = {}\n changed_fields, prev_values, cur_values = [], [], []\n for field in fields:\n if field.name == \"modified_date\":\n continue\n prev_value = previous_item.get(field.name, u\"-\")\n prev_value = prev_value if prev_value else u\"-\"\n curr_value = item.get(field.name, u\"-\")\n curr_value = curr_value if curr_value else u\"-\"\n\n # Handle nice choices keys\n if hasattr(field, '_choices') and len(field._choices) and curr_value != u'-':\n curr_value = next((x[1] for x in field._choices if str(x[0]) == str(curr_value)))\n # Handle foreign keys.\n elif hasattr(field, 'related') and curr_value != u'-':\n try:\n curr_value = related_dict[(field.name, curr_value)]\n # log.debug(\"Related (current) Dict - Query Saved {} = {}\".format(field.name, curr_value))\n except KeyError:\n _v = u'-'\n try:\n _v = field.related.parent_model.objects.get(id=curr_value).__unicode__()\n except ObjectDoesNotExist:\n _v = u'Deleted'\n # log.debug(\"Setting C Related ({}, {}) = {}\".format(field.name, curr_value,_v))\n related_dict[(field.name, curr_value)] = _v\n curr_value = related_dict[(field.name, curr_value)]\n\n if hasattr(field, '_choices') and len(field._choices) and prev_value != u'-':\n prev_value = next((x[1] for x in field._choices if str(x[0]) == str(prev_value)))\n\n elif hasattr(field, 'related') and prev_value != u'-':\n try:\n prev_value = related_dict[(field.name, prev_value)]\n # log.debug(\"Related (prev) Dict - Query Saved {} = {}\".format(field.name, prev_value))\n except KeyError:\n _v = u'-'\n try:\n _v = field.related.parent_model.objects.get(id=prev_value).__unicode__()\n except ObjectDoesNotExist:\n _v = u'Deleted'\n # log.debug(\"Setting P Related ({}, {}) = {}\".format(field.name, prev_value,_v))\n related_dict[(field.name, prev_value)] = _v\n prev_value = related_dict[(field.name, prev_value)]\n\n if prev_value != curr_value:\n changed_fields.append(field.name)\n prev_values.append(prev_value)\n cur_values.append(curr_value)\n if len(changed_fields):\n data[4] = u\"<br />\".join([unicode(x) for x in 
changed_fields])\n data[5] = u\"<br />\".join([unicode(x) for x in prev_values])\n data[6] = u\"<br />\".join([unicode(x) for x in cur_values])\n results.append(data)\n\n results.reverse()\n # log.debug(pformat(results))\n return results", "def export_file_dto(self, active_model, objs=[], type=''):\n dto_parser = DtoParser()\n objs2 = []\n for obj in objs:\n objs2 += dto_parser.parseJointPromotion(obj)\n\n doc_type_obj = self.env[\"edi.doc.type\"]\n doc_obj = self.env[\"edi.doc\"]\n doc_type = doc_type_obj.search([(\"code\", '=', \"dto\")])[0]\n last_dto_file = doc_obj.search([(\"doc_type\", '=', doc_type.id)],\n order=\"date desc\", limit=1)\n if last_dto_file:\n count = last_dto_file.count + 1\n else:\n count = 1\n\n tmp_name = \"export_dto.txt\"\n file_len = len(objs2)\n filename = \"%sDTO%s.%s\" % (self.env.user.company_id.frigo_code,\n str(file_len).zfill(4),\n str(count).zfill(4))\n templates_path = self.addons_path('frigo_edi') + os.sep + 'wizard' + \\\n os.sep + 'templates' + os.sep\n mylookup = TemplateLookup(input_encoding='utf-8',\n output_encoding='utf-8',\n encoding_errors='replace')\n tmp = Template(filename=templates_path + tmp_name,\n lookup=mylookup, default_filters=['decode.utf8'])\n\n doc = tmp.render_unicode(o=objs2, type_=type, datetime=datetime,\n user=self.env.user).encode('utf-8', 'replace')\n file_name = self[0].service_id.output_path + os.sep + filename\n f = file(file_name, 'w')\n f.write(doc)\n f.close()\n file_obj = self.create_doc(filename, file_name, doc_type)\n file_obj.count = count", "def saved_results(request):\n timestamps = []\n for i in Source.objects.filter(user=request.user):\n timestamps.append({'id':i.source_id, 'val':i.datetime_extracted.strftime('%d/%m/%Y %H:%M') + \" \" + i.source})\n form = DeleteRelsCSVForm()\n return render(request, 'saved_results.html', {'timestamps':timestamps, 'form':form})", "def write_date(self, daty):\r\n for elem in range(len(self.output_zakladki)):\r\n first_row, first_col, no_date, cegla_no = 2, 1, 0, 0\r\n\r\n sheet = self.output_zakladki[elem]\r\n sh = self.output_file.get_sheet_by_name(sheet)\r\n\r\n for no in range(self.liczba_pelnych_linii): # writing data in complete row, containing 5 cegla\r\n for i in range(15):\r\n sh[self.alfabet[first_col] + str(first_row)] = daty[no_date]\r\n first_col += 1\r\n no_date += 1\r\n if no_date == 3:\r\n no_date = 0\r\n cegla_no += 5\r\n first_col = 1\r\n first_row += len(self.output_leki[elem]) + 2\r\n\r\n # writing data in incomplete row, containing < 5 cegla\r\n mising_date = (len(self.output_lista_cegiel) - cegla_no) * 3\r\n for i in range(mising_date):\r\n sh[self.alfabet[first_col] + str(first_row)] = daty[no_date]\r\n first_col += 1\r\n no_date += 1\r\n if no_date == 3:\r\n no_date = 0", "def export_as_csv(modeladmin, request, queryset):\n opts = modeladmin.model._meta\n field_names = [field.name for field in opts.fields]\n labels = []\n\n if exclude:\n field_names = [f for f in field_names if f not in exclude]\n\n elif fields:\n field_names = [field for field, _ in fields]\n labels = [label for _, label in fields]\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % (\n str(opts).replace('.', '_')\n )\n\n writer = csv.writer(response)\n\n if header:\n writer.writerow(labels if labels else field_names)\n\n for obj in queryset:\n writer.writerow([prep_field(request, obj, field, manyToManySep) for field in field_names])\n return response", "def export_query():\n\tdata = 
frappe._dict(frappe.local.form_dict)\n\n\tdel data[\"cmd\"]\n\tif \"csrf_token\" in data:\n\t\tdel data[\"csrf_token\"]\n\n\tif isinstance(data.get(\"filters\"), string_types):\n\t\tfilters = json.loads(data[\"filters\"])\n\tif isinstance(data.get(\"report_name\"), string_types):\n\t\treport_name = data[\"report_name\"]\n\t\tfrappe.permissions.can_export(\n\t\t\tfrappe.get_cached_value('Report', report_name, 'ref_doctype'),\n\t\t\traise_exception=True\n\t\t)\n\tif isinstance(data.get(\"file_format_type\"), string_types):\n\t\tfile_format_type = data[\"file_format_type\"]\n\t\n\tif isinstance(data.get(\"visible_idx\"), string_types):\n\t\tvisible_idx = json.loads(data.get(\"visible_idx\"))\n\telse:\n\t\tvisible_idx = None\n\t\n\t# add filter this customer\n\tparty = get_party()\n\tfilters[\"customer\"] = party.name or \"\"\n\n\tif file_format_type == \"Excel\":\n\t\tdata = run(report_name, filters)\n\t\tdata = frappe._dict(data)\n\t\tcolumns = get_columns_dict(data.columns)\n\n\t\tfrom frappe.utils.xlsxutils import make_xlsx\n\t\txlsx_data = build_xlsx_data(columns, data)\n\t\t\n\t\txlsx_file = make_xlsx(xlsx_data, \"Query Report\")\n\n\t\tfrappe.response['filename'] = report_name + '.xlsx'\n\t\tfrappe.response['filecontent'] = xlsx_file.getvalue()\n\t\tfrappe.response['type'] = 'binary'", "def test_fetchall(self):\n result = export.processExport(houseId=1)\n #We should have 2 locations * 1 sensor * 10 days of data here\n # 2 * 1 * (288 * 10) == 5670\n #print result.shape\n\n #result.to_csv(\"temp.csv\")\n #Do we get the right object\n self.assertEqual(type(result), pandas.DataFrame)\n #And is it the right size\n self.assertEqual(result.shape, (2880, 2)) #So 2880 samples from two sensors\n #And the right range of data\n self.assertEqual(result.index[0], datetime.datetime(2013, 01, 01))\n self.assertEqual(result.index[-1], datetime.datetime(2013, 01, 10, 23, 55))", "def create_csv(self, type): \n if os.path.isfile(_path_finder('keydata','{0}_{1}.db'.format(\n self.keyword,type))):\n self.__db_init('{0}'.format(type))\n self.c.execute(\"SELECT MIN(date) FROM tweets\")\n mindate = self.c.fetchone()[0][0:10]\n self.c.execute(\"SELECT MAX(date) FROM tweets\")\n maxdate = self.c.fetchone()[0][0:10]\n start_date = datetime.datetime.strptime(mindate, '%Y-%m-%d')\n end_date = (datetime.datetime.strptime(maxdate, '%Y-%m-%d') + \n datetime.timedelta(days=1))\n \n def __date_range(start, end):\n for n in range((end - start).days):\n yield start + datetime.timedelta(days=n)\n \n def __db_to_list():\n for single_date in __date_range(start_date, end_date):\n d = \"\".join(['%',single_date.strftime(\"%Y-%m-%d\"),'%'])\n self.c.execute('''SELECT count(*) FROM tweets where \n date like('{0}')'''.format(d))\n yield [d[1:11], self.c.fetchone()[0]]\n \n path = _path_finder('keydata','{0}_{1}.csv'.format(\n self.keyword,type))\n if sys.version_info[0] < 3: #Python3 compatibility check\n infile = open(path, 'wb')\n else:\n infile = open(path, 'w', newline='', encoding='utf8')\n with infile as f:\n writer = csv.writer(f)\n writer.writerows(__db_to_list())\n self.conn.commit()\n self.conn.close()\n print('\\nReport has been created:')\n print(os.path.abspath(path))", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_auth_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n 
book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export_repayment_xlsx(request):\n import openpyxl\n try:\n from openpyxl.cell import get_column_letter\n except ImportError:\n from openpyxl.utils import get_column_letter\n\n from_date = request.GET.get('from_date')\n to_date = request.GET.get('to_date')\n search = request.GET.get('search_value') or ''\n search_query = Q()\n if search:\n search_query = Q(user__user__username__icontains=search) | \\\n Q(user__user__first_name__icontains=search) | \\\n Q(project__title__icontains=search) | \\\n Q(amount__icontains=search) | \\\n Q(user__user__last_name__icontains=search) | \\\n Q(user__user__email__icontains=search)\n if from_date and to_date:\n import datetime\n import pytz\n date1 = datetime.datetime.strptime(from_date, '%Y-%m-%d').date()\n date2 = datetime.datetime.strptime(to_date, '%Y-%m-%d').date()\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00,\n created_at__range=[\n datetime.datetime(date1.year, date1.month, date1.day, 8, 15,\n 12, 0, pytz.UTC),\n datetime.datetime(date2.year, date2.month, date2.day, 8, 15,\n 12, 0, pytz.UTC)]).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").filter(search_query).iterator()\n else:\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").filter(search_query).iterator()\n response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = 'attachment; filename=Repayment_report.xlsx'\n wb = openpyxl.Workbook()\n ws = wb.get_active_sheet()\n ws.title = \"RE-volv\"\n\n row_num = 0\n\n columns = [\n (u\"FIRST NAME\", 30),\n (u\"LAST NAME\", 30),\n (u\"USERNAME\", 30),\n (u\"EMAIL\", 30),\n (u\"DATE\", 30),\n (u\"NAME OF PROJECT\", 30),\n (u\"DONATION AMOUNT\", 30),\n (u\"REPAYMENT AMOUNT\", 30),\n ]\n\n for col_num in xrange(len(columns)):\n c = ws.cell(row=row_num + 1, column=col_num + 1)\n c.value = columns[col_num][0]\n ws.column_dimensions[get_column_letter(col_num + 1)].width = columns[col_num][1]\n\n for payment in repayments:\n row_num += 1\n row = [\n payment.user.user.first_name,\n payment.user.user.last_name,\n payment.user.user.username,\n payment.user.user.email,\n payment.created_at,\n payment.project.title,\n round(Payment.objects.filter(user=payment.user).filter(project=payment.project).aggregate(Sum('amount'))[\n 'amount__sum'] or 0, 2),\n round(payment.amount, 2)\n\n ]\n for col_num in xrange(len(row)):\n c = ws.cell(row=row_num + 1, column=col_num + 1)\n c.value = row[col_num]\n wb.save(response)\n return response", "def get_export_queryset(self, request, context):\n # scope = self.request.POST.get('_select_across', False) == '1'\n scope = request.GET.get('scope')\n select_across = request.GET.get('_select_across', False) == '1'\n selected = request.GET.get('_selected_actions', '')\n if scope == 'all':\n queryset = self.admin_view.queryset()\n elif scope == 'header_only':\n queryset = []\n elif scope == 'selected':\n if not select_across:\n selected_pk = selected.split(',')\n queryset = self.admin_view.queryset().filter(pk__in=selected_pk)\n else:\n queryset = self.admin_view.queryset()\n else:\n queryset = [r['object'] for r in context['results']]\n return queryset", "def get_obj_df():\n obj_df = pd.read_csv('data/object.csv')\n obj_df = obj_df.drop_duplicates()[['course_id', 'module_id', 'category', 'start']] \n obj_df['start'] = 
pd.to_datetime(obj_df[obj_df['start'] != 'null']['start'])\n return obj_df", "def print_stock_rotation_report(self):\n warehouses = False\n locations = False\n from_date = False\n to_date = False\n active_id = self.ids[0]\n today=datetime.now().strftime(\"%Y-%m-%d\")\n f_name = 'Stock Rotation Report' + ' ' + today\n stock_warehouse_obj = self.env['stock.warehouse']\n stock_locations_obj = self.env['stock.location']\n product_obj = self.env['product.product']\n \n if self.filtaration == 'warehouse':\n if not self.include_all_warehouse:\n if not self.warehouse_ids:\n raise ValidationError(\"please select the Warehouse.\")\n warehouses = self.warehouse_ids\n else:\n warehouses = stock_warehouse_obj.search([])\n else:\n if not self.include_all_location:\n if not self.location_ids:\n raise ValidationError(\"please select the Locations.\")\n locations = self.location_ids\n else:\n locations = stock_locations_obj.search([('usage','=','internal')])\n\n\n if not self.from_date:\n raise ValidationError(\"please select the From Date.\")\n \n if not self.to_date:\n raise ValidationError(\"please select the To Date.\")\n\n all_products = product_obj.with_context(active_test=True).search([('type','=','product')])\n from_date = self.from_date\n to_date = self.to_date\n \n date_1 = time.strptime(from_date, \"%Y-%m-%d\")\n date_2 = time.strptime(to_date, \"%Y-%m-%d\")\n if not (date_1 <= date_2):\n raise ValidationError(\"Fromdate is not previous then Todate\")\n self.get_stock_rotation_report(from_date,to_date,warehouses,locations,all_products)\n if self.datas:\n return {\n 'type' : 'ir.actions.act_url',\n 'url':'web/content/?model=stock.rotation.report&download=true&field=datas&id=%s&filename=%s.xls'%(active_id,f_name),\n 'target': 'new',\n }", "def filter_meteo_data(self, startdate, enddate):\n self.all_meteo_data.columns.values[0]='Datum-tijd'\n self.all_meteo_data['datetime']=pd.to_datetime(self.all_meteo_data['Datum-tijd'], format='%Y-%m-%dT%H:%M:%SZ')\n self.all_meteo_data.drop(['Datum-tijd'],axis=1, inplace=True)\n mask = (self.all_meteo_data['datetime'] > startdate) & (self.all_meteo_data['datetime'] <= enddate)\n meteodata = self.all_meteo_data.loc[mask].copy()\n meteodata.set_index('datetime',inplace=True)\n return meteodata", "def _get_report_data(self, request, queryset):\n first_item = queryset[0]\n data = {\n 'id': str(slugify(first_item.invoice_no)),\n 'property_of_id': (\n first_item.property_of.id\n if first_item.property_of else None\n ),\n 'model': queryset.model._meta.model_name,\n 'base_info': {\n 'invoice_no': first_item.invoice_no,\n 'invoice_date': first_item.invoice_date,\n 'provider': first_item.provider,\n 'datetime': datetime.datetime.now().strftime(\n self._invoice_report_datetime_format\n ),\n },\n 'items': list(map(self._parse_item, queryset)),\n 'sum_price': str(\n queryset.aggregate(\n Sum(self._price_field)\n ).get('{}__sum'.format(self._price_field))\n )\n }\n logger.info('Invoice report data: {}'.format(data))\n return data", "def raw_csv_sys_2w(request):\n two_weeks = datetime.date.today() - datetime.timedelta(days=14)\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'atachment; filename = \"raw-powerbi-sys-2w.csv\"'\n sys_er = System_error.objects.filter(event_date__gt=two_weeks)\n sys_w = System_warning.objects.filter(event_date__gt=two_weeks)\n sys_crit = System_critical.objects.filter(event_date__gt=two_weeks)\n writer = csv.writer(response)\n for line in sys_er:\n writer.writerow([line.event_id, line.event_source, 
line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'system error'])\n for line in sys_w:\n writer.writerow([line.event_id, line.event_source, line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'system warning'])\n for line in sys_crit:\n writer.writerow([line.event_id, line.event_source, line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'system critical'])\n \n return response", "def download_queryset(self, queryset, export_format):\n\n dataset = LocationResource().export(queryset=queryset)\n filedata = dataset.export(export_format)\n filename = f\"InvenTree_Locations.{export_format}\"\n\n return DownloadFile(filedata, filename)", "def export_gegevens(te_doorzoeken_kolom, is_gelijk_aan, exporteer_gegevens, bestandsnaam):\n empt_lists = []\n # array met lege lijsten maken\n for n in range(len(exporteer_gegevens)):\n empt_lists.append([])\n\n # gegevens aan de gemaakte array toevoegen in de betreffende lijsten\n for i in range(len(df)):\n if df[te_doorzoeken_kolom].iloc[i] == is_gelijk_aan:\n for j in range(len(exporteer_gegevens)):\n empt_lists[j].append(df[exporteer_gegevens[j]].iloc[i])\n\n df_angfo = pd.DataFrame(empt_lists[0], columns=[exporteer_gegevens[0]])\n\n # geheel omzetten naar een pandas dataframe\n for k in range(len(exporteer_gegevens)):\n df_angfo[exporteer_gegevens[k]] = pd.Series(empt_lists[k], index=df_angfo.index)\n\n df_angfo.to_excel(bestandsnaam, 'Sheet1')\n print('Het bestand is opgeslagen als: %s' % bestandsnaam)", "def _get_date(self):\n for fax_out in self:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n date_planned = False\n if fax_out.date:\n from_dt = datetime.datetime.strptime(str(fax_out.date[:19]), DATETIME_FORMAT)\n # from_dt = from_dt + datetime.timedelta(hours=5 , minutes=30)\n date_planned = from_dt.strftime('%Y-%m-%d')\n fax_out.fax_date = date_planned", "def _get_data_from_view(self):\n self.log.info(\"Getting data from view: vw_AllSurveyData \")\n view_data = self.db.execute_pandas_query(self._get_query('vw_survey_data'))\n self._export_data_to_csv(view_data, 'fresh_survey_data.csv')", "def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n time_formatter = date.getLocaleFormatter(self.request, \"time\", \"short\")\n for result in results:\n data = {}\n data[\"subject\"] = result.short_name\n # this tab appears in the workspace pi/ view...\n data[\"url\"] = url.set_url_context(\"../calendar/sittings/obj-%i/schedule\" %\n result.sitting_id)\n # Note: same UI is also displayed at: \n # /business/sittings/obj-%i/schedule % result.sitting_id\n data[\"items\"] = \"\"\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"\"\n data[\"type\"] = result.group.type\n data[\"group\"] = u\"%s %s\" % (\n result.group.type.capitalize(), result.group.short_name)\n data[\"time_from_to\"] = (\n time_formatter.format(result.start_date),\n time_formatter.format(result.end_date))\n data[\"date\"] = formatter.format(result.start_date) \n if result.venue:\n data[\"venue\"] = _(result.venue.short_name)\n else:\n date[\"venue\"] = \"\"\n if type(result)==domain.Question:\n data[\"to\"] = result.ministry.short_name\n else:\n data[\"to\"]= \"\"\n # past, present, future\n today = datetime.datetime.today().date()\n startday = result.start_date.date()\n if today==startday:\n 
data[\"css_class\"] = \"present\"\n elif today>startday:\n data[\"css_class\"] = \"past\"\n else:\n data[\"css_class\"] = \"future\"\n data_list.append(data)\n self._data = data_list", "def get_queryset(self):\n queryset = models.Drukteindex.objects.all().order_by(\"index\")\n\n vollcode = self.request.query_params.get('vollcode', None)\n if vollcode is not None:\n queryset = queryset.filter(vollcode=vollcode)\n\n timestamp_str = self.request.query_params.get('timestamp', None)\n\n if timestamp_str is None:\n timestamp_dt = datetime.now()\n\n if timestamp_str is not None:\n timestamp_dt = convert_to_date(timestamp_str)\n\n if timestamp_dt > convert_to_date('07-12-2017-00-00-00'):\n current_day = timestamp_dt.strftime(\"%A\")\n if current_day == 'Friday':\n timestamp_dt = '02-12-2017-23-00-00'\n elif current_day == 'Saturday':\n timestamp_dt = '01-12-2017-23-00-00'\n elif current_day == 'Sunday':\n timestamp_dt = '03-12-2017-23-00-00'\n elif current_day == 'Monday':\n timestamp_dt = '04-12-2017-23-00-00'\n elif current_day == 'Tuesday':\n timestamp_dt = '05-12-2017-23-00-00'\n elif current_day == 'Wednesday':\n timestamp_dt = '06-12-2017-23-00-00'\n elif current_day == 'Thursday':\n timestamp_dt = '07-12-2017-23-00-00'\n timestamp_dt = convert_to_date(timestamp_dt)\n\n today = timestamp_dt.date()\n\n level = self.request.query_params.get('level', None)\n if level == 'day':\n queryset = queryset.filter(timestamp__date=today)\n exclude = ('weekday',)\n\n if level == 'week':\n yesterday = today - timedelta(days=1)\n previous_week = yesterday - timedelta(days=7)\n queryset = queryset.filter(\n timestamp__gte=previous_week, timestamp__lt=yesterday)\n current_hour = timestamp_dt.hour\n queryset = queryset.filter(timestamp__hour=current_hour)\n\n return queryset", "def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n for result in results:\n data ={}\n data[\"qid\"]= (\"m_\" + str(result.motion_id))\n data[\"subject\"] = u\"M \" + str(result.motion_number) + u\" \" + result.short_name\n data[\"title\"] = result.short_name\n if result.approval_date:\n data[\"result_item_class\"] = (\"workflow-state-\" + \n result.status + \"sc-after-\" + \n datetime.date.strftime(result.approval_date, \"%Y-%m-%d\"))\n else:\n data[\"result_item_class\"] = \"workflow-state-\" + result.status\n data[\"url\"] = url.set_url_context(\"motions/obj-\" + str(result.motion_id))\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"%s %s\" %(result.owner.first_name, result.owner.last_name)\n data[\"type\"] = _(result.type)\n data[\"to\"] = \"\"\n data_list.append(data)\n self._data = data_list", "def print_date_in_sheet(self,data_dict,workbook,sheet_data,row_data,body_style,qty_cell_style,value_style,blank_cell_style,value_style2):\n product_data_dict = collections.OrderedDict()\n row=4\n column=0\n for warehouse_id,data_details in data_dict.iteritems():\n for product_data in data_details:\n row=row_data[sheet_data[warehouse_id]]\n sheet_data[warehouse_id].row(row).height = 350\n opening_stock = product_data.get('opening_qty') or 0\n qty_purchase = product_data.get('qty_purchase_in_duration') or 0\n qty_sale = product_data.get('qty_sales_in_duration') or 0\n scap_qty = product_data.get('scrap_location_qty') or 0\n adj_qty = product_data.get('adjusted_qty_in_duration') or 0\n last_sales = product_data.get('last_sales') or ''\n last_purchase_date = 
product_data.get('last_purchase_date') or ''\n warehouse_in_qty = product_data.get('warehouse_in_qty') or 0\n warehouse_out_qty = product_data.get('warehouse_out_qty') or 0\n closing_qty = (opening_stock+qty_purchase+warehouse_in_qty)-(qty_sale+scap_qty+warehouse_out_qty)\n sheet_data[warehouse_id].write(row,column,product_data.get('sku'),body_style)\n sheet_data[warehouse_id].write(row,column+1,product_data.get('name') or '-',body_style)\n sheet_data[warehouse_id].write(row,column+2,product_data.get('Cost') or 0,qty_cell_style)\n sheet_data[warehouse_id].write(row,column+3,product_data.get('sales_price') or 0,qty_cell_style)\n sheet_data[warehouse_id].write(row,column+4,None,blank_cell_style)\n sheet_data[warehouse_id].write(row,column+5,opening_stock,value_style2)\n sheet_data[warehouse_id].write(row,column+6,qty_purchase,value_style2)\n sheet_data[warehouse_id].write(row,column+7,qty_sale,value_style2)\n sheet_data[warehouse_id].write(row,column+8,scap_qty,value_style2)\n sheet_data[warehouse_id].write(row,column+9,adj_qty,value_style2)\n sheet_data[warehouse_id].write(row,column+10,closing_qty,value_style2)\n sheet_data[warehouse_id].write(row,column+11,None,blank_cell_style)\n sheet_data[warehouse_id].write(row,column+12,warehouse_in_qty,value_style)\n sheet_data[warehouse_id].write(row,column+13,warehouse_out_qty,value_style)\n sheet_data[warehouse_id].write(row,column+14,None,blank_cell_style)\n sheet_data[warehouse_id].write(row,column+15,last_purchase_date,body_style)\n sheet_data[warehouse_id].write(row,column+16,last_sales,body_style)\n row+=1\n row_data.update({sheet_data[warehouse_id]: row})\n product_data_dict = self.prepare_date_for_all_warehouses_sheets(product_data.get('product'),product_data_dict,opening_stock,last_sales,last_purchase_date,qty_purchase,qty_sale,scap_qty,adj_qty,warehouse_in_qty,warehouse_out_qty)\n return product_data_dict", "def species_points(request, format='csv'):\n \n \n species = request.GET.get('species')\n if species:\n records = ( SpeciesPoints.objects\n .filter(valid_species_name=species)\n .filter(lon__isnull=False)\n .filter(lat__isnull=False) )\n \n \n if request.GET.get('lon'):\n records = records.filter(lon=request.GET.get('lon'))\n \n if request.GET.get('lat'):\n records = records.filter(lat=request.GET.get('lat'))\n \n if request.GET.get('max_lat'):\n records = records.filter(lat__lte=request.GET.get('max_lat'))\n \n if request.GET.get('max_lon'):\n records = records.filter(lon__lte=request.GET.get('max_lon'))\n \n if request.GET.get('min_lat'):\n records = records.filter(lat__gte=request.GET.get('min_lat'))\n \n if request.GET.get('min_lon'):\n records = records.filter(lon__gte=request.GET.get('min_lon'))\n \n if request.GET.get('bentity_id'):\n records = records.filter(bentity_id=request.GET.get('bentity_id')) \n \n \n \n # fetch all the bentitites at once, so we don't have to hit the database once for each record\n records = records.prefetch_related('bentity') \n \n # serialize to JSON\n export_objects = [{\n 'gabi_acc_number': r.gabi_acc_number,\n 'species': species,\n 'lat': r.lat,\n 'lon': r.lon,\n 'status':r.status,\n 'bentity_id': r.bentity_id,\n 'bentity_name': r.bentity.bentity,\n 'num_records': r.num_records,\n 'literature_count': r.literature_count,\n 'museum_count': r.museum_count,\n 'database_count': r.database_count,\n } for r in records]\n \n \n if format == 'csv':\n return CSVResponse(\n export_objects,\n fields=('species', 'lat', 'lon', 'bentity_id', 'bentity_name', 'status', 'num_records', 'literature_count', 
'museum_count', 'database_count') )\n \n else: \n return JSONResponse({'records': export_objects})\n \n else: # punt if the request doesn't have a species\n return errorResponse(\"Please supply a 'species' argument.\", format, {'records':[]})", "def export_scholars(modeladmin, request, queryset):\r\n response = HttpResponse('', content_type='text/csv; charset=utf-8')\r\n response['Content-Disposition'] = 'attachment; filename=cos.csv'\r\n writer = csv.writer(response)\r\n writer.writerow([\r\n 'Title',\r\n 'Reviewer',\r\n 'Leader',\r\n 'Leader Email',\r\n 'Sponsor',\r\n 'Other Sponsor',\r\n 'Presenters',\r\n 'Funding Source',\r\n 'Work Type',\r\n 'Permission to Reproduce',\r\n 'Faculty Sponsor Approval',\r\n 'Table',\r\n 'Electricity',\r\n 'Link',\r\n 'Poster',\r\n 'Date created',\r\n ])\r\n for presentation in queryset:\r\n link = 'http://{0}{1}'.format(\r\n settings.SERVER_URL,\r\n presentation.get_absolute_url(),\r\n )\r\n poster = 'http://{0}/assets/{1}'.format(\r\n settings.SERVER_URL, presentation.poster_file,\r\n )\r\n try:\r\n leader = '{0}, {1}'.format(\r\n presentation.leader.last_name,\r\n presentation.leader.first_name,\r\n )\r\n except Exception:\r\n leader = ''\r\n presenters = ''\r\n for presenter in presentation.presenters.all():\r\n if not presenter.leader:\r\n presenters += '{0}, {1}|'.format(\r\n presenter.last_name, presenter.first_name,\r\n )\r\n title = smart_str(\r\n presentation.title,\r\n encoding='utf-8',\r\n strings_only=False,\r\n errors='strict',\r\n )\r\n funding = smart_str(\r\n presentation.funding,\r\n encoding='utf-8',\r\n strings_only=False,\r\n errors='strict',\r\n )\r\n work_type = smart_str(\r\n presentation.work_type,\r\n encoding='utf-8',\r\n strings_only=False,\r\n errors='strict',\r\n )\r\n sponsor_email = ''\r\n if presentation.leader:\r\n sponsor_email = presentation.leader.sponsor_email\r\n sponsor_other = presentation.leader.sponsor_other\r\n writer.writerow([\r\n title,\r\n presentation.reviewer,\r\n leader,\r\n presentation.user.email,\r\n sponsor_email,\r\n sponsor_other,\r\n presenters[:-1],\r\n funding,\r\n work_type,\r\n presentation.permission,\r\n presentation.shared,\r\n presentation.need_table,\r\n presentation.need_electricity,\r\n link,poster,\r\n presentation.date_created,\r\n ])\r\n return response", "def export_vrouwen():\n voornaam, tussenvoegsel, achternaam, straat, huisnummer, postcode, woonplaats = [], [], [], [], [], [], []\n for i in range(len(df)):\n if df['Vrouw'].iloc[i]:\n voornaam.append(df['voornaam'].iloc[i])\n tussenvoegsel.append(df['tussenvoegsel'].iloc[i])\n achternaam.append(df['achternaam'].iloc[i])\n straat.append(df['straat'].iloc[i])\n huisnummer.append(df['huisnummer'].iloc[i])\n postcode.append(df['postcode'].iloc[i])\n woonplaats.append(df['woonplaats'].iloc[i])\n\n df_angfo = pd.DataFrame(voornaam, columns=['voornaam'])\n df_angfo['tussenvoegsel'] = pd.Series(tussenvoegsel, index=df_angfo.index)\n df_angfo['achternaam'] = pd.Series(achternaam, index=df_angfo.index)\n df_angfo['straat'] = pd.Series(straat, index=df_angfo.index)\n df_angfo['huisnummer'] = pd.Series(huisnummer, index=df_angfo.index)\n df_angfo['postcode'] = pd.Series(postcode, index=df_angfo.index)\n df_angfo['woonplaats'] = pd.Series(woonplaats, index=df_angfo.index)\n df_angfo.to_excel('output\\\\vrouwen_leden.xlsx', 'vrouwen_leden')", "def download_excel(restaurant_id):\n raw_data = get_menu_items_based_on_restaurant(restaurant_id=restaurant_id)\n csv_file_path = \"{}/file.csv\".format(settings.BASE_DIR)\n static_form = ['name', 
'description', 'price', 'category', 'sub_category']\n with open(csv_file_path, 'w', newline='') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=static_form)\n writer.writeheader()\n writer.writerows(raw_data['itemsList'])\n csv_file.close()\n return csv_file_path", "def order_report():", "def DownloadRingtoneDataSince(request, since):\r\n response = HttpResponse(mimetype='text/csv')\r\n response['Content-Disposition'] = 'attachment; filename=ringtones.csv'\r\n\r\n writer = csv.DictWriter(response, models.Ringtone.CSV_FILEDS)\r\n # Hack. Write the header first.\r\n d = {}\r\n for k in models.Ringtone.CSV_FILEDS:\r\n d[k] = k\r\n writer.writerow(d)\r\n if since:\r\n query = models.Ringtone.all().filter('creation_time >= ',\r\n datetime.datetime.strptime(since, \"%Y-%m-%dT%H:%M:%S.%fZ\"))\r\n else:\r\n query = models.Ringtone.all()\r\n for r in query:\r\n writer.writerow(r.DumpToCSVRow())\r\n return response", "def general_export(request):\n export_fields = OrderedDict(GENERAL_EXPORT_FIELDS)\n export = OpenXMLExport('Exportation')\n export.write_line(export_fields.keys(), bold=True) # Headers\n # Data\n query_keys = [f for f in export_fields.values() if f is not None]\n query = Student.objects.filter(archived=False).order_by('klass__name', 'last_name', 'first_name')\n for line in query.values(*query_keys):\n values = []\n for field in query_keys:\n if field == 'gender':\n values.append(('Madame', 'Monsieur')[line[field] == 'M'])\n elif field in ('dispense_ecg', 'dispense_eps', 'soutien_dys'):\n values.append('Oui' if line[field] is True else '')\n else:\n values.append(line[field])\n export.write_line(values)\n\n return export.get_http_response('general_export')", "def export_data(self):\r\n stocks = {}\r\n headings = ['Security', 'Price', 'Change', 'Change %', '52 Week', 'Market Cap']\r\n\r\n for data in range(6):\r\n for items in self.root.main.treeview.get_children():\r\n values = self.root.main.treeview.item(items, 'values')\r\n if headings[data] not in stocks:\r\n stocks[headings[data]] = []\r\n stocks.get(headings[data]).append(values[data])\r\n\r\n df = pd.DataFrame(stocks, columns=headings)\r\n path = tk.filedialog.asksaveasfilename(title='Save File As...',\r\n filetypes=((\"CComma-separated values (.csv)\", \"*.csv\"), (\"Text Document(.txt)\", \"*.txt\")))\r\n\r\n if not path:\r\n return\r\n else:\r\n df.to_excel(path, index=False, header=True)", "def generateCsvData(self, context, obj, entity):\n raise NotImplementedError()", "def _getdata(self, data):\n lines = []\n start_date = str(data['form']['start_date'])\n end_date = str(data['form']['end_date'])\n department_ids = data['form']['department_ids']\n\n vehicles_ids = self.pool.get('fleet.vehicle').search(self.cr, self.uid,\\\n [('department_id', 'in', department_ids)], context=self.context)\n\n fuel_qty_line_obj = self.pool.get('fuel.qty.line')\n\n sdate = datetime.strptime(start_date, \"%Y-%m-%d\")\n syear = sdate.year\n smonth = sdate.month\n edate = datetime.strptime(end_date, \"%Y-%m-%d\")\n eyear = edate.year\n emonth = edate.month\n\n fuel_qty_line_ids = fuel_qty_line_obj.search(self.cr, self.uid,\\\n [('vehicles_id', 'in', vehicles_ids)], context=self.context)\n\n\n\n counter = 1\n for qty_line in fuel_qty_line_obj.browse(self.cr, self.uid, \\\n fuel_qty_line_ids, context=self.context):\n current_m = int(qty_line.month)\n current_y = int(qty_line.year)\n start = current_m >= smonth and current_y >= syear\n end = current_m <= emonth and current_y <= eyear\n if start and end:\n line = {'type':str(counter)+\" : 
\"+\\\n qty_line.vehicles_id.type.name}\n line['vehicle_no'] = qty_line.vehicles_id.vin_sn\n line['spent'] = qty_line.spent_qty\n line['counter_no'] = str(qty_line.vehicles_id.odometer)+\" \"+\\\n qty_line.vehicles_id.odometer_unit\n line['date'] = qty_line.month+\"/\"+qty_line.year\n lines.append(line)\n counter += 1\n return lines", "def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )", "def create_query_csv(self):\n\n self.query_df.to_csv(self.query_output_file)", "def save_csv(self):\n path, _ = QtWidgets.QFileDialog.getSaveFileName(self, '保存数据', '', 'CSV(*.csv)')\n\n if not path:\n return\n\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator='\\n')\n\n writer.writerow(self.headers.keys())\n\n for row in range(self.rowCount()):\n row_data = []\n for column in range(self.columnCount()):\n item = self.item(row, column)\n if item:\n row_data.append(str(item.text()))\n else:\n row_data.append('')\n writer.writerow(row_data)", "def on_export_button(self, event):\n wildcard = \"Filtered _iso_res_filt.csv file (*_iso_res_filt.csv)|*_iso_res_filt.csv|\"\\\n \"All files (*.*)|*.*|\"\n defFile = self.datafile[:-4]+'_filt.csv'\n dlg = wx.FileDialog(\n self, message=\"Save file as ...\", \n defaultDir=self.currentDirectory, \n defaultFile=defFile, wildcard=wildcard, style=wx.SAVE\n )\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.recalcAll()\n self.redrawAll()\n self.dataFrame['priorFilter'] = self.dataFrame['allFPass']\n self.dataFrame.to_csv(path, index=False)\n summaryCSVPath = path.split('.')[0] + '_median_[' + ''.join(self.calcNum) + ']_[' + ''.join(self.calcDen) + '].csv'\n self.writeSummaryCSV(summaryCSVPath)\n \n dlg.Destroy()", "def download_data_slot(self):\n if self.result is None:\n self.label_current_message.setText('尚未有預測結果!請確認是否已載入資料並執行預測。')\n else:\n fileName, _ = QFileDialog.getSaveFileName(self, 'Save file', '', '*.csv') # 建立儲存檔案的對話盒(dialog)\n if fileName:\n self.result['date'] = pd.to_datetime(self.result['date'])\n raw_input_data = self.Data.copy() # 需要把原資料copy,否則直接取用的話,輸出結果會隨著下載次數而無謂增加\n output_data = raw_input_data.append(self.result.loc[:, ['date'] + [i for i in self.column_name]])\n output_data.to_csv(fileName, index = None)", "def write_csv(self):\n self.tableView.df.to_csv('Data export.csv', index=False)\n print('CSV file exported')", "def mono_sheet(self):\n xls = pandas.read_excel(str(self.source))\n xls.to_csv(str(self.dest), **self.kwargs)", "def _get_date(self):\n for fax_in in self:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n date_planned = False\n if fax_in.date:\n from_dt = datetime.datetime.strptime(str(fax_in.date[:19]), DATETIME_FORMAT)\n # from_dt = from_dt + datetime.timedelta(hours=5 , minutes=30)\n date_planned = from_dt.strftime('%Y-%m-%d')\n fax_in.fax_date = date_planned", "def generate_spreadsheet(request, id):\n election = get_object_or_404(Election, pk=id)\n response = render_to_response(\"django_elect/spreadsheet.html\", {\n 'full_stats': 
election.get_full_statistics(),\n })\n filename = \"election%s.xls\" % (election.pk)\n response['Content-Disposition'] = 'attachment; filename='+filename\n response['Content-Type'] = 'application/vnd.ms-excel; charset=utf-8'\n return response", "def print_facturas(self, data):\n #pruebas = self.env['calling'].search([('state', '=', 'active')])\n # self.nuevo = self.env['account.invoice'].search([('type','=','out invoice')])\n\n\n if self.date_from and self.date_to:\n fecha_inicio = self.date_from\n fecha_fin = self.date_to\n\n if datetime.strptime(fecha_inicio, DATE_FORMAT) >= datetime.strptime(fecha_fin, DATE_FORMAT):\n raise ValidationError('Advertencia! La fecha de inicio no puede ser superior a la fecha final')\n\n fecha_actual = str(date.today())\n if datetime.strptime(fecha_inicio, DATE_FORMAT) > datetime.strptime(fecha_actual, DATE_FORMAT):\n raise ValidationError('Advertencia! La fecha de inicio no puede ser mayor a la fecha actual')\n elif datetime.strptime(fecha_fin, DATE_FORMAT) > datetime.strptime(fecha_actual, DATE_FORMAT):\n raise ValidationError('Advertencia! La fecha de final no puede ser mayor a la fecha actual')\n\n calling_obj = self.env['calling']\n calling_ids = calling_obj.search(\n [('calling_date', '>=', fecha_inicio), ('calling_date', '<=', fecha_fin)])\n if calling_ids:\n ids = []\n for id in calling_ids:\n ids.append(id.id)\n datas = self.read(self.ids)[0]\n data = {\n 'ids': ids,\n 'model': 'report.tys_calling.report_services_sede',\n 'form': {\n 'datas': datas,\n 'date_from': self.date_from,\n 'date_to': self.date_to,\n 'sede': self.sede.id,\n 'all':self.all,\n },\n 'context': self._context\n }\n return self.env.ref('tys_calling.report_services_for_sede').report_action(self, data=data, config=False)\n else:\n raise ValidationError('Advertencia! 
No existen llamadas entre las fechas seleccionadas')\n\n\n # use `module_name.report_id` as reference.\n # `report_action()` will call `get_report_values()` and pass `data` automatically.", "def generate_dataset_csv(request):\n\n response = csv_export(request,Dataset)\n return response", "def career_teachers_excel(self, request):\n\n # Get the career to be processed their results.\n career_id = request.GET.get('career_id', '')\n career = EvaluationsCareer.objects.get(pk__exact=career_id)\n\n # Get the results for each esignature of the carrer en each exam.\n data = self.get_career_results(career)\n\n # Generates the CSV with the results of the career,then return as downloadable file.\n response = self.get_teacher_results_excel(data)\n return response", "def export_patient_records(request, app, model):\n restart_reasons = [r.reason_id for r in RestartReason.objects.order_by('reason_id')]\n \n header_row = ['Patient ID', 'Vesicant/Irritant?', 'IV Attempts']\n header_row += restart_reasons\n \n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=ebp.csv' \n writer = csv.writer(response)\n \n # Write the header (i.e., titles) row\n writer.writerow(header_row)\n \n for p in PatientRecord.objects.order_by('patient_id'):\n patient_restart_reasons = [r.reason_id for r in p.restart_reasons.order_by('id')]\n row = [p.patient_id, p.vesicant_irritant, p.iv_attempts]\n row += [r in patient_restart_reasons for r in restart_reasons]\n writer.writerow(row)\n \n return response", "def download(self,connector,condition):\n c= connector.cursor()\n# condition = \" WHERE DIF_ID=%d AND NUM=%d\" % (difid,num)\n snew = buildSelect(self,'HR2',condition)\n# print snew\n c.execute(snew)\n lnames=[]\n for name,val in sorted(self.__dict__.iteritems()):\n lnames.append(name)\n\n vobj=[]\n for row in c:\n # print row\n hr2=HR2Def(0)\n for i in range(len(lnames)):\n #print lnames[i],row[i]\n hr2.__dict__[lnames[i]]=row[i]\n vobj.append(hr2)\n return vobj", "def bring_records_to_file_using_threads():\n username = username_entry.get()\n password = password_entry.get()\n day = int(day_entry.get())\n month = int(month_entry.get())\n year = int(year_entry.get())\n today = datetime.date(year, month, day)\n if username in users:\n if password == users[username]:\n db = Database(database_name)\n data = db.fetch_calculations(day, month, year)\n # print(data)\n # print(today)\n save_to_file(today, data)", "def queryset(self):\n return [\n self.Item('John Doe', TestReport.STATUS_ACTIVE, date(1980, 5, 6), (1, 2)),\n self.Item('Jane Doe', TestReport.STATUS_ACTIVE, date(1981, 2, 21), (Decimal('2'), 4)),\n self.Item('Billy Boe', TestReport.STATUS_ACTIVE, date(1965, 1, 1), (3, 10)),\n self.Item('Jesus Christ', TestReport.STATUS_INACTIVE, date(1946, 12, 25), (Decimal('11.2'), Decimal('22.4'))),\n ]", "def dump3(request):\n \n if not request.user.is_authenticated:\n return HttpResponseForbidden()\n else:\n import csv\n from django.http import HttpResponse\n\n answer_list = list(Answer.objects.all())\n \n result = {} # date --> { usernumber --> { datetime -> [{\"argument_number\"}, ...] 
}\n \n for answer in answer_list:\n mydatetime = u\"%s\" % answer.date\n date = u\"%s\" % answer.date.date()\n\n result.setdefault(date, {})\n\n\n result[date].setdefault(answer.usernumber, {})\n\n result[date][answer.usernumber].setdefault(mydatetime, [])\n\n myanswer_dict = {}\n\n gameNumber = int(\"%d\" % answer.game_number)\n myanswer_dict[\"game_number\"] = gameNumber\n myanswer_dict[\"argument_number\"] = answer.argumentNumber\n myanswer_dict[\"date\"] = answer.date\n\n if answer.validity:\n validity = \"valid\"\n else:\n validity = \"invalid\"\n\n myanswer_dict[\"validity\"] = validity\n\n if answer.correctness:\n correctness = \"Correct\"\n else:\n correctness = \"Incorrect\"\n\n myanswer_dict[\"correctness\"] = correctness\n\n result[date][answer.usernumber][mydatetime].append(myanswer_dict)\n\n # Create the HttpResponse object with the appropriate CSV header.\n response = HttpResponse(\"\", content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=logproplog-data-answers-plus-aggregates.csv'\n\n writer = csv.writer(response)\n first_row = []\n\n first_row.extend(['date', 'datetime', 'usernumber'])\n\n first_row.extend(['game_number', 'argument_number',\n 'validity', 'correctness'])\n\n first_row.extend(['totaltime (s)',\n 'lastcorrectinarow',\n 'answercount',\n 'correctanswercount'])\n \n writer.writerow(first_row)\n \n for date in sorted(result):\n for usernumber in sorted(result[date]):\n last_correct_in_a_row = 0\n answer_count = 0\n correct_answer_count = 0\n min_datetime = None\n max_datetime = None\n for mydatetime in result[date][usernumber]:\n for myanswer_dict in result[date][usernumber][mydatetime]:\n if min_datetime == None:\n min_datetime = myanswer_dict['date']\n elif min_datetime > myanswer_dict['date']:\n min_datetime = myanswer_dict['date']\n else:\n pass\n\n if max_datetime == None:\n max_datetime = myanswer_dict['date']\n elif max_datetime < myanswer_dict['date']:\n max_datetime = myanswer_dict['date']\n else:\n pass\n \n next_row = []\n next_row.append(date)\n next_row.append(mydatetime)\n next_row.append('%d' % usernumber)\n \n next_row.append('%d' % myanswer_dict['game_number'])\n next_row.append('%d' % myanswer_dict['argument_number'])\n next_row.append('%s' % myanswer_dict['validity'])\n next_row.append('%s' % myanswer_dict['correctness'])\n\n writer.writerow(next_row)\n del next_row\n\n answer_count += 1\n\n if myanswer_dict[\"correctness\"] == \"Correct\":\n correct_answer_count += 1\n last_correct_in_a_row += 1\n else:\n last_correct_in_a_row = 0\n\n timedelta = max_datetime - min_datetime\n aggregate_row = []\n aggregate_row.append(date)\n aggregate_row.append('')\n aggregate_row.append(usernumber)\n\n # 'game_number', 'argument_number',\n # 'validity', 'correctness'\n aggregate_row.extend(['', '', '', ''])\n\n # 'totaltime',\n # 'lastcorrectinarow',\n # 'answercount',\n # 'correctanswercount'\n aggregate_row.append('%d' % timedelta.seconds)\n aggregate_row.append('%d' % last_correct_in_a_row)\n aggregate_row.append('%d' % answer_count)\n aggregate_row.append('%d' % correct_answer_count)\n \n writer.writerow(aggregate_row)\n\n del aggregate_row\n \n return response", "def outputExcelReport(self):\n # ++++++++++\n # init\n # ++++++++++\n wb = openpyxl.Workbook()\n wb.fonts = openpyxl.styles.Font(\n name = 'Courier New',\n size = 12\n )\n # create and delete sheets\n _ = wb.create_sheet(title='Cover',index=0)\n _ = wb.create_sheet(title='Results',index=1)\n _ = wb.create_sheet(title='AllItems',index=2)\n _ = 
wb.remove(wb.worksheets[-1])\n # ++++++++++\n # Sheet 1 <Cover>\n # ++++++++++\n ws = wb['Cover']\n # --- title and date\n timeNow = datetime.datetime.now().isoformat().split('T')[0]\n ws.merge_cells('A1:B1')\n ws.merge_cells('A3:B3')\n ws['A1'] = '納入チェック ダイアグ確認結果'\n ws['A3'] = '作成日:{}'.format(timeNow)\n # --- sample info\n ws['A5'] = '<サンプル情報>'\n self._write2excel(ws, self._sample_info, 6, 1)\n for r in range(6,8):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- checker info\n ws['A9'] = '<チェッカ情報>'\n self._write2excel(ws, self._checker_info, 10, 1)\n for r in range(10,13):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- dmm info\n ws['A14'] = '<DMM情報>'\n self._write2excel(ws, self._dmm_info, 15, 1)\n for r in range(15,18):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- resistor info\n ws['A19'] = '<抵抗器情報>'\n self._write2excel(ws, self._resistor_info, 20, 1)\n for r in range(20,23):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n ws[cell.coordinate].font = STYLE_FONT_PASS\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 2 <Results>\n # ++++++++++\n ws = wb['Results']\n # --- output all scenario\n ws['A1'] = '<結果一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._result_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==6:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n ws.cell(cell.row,6).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 3 <AllItems>\n # ++++++++++\n ws = wb['AllItems']\n # --- output all scenario\n ws['A1'] = '<出力一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._scenario_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==5:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = 
STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # save book\n # ++++++++++\n wb.save(self._filename)", "def export_database(self):\n base_path = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', filter='CSV (*.csv)')\n database.export_to_csv(DB_PATH, base_path[0])", "def downloadResponse(request, formcode=None):\n if formcode !=None:\n response = HttpResponse(content_type='text/csv')\n responses = Response.objects.filter(form_id=formcode)\n writer = csv.writer(response)\n writer.writerow(['User', 'Submit Date', 'Answer1', 'Answer2', 'Answer3'])\n for r in responses:\n user = User.objects.get(id=r.user_id)\n writer.writerow([user, r.submitDate, r.answer1 ,r.answer2 , r.answer3])\n\n response['Content-Disposition'] = 'attachment; filename=\"response.csv\"'\n return response \n return render(request, 'download.html')", "def import_excel(self, filepath_excel,database_type):\n if database_type == \"render\":\n try:\n connection = sqlite3.connect(self.filepath_render_database)\n pointer = connection.cursor()\n\n sql_anweisung = \"\"\"\n INSERT INTO render_information (\n object_type,\n name,\n radius,\n polar_angle_min,\n polar_anglel_max,\n polar_angle_segments,\n polar_angle_random_rad,\n azimuth_angle_min,\n azimuth_angle_max,\n azimuth_angle_segments,\n azimuth_angle_random_rad,\n tracking_obj,\n segmentation\n )\n VALUES (\n :object_type,\n :name,\n :radius,\n :polar_angle_min,\n :polar_anglel_max,\n :polar_angle_segments,\n :polar_angle_random_rad,\n :azimuth_angle_min,\n :azimuth_angle_max,\n :azimuth_angle_segments,\n :azimuth_angle_random_rad,\n :tracking_obj,\n :segmentation\n )\n \"\"\"\n with open(filepath_excel) as csvdatei:\n csv_reader_object = csv.reader(csvdatei, delimiter=';')\n next(csv_reader_object)\n pointer.executemany(sql_anweisung, csv_reader_object)\n connection.commit()\n connection.close()\n print(\"render data addet from excel file\")\n except :\n print(\"adding render data from excel file failed\")\n\n elif database_type == \"object\":\n try:\n connection = sqlite3.connect(self.filepath_object_database)\n pointer = connection.cursor()\n\n sql_anweisung = \"\"\"\n INSERT INTO object_information (\n obj_filepath,\n obj_name,\n obj_type,\n obj_scale_factor,\n obj_type,\n obj_location_x,\n obj_location_y,\n obj_location_z,\n obj_rotation_x,\n obj_rotation_y,\n obj_rotation_z,\n obj_amount_percent,\n obj_material_path,\n obj_point_in_time,\n maximum_random_rotation_degree_z,\n maximum_random_translation,\n random_amount\n )\n VALUES (\n :obj_filepath,\n :obj_name,\n :obj_type,\n :obj_scale_factor,\n :obj_type,\n :obj_location_x,\n :obj_location_y,\n :obj_location_z,\n :obj_rotation_x,\n :obj_rotation_y,\n :obj_rotation_z,\n :obj_amount_percent,\n :obj_material_path,\n :obj_point_in_time,\n 
:maximum_random_rotation_degree_z,\n :maximum_random_translation,\n :random_amount\n )\n \"\"\"\n with open(filepath_excel) as csvdatei:\n csv_reader_object = csv.reader(csvdatei, delimiter=';')\n print(csv_reader_object)\n next(csv_reader_object)\n pointer.executemany(sql_anweisung, csv_reader_object)\n connection.commit()\n connection.close()\n print(\"object data added from excel file\")\n except :\n print(\"adding object data from excel file failed\")\n\n else:\n print(\"no Database found, maybe check spelling in method call??\")\n return", "def createDates(self, data: QDate=None):\n if data is None:\n data = self.oggi\n # print('CREATEDATES DATA', data)\n dateList = MeseGiorniDictGen.bigList(data)\n return dateList", "def on_show_eqp_datasheet_export(self):\n from EqpDatasheetExportDialog import QEqpDatasheetExportDialog\n\n dlg = QEqpDatasheetExportDialog(self)\n dlg.exec_()", "def object_export(request, simulation, object_name):\n query = get_query(object_name, simulation)\n # To avoid conflict if two users export a file at the same time, we\n # generate a random name for the export file.\n seed = np.random.randint(10000)\n filename = '{0}/website_files/exports/{1}.tsv'.format(settings.BASE_DIR,\n seed)\n with codecs.open(filename, 'w', encoding='utf8') as f:\n if object_name == 'centroid':\n fields = ['id', 'name', 'x', 'y', 'db_id']\n elif object_name == 'crossing':\n fields = ['id', 'name', 'x', 'y', 'db_id']\n elif object_name == 'link':\n fields = ['id', 'name', 'origin', 'destination', 'lanes', 'length',\n 'speed', 'capacity', 'vdf']\n elif object_name == 'function':\n fields = ['id', 'expression']\n writer = csv.writer(f, delimiter='\\t')\n if object_name in ('centroid', 'crossing'):\n writer.writerow(['id', 'name', 'x', 'y', 'db_id'])\n values = query.values_list('user_id', 'name', 'x', 'y', 'id')\n elif object_name == 'function':\n writer.writerow(['id', 'name', 'expression'])\n values = query.values_list('user_id', 'name', 'expression')\n elif object_name == 'link':\n writer.writerow(['id', 'name', 'lanes', 'length', 'speed',\n 'capacity', 'function', 'origin', 'destination'])\n values = query.values_list('user_id', 'name', 'lanes', 'length',\n 'speed', 'capacity', 'vdf__user_id')\n # Origin and destination id must be converted to user_id.\n centroids = get_query('centroid', simulation)\n crossings = get_query('crossing', simulation)\n ids = list(centroids.values_list('id', 'user_id'))\n ids += list(crossings.values_list('id', 'user_id'))\n # Map id of nodes to their user_id.\n id_mapping = dict(ids)\n origins = query.values_list('origin', flat=True)\n origins = np.array([id_mapping[n] for n in origins])\n destinations = query.values_list('destination', flat=True)\n destinations = np.array([id_mapping[n] for n in destinations])\n # Add origin and destination user ids to the values array.\n origins = np.transpose([origins])\n destinations = np.transpose([destinations])\n values = np.hstack([values, origins, destinations])\n writer.writerows(values)\n with codecs.open(filename, 'r', encoding='utf8') as f:\n # Build a response to send a file.\n response = HttpResponse(f.read())\n response['content_type'] = 'text/tab-separated-values'\n response['Content-Disposition'] = \\\n 'attachement; filename={}.tsv'.format(metro_to_user(object_name))\n # We delete the export file to save disk space.\n os.remove(filename)\n return response" ]
[ "0.679835", "0.6526606", "0.6513021", "0.63559425", "0.6347008", "0.6267613", "0.61500996", "0.604096", "0.59455335", "0.5921458", "0.58053875", "0.5804869", "0.57998806", "0.5739592", "0.57366", "0.5705012", "0.5652193", "0.5652061", "0.564152", "0.56380385", "0.5635472", "0.5631188", "0.56273067", "0.56179833", "0.5613689", "0.56049776", "0.55923617", "0.5577416", "0.5553695", "0.5525446", "0.551815", "0.55168766", "0.5516326", "0.55082", "0.5489981", "0.5487981", "0.5427786", "0.5401955", "0.5399324", "0.5384938", "0.53824437", "0.5377837", "0.53295857", "0.53294235", "0.5316194", "0.5312439", "0.53019184", "0.5295519", "0.5292883", "0.5268818", "0.52566725", "0.52566016", "0.52500707", "0.5245568", "0.5235348", "0.52342576", "0.52329075", "0.52245665", "0.5221163", "0.5208893", "0.520708", "0.5205253", "0.51907784", "0.51669055", "0.5158997", "0.51581615", "0.5157487", "0.5154282", "0.51541394", "0.51485616", "0.51482743", "0.51455176", "0.5145089", "0.51402813", "0.5133328", "0.51235473", "0.5113577", "0.5098605", "0.50973713", "0.5093422", "0.50901455", "0.5089147", "0.50885534", "0.5079107", "0.50754035", "0.5075197", "0.50726944", "0.5060791", "0.5060442", "0.50598955", "0.5054938", "0.5054577", "0.5053718", "0.5047551", "0.5045823", "0.5042332", "0.5042126", "0.50382984", "0.50252926", "0.5002381" ]
0.75960785
0
Takes the object's queryset. Exports the data at the queryset's date_time to Excel.
def export_to_excel(self, workbook, tailan_queryset): # workbook argumentdaa avna if tailan_queryset: #[row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start) worksheet = workbook.add_worksheet(u'Гүний худаг') queryset = Hudag.objects.none() row_write = 5 col_write = 1 [row_write, col_write] = Hudag.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.gunii_hudags: queryset = tailan.gunii_hudags.hudags.all() [row_write, col_write] = Hudag.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) worksheet = workbook.add_worksheet(u'Цэвэршүүлэх байгууламж') queryset = Ts_baiguulamj.objects.none() row_write = 5 col_write = 1 [row_write, col_write] = Ts_baiguulamj.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.tsevershuuleh: queryset = tailan.tsevershuuleh.tsevershuuleh.all() [row_write, col_write] = Ts_baiguulamj.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) worksheet = workbook.add_worksheet(u'Цэвэрлэх байгууламж') queryset = Ts_baiguulamj.objects.none() row_write = 5 col_write = 1 [row_write, col_write] = Ts_baiguulamj.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.tseverleh: queryset = tailan.tseverleh.tseverleh.all() [row_write, col_write] = Ts_baiguulamj.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) worksheet = workbook.add_worksheet(u'Усан сан') queryset = UsanSan.objects.none() row_write = 5 col_write = 1 [row_write, col_write] = UsanSan.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.usansan: queryset = tailan.usansan.usan_sans.all() [row_write, col_write] = UsanSan.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) worksheet = workbook.add_worksheet(u'Цэвэр усны насос станц') queryset = NasosStants.objects.none() row_write = 5 col_write = 1 [row_write, col_write] = NasosStants.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.tsever_nasos_stants: queryset = tailan.tsever_nasos_stants.nasos_stantss.all() [row_write, col_write] = NasosStants.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) worksheet = workbook.add_worksheet(u'Бохир усны насос станц') queryset = NasosStants.objects.none() row_write = 5 col_write = 1 [row_write, col_write] = NasosStants.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.bohir_nasos_stants: queryset = tailan.bohir_nasos_stants.nasos_stantss.all() [row_write, col_write] = NasosStants.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) worksheet = workbook.add_worksheet(u'Лаборатори') queryset = 
Lab.objects.none() row_write = 5 col_write = 1 [row_write, col_write] = Lab.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.lab: queryset = tailan.lab.labs.all() [row_write, col_write] = Lab.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) worksheet = workbook.add_worksheet(u'Цэвэр усны шугам') queryset = Sh_suljee.objects.none() row_write = 5 col_write = 1 [row_write, col_write] = Sh_suljee.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.tsever_usnii_shugam: queryset = tailan.tsever_usnii_shugam.sh_suljees.all() [row_write, col_write] = Sh_suljee.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) worksheet = workbook.add_worksheet(u'Бохир усны шугам') queryset = Sh_suljee.objects.none() row_write = 5 col_write = 1 [row_write, col_write] = Sh_suljee.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.bohir_usnii_shugam: queryset = tailan.bohir_usnii_shugam.sh_suljees.all() [row_write, col_write] = Sh_suljee.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) worksheet = workbook.add_worksheet(u'АХББ') queryset = ABB.objects.none() row_write = 5 col_write = 1 [row_write, col_write] = ABB.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.abb: queryset = tailan.abb.abbs.all() [row_write, col_write] = ABB.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) worksheet = workbook.add_worksheet(u'Ус, дулаан дамжуулах төв') queryset = UsDamjuulahBair.objects.none() row_write = 5 col_write = 1 [row_write, col_write] = UsDamjuulahBair.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.us_damjuulah_tov: queryset = tailan.us_damjuulah_tov.usDamjuulahBair.all() [row_write, col_write] = UsDamjuulahBair.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) worksheet = workbook.add_worksheet(u'Ус түгээх байр') queryset = UsTugeehBair.objects.none() row_write = 5 col_write = 1 [row_write, col_write] = UsTugeehBair.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.us_tugeeh: queryset = tailan.us_tugeeh.us_tugeeh_bairs.all() [row_write, col_write] = UsTugeehBair.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) worksheet = workbook.add_worksheet(u'Цэвэр усны машин') queryset = WaterCar.objects.none() row_write = 5 col_write = 1 [row_write, col_write] = WaterCar.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.water_car: queryset = tailan.water_car.water_cars.all() [row_write, col_write] = 
WaterCar.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) worksheet = workbook.add_worksheet(u'Бохир усны машин') queryset = BohirCar.objects.none() row_write = 5 col_write = 1 [row_write, col_write] = BohirCar.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.bohir_car: queryset = tailan.bohir_car.bohir_cars.all() [row_write, col_write] = BohirCar.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) worksheet = workbook.add_worksheet(u'Ажилчдын судалгаа') row_write = 5 col_write = 1 [row_write, col_write] = Ajiltan.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write) for tailan in tailan_queryset: if tailan.ajiltans: queryset = tailan.ajiltans.ajiltans.all() [row_write, col_write] = Ajiltan.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date) else: worksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_to_excel(self, worksheet, row_start, col_start, queryset, date_time=timezone.now()):\n\t\tif queryset:\n\t\t\t[row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)\n\t\t\tfor q in queryset:\n\t\t\t\t# object_excel_write function---date_time uyiin history objectiig excel -ruu horvuulne\n\t\t\t\t[row_write, col_write] = q.object_excel_write(worksheet, row_write, col_write, date_time=date_time)\n\t\telse:\n\t\t\tworksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')", "def export_ho_dan_as_excel_action(fields=None, exclude=None, header=True):\n def export_as_excel(modeladmin, request, queryset):\n opts = modeladmin.model._meta\n field_names = [\"name\", \"status\", \"location\", \"tinh\",\n \"xa\", \"huyen\", \"phone\", \"cuuho\", \"update_time\", \"note\"]\n display_names = [\"Tên hộ dân\", \"Tình trạng\", \"Vị trí\", \"Tỉnh\", \"Xã\",\n \"Huyện\", \"Sdt\", \"hỗ trợ\", \"Thời gian cuối cùng cập nhật\", \"Ghi chú\"]\n file_name = \"Danh_sach_ho_dan\"\n\n output = io.BytesIO()\n\n workbook = xlsxwriter.Workbook(output, {'in_memory': True})\n worksheet = workbook.add_worksheet()\n row = 0\n if header:\n write_a_row(worksheet, row, display_names)\n row += 1\n for obj in queryset:\n arr = []\n for field in field_names:\n if field == \"status\" and obj.status:\n arr.append(obj.status.name)\n elif field == \"update_time\":\n utc_time = getattr(obj, field)\n local_datetime = utc_to_local(utc_time)\n arr.append(local_datetime.strftime(\"%d/%m/%Y %H:%M:%S\"))\n else:\n arr.append(str(getattr(obj, field) or \"\"))\n write_a_row(worksheet, row, arr)\n row += 1\n\n workbook.close()\n\n output.seek(0)\n\n response = HttpResponse(output.read(\n ), content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\")\n response['Content-Disposition'] = f\"attachment; filename={file_name}.xlsx\"\n\n output.close()\n\n return response\n\n export_as_excel.short_description = \"Xuất file excel\"\n return export_as_excel", "def export(self, queryset=None):\n self.queryset = queryset or self.queryset\n exported_datetime = get_utcnow()\n filename = self.get_filename(exported_datetime)\n path = os.path.join(self.export_folder, filename)\n with open(path, 'w') as f:\n csv_writer = csv.DictWriter(\n f, fieldnames=self.field_names, delimiter=self.delimiter)\n csv_writer.writeheader()\n for model_obj in self.queryset:\n object_helper = self.object_history_helper_cls(\n model_obj=model_obj, create=True)\n objects = object_helper.get_not_exported()\n for obj in objects:\n row = self.prepare_row(\n model_obj=model_obj,\n exported_datetime=exported_datetime,\n export_change_type=obj.export_change_type)\n csv_writer.writerow(row)\n object_helper.update_as_exported(\n objects=objects, exported_datetime=exported_datetime)\n file_history_updater = self.file_history_updater_cls(\n path=path,\n delimiter=self.delimiter,\n model=self.model_cls._meta.label_lower,\n filename=filename)\n file_history_updater.update()\n return path", "def export_any_queryset(request, queryset, filename, excluded_fields=[], included_fields=[], csv_field_delimiter = \";\"):\n\n name, extension = os.path.splitext(filename)\n file_format = extension[1:]\n\n output = None\n if file_format == 'csv':\n content_type = 'text/csv'\n output = io.StringIO()\n writer = csv.writer(output, delimiter=csv_field_delimiter, quoting=csv.QUOTE_MINIMAL)\n exporter = SpreadsheetQuerysetExporter(writer, file_format=file_format)\n exporter.export_queryset(queryset)\n elif file_format == 'xlsx':\n 
content_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n #content_type = 'application/vnd.ms-excel'\n output = io.BytesIO()\n with open_xlsx_file(output) as writer:\n # # Write Spreadsheet\n # writer.write_headers_from_strings(\n # ['Cliente', 'Commessa', 'Progetto', 'Attività', ] +\n # ['Totale', ],\n # )\n # writer.apply_autofit()\n exporter = SpreadsheetQuerysetExporter(writer, file_format=file_format)\n exporter.export_queryset(queryset, excluded_fields=excluded_fields, included_fields=included_fields)\n writer.apply_autofit()\n assert writer.is_closed()\n else:\n raise Exception('Wrong export file format \"%s\"' % file_format)\n\n # send \"output\" object to stream with mimetype and filename\n assert output is not None\n output.seek(0)\n # response = HttpResponse(\n # output.read(),\n response = StreamingHttpResponse(\n output,\n content_type=content_type,\n )\n #response['Content-Disposition'] = 'inline; filename=\"%s\"' % filename\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n\n return response", "def export(self,**kwargs):\n \n # import pdb;pdb.set_trace()\n \n # provide for case where recs are set extenally\n if not self.recs:\n self.select_recs(**kwargs)\n if self.recs:\n if self.export_file_name:\n filename = self.export_file_name\n else:\n filename = \"{table_name}_report_{datetime}.csv\".format(\n table_name = self.table.display_name,\n datetime = date_to_string(local_datetime_now(),'iso_datetime'),\n ).replace(' ','_').lower()\n \n if not self.export_fields:\n # include all fields by default\n self.export_fields = self._set_default_list_fields(include_all=True).copy()\n\n self.set_list_fields(self.export_fields)\n \n \n if self.export_template:\n result = render_template(self.export_template, data=self)\n else:\n # add a descriptive title row\n if self.export_title:\n result = self.export_title.strip() + '\\n'\n else:\n result = \"Export of table {} as of {}\\n\".format(self.table.table_name,excel_date_and_time_string(local_datetime_now()))\n \n result += ','.join([x['label'] for x in self.export_fields]) + '\\n'\n for rec in self.recs:\n rec_row = []\n for field in self.export_fields:\n data = rec.__getattribute__(field['name'])\n if field['type'].upper() == \"DATE\":\n data = local_date_string(data)\n elif field['type'].upper() == \"DATETIME\":\n data = excel_date_and_time_string(data)\n else:\n # just text\n data = str(data).strip()\n \n # replace double quotes with double-double quotes\n data = data.replace('\"','\"\"') #double up on double quotes\n \n if \",\" in data:\n # if any commas, wrap in quotes\n data = '\"' + data + '\"'\n \n #replace returns\n data = data.replace('\\r\\n',' -crnl- ')\n data = data.replace('\\n',' -nl- ')\n data = data.replace('\\r',' -rtn- ')\n\n rec_row.append(data)\n \n result += ','.join([str(x) for x in rec_row]) + '\\n'\n \n return DataStreamer(result,filename,'text/csv').send()\n \n self.result_text = \"No records selected\"\n self.success = False\n \n flash(self.result_text)\n return self.list(**kwargs)", "def export_outstanding_fires(request, region_id, queryset):\n #regions = Region.objects.filter(id=region_id) if region_id else Region.objects.all()\n regions = Region.objects.filter(id=region_id) if region_id else Region.objects.filter(dbca=True)\n region_name = regions[0].name if region_id else 'All-Regions'\n\n rpt_date = datetime.now()\n filename = 'outstanding_fires_{}_{}.xls'.format(region_name, rpt_date.strftime('%d%b%Y'))\n response = 
HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n for region in regions:\n outstanding_fires(book, region, queryset, rpt_date)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export_to_csv(self, request, queryset):\n fields = self.get_table_fields()\n field_names = [field.name for field in fields]\n field_verbose_names = [field.verbose_name.encode(\n 'utf-8'\n ) for field in fields]\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; \\\nfilename=%s.csv' % unicode(self.model._meta).replace('.', '_')\n\n writer = csv.writer(response)\n writer.writerow(field_verbose_names)\n for obj in queryset:\n writer.writerow([unicode(getattr(obj, field)).encode(\n \"utf-8\",\n \"replace\"\n ) for field in field_names])\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.ministerial.get_excel_sheet(rpt_date, book)\n self.ministerial_auth.get_excel_sheet(rpt_date, book)\n self.ministerial_268.get_excel_sheet(rpt_date, book)\n self.quarterly.get_excel_sheet(rpt_date, book)\n self.by_tenure.get_excel_sheet(rpt_date, book)\n self.by_cause.get_excel_sheet(rpt_date, book)\n self.region_by_tenure.get_excel_sheet(rpt_date, book)\n self.indicator.get_excel_sheet(rpt_date, book)\n self.by_cause_10YrAverage.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 1')\n book.save(response)\n\n return response", "def export_repayment_csv(request):\n import csv\n from django.utils.encoding import smart_str\n # response = HttpResponse(content_type='text/csv')\n # response['Content-Disposition'] = 'attachment; filename=Repayment_report.csv'\n from_date = request.GET.get('from_date')\n to_date = request.GET.get('to_date')\n search = request.GET.get('search_value') or ''\n search_query = Q()\n if search:\n search_query = Q(user__user__username__icontains=search) | \\\n Q(user__user__first_name__icontains=search) | \\\n Q(project__title__icontains=search) | \\\n Q(amount__icontains=search) | \\\n Q(user__user__last_name__icontains=search) | \\\n Q(user__user__email__icontains=search)\n if from_date and to_date:\n import datetime\n import pytz\n date1 = datetime.datetime.strptime(from_date, '%Y-%m-%d').date()\n date2 = datetime.datetime.strptime(to_date, '%Y-%m-%d').date()\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00,\n created_at__range=[\n datetime.datetime(date1.year, date1.month, date1.day, 8, 15,\n 12, 0, pytz.UTC),\n datetime.datetime(date2.year, date2.month, date2.day, 8, 15,\n 12, 0, pytz.UTC)]).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").filter(search_query).iterator()\n else:\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00).filter(search_query).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").iterator()\n # writer = csv.writer(response, csv.excel)\n # response.write(u'\\ufeff'.encode('utf8')) # BOM (optional...Excel needs it to open UTF-8 file properly)\n\n def stream():\n buffer_ = StringIO()\n writer = csv.writer(buffer_)\n writer.writerow([\n smart_str(u\"FIRST NAME\"),\n smart_str(u\"LAST NAME\"),\n smart_str(u\"USERNAME\"),\n smart_str(u\"EMAIL\"),\n 
smart_str(u\"DATE\"),\n smart_str(u\"NAME OF PROJECT\"),\n smart_str(u\"DONATION AMOUNT\"),\n smart_str(u\"REPAYMENT AMOUNT\"),\n\n ])\n\n for payment in repayments:\n writer.writerow([\n smart_str(payment.user.user.first_name),\n smart_str(payment.user.user.last_name),\n smart_str(payment.user.user.username),\n smart_str(payment.user.user.email),\n smart_str(payment.created_at),\n smart_str(payment.project.title),\n smart_str(round(\n Payment.objects.filter(user=payment.user).filter(project=payment.project).aggregate(Sum('amount'))[\n 'amount__sum'] or 0, 2)),\n smart_str(round(payment.amount, 2)),\n ])\n buffer_.seek(0)\n data = buffer_.read()\n buffer_.seek(0)\n buffer_.truncate()\n yield data\n\n # Create the streaming response object with the appropriate CSV header.\n response = StreamingHttpResponse(stream(), content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"Repayment_report.csv\"'\n return response", "def get_export_data(self, file_format, queryset, *args, **kwargs):\n request = kwargs.pop(\"request\")\n resource_class = self.get_export_resource_class()\n data = resource_class(**self.get_export_resource_kwargs(request)).export(queryset, *args, **kwargs)\n export_data = file_format.export_data(data)\n return export_data", "def export_any_dataset(request, *fields, queryset, filename, csv_field_delimiter = \";\"):\n\n name, extension = os.path.splitext(filename)\n file_format = extension[1:]\n headers, rows = render_queryset_as_data(*fields, queryset=queryset)\n\n output = None\n if file_format == 'csv':\n content_type = 'text/csv'\n output = io.StringIO()\n writer = csv.writer(output, delimiter=csv_field_delimiter, quoting=csv.QUOTE_MINIMAL)\n\n writer.writerow(headers)\n for row in rows:\n writer.writerow(row)\n\n elif file_format == 'xlsx':\n content_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n #content_type = 'application/vnd.ms-excel'\n output = io.BytesIO()\n with open_xlsx_file(output) as writer:\n\n writer.write_headers_from_strings(headers)\n for row in rows:\n writer.writerow(row)\n writer.apply_autofit()\n\n assert writer.is_closed()\n else:\n raise Exception('Wrong export file format \"%s\"' % file_format)\n\n # send \"output\" object to stream with mimetype and filename\n assert output is not None\n output.seek(0)\n # response = HttpResponse(\n # output.read(),\n response = StreamingHttpResponse(\n output,\n content_type=content_type,\n )\n #response['Content-Disposition'] = 'inline; filename=\"%s\"' % filename\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n\n return response", "def export_as_csv(modeladmin, request, queryset):\n if not request.user.is_staff:\n raise PermissionDenied\n opts = modeladmin.model._meta\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % unicode(opts).replace('.', '_')\n writer = csv.writer(response, delimiter=';')\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n values = []\n for field in field_names:\n value = (getattr(obj, field))\n if callable(value):\n try:\n value = value() or ''\n except:\n value = 'Error retrieving value'\n if value is None:\n value = ''\n values.append(unicode(value).encode('utf-8'))\n writer.writerow(values)\n #writer.writerow([getattr(obj, field) for field in field_names])\n return response", "def dump_to_file_format(queryset, 
file_format, data_zip):\n # create a temporary output stream (temp file)\n if file_format == 'xls':\n # it seems that an excel file needs to be written on a BytesIo even if on the xlwt they write exactly\n # the opposite (I was about to become fool)\n output = io.BytesIO()\n else:\n output = io.StringIO()\n\n # get queryset model\n model = queryset.model\n # the export code depends on the file format\n if file_format == 'csv':\n # create an instance of csv writer that writes on the stream 'output' opened above\n csv_writer = csv.writer(output, dialect='excel', delimiter=';')\n\n # there are some things that may be different from a model to another\n\n # for example, I also want to write in the project csv the username of the members\n if model == Projet:\n csv_writer.writerow(['ID', 'NAME', 'MEMBERS'])\n for project in queryset:\n # build a comma separated list with all the users and the tasks that are in the project\n members = ', '.join([member.username for member in project.members.all()])\n csv_writer.writerow([project.id, project.name, members])\n # if the model is User, only export non confidential fields\n if model == User:\n csv_writer.writerow(['USERNAME', 'NAME', 'SURNAME', 'E-MAIL'])\n for user in queryset:\n csv_writer.writerow([user.username, user.first_name, user.last_name, user.email])\n # for the other models that's what is going to happen\n else:\n # get all the field names and write them as headers\n field_names = [field.name for field in model._meta.fields]\n csv_writer.writerow(field.upper() for field in field_names)\n # for each instance in the queryset\n for obj in queryset:\n # \"\"\"general backup code\"\"\"\n # csv_writer.writerow([getattr(obj, field) for field in field_names])\n\n row = [] # create an empty row list\n # for each field of the model\n for field in field_names:\n # get the field value\n field_value = getattr(obj, field)\n # this is to control the format of the date that will be written in the csv\n if isinstance(field_value, datetime.datetime):\n field_value = field_value.strftime(\"%m/%d/%Y, %H:%M:%S\")\n row.append(field_value) # append the field value to the end of the row list\n\n csv_writer.writerow(row)\n\n # the .json and .xml formats are generated with the django serializers utilities\n elif file_format == 'json' or file_format == 'xml':\n # if the model is User, only export non confidential fields\n if model == User:\n json_xml = serializers.serialize(file_format, queryset, use_natural_foreign_keys=True,\n fields=('username', 'first_name', 'last_name', 'email'))\n else:\n # use_natural_foreign_keys=True means that the foreign keys won't be written as just numbers\n json_xml = serializers.serialize(file_format, queryset, use_natural_foreign_keys=True)\n\n output.write(json_xml)\n\n elif file_format == 'xls':\n wb = xlwt.Workbook(encoding='utf-8') # create excel workbook\n ws = wb.add_sheet(model._meta.model.__name__) # create sheet\n\n # Sheet header, first row\n row_num = 0\n font_style = xlwt.XFStyle()\n font_style.font.bold = True\n\n '''This code is pretty similar to the code to export in .csv, but in excel each cell (row and column) \n must written separately'''\n # get all the field names and write them as headers\n # if User only confidential data\n if model == User:\n field_names = ['username', 'first_name', 'last_name', 'email']\n else:\n field_names = [field.name for field in model._meta.fields]\n for col_num in range(len(field_names)):\n ws.write(row_num, col_num, field_names[col_num].upper(), font_style)\n\n # add a column for the 
members of the project\n # (otherwise it won't be done automatically because it's ManytoMany)\n if model == Projet:\n ws.write(row_num, col_num + 1, 'MEMBERS', font_style)\n\n # Sheet body, remaining rows\n font_style = xlwt.XFStyle()\n\n # for each instance in the queryset\n for obj in queryset:\n row_num += 1\n # for each field of the model\n for col_num in range(len(field_names)):\n # get the field value\n field_value = getattr(obj, field_names[col_num])\n # this is to control the format of the date that will be written in the csv\n if isinstance(field_value, datetime.datetime):\n field_value = field_value.strftime(\"%m/%d/%Y, %H:%M:%S\")\n ws.write(row_num, col_num, field_value.__str__(), font_style)\n\n # add the column with the members of the project\n if model == Projet:\n members = ', '.join([member.username for member in obj.members.all()])\n ws.write(row_num, col_num + 1, members, font_style)\n\n # save the excel file on the output stream\n wb.save(output)\n\n # generates the name of the output file depending on the model and the file format\n file_name = model._meta.model.__name__.lower() + '_data.' + file_format\n # add the file to the zip archive and close the output stream\n data_zip.writestr(file_name, output.getvalue())\n output.close()", "def uploader_actividad(df,to_model):\n\tengine = create_engine(\"mssql+pyodbc://sa:[email protected]:1433/vpcanales?driver=SQL+Server+Native+Client+11.0\")\n\n\tfecha = df.loc[0,'Fecha']\n\tprint(fecha.month)\n\tprint(fecha.year)\n\n\tif to_model.__name__==\"Activacion\":\n\n\t\tActivacion.objects.filter(fecha_actividad__month=fecha.month ,\n\t\t\tfecha_actividad__year=fecha.year).delete()\n\n\t\tfor row in df.itertuples():\n\t\t\tconnection = engine.raw_connection()\n\t\t\tcursor=connection.cursor()\n\t\t\t#Se ejecuta el SP por cada registro del dataframe\n\t\t\tstring=\"\"\"exec sp_insert_into_activacion\n\t\t\t@fecha_actividad='{0}',\n\t\t\t@plataforma='{1}',\n\t\t\t@tecnologia='{2}',\n\t\t\t@terminal='{3}',\n\t\t\t@cantidad='{4}',\n\t\t\t@codigo_plan='{5}',\n\t\t\t@mes={6},\n\t\t\t@ano={7},\n\t\t\t@codigo_agente='{8}'\n\t\t\t \"\"\".format(row[2],\n\t\t\trow[5],\n\t\t\trow[6],\n\t\t\trow[7],\n\t\t\trow[-2],\n\t\t\trow[4],\n\t\t\trow[2].month,\n\t\t\trow[2].year,\n\t\t\trow[3])\n\t\t\tcursor.execute(string).commit()\n\n\t\tresults = Activacion.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(suma = Sum('cantidad'))\n\n\n\t\tresults.update(Activacion.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(count = Count('cantidad')))\n\n\t\tprint(\"*************************\",results)\n\n\t\treturn results\n\n\n\n\telse:\n\n\t\tAlta.objects.filter(fecha_actividad__month=fecha.month ,\n\t\t\tfecha_actividad__year=fecha.year).delete()\n\n\t\tfor row in df.itertuples():\n\t\t\tconnection = engine.raw_connection()\n\t\t\tcursor=connection.cursor()\n\t\t\t#Se ejecuta el SP por cada registro del dataframe\n\t\t\tstring=\"\"\"exec sp_insert_into_alta\n\t @fecha_actividad='{0}',\n\t @plataforma='{1}',\n\t @tecnologia='{2}',\n\t @terminal='{3}',\n\t @cantidad='{4}',\n\t @codigo_plan='{5}',\n\t @mes={6},\n\t @ano={7},\n\t @codigo_agente='{8}' \"\"\".format(row[2],\n\t row[5],\n\t row[6],\n\t row[7],\n\t row[-2],\n\t row[4],\n\t row[2].month,\n\t row[2].year,\n\t row[3])\n\t\t\tcursor.execute(string).commit()\n\n\n\t\tresults = 
Alta.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(suma = Sum('cantidad'))\n\n\n\t\tresults.update(Alta.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(count = Count('cantidad')))\n\n\t\tprint(\"*************************\",results)\n\n\n\n\t\treturn results", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'quarterly_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def download_queryset(self, queryset, export_format):\n dataset = StockItemResource().export(queryset=queryset)\n\n filedata = dataset.export(export_format)\n\n filename = 'InvenTree_StockItems_{date}.{fmt}'.format(\n date=datetime.now().strftime(\"%d-%b-%Y\"),\n fmt=export_format\n )\n\n return DownloadFile(filedata, filename)", "def export(request):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n filename = 'export-inscripcions-tallers-%s.csv' % date.today().strftime(\"%y-%m-%d\")\n\n regtaller_list = TallerRegistration.objects.all()\n\n table = ExportTallerRegistrationTable(regtaller_list)\n table.order_by = request.GET.get(\"sort\",'last_name')\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n writer = csv.writer(response)\n # Write headers to CSV file\n headers = []\n for column in table.columns:\n headers.append(column.header.encode('utf8'))\n writer.writerow(headers)\n\n # Write data to CSV file\n for obj in table.rows:\n row = []\n for value in obj:\n if isinstance(value, basestring):\n row.append(value.encode('utf8'))\n else:\n row.append(value)\n writer.writerow(row)\n\n # Return CSV file to browser as download\n return response", "def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. 
There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True", "def export_xlsx(request):\n import openpyxl\n try:\n from openpyxl.cell import get_column_letter\n except ImportError:\n from openpyxl.utils import get_column_letter\n\n from_date = request.GET.get('from_date')\n to_date = request.GET.get('to_date')\n search = request.GET.get('search_value') or ''\n search_query = Q()\n if search:\n search_query = Q(user__user__username__icontains=search) | \\\n Q(user__user__first_name__icontains=search) | \\\n Q(project__title__icontains=search) | \\\n Q(amount__icontains=search) | \\\n Q(user__user__last_name__icontains=search) | \\\n Q(user__user__email__icontains=search)\n if from_date and to_date:\n import datetime\n import pytz\n date1 = datetime.datetime.strptime(from_date, '%Y-%m-%d').date()\n date2 = datetime.datetime.strptime(to_date, '%Y-%m-%d').date()\n payments = Payment.objects.filter(\n created_at__range=[datetime.datetime(date1.year, date1.month, date1.day, 8, 15, 12, 0, pytz.UTC),\n datetime.datetime(date2.year, date2.month, date2.day, 8, 15, 12, 0,\n pytz.UTC)]).order_by('-created_at').filter(search_query)\\\n .select_related(\"user\", \"project\", \"admin_reinvestment\", \"user_reinvestment\", \"tip\", \"user__user\").iterator()\n else:\n payments = Payment.objects.filter(search_query).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"admin_reinvestment\", \"user_reinvestment\", \"tip\", \"user__user\")\\\n .iterator()\n response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = 'attachment; filename=RE-volv.xlsx'\n wb = openpyxl.Workbook()\n ws = 
wb.get_active_sheet()\n ws.title = \"RE-volv\"\n\n row_num = 0\n\n columns = [\n (u\"FIRST NAME\", 30),\n (u\"LAST NAME\", 30),\n (u\"USERNAME\", 30),\n (u\"EMAIL\", 30),\n (u\"DATE\", 30),\n (u\"NAME OF PROJECT\", 30),\n (u\"DONATION TO SOLAR SEED FUND\", 30),\n (u\"REINVESTMENT IN SOLAR SEED FUND\", 20),\n (u\"ADMIN REINVESTMENT IN SOLAR SEED FUND\", 20),\n (u\"DONATION TO OPERATION\", 20),\n (u\"TOTAL DONATIONS\", 20),\n ]\n\n for col_num in xrange(len(columns)):\n c = ws.cell(row=row_num + 1, column=col_num + 1)\n c.value = columns[col_num][0]\n ws.column_dimensions[get_column_letter(col_num + 1)].width = columns[col_num][1]\n\n for payment in payments:\n if payment.admin_reinvestment:\n admin_reinvestment = round(payment.amount, 2)\n else:\n admin_reinvestment = 0\n\n if payment.user_reinvestment:\n user_reinvestment = round(payment.user_reinvestment.amount, 2)\n else:\n user_reinvestment = 0\n\n if payment.admin_reinvestment or payment.user_reinvestment:\n donation_amount = 0\n else:\n donation_amount = payment.amount\n\n if payment.tip:\n tip = round(payment.tip.amount, 2)\n else:\n tip = 0\n\n if payment.tip and payment.amount:\n total = round(payment.tip.amount + payment.amount, 2)\n if payment.tip and not payment.amount:\n total = round(payment.tip.amount, 2)\n if payment.amount and not payment.tip:\n total = round(payment.amount, 2)\n if not payment.amount and not payment.tip:\n total = 0\n if AnonymousUserDonation.objects.filter(payment_id=payment.id):\n email = AnonymousUserDonation.objects.get(payment_id=payment.id).email\n else:\n email = payment.user.user.email\n\n row_num += 1\n row = [\n payment.user.user.first_name,\n payment.user.user.last_name,\n payment.user.user.username,\n email,\n payment.created_at,\n payment.project.title,\n donation_amount,\n user_reinvestment,\n admin_reinvestment,\n tip,\n total,\n ]\n for col_num in xrange(len(row)):\n c = ws.cell(row=row_num + 1, column=col_num + 1)\n c.value = row[col_num]\n\n wb.save(response)\n payments.close()\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_10yr_average_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export_data(self):\r\n if len(app.entry6.get()) != 0:\r\n\r\n if app.var.get() == 'xls':\r\n\r\n wb = Workbook()\r\n sheet = wb.add_sheet('Sheet1')\r\n self.columns = ['id', 'Name', 'Section', 'Dept.', 'Gpa', 'MP1', 'MP2', 'MP3', 'MT', 'FINAL']\r\n style = xlwt.easyxf('font: bold 1')\r\n for col in range(10):\r\n sheet.write(0, col, self.columns[col], style)\r\n index=0\r\n for row in range(1,162):\r\n sheet.write(row, 1, open_data.sort_list[index])\r\n index += 1\r\n index1 = -1\r\n for row in range(1,162):\r\n index1 += 1\r\n index2=0\r\n for col in range(10):\r\n if col == 1 or index2 == 1:\r\n index2 += 1\r\n continue\r\n if index2 == 0:\r\n sheet.write(row, col, int(open_data.student[open_data.sort_list[index1]][index2]))\r\n index2 += 1\r\n continue\r\n sheet.write(row, col, open_data.student[open_data.sort_list[index1]][index2])\r\n index2 += 1\r\n file_name=app.entry6.get()\r\n if '.xls' not in file_name:\r\n wb.save(file_name+'.xls')\r\n else:\r\n wb.save(file_name)\r\n\r\n elif app.var.get() == 'txt':\r\n\r\n file_name = app.entry6.get()\r\n if '.txt' not in 
file_name:\r\n file_name = file_name + '.txt'\r\n file = open(file_name, 'w')\r\n index2 = 0\r\n for key in open_data.student:\r\n for index in range(10):\r\n if index == 0:\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n continue\r\n if index == 1:\r\n try:\r\n self.split_names = open_data.sort_list[index2].split(' ')\r\n file.write(self.split_names[0])\r\n file.write(', ')\r\n file.write(self.split_names[1])\r\n file.write(', ')\r\n index2 += 1\r\n except UnicodeEncodeError:\r\n index2 += 1\r\n pass\r\n continue\r\n if index >= 5 and index <= 9:\r\n if open_data.student[key][index] != '':\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n else:\r\n file.write('\\n')\r\n break\r\n if index == 9:\r\n file.write('\\n')\r\n continue\r\n try:\r\n file.write(str(open_data.student[key][index]))\r\n file.write(', ')\r\n except UnicodeEncodeError:\r\n pass\r\n file.close()\r\n\r\n\r\n\r\n elif app.var.get() == 'csv':\r\n app.info.configure(text=\"INFO: Type not Supported\")\r\n # The program does not support saving in 'csv' type. If the user selects 'csv' file type, 'Info' Label\r\n # shows the message: 'INFO: Type not Supported'.\r\n\r\n else:\r\n app.info.configure(text='INFO: Type not chosen!')\r\n # Also, If the user presses on 'Export Data' button, with a file name provided, but without choosing a\r\n # file type, 'Info' Label shows the message: 'INFO: Type not chosen'.\r\n\r\n else:\r\n app.info.configure(text=\"INFO: Please provide the name of the file.\")\r\n # Also, if the user presses 'Export Data' button without giving a file name, 'Info' Label shows the message:\r\n # 'INFO: Please provide the name of the file.'\r", "def action_date_ret(self):\n for wh in self.browse():\n if not wh.date_ret:\n self.write([wh.id],\n {'date_ret': time.strftime('%Y-%m-%d')})\n return True", "def print_xlsx(self):\n if self.date_from and self.date_to:\n if self.date_from > self.date_to:\n raise ValidationError(\"Date From must be less than Date To\")\n\n # active_record = self._context['id']\n # record = self.env['room.accommodation'].browse(active_record)\n data = {\n 'date_from': self.date_from,\n 'date_to': self.date_to,\n 'guest_id': self.guest_id.id,\n 'model_id': self.id,\n 'check_out': self.check_out,\n 'date_today': fields.Datetime.now()\n }\n\n print(\"XLSX Wizard data : \", data)\n\n return {\n 'type': 'ir.actions.report',\n 'data': {\n 'model': 'accommodation.reporting',\n 'options': json.dumps(data, default=date_utils.json_default),\n 'output_format': 'xlsx',\n 'report_name': 'Accommodation Report'\n },\n 'report_type': 'xlsx'\n }", "def make_csv(user_id, fobj):\n data = show_history(user_id)\n report = csv.writer(fobj)\n report.writerow([\n 'Status',\n 'Date',\n 'Amount',\n 'From Curr',\n 'To Curr',\n 'To Address',\n ])\n for row in data:\n report.writerow([\n row.exchange_status.capitalize(),\n row.created_at.strftime('%Y-%m-%d %H:%I:%M'),\n row.amount,\n row.from_curr,\n row.to_curr,\n row.address_out\n ])", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def download_data(request, file_format, exp_p=False, exp_m=False, exp_t=False, exp_j=False, exp_s=False,\n 
querysets=None):\n\n # set the response so that the browser will understand that the user is receiving a zip file to download\n response = HttpResponse(content_type='application/zip')\n response['Content-Disposition'] = 'attachment; filename=\"data.zip\"'\n\n # create the zip archive by using the python library ZipFile\n data_zip = ZipFile(response, 'w')\n\n file_format = file_format.lower() # it may be helpful\n\n \"\"\" ONLY the data that refers to the projects of which the AUTHENTICATED USER is MEMBER will be exported\"\"\"\n user = request.user\n # models queryset to be used to generate to export the database\n projects_queryset = user.projets.all() # only projects that the user has access to\n projects_members_queryset = User.objects.filter(\n projets__in=projects_queryset).distinct() # infos about project members\n tasks_queryset = Task.objects.filter(projet__in=projects_queryset) # all the tasks in these projects\n journals_queryset = Journal.objects.filter(task__in=tasks_queryset) # all the journals in these tasks\n status_queryset = Status.objects.all()\n\n def dump_to_file_format(queryset, file_format, data_zip):\n \"\"\" Subfunction used not to repeat the same code for the export process\n\n :param queryset: a generic queryset of a model\n :param file_format:\n :param data_zip: a zip archive\n\n \"\"\"\n # create a temporary output stream (temp file)\n if file_format == 'xls':\n # it seems that an excel file needs to be written on a BytesIo even if on the xlwt they write exactly\n # the opposite (I was about to become fool)\n output = io.BytesIO()\n else:\n output = io.StringIO()\n\n # get queryset model\n model = queryset.model\n # the export code depends on the file format\n if file_format == 'csv':\n # create an instance of csv writer that writes on the stream 'output' opened above\n csv_writer = csv.writer(output, dialect='excel', delimiter=';')\n\n # there are some things that may be different from a model to another\n\n # for example, I also want to write in the project csv the username of the members\n if model == Projet:\n csv_writer.writerow(['ID', 'NAME', 'MEMBERS'])\n for project in queryset:\n # build a comma separated list with all the users and the tasks that are in the project\n members = ', '.join([member.username for member in project.members.all()])\n csv_writer.writerow([project.id, project.name, members])\n # if the model is User, only export non confidential fields\n if model == User:\n csv_writer.writerow(['USERNAME', 'NAME', 'SURNAME', 'E-MAIL'])\n for user in queryset:\n csv_writer.writerow([user.username, user.first_name, user.last_name, user.email])\n # for the other models that's what is going to happen\n else:\n # get all the field names and write them as headers\n field_names = [field.name for field in model._meta.fields]\n csv_writer.writerow(field.upper() for field in field_names)\n # for each instance in the queryset\n for obj in queryset:\n # \"\"\"general backup code\"\"\"\n # csv_writer.writerow([getattr(obj, field) for field in field_names])\n\n row = [] # create an empty row list\n # for each field of the model\n for field in field_names:\n # get the field value\n field_value = getattr(obj, field)\n # this is to control the format of the date that will be written in the csv\n if isinstance(field_value, datetime.datetime):\n field_value = field_value.strftime(\"%m/%d/%Y, %H:%M:%S\")\n row.append(field_value) # append the field value to the end of the row list\n\n csv_writer.writerow(row)\n\n # the .json and .xml formats are generated with the 
django serializers utilities\n elif file_format == 'json' or file_format == 'xml':\n # if the model is User, only export non confidential fields\n if model == User:\n json_xml = serializers.serialize(file_format, queryset, use_natural_foreign_keys=True,\n fields=('username', 'first_name', 'last_name', 'email'))\n else:\n # use_natural_foreign_keys=True means that the foreign keys won't be written as just numbers\n json_xml = serializers.serialize(file_format, queryset, use_natural_foreign_keys=True)\n\n output.write(json_xml)\n\n elif file_format == 'xls':\n wb = xlwt.Workbook(encoding='utf-8') # create excel workbook\n ws = wb.add_sheet(model._meta.model.__name__) # create sheet\n\n # Sheet header, first row\n row_num = 0\n font_style = xlwt.XFStyle()\n font_style.font.bold = True\n\n '''This code is pretty similar to the code to export in .csv, but in excel each cell (row and column) \n must written separately'''\n # get all the field names and write them as headers\n # if User only confidential data\n if model == User:\n field_names = ['username', 'first_name', 'last_name', 'email']\n else:\n field_names = [field.name for field in model._meta.fields]\n for col_num in range(len(field_names)):\n ws.write(row_num, col_num, field_names[col_num].upper(), font_style)\n\n # add a column for the members of the project\n # (otherwise it won't be done automatically because it's ManytoMany)\n if model == Projet:\n ws.write(row_num, col_num + 1, 'MEMBERS', font_style)\n\n # Sheet body, remaining rows\n font_style = xlwt.XFStyle()\n\n # for each instance in the queryset\n for obj in queryset:\n row_num += 1\n # for each field of the model\n for col_num in range(len(field_names)):\n # get the field value\n field_value = getattr(obj, field_names[col_num])\n # this is to control the format of the date that will be written in the csv\n if isinstance(field_value, datetime.datetime):\n field_value = field_value.strftime(\"%m/%d/%Y, %H:%M:%S\")\n ws.write(row_num, col_num, field_value.__str__(), font_style)\n\n # add the column with the members of the project\n if model == Projet:\n members = ', '.join([member.username for member in obj.members.all()])\n ws.write(row_num, col_num + 1, members, font_style)\n\n # save the excel file on the output stream\n wb.save(output)\n\n # generates the name of the output file depending on the model and the file format\n file_name = model._meta.model.__name__.lower() + '_data.' 
+ file_format\n # add the file to the zip archive and close the output stream\n data_zip.writestr(file_name, output.getvalue())\n output.close()\n\n '''\n uses the function defined above the export the data\n '''\n if exp_p:\n dump_to_file_format(projects_queryset, file_format, data_zip)\n if exp_m:\n dump_to_file_format(projects_members_queryset, file_format, data_zip)\n if exp_t:\n dump_to_file_format(tasks_queryset, file_format, data_zip)\n if exp_j:\n dump_to_file_format(journals_queryset, file_format, data_zip)\n if exp_s:\n dump_to_file_format(status_queryset, file_format, data_zip)\n\n # it is also possible to pass whatever list of querysets to this function\n if not querysets is None:\n for queryset in querysets:\n dump_to_file_format(queryset, file_format, data_zip)\n\n # closes the zip file\n data_zip.close()\n\n # finally send the zip file as a the HTTP response\n return response", "def export_as_csv(modeladmin, request, queryset):\n if not request.user.is_staff:\n raise PermissionDenied\n opts = modeladmin.model._meta\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % unicode(opts).replace('.', '_')\n writer = csv.writer(response)\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n writer.writerow([getattr(obj, field) for field in field_names])\n return response", "def export(request):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n filename = 'export-inscrits%s.csv' % date.today().strftime(\"%y-%m-%d\")\n\n person_list = Person.objects.all()\n\n table = ExportPersonTable(person_list)\n table.order_by = request.GET.get(\"sort\",'last_name')\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n writer = csv.writer(response)\n # Write headers to CSV file\n headers = []\n for column in table.columns:\n headers.append(column.header.encode('utf8'))\n writer.writerow(headers)\n\n # Write data to CSV file\n for obj in table.rows:\n row = []\n for value in obj:\n row.append(value.encode('utf8'))\n writer.writerow(row)\n\n # Return CSV file to browser as download\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_tenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\r\n self.prices[\"returns\"] = self.returns\r\n self.prices.columns = ['prices', 'returns']\r\n self.prices = self.prices.dropna()\r\n \r\n name = QFileDialog.getSaveFileName(None, 'Save File', filter='*.xlsx')\r\n if(name[0] == ''):\r\n # if name empty\r\n pass\r\n else:\r\n self.prices.to_excel(name[0])", "def export(self):\n\n rpt_date = datetime.now()\n filename = 
'bushfire_indicator_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export_as_csv(modeladmin, request, queryset):\n opts = modeladmin.model._meta\n field_names = set([field.name for field in opts.fields])\n if fields:\n fieldset = set(fields)\n field_names = field_names & fieldset\n elif exclude:\n excludeset = set(exclude)\n field_names = field_names - excludeset\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % str(opts).replace('.', '_')\n\n writer = csv.DictWriter(response, fields)\n writer.writeheader()\n\n for obj in queryset:\n writer.writerow(dict(zip(fields, [getattr(obj, field) for field in fields])))\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_268_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n rpt_date = datetime.now()\n filename = 'bushfire_regionbytenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def on_Output_CWA_excel_Now_button_clicked(self):\n # self.lineEdit.setText(result['Cname'])\n # self.lineEdit_2.setText(result['Sclass'])\n # self.lineEdit_3.setText(result['ClassTime'])\n # self.lineEdit_4.setText(result['Tno'])\n # self.lineEdit_6.setText(result['Date'])\n Result = CWA_Message_Query(self.lineEdit_2.text(),self.lineEdit_3.text(), self.lineEdit_6.text(), self.lineEdit.text())\n Create_Cwa_excel_table(self.lineEdit_2.text(),self.lineEdit.text(),Result)", "def queryset_to_csv(self):\n csv_data = []\n custom_fields = []\n\n # Start with the column headers\n headers = self.queryset.model.csv_headers.copy()\n\n # Add custom field headers, if any\n if hasattr(self.queryset.model, 'get_custom_fields'):\n for custom_field in self.queryset.model().get_custom_fields():\n headers.append(custom_field.name)\n custom_fields.append(custom_field.name)\n\n csv_data.append(','.join(headers))\n\n # Iterate through the queryset appending each object\n for obj in self.queryset:\n data = obj.to_csv()\n\n for custom_field in custom_fields:\n data += (obj.cf.get(custom_field, ''),)\n\n csv_data.append(csv_format(data))\n\n return '\\n'.join(csv_data)", "def ortra_export(request):\n export_fields = OrderedDict(ORTRA_EXPORT_FIELDS)\n export = OpenXMLExport('Exportation')\n export.write_line(export_fields.keys(), bold=True) # Headers\n # Data\n query_keys = [f for f in export_fields.values() if f is not None]\n query = Student.objects.filter(Q(klass__name__contains='ASAFE') |\n Q(klass__name__contains='ASEFE') |\n Q(klass__name__contains='ASSCFE'),\n archived=False).order_by('klass__name',\n 'last_name',\n 'first_name')\n\n for line in query.values(*query_keys):\n values = []\n for field in query_keys:\n if field == 'gender':\n 
values.append(('Madame', 'Monsieur')[line[field] == 'M'])\n else:\n values.append(line[field])\n export.write_line(values)\n\n return export.get_http_response('ortra_export')", "def test_export_with_str_datetime(self):\n self._test_export_stream(Users)", "def download_bank_details(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"bank_details.csv\"'\n\n writer = csv.writer(response)\n \n writer.writerow([\n 's/n',\n 'account_number',\n 'account_name',\n 'recipient_code',\n 'bank_name',\n 'student_name',\n 'date_added'\n ])\n \n count = 0\n for bank in StudentBankDetail.objects.filter(month=batch_date):\n count +=1\n writer.writerow([\n count,\n str(bank.account_number),\n str(bank.account_name),\n str(bank.recipient_code),\n str(bank.bank.bank_name),\n str(bank.student.name),\n datetime.strftime(bank.date_added, '%d-%m-%Y')\n ])\n \n\n\n return response", "def export_table (self,_w):\n try:\n _data = \"\"\n maxRow = _w.rowCount()\n maxColumn = _w.columnCount()\n for hc in range(0,maxColumn):\n try: _hci = str(_w.horizontalHeaderItem(hc).text())\n except:_hci=\"None\";pass\n if hc == (maxColumn-1) :_data += _hci\n elif hc < maxColumn:_data += \"%s,\" % _hci\n _data += \"\\n\"\n for r in range(0, maxRow):\n for c in range(0, maxColumn):\n _d = str(_w.item(r, c).text())\n if c == (maxColumn-1):_data += _d\n elif c < maxColumn:_data += \"%s,\" % _d\n _data += \"\\n\"\n options = QFileDialog.Options()\n saved_file, _ = QFileDialog.getSaveFileName(self, \"Save Table to file \", \"data\", \"Plain Text (*.txt);;CSV (*.csv);;All Files (*)\", options=options)\n _file = open(saved_file, 'w')\n _file.write(_data)\n _file.close()\n except FileNotFoundError:pass", "def save_entries(self):\n with open(self.file_name, \"w\") as file:\n file.write('date,name,minutes,note\\n')\n for entry in self.entries:\n writer = csv.writer(file)\n writer.writerow([entry.date, entry.name, entry.minutes, entry.note])", "def export_file_dto(self, active_model, objs=[], type=''):\n dto_parser = DtoParser()\n objs2 = []\n for obj in objs:\n objs2 += dto_parser.parseJointPromotion(obj)\n\n doc_type_obj = self.env[\"edi.doc.type\"]\n doc_obj = self.env[\"edi.doc\"]\n doc_type = doc_type_obj.search([(\"code\", '=', \"dto\")])[0]\n last_dto_file = doc_obj.search([(\"doc_type\", '=', doc_type.id)],\n order=\"date desc\", limit=1)\n if last_dto_file:\n count = last_dto_file.count + 1\n else:\n count = 1\n\n tmp_name = \"export_dto.txt\"\n file_len = len(objs2)\n filename = \"%sDTO%s.%s\" % (self.env.user.company_id.frigo_code,\n str(file_len).zfill(4),\n str(count).zfill(4))\n templates_path = self.addons_path('frigo_edi') + os.sep + 'wizard' + \\\n os.sep + 'templates' + os.sep\n mylookup = TemplateLookup(input_encoding='utf-8',\n output_encoding='utf-8',\n encoding_errors='replace')\n tmp = Template(filename=templates_path + tmp_name,\n lookup=mylookup, default_filters=['decode.utf8'])\n\n doc = tmp.render_unicode(o=objs2, type_=type, datetime=datetime,\n user=self.env.user).encode('utf-8', 'replace')\n file_name = self[0].service_id.output_path + os.sep + filename\n f = file(file_name, 'w')\n f.write(doc)\n f.close()\n file_obj = self.create_doc(filename, file_name, doc_type)\n file_obj.count = count", "def get_serialized_data(self):\n results = []\n fields = self.get_model_obj()._meta.fields\n\n user_dict = {}\n object_dict = {}\n related_dict = {}\n\n object_list = self.get_queryset().distinct().values()\n\n href = u'<a href=\"{}\">{}</a>'\n type_choices = 
(('+', u'Created'), ('~', u'Changed'), ('-', u'Deleted'))\n for index, item in enumerate(object_list):\n data = {\"DT_RowId\": item.get('history_id'), 0: '',\n 1: '', 2: '', 3: '-', 4: '-', 5: '-', 6: '-'}\n\n # 0 - Date\n # 1 - Who\n # 2 - Object (if not specific)\n # 3 - Action\n # 4 - Field\n # 5 - Previous\n # 6 - Current\n data[0] = item.get('history_date').strftime(\"%m/%d/%y %H:%M\")\n\n try:\n data[1] = user_dict[item.get('history_user_id')]\n except KeyError:\n link = u''\n try:\n user = User.objects.get(id=item.get('history_user_id'))\n link = href.format(user.profile.get_absolute_url(),user.get_full_name())\n except ObjectDoesNotExist:\n link = u\"Administrator*\"\n user_dict[item.get('history_user_id')] = link\n data[1] = user_dict[item.get('history_user_id')]\n\n try:\n data[2] = object_dict[item.get('id')]\n except KeyError:\n link, data_obj = u'', u''\n try:\n data_obj = self.get_object(id=item.get('id'))\n name = data_obj.__unicode__()\n name = name if len(name) < 32 else name[0:32] + u\" ...\"\n link = href.format(data_obj.get_absolute_url(), name)\n except ObjectDoesNotExist:\n link = \"Deleted\"\n except AttributeError:\n link = data_obj.__unicode__()\n object_dict[item.get('id')] = link\n data[2] = object_dict[item.get('id')]\n\n data[3] = next((y for x, y in type_choices if x == item.get('history_type')), u\"-\")\n\n\n try:\n previous_item = object_list[index - 1]\n except AssertionError:\n previous_item = {}\n changed_fields, prev_values, cur_values = [], [], []\n for field in fields:\n if field.name == \"modified_date\":\n continue\n prev_value = previous_item.get(field.name, u\"-\")\n prev_value = prev_value if prev_value else u\"-\"\n curr_value = item.get(field.name, u\"-\")\n curr_value = curr_value if curr_value else u\"-\"\n\n # Handle nice choices keys\n if hasattr(field, '_choices') and len(field._choices) and curr_value != u'-':\n curr_value = next((x[1] for x in field._choices if str(x[0]) == str(curr_value)))\n # Handle foreign keys.\n elif hasattr(field, 'related') and curr_value != u'-':\n try:\n curr_value = related_dict[(field.name, curr_value)]\n # log.debug(\"Related (current) Dict - Query Saved {} = {}\".format(field.name, curr_value))\n except KeyError:\n _v = u'-'\n try:\n _v = field.related.parent_model.objects.get(id=curr_value).__unicode__()\n except ObjectDoesNotExist:\n _v = u'Deleted'\n # log.debug(\"Setting C Related ({}, {}) = {}\".format(field.name, curr_value,_v))\n related_dict[(field.name, curr_value)] = _v\n curr_value = related_dict[(field.name, curr_value)]\n\n if hasattr(field, '_choices') and len(field._choices) and prev_value != u'-':\n prev_value = next((x[1] for x in field._choices if str(x[0]) == str(prev_value)))\n\n elif hasattr(field, 'related') and prev_value != u'-':\n try:\n prev_value = related_dict[(field.name, prev_value)]\n # log.debug(\"Related (prev) Dict - Query Saved {} = {}\".format(field.name, prev_value))\n except KeyError:\n _v = u'-'\n try:\n _v = field.related.parent_model.objects.get(id=prev_value).__unicode__()\n except ObjectDoesNotExist:\n _v = u'Deleted'\n # log.debug(\"Setting P Related ({}, {}) = {}\".format(field.name, prev_value,_v))\n related_dict[(field.name, prev_value)] = _v\n prev_value = related_dict[(field.name, prev_value)]\n\n if prev_value != curr_value:\n changed_fields.append(field.name)\n prev_values.append(prev_value)\n cur_values.append(curr_value)\n if len(changed_fields):\n data[4] = u\"<br />\".join([unicode(x) for x in changed_fields])\n data[5] = u\"<br />\".join([unicode(x) for 
x in prev_values])\n data[6] = u\"<br />\".join([unicode(x) for x in cur_values])\n results.append(data)\n\n results.reverse()\n # log.debug(pformat(results))\n return results", "def saved_results(request):\n timestamps = []\n for i in Source.objects.filter(user=request.user):\n timestamps.append({'id':i.source_id, 'val':i.datetime_extracted.strftime('%d/%m/%Y %H:%M') + \" \" + i.source})\n form = DeleteRelsCSVForm()\n return render(request, 'saved_results.html', {'timestamps':timestamps, 'form':form})", "def write_date(self, daty):\r\n for elem in range(len(self.output_zakladki)):\r\n first_row, first_col, no_date, cegla_no = 2, 1, 0, 0\r\n\r\n sheet = self.output_zakladki[elem]\r\n sh = self.output_file.get_sheet_by_name(sheet)\r\n\r\n for no in range(self.liczba_pelnych_linii): # writing data in complete row, containing 5 cegla\r\n for i in range(15):\r\n sh[self.alfabet[first_col] + str(first_row)] = daty[no_date]\r\n first_col += 1\r\n no_date += 1\r\n if no_date == 3:\r\n no_date = 0\r\n cegla_no += 5\r\n first_col = 1\r\n first_row += len(self.output_leki[elem]) + 2\r\n\r\n # writing data in incomplete row, containing < 5 cegla\r\n mising_date = (len(self.output_lista_cegiel) - cegla_no) * 3\r\n for i in range(mising_date):\r\n sh[self.alfabet[first_col] + str(first_row)] = daty[no_date]\r\n first_col += 1\r\n no_date += 1\r\n if no_date == 3:\r\n no_date = 0", "def export_as_csv(modeladmin, request, queryset):\n opts = modeladmin.model._meta\n field_names = [field.name for field in opts.fields]\n labels = []\n\n if exclude:\n field_names = [f for f in field_names if f not in exclude]\n\n elif fields:\n field_names = [field for field, _ in fields]\n labels = [label for _, label in fields]\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % (\n str(opts).replace('.', '_')\n )\n\n writer = csv.writer(response)\n\n if header:\n writer.writerow(labels if labels else field_names)\n\n for obj in queryset:\n writer.writerow([prep_field(request, obj, field, manyToManySep) for field in field_names])\n return response", "def export_query():\n\tdata = frappe._dict(frappe.local.form_dict)\n\n\tdel data[\"cmd\"]\n\tif \"csrf_token\" in data:\n\t\tdel data[\"csrf_token\"]\n\n\tif isinstance(data.get(\"filters\"), string_types):\n\t\tfilters = json.loads(data[\"filters\"])\n\tif isinstance(data.get(\"report_name\"), string_types):\n\t\treport_name = data[\"report_name\"]\n\t\tfrappe.permissions.can_export(\n\t\t\tfrappe.get_cached_value('Report', report_name, 'ref_doctype'),\n\t\t\traise_exception=True\n\t\t)\n\tif isinstance(data.get(\"file_format_type\"), string_types):\n\t\tfile_format_type = data[\"file_format_type\"]\n\t\n\tif isinstance(data.get(\"visible_idx\"), string_types):\n\t\tvisible_idx = json.loads(data.get(\"visible_idx\"))\n\telse:\n\t\tvisible_idx = None\n\t\n\t# add filter this customer\n\tparty = get_party()\n\tfilters[\"customer\"] = party.name or \"\"\n\n\tif file_format_type == \"Excel\":\n\t\tdata = run(report_name, filters)\n\t\tdata = frappe._dict(data)\n\t\tcolumns = get_columns_dict(data.columns)\n\n\t\tfrom frappe.utils.xlsxutils import make_xlsx\n\t\txlsx_data = build_xlsx_data(columns, data)\n\t\t\n\t\txlsx_file = make_xlsx(xlsx_data, \"Query Report\")\n\n\t\tfrappe.response['filename'] = report_name + '.xlsx'\n\t\tfrappe.response['filecontent'] = xlsx_file.getvalue()\n\t\tfrappe.response['type'] = 'binary'", "def test_fetchall(self):\n result = export.processExport(houseId=1)\n #We should have 2 
locations * 1 sensor * 10 days of data here\n # 2 * 1 * (288 * 10) == 5670\n #print result.shape\n\n #result.to_csv(\"temp.csv\")\n #Do we get the right object\n self.assertEqual(type(result), pandas.DataFrame)\n #And is it the right size\n self.assertEqual(result.shape, (2880, 2)) #So 2880 samples from two sensors\n #And the right range of data\n self.assertEqual(result.index[0], datetime.datetime(2013, 01, 01))\n self.assertEqual(result.index[-1], datetime.datetime(2013, 01, 10, 23, 55))", "def create_csv(self, type): \n if os.path.isfile(_path_finder('keydata','{0}_{1}.db'.format(\n self.keyword,type))):\n self.__db_init('{0}'.format(type))\n self.c.execute(\"SELECT MIN(date) FROM tweets\")\n mindate = self.c.fetchone()[0][0:10]\n self.c.execute(\"SELECT MAX(date) FROM tweets\")\n maxdate = self.c.fetchone()[0][0:10]\n start_date = datetime.datetime.strptime(mindate, '%Y-%m-%d')\n end_date = (datetime.datetime.strptime(maxdate, '%Y-%m-%d') + \n datetime.timedelta(days=1))\n \n def __date_range(start, end):\n for n in range((end - start).days):\n yield start + datetime.timedelta(days=n)\n \n def __db_to_list():\n for single_date in __date_range(start_date, end_date):\n d = \"\".join(['%',single_date.strftime(\"%Y-%m-%d\"),'%'])\n self.c.execute('''SELECT count(*) FROM tweets where \n date like('{0}')'''.format(d))\n yield [d[1:11], self.c.fetchone()[0]]\n \n path = _path_finder('keydata','{0}_{1}.csv'.format(\n self.keyword,type))\n if sys.version_info[0] < 3: #Python3 compatibility check\n infile = open(path, 'wb')\n else:\n infile = open(path, 'w', newline='', encoding='utf8')\n with infile as f:\n writer = csv.writer(f)\n writer.writerows(__db_to_list())\n self.conn.commit()\n self.conn.close()\n print('\\nReport has been created:')\n print(os.path.abspath(path))", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_auth_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export_repayment_xlsx(request):\n import openpyxl\n try:\n from openpyxl.cell import get_column_letter\n except ImportError:\n from openpyxl.utils import get_column_letter\n\n from_date = request.GET.get('from_date')\n to_date = request.GET.get('to_date')\n search = request.GET.get('search_value') or ''\n search_query = Q()\n if search:\n search_query = Q(user__user__username__icontains=search) | \\\n Q(user__user__first_name__icontains=search) | \\\n Q(project__title__icontains=search) | \\\n Q(amount__icontains=search) | \\\n Q(user__user__last_name__icontains=search) | \\\n Q(user__user__email__icontains=search)\n if from_date and to_date:\n import datetime\n import pytz\n date1 = datetime.datetime.strptime(from_date, '%Y-%m-%d').date()\n date2 = datetime.datetime.strptime(to_date, '%Y-%m-%d').date()\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00,\n created_at__range=[\n datetime.datetime(date1.year, date1.month, date1.day, 8, 15,\n 12, 0, pytz.UTC),\n datetime.datetime(date2.year, date2.month, date2.day, 8, 15,\n 12, 0, pytz.UTC)]).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").filter(search_query).iterator()\n else:\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00).order_by('-created_at') \\\n .select_related(\"user\", \"project\", 
\"user__user\").filter(search_query).iterator()\n response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = 'attachment; filename=Repayment_report.xlsx'\n wb = openpyxl.Workbook()\n ws = wb.get_active_sheet()\n ws.title = \"RE-volv\"\n\n row_num = 0\n\n columns = [\n (u\"FIRST NAME\", 30),\n (u\"LAST NAME\", 30),\n (u\"USERNAME\", 30),\n (u\"EMAIL\", 30),\n (u\"DATE\", 30),\n (u\"NAME OF PROJECT\", 30),\n (u\"DONATION AMOUNT\", 30),\n (u\"REPAYMENT AMOUNT\", 30),\n ]\n\n for col_num in xrange(len(columns)):\n c = ws.cell(row=row_num + 1, column=col_num + 1)\n c.value = columns[col_num][0]\n ws.column_dimensions[get_column_letter(col_num + 1)].width = columns[col_num][1]\n\n for payment in repayments:\n row_num += 1\n row = [\n payment.user.user.first_name,\n payment.user.user.last_name,\n payment.user.user.username,\n payment.user.user.email,\n payment.created_at,\n payment.project.title,\n round(Payment.objects.filter(user=payment.user).filter(project=payment.project).aggregate(Sum('amount'))[\n 'amount__sum'] or 0, 2),\n round(payment.amount, 2)\n\n ]\n for col_num in xrange(len(row)):\n c = ws.cell(row=row_num + 1, column=col_num + 1)\n c.value = row[col_num]\n wb.save(response)\n return response", "def get_export_queryset(self, request, context):\n # scope = self.request.POST.get('_select_across', False) == '1'\n scope = request.GET.get('scope')\n select_across = request.GET.get('_select_across', False) == '1'\n selected = request.GET.get('_selected_actions', '')\n if scope == 'all':\n queryset = self.admin_view.queryset()\n elif scope == 'header_only':\n queryset = []\n elif scope == 'selected':\n if not select_across:\n selected_pk = selected.split(',')\n queryset = self.admin_view.queryset().filter(pk__in=selected_pk)\n else:\n queryset = self.admin_view.queryset()\n else:\n queryset = [r['object'] for r in context['results']]\n return queryset", "def get_obj_df():\n obj_df = pd.read_csv('data/object.csv')\n obj_df = obj_df.drop_duplicates()[['course_id', 'module_id', 'category', 'start']] \n obj_df['start'] = pd.to_datetime(obj_df[obj_df['start'] != 'null']['start'])\n return obj_df", "def print_stock_rotation_report(self):\n warehouses = False\n locations = False\n from_date = False\n to_date = False\n active_id = self.ids[0]\n today=datetime.now().strftime(\"%Y-%m-%d\")\n f_name = 'Stock Rotation Report' + ' ' + today\n stock_warehouse_obj = self.env['stock.warehouse']\n stock_locations_obj = self.env['stock.location']\n product_obj = self.env['product.product']\n \n if self.filtaration == 'warehouse':\n if not self.include_all_warehouse:\n if not self.warehouse_ids:\n raise ValidationError(\"please select the Warehouse.\")\n warehouses = self.warehouse_ids\n else:\n warehouses = stock_warehouse_obj.search([])\n else:\n if not self.include_all_location:\n if not self.location_ids:\n raise ValidationError(\"please select the Locations.\")\n locations = self.location_ids\n else:\n locations = stock_locations_obj.search([('usage','=','internal')])\n\n\n if not self.from_date:\n raise ValidationError(\"please select the From Date.\")\n \n if not self.to_date:\n raise ValidationError(\"please select the To Date.\")\n\n all_products = product_obj.with_context(active_test=True).search([('type','=','product')])\n from_date = self.from_date\n to_date = self.to_date\n \n date_1 = time.strptime(from_date, \"%Y-%m-%d\")\n date_2 = time.strptime(to_date, \"%Y-%m-%d\")\n if not (date_1 <= date_2):\n raise 
ValidationError(\"Fromdate is not previous then Todate\")\n self.get_stock_rotation_report(from_date,to_date,warehouses,locations,all_products)\n if self.datas:\n return {\n 'type' : 'ir.actions.act_url',\n 'url':'web/content/?model=stock.rotation.report&download=true&field=datas&id=%s&filename=%s.xls'%(active_id,f_name),\n 'target': 'new',\n }", "def filter_meteo_data(self, startdate, enddate):\n self.all_meteo_data.columns.values[0]='Datum-tijd'\n self.all_meteo_data['datetime']=pd.to_datetime(self.all_meteo_data['Datum-tijd'], format='%Y-%m-%dT%H:%M:%SZ')\n self.all_meteo_data.drop(['Datum-tijd'],axis=1, inplace=True)\n mask = (self.all_meteo_data['datetime'] > startdate) & (self.all_meteo_data['datetime'] <= enddate)\n meteodata = self.all_meteo_data.loc[mask].copy()\n meteodata.set_index('datetime',inplace=True)\n return meteodata", "def _get_report_data(self, request, queryset):\n first_item = queryset[0]\n data = {\n 'id': str(slugify(first_item.invoice_no)),\n 'property_of_id': (\n first_item.property_of.id\n if first_item.property_of else None\n ),\n 'model': queryset.model._meta.model_name,\n 'base_info': {\n 'invoice_no': first_item.invoice_no,\n 'invoice_date': first_item.invoice_date,\n 'provider': first_item.provider,\n 'datetime': datetime.datetime.now().strftime(\n self._invoice_report_datetime_format\n ),\n },\n 'items': list(map(self._parse_item, queryset)),\n 'sum_price': str(\n queryset.aggregate(\n Sum(self._price_field)\n ).get('{}__sum'.format(self._price_field))\n )\n }\n logger.info('Invoice report data: {}'.format(data))\n return data", "def raw_csv_sys_2w(request):\n two_weeks = datetime.date.today() - datetime.timedelta(days=14)\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'atachment; filename = \"raw-powerbi-sys-2w.csv\"'\n sys_er = System_error.objects.filter(event_date__gt=two_weeks)\n sys_w = System_warning.objects.filter(event_date__gt=two_weeks)\n sys_crit = System_critical.objects.filter(event_date__gt=two_weeks)\n writer = csv.writer(response)\n for line in sys_er:\n writer.writerow([line.event_id, line.event_source, line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'system error'])\n for line in sys_w:\n writer.writerow([line.event_id, line.event_source, line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'system warning'])\n for line in sys_crit:\n writer.writerow([line.event_id, line.event_source, line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'system critical'])\n \n return response", "def download_queryset(self, queryset, export_format):\n\n dataset = LocationResource().export(queryset=queryset)\n filedata = dataset.export(export_format)\n filename = f\"InvenTree_Locations.{export_format}\"\n\n return DownloadFile(filedata, filename)", "def export_gegevens(te_doorzoeken_kolom, is_gelijk_aan, exporteer_gegevens, bestandsnaam):\n empt_lists = []\n # array met lege lijsten maken\n for n in range(len(exporteer_gegevens)):\n empt_lists.append([])\n\n # gegevens aan de gemaakte array toevoegen in de betreffende lijsten\n for i in range(len(df)):\n if df[te_doorzoeken_kolom].iloc[i] == is_gelijk_aan:\n for j in range(len(exporteer_gegevens)):\n empt_lists[j].append(df[exporteer_gegevens[j]].iloc[i])\n\n df_angfo = pd.DataFrame(empt_lists[0], columns=[exporteer_gegevens[0]])\n\n # geheel omzetten naar een pandas dataframe\n for k in range(len(exporteer_gegevens)):\n 
df_angfo[exporteer_gegevens[k]] = pd.Series(empt_lists[k], index=df_angfo.index)\n\n df_angfo.to_excel(bestandsnaam, 'Sheet1')\n print('Het bestand is opgeslagen als: %s' % bestandsnaam)", "def _get_date(self):\n for fax_out in self:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n date_planned = False\n if fax_out.date:\n from_dt = datetime.datetime.strptime(str(fax_out.date[:19]), DATETIME_FORMAT)\n # from_dt = from_dt + datetime.timedelta(hours=5 , minutes=30)\n date_planned = from_dt.strftime('%Y-%m-%d')\n fax_out.fax_date = date_planned", "def _get_data_from_view(self):\n self.log.info(\"Getting data from view: vw_AllSurveyData \")\n view_data = self.db.execute_pandas_query(self._get_query('vw_survey_data'))\n self._export_data_to_csv(view_data, 'fresh_survey_data.csv')", "def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n time_formatter = date.getLocaleFormatter(self.request, \"time\", \"short\")\n for result in results:\n data = {}\n data[\"subject\"] = result.short_name\n # this tab appears in the workspace pi/ view...\n data[\"url\"] = url.set_url_context(\"../calendar/sittings/obj-%i/schedule\" %\n result.sitting_id)\n # Note: same UI is also displayed at: \n # /business/sittings/obj-%i/schedule % result.sitting_id\n data[\"items\"] = \"\"\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"\"\n data[\"type\"] = result.group.type\n data[\"group\"] = u\"%s %s\" % (\n result.group.type.capitalize(), result.group.short_name)\n data[\"time_from_to\"] = (\n time_formatter.format(result.start_date),\n time_formatter.format(result.end_date))\n data[\"date\"] = formatter.format(result.start_date) \n if result.venue:\n data[\"venue\"] = _(result.venue.short_name)\n else:\n date[\"venue\"] = \"\"\n if type(result)==domain.Question:\n data[\"to\"] = result.ministry.short_name\n else:\n data[\"to\"]= \"\"\n # past, present, future\n today = datetime.datetime.today().date()\n startday = result.start_date.date()\n if today==startday:\n data[\"css_class\"] = \"present\"\n elif today>startday:\n data[\"css_class\"] = \"past\"\n else:\n data[\"css_class\"] = \"future\"\n data_list.append(data)\n self._data = data_list", "def get_queryset(self):\n queryset = models.Drukteindex.objects.all().order_by(\"index\")\n\n vollcode = self.request.query_params.get('vollcode', None)\n if vollcode is not None:\n queryset = queryset.filter(vollcode=vollcode)\n\n timestamp_str = self.request.query_params.get('timestamp', None)\n\n if timestamp_str is None:\n timestamp_dt = datetime.now()\n\n if timestamp_str is not None:\n timestamp_dt = convert_to_date(timestamp_str)\n\n if timestamp_dt > convert_to_date('07-12-2017-00-00-00'):\n current_day = timestamp_dt.strftime(\"%A\")\n if current_day == 'Friday':\n timestamp_dt = '02-12-2017-23-00-00'\n elif current_day == 'Saturday':\n timestamp_dt = '01-12-2017-23-00-00'\n elif current_day == 'Sunday':\n timestamp_dt = '03-12-2017-23-00-00'\n elif current_day == 'Monday':\n timestamp_dt = '04-12-2017-23-00-00'\n elif current_day == 'Tuesday':\n timestamp_dt = '05-12-2017-23-00-00'\n elif current_day == 'Wednesday':\n timestamp_dt = '06-12-2017-23-00-00'\n elif current_day == 'Thursday':\n timestamp_dt = '07-12-2017-23-00-00'\n timestamp_dt = convert_to_date(timestamp_dt)\n\n today = timestamp_dt.date()\n\n level = self.request.query_params.get('level', None)\n if level == 'day':\n queryset = 
queryset.filter(timestamp__date=today)\n exclude = ('weekday',)\n\n if level == 'week':\n yesterday = today - timedelta(days=1)\n previous_week = yesterday - timedelta(days=7)\n queryset = queryset.filter(\n timestamp__gte=previous_week, timestamp__lt=yesterday)\n current_hour = timestamp_dt.hour\n queryset = queryset.filter(timestamp__hour=current_hour)\n\n return queryset", "def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n for result in results:\n data ={}\n data[\"qid\"]= (\"m_\" + str(result.motion_id))\n data[\"subject\"] = u\"M \" + str(result.motion_number) + u\" \" + result.short_name\n data[\"title\"] = result.short_name\n if result.approval_date:\n data[\"result_item_class\"] = (\"workflow-state-\" + \n result.status + \"sc-after-\" + \n datetime.date.strftime(result.approval_date, \"%Y-%m-%d\"))\n else:\n data[\"result_item_class\"] = \"workflow-state-\" + result.status\n data[\"url\"] = url.set_url_context(\"motions/obj-\" + str(result.motion_id))\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"%s %s\" %(result.owner.first_name, result.owner.last_name)\n data[\"type\"] = _(result.type)\n data[\"to\"] = \"\"\n data_list.append(data)\n self._data = data_list", "def print_date_in_sheet(self,data_dict,workbook,sheet_data,row_data,body_style,qty_cell_style,value_style,blank_cell_style,value_style2):\n product_data_dict = collections.OrderedDict()\n row=4\n column=0\n for warehouse_id,data_details in data_dict.iteritems():\n for product_data in data_details:\n row=row_data[sheet_data[warehouse_id]]\n sheet_data[warehouse_id].row(row).height = 350\n opening_stock = product_data.get('opening_qty') or 0\n qty_purchase = product_data.get('qty_purchase_in_duration') or 0\n qty_sale = product_data.get('qty_sales_in_duration') or 0\n scap_qty = product_data.get('scrap_location_qty') or 0\n adj_qty = product_data.get('adjusted_qty_in_duration') or 0\n last_sales = product_data.get('last_sales') or ''\n last_purchase_date = product_data.get('last_purchase_date') or ''\n warehouse_in_qty = product_data.get('warehouse_in_qty') or 0\n warehouse_out_qty = product_data.get('warehouse_out_qty') or 0\n closing_qty = (opening_stock+qty_purchase+warehouse_in_qty)-(qty_sale+scap_qty+warehouse_out_qty)\n sheet_data[warehouse_id].write(row,column,product_data.get('sku'),body_style)\n sheet_data[warehouse_id].write(row,column+1,product_data.get('name') or '-',body_style)\n sheet_data[warehouse_id].write(row,column+2,product_data.get('Cost') or 0,qty_cell_style)\n sheet_data[warehouse_id].write(row,column+3,product_data.get('sales_price') or 0,qty_cell_style)\n sheet_data[warehouse_id].write(row,column+4,None,blank_cell_style)\n sheet_data[warehouse_id].write(row,column+5,opening_stock,value_style2)\n sheet_data[warehouse_id].write(row,column+6,qty_purchase,value_style2)\n sheet_data[warehouse_id].write(row,column+7,qty_sale,value_style2)\n sheet_data[warehouse_id].write(row,column+8,scap_qty,value_style2)\n sheet_data[warehouse_id].write(row,column+9,adj_qty,value_style2)\n sheet_data[warehouse_id].write(row,column+10,closing_qty,value_style2)\n sheet_data[warehouse_id].write(row,column+11,None,blank_cell_style)\n sheet_data[warehouse_id].write(row,column+12,warehouse_in_qty,value_style)\n sheet_data[warehouse_id].write(row,column+13,warehouse_out_qty,value_style)\n 
sheet_data[warehouse_id].write(row,column+14,None,blank_cell_style)\n sheet_data[warehouse_id].write(row,column+15,last_purchase_date,body_style)\n sheet_data[warehouse_id].write(row,column+16,last_sales,body_style)\n row+=1\n row_data.update({sheet_data[warehouse_id]: row})\n product_data_dict = self.prepare_date_for_all_warehouses_sheets(product_data.get('product'),product_data_dict,opening_stock,last_sales,last_purchase_date,qty_purchase,qty_sale,scap_qty,adj_qty,warehouse_in_qty,warehouse_out_qty)\n return product_data_dict", "def species_points(request, format='csv'):\n \n \n species = request.GET.get('species')\n if species:\n records = ( SpeciesPoints.objects\n .filter(valid_species_name=species)\n .filter(lon__isnull=False)\n .filter(lat__isnull=False) )\n \n \n if request.GET.get('lon'):\n records = records.filter(lon=request.GET.get('lon'))\n \n if request.GET.get('lat'):\n records = records.filter(lat=request.GET.get('lat'))\n \n if request.GET.get('max_lat'):\n records = records.filter(lat__lte=request.GET.get('max_lat'))\n \n if request.GET.get('max_lon'):\n records = records.filter(lon__lte=request.GET.get('max_lon'))\n \n if request.GET.get('min_lat'):\n records = records.filter(lat__gte=request.GET.get('min_lat'))\n \n if request.GET.get('min_lon'):\n records = records.filter(lon__gte=request.GET.get('min_lon'))\n \n if request.GET.get('bentity_id'):\n records = records.filter(bentity_id=request.GET.get('bentity_id')) \n \n \n \n # fetch all the bentitites at once, so we don't have to hit the database once for each record\n records = records.prefetch_related('bentity') \n \n # serialize to JSON\n export_objects = [{\n 'gabi_acc_number': r.gabi_acc_number,\n 'species': species,\n 'lat': r.lat,\n 'lon': r.lon,\n 'status':r.status,\n 'bentity_id': r.bentity_id,\n 'bentity_name': r.bentity.bentity,\n 'num_records': r.num_records,\n 'literature_count': r.literature_count,\n 'museum_count': r.museum_count,\n 'database_count': r.database_count,\n } for r in records]\n \n \n if format == 'csv':\n return CSVResponse(\n export_objects,\n fields=('species', 'lat', 'lon', 'bentity_id', 'bentity_name', 'status', 'num_records', 'literature_count', 'museum_count', 'database_count') )\n \n else: \n return JSONResponse({'records': export_objects})\n \n else: # punt if the request doesn't have a species\n return errorResponse(\"Please supply a 'species' argument.\", format, {'records':[]})", "def export_vrouwen():\n voornaam, tussenvoegsel, achternaam, straat, huisnummer, postcode, woonplaats = [], [], [], [], [], [], []\n for i in range(len(df)):\n if df['Vrouw'].iloc[i]:\n voornaam.append(df['voornaam'].iloc[i])\n tussenvoegsel.append(df['tussenvoegsel'].iloc[i])\n achternaam.append(df['achternaam'].iloc[i])\n straat.append(df['straat'].iloc[i])\n huisnummer.append(df['huisnummer'].iloc[i])\n postcode.append(df['postcode'].iloc[i])\n woonplaats.append(df['woonplaats'].iloc[i])\n\n df_angfo = pd.DataFrame(voornaam, columns=['voornaam'])\n df_angfo['tussenvoegsel'] = pd.Series(tussenvoegsel, index=df_angfo.index)\n df_angfo['achternaam'] = pd.Series(achternaam, index=df_angfo.index)\n df_angfo['straat'] = pd.Series(straat, index=df_angfo.index)\n df_angfo['huisnummer'] = pd.Series(huisnummer, index=df_angfo.index)\n df_angfo['postcode'] = pd.Series(postcode, index=df_angfo.index)\n df_angfo['woonplaats'] = pd.Series(woonplaats, index=df_angfo.index)\n df_angfo.to_excel('output\\\\vrouwen_leden.xlsx', 'vrouwen_leden')", "def export_scholars(modeladmin, request, queryset):\r\n response = 
HttpResponse('', content_type='text/csv; charset=utf-8')\r\n response['Content-Disposition'] = 'attachment; filename=cos.csv'\r\n writer = csv.writer(response)\r\n writer.writerow([\r\n 'Title',\r\n 'Reviewer',\r\n 'Leader',\r\n 'Leader Email',\r\n 'Sponsor',\r\n 'Other Sponsor',\r\n 'Presenters',\r\n 'Funding Source',\r\n 'Work Type',\r\n 'Permission to Reproduce',\r\n 'Faculty Sponsor Approval',\r\n 'Table',\r\n 'Electricity',\r\n 'Link',\r\n 'Poster',\r\n 'Date created',\r\n ])\r\n for presentation in queryset:\r\n link = 'http://{0}{1}'.format(\r\n settings.SERVER_URL,\r\n presentation.get_absolute_url(),\r\n )\r\n poster = 'http://{0}/assets/{1}'.format(\r\n settings.SERVER_URL, presentation.poster_file,\r\n )\r\n try:\r\n leader = '{0}, {1}'.format(\r\n presentation.leader.last_name,\r\n presentation.leader.first_name,\r\n )\r\n except Exception:\r\n leader = ''\r\n presenters = ''\r\n for presenter in presentation.presenters.all():\r\n if not presenter.leader:\r\n presenters += '{0}, {1}|'.format(\r\n presenter.last_name, presenter.first_name,\r\n )\r\n title = smart_str(\r\n presentation.title,\r\n encoding='utf-8',\r\n strings_only=False,\r\n errors='strict',\r\n )\r\n funding = smart_str(\r\n presentation.funding,\r\n encoding='utf-8',\r\n strings_only=False,\r\n errors='strict',\r\n )\r\n work_type = smart_str(\r\n presentation.work_type,\r\n encoding='utf-8',\r\n strings_only=False,\r\n errors='strict',\r\n )\r\n sponsor_email = ''\r\n if presentation.leader:\r\n sponsor_email = presentation.leader.sponsor_email\r\n sponsor_other = presentation.leader.sponsor_other\r\n writer.writerow([\r\n title,\r\n presentation.reviewer,\r\n leader,\r\n presentation.user.email,\r\n sponsor_email,\r\n sponsor_other,\r\n presenters[:-1],\r\n funding,\r\n work_type,\r\n presentation.permission,\r\n presentation.shared,\r\n presentation.need_table,\r\n presentation.need_electricity,\r\n link,poster,\r\n presentation.date_created,\r\n ])\r\n return response", "def download_excel(restaurant_id):\n raw_data = get_menu_items_based_on_restaurant(restaurant_id=restaurant_id)\n csv_file_path = \"{}/file.csv\".format(settings.BASE_DIR)\n static_form = ['name', 'description', 'price', 'category', 'sub_category']\n with open(csv_file_path, 'w', newline='') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=static_form)\n writer.writeheader()\n writer.writerows(raw_data['itemsList'])\n csv_file.close()\n return csv_file_path", "def order_report():", "def general_export(request):\n export_fields = OrderedDict(GENERAL_EXPORT_FIELDS)\n export = OpenXMLExport('Exportation')\n export.write_line(export_fields.keys(), bold=True) # Headers\n # Data\n query_keys = [f for f in export_fields.values() if f is not None]\n query = Student.objects.filter(archived=False).order_by('klass__name', 'last_name', 'first_name')\n for line in query.values(*query_keys):\n values = []\n for field in query_keys:\n if field == 'gender':\n values.append(('Madame', 'Monsieur')[line[field] == 'M'])\n elif field in ('dispense_ecg', 'dispense_eps', 'soutien_dys'):\n values.append('Oui' if line[field] is True else '')\n else:\n values.append(line[field])\n export.write_line(values)\n\n return export.get_http_response('general_export')", "def DownloadRingtoneDataSince(request, since):\r\n response = HttpResponse(mimetype='text/csv')\r\n response['Content-Disposition'] = 'attachment; filename=ringtones.csv'\r\n\r\n writer = csv.DictWriter(response, models.Ringtone.CSV_FILEDS)\r\n # Hack. 
Write the header first.\r\n d = {}\r\n for k in models.Ringtone.CSV_FILEDS:\r\n d[k] = k\r\n writer.writerow(d)\r\n if since:\r\n query = models.Ringtone.all().filter('creation_time >= ',\r\n datetime.datetime.strptime(since, \"%Y-%m-%dT%H:%M:%S.%fZ\"))\r\n else:\r\n query = models.Ringtone.all()\r\n for r in query:\r\n writer.writerow(r.DumpToCSVRow())\r\n return response", "def export_data(self):\r\n stocks = {}\r\n headings = ['Security', 'Price', 'Change', 'Change %', '52 Week', 'Market Cap']\r\n\r\n for data in range(6):\r\n for items in self.root.main.treeview.get_children():\r\n values = self.root.main.treeview.item(items, 'values')\r\n if headings[data] not in stocks:\r\n stocks[headings[data]] = []\r\n stocks.get(headings[data]).append(values[data])\r\n\r\n df = pd.DataFrame(stocks, columns=headings)\r\n path = tk.filedialog.asksaveasfilename(title='Save File As...',\r\n filetypes=((\"CComma-separated values (.csv)\", \"*.csv\"), (\"Text Document(.txt)\", \"*.txt\")))\r\n\r\n if not path:\r\n return\r\n else:\r\n df.to_excel(path, index=False, header=True)", "def generateCsvData(self, context, obj, entity):\n raise NotImplementedError()", "def _getdata(self, data):\n lines = []\n start_date = str(data['form']['start_date'])\n end_date = str(data['form']['end_date'])\n department_ids = data['form']['department_ids']\n\n vehicles_ids = self.pool.get('fleet.vehicle').search(self.cr, self.uid,\\\n [('department_id', 'in', department_ids)], context=self.context)\n\n fuel_qty_line_obj = self.pool.get('fuel.qty.line')\n\n sdate = datetime.strptime(start_date, \"%Y-%m-%d\")\n syear = sdate.year\n smonth = sdate.month\n edate = datetime.strptime(end_date, \"%Y-%m-%d\")\n eyear = edate.year\n emonth = edate.month\n\n fuel_qty_line_ids = fuel_qty_line_obj.search(self.cr, self.uid,\\\n [('vehicles_id', 'in', vehicles_ids)], context=self.context)\n\n\n\n counter = 1\n for qty_line in fuel_qty_line_obj.browse(self.cr, self.uid, \\\n fuel_qty_line_ids, context=self.context):\n current_m = int(qty_line.month)\n current_y = int(qty_line.year)\n start = current_m >= smonth and current_y >= syear\n end = current_m <= emonth and current_y <= eyear\n if start and end:\n line = {'type':str(counter)+\" : \"+\\\n qty_line.vehicles_id.type.name}\n line['vehicle_no'] = qty_line.vehicles_id.vin_sn\n line['spent'] = qty_line.spent_qty\n line['counter_no'] = str(qty_line.vehicles_id.odometer)+\" \"+\\\n qty_line.vehicles_id.odometer_unit\n line['date'] = qty_line.month+\"/\"+qty_line.year\n lines.append(line)\n counter += 1\n return lines", "def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )", "def create_query_csv(self):\n\n self.query_df.to_csv(self.query_output_file)", "def save_csv(self):\n path, _ = QtWidgets.QFileDialog.getSaveFileName(self, '保存数据', '', 'CSV(*.csv)')\n\n if not path:\n return\n\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator='\\n')\n\n writer.writerow(self.headers.keys())\n\n for row in 
range(self.rowCount()):\n row_data = []\n for column in range(self.columnCount()):\n item = self.item(row, column)\n if item:\n row_data.append(str(item.text()))\n else:\n row_data.append('')\n writer.writerow(row_data)", "def on_export_button(self, event):\n wildcard = \"Filtered _iso_res_filt.csv file (*_iso_res_filt.csv)|*_iso_res_filt.csv|\"\\\n \"All files (*.*)|*.*|\"\n defFile = self.datafile[:-4]+'_filt.csv'\n dlg = wx.FileDialog(\n self, message=\"Save file as ...\", \n defaultDir=self.currentDirectory, \n defaultFile=defFile, wildcard=wildcard, style=wx.SAVE\n )\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.recalcAll()\n self.redrawAll()\n self.dataFrame['priorFilter'] = self.dataFrame['allFPass']\n self.dataFrame.to_csv(path, index=False)\n summaryCSVPath = path.split('.')[0] + '_median_[' + ''.join(self.calcNum) + ']_[' + ''.join(self.calcDen) + '].csv'\n self.writeSummaryCSV(summaryCSVPath)\n \n dlg.Destroy()", "def download_data_slot(self):\n if self.result is None:\n self.label_current_message.setText('尚未有預測結果!請確認是否已載入資料並執行預測。')\n else:\n fileName, _ = QFileDialog.getSaveFileName(self, 'Save file', '', '*.csv') # 建立儲存檔案的對話盒(dialog)\n if fileName:\n self.result['date'] = pd.to_datetime(self.result['date'])\n raw_input_data = self.Data.copy() # 需要把原資料copy,否則直接取用的話,輸出結果會隨著下載次數而無謂增加\n output_data = raw_input_data.append(self.result.loc[:, ['date'] + [i for i in self.column_name]])\n output_data.to_csv(fileName, index = None)", "def write_csv(self):\n self.tableView.df.to_csv('Data export.csv', index=False)\n print('CSV file exported')", "def mono_sheet(self):\n xls = pandas.read_excel(str(self.source))\n xls.to_csv(str(self.dest), **self.kwargs)", "def _get_date(self):\n for fax_in in self:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n date_planned = False\n if fax_in.date:\n from_dt = datetime.datetime.strptime(str(fax_in.date[:19]), DATETIME_FORMAT)\n # from_dt = from_dt + datetime.timedelta(hours=5 , minutes=30)\n date_planned = from_dt.strftime('%Y-%m-%d')\n fax_in.fax_date = date_planned", "def generate_spreadsheet(request, id):\n election = get_object_or_404(Election, pk=id)\n response = render_to_response(\"django_elect/spreadsheet.html\", {\n 'full_stats': election.get_full_statistics(),\n })\n filename = \"election%s.xls\" % (election.pk)\n response['Content-Disposition'] = 'attachment; filename='+filename\n response['Content-Type'] = 'application/vnd.ms-excel; charset=utf-8'\n return response", "def print_facturas(self, data):\n #pruebas = self.env['calling'].search([('state', '=', 'active')])\n # self.nuevo = self.env['account.invoice'].search([('type','=','out invoice')])\n\n\n if self.date_from and self.date_to:\n fecha_inicio = self.date_from\n fecha_fin = self.date_to\n\n if datetime.strptime(fecha_inicio, DATE_FORMAT) >= datetime.strptime(fecha_fin, DATE_FORMAT):\n raise ValidationError('Advertencia! La fecha de inicio no puede ser superior a la fecha final')\n\n fecha_actual = str(date.today())\n if datetime.strptime(fecha_inicio, DATE_FORMAT) > datetime.strptime(fecha_actual, DATE_FORMAT):\n raise ValidationError('Advertencia! La fecha de inicio no puede ser mayor a la fecha actual')\n elif datetime.strptime(fecha_fin, DATE_FORMAT) > datetime.strptime(fecha_actual, DATE_FORMAT):\n raise ValidationError('Advertencia! 
La fecha de final no puede ser mayor a la fecha actual')\n\n calling_obj = self.env['calling']\n calling_ids = calling_obj.search(\n [('calling_date', '>=', fecha_inicio), ('calling_date', '<=', fecha_fin)])\n if calling_ids:\n ids = []\n for id in calling_ids:\n ids.append(id.id)\n datas = self.read(self.ids)[0]\n data = {\n 'ids': ids,\n 'model': 'report.tys_calling.report_services_sede',\n 'form': {\n 'datas': datas,\n 'date_from': self.date_from,\n 'date_to': self.date_to,\n 'sede': self.sede.id,\n 'all':self.all,\n },\n 'context': self._context\n }\n return self.env.ref('tys_calling.report_services_for_sede').report_action(self, data=data, config=False)\n else:\n raise ValidationError('Advertencia! No existen llamadas entre las fechas seleccionadas')\n\n\n # use `module_name.report_id` as reference.\n # `report_action()` will call `get_report_values()` and pass `data` automatically.", "def generate_dataset_csv(request):\n\n response = csv_export(request,Dataset)\n return response", "def career_teachers_excel(self, request):\n\n # Get the career to be processed their results.\n career_id = request.GET.get('career_id', '')\n career = EvaluationsCareer.objects.get(pk__exact=career_id)\n\n # Get the results for each esignature of the carrer en each exam.\n data = self.get_career_results(career)\n\n # Generates the CSV with the results of the career,then return as downloadable file.\n response = self.get_teacher_results_excel(data)\n return response", "def export_patient_records(request, app, model):\n restart_reasons = [r.reason_id for r in RestartReason.objects.order_by('reason_id')]\n \n header_row = ['Patient ID', 'Vesicant/Irritant?', 'IV Attempts']\n header_row += restart_reasons\n \n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=ebp.csv' \n writer = csv.writer(response)\n \n # Write the header (i.e., titles) row\n writer.writerow(header_row)\n \n for p in PatientRecord.objects.order_by('patient_id'):\n patient_restart_reasons = [r.reason_id for r in p.restart_reasons.order_by('id')]\n row = [p.patient_id, p.vesicant_irritant, p.iv_attempts]\n row += [r in patient_restart_reasons for r in restart_reasons]\n writer.writerow(row)\n \n return response", "def download(self,connector,condition):\n c= connector.cursor()\n# condition = \" WHERE DIF_ID=%d AND NUM=%d\" % (difid,num)\n snew = buildSelect(self,'HR2',condition)\n# print snew\n c.execute(snew)\n lnames=[]\n for name,val in sorted(self.__dict__.iteritems()):\n lnames.append(name)\n\n vobj=[]\n for row in c:\n # print row\n hr2=HR2Def(0)\n for i in range(len(lnames)):\n #print lnames[i],row[i]\n hr2.__dict__[lnames[i]]=row[i]\n vobj.append(hr2)\n return vobj", "def bring_records_to_file_using_threads():\n username = username_entry.get()\n password = password_entry.get()\n day = int(day_entry.get())\n month = int(month_entry.get())\n year = int(year_entry.get())\n today = datetime.date(year, month, day)\n if username in users:\n if password == users[username]:\n db = Database(database_name)\n data = db.fetch_calculations(day, month, year)\n # print(data)\n # print(today)\n save_to_file(today, data)", "def queryset(self):\n return [\n self.Item('John Doe', TestReport.STATUS_ACTIVE, date(1980, 5, 6), (1, 2)),\n self.Item('Jane Doe', TestReport.STATUS_ACTIVE, date(1981, 2, 21), (Decimal('2'), 4)),\n self.Item('Billy Boe', TestReport.STATUS_ACTIVE, date(1965, 1, 1), (3, 10)),\n self.Item('Jesus Christ', TestReport.STATUS_INACTIVE, date(1946, 12, 25), (Decimal('11.2'), 
Decimal('22.4'))),\n ]", "def dump3(request):\n \n if not request.user.is_authenticated:\n return HttpResponseForbidden()\n else:\n import csv\n from django.http import HttpResponse\n\n answer_list = list(Answer.objects.all())\n \n result = {} # date --> { usernumber --> { datetime -> [{\"argument_number\"}, ...] }\n \n for answer in answer_list:\n mydatetime = u\"%s\" % answer.date\n date = u\"%s\" % answer.date.date()\n\n result.setdefault(date, {})\n\n\n result[date].setdefault(answer.usernumber, {})\n\n result[date][answer.usernumber].setdefault(mydatetime, [])\n\n myanswer_dict = {}\n\n gameNumber = int(\"%d\" % answer.game_number)\n myanswer_dict[\"game_number\"] = gameNumber\n myanswer_dict[\"argument_number\"] = answer.argumentNumber\n myanswer_dict[\"date\"] = answer.date\n\n if answer.validity:\n validity = \"valid\"\n else:\n validity = \"invalid\"\n\n myanswer_dict[\"validity\"] = validity\n\n if answer.correctness:\n correctness = \"Correct\"\n else:\n correctness = \"Incorrect\"\n\n myanswer_dict[\"correctness\"] = correctness\n\n result[date][answer.usernumber][mydatetime].append(myanswer_dict)\n\n # Create the HttpResponse object with the appropriate CSV header.\n response = HttpResponse(\"\", content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=logproplog-data-answers-plus-aggregates.csv'\n\n writer = csv.writer(response)\n first_row = []\n\n first_row.extend(['date', 'datetime', 'usernumber'])\n\n first_row.extend(['game_number', 'argument_number',\n 'validity', 'correctness'])\n\n first_row.extend(['totaltime (s)',\n 'lastcorrectinarow',\n 'answercount',\n 'correctanswercount'])\n \n writer.writerow(first_row)\n \n for date in sorted(result):\n for usernumber in sorted(result[date]):\n last_correct_in_a_row = 0\n answer_count = 0\n correct_answer_count = 0\n min_datetime = None\n max_datetime = None\n for mydatetime in result[date][usernumber]:\n for myanswer_dict in result[date][usernumber][mydatetime]:\n if min_datetime == None:\n min_datetime = myanswer_dict['date']\n elif min_datetime > myanswer_dict['date']:\n min_datetime = myanswer_dict['date']\n else:\n pass\n\n if max_datetime == None:\n max_datetime = myanswer_dict['date']\n elif max_datetime < myanswer_dict['date']:\n max_datetime = myanswer_dict['date']\n else:\n pass\n \n next_row = []\n next_row.append(date)\n next_row.append(mydatetime)\n next_row.append('%d' % usernumber)\n \n next_row.append('%d' % myanswer_dict['game_number'])\n next_row.append('%d' % myanswer_dict['argument_number'])\n next_row.append('%s' % myanswer_dict['validity'])\n next_row.append('%s' % myanswer_dict['correctness'])\n\n writer.writerow(next_row)\n del next_row\n\n answer_count += 1\n\n if myanswer_dict[\"correctness\"] == \"Correct\":\n correct_answer_count += 1\n last_correct_in_a_row += 1\n else:\n last_correct_in_a_row = 0\n\n timedelta = max_datetime - min_datetime\n aggregate_row = []\n aggregate_row.append(date)\n aggregate_row.append('')\n aggregate_row.append(usernumber)\n\n # 'game_number', 'argument_number',\n # 'validity', 'correctness'\n aggregate_row.extend(['', '', '', ''])\n\n # 'totaltime',\n # 'lastcorrectinarow',\n # 'answercount',\n # 'correctanswercount'\n aggregate_row.append('%d' % timedelta.seconds)\n aggregate_row.append('%d' % last_correct_in_a_row)\n aggregate_row.append('%d' % answer_count)\n aggregate_row.append('%d' % correct_answer_count)\n \n writer.writerow(aggregate_row)\n\n del aggregate_row\n \n return response", "def outputExcelReport(self):\n # ++++++++++\n # 
init\n # ++++++++++\n wb = openpyxl.Workbook()\n wb.fonts = openpyxl.styles.Font(\n name = 'Courier New',\n size = 12\n )\n # create and delete sheets\n _ = wb.create_sheet(title='Cover',index=0)\n _ = wb.create_sheet(title='Results',index=1)\n _ = wb.create_sheet(title='AllItems',index=2)\n _ = wb.remove(wb.worksheets[-1])\n # ++++++++++\n # Sheet 1 <Cover>\n # ++++++++++\n ws = wb['Cover']\n # --- title and date\n timeNow = datetime.datetime.now().isoformat().split('T')[0]\n ws.merge_cells('A1:B1')\n ws.merge_cells('A3:B3')\n ws['A1'] = '納入チェック ダイアグ確認結果'\n ws['A3'] = '作成日:{}'.format(timeNow)\n # --- sample info\n ws['A5'] = '<サンプル情報>'\n self._write2excel(ws, self._sample_info, 6, 1)\n for r in range(6,8):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- checker info\n ws['A9'] = '<チェッカ情報>'\n self._write2excel(ws, self._checker_info, 10, 1)\n for r in range(10,13):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- dmm info\n ws['A14'] = '<DMM情報>'\n self._write2excel(ws, self._dmm_info, 15, 1)\n for r in range(15,18):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- resistor info\n ws['A19'] = '<抵抗器情報>'\n self._write2excel(ws, self._resistor_info, 20, 1)\n for r in range(20,23):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n ws[cell.coordinate].font = STYLE_FONT_PASS\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 2 <Results>\n # ++++++++++\n ws = wb['Results']\n # --- output all scenario\n ws['A1'] = '<結果一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._result_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==6:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n ws.cell(cell.row,6).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 3 <AllItems>\n # ++++++++++\n ws = wb['AllItems']\n # --- output all scenario\n ws['A1'] = '<出力一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._scenario_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n 
for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==5:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # save book\n # ++++++++++\n wb.save(self._filename)", "def export_database(self):\n base_path = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', filter='CSV (*.csv)')\n database.export_to_csv(DB_PATH, base_path[0])", "def downloadResponse(request, formcode=None):\n if formcode !=None:\n response = HttpResponse(content_type='text/csv')\n responses = Response.objects.filter(form_id=formcode)\n writer = csv.writer(response)\n writer.writerow(['User', 'Submit Date', 'Answer1', 'Answer2', 'Answer3'])\n for r in responses:\n user = User.objects.get(id=r.user_id)\n writer.writerow([user, r.submitDate, r.answer1 ,r.answer2 , r.answer3])\n\n response['Content-Disposition'] = 'attachment; filename=\"response.csv\"'\n return response \n return render(request, 'download.html')", "def import_excel(self, filepath_excel,database_type):\n if database_type == \"render\":\n try:\n connection = sqlite3.connect(self.filepath_render_database)\n pointer = connection.cursor()\n\n sql_anweisung = \"\"\"\n INSERT INTO render_information (\n object_type,\n name,\n radius,\n polar_angle_min,\n polar_anglel_max,\n polar_angle_segments,\n polar_angle_random_rad,\n azimuth_angle_min,\n azimuth_angle_max,\n azimuth_angle_segments,\n azimuth_angle_random_rad,\n tracking_obj,\n segmentation\n )\n VALUES (\n :object_type,\n :name,\n :radius,\n :polar_angle_min,\n :polar_anglel_max,\n :polar_angle_segments,\n :polar_angle_random_rad,\n :azimuth_angle_min,\n :azimuth_angle_max,\n :azimuth_angle_segments,\n :azimuth_angle_random_rad,\n :tracking_obj,\n :segmentation\n )\n \"\"\"\n with open(filepath_excel) as csvdatei:\n csv_reader_object = csv.reader(csvdatei, delimiter=';')\n next(csv_reader_object)\n pointer.executemany(sql_anweisung, csv_reader_object)\n connection.commit()\n connection.close()\n print(\"render data addet from excel file\")\n except :\n print(\"adding render data from excel file failed\")\n\n elif database_type == \"object\":\n try:\n connection = sqlite3.connect(self.filepath_object_database)\n pointer = connection.cursor()\n\n sql_anweisung = \"\"\"\n INSERT INTO object_information (\n obj_filepath,\n obj_name,\n obj_type,\n obj_scale_factor,\n obj_type,\n obj_location_x,\n obj_location_y,\n obj_location_z,\n obj_rotation_x,\n obj_rotation_y,\n obj_rotation_z,\n obj_amount_percent,\n obj_material_path,\n obj_point_in_time,\n maximum_random_rotation_degree_z,\n maximum_random_translation,\n 
random_amount\n )\n VALUES (\n :obj_filepath,\n :obj_name,\n :obj_type,\n :obj_scale_factor,\n :obj_type,\n :obj_location_x,\n :obj_location_y,\n :obj_location_z,\n :obj_rotation_x,\n :obj_rotation_y,\n :obj_rotation_z,\n :obj_amount_percent,\n :obj_material_path,\n :obj_point_in_time,\n :maximum_random_rotation_degree_z,\n :maximum_random_translation,\n :random_amount\n )\n \"\"\"\n with open(filepath_excel) as csvdatei:\n csv_reader_object = csv.reader(csvdatei, delimiter=';')\n print(csv_reader_object)\n next(csv_reader_object)\n pointer.executemany(sql_anweisung, csv_reader_object)\n connection.commit()\n connection.close()\n print(\"object data added from excel file\")\n except :\n print(\"adding object data from excel file failed\")\n\n else:\n print(\"no Database found, maybe check spelling in method call??\")\n return", "def createDates(self, data: QDate=None):\n if data is None:\n data = self.oggi\n # print('CREATEDATES DATA', data)\n dateList = MeseGiorniDictGen.bigList(data)\n return dateList", "def on_show_eqp_datasheet_export(self):\n from EqpDatasheetExportDialog import QEqpDatasheetExportDialog\n\n dlg = QEqpDatasheetExportDialog(self)\n dlg.exec_()", "def object_export(request, simulation, object_name):\n query = get_query(object_name, simulation)\n # To avoid conflict if two users export a file at the same time, we\n # generate a random name for the export file.\n seed = np.random.randint(10000)\n filename = '{0}/website_files/exports/{1}.tsv'.format(settings.BASE_DIR,\n seed)\n with codecs.open(filename, 'w', encoding='utf8') as f:\n if object_name == 'centroid':\n fields = ['id', 'name', 'x', 'y', 'db_id']\n elif object_name == 'crossing':\n fields = ['id', 'name', 'x', 'y', 'db_id']\n elif object_name == 'link':\n fields = ['id', 'name', 'origin', 'destination', 'lanes', 'length',\n 'speed', 'capacity', 'vdf']\n elif object_name == 'function':\n fields = ['id', 'expression']\n writer = csv.writer(f, delimiter='\\t')\n if object_name in ('centroid', 'crossing'):\n writer.writerow(['id', 'name', 'x', 'y', 'db_id'])\n values = query.values_list('user_id', 'name', 'x', 'y', 'id')\n elif object_name == 'function':\n writer.writerow(['id', 'name', 'expression'])\n values = query.values_list('user_id', 'name', 'expression')\n elif object_name == 'link':\n writer.writerow(['id', 'name', 'lanes', 'length', 'speed',\n 'capacity', 'function', 'origin', 'destination'])\n values = query.values_list('user_id', 'name', 'lanes', 'length',\n 'speed', 'capacity', 'vdf__user_id')\n # Origin and destination id must be converted to user_id.\n centroids = get_query('centroid', simulation)\n crossings = get_query('crossing', simulation)\n ids = list(centroids.values_list('id', 'user_id'))\n ids += list(crossings.values_list('id', 'user_id'))\n # Map id of nodes to their user_id.\n id_mapping = dict(ids)\n origins = query.values_list('origin', flat=True)\n origins = np.array([id_mapping[n] for n in origins])\n destinations = query.values_list('destination', flat=True)\n destinations = np.array([id_mapping[n] for n in destinations])\n # Add origin and destination user ids to the values array.\n origins = np.transpose([origins])\n destinations = np.transpose([destinations])\n values = np.hstack([values, origins, destinations])\n writer.writerows(values)\n with codecs.open(filename, 'r', encoding='utf8') as f:\n # Build a response to send a file.\n response = HttpResponse(f.read())\n response['content_type'] = 'text/tab-separated-values'\n response['Content-Disposition'] = \\\n 'attachement; 
filename={}.tsv'.format(metro_to_user(object_name))\n # We delete the export file to save disk space.\n os.remove(filename)\n return response" ]
[ "0.7596945", "0.6799895", "0.65263826", "0.6513705", "0.6357183", "0.62692904", "0.61503154", "0.60432583", "0.5946482", "0.5921336", "0.58059806", "0.5805394", "0.5800253", "0.5739457", "0.57385826", "0.57053816", "0.5653567", "0.56533146", "0.56427974", "0.5640495", "0.56375164", "0.56315285", "0.5628419", "0.5618763", "0.5615743", "0.5606126", "0.55929095", "0.5578895", "0.55558044", "0.55276084", "0.55195177", "0.55185074", "0.5517387", "0.5510379", "0.5492106", "0.5489197", "0.5426536", "0.5404016", "0.5398346", "0.5386845", "0.5383161", "0.5378391", "0.53296936", "0.5329001", "0.5316723", "0.5313373", "0.5302527", "0.52961016", "0.5292815", "0.5269756", "0.5258976", "0.52579355", "0.52500904", "0.52448237", "0.5236059", "0.5233468", "0.5232686", "0.5225019", "0.52213895", "0.5209911", "0.5206977", "0.5205515", "0.5191455", "0.51664203", "0.5160294", "0.5158514", "0.51582026", "0.5155522", "0.5155432", "0.51505214", "0.5149905", "0.51472324", "0.5146348", "0.51411164", "0.51339495", "0.5123706", "0.51143074", "0.5098308", "0.50976807", "0.5095006", "0.50911343", "0.5089502", "0.5089297", "0.50789434", "0.5077274", "0.5075408", "0.5073754", "0.5062834", "0.50615346", "0.5061213", "0.50554377", "0.5054726", "0.5054479", "0.5049673", "0.5046184", "0.5044225", "0.5043269", "0.50380033", "0.50268084", "0.5003334" ]
0.6348618
5
Originates from external call from trigger system
def start(self, data): log.info(data) self.stop() self.time_start = time.time() - data.get('time_offset', 0) - self.time_offset self.bpm = float(data.get('bpm', self.DEFAULT_BPM)) self.timesigniture = parse_timesigniture(data.get('timesigniture', DEFAULT_TIMESIGNITURE)) if data.get('sequence'): sequence_name = data.get('sequence') assert sequence_name in self.sequences, '{0} is not a known sequence'.format(sequence_name) self.sequence = self.sequences[sequence_name] if data.get('scene'): # Single scene - Fake the sequence list by inserting the name of the single scene required self.sequence = (data.get('scene', self.DEFAULT_SCENE_NAME), ) self.sequence_index = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, trigger, type, event):", "def triggered(self, *args, **kwargs): # real signature unknown\n pass", "def call(self):", "def trigger(self, type, event):", "def fire(self):", "def __do_trigger(self, request):\n dmp_trigger.DmpTrigger().trigger(request)\n return defines.ReturnCode.SUCC", "def on_execute(self):\n pass", "def on(self) -> None:", "def Trigger(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('trigger', payload=payload, response_object=None)", "def fire(self):\n pass", "def arm(self):\n error(\"Attempted to access abstract trigger arming function.\")", "def post_execute(self):", "def on(self):", "def execute(self):", "def execute(self):", "def execute(self):", "def execute(self):", "def perform(self):\n pass", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def _hook(self):", "def on_run(self):\r\n\r\n\t\tpass", "def call(self) -> global___Snippet.ClientCall:", "def call(self) -> global___Snippet.ClientCall:", "def on_call_update(self, event):\n # if plivo_app != 'true', check b leg Dial callback\n plivo_app_flag = event['variable_plivo_app'] == 'true'\n if not plivo_app_flag:\n # request Dial callbackUrl if needed\n aleg_uuid = event['Bridged-To']\n if not aleg_uuid:\n return\n bleg_uuid = event['Unique-ID']\n if not bleg_uuid:\n return\n disposition = event['variable_endpoint_disposition']\n if disposition != 'ANSWER':\n return\n ck_url = event['variable_plivo_dial_callback_url']\n if not ck_url:\n return\n ck_method = event['variable_plivo_dial_callback_method']\n if not ck_method:\n return\n params = {'DialBLegUUID': bleg_uuid,\n 'DialALegUUID': aleg_uuid,\n 'DialBLegStatus': 'answer',\n 'CallUUID': aleg_uuid\n }\n # add extra params\n extra_params = self.get_extra_fs_vars(event)\n if extra_params:\n params.update(extra_params)\n spawn_raw(self.send_to_url, ck_url, params, ck_method)\n return", "def on_before_execution(self):\n pass", "def execute(self):\n\t\tpass", "def handle(self):", "def on_hook(self) -> None:", "def execute(self):\r\n pass", "def after_send(self):", "def callback(self):\n pass # pragma: no cover", "def on_success(self) -> None:", "def on(self) -> None:\n ...", "def on_run(self):\n pass", "def place_call_onhold(self) -> None:", "def RUN(self):", "def _get_trigger(self, cursor):\n raise NotImplementedError", "def analogueTriggerChangeHandler(val):\n print(\"Analogue Trigger Value Changed: {}\".format(val) )", "def place_call_offhold(self) -> None:", "def test_change_trigger(self):\n self._test_change_trigger(False)", "def onSlave(self):", "def _do_action(self):\n pass", "def _do_action(self):\n pass", "def _execute(self, _):\r\n pass", "def test_change_trigger_carefully(self):\n self._test_change_trigger(True)", "def _remoteScript(self, source_script):", "def trigger_service(call):\n event = call.data.get(ATTR_EVENT)\n value1 = call.data.get(ATTR_VALUE1)\n value2 = call.data.get(ATTR_VALUE2)\n value3 = call.data.get(ATTR_VALUE3)\n if event is None:\n return\n\n try:\n import pyfttt as pyfttt\n pyfttt.send_event(key, event, value1, value2, value3)\n except requests.exceptions.RequestException:\n _LOGGER.exception(\"Error communicating with IFTTT\")", "def run(self): \r\n return", "def __init__(self, temboo_session):\n super(UpdateTrigger, self).__init__(temboo_session, '/Library/Xively/Triggers/UpdateTrigger')", "def execute():", "def changeTrigger(self):\n def reg(letter,number):\n \"\"\"\n Returns an integer for a cell value\n \n Usable triggers on the insight 
explorer software\n * 0: Camera\n * 1: Continous\n * 2: External\n * 3: Manual\n * 4: Network\n \n \"\"\"\n v=ord(letter.lower())-ord('a')\n v=v<<10\n v+=number\n return v\n v = reg(\"A\",3)\n rospy.sleep(0.1)\n self.setOutput(v, [3])\n rospy.sleep(0.6)\n if self.sim:\n self.setOutput(v, [4])\n else:\n self.setOutput(v, [1])", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def __call__(object):", "def take_action(self, *args, **kwargs):\r\n pass", "def trigger_build(self, postdata):\n pass", "def change():", "def handle(self) -> None:", "def on_success(self):\n pass", "def _run_action_external(self):\n action = self.act_kwargs['action_external']\n logger.debug('running external action %s on file %s' % (action, self.file_name))", "def polling_call(self) -> global___Snippet.ClientCall:", "def perform_callback(self, *args, **kwargs):\n pass", "def before_send(self):", "def __call__(self):\n\t\treturn", "def __call__(self):\n if grinder.runNumber == 0: self.initialSleep()\n (param1, param2) = self.getParam()\n self.request1(param1, param2)", "def pre_execute(self):", "def call(self, *args, **kwargs):", "def call(self):\n self.call() # Call a function", "def __call__( self ):\n pass", "def on_invoke(self, ins, const, obj, args):\n pass", "def _invoke(self):\n with self.storlet_logger.activate(),\\\n self._activate_invocation_descriptors():\n self._send_execute_command()", "def fire_trigger(self, trigger):\n if not self.exists():\n return\n if trigger in self.events:\n for action in self.events[trigger]:\n action(requestor=self)", "def _during_execute(self, db):\n pass", "def run_local(self, message):", "def perform(self):\n return", "def perform(self):\n return", "def script(self):", "def monitor(self):", "def execute(self):\n\n pass", "def execute(self):\n\n pass", "def execute(self):\n\n pass", "def execute(self):\n\n pass", "def manual_trigger(pg):\n\tpg.write('trig:seq:imm') #execute\n\treturn", "def _process(self, activity):", "def main():\n Fire(cli)", "def communicate():\n pass", "def on_manual_dialog(self, request, trigger_context):\n raise NotImplementedError", "def __call__(self) -> None:", "def cmd(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):" ]
[ "0.70280033", "0.6894477", "0.6630215", "0.65832853", "0.6425316", "0.64154744", "0.6267138", "0.62295663", "0.616958", "0.6122845", "0.6121851", "0.6086105", "0.60141927", "0.5989397", "0.5989397", "0.5989397", "0.5989397", "0.5890496", "0.5888235", "0.5888235", "0.5888235", "0.5888235", "0.5888235", "0.58862466", "0.58824253", "0.5867491", "0.5867491", "0.5855708", "0.58479476", "0.5842817", "0.58406967", "0.58356535", "0.58222073", "0.58214235", "0.58083737", "0.5808216", "0.5801205", "0.57734865", "0.573483", "0.57268035", "0.5726258", "0.5708648", "0.5707", "0.5691239", "0.569058", "0.5687405", "0.5687405", "0.56858224", "0.56775326", "0.5664959", "0.56590414", "0.5650927", "0.56282514", "0.56264794", "0.56218916", "0.5611706", "0.5611706", "0.5611706", "0.5611706", "0.5611706", "0.5611706", "0.5589923", "0.5582916", "0.55800325", "0.55779034", "0.55765", "0.55650574", "0.55641884", "0.55575645", "0.5553132", "0.5547896", "0.5537557", "0.55354613", "0.5530165", "0.55094606", "0.55072623", "0.5497105", "0.54924226", "0.54884744", "0.5487914", "0.5485952", "0.547899", "0.5478293", "0.5478293", "0.5477248", "0.5471381", "0.5460717", "0.5460717", "0.5460717", "0.5460717", "0.5445904", "0.54390514", "0.54383934", "0.5429923", "0.54251623", "0.5422204", "0.5419305", "0.5406471", "0.5406471", "0.5406471", "0.5406471" ]
0.0
-1
Originates from external call from trigger system
def stop(self, data={}): self.time_start = 0 self.time_mutator = 0 self.sequence = () self.sequence_index = None self.bpm = self.DEFAULT_BPM self.timesigniture = DEFAULT_TIMESIGNITURE_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, trigger, type, event):", "def triggered(self, *args, **kwargs): # real signature unknown\n pass", "def call(self):", "def trigger(self, type, event):", "def fire(self):", "def __do_trigger(self, request):\n dmp_trigger.DmpTrigger().trigger(request)\n return defines.ReturnCode.SUCC", "def on_execute(self):\n pass", "def on(self) -> None:", "def Trigger(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('trigger', payload=payload, response_object=None)", "def fire(self):\n pass", "def arm(self):\n error(\"Attempted to access abstract trigger arming function.\")", "def post_execute(self):", "def on(self):", "def execute(self):", "def execute(self):", "def execute(self):", "def execute(self):", "def perform(self):\n pass", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def _hook(self):", "def on_run(self):\r\n\r\n\t\tpass", "def call(self) -> global___Snippet.ClientCall:", "def call(self) -> global___Snippet.ClientCall:", "def on_call_update(self, event):\n # if plivo_app != 'true', check b leg Dial callback\n plivo_app_flag = event['variable_plivo_app'] == 'true'\n if not plivo_app_flag:\n # request Dial callbackUrl if needed\n aleg_uuid = event['Bridged-To']\n if not aleg_uuid:\n return\n bleg_uuid = event['Unique-ID']\n if not bleg_uuid:\n return\n disposition = event['variable_endpoint_disposition']\n if disposition != 'ANSWER':\n return\n ck_url = event['variable_plivo_dial_callback_url']\n if not ck_url:\n return\n ck_method = event['variable_plivo_dial_callback_method']\n if not ck_method:\n return\n params = {'DialBLegUUID': bleg_uuid,\n 'DialALegUUID': aleg_uuid,\n 'DialBLegStatus': 'answer',\n 'CallUUID': aleg_uuid\n }\n # add extra params\n extra_params = self.get_extra_fs_vars(event)\n if extra_params:\n params.update(extra_params)\n spawn_raw(self.send_to_url, ck_url, params, ck_method)\n return", "def on_before_execution(self):\n pass", "def execute(self):\n\t\tpass", "def handle(self):", "def on_hook(self) -> None:", "def execute(self):\r\n pass", "def after_send(self):", "def callback(self):\n pass # pragma: no cover", "def on_success(self) -> None:", "def on(self) -> None:\n ...", "def on_run(self):\n pass", "def place_call_onhold(self) -> None:", "def RUN(self):", "def _get_trigger(self, cursor):\n raise NotImplementedError", "def analogueTriggerChangeHandler(val):\n print(\"Analogue Trigger Value Changed: {}\".format(val) )", "def place_call_offhold(self) -> None:", "def test_change_trigger(self):\n self._test_change_trigger(False)", "def onSlave(self):", "def _do_action(self):\n pass", "def _do_action(self):\n pass", "def _execute(self, _):\r\n pass", "def test_change_trigger_carefully(self):\n self._test_change_trigger(True)", "def _remoteScript(self, source_script):", "def trigger_service(call):\n event = call.data.get(ATTR_EVENT)\n value1 = call.data.get(ATTR_VALUE1)\n value2 = call.data.get(ATTR_VALUE2)\n value3 = call.data.get(ATTR_VALUE3)\n if event is None:\n return\n\n try:\n import pyfttt as pyfttt\n pyfttt.send_event(key, event, value1, value2, value3)\n except requests.exceptions.RequestException:\n _LOGGER.exception(\"Error communicating with IFTTT\")", "def run(self): \r\n return", "def __init__(self, temboo_session):\n super(UpdateTrigger, self).__init__(temboo_session, '/Library/Xively/Triggers/UpdateTrigger')", "def execute():", "def changeTrigger(self):\n def reg(letter,number):\n \"\"\"\n Returns an integer for a cell value\n \n Usable triggers on the insight 
explorer software\n * 0: Camera\n * 1: Continous\n * 2: External\n * 3: Manual\n * 4: Network\n \n \"\"\"\n v=ord(letter.lower())-ord('a')\n v=v<<10\n v+=number\n return v\n v = reg(\"A\",3)\n rospy.sleep(0.1)\n self.setOutput(v, [3])\n rospy.sleep(0.6)\n if self.sim:\n self.setOutput(v, [4])\n else:\n self.setOutput(v, [1])", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def __call__(object):", "def take_action(self, *args, **kwargs):\r\n pass", "def trigger_build(self, postdata):\n pass", "def change():", "def handle(self) -> None:", "def on_success(self):\n pass", "def _run_action_external(self):\n action = self.act_kwargs['action_external']\n logger.debug('running external action %s on file %s' % (action, self.file_name))", "def polling_call(self) -> global___Snippet.ClientCall:", "def perform_callback(self, *args, **kwargs):\n pass", "def before_send(self):", "def __call__(self):\n\t\treturn", "def __call__(self):\n if grinder.runNumber == 0: self.initialSleep()\n (param1, param2) = self.getParam()\n self.request1(param1, param2)", "def pre_execute(self):", "def call(self, *args, **kwargs):", "def call(self):\n self.call() # Call a function", "def __call__( self ):\n pass", "def on_invoke(self, ins, const, obj, args):\n pass", "def _invoke(self):\n with self.storlet_logger.activate(),\\\n self._activate_invocation_descriptors():\n self._send_execute_command()", "def fire_trigger(self, trigger):\n if not self.exists():\n return\n if trigger in self.events:\n for action in self.events[trigger]:\n action(requestor=self)", "def _during_execute(self, db):\n pass", "def run_local(self, message):", "def perform(self):\n return", "def perform(self):\n return", "def script(self):", "def monitor(self):", "def execute(self):\n\n pass", "def execute(self):\n\n pass", "def execute(self):\n\n pass", "def execute(self):\n\n pass", "def manual_trigger(pg):\n\tpg.write('trig:seq:imm') #execute\n\treturn", "def _process(self, activity):", "def main():\n Fire(cli)", "def communicate():\n pass", "def on_manual_dialog(self, request, trigger_context):\n raise NotImplementedError", "def __call__(self) -> None:", "def cmd(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):" ]
[ "0.70280033", "0.6894477", "0.6630215", "0.65832853", "0.6425316", "0.64154744", "0.6267138", "0.62295663", "0.616958", "0.6122845", "0.6121851", "0.6086105", "0.60141927", "0.5989397", "0.5989397", "0.5989397", "0.5989397", "0.5890496", "0.5888235", "0.5888235", "0.5888235", "0.5888235", "0.5888235", "0.58862466", "0.58824253", "0.5867491", "0.5867491", "0.5855708", "0.58479476", "0.5842817", "0.58406967", "0.58356535", "0.58222073", "0.58214235", "0.58083737", "0.5808216", "0.5801205", "0.57734865", "0.573483", "0.57268035", "0.5726258", "0.5708648", "0.5707", "0.5691239", "0.569058", "0.5687405", "0.5687405", "0.56858224", "0.56775326", "0.5664959", "0.56590414", "0.5650927", "0.56282514", "0.56264794", "0.56218916", "0.5611706", "0.5611706", "0.5611706", "0.5611706", "0.5611706", "0.5611706", "0.5589923", "0.5582916", "0.55800325", "0.55779034", "0.55765", "0.55650574", "0.55641884", "0.55575645", "0.5553132", "0.5547896", "0.5537557", "0.55354613", "0.5530165", "0.55094606", "0.55072623", "0.5497105", "0.54924226", "0.54884744", "0.5487914", "0.5485952", "0.547899", "0.5478293", "0.5478293", "0.5477248", "0.5471381", "0.5460717", "0.5460717", "0.5460717", "0.5460717", "0.5445904", "0.54390514", "0.54383934", "0.5429923", "0.54251623", "0.5422204", "0.5419305", "0.5406471", "0.5406471", "0.5406471", "0.5406471" ]
0.0
-1
When a track is started we receve a lighting.start event The audio in the html5 player may have a seek time. This is to start part way into a test recording This seek time update is fired the moment the audio starts playing and within milliseconds of the lighting.start event. This 'first seek' sent within milliseconds of the 'start' event should not actually seek in the lighting scene. As self.time_start is calculated with global offset. We need to know the ACTUAL time since the 'trigger started' to detect our seek to ignore
def _seek(self, time_offset): if (time.time() - (self.time_start + self.time_offset)) < 0.1: log.info('Seek recived within 100ms of start - Assuming this is a bounceback from test_audio - applying automatic time mutator of {0}s'.format(time_offset)) self.time_mutator = time_offset self.time_start = time.time() - time_offset log.info('seek {0}'.format(time_offset))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setTrackStartTime() :\n s.startTrack()", "def audio_cd_start_track_time(self, audio_cd_start_track_time):\n self._audio_cd_start_track_time = audio_cd_start_track_time", "def seek_to_start_time(self):\n return 0", "def start_time(self):\n return RPR.GetAudioAccessorStartTime(self.id)", "async def track_start(self):\n await self.wait_until_ready()\n self.start_time = datetime.datetime.utcnow()", "def start_time(self) -> float:\r\n ...", "def audio_cd_start_track(self, audio_cd_start_track):\n self._audio_cd_start_track = audio_cd_start_track", "def _calculateStarttime(self):\n self.corrected_starttime = deepcopy(\\\n self.fixed_header['Record start time'])\n # Check whether or not the time correction has already been applied.\n if not self.fixed_header['Activity flags'] & 2:\n # Apply the correction.\n self.corrected_starttime += \\\n self.fixed_header['Time correction'] * 0.0001\n # Check for blockette 1001.\n if 1001 in self.blockettes:\n self.corrected_starttime += self.blockettes[1001]['mu_sec'] * \\\n 1E-6", "def start(self):\n# if self._start_time is not None:\n self._start_time = time.perf_counter()", "def start(self):\r\n self.start_time = time.time()", "def start_timer(self):\n self.start_time = time.time()", "def set_start_time():\n __start = current_time_milli()", "def get_start_time(self):\n # Timezone and BST not accounted for. Always gives it as GMT.\n create_time = (os.path.getmtime(self.file_path))\n start_time = create_time - len(self.amplitude) / self.fs\n return datetime.fromtimestamp(start_time)", "def on_update_playback(self, event):\n try:\n offset = self.mplayer.GetTimePos()\n except:\n return\n print offset\n mod_off = str(offset)[-1]\n if mod_off == '0':\n print \"mod_off\"\n offset = int(offset)\n self.playbackSlider.SetValue(offset)\n secsPlayed = time.strftime('%M:%S', time.gmtime(offset))\n self.trackCounter.SetLabel(secsPlayed)", "def onTimeStepStart(self, timeStep):\n pass", "def mark_test_started(self) -> None:\n # Blow up instead of blowing away a previously set start_time_millis.\n assert self.test_record.start_time_millis == 0\n self.test_record.start_time_millis = util.time_millis()\n self.notify_update()", "def track_start_time(self, test_class, test_name, start_time):\n if test_class is None or test_name is None:\n return\n\n test_key = \"{}.{}\".format(test_class, test_name)\n self.start_time_by_test[test_key] = start_time", "def start(self):\n self.timeStart = pygame.time.get_ticks()", "def StartTimer(self):\n self._start_time = time.time()", "def faceTrackingStarted(faceSize):\n\n\t# First, wake up\n\t#motionProxy.wakeUp()\n\t#motionProxy.rest()\n\n\t# Add target to track\n\ttargetName = \"Face\"\n\tfaceWidth = faceSize\n\ttracker.registerTarget(targetName, faceWidth)\n\n\t# Then, start tracker\n\ttracker.track(targetName)", "def timestart(self, timestamp=0):\n self._p('[timestart] {}'.format(timestamp))", "def start_time(self, start_time):\n self._start_time = start_time", "def start_time(self, start_time):\n self._start_time = start_time", "def start_loop_recording(self, track):\n pass", "def _start_clock(self):\n self._start = time.time()", "def start_time(self) -> float:\n return self._start_time", "def start_timer(self):\n self.start_time = datetime.now()", "def start_track(self):\n \n self.open()\n fcntl.ioctl(self.f_dev, self.MMLOG_SET_LOG_LEVEL, KERN_ALERT)", "def start(self):\n self.start_time = time.time()", "def set_start_time(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_set_start_time(self, *args, **kwargs)", "def 
set_start_time(self, timestamp):\n self.start_day = int(timestamp[8:10])\n hour = int(timestamp[11:13])\n minute = int(timestamp[14:16])\n second = int(timestamp[17:19])\n usecond = float(int(timestamp[21:])) / 1000000\n self.start_time = float(hour * 3600 + minute * 60 + second) + usecond", "def min_time(self):\n #{{{ function to return time of first sample\n\n return self.mintime", "def start(self):\n \n if self.started:\n return\n \n self.clock_time = 0.\n self.virtual_time = 0.\n self.game_time = 0.\n self.game_frame_count = 0\n self.real_time_passed = 0.\n \n self.real_time = self.get_real_time()\n self.started = True\n \n self.fps = 0.0\n self.fps_sample_start_time = self.real_time\n self.fps_sample_count = 0", "def start_time(self, start_time):\n\n self._start_time = start_time", "def start_time(self, start_time):\n\n self._start_time = start_time", "def start_time(self, start_time):\n\n self._start_time = start_time", "def start_time(self, start_time):\n\n self._start_time = start_time", "def start_time(self, start_time):\n\n self._start_time = start_time", "def recordStart(self, event_key):\n self.start_times[event_key] = time.time()", "def start(self):\n return self.trial.start + timedelta(seconds=self.start_checkpoint)", "def start_time(self, value):\n self._start_time = value", "def startTime(self) -> float:\n try: return self.times[0]\n except IndexError: return 0.0", "def start_playing(self, mouse):\n self.start = True", "def getStartTime(self):\n return _osgAnimation.Vec3LinearSampler_getStartTime(self)", "def starts(self):\n return self.time_start", "def started_on(self):\n return self.get_time(\"started_on\")", "def start_time_stamp(self, start_time_stamp):\n\n self._start_time_stamp = start_time_stamp", "def videotime_start(self):\n # type: () -> int\n return self._videotime_start", "def get_reltriggertimes(self):\n return np.array(self.trtimes)-self.soundstarttime", "def getStart(self) -> long:\n ...", "def time_start(self):\n return self._time_start", "def start_time(self):\n pass", "def set_start(self, ts):\n base_key = self.floor_time(ts)\n if self.first_timestamp is None or base_key < self.first_timestamp:\n self.first_timestamp = base_key", "def start_time(self):\n return self.__start", "def getStartTime(self):\n return _osgAnimation.QuatSphericalLinearSampler_getStartTime(self)", "def set_start_time(self, *args, **kwargs):\n return _uhd_swig.usrp_source_set_start_time(self, *args, **kwargs)", "def get_starttime(self):\n filetime = datetime.datetime.strptime(self.filenametime,\n \"%Y%m%d_%H%M%S\")\n if self.ldat_type != 'acc':\n starttime = filetime\n else:\n starttime = filetime - datetime.timedelta(seconds=512)\n return starttime", "def start(self):\n global trackWidth\n trackWidth = self.getTrackWidth()\n print(\"track width = \" + str(trackWidth))\n #motors.moveForward(0,2)\n initTheta = self.getTheta(trackWidth)\n motors.pivot(\"left\", 30, 0.25) #spin left for 1 second\n print(\"Moved\")\n newTheta = self.getTheta(trackWidth)\n #Checks if the robot is pointed even further of course or not, corrects for whichever\n if newTheta < initTheta:\n while self.getTheta(trackWidth) >=rads: #Spins while the robot is pointed more than 0.122 rads from straight\n motors.pivot(\"left\", 30, 0.25) #spin left for 0.25 second\n elif newTheta > initTheta:\n while self.getTheta(trackWidth) >= rads:\n motors.pivot(\"right\", 30, 0.25) #spin right for 0.25 second", "def trigger_recording_started(_):\n log_threadsafe(obs.LOG_DEBUG, 'Recording started')\n \n global state\n with 
mutex_state_sending:\n state = int(time.time())\n pipe_send_state()", "def get_time(self) -> float:\n # if the controller is playing we must play the music if paused\n if self.controller.playing and self.music.paused:\n self.music.set_time(self.controller.time)\n self.music.start()\n return self.controller.time\n\n # If the controller is not playing and music is not paused, we need to pause music\n if not self.controller.playing and not self.music.paused:\n self.music.pause()\n self.music.set_time(self.controller.time)\n return self.controller.time\n\n rt = super().get_time()\n t = self.music.get_time()\n\n if abs(rt - t) > 0.1:\n # print(\"Music out of sync!!!\", t, rt)\n self.music.set_time(rt)\n return rt\n\n return t", "def getStartTime(self):\n return _osgAnimation.Vec2LinearSampler_getStartTime(self)", "def initialize(self):\n self._startTime = time.time()", "def __get_starting_time(self):\n return self.__starting_time", "def onTimeStepStart(self, timeStep):\n self.queuedInTimeStep = 0\n self.dequeuedInTimeStep = 0\n \n pass", "def get_linear_track_on_zero(self):\r\n return self._arm.get_linear_track_on_zero()", "def initialize(self):\n self._start = time.time()", "def analysisStartTime(self, val: WQXTime) -> None:\r\n self.__analysisStartTime = None if val is None else WQXTime(val)", "def start(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].start()\n self.globalTimer = Timer(1, self.step)\n self.globalTimer.start()", "def start_record_trajectory(self):\r\n return self._arm.start_record_trajectory()", "def get_track_start(line):\n\n assert line is not None\n # each index line for title should begin with this ' INDEX 0'\n assert line[:11] == ' INDEX 0'\n # there could higher-numbered indexes but I haven't seen any in QUE files\n assert line[11] == '0' or line[11] == '1'\n # were not interested int pregap track start time\n if line[11] == '0':\n return\n # making sure we have the right index, which is required\n assert line[11] == '1'\n\n # expecting the remainder to contain only something that fits the\n # following format dd:dd:dd\n assert line[12:].strip().count(':') == 2\n\n minutes, seconds, frames = line[12:].strip().split(':')\n\n # checking every value ensuring its numeric by composition\n assert minutes.isdigit()\n assert seconds.isdigit()\n assert frames.isdigit()\n\n minutes, seconds, frames = int(minutes), int(seconds), int(frames)\n\n # doesn't hurt to check the value ranges\n assert minutes >= 0\n assert seconds >= 0 and seconds <= 59\n assert frames >= 0 and frames <= 75\n\n frames_to_sec = frames / 75\n\n assert isinstance(frames_to_sec, float)\n\n frames_to_sec = round(frames_to_sec, 6)\n\n # terrible hack to find the mantissa part however its fast enough?\n frames_to_sec = str(frames_to_sec).split('.')\n assert len(frames_to_sec) == 2\n assert frames_to_sec[1]\n\n # we want to cap the number of digits we get back to six. 
I don't thinks\n # Audacity cares about values after this precision\n frames_to_sec = frames_to_sec[1][:6]\n\n assert len(frames_to_sec) <= 6\n\n # padding with zeros to make the output look aligned\n frames_to_sec = frames_to_sec.ljust(6, '0')\n\n assert len(frames_to_sec) == 6\n\n return '%d.%s' % (minutes * 60 + seconds, frames_to_sec)", "def cal_start(self):\n return self.setup_start", "def test_get_start_true(self):\n\n tt = TemperatureTracker()\n tt.start()\n self.assertIsNotNone(tt.get_start())", "def start(self, data):\n log.info(data)\n self.stop()\n self.time_start = time.time() - data.get('time_offset', 0) - self.time_offset\n self.bpm = float(data.get('bpm', self.DEFAULT_BPM))\n self.timesigniture = parse_timesigniture(data.get('timesigniture', DEFAULT_TIMESIGNITURE))\n if data.get('sequence'):\n sequence_name = data.get('sequence')\n assert sequence_name in self.sequences, '{0} is not a known sequence'.format(sequence_name)\n self.sequence = self.sequences[sequence_name]\n if data.get('scene'):\n # Single scene - Fake the sequence list by inserting the name of the single scene required\n self.sequence = (data.get('scene', self.DEFAULT_SCENE_NAME), )\n self.sequence_index = 0", "def start_time(self):\n return self.time_parser.start_time", "def start(self):\n return self._config.trace_start", "def start_time(self) -> float:\n return float(self.get_from_redis(\"start_time\"))", "def getStartTime(self):\n return _osgAnimation.Vec4LinearSampler_getStartTime(self)", "def getStartTime(self):\n assert not self.isWaitingToStart(), \"Too early to tell: %s\" % self\n return \"%s\" % self.__rawInfo.startTime", "def seek(self,event):\r\n if self.app.controlLock.locked():\r\n return\r\n self.app.controlLock.acquire()\r\n x = event.x\r\n scalex,_ = self.getScale()\r\n scalex_secs = [scalex[0]/self.samplerate,scalex[1]/self.samplerate]# Get x scale in seconds\r\n seekTo = (x/self.w) * (scalex_secs[1]-scalex_secs[0]) + scalex_secs[0]# Transform pixel coordinates to represented time\r\n self.app.videoPlayer.pause()\r\n self.app.videoPlayer.seek(seekTo-self.app.dataOffset)\r\n self.app.videoPlayer.pause()# Restart audio to sync\r\n self.update(self.app.videoPlayer.startTimestamp)\r\n self.draw()\r\n self.app.videoPlayer.play()\r\n self.app.controlLock.release()", "def start(self):\n moment = self.tz_match(self.moment)\n\n delta_to_start = timedelta(minutes=moment.minute % self.freq_minutes,\n seconds=moment.second,\n microseconds=moment.microsecond)\n\n start = moment - delta_to_start\n return start", "def start_time(self) -> datetime:\n return self.root_hartree.start_time", "def t0(self):\n return self._time_axis.start", "def __init__(self, start_time=None):\n if start_time is None:\n self.started = time.time()\n else:\n self.started = start_time", "def timing_since(self, stat, start, sample_rate=1):\n self.timing(stat, int((time.time() - start) * 1000000), sample_rate)", "def start(self) -> global___Pos:", "def timer_startIfNeeded():\n nonlocal b_timerStart\n for k, v in kwargs.items():\n if k == 'timerStart': b_timerStart = bool(v)\n if b_timerStart:\n other.tic()", "def lap(self):\n oldtime = self._clock() - self._starttime\n self._starttime = self._clock()\n return oldtime", "def start_ms(self) -> int:\n return self._start_ms", "def pre_step(self,status):\n self.t0 = time.time()\n pass", "def started(self):\n if not self.start_ts:\n return None\n return datetime.utcfromtimestamp(self.start_ts)", "def start(self):\n if self._offset == 0 and self._interval == 0:\n raise ValueError(\"timer 
will not fire because offset and interval are both zero\")\n \n self._apply_schedule()\n self._started = True", "def __stream_triggered(self):\n # Call this every time period\n thread = Timer(self.stream_time, self.__stream_triggered)\n thread.start()\n self.__threads.append(thread)\n\n if len(self.__spike_buffer) > 2:\n speed = self.__get_speed()\n print(speed)\n self.__stream_send(speed)", "def get_linear_track_pos(self):\r\n return self._arm.get_linear_track_pos()", "def analysisStartTime(self) -> WQXTime:\r\n return self.__analysisStartTime", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time" ]
[ "0.680665", "0.66605866", "0.6150333", "0.6049713", "0.59493154", "0.58593935", "0.58006895", "0.57242954", "0.56212443", "0.55866975", "0.5570046", "0.55602914", "0.55241376", "0.5506695", "0.55048025", "0.55008477", "0.54706705", "0.54700154", "0.5436743", "0.538599", "0.53857064", "0.5373308", "0.5373308", "0.5368477", "0.5315364", "0.53132516", "0.5304299", "0.52330905", "0.5228638", "0.5222458", "0.52167225", "0.5211725", "0.52116", "0.52106375", "0.52106375", "0.52106375", "0.52106375", "0.52106375", "0.5207867", "0.51776344", "0.517306", "0.51654327", "0.51554835", "0.51312387", "0.5130933", "0.5127154", "0.5114786", "0.5114177", "0.5107811", "0.51055056", "0.5073291", "0.5073102", "0.507033", "0.505928", "0.50558686", "0.5054096", "0.5051428", "0.50486785", "0.5046742", "0.5042802", "0.5031196", "0.5027274", "0.50218236", "0.5012392", "0.50062585", "0.5005429", "0.4970143", "0.49669546", "0.49579617", "0.4957399", "0.49520323", "0.49504235", "0.4947862", "0.49470678", "0.49422422", "0.49419242", "0.49369115", "0.49340746", "0.49276116", "0.49255252", "0.49151686", "0.49142504", "0.49028033", "0.49019036", "0.4894784", "0.48930192", "0.48922777", "0.48849192", "0.4883336", "0.4875901", "0.48742348", "0.4872953", "0.48671734", "0.48621845", "0.4856665", "0.4856665", "0.4856665", "0.4856665", "0.4856665", "0.4856665" ]
0.5973443
4
Durations are 'dict string keys'. The keys need to be converted to floats. The keys need to be ordered and the scenes returned with calculated durations
def parse_scene_order(self, data, timesigniture): if not data: return () num_scenes = len(data) def attempt_parse_key_timecode(value): if not value: return value try: return float(value) except (ValueError, TypeError): pass try: return timecode_to_beat(value, timesigniture) except (AssertionError, ValueError, AttributeError): pass return value # Surface the original key value in the dict (useful for debugging) for key, value in data.items(): if value: value['key'] = key data_float_indexed = {attempt_parse_key_timecode(k): v for k, v in data.items()} assert len(data_float_indexed) == num_scenes sorted_keys = sorted(data_float_indexed.keys()) assert len(sorted_keys) == num_scenes def normalise_duration(index): """ Convert any time code or alias to a linear float value. e.g. '1.2' parses to -> 1.5 'match_next' resolves to -> 4.0 """ key = sorted_keys[index] item = data_float_indexed[key] if not item: item = {'duration': 'auto'} data_float_indexed[key] = item duration = attempt_parse_key_timecode(item.get('duration')) if duration == 'match_next': duration = normalise_duration(index+1) if duration == 'match_prev': duration = normalise_duration(index-1) if isinstance(duration, str) and duration.startswith('match '): duration = normalise_duration(sorted_keys.index(float(duration.strip('match ')))) if (not duration or duration == 'auto') and index < len(sorted_keys)-1: duration = sorted_keys[index+1] - key if not isinstance(duration, float): #log.info('Unparsed duration: {0}'.format(duration)) duration = self.DEFAULT_DURATION if duration != item.get('duration'): item['duration'] = duration return duration for index in range(len(sorted_keys)): normalise_duration(index) scene_items = [] for key in sorted_keys: scene_item = data_float_indexed[key] assert scene_item and scene_item.get('duration') >= 0, "All scene must have durations. Something has failed in parsing. {0}:{1}".format(key, scene_item) scene_items.append(scene_item) return scene_items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalise_duration(index):\n key = sorted_keys[index]\n item = data_float_indexed[key]\n if not item:\n item = {'duration': 'auto'}\n data_float_indexed[key] = item\n duration = attempt_parse_key_timecode(item.get('duration'))\n if duration == 'match_next':\n duration = normalise_duration(index+1)\n if duration == 'match_prev':\n duration = normalise_duration(index-1)\n if isinstance(duration, str) and duration.startswith('match '):\n duration = normalise_duration(sorted_keys.index(float(duration.strip('match '))))\n if (not duration or duration == 'auto') and index < len(sorted_keys)-1:\n duration = sorted_keys[index+1] - key\n if not isinstance(duration, float):\n #log.info('Unparsed duration: {0}'.format(duration))\n duration = self.DEFAULT_DURATION\n if duration != item.get('duration'):\n item['duration'] = duration\n return duration", "def test_durations_per_type(self):\n sim = ss.Simulation()\n assert type(sim.durations_per_type()) == dict", "def _get_dur(inst):\n for fil, sig in inst['localization'].items():\n ke = sorted([int(i) for i in sig.keys()], key=int)\n if (len(ke) != 2):\n log(0, \"Error: Instance has two ranges\\n%s\" % (str(inst)))\n exit(1)\n dur = ke[1] - ke[0]\n assert dur > 0, \"Duration <= 0\"\n return(dur)", "def stats():\r\n times_lst = []\r\n time_dict = {}\r\n for album, details in dbase().items():\r\n time_m = 0\r\n time_s = 0\r\n for songs, details_s in details[0].items():\r\n time = details_s[1].split(\":\")\r\n min = int(time[0])\r\n sec = int(time[1])\r\n time_m += min\r\n time_s += sec\r\n time_s = datetime.timedelta(seconds=time_s)\r\n time_m = datetime.timedelta(seconds=time_m)\r\n time = time_m + time_s\r\n time = str(time)\r\n times_lst.append(time)\r\n time_dict[album] = time\r\n\r\n time_dict = sorted(time_dict.items(), key=lambda x: x[1], reverse=True)\r\n return time_dict", "def create_event_dur_score(self):\n for inst in self.instruments:\n #[rest/midipitch, dur, vel]\n inst_score=[]\n running_clock = 0\n for n, note in enumerate(inst.notes):\n freq = mp_to_adjusted_freq(note[0], self.ratios)\n if type(freq) != int: freq = np.asscalar(freq)\n if type(note[0]) != int: inst.notes[n][0] = np.asscalar(note[0])\n if type(note[1]) != int: inst.notes[n][1] = np.asscalar(note[1])\n if type(note[2]) != int: inst.notes[n][2] = np.asscalar(note[2])\n # if type(note[3]) != int: inst.notes[n][3] = np.asscalar(note[3])\n if note[1] != running_clock:\n inst_score.append(['Rest()', note[1] - running_clock, 0])\n inst_score.append([freq, note[2], note[3]])\n running_clock = note[1] + note[2]\n inst.event_dur_score = inst_score", "def durations_per_type(self):\n pass", "def dictagdur2(kind, fname):\n\n #x = ['movie', 'moviecode', 'offset', 'well', 'agmin', 'agsec', 'agdur', \n #'agtype', 'agcomm', 'escmin', 'escsec', 'escdur', 'esctype', 'escbeh', \n #'esccomm', 'gen', 'date', 'assay', 'fps', 'flyid', 'side', 'moviepart']\n\n d = {}\n y = '1'\n b = []\n dur = []\n \n with open(fname) as f:\n for l in f:\n #print(l)\n adict = agline2(l)\n \n if adict['well'] != y:\n if len(dur) > 0:\n agdurcmd(kind, b, dur, d[gen])\n b = []\n dur = []\n \n if adict['agtype'] != '-' and adict['agtype'] != 'x' and \\\n adict['agdur'] != '':\n b.append(adict['agtype'])\n dur.append(adict['agdur'])\n \n if adict['esctype'] != '' and adict['escdur'] != '':\n b.append(adict['esctype'])\n dur.append(adict['escdur'])\n\n gen = adict['gen']\n #print(gen)\n if gen not in d:\n d[gen] = []\n \n y = adict['well']\n \n agdurcmd(kind, b, dur, d[gen])\n\n return(d)", "def 
sort_duration(self):\n self.sort('duration')", "def __init__(self, scene_items, timesigniture=DEFAULT_TIMESIGNITURE_):\n self.scene_items = scene_items\n self.total_beats = sum(scene_item['duration'] for scene_item in self.scene_items)\n self.timesigniture = timesigniture", "def get_state(self, duration):\n metrics = []\n\n if duration:\n for count_key in self.kv_counts:\n metrics.append(\n MetricObject(\n count_key,\n self.kv_counts[count_key] / duration\n )\n )\n\n for time_key in self.kv_times:\n values = self.kv_times[time_key]['values']\n unit = self.kv_times[time_key]['unit']\n\n metrics.append(\n MetricObject(\n '.'.join([time_key, 'mean']),\n stats_helper.find_mean(values),\n unit\n )\n )\n\n metrics.append(\n MetricObject(\n '.'.join([time_key, 'median']),\n stats_helper.find_median(values),\n unit\n )\n )\n\n for pct in self.percentiles:\n metrics.append(\n MetricObject(\n '.'.join([time_key, \"%sth_percentile\" % pct]),\n stats_helper.find_percentile(values, int(pct)),\n unit\n )\n )\n\n return metrics", "def _generate_case_durations(self):\n return pd.Series(self.df_cases[\"Median Duration\"].values, index=self.df_cases[\"CaseID\"]).to_dict()", "def _gather_durations(ret, minion_id):\n if isinstance(ret.data, dict) and isinstance(\n ret.data.get(minion_id, None), dict\n ):\n duration = 0\n for _, state_ret in ret.data[minion_id].items():\n try:\n duration += state_ret[\"duration\"]\n except KeyError:\n break\n else:\n return duration\n pytest.skip(\"Something went wrong with the states, skipping.\")", "def dictagdur(kind, fname):\n\n with open(fname, 'r') as g:\n g.next()\n g.next()\n m = g.next()\n startdict = agline(m)\n genold = startdict['gen']\n\n f = open(fname)\n f.next()\n f.next()\n d = {}\n y = '1'\n nb = []\n for l in f:\n adict = agline(l)\n kdur = kind + 'dur'\n gen = adict['gen']\n well = adict['well']\n\n if adict['gen'] not in d:\n d[gen] = []\n \n if gen != genold:\n d[genold].append(sum(nb))\n nb = []\n else: \n if adict['well'] != y:\n #print(sum(nb))\n d[gen].append(sum(nb))\n nb = []\n \n if adict[kdur] == '':\n nb.append(0)\n elif int(adict[kdur]) >= 0:\n nb.append(int(adict[kdur]))\n elif adict[ks] == '-':\n pass\n \n\n y = adict['well']\n genold = adict['gen']\n \n d[gen].append(sum(nb))\n\n return(d)", "def test_duration(self):\n for duration_, _, _ in self.test_cases:\n self.assertEqual(Rest(duration_).duration, duration_)", "def times(self):\n ret = {}\n for tag in self.TIMETAGLIST:\n if self.has_tag(tag):\n try:\n ret[tag] = safeInt(self.tag(tag))\n except TypeError:\n pass\n return ret", "def getTranslationKeyTimes(self, view) -> list[float]:\n ...", "def get_timestamps(filename, dictionary):\n \n with open(filename, 'r') as f_obj:\n text = f_obj.readlines()\n inferred_name = re.sub(r'[0-9_\\-]+', ' ', filename).split('/')[-1].split('.lab')[0].split('CD')[-1].strip().lower()\n end_stamp = float(text[-1].split()[1]) # relic of an old idea.\n for line in text:\n line = line.split() \n start = float(line[0])\n stop = float(line[1])\n musical_key = line[2]\n new_key = (inferred_name, start, stop)\n dictionary[new_key] = musical_key", "def get_event_start_idxs_durations(self):\n durations = []\n start_idxs = []\n prev = 0\n count = 0\n for idx, score in enumerate(self.summary):\n if score == 1 and prev == 0:\n count += 1\n start_idxs.append(idx)\n if score == 1 and prev == 1:\n count += 1\n elif score == 0 and prev == 1:\n durations.append(count)\n count = 0\n prev = score\n return dict(zip(start_idxs, durations))", "def get_dur(self):\n return 
[char.get_dur() for char in self.string]", "def process_notes_in_song(dict_time_notes, seq_len = 50):\n list_of_dict_keys_time = []\n \n for key in dict_time_notes:\n sample = dict_time_notes[key]\n times = np.unique(np.where(sample > 0)[1])\n index = np.where(sample > 0)\n dict_keys_time = {}\n\n for time in times:\n index_where = np.where(index[1] == time)\n notes = index[0][index_where]\n dict_keys_time[time] = notes\n list_of_dict_keys_time.append(dict_keys_time)\n return list_of_dict_keys_time", "def _generate_session_durations(self):\n return pd.Series(self.df_sessions[\"Duration\"].values, index=self.df_sessions[\"SessionID\"]).to_dict()", "def _sort_by_duration(self) -> None:\n total_samples = len(self.paths)\n if total_samples == 0:\n return\n samples = zip(self.paths, self.durations, self.transcriptions)\n sorted_samples = sorted(samples, key=lambda sample: sample[1])\n self.paths, self.durations, self.transcriptions = [\n list(c) for c in zip(*sorted_samples)\n ]\n assert (\n total_samples\n == len(self.paths)\n == len(self.durations)\n == len(self.transcriptions)\n ), \"_sort_by_duration len mis-match\"", "def testHrtDuration(self):\n attr = self.session.create_visit_attr()\n\n self.util.stringTypeTest(self, attr, \"duration\")\n\n self.util.stringPropertyTest(self, attr, \"duration\")", "def plotagdur(kind, d, iskeyfile='True', keyfile='keylist', type='b'):\n md = cl.dictmeans(d)\n\n if iskeyfile == 'True':\n keylist = cmn.load_keys(keyfile)\n else:\n keylist = sorted(d.keys())\n\n ylabel = 'Seconds'\n \n ftitle = 'Mean duration of behavior'\n\n if kind == 'escd':\n ftitle = 'Mean duration of dominant escalation'\n \n if kind == 'escm':\n ftitle = 'Mean duration of mutual escalation'\n\n fig1 = gpl.plotdata(d, md, keylist, type, ylabel=ylabel, ftitle=ftitle, \n titlesize='large', err='none', figw=10, figh=8)\n \n plt.ylim(0)", "def getDurations(self):\n return self.durations", "def duration(timedelta):\r\n duration = {}\r\n duration['day'] = timedelta.days\r\n minutes_temp, duration['second'] = divmod(timedelta.seconds, 60)\r\n duration['hour'], duration['minute'] = divmod(minutes_temp, 60)\r\n\r\n return duration", "def time_taken(json_cutlist, laser):\r\n\tcutlist = json.loads(json_cutlist)\r\n\ttime = 0\r\n\tcoordinate_array = [0, 0]\r\n\tfor a in cutlist:\r\n\t\tif a[0] == \"jump\" or a[0] == \"mark\":\r\n\t\t\tcoordinate_array = [float(a[1]) - coordinate_array[0], float(a[2]) - coordinate_array[1]]\r\n\t\t\tmag = math.sqrt(coordinate_array[0]**2 + coordinate_array[1]**2)\r\n\t\t\tif a[0] == \"jump\":\r\n\t\t\t\ttime += mag/laser[\"jump_speed\"]\r\n\t\t\telse:\r\n\t\t\t\ttime += mag/laser[\"mark_speed\"]\r\n\t\t\tcoordinate_array = [float(a[1]), float(a[2])]\r\n\t\telif a[0] == \"z_abs\" or a[0] == \"z_rel\":\r\n\t\t\tzSet = float(a[1])\r\n\t\telif a[0] == \"c_abs\" or a[0] == \"c_rel\":\r\n\t\t\tcSet = float(a[1])\r\n\t\telif a[0] == \"a_abs\" or a[0] == \"a_rel\":\r\n\t\t\taSet = float(a[1])\r\n\t\telse:\r\n\t\t\tpass\r\n\treturn str(datetime.timedelta(seconds=int(time)))", "def getTransformKeyTimes(self, view) -> list[float]:\n ...", "def get_duration(self):\n duration = 0\n\n for entry in self.entries:\n duration += entry.get_duration()\n return duration", "def test_time_dicts():\n dmd = DMD()\n dmd.fit(X=sample_data_1, Y=sample_data_2)\n expected_dict = {\"dt\": 1, \"t0\": 0, \"tend\": 13}\n np.testing.assert_equal(dmd.original_time, expected_dict)\n np.testing.assert_equal(dmd.dmd_time, expected_dict)", "def get_indv_units(\n dataset: KantoData,\n keys: List[str],\n 
ID: str,\n pad: bool = True,\n song_level: bool = False,\n) -> Dict[str, Path]:\n\n units = [\n get_vocalisation_units(dataset, key, song_level=song_level)\n for key in keys\n ]\n units = dictlist_to_dict(units)\n\n if pad:\n if song_level:\n max_frames = max([unit.shape[1] for unit in units.values()])\n units = {\n key: pad_spectrogram(spec, max_frames)\n for key, spec in units.items()\n }\n else:\n max_frames = max(\n [unit.shape[1] for ls in units.values() for unit in ls]\n )\n units = {\n key: [pad_spectrogram(spec, max_frames) for spec in ls]\n for key, ls in units.items()\n }\n\n # Save dataset\n out_dir = dataset.DIRS.SPECTROGRAMS / (\n f\"{ID}_avg_units.p\" if song_level else f\"{ID}_units.p\"\n )\n makedir(out_dir)\n pickle.dump(units, open(out_dir, \"wb\"))\n return {ID: out_dir}", "def get_duration_steps(self):\n return {\n # acc. to ATV-A 121 chap. 5.2 (till 2012)\n ATV: (60 * 3, 60 * 48),\n # acc. to DWA-A 531 chap. 5.2.1\n DWA_adv: (60 * 3, 60 * 24),\n # acc. to DWA-A 531 chap. 5.2.1\n DWA: (60, 60 * 12)\n }[self.worksheet]", "def choose_movement_durations(self):\n\n minimum = GOLDEN_MEAN\n\n # The golden mean of the section between the golden mean and 1\n maximum = scale(minimum, 0, 1, minimum, 1)\n\n # Pick a random float between minimum and maximum\n division = scale(random.random(), 0, 1, minimum, maximum)\n\n # Get the durations of each section\n one = int(scale(division, 0, 1, 0, self.piece_duration))\n two = self.piece_duration - one\n return one, two", "def getScaleKeyTime(self, index, view) -> float:\n ...", "def main(args):\n words = dict(tuple( (w.strip().split()[-1],[])\n for w in open(args.w,'r').read().strip().split('\\n')))\n \n for line in sys.stdin:\n line = line.strip().split()\n identifier = line[0]\n start = float(line[1])\n end = float(line[2])\n duration = end-start\n word = line[3]\n if words.has_key(word):\n words[word].append(\n (identifier,start,end,duration))\n\n fl_durations = open('%s/av_durations.txt' % args.o,'w')\n for word in np.sort(words.keys()):\n fl_durations.write('%s %d %g %g\\n' % (word,\n len(words[word]),\n np.mean([duration for identifier,start,end,duration in words[word]]),\n np.median([duration for identifier,start,end,duration in words[word]])))\n\n fl_word_times = open('%s/%s' % (args.o, word),'w')\n for identifier,start,end,duration in words[word]:\n fl_word_times.write('%s %g %g\\n' % (identifier, start,end))\n \n fl_word_times.close()\n\n fl_durations.close()\n\n\n\n # import pdb; pdb.set_trace()\n # words = []\n # use_cur_line = False\n # keyphrase = \"# normal lexical items\"\n # for line in open(args.d,'r'):\n # if line[:len(keyphrase)] == keyphrase:\n # use_cur_line = True\n \n # if not use_cur_line: continue\n\n # split_line = line.strip().split()\n # if len(split_line) == 0 or split_line[0][:1] == '#': continue\n \n # words.append(split_line[0])\n # words.sort()\n\n # open(args.o,'w').write('\\n'.join( '%d %s' % (i,w) for i,w in enumerate(words)))", "def _deltas(self):\n istat = self.init\n lstat = self.stats\n uptime = self._uptime()\n delta = float(uptime) - float(self.uptime)\n self.uptime = uptime\n \n for dev in lstat.keys():\n if not istat.has_key(dev):\n del lstat[dev]\n continue\n idev = istat[dev]\n ldev = lstat[dev]\n\n for key,value in ldev.items():\n if re.search(r'(^major\\Z|^minor\\Z)',key):\n continue\n \n if not idev.has_key(key):\n print \"Different keys in statistics\"\n sys.exit(1)\n if not str(value).isdigit and \\\n not str(ldev[key]).isdigit(): \n print \"value of key is not a number\"\n 
sys.exit(1)\n \n if ldev[key] == idev[key]:\n ldev[key] = self._sprintf('%.2f', 0)\n elif int(delta) > 0:\n ldev[key] = self._sprintf('%.2f',float((ldev[key] - idev[key]) / delta))\n else:\n\t ldev[key] = self._sprintf('%.2f', float(ldev[key] - idev[key]))\n idev[key] = value\n return idev", "def getScaleKeyTimes(self, view) -> list[float]:\n ...", "def _get_duration(self):\n try:\n dur = self.im.info[\"duration\"] / 1000.0\n except KeyError:\n dur = DEFAULT_DURATION / 1000.0 \n\n return dur", "def duration_steps_readable(durations):\n duration_strings = list()\n for i, minutes in enumerate(durations):\n duration_strings.append(minutes_readable(minutes))\n return duration_strings", "def get_statistics_dash(self):\n query = dates_query = db.session.query(VideoRepresentation,db.func.count().label(\"count\"),VideoRepresentation.height.label(\"height\")).outerjoin((QueryVideoMM, QueryVideoMM.video_id == VideoRepresentation.video_id)).filter_by(youtube_query_id=self.id).group_by(VideoRepresentation.height).order_by(VideoRepresentation.height)\n representations = query.all()\n return [{\"height\":representation.height if representation.height !='' else 'audio',\"count\":representation.count} for representation in representations]", "def dehydrate_duration(value):\n return Structure(ord(b\"E\"), value.months, value.days, value.seconds, int(1000000000 * value.subseconds))", "def create_time_dicts(\n Spc:Dict) -> List[Dict]:\n abreviations = ['Kd','Km','Kq']\n daily_range = Spc['daily']\n monthly_range = Spc['monthly']\n quarterly_range = Spc['quarterly']\n all_ranges = np.cumsum([0,daily_range,monthly_range,quarterly_range])\n\n out_list = []\n for i,abrev in enumerate(abreviations):\n temp_dict = {}\n temp_dict['range'] = range(all_ranges[i],all_ranges[i+1])\n temp_dict['one'] = np.ones(Spc[abrev])\n temp_dict['w'] = np.arange(1,Spc[abrev]+1)/Spc[abrev]\n temp_dict['k'] = np.arange(1,Spc[abrev]+1)\n temp_dict['kk'] = Spc[abrev]\n out_list.append(temp_dict)\n\n return out_list", "def getTransformKeyTime(self, index, view) -> float:\n ...", "def getDuration(self):\n return (self._get_int('duration'), self._attributes.getDivisions())", "def get_labels_for_video(video_title: str, framerate: int) -> dict:\n frames = list_pngs(video_title)\n label_times = {}\n for f in frames:\n message = rekognize_objects_in_frame(video_title, f)\n seconds = int(f[5:-4]) / framerate\n minutes = seconds // 60\n display_seconds = seconds - minutes * 60\n labels = [label['Name'] for label in message['Labels']]\n for label in labels:\n if label not in label_times:\n label_times[label] = []\n label_times[label].append(seconds)\n print(minutes, ':', display_seconds, '|', ' '.join(labels))\n return label_times", "def _addTiming(self, key, duration):\n pass", "def scale_time_to(recs, unit):\n\n for r in recs:\n if unit == 'd':\n r.t = [t / 3600 / 24 for t in r.time]\n elif unit == 'hours':\n r.t = [t / 3600 for t in r.time]\n elif unit == 'min':\n r.t = [t / 60 for t in r.time]\n elif unit in ('s', 'sec'):\n r.t = r.time\n else:\n Exception('Wrong time unit')\n\n Records.time_unit = unit\n Records.time_label = 'Time (' + unit + ')'", "def test_find_parallel_duration():\n pt2_example = {\n \"C\": [],\n \"A\": [\"C\"],\n \"F\": [\"C\"],\n \"B\": [\"A\"],\n \"D\": [\"A\"],\n \"E\": [\"B\", \"D\", \"F\"],\n }\n assert find_parallel_duration(pt2_example, 2, 0) == 15", "def parse_duration_string_ns(duration):\n pattern = r'(?P<value>[0-9]+\\.?[0-9]*?)(?P<units>\\D+)'\n matches = list(re.finditer(pattern, duration))\n assert 
matches, 'Failed to parse duration string %s' % duration\n\n times = {'h': 0, 'm': 0, 's': 0, 'ms': 0, 'us': 0, 'ns': 0}\n for match in matches:\n parsed = match.groupdict()\n times[parsed['units']] = float(parsed['value'])\n\n value_ns = (times['h'] * 60 * 60 + times['m'] * 60 + times['s']) * 1000000000\n value_ns += times['ms'] * 1000000 + times['us'] * 1000 + times['ns']\n\n return value_ns", "def samples_per_sec(header):\n hertz = header['samples_per_record'] / header['seconds_per_record']\n return OrderedDict(zip(header['label'], hertz))", "def align_time_frames(self, dict, name, freq_unit, group_unit='mean'):\n\t\tagg_dict = {}\n\t\tindex_use = dict[name]\n\t\tif freq_unit == 'M':\n\t\t\tfreq_unit = 'MS'\n\t\tfor name, df in dict.items():\n\t\t\ttime_series = pd.date_range(index_use.index[0],index_use.index[-1], freq=freq_unit)\n\t\t\t#print('time_series',time_series)\n\t\t\tdf = df.reindex(time_series)\n\t\t\t#print('df', df)\n\t\t\tarray = list(df.columns)\n\t\t\tarray.remove('value')\n\t\t\tdf = df.drop(array,axis=1)\n\t\t\tdf[name + ' Value'] = df['value']\n\t\t\tagg_dict[name + ' Value'] = group_unit\n\t\t\t\"\"\"\n\t\t\tdf[name + ' Min'] = df['value']\n\t\t\tdf[name + ' Max'] = df['value']\n\t\t\tdf[name + ' Average'] = df['value']\n\t\t\tagg_dict[name + ' Min'] = 'min'\n\t\t\tagg_dict[name + ' Max'] = 'max'\n\t\t\tagg_dict[name + ' Average'] = 'mean'\n\t\t\t\"\"\"\n\t\t\tdf = df.drop('value',axis=1)\n\t\t\t#print(df)\n\t\t\tdict[name] = df\n\t\treturn dict, agg_dict", "def get_labels_and_times(dict_):\n labels = np.asarray(list(dict_.items()))\n times = labels[:, 0].astype(int)\n labels = labels[:, 1]\n\n return labels, times", "def getSectionsOfNewVideo (silences, duration):\n return [0.0] + silences + [duration]", "def parse_duration_string_ms(duration):\n pattern = r'(?P<value>[0-9]+\\.?[0-9]*?)(?P<units>\\D+)'\n matches = list(re.finditer(pattern, duration))\n assert matches, 'Failed to parse duration string %s' % duration\n\n times = {'h': 0, 'm': 0, 's': 0, 'ms': 0}\n for match in matches:\n parsed = match.groupdict()\n times[parsed['units']] = float(parsed['value'])\n\n return (times['h'] * 60 * 60 + times['m'] * 60 + times['s']) * 1000 + times['ms']", "def time_units(self) -> str:\n return self._ll_tree_sequence.get_time_units()", "def decodeSpaceTime(self, result):\r\n if self.case == 1:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(x[0]/self.scale))),\r\n reader.morton2coordsX2D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY2D(x[1], self.offy, self.scaley, self.roundNum), x[2]], result)\r\n else:\r\n return map(lambda x: [int(x[0]/self.scale), reader.morton2coordsX2D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY2D(x[1], self.offy, self.scaley, self.roundNum), x[2]], result)\r\n elif self.case == 2:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(x[0]/self.scale))), \r\n reader.morton2coordsX3D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY3D(x[1], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ3D(x[1], self.offz, self.scalez, self.roundNum)], result)\r\n else:\r\n return map(lambda x: [int(x[0]/self.scale), reader.morton2coordsX3D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY3D(x[1], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ3D(x[1], self.offz, self.scalez, self.roundNum)], result)\r\n elif 
self.case == 3:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(reader.morton2coordst3D(x[0])/self.scale))), \r\n reader.morton2coordsY3D(x[0], self.offx, self.scalex, self.roundNum),\r\n reader.morton2coordsZ3D(x[0], self.offy, self.scaley, self.roundNum), x[1]], result)\r\n else:\r\n return map(lambda x: [int(reader.morton2coordst3D(x[0])/self.scale), \r\n reader.morton2coordsY3D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsZ3D(x[0], self.offy, self.scaley, self.roundNum), x[1]], result)\r\n elif self.case == 4:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(reader.morton2coordst4D(x[0])/self.scale))), \r\n reader.morton2coordsX4D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY4D(x[0], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ4D(x[0], self.offz, self.scalez, self.roundNum)], result)\r\n else:\r\n return map(lambda x: [int(reader.morton2coordst4D(x[0])/self.scale), \r\n reader.morton2coordsX4D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY4D(x[0], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ4D(x[0], self.offz, self.scalez, self.roundNum)], result)", "def get_average_duration_episode_in_seconds(self) -> NamedTuple:\n times = [ep.itunes_duration for ep in self.entries]\n format_times = []\n\n for time in times:\n if not time.startswith('00'):\n time = '0' + time\n format_times.append(time)\n\n dts = [datetime.strptime(x, '%H:%M:%S') for x in format_times]\n secs = [timedelta(\n hours=x.hour,\n minutes=x.minute,\n seconds=x.second\n ).seconds for x in dts]\n\n return Duration(\n floor(mean(secs)),\n max(format_times),\n min(format_times)\n )", "def get_unit_map(self):\n units = dict()\n for t in META:\n for c in META[t]:\n for i in META[t][c]:\n unit = DEFAULT_UNIT\n if (isinstance(i, (tuple, list))):\n val, unit = i\n else:\n val = i\n # category/metric\n n = \"/\".join((c, val))\n units[n] = (unit, t)\n return units", "def test_dumps_timedelta(self):\n try:\n _build_test_dirs()\n dicti = {\n 'timedelta1': datetime.timedelta(days=392),\n 'timedelta2': datetime.timedelta(weeks=2, hours=23),\n 'timedelta3': datetime.timedelta(microseconds=27836),\n 'array': [1, 2, 3],\n 'string': 'trololo',\n 'int': 1,\n 'float': 4.32,\n 'true': True,\n 'false': False,\n 'null': None\n }\n with open(_TEST_FILE, 'w+') as fileobj:\n morejson.dump(dicti, fileobj)\n with open(_TEST_FILE, 'r') as fileobj:\n self.assertEqual(dicti, morejson.load(fileobj))\n finally:\n _dismantle_test_dirs()", "def getTranslationKeyTime(self, index, view) -> float:\n ...", "def transform_timestamps(time_tracker):\n def calculate_timediff(t1, t2):\n return (t2 - t1).seconds + (t2 - t1).microseconds/1000000\n\n durations = dict()\n\n durations[\"Initialization\"] \\\n = round(calculate_timediff(time_tracker[\"time_start\"],\n time_tracker[\"after_init\"]), 3)\n\n durations[\"Configuration\"] \\\n = round(calculate_timediff(time_tracker[\"after_init\"],\n time_tracker[\"after_config\"]), 3)\n\n iter_list = []\n for i, iteration in enumerate(time_tracker[\"iterations\"]):\n if i == 0:\n iter_list\\\n .append(round(calculate_timediff(time_tracker[\"after_config\"],\n iteration), 3))\n else:\n iter_list\\\n .append(round(calculate_timediff(\n time_tracker[\"iterations\"][i-1], iteration), 3))\n durations[\"Iterations\"] = iter_list\n\n durations[\"Finalization\"] \\\n = 
round(calculate_timediff(time_tracker[\"iterations\"][-1],\n time_tracker[\"finish\"]), 3)\n durations[\"Total\"] \\\n = round(durations[\"Initialization\"] + durations[\"Configuration\"]\n + sum(durations[\"Iterations\"]) + durations[\"Finalization\"], 3)\n\n return durations", "def calculate_durations(labels, num_classes):\n num_segments = len(labels)\n durations = np.zeros((num_segments, num_classes), dtype='int32')\n for segment_index, (segment_indices, segment_labels) in enumerate(labels):\n segment_durations = np.diff(segment_indices, prepend=0)\n for label in range(num_classes):\n durations[segment_index, label] = segment_durations[segment_labels == label].sum()\n return durations", "def read_releaseResourcesMeasuredDuration(self):\n # PROTECTED REGION ID(CspSubElementSubarray.releaseResourcesMeasuredDuration_read) ENABLED START #\n return self._cmd_measured_duration[\"releaseresources\"]\n # PROTECTED REGION END # // CspSubElementSubarray.releaseResourcesMeasuredDuration_read", "def decode(self, data):\r\n return Duration.from_sec(float(data))", "def process_feature_durations(self, docFeatList):\n \n for feat in docFeatList:\n tlink = feat.getTlink()\n if not tlink or tlink.getType()!='DURATION':\n continue\n# \n tdurs = [t for t in tlink.getTimexes() if t.getType()=='DUR']\n if not tdurs: continue\n \n timexes = [t for t in tlink.getTimexes() if t.getDateTime()]\n if not timexes: continue\n \n tdur = tdurs[0]\n \n if len(timexes)==1:\n dt = timexan.getRelativeDatetime(tdur.getString(), 'after', timexes[0].getDateTime())\n tdur.setDateTime(dt) \n tlink.setType('BETWEEN')\n continue\n \n timexes2 = []\n for t in timexes:\n dt = timexan.getRelativeDatetime(tdur.getString(), 'after', t.getDateTime())\n timexes2.append(timexan.Timex3(0, 0, '', dt, tdur.getString()))\n tlink.setTimexes2(timexes2)\n tlink.setType('MULTI_DURATIONS')\n \n return docFeatList", "def test_duration_property(self):\n recording_dt = 0.1\n recording_shape = {\n 'no_timesteps': 1000,\n 'no_sweeps': 10,\n 'no_channels': 4,\n }\n expected_duration = recording_shape['no_timesteps'] * recording_dt\n test_rec = rt.Recording(\n np.zeros(\n [\n recording_shape['no_channels'],\n recording_shape['no_timesteps'],\n recording_shape['no_sweeps'],\n ]\n ),\n dt=recording_dt,\n )\n npt.assert_almost_equal(\n test_rec.duration,\n expected_duration,\n err_msg='Expected {} for `duration` property; got {} instead.'.format(\n expected_duration, test_rec.duration\n ),\n )", "def calc_duration(z_array_reion_allmodels, lookback_array_reion_allmodels,\n mass_frac_allmodels, duration_definition):\n\n duration_z = []\n duration_t = []\n reion_completed = []\n\n # We need to be careful here. For low values of fesc, reionization\n # won't actually complete. 
Hence we need to check `duration_z` and see\n # those models in which reionization is 'completed' at the last snapshot.\n for model_number in range(len(mass_frac_allmodels)):\n mass_frac_thismodel = mass_frac_allmodels[model_number]\n\n duration_z.append([]) \n duration_t.append([])\n for val in duration_definition:\n idx = (np.abs(mass_frac_thismodel - val)).argmin()\n duration_z[model_number].append(z_array_reion_allmodels[model_number][idx]) \n duration_t[model_number].append(lookback_array_reion_allmodels[model_number][idx]) \n if (val == duration_definition[-1]) and \\\n (idx == len(mass_frac_thismodel)-1):\n reion_completed.append(0)\n elif(val == duration_definition[-1]):\n reion_completed.append(1)\n\n return duration_z, duration_t, reion_completed", "def test_combined_long_singular(self):\n expected = dict(\n seconds=1, minutes=2, hours=3, days=4, weeks=5, months=6, years=7)\n self.assertEqual(\n expected,\n util.parse_relative_time_string(\n \"+1second 2minute 3hour 4day 5week 6month 7year\"))", "def test_case(self):\n expected = dict(seconds=1)\n self.assertEqual(expected, util.parse_relative_time_string(\"+1s\"))\n self.assertEqual(expected, util.parse_relative_time_string(\"+1S\"))", "def time(self):\n self.convert_window(\"Time\", \"seconds\", [\"centuries\", \"days\", \"decades\", \"femtoseconds\", \"fortnights\", \"hours\", \"microseconds\", \"millenia\", \"milliseconds\", \"minutes\", \"months(Common)\", \"months(Synodic)\", \"nanoseconds\", \"picoseconds\", \"quarters(Common)\", \"seconds\", \"shakes\", \"weeks\", \"years(Average Gregorian)\", \"years(Common)\", \"years(Julian)\", \"years(Leap)\", \"years(Tropical)\"])", "def duration(self, row: Dict[str, str]) -> float:\n\n #compute time difference from start and stop datetime objs\n fmt = '%m/%d/%y %H:%M:%S.%f'\n start = datetime.strptime(row['Start Time'], fmt)\n stop = datetime.strptime(row['End Time'], fmt)\n return (stop - start).total_seconds()", "def duration(self):\r\n return self.t2 - self.t1", "def _extract_timings(self, outfile):\n f = open_general(outfile)\n tmptxt = f.readlines()\n f.close()\n search_keys = ['time until scf starts',\n 'vpot->tmat',\n 'gref->gmat',\n 'gonsite->density',\n 'energyloop',\n 'Iteration number',\n 'Total running time']\n\n res = {}\n for isearch in search_keys:\n tmpval = []\n itmp = 0\n while itmp>=0:\n itmp = search_string(isearch, tmptxt)\n if itmp>=0:\n tmpval.append(float(tmptxt.pop(itmp).split()[-1]))\n if len(tmpval)>0:\n res[isearch] = tmpval\n # average over iterations\n niter = len(res.get(search_keys[-2], []))\n if niter>0:\n for key in search_keys[1:6]:\n res[key] = sum(res[key])/niter\n for key in [search_keys[0], search_keys[-1]]:\n res[key] = res[key][0]\n return res", "def do_duration(seconds):\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n tokens = []\n tokens.append(one_many(d, '{d} day'))\n tokens.append(one_many(h, '{h} hour'))\n tokens.append(one_many(m, '{d} minute'))\n tokens.append(one_many(s, '{d} second'))\n template = ', '.join(tokens)\n\n return template.format(d=d, h=h, m=m, s=s)", "def time_dictionary():\n # Each mushroom is a key in the dictionary\n world = {}\n positions = list_of_positions()\n for position in positions:\n world[position] = random.randrange(1, 1000)\n\n time_start = time.time()\n\n # Iterate through the world,\n # incrementing each \"mushroom\"\n for position in world:\n world[position] += 1\n\n time_end = time.time()\n return time_end - time_start", "def get_subphase_durations(\n data: pd.DataFrame, 
subphases: Dict[str, Union[int, Tuple[int, int]]]\n) -> Sequence[Tuple[int, int]]:\n subphase_durations = np.array(list(subphases.values()))\n if subphase_durations.ndim == 1:\n # 1d array => subphase values are integer => they are consecutive and each entry is the duration\n # of the subphase, so the start and end times of each subphase must be computed\n times_cum = np.cumsum(subphase_durations)\n if subphase_durations[-1] == 0:\n # last subphase has duration 0 => end of last subphase is length of dataframe\n times_cum[-1] = len(data)\n subphase_times = list(zip([0, *list(times_cum)], times_cum))\n else:\n # 2d array => subphase values are tuples => start end end time of each subphase are already provided and do\n # not need to be computed\n subphase_times = subphase_durations\n return subphase_times", "def sum_syllable_durations(syllable_tuple):\n# if syllable_tuple[0] == None:\n# return\n total_duration = 0\n for note in syllable_tuple[-1]:\n total_duration += note['duration']\n return total_duration", "def to_dict(self):\n# \"\"\" The JSON model used is like:\n# <code>\n#{\n# \"duration\": 15,\n# \"url\": \"url1\",\n# \"selections\": [{\n# \"annotations\": [{\n# \"author\": \"\",\n# \"description\": \"speaker\",\n# \"keyword\": \"john\",\n# \"lang\": \"EN\"\n# },\n# {\n# \"author\": \"\",\n# \"description\": \"speakerLabel\",\n# \"keyword\": \"S0\",\n# \"lang\": \"EN\"\n# }\n# , {\n# \"author\": \"\",\n# \"description\": \"gender\",\n# \"keyword\": \"F\",\n# \"lang\": \"EN\" \n# }],\n# \"resolution\": \"0x0\",\n# \"selW\": 20,\n# \"selH\": 15,\n# \"selY\": 10,\n# \"selX\": 10,\n# \"startTime\" : 0,\n# \"endTime\" : 10\n# \n# }]\n#}\n# </code>\n# \n# \"\"\"\n\n dic = {\"duration\": self.get_duration(),\n \"url\": self._filename,\n \"db\":self.get_db().get_path(),\n \"selections\": [] }\n for seg in self.get_time_slices():\n dic['selections'].append({\n \"startTime\": float(seg[0]) / 100.0,\n \"endTime\": float(seg[1]) / 100.0,\n 'speaker': seg[-2],\n 'speakerLabel': seg[-1],\n 'gender': seg[2],\n 'speakers': seg[3]\n })\n return dic", "def find_duration(data):\n t = [i[0] for i in data]\n duration = t[len(t) - 1] - t[0]\n logging.info('Calculated duration: %s', duration)\n return duration", "def test_durations_per_type_num_players(self):\n type_of_player = [ss.Player, ss.LazyPlayer, ss.ResilientPlayer]\n sim = ss.Simulation(player_field=type_of_player)\n run = sim.durations_per_type()\n assert list(run.keys()) == ['Player', 'LazyPlayer', 'ResilientPlayer']", "def get_vocalisation_units(\n dataset: KantoData, key: str, song_level: bool = False\n) -> Dict[str, np.ndarray | List[np.ndarray]]:\n\n # Get spectrogram and segmentation information\n if \"onsets\" not in dataset.data.columns:\n raise KeyError(\n \"'onsets': This vocalisation has not yet been segmented.\"\n )\n spectrogram = retrieve_spectrogram(dataset.files.at[key, \"spectrogram\"])\n onsets, offsets = [dataset.data.at[key, i] for i in [\"onsets\", \"offsets\"]]\n\n # Get spectrogram for each unit\n unit_spectrograms = get_unit_spectrograms(\n spectrogram,\n onsets,\n offsets,\n sr=dataset.parameters.sr,\n hop_length=dataset.parameters.hop_length,\n )\n\n if song_level:\n # Pad unit spectrograms to max duration and return mean\n max_frames = max([unit.shape[1] for unit in unit_spectrograms])\n padded_units = [\n pad_spectrogram(unit, max_frames) for unit in unit_spectrograms\n ]\n avg_unit = norm(np.mean(padded_units, axis=0))\n return {key: avg_unit}\n else:\n return {key: unit_spectrograms}", "def repeteTimeDic(dic, k):\n 
pass", "def generate_dict_time_notes(list_all_midi, batch_song = 16, start_index=0, fs=30, use_tqdm=True):\n assert len(list_all_midi) >= batch_song\n \n dict_time_notes = {}\n process_tqdm_midi = tqdm_notebook(range(start_index, min(start_index + batch_song, len(list_all_midi)))) if use_tqdm else range(start_index, min(start_index + batch_song, len(list_all_midi)))\n for i in process_tqdm_midi:\n midi_file_name = list_all_midi[i]\n if use_tqdm:\n process_tqdm_midi.set_description(\"Processing {}\".format(midi_file_name))\n try: # Handle exception on malformat MIDI files\n midi_pretty_format = pretty_midi.PrettyMIDI(midi_file_name)\n piano_midi = midi_pretty_format.instruments[0] # Get the piano channels\n piano_roll = piano_midi.get_piano_roll(fs=fs)\n dict_time_notes[i] = piano_roll\n except Exception as e:\n print(e)\n print(\"broken file : {}\".format(midi_file_name))\n pass\n return dict_time_notes", "def GetSamples(self):\n samples = super().GetSamples()\n for container in itertools.chain(*list(self.containers.values())):\n metadata = {'image': container.image.split('/')[-1]}\n if container.resource_ready_time and container.create_start_time:\n samples.append(\n sample.Sample(\n 'Container Deployment Time',\n container.resource_ready_time - container.create_start_time,\n 'seconds', metadata))\n if container.delete_end_time and container.delete_start_time:\n samples.append(\n sample.Sample(\n 'Container Delete Time',\n container.delete_end_time - container.delete_start_time,\n 'seconds', metadata))\n for service in self.services.values():\n metadata = {'image': service.image.split('/')[-1]}\n if service.resource_ready_time and service.create_start_time:\n samples.append(\n sample.Sample(\n 'Service Deployment Time',\n service.resource_ready_time - service.create_start_time,\n 'seconds', metadata))\n if service.delete_end_time and service.delete_start_time:\n samples.append(\n sample.Sample('Service Delete Time',\n service.delete_end_time - service.delete_start_time,\n 'seconds', metadata))\n\n return samples", "def test_unit_of_measurement(self):\n for name in self.sensor_dict:\n sensor = self.sensor_dict[name][\"sensor\"]\n assert self.sensor_dict[name][\"units\"] == sensor.unit_of_measurement", "def get_operation_times(self):\n self.write(\"TIMERS?\")\n timers = {}\n timers['psu'] = int(re.search(r\"\\d+\", self.read()).group())\n timers['laser'] = int(re.search(r\"\\d+\", self.read()).group())\n timers['laser_above_1A'] = int(re.search(r\"\\d+\", self.read()).group())\n self.read() # an empty line is at the end.\n return timers", "def get_timestamps_and_keys(song_name):\n\n filepath = labels[labels.inferred_name.str.title() == song_name].label_file.values[0]\n\n timestamps = []\n keys = []\n\n with open(filepath, 'r') as f_obj:\n text = f_obj.readlines()\n inferred_name = re.sub(r'[0-9_\\-]+', ' ', filepath).split('/')[-1].split('.lab')[0].split('CD')[-1].strip().lower()\n for line in text:\n line = line.split() \n start = float(line[0])\n key = line[-1]\n timestamps.append(start)\n keys.append(key)\n # Grab the last timestamp in the song.\n timestamps.append(float(text[-1].split()[1]))\n\n keys = clean_ys(keys)\n keys = list(map(lambda x: relative_map[x], keys))\n\n return timestamps, keys", "def getDuration(self):\n #return np.sum(self.subintinfo['TSUBINT']) #This is constant.\n return np.sum(self.getSubintinfo('TSUBINT')) #This is constant.", "def summary(self) -> Dict[str, Dict[str, float]]:\n vals: Dict[str, List[float]] = defaultdict(list)\n if not self.steps: # pragma: no 
cover\n return {}\n\n for timing_dict in self._timings:\n for step in self.steps:\n if step in timing_dict:\n vals[step].append(timing_dict[step])\n summary = {}\n for step in self.steps:\n if vals[step]:\n summary[step] = {\n \"cnt\": len(vals[step]),\n \"sum\": sum(vals[step]),\n \"min\": min(vals[step]),\n \"max\": max(vals[step]),\n \"avg\": sum(vals[step]) / len(vals[step]),\n }\n return summary", "def getTimes():", "def getTimes():", "def getTimes():", "def extract_times(raw_times_dict):\n actual_times = {}\n if raw_times_dict[\"realtime\"] is not None:\n actual_times[\"realtime\"] = raw_times_dict[\"realtime_t\"]\n\n if raw_times_dict[\"realtime_noloads\"] is not None:\n actual_times[\"realtime_noloads\"] = raw_times_dict[\"realtime_noloads_t\"]\n\n if raw_times_dict[\"ingame\"] is not None:\n actual_times[\"ingame\"] = raw_times_dict[\"ingame_t\"]\n\n return actual_times", "def _timedesc_traversal(self, root):\n return map(int, self.timedesc(root))", "def timeframes():\n return '1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D', '7D', '14D', '1M'", "def measure_dict():\n out = base_dict()\n out['mro']['current'] = ['Measure']\n out['name']['current'] = 'Measure'\n ao(out, 'nSamples', 'Integer', 1, readLevel=3)\n ao(out, 'id', 'String', 'Conversion source ID', readLevel=3)\n ao(out, 'uid', 'String', 'Unique ID', readLevel=5)\n ao(out, 'date', 'Date', '00:00:00 01/01/2000', name='Test date')\n ao(out, 'zerotime', 'Float', name='Acquisition starting time', readLevel=4)\n ao(out, 'elapsed', 'Float', name='Test duration', unit='second')\n ao(out, 'operator', 'String', name='Operator')\n return out", "def getRotationKeyTime(self, index, view) -> float:\n ...", "def transform_times(event):\n if isinstance(event, dict):\n retval = {}\n for key, value in event.items():\n if key == 'times' and len(value) == 2:\n retval[key] = [transform_time(t) for t in value]\n else:\n retval[key] = transform_times(value)\n else:\n retval = event\n return retval", "def duration(self):\n pass", "def duration(self):\n pass" ]
[ "0.6176038", "0.59368414", "0.5897897", "0.58419317", "0.5807034", "0.5672501", "0.56141585", "0.5577464", "0.555346", "0.5521533", "0.5519391", "0.55059147", "0.5491283", "0.54520303", "0.5369046", "0.5366904", "0.53086877", "0.52912253", "0.52870804", "0.5256063", "0.5221701", "0.5210851", "0.51985115", "0.51917607", "0.51570517", "0.5146656", "0.5144112", "0.5126075", "0.51170737", "0.511182", "0.51103926", "0.5087729", "0.50744784", "0.5074042", "0.5061073", "0.5056842", "0.5056786", "0.50101805", "0.5009491", "0.4994144", "0.4987129", "0.49863508", "0.49780032", "0.49713492", "0.49687114", "0.49647552", "0.4961274", "0.49599695", "0.49530515", "0.49480727", "0.49469417", "0.4939983", "0.4931111", "0.4926713", "0.49264643", "0.4926178", "0.49200597", "0.4908793", "0.49069113", "0.4898231", "0.48961738", "0.48870423", "0.4884477", "0.4881886", "0.4880404", "0.48802528", "0.4871733", "0.48601142", "0.48587728", "0.48524964", "0.48455644", "0.48438498", "0.4819308", "0.48179373", "0.48133907", "0.4804386", "0.48022628", "0.48014495", "0.47979686", "0.47978908", "0.47948337", "0.47936893", "0.47921288", "0.47888908", "0.47832212", "0.47714844", "0.47691244", "0.47565186", "0.47542432", "0.47390598", "0.47390598", "0.47390598", "0.47174007", "0.4717359", "0.47133234", "0.47111815", "0.4709584", "0.47075096", "0.47032014", "0.47032014" ]
0.6748647
0
Convert any time code or alias to a linear float value. e.g. '1.2' parses to > 1.5 'match_next' resolves to > 4.0
def normalise_duration(index): key = sorted_keys[index] item = data_float_indexed[key] if not item: item = {'duration': 'auto'} data_float_indexed[key] = item duration = attempt_parse_key_timecode(item.get('duration')) if duration == 'match_next': duration = normalise_duration(index+1) if duration == 'match_prev': duration = normalise_duration(index-1) if isinstance(duration, str) and duration.startswith('match '): duration = normalise_duration(sorted_keys.index(float(duration.strip('match ')))) if (not duration or duration == 'auto') and index < len(sorted_keys)-1: duration = sorted_keys[index+1] - key if not isinstance(duration, float): #log.info('Unparsed duration: {0}'.format(duration)) duration = self.DEFAULT_DURATION if duration != item.get('duration'): item['duration'] = duration return duration
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_speed(as_str: str) -> float:\n return float(as_str.rstrip(\"x\"))", "def _fs (v):\r\n try : \r\n v = float(v)\r\n except : \r\n v = tuple([float (ss) for ss in \r\n v.replace('(', '').replace(')', '').split(',')])\r\n return v", "def to_float(x, key):\n x = x.strip()\n if not x or x in ('NA', 'n/a'):\n return None\n if '.' in x:\n # There are '.'s, so commas are placeholders\n x = x.replace(',', '') \n if x.endswith('ft'):\n scale = 0.3048\n x = x[:-2].strip()\n else:\n scale = 1 \n try:\n return scale * float(x)\n except:\n logging.warn('Could not convert %s value %s to float', key, x)\n return None", "def value(self):\n float_str = first_token(self._node).spelling\n\n # Remove any C-specific suffix (f, F, l, L) so we can use Python's\n # float constructor to parse the string.\n float_str = re.sub(r'^(.*)[fFlL]$', r'\\1', float_str)\n\n return float(float_str)", "def getFloat(string):\n return (0.0)", "def parse_time(value: str) -> float:\n return float(value[:-1]) * TIME[value[-1]]", "def __parse_traffic(str):\n return float(str.strip().split(\",\")[0].replace('.',''))", "def str2floatTrap(self, someStr):\n\n tempStr = someStr\n\n if tempStr.startswith('('):\n tempStr = tempStr[1:]\n\n if tempStr.endswith(')'):\n tempStr = tempStr[:len(tempStr) - 1]\n\n return float(tempStr)", "def parse_float(val, fn):\n return float(val)", "def _to_float(self, s: str) -> float:\n return int(s[:-1]) / 1e9 if s.endswith('n') else float(s[:-1])", "def _parse_float(\n value_expr: str, target_expr: str, ref_parts: List[str],\n a_type: mapry.Float, auto_id: mapry.py.generate.AutoID) -> str:\n uid = auto_id.next_identifier()\n\n return _PARSE_FLOAT_TPL.render(\n uid=uid,\n value_expr=value_expr,\n ref_parts=ref_parts,\n target_expr=target_expr,\n a_type=a_type).rstrip(\"\\n\")", "def convert_to_float(word: str) -> float:\n return round(float(word), 2)", "def __parse_float(str):\n return float(str.strip().replace(',','.'))", "def stof(fstr):\n return float(fstr.replace(',', '.'))", "def float_from_string(data):\n return float(maybe_number(data))", "def SI_string_to_float(inStr, debug = False):\n func_name = \"SI_string_to_float\"\n \n # Debug print incoming string. \n if debug: print(\"DEBUG: (Func = %s): Input-str: %s\" %( func_name, inStr ))\n \n #Remove all spaces from incoming string. \n inStr = inStr.replace(\" \", \"\"); \n if debug: print(\"DEBUG: (Func = %s): Removed spaces: %s\" %( func_name, inStr ))\n \n # Allocate return value, and search in\n result = None\n letters = re.search( r'([\\d\\.]+)([a-z A-Z]+)', inStr)\n \n # Query if match was found. If not, print warning then try to directly convert incoming string.\n if letters:\n try:\n value = float(letters.group(1))\n scale = float(SI_UNITS[letters.group(2)])\n result = value * scale\n if debug: print(\"DEBUG: (Func = %s): Value: %f, scale: %f, result: %f\"%(func_name, value,scale,result))\n except:\n print(\"ERROR: (Func = %s): Couldn't extract value and SI-Unit.\"%func_name)\n print(\" Possible issue with seaching 'SI_UNITS for (%s)\"% scale)\n else:\n print(\"WARNING: (Function = %s) Couldn't extract value and SI-Unit. Will attempt direct float conversion... 
\"%func_name)\n #print(\" Used the following regex: '([\\d\\.]+)([a-z A-Z]+)'\")\n result = float(inStr) # TODO : Insert try catch \n \n return result", "def extract_float(self, s: str) -> float:\n f = re.findall(r'([0-9]*[.]*[0-9]+)', s)\n return float(f[0]) if len(f) > 0 else None", "def _convert_to_float(s):\n try:\n return float(s)\n except:\n return s", "def parsefloat(el):\n return parse(el, float)", "def parse_float_value(self, value: str):\n value, power = self.parse_value_ending(value)\n try:\n value = float(value)\n return value * 10 ** power\n except:\n raise Exception(\"Failed to parse the __value.\")", "def make_float(value):\n return float(value[0])", "def ffloat(string):\n try:\n return float(string.strip())\n except:\n return 0", "def parseFloat(s, ret=0.0):\n if not isinstance(s, str):\n return float(s)\n elif s:\n if s[0] in \"+-\":\n ts = s[1:]\n else:\n ts = s\n\n if ts and ts.count(\".\") <= 1 and all([_ in \".0123456789\" for _ in ts]):\n return float(s)\n\n return ret", "def txt2float(file: str) -> float:\n return float(get_first_line(file))", "def eval_number(parse_result):\r\n return super_float(\"\".join(parse_result))", "def __time_to_float(time_value):\n return time_value.hour/24 + time_value.minute/(24*60) + time_value.second/(24*60*60) + round(time_value.microsecond, 3)/(24*60*60*1e6)", "def testtofloatString ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTup1, expRes in self.knownFloatStringValues:\r\n\t\t\tfrac1 = eval ( r.sub ( 'frac.frac', fracTup1 ) ) \r\n\t\t\tself.assertEqual ( frac1.tofloatString (), expRes )", "def convertFloat(s):\n try:\n float(s)\n return \"FLOAT\"\n except:\n return s", "def convert_to_float(lst, purpose):\n float_times = []\n len_times = 0\n for t in lst:\n if (str(t)[:3] != 'DNF') and (t != '') and (str(t)[-1] != '+'):\n float_times.append(float(t))\n len_times += 1\n elif str(t)[-1] == '+':\n if purpose == 'average':\n float_times.append(float(t[:-1]))\n len_times += 1\n elif purpose == 'single':\n float_times.append(t)\n len_times += 1\n return float_times, len_times", "def find_float(input: str) -> float:\n str_split = input.split('<@')\n if (len(str_split) == 0):\n raise AmountMissingException(\"amount_not_found\")\n input_text = str_split[0]\n regex = r'(?:^|\\s)(\\d*\\.?\\d+)(?=$|\\s)'\n matches = re.findall(regex, input_text, re.IGNORECASE)\n if len(matches) >= 1:\n return abs(float(matches[0].strip()))\n raise AmountMissingException(\"amount_not_found\")", "def string_to_float(in_str):\n if \"D\" not in in_str:\n in_str = in_str.replace(\"-\",\"D-\")\n out_float = float(in_str.replace(\"D\", \"E\"))\n return out_float", "def convert_to_float(s):\n try:\n return float(s)\n except TypeError:\n return None", "def parse_float(s):\n return float(s.replace(',', '.'))", "def convert_str_float(x):\n\ttry:\n\t\treturn float(x)\n\texcept ValueError:\n\t\tprint(\"must be a number\")", "def floatify(string):\n temporary_string = \"0.\" + string[:-1]\n return float(temporary_string)", "def valf(node: md.Document) -> float:\n try:\n return float(val(node))\n except ValueError:\n return None", "def Float(val):\n try:\n return float(val)\n except ValueError:\n return ''", "def convert_to_float(series):\n series = series.str[:-4].str.replace('.', '').str.replace(',', '.').astype(float)\n return series", "def question_6():\n data_strings = [\"Result = 95%\", \"Final Score = 8%\", \"Relative Value = 178%\",\n \"Something else that's very important = 9.2%\", \"x = 42%\"]\n for string in data_strings:\n start_index = 
string.find(\"= \")\n value = float(string[start_index + 2:-1])\n print(value)", "def parse_float2(s):\n s_list = s.split()\n n = len(s_list)\n if n==0:\n return None\n elif n==1: \n s_list *= 2\n return [float(item) for item in s_list[:2]]", "def _getDuration(v, line, text):\n if \"/\" in v:\n try:\n return eval(v + \".\")\n except:\n raise ValueError(\"invalid duration value '%s' on line %d: %s\" %\n (v, line, text))\n return float(v)", "def _string_to_float(s):\n try:\n f = float(s)\n return f\n except ValueError:\n return None", "def extract_subs_value(text):\n parts = text.split()\n value = float(parts[-1])\n\n return value", "def handle_value(value):\n\n if value[-1] == 'x':\n return float(value[0:-1])\n\n if value[-1] == '%':\n return float(value[0:-1])\n\n if value[0].isdigit():\n return bytify(value)\n\n raise ValueError", "def spice_unit_convert(valuet, restrict=[]):\n # valuet is a tuple of (unit, value), where \"value\" is numeric\n # and \"unit\" is a string. \"restrict\" may be used to require that\n # the value be of a specific class like \"time\" or \"resistance\". \n\n # Recursive handling of '/' and multiplicatioon dot in expressions\n if '/' in valuet[0]:\n parts = valuet[0].split('/', 1)\n result = numeric(spice_unit_convert([parts[0], valuet[1]], restrict))\n result /= numeric(spice_unit_convert([parts[1], \"1.0\"], restrict))\n return str(result)\n\n if '\\u22c5' in valuet[0]:\t# multiplication dot\n parts = valuet[0].split('\\u22c5')\n result = numeric(spice_unit_convert([parts[0], valuet[1]], restrict))\n result *= numeric(spice_unit_convert([parts[1], \"1.0\"], restrict))\n return str(result)\n\n if '\\u00b2' in valuet[0]:\t# squared\n part = valuet[0].split('\\u00b2')[0]\n result = numeric(spice_unit_unconvert([part, valuet[1]], restrict))\n result *= numeric(spice_unit_unconvert([part, \"1.0\"], restrict))\n return str(result)\n\n if valuet[0] == \"\":\t\t# null case, no units\n return valuet[1]\n\n for unitrec in unittypes:\t# case of no prefix\n if re.match('^' + unitrec + '$', valuet[0]):\n if restrict:\n if unittypes[unitrec] == restrict.lower():\n return valuet[1]\n else:\n return valuet[1]\n\n for prerec in prefixtypes:\n for unitrec in unittypes:\n if re.match('^' + prerec + unitrec + '$', valuet[0]):\n if restrict:\n if unittypes[unitrec] == restrict.lower():\n newvalue = numeric(valuet[1]) * prefixtypes[prerec]\n return str(newvalue)\n else:\n newvalue = numeric(valuet[1]) * prefixtypes[prerec]\n return str(newvalue)\n\n # Check for \"%\", which can apply to anything.\n if valuet[0][0] == '%':\n newvalue = numeric(valuet[1]) * 0.01\n return str(newvalue)\n \n if restrict:\n raise ValueError('units ' + valuet[0] + ' cannot be parsed as ' + restrict.lower())\n else:\n # raise ValueError('units ' + valuet[0] + ' cannot be parsed')\n # (Assume value is not in SI units and will be passed back as-is)\n return valuet[1]", "def test_normal_decimal_input(self):\r\n ws_leader = \"S. 
O'Neal (14.9)\"\r\n res = treat_input(ws_leader, type=\"float\")\r\n assert res == 14.9", "def read_endf_float(string):\n if string.strip() == \"\":\n return 0.0\n if \".\" in string:\n strsplit = string.split('.')\n return float(strsplit[0]+\".\"+strsplit[1].replace(\"-\",\"e-\").replace(\"+\",\"e+\"))\n else:\n return float(string)", "def str2float(\n min: Optional[float] = None,\n max: Optional[float] = None,\n step: Optional[int] = None, # pylint: disable=redefined-outer-name\n include_min: bool = True,\n include_max: bool = True,\n) -> Callable[[str], float]:\n\n def _parse(string: str) -> float:\n return _str2range(\n string, float, RangeParam(min=min, max=max, step=step, include_min=include_min, include_max=include_max)\n )\n\n return _parse", "def parse_moneyline(string):\n if string.lower() in [\"ev\", \"even\", \"100\", \"+100\"]:\n return 1.0\n elif re.match(\"[+-][0-9]+?\", string):\n line = float(string[1:])\n if string[0] == '+':\n return line/100.0\n else:\n return 100.0/line\n elif re.match(\"[0-9]+?\", string):\n line = float(string)\n return line/100.0\n return", "def parse_dollars_to_float(x):\n import locale\n locale.setlocale( locale.LC_ALL, 'en_US.UTF-8' )\n try:\n if x.startswith('('):\n #print(x)\n y = -1.0*locale.atof(x[2:-1])\n #print(x,y)\n elif x.startswith('$'):\n y = locale.atof(x[1:])\n except AttributeError:\n y = np.nan\n return y", "def read_float(v):\n if v.strip() == '':\n return 0.\n try:\n return float(v)\n except ValueError:\n # ENDF6 may omit the e for exponent\n return float(v[0] + v[1:].replace('+', 'e+').replace('-', 'e-')) # don't replace leading negative sign", "def visit_number(self, node, children):\n if self.debug:\n print(\"Converting {}.\".format(node.value))\n return float(node.value)", "def str_to_float(temp_list):\n float_temp_list = [float(temp) for temp in temp_list]\n return float_temp_list", "def process(self, value):\n return float(value)", "def convertUnit(*args, fromUnit: AnyStr=\"\", toUnit: AnyStr=\"\", **kwargs)->float:\n pass", "def sci_notation_to_float(n):\n\n if 'e' in n:\n exponent = float(n[n.find('e') + 1:])\n number = float(n[:n.find('-') - 1])\n number *= 10**exponent\n \n return number\n\n return n", "def _get_accuracy(text):\n sta_obj = [m.start() for m in re.finditer('%',text)]\n return([float(text[x-3:x:1]) for x in sta_obj])", "def letters_to_float(let):\n\n\texpr = str(let[1]\t\t\t\t\t\t# integer part\n\t\t+ '.' + let[2:]\t\t\t\t\t\t# decimals\n\t\t+ 'e' + _exponents[let[0].lower()]\t# exponent\n\t)\n\n\ttry:\n\t\treturn (1 - 2*let[0].isupper())*float(expr)\n\texcept ValueError: return 0", "def flex(v=0):\n return float(v)", "def get_float(self, item: str) -> float:\n return float(self[item])", "def super_float(text):\r\n if text[-1] in SUFFIXES:\r\n return float(text[:-1]) * SUFFIXES[text[-1]]\r\n else:\r\n return float(text)", "def possible_float(arg):\n try:\n return float(arg)\n except ValueError:\n logging.info(f'failed to parse {arg} as a float, treating it as a string')\n return arg", "def string_to_float(value):\n # if no periods (.) then assume commas are decimal separators\n if '.' 
not in value:\n value = value.replace(',', '.')\n # if decimals exist then simply remove commas\n else:\n value = value.replace(',', '')\n\n return float(value)", "def convertRate(row):\n if row == 'null':\n return 1.0\n elif ':' in row:\n rows = row.split(':')\n return 1.0 - float(rows[1])/float(rows[0])\n else:\n return float(row)", "def convert_to_floating(string):\n return \"\".join([\"\\u00b7\" if char==\".\" else char for char in string])", "def get_float_data(d, name):\n data = list(d[name])\n while '' in data:\n data.remove('')\n\n return list(map(float, data))", "async def test_floating_point_encoding(self, r):\n await r.flushdb()\n timestamp = 1349673917.939762\n await r.zadd('a', timestamp, 'a1')\n assert await r.zscore('a', 'a1') == timestamp", "def float(x) -> float:\n pass", "def get_price(str_val):\n return float(str_val.replace('.', '').replace(',', '.'))", "def isFloat(string):\n return (True)", "def euroadolaramerica(x):\r\n\tconversion = x * 1.35750\r\n\treturn conversion", "def convert_to_number(text):\n try:\n value = float(text)\n return value\n except ValueError:\n return 0.0", "def get_value_from_string(text):\n if len(text.strip()) == 0:\n return None\n\n try:\n if '-' in text or '+' in text:\n tl = [ti for ti in text.split('-')]\n for i in range(1, len(tl)):\n tl[i] = '-' + tl[i]\n ntl = []\n for ti in tl:\n ntl = ntl + ti.split('+')\n ntl = [ti.replace(' ', '') for ti in ntl]\n values = [float(ti) for ti in ntl if len(ti) > 0]\n value = sum(values)\n else:\n value = float(text)\n return value\n\n except Exception:\n return None", "def _get_number_from_string(x):\n try:\n return float(x)\n except ValueError:\n raise ValueError('Unknown element')", "def to_float(s):\n try:\n return float(s)\n except ValueError:\n return np.nan", "def parse_float(value):\n try:\n return float(value)\n except (ValueError, TypeError):\n return None", "def to_float(self) -> float:\n return (1.0 * self.as_int) / sensor_fixed_point_conversion", "def check_for_float(check):", "def _to_float(maybe_float):\n try:\n return float(maybe_float)\n except ValueError:\n return maybe_float", "def ijson_decimal_to_float(event):\n if event[1] == 'number' and isinstance(event[2], decimal.Decimal):\n return event[0], event[1], float(event[2])\n else:\n return event", "def __float__(self) -> float:\n return self._translate_in_type(float, self.integer)", "def float_format(self):\n ...", "def beatlevel_float(self):\n\n parts = self.header['BeatLevel'].split('/')\n return float(parts[0])/float(parts[1])", "def char_float(inp_char):\n try:\n nFloat = float(inp_char)\n except:\n nFloat = 0.0\n return nFloat", "def getFloat(self, address: ghidra.program.model.address.Address) -> float:\n ...", "def _ConvertFloat(value, field):\n if isinstance(value, float):\n if math.isnan(value):\n raise ParseError('Couldn\\'t parse NaN, use quoted \"NaN\" instead')\n if math.isinf(value):\n if value > 0:\n raise ParseError('Couldn\\'t parse Infinity or value too large, '\n 'use quoted \"Infinity\" instead')\n else:\n raise ParseError('Couldn\\'t parse -Infinity or value too small, '\n 'use quoted \"-Infinity\" instead')\n if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT:\n # pylint: disable=protected-access\n if value > type_checkers._FLOAT_MAX:\n raise ParseError('Float value too large')\n # pylint: disable=protected-access\n if value < type_checkers._FLOAT_MIN:\n raise ParseError('Float value too small')\n if value == 'nan':\n raise ParseError('Couldn\\'t parse float \"nan\", use \"NaN\" instead')\n try:\n # 
Assume Python compatible syntax.\n return float(value)\n except ValueError:\n # Check alternative spellings.\n if value == _NEG_INFINITY:\n return float('-inf')\n elif value == _INFINITY:\n return float('inf')\n elif value == _NAN:\n return float('nan')\n else:\n raise ParseError('Couldn\\'t parse float: {0}'.format(value))", "def convertToFloat(vote):\n if vote == 'y':\n return 1\n if vote == 'n':\n return -1\n if vote == '?':\n return 0", "def get_float(self, name):\n return self.field(name).toDouble()[0]", "def to_factor(self, time: float) -> float:\n pass", "def parse_reading(data):\n pat =re.compile('([1-9][0-9]*)')\n datum = data.split('\\n')\n #print datum\n for d in datum:\n m = pat.search(d)\n if m is not None:\n return float(m.group(1))\n return float(-1)", "def convert_to_float(percentage):\n return float(percentage.strip('%')) / 100", "def test_float():\n floatify = fields.FloatField().adapt\n\n for input, expect in [\n (1.1, 1.1),\n (11, 11.0),\n (int(5.7), 5)\n ]:\n assert floatify(input) == expect", "def _parse_float_uncertainty(x, dx):\n\n if not isinstance(x, str):\n raise NNDCRequestError(f\"Value must be a string: {x}\")\n if not isinstance(dx, str):\n raise NNDCRequestError(f\"Uncertainty must be a string: {dx}\")\n # ignore percents\n if \"%\" in x:\n x = x.replace(\"%\", \"\")\n # ignore unknown ground state levels (X, Y, Z, W)\n for sym in [\"X\", \"Y\", \"Z\", \"W\"]:\n if \"+\" + sym in x:\n x = x.replace(\"+\" + sym, \"\")\n elif x == sym:\n x = \"0\"\n # handle special ENSDF abbreviations, e.g.,\n # http://www.iaea.org/inis/collection/NCLCollectionStore/_Public/14/785/14785563.pdf\n # \"One of the following expressions:\n # LT, GT, LE, GE, AP, CA, SY\n # for less than, greater than, less than or equal to greater\n # than or equal to. 
approximately equal to, calculated, and\n # from systematics, respectively.\"\n for sym in [\"*\", \"<\", \">\", \"=\", \"~\", \"?\", \"@\", \"&\", \"P\", \"N\"]:\n while sym in x:\n x = x.replace(sym, \"\")\n # correct specific typos in the database\n if \"E-11 0\" in x:\n x = x.replace(\"E-11 0\", \"E-11\")\n if \"E-12 0\" in x:\n x = x.replace(\"E-12 0\", \"E-12\")\n if \"0.0000 1\" in x:\n x = x.replace(\"0.0000 1\", \"0.0000\")\n if \"2 .8E-7\" in x:\n x = x.replace(\"2 .8E-7\", \"2.8E-7\")\n if \"8 .0E-E5\" in x:\n x = x.replace(\"8 .0E-E5\", \"8.0E-5\")\n # handle blank or missing data\n if x == \"\" or x == \" \":\n return None\n if \"****\" in dx:\n dx = \"\"\n elif dx in [\"LT\", \"GT\", \"LE\", \"GE\", \"AP\", \"CA\", \"SY\"]:\n dx = \"\"\n try:\n x2 = float(x)\n except ValueError:\n raise NNDCRequestError(f'Value cannot be parsed as float: \"{x}\"')\n if dx == \"\":\n return x2\n # handle multiple exponents with some uncertainties, e.g., \"7E-4E-5\"\n tokens = dx.split(\"E\")\n if len(tokens) == 3:\n dx = \"E\".join(tokens[:2])\n factor = pow(10.0, int(tokens[2]))\n else:\n factor = 1.0\n try:\n dx2 = float(dx) * factor\n except ValueError:\n raise NNDCRequestError(f'Uncertainty cannot be parsed as float: \"{dx}\"')\n return uncertainties.ufloat(x2, dx2)", "def c2f(t):\r\n return round(9*t/5 + 32)", "def f_number(node, pos, size, context, v):\n\n if xpath.tools.nodesetp(v):\n v = xpath.tools.string(v, context)\n try:\n return float(v)\n except ValueError:\n return float('NaN')", "def convert(srat):\n try:\n return float(srat)\n except ValueError:\n num, denom = srat.split('/')\n return float(num) / float(denom)", "def _parse_test_duration(duration_str):\n try:\n if duration_str.endswith(\"s\"):\n duration_str = duration_str[:-1]\n return float(duration_str)\n except:\n return None", "def _resolve_float(self, item):\n if not type(item) in (float, int, str, np.number) and not np.isscalar(item):\n if isinstance(item, np.ndarray) and item.shape == ():\n item = float(item)\n else:\n item = item[0]\n\n return item", "def convert_str(strr):\n if strr.lower() == 'true' or strr.lower() == 't':\n return True\n if strr.lower() == 'false' or strr.lower() == 'f':\n return False\n\n try:\n float_value = float(strr)\n return float_value\n except ValueError as ex:\n print(repr(ex))\n\n return strr", "def clean_value(self, value):\n return float(value.replace('.', '').replace(',', '.'))", "def get_number(text):\n if text is None:\n return None\n text = text.strip()\n if text == \"\":\n retval = None\n elif text == \"MM\":\n retval = None\n elif text == \"T\":\n retval = TRACE_VALUE\n else:\n number = re.findall(r\"[\\-\\+]?\\d*\\.\\d+|[\\-\\+]?\\d+\", text)\n if len(number) == 1:\n if text.find(\".\") > 0:\n retval = float(number[0])\n else:\n retval = int(number[0])\n else:\n LOG.warning(\"get_number() failed for |%s|\", text)\n retval = None\n return retval" ]
[ "0.6765329", "0.66060674", "0.6591223", "0.6582236", "0.65468556", "0.65312254", "0.6518258", "0.645661", "0.6408871", "0.6333807", "0.63263595", "0.62676144", "0.62387794", "0.6207294", "0.61648494", "0.61467373", "0.61412436", "0.61278975", "0.6122148", "0.6112401", "0.60823154", "0.6071225", "0.60369945", "0.5987835", "0.5975778", "0.59491265", "0.5945117", "0.5918884", "0.5890829", "0.58543795", "0.5842475", "0.58256364", "0.5809539", "0.58089393", "0.5795404", "0.5765368", "0.57406497", "0.5710321", "0.5702829", "0.56982243", "0.56963956", "0.5672935", "0.5669628", "0.5651406", "0.56425077", "0.56347394", "0.5628859", "0.56145793", "0.5609235", "0.5606579", "0.55945265", "0.5586088", "0.55824715", "0.55674356", "0.55652773", "0.5564257", "0.5551613", "0.5551326", "0.55434406", "0.55428046", "0.55158603", "0.550071", "0.5486044", "0.5479348", "0.5477621", "0.54675466", "0.54674983", "0.54640466", "0.54568505", "0.5452103", "0.54504704", "0.5436182", "0.54178977", "0.5409634", "0.5405972", "0.5404724", "0.54027677", "0.5398575", "0.5397991", "0.53956", "0.53832936", "0.5383282", "0.537993", "0.5374893", "0.5374849", "0.5369788", "0.5367455", "0.53668785", "0.536169", "0.5359839", "0.53548443", "0.534555", "0.53423095", "0.5337398", "0.5336473", "0.53330207", "0.5330269", "0.53267676", "0.53238106", "0.53231096", "0.5319131" ]
0.0
-1
Once the order of the items is known, we can iterate over the scenes calculating/prerendering the dmx state for each section. This makes seeking much faster
def pre_render_scene_item(self, current_scene_item, previous_scene_item): assert current_scene_item current_scene_dmx = current_scene_item.setdefault(Scene.SCENE_ITEM_DMX_STATE_KEY, {}) # Aquire a reference to the previous DMX state current_scene_dmx['previous'] = copy.copy(previous_scene_item.get(Scene.SCENE_ITEM_DMX_STATE_KEY, {})['target']) if previous_scene_item else AbstractDMXRenderer.new_dmx_array() # The target state is a copy of the previous state current_scene_dmx['target'] = copy.copy(current_scene_dmx['previous']) # Modify the starting/previous state based on any overrides in this scene (this is a shortcut feature as I kept requireing this) self.render_state_dict(current_scene_item.get('state_start'), current_scene_dmx['previous']) # Modify the target state based on this scene item self.render_state_dict(current_scene_item.get('state'), current_scene_dmx['target'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def loadData(self, actions):\n # begin to clear the scene\n self.scene.clear()\n self.scene.drawGrid()\n \n # and draw all items\n maxItemId = self.itemId\n for graphicalItem in actions:\n\n # extract item info\n itemType = int(graphicalItem['item-type'])\n itemId = graphicalItem['item-id']\n if sys.version_info > (3,): # py3 support\n graphicalItem['item-text'] = graphicalItem['item-text']\n else:\n graphicalItem['item-text'] = graphicalItem['item-text'].decode('utf8')\n itemText = graphicalItem['item-text']\n posX = float(graphicalItem['pos-x'])\n posY = float(graphicalItem['pos-y'])\n itemData = graphicalItem['item-data']\n\n\n # define the color of the item\n color = self.getItemColor(itemType=itemType)\n \n # add item in first\n self.addItem( itemType=itemType, itemId=itemId, itemText=itemText, \n itemColor=QBrush(color), itemPos=QPointF(posX,posY), itemData=itemData )\n \n # kept the max id\n if int(itemId) > maxItemId:\n maxItemId = int(itemId)\n \n self.itemId = maxItemId\n\n # endly draw all arrows\n for curItem in self.scene.items():\n for saveItem in actions:\n if not isinstance(curItem, DiagramItem):\n continue\n if curItem.itemId == int(saveItem['item-id']):\n if 'item-links' in saveItem:\n if isinstance(saveItem['item-links'], dict):\n saveItem['item-links'] = [saveItem['item-links']]\n for lnk in saveItem['item-links']:\n itemId = lnk['next-item-id']\n toHotspotId = lnk['to-hotspot-id']\n fromHotspotId = lnk['from-hotspot-id']\n \n endItem = self.findItem(id=itemId)\n if endItem is not None:\n self.trace( \"Arrow: %s -> %s\" % (fromHotspotId,toHotspotId) )\n arrow = Arrow(curItem, endItem, toHotspotId=toHotspotId, fromHotspotId=fromHotspotId)\n arrow.setColor(self.scene.myLineColor)\n curItem.addArrow(arrow)\n endItem.addArrow(arrow)\n arrow.setZValue(-1000.0)\n self.scene.addItem(arrow)\n arrow.updatePosition()", "def __getitem__(self, index):\n\n #get the image name \n image_names = self.image_names[index]\n\n #make single name a list\n if(type(image_names) is not list):\n image_names = [image_names]\n\n image_target_list = []\n for image_name in image_names:\n\n #build the path to the image and annotation file\n #see format tab on Get Data page on AVD dataset website\n if image_name[0] == '0':\n scene_type = 'Home'\n else:\n scene_type = 'Office'\n scene_name = scene_type + \"_\" + image_name[1:4] + \"_\" + image_name[4]\n \n #read the image and bounding boxes for this image\n #(doesn't get the movement pointers) \n img = (Image.open(os.path.join(self.root,scene_name, \n images_dir,image_name)))\n with open(os.path.join(self.root,scene_name,annotation_filename)) as f:\n annotations = json.load(f)\n target = annotations[image_name]['bounding_boxes'] \n \n #apply target transform\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n #crop images for classification if flag is set\n if self.classification:\n img = np.asarray(img)\n images = []\n ids = []\n for box in target:\n cur_img = Image.fromarray(img[box[1]:box[3],\n box[0]:box[2],\n :])\n if self.transform is not None:\n cur_img = self.transform(cur_img)\n images.append(cur_img)\n ids.append(box[4])\n\n img = images\n target = ids\n \n #apply image transform \n if self.transform is not None:\n img = self.transform(img)\n\n image_target_list.append([img,target])\n\n #special case for single image/label\n if(len(image_target_list) == 1):\n image_target_list = image_target_list[0]\n\n return 
image_target_list", "def calculate_scene(self):\n \n if self.is_game_going:\n for pl in self.player_list:\n pl.make_step()\n\n #obj_list = self.player_list + self.foot_list + self.border_list\n obj_list = self.player_list + self.grafik_item_list\n for pl in self.player_list:\n pl.check_for_intersection(obj_list)\n\n for pl in self.player_list:\n if pl.status_remove:\n print(\"x killll xxx\")\n self.stop_game()\n\n # filter removed/killed objecets\n for item in self.grafik_item_list:\n if item.status_remove:\n if item.type == \"food\":\n item.set_random_position()\n\n # end game if a player is removed", "def __handle_view_item(self, gamestate_component):", "def voxelize4(self, materials):\n\t\tlayers = list()\n\t\tlayersR = list()\n\t\tlayersG = list()\n\t\tlayersB = list()\n\t\t\n\t\tlayerMaterial = list()\n\t\tself.volumeComposition = list()\n\t\tfor l in range(len(materials)):\n\t\t\tlayerMaterial.append(list())\n\t\t\tself.volumeComposition.append(list())\n\n\t\tvolumeGeneral = list()\n\t\tm = 0\n\t\tfor i in self.slicePoints:\n\t\t\t#print self.boolResult[m].shape\n\t\t\ttupleResultR = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultG = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultB = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleMaterial = list()\n\t\t\tfor l in range(len(materials)):\n\t\t\t\ttupleMaterial.append(numpy.zeros(self.boolLayers[m].shape, dtype=float))\n\t\t\t\n\t\t\tj = numpy.nditer(self.boolLayers[m], flags=['multi_index'], op_flags=['readwrite'])\n\t\t\twhile not j.finished:\n\t\t\t\tif j[0] == True:\n\t\t\t\t#tupleResult[j.multi_index] = round((i[direction] - minValue) * ratio)\n\t\t\t\t#tupleResult[j.multi_index] = 78\n\t\t\t\t\tprint type(j.multi_index)\n\t\t\t\t\tprint j.multi_index\n\t\t\t\t\t#tupleResult[j.multi_index] = planeWeight * math.fabs((j.multi_index[1] - planeOrigin[0]) * planeNormal[0] + (j.multi_index[0] - planeOrigin[1]) * planeNormal[1] + (i[2] - planeOrigin[2]) * planeNormal[2]) + pointWeight * math.sqrt(math.pow((j.multi_index[1]- pointValue[0]),2) + math.pow((j.multi_index[0] - pointValue[1]), 2)+math.pow((i[2] - pointValue[2]),2))\n\t\t\t\t\t\n\t\t\t\t\tdistanceList = []\n\t\t\t\t\ttotalDistance = 0.0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Plane\":\n\t\t\t\t\t\t\tGplane = math.fabs((j.multi_index[1] - materials[k].origin[0]) * materials[k].normal[0] + (j.multi_index[0] - materials[k].origin[1]) * materials[k].normal[1] + (i[2] - materials[k].origin[2]) * materials[k].normal[2])\n\t\t\t\t\t\t\tdistanceList.append(Gplane)\n\t\t\t\t\t\t\ttotalDistance += Gplane\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Point\":\n\t\t\t\t\t\t\tGpoint = (math.sqrt(math.pow((j.multi_index[1]- materials[k].point[0]),2) + math.pow((j.multi_index[0] - materials[k].point[1]), 2)+math.pow((i[2] - materials[k].point[2]),2)))\n\t\t\t\t\t\t\tdistanceList.append(Gpoint)\n\t\t\t\t\t\t\ttotalDistance += Gpoint\n\t\t\t\t\tfor k in range(len(distanceList)):\n\t\t\t\t\t\tdistanceList[k] = distanceList[k] / totalDistance\n\t\t\t\t\t\tdistanceList[k] = 1.0 - distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleResultR[j.multi_index] += materials[k].materialColor[0] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultG[j.multi_index] += materials[k].materialColor[1] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultB[j.multi_index] += 
materials[k].materialColor[2] * distanceList[k] * materials[k].weight\n\t\t\t\t\t#if(tupleResult[j.multi_index] > 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(tupleResult[j.multi_index]) \n\t\t\t\t\t#if(tupleResult[j.multi_index] == 0):\n\t\t\t\t\t#\t\ttupleResult[j.multi_index] = 1\n\t\t\t\t\t#if(tupleResult[j.multi_index] < 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(0 - tupleResult[j.multi_index]) \n\t\t\t\telse:\n\t\t\t\t\ttupleResultR[j.multi_index] = 0\n\t\t\t\t\ttupleResultG[j.multi_index] = 0\n\t\t\t\t\ttupleResultB[j.multi_index] = 0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = 0.0\n\t\t\t\tj.iternext()\n\t\t\tlayersR.append(tupleResultR)\n\t\t\tlayersG.append(tupleResultG)\n\t\t\tlayersB.append(tupleResultB)\n\t\t\tfor k in range(len(materials)):\n\t\t\t\tlayerMaterial[k].append(tupleMaterial[k])\n\t\t\t\t\n\t\t\tm = m + 1\n\t\tprint \"i got here\"\n\t\tvolumeR=numpy.array(layersR) # create the 3d volume\n\t\tvolumeG=numpy.array(layersG) \n\t\tvolumeB=numpy.array(layersB)\n\t\tfor k in range(len(materials)):\n\t\t\tself.volumeComposition[k] = numpy.array(layerMaterial[k])\n\t\t\n\t\tvolumeGeneral.append(volumeR)\n\t\tvolumeGeneral.append(volumeG)\n\t\tvolumeGeneral.append(volumeB)\n\t\treturn volumeGeneral", "def update_scenes(self) -> None:\n self.scenes.update(\n {\n f\"{group.id}_{scene.id}\": scene\n for group in self.groups.values() # type: ignore\n for scene in group.scenes.values()\n if f\"{group.id}_{scene.id}\" not in self.scenes\n }\n )", "def items(self):\n return _osgAnimation.mapVertexInfluence_items(self)", "def __getitem__(self, index):\n\n #get the image name and box\n #image_name,box_index = self.name_and_box_index[index]\n name_and_index = self.name_and_box_index[index]\n #name_and_index needs to be alist of lists\n if(len(name_and_index) >0 and type(name_and_index[0]) is not list): \n name_and_index = [name_and_index] \n \n image_target_list = []\n\n for image_name,box_index in name_and_index:\n #build the path to the image and annotation file\n #see format tab on Get Data page on AVD dataset website\n if image_name[0] == '0':\n scene_type = 'Home'\n else:\n scene_type = 'Office'\n scene_name = scene_type + \"_\" + image_name[1:4] + \"_\" + image_name[4]\n \n #read the image and bounding boxes for this image\n #(doesn't get the movement pointers) \n img = (Image.open(os.path.join(self.root,scene_name, \n images_dir,image_name)))\n with open(os.path.join(self.root,scene_name,annotation_filename)) as f:\n annotations = json.load(f)\n target = annotations[image_name]['bounding_boxes'] \n \n #apply target transform\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n #get the single box\n target = target[box_index]\n\n #crop images for classification if flag is set\n if self.classification:\n img = np.asarray(img)\n img = img[target[1]:target[3],target[0]:target[2],:]\n img = Image.fromarray(img)\n target = target[4] \n \n \n #apply image transform \n if self.transform is not None:\n img = self.transform(img)\n\n image_target_list.append([img,target])\n\n #special case for single image/label\n if(len(image_target_list) == 1):\n image_target_list = image_target_list[0]\n\n return image_target_list", "def items():", "async def async_get_scenes(work_dir=None):\n scenes: Dict[Group, Dict[str, Union[Dict[ResponderAddress, LinkInfo], str]]] = {}\n if work_dir:\n await async_load_scene_names(work_dir=work_dir)\n for addr in devices:\n device = devices[addr]\n if device == 
devices.modem:\n continue\n for rec in device.aldb.find(\n target=devices.modem.address, is_controller=False, in_use=True\n ):\n if rec.group == 0:\n continue\n if not scenes.get(rec.group):\n scenes[rec.group] = {}\n scenes[rec.group][\"name\"] = _scene_names.get(\n rec.group, f\"Insteon Scene {rec.group}\"\n )\n scenes[rec.group][\"group\"] = rec.group\n scenes[rec.group][\"devices\"] = {}\n scene = scenes.get(rec.group)\n if not scene[\"devices\"].get(device.address):\n scene[\"devices\"][device.address] = []\n has_controller = False\n for _ in devices.modem.aldb.find(\n target=device.address, group=rec.group, is_controller=True, in_use=True\n ):\n has_controller = True\n break\n scene[\"devices\"][device.address].append(\n LinkInfo(rec.data1, rec.data2, rec.data3, has_controller, True)\n )\n return scenes", "def load_items(self):\n # LIST OF THE ITEMS TO COLLECT TO WIN\n list_items = [self.aiguille, self.ether, self.tube]\n # CALLING OF THE METHODS define_position\n list_items[0].define_position_item_1()\n list_items[1].define_position_item_2()\n list_items[2].define_position_item_3()\n # LOOP FOR, FOREACH ITEM IN THE LIST, WE DRAW IT ON THE SCREEN\n for item in list_items:\n # CALLING OF THE METHOD display_items\n item.display_items(self.window)\n # IF MACGVER COLLECTS AN ITEM...\n if (self.macgyver.position_x == list_items\n [list_items.index(item)].obj_sprite_x) \\\n and (self.macgyver.position_y == list_items\n [list_items.\n index(item)].obj_sprite_y):\n # IT MAKES A SOUND\n pygame.mixer.music.load(ITEM_SOUND)\n pygame.mixer.music.play()\n # IT INCREMENTS MACGYVER'S BAG\n self.macgyver.bag += 1\n # IT MOVES THE OBJECT TO THE BAG\n list_items[list_items.index(item)].obj_sprite_x = \\\n TILESIZE*(5 + list_items.index(item))\n list_items[list_items.index(item)].obj_sprite_y = \\\n NBCASES*TILESIZE\n # IT HIDES THE QUESTIONS MARK\n self.noitem.fill(TRANSPARENT)", "def process_items():\n global HAS_WATCH\n global HAS_FIRST_AID_KIT\n global HAS_FLASHLIGHT\n global HAS_RAINCOAT\n global HAS_COMPASS\n global HAS_BEARTRAP\n\n if \"Watch\" in ITEMS:\n HAS_WATCH = True\n if \"First Aid Kit\" in ITEMS:\n HAS_FIRST_AID_KIT = True\n if \"Flashlight\" in ITEMS:\n HAS_FLASHLIGHT = True\n if \"Raincoat\" in ITEMS:\n HAS_RAINCOAT = True\n if \"Compass\" in ITEMS:\n HAS_COMPASS = True\n if \"Bear Trap\" in ITEMS:\n HAS_BEARTRAP = True\n\n # Stupid little hack to provide 'immediate updates/effect' of having the below items\n if HAS_WATCH:\n update_title_area(\" Day: %d Time: %d:00 \" % (DAY, TIME))\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS] = \"Y\"", "def mosaicLoaded(self):\n for item in self.item_store.itemIterator(item_type = imageItem.ImageItem):\n if (item.getZValue() > self.current_z):\n self.current_z = item.getZValue() + self.z_inc", "def __getitem__(self, index):\n A_path = self.A_paths[index % self.A_size] # make sure index is within then range\n #if self.opt.serial_batches: # make sure index is within then range\n \n\n A_img = Image.open(A_path).convert('L')\n \n A = self.transform_A(A_img)\n # B20 = self.transform_B(B20_img)\n #B2 = self.transform_B(B2_img)\n\n\n index_B50 = index % self.B50_size\n B50_path = self.B50_paths[index_B50]\n B50_img = Image.open(B50_path).convert('L')\n B50 = self.transform_B(B50_img)\n\n\n\n index_B100 = index % self.B100_size\n B100_path = self.B100_paths[index_B100]\n B100_img = Image.open(B100_path).convert('L')\n B100 = self.transform_B(B100_img)\n\n index_B150 = index % self.B150_size\n B150_path = self.B150_paths[index_B150]\n B150_img = 
Image.open(B150_path).convert('L')\n B150 = self.transform_B(B150_img)\n\n\n \n\n index_m0 = 0\n m0_path = self.m0_paths[index_m0]\n m0_img = Image.open(m0_path).convert('L')\n m0 = self.transform_B(m0_img)\n \n index_m50 = 0\n m50_path = self.m50_paths[index_m50]\n m50_img = Image.open(m50_path).convert('L')\n m50 = self.transform_B(m50_img)\n\n index_m100 = 0\n m100_path = self.m100_paths[index_m100]\n m100_img = Image.open(m100_path).convert('L')\n m100 = self.transform_B(m100_img)\n\n index_m150 = 0\n m150_path = self.m150_paths[index_m150]\n m150_img = Image.open(m150_path).convert('L')\n m150 = self.transform_B(m150_img)\n\n\n\n return {'A': A, 'B50': B50,'B100': B100, 'B150': B150, 'A_paths': A_path, 'B50_paths': B50_path,'B100_paths': B100_path, 'B150_paths': B150_path, 'm0':m0, 'm50':m50,'m100':m100, 'm150':m150}", "def reindex_graphics(self):\n for obj in self.context.static_objects:\n self.canvas.children.remove(obj.widget.canvas)\n # fill _objects_z_index\n _objects_z_index = {}\n for obj in self.context.static_objects:\n y = obj.widget.pos[1]\n if not y in _objects_z_index:\n _objects_z_index[y] = []\n _objects_z_index[y].append(obj)\n _keys = _objects_z_index.keys()\n _keys.sort()\n _keys.reverse()\n for k in _keys:\n objs = _objects_z_index[k]\n for obj in objs:\n self.canvas.add(obj.widget.canvas)", "def update_gl_state(self, *args, **kwargs):\n for v in self._subvisuals:\n v.update_gl_state(*args, **kwargs)", "def __getitem__(self, cur_episode):\n if self.platform == \"win\":\n env = lmdb.open(self.lmdb_file, subdir=False,\n readonly=True, lock=False,\n readahead=False, meminit=False)\n else:\n env = self.env\n # episode_set = self.episode_sets[episode]\n total_support_x = []\n total_query_x = []\n total_support_y = []\n total_query_y = []\n\n for t in range(self.t_task):\n # create a task (n_way*k_shot+ n_way*k_query)\n\n support_x = []\n query_x = []\n support_y = []\n query_y = []\n\n support_imgs = []\n query_imgs = []\n\n # select n_way classes randomly\n selected_classes = np.random.choice(self.total_cls, self.n_way)\n # select k_shot + k_query for each class\n for selected_class in selected_classes:\n selected_imgs = np.random.choice(\n self.dic_img_label[self.num2label[selected_class]], self.k_shot + self.k_query, False)\n support_imgs += selected_imgs[:self.k_shot].tolist()\n query_imgs += selected_imgs[self.k_shot:].tolist()\n\n with env.begin(write=False) as txn:\n for i, img_id in enumerate(support_imgs):\n res = pyarrow.deserialize(txn.get(u'{}'.format(img_id).encode('ascii')))\n support_x.append(self.transform(res[0]))\n support_y.append(np.array([self.label2num[res[1]]]))\n\n for i, img_id in enumerate(query_imgs):\n res = pyarrow.deserialize(txn.get(u'{}'.format(img_id).encode('ascii')))\n query_x.append(self.transform(res[0]))\n query_y.append(np.array([self.label2num[res[1]]]))\n support_x = torch.stack(support_x, 0)\n query_x = torch.stack(query_x, 0)\n support_y = np.array(support_y)\n query_y = np.array(query_y)\n\n # shuffle:\n index = np.random.permutation(len(support_y))\n support_x = support_x[index]\n if not self.fet_global:\n support_y = np.array([i for i in range(self.n_way) for j in range(self.k_shot)])\n support_y = support_y[index]\n\n index = np.random.permutation(len(query_y))\n query_x = query_x[index]\n if not self.fet_global:\n query_y = np.array([i for i in range(self.n_way) for j in range(self.k_query)])\n\n query_y = query_y[index]\n\n # a batch\n total_query_x.append(query_x)\n total_query_y.append(query_y)\n 
total_support_x.append(support_x)\n total_support_y.append(support_y)\n\n total_query_x = torch.cat(total_query_x, 0)\n total_query_y = np.hstack(total_query_y)\n total_support_x = torch.cat(total_support_x, 0)\n total_support_y = np.hstack(total_support_y)\n\n imgs = torch.cat([total_support_x, total_query_x], 0)\n labels = torch.from_numpy(np.hstack([total_support_y, total_query_y]).reshape([-1, 1]))\n return imgs, labels", "def update_all_states(self):\n \n for l in range(0,4):\n temp_s = \"/loop/{}/mode\".format(l+1) # stupid using loop 1 to 4\n temp_m = self.loop_modes[self.loop_states[l]]\n self.osc_client.send_message(temp_s,temp_m)\n print(\"sent_message {} {}\".format(temp_s,temp_m))\n self.set_loop_led(l)\n \n for l in range(0,4):\n for s in range(0,8):\n temp_s = self.osc_slice_string.format(l+1,s)\n temp_m = self.slice_modes[self.slice_states[l][s] ]\n self.osc_client.send_message(temp_s,temp_m)\n print(\"sent message {} {}\".format(temp_s, temp_m))\n self.set_slice_led(l,s)\n return", "def populateSceneRefs(*args):\n pi.referenceDictionary = {}\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, ra=True)\n\n #get reference paths\n refs = cmds.file(q=True, r=True)\n\n buff = []\n # loaded = []\n for ref in refs:\n #get the associated namespace\n ns = cmds.file(ref, q=True, ns=True)\n pi.referenceDictionary[ns] = ref\n\n # put files in buffer list to sort\n for g in pi.referenceDictionary.keys():\n buff.append(g)\n buff.sort()\n\n # now put the sorted namespaces in the list\n for b in buff:\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, append=b, dcc = selectRefs)\n\n # if ref is deferred(not loaded), change it's font\n for ref in refs:\n if cmds.file(ref, q=True, deferReference=True):\n ns = cmds.file(ref, q=True, ns=True) # get the namespace in order to get the item name\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, selectItem=ns) # sel the item in order to query it\n index = cmds.textScrollList(widgets[\"shotAssListTSL\"], q=True, selectIndexedItem=True)[0] # query the index of sel\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, lineFont = [index, \"obliqueLabelFont\"])\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, deselectAll=True)\n\n # if we're in a lgt file, look through current refs and for each one of type \"anm\", check the frame rates, etc. 
and give option to change\n curr = paths.PathManager(cmds.file(q=True, sn=True))\n if curr.shotType == \"lgt\":\n for ref in refs:\n p=paths.PathManager(ref)\n if p.shotType == \"anm\":\n dict = cFuncs.getFileFrameInfo(cFuncs.fixPath(ref))\n csi.compareSceneInfo(dict)", "def on_draw( self ):\r\n self.clear()\r\n self.setup_3D()\r\n print \"DEBUG:\" , \"There are\" , len( self.renderlist ) , \"items in 'self.renderlist'\"\r\n for obj in self.renderlist:\r\n obj.draw()", "def get_items(self):\n\n to_process_mat_ids = self._find_to_process()\n\n self.logger.info(\n \"Updating all substrate calculations for {} materials\".format(\n len(to_process_mat_ids)\n )\n )\n\n for mpid in to_process_mat_ids:\n e_tensor = self.elasticity.query_one(\n criteria={self.elasticity.key: mpid},\n properties=[\"elasticity\", \"last_updated\"],\n )\n e_tensor = (\n e_tensor.get(\"elasticity\", {}).get(\"elastic_tensor\", None)\n if e_tensor\n else None\n )\n mat = self.materials.query_one(\n criteria={self.materials.key: mpid},\n properties=[\"structure\", \"deprecated\", \"material_id\", \"last_updated\"],\n )\n\n yield {\n \"structure\": mat[\"structure\"],\n \"material_id\": mat[self.materials.key],\n \"elastic_tensor\": e_tensor,\n \"deprecated\": mat[\"deprecated\"],\n \"last_updated\": max(\n mat.get(\"last_updated\"), e_tensor.get(\"last_updated\")\n ),\n }", "def get_section_sprites(self):\n visible = set()\n for rect_info in self.sections:\n if pg.Rect(rect_info).colliderect(self.view_rect):\n visible.update(self.sections[rect_info])\n return visible", "def __getitem__(self, index):\n ID = self.ID[index]\n turn_id = self.turn_id[index]\n turn_belief = self.turn_belief[index]\n turn_belief_dict = self.turn_belief_dict[index]\n sorted_domainslots = self.sorted_domainslots[index]\n turn_uttr = self.turn_uttr[index]\n context_plain = self.dialog_history[index] \n sorted_lenval = self.sorted_lenval[index]\n sorted_in_domains2 = self.sorted_in_domains2[index]\n sorted_in_slots2 = self.sorted_in_slots2[index]\n sorted_generate_y = self.sorted_generate_y[index]\n c = copy.deepcopy\n context = self.preprocess(context_plain, self.src_word2id)\n delex_context = None\n if self.args['delex_his']:\n temp = self.delex_dialog_history[index].split()\n original = self.dialog_history[index].split()\n if self.split == 'train' and 'p_delex_noise' in self.args and np.random.uniform() < self.args['p_delex_noise']:\n prob = np.random.uniform()\n if prob < 0.5:\n indices = [idx for idx,i in enumerate(temp) if len(i.split('-'))==2]\n if len(indices) > 0:\n random_idx = random.choice(indices)\n temp[random_idx] = original[random_idx] # removal \n else:\n random_token = random.choice(self.all_slots)\n out_words = list(self.mem_word2id.keys())[4:]\n indices = [idx for idx,i in enumerate(original) if i in out_words]\n if len(indices) > 0:\n index = random.choice(indices)\n temp[index] = random_token\n delex_context = ' '.join(temp)\n delex_context = self.preprocess(delex_context, self.src_word2id) \n tag_x, tag_y = None, None\n if not self.args['sep_input_embedding']:\n sorted_in_domains = self.preprocess_seq(self.sorted_in_domains[index], self.src_word2id)\n sorted_in_slots = self.preprocess_seq(self.sorted_in_slots[index], self.src_word2id)\n sorted_in_domains2 = self.preprocess_seq(sorted_in_domains2, self.src_word2id)\n sorted_in_slots2 = self.preprocess_seq(sorted_in_slots2, self.src_word2id)\n else:\n sorted_in_domains = self.preprocess_seq(self.sorted_in_domains[index], self.domain_word2id)\n sorted_in_slots = 
self.preprocess_seq(self.sorted_in_slots[index], self.slot_word2id)\n sorted_in_domains2 = self.preprocess_seq(sorted_in_domains2, self.domain_word2id)\n sorted_in_slots2 = self.preprocess_seq(sorted_in_slots2, self.slot_word2id)\n sorted_in_domainslots2_idx, y_in, y_out = None, None, None\n if args['auto_regressive']:\n sorted_in_domainslots2_idx = self.sorted_in_domainslots2_idx[index]\n y_in, y_out = self.preprocess_atrg_seq(self.atrg_generate_y[index], self.src_word2id) \n if self.args['pointer_decoder']:\n sorted_generate_y = self.preprocess_seq(sorted_generate_y, self.src_word2id)\n else:\n sorted_generate_y = self.preprocess_seq(sorted_generate_y, self.mem_word2id)\n sorted_gates = None\n if self.sorted_gates[index] is not None:\n sorted_gates = self.sorted_gates[index]\n user_uttr_plain, user_uttr = None, None\n turn_prev_bs_plain, turn_prev_bs = None, None\n \n item_info = {\n \"ID\":ID, \n \"turn_id\":turn_id, \n \"turn_belief\":turn_belief, #?\n \"context\":context,\n \"delex_context_plain\": self.delex_dialog_history[index],\n \"delex_context\": delex_context,\n \"context_plain\":context_plain, \n \"user_uttr\": user_uttr,\n \"user_uttr_plain\": user_uttr_plain, \n \"sorted_in_domains\": sorted_in_domains,\n \"sorted_in_domains2\": sorted_in_domains2,\n \"sorted_in_slots\": sorted_in_slots,\n \"sorted_in_slots2\": sorted_in_slots2,\n \"sorted_in_domainslots2_idx\": sorted_in_domainslots2_idx, \n \"sorted_lenval\": sorted_lenval,\n \"sorted_gates\": sorted_gates,\n \"sorted_generate_y\": sorted_generate_y,\n \"y_in\": y_in,\n \"y_out\": y_out\n }\n return item_info", "def __getitem__(self, index):\n path, name, txt = self.imgs[index]\n img = self.loader(path)\n\n img_size = img.size\n img_size = (400,400)\n\n loader = loadjson\n \n data = loader(txt, self.objectsofinterest,img)\n\n pointsBelief = data['pointsBelief'] \n objects_centroid = data['centroids']\n points_all = data['points']\n points_keypoints = data['keypoints_2d']\n translations = torch.from_numpy(np.array(\n data['translations'])).float()\n rotations = torch.from_numpy(np.array(\n data['rotations'])).float() \n\n if len(points_all) == 0:\n points_all = torch.zeros(1, 10, 2).double()\n \n # self.save == true assumes there is only \n # one object instance in the scene. 
\n if translations.size()[0] > 1:\n translations = translations[0].unsqueeze(0)\n rotations = rotations[0].unsqueeze(0)\n\n # If there are no objects, still need to return similar shape array\n if len(translations) == 0:\n translations = torch.zeros(1,3).float()\n rotations = torch.zeros(1,4).float()\n\n # Camera intrinsics\n path_cam = path.replace(name,'_camera_settings.json')\n with open(path_cam) as data_file: \n data = json.load(data_file)\n # Assumes one camera\n cam = data['camera_settings'][0]['intrinsic_settings']\n\n matrix_camera = np.zeros((3,3))\n matrix_camera[0,0] = cam['fx']\n matrix_camera[1,1] = cam['fy']\n matrix_camera[0,2] = cam['cx']\n matrix_camera[1,2] = cam['cy']\n matrix_camera[2,2] = 1\n\n # Load the cuboid sizes\n path_set = path.replace(name,'_object_settings.json')\n with open(path_set) as data_file: \n data = json.load(data_file)\n\n cuboid = torch.zeros(1)\n\n if self.objectsofinterest is None:\n cuboid = np.array(data['exported_objects'][0]['cuboid_dimensions'])\n else:\n for info in data[\"exported_objects\"]:\n if self.objectsofinterest in info['class']:\n cuboid = np.array(info['cuboid_dimensions'])\n\n img_original = img.copy() \n\n \n def Reproject(points,tm, rm):\n \"\"\"\n Reprojection of points when rotating the image\n \"\"\"\n proj_cuboid = np.array(points)\n\n rmat = np.identity(3)\n rmat[0:2] = rm\n tmat = np.identity(3)\n tmat[0:2] = tm\n\n new_cuboid = np.matmul(\n rmat, np.vstack((proj_cuboid.T, np.ones(len(points)))))\n new_cuboid = np.matmul(tmat, new_cuboid)\n new_cuboid = new_cuboid[0:2].T\n\n return new_cuboid\n\n # Random image manipulation, rotation and translation with zero padding\n dx = round(np.random.normal(0, 2) * float(self.random_translation[0]))\n dy = round(np.random.normal(0, 2) * float(self.random_translation[1]))\n angle = round(np.random.normal(0, 1) * float(self.random_rotation))\n\n tm = np.float32([[1, 0, dx], [0, 1, dy]])\n rm = cv2.getRotationMatrix2D(\n (img.size[0]/2, img.size[1]/2), angle, 1)\n\n for i_objects in range(len(pointsBelief)):\n points = pointsBelief[i_objects]\n new_cuboid = Reproject(points, tm, rm)\n pointsBelief[i_objects] = new_cuboid.tolist()\n objects_centroid[i_objects] = tuple(new_cuboid.tolist()[-1])\n pointsBelief[i_objects] = list(map(tuple, pointsBelief[i_objects]))\n\n for i_objects in range(len(points_keypoints)):\n points = points_keypoints[i_objects]\n new_cuboid = Reproject(points, tm, rm)\n points_keypoints[i_objects] = new_cuboid.tolist()\n points_keypoints[i_objects] = list(map(tuple, points_keypoints[i_objects]))\n \n image_r = cv2.warpAffine(np.array(img), rm, img.size)\n result = cv2.warpAffine(image_r, tm, img.size)\n img = Image.fromarray(result)\n\n # Note: All point coordinates are in the image space, e.g., pixel value.\n # This is used when we do saving --- helpful for debugging\n if self.save or self.test: \n # Use the save to debug the data\n if self.test:\n draw = ImageDraw.Draw(img_original)\n else:\n draw = ImageDraw.Draw(img)\n \n # PIL drawing functions, here for sharing draw\n def DrawKeypoints(points):\n for key in points:\n DrawDot(key,(12, 115, 170),7) \n \n def DrawLine(point1, point2, lineColor, lineWidth):\n if not point1 is None and not point2 is None:\n draw.line([point1,point2],fill=lineColor,width=lineWidth)\n\n def DrawDot(point, pointColor, pointRadius):\n if not point is None:\n xy = [point[0]-pointRadius, point[1]-pointRadius, point[0]+pointRadius, point[1]+pointRadius]\n draw.ellipse(xy, fill=pointColor, outline=pointColor)\n\n def DrawCube(points, 
which_color = 0, color = None):\n '''Draw cube with a thick solid line across the front top edge.'''\n lineWidthForDrawing = 2\n lineColor1 = (255, 215, 0) # yellow-ish\n lineColor2 = (12, 115, 170) # blue-ish\n lineColor3 = (45, 195, 35) # green-ish\n if which_color == 3:\n lineColor = lineColor3\n else:\n lineColor = lineColor1\n\n if not color is None:\n lineColor = color \n\n # draw front\n DrawLine(points[0], points[1], lineColor, 8) #lineWidthForDrawing)\n DrawLine(points[1], points[2], lineColor, lineWidthForDrawing)\n DrawLine(points[3], points[2], lineColor, lineWidthForDrawing)\n DrawLine(points[3], points[0], lineColor, lineWidthForDrawing)\n \n # draw back\n DrawLine(points[4], points[5], lineColor, lineWidthForDrawing)\n DrawLine(points[6], points[5], lineColor, lineWidthForDrawing)\n DrawLine(points[6], points[7], lineColor, lineWidthForDrawing)\n DrawLine(points[4], points[7], lineColor, lineWidthForDrawing)\n \n # draw sides\n DrawLine(points[0], points[4], lineColor, lineWidthForDrawing)\n DrawLine(points[7], points[3], lineColor, lineWidthForDrawing)\n DrawLine(points[5], points[1], lineColor, lineWidthForDrawing)\n DrawLine(points[2], points[6], lineColor, lineWidthForDrawing)\n\n # draw dots\n DrawDot(points[0], pointColor=(255,255,255), pointRadius = 3)\n DrawDot(points[1], pointColor=(0,0,0), pointRadius = 3)\n\n # Draw all the found objects. \n for points_belief_objects in pointsBelief:\n DrawCube(points_belief_objects)\n for keypoint in points_keypoints:\n DrawKeypoints(keypoint)\n\n img = self.transform(img)\n \n return {\n \"img\":img,\n \"translations\":translations,\n \"rot_quaternions\":rotations,\n 'pointsBelief':np.array(points_all[0]),\n 'matrix_camera':matrix_camera,\n 'img_original': np.array(img_original),\n 'cuboid': cuboid,\n 'file_name':name,\n }\n\n # Create the belief map\n beliefsImg = CreateBeliefMap(\n img, \n pointsBelief=pointsBelief,\n nbpoints = 9,\n sigma = self.sigma)\n\n # Create the image maps for belief\n transform = transforms.Compose([transforms.Resize(min(img_size))])\n totensor = transforms.Compose([transforms.ToTensor()])\n\n for j in range(len(beliefsImg)):\n beliefsImg[j] = self.target_transform(beliefsImg[j])\n # beliefsImg[j].save('{}.png'.format(j))\n beliefsImg[j] = totensor(beliefsImg[j])\n\n beliefs = torch.zeros((len(beliefsImg),beliefsImg[0].size(1),beliefsImg[0].size(2)))\n for j in range(len(beliefsImg)):\n beliefs[j] = beliefsImg[j][0]\n \n\n # Create affinity maps\n scale = 8\n if min (img.size) / 8.0 != min (img_size)/8.0:\n # print (scale)\n scale = min (img.size)/(min (img_size)/8.0)\n\n affinities = GenerateMapAffinity(img,8,pointsBelief,objects_centroid,scale)\n img = self.transform(img)\n\n # Transform the images for training input\n w_crop = np.random.randint(0, img.size[0] - img_size[0]+1)\n h_crop = np.random.randint(0, img.size[1] - img_size[1]+1)\n transform = transforms.Compose([transforms.Resize(min(img_size))])\n totensor = transforms.Compose([transforms.ToTensor()])\n\n if not self.normal is None:\n normalize = transforms.Compose([transforms.Normalize\n ((self.normal[0],self.normal[0],self.normal[0]),\n (self.normal[1],self.normal[1],self.normal[1])),\n AddNoise(self.noise)])\n else:\n normalize = transforms.Compose([AddNoise(0.0001)])\n \n img = crop(img,h_crop,w_crop,img_size[1],img_size[0])\n img = totensor(img)\n\n img = normalize(img)\n\n w_crop = int(w_crop/8)\n h_crop = int(h_crop/8)\n\n affinities = affinities[:,h_crop:h_crop+int(img_size[1]/8),w_crop:w_crop+int(img_size[0]/8)]\n beliefs = 
beliefs[:,h_crop:h_crop+int(img_size[1]/8),w_crop:w_crop+int(img_size[0]/8)]\n\n if affinities.size()[1] == 49 and not self.test:\n affinities = torch.cat([affinities,torch.zeros(16,1,50)],dim=1)\n\n if affinities.size()[2] == 49 and not self.test:\n affinities = torch.cat([affinities,torch.zeros(16,50,1)],dim=2)\n\n return {\n 'img':img, \n \"affinities\":affinities, \n 'beliefs':beliefs,\n }", "def dispatch_items_randomly(self, level):\n for item in self.list:\n item.position = Item.define_random_position(item, level)", "def compute_relations(self):\n\n visible_nodes = {}\n\n self.cameras = self.get_all_cameras()\n rospy.logdebug(self.cameras)\n\n if self.cameras.items():\n try:\n if self.visibility_monitor is None:\n self.visibility_monitor = VisibilityMonitor(self.ctx, self.source)\n rospy.loginfo(\"[perspective_filter] Visibility monitor now running, please active the Pygame windows.\")\n visible_nodes = self.visibility_monitor.compute_all()\n rospy.logdebug(\"[perspective_filter] %d perspectives computed \" % len(visible_nodes))\n #rospy.logdebug(visible_nodes)\n except Exception as e:\n rospy.logwarn(\"[perspective_filter] Exception occurred while computing relation : %s\" % str(e))\n if self.visibility_monitor:\n self.visible_nodes = {} #visible_nodes\n for camera_name, visibles_obj in visible_nodes.items():\n camera_id = self.source.scene.nodebyname(camera_name)[0].id\n self.visible_nodes[camera_id] = visibles_obj\n for node in visibles_obj:\n if node.parent in self.cameras.keys():\n if self.source.scene.nodes[node.parent] not in visibles_obj:\n visibles_obj.append(self.source.scene.nodes[node.parent])\n\n for agent_id, nodes_seen in self.visible_nodes.items():\n agent = self.source.scene.nodes[agent_id]\n for node in nodes_seen:\n if agent_id in self.previously_visible_nodes:\n if node not in self.previously_visible_nodes[agent_id]:\n self.start_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n else:\n self.start_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n\n for agent_id, nodes_previously_seen in self.previously_visible_nodes.items():\n agent = self.source.scene.nodes[agent_id]\n for node in nodes_previously_seen:\n if agent_id in self.visible_nodes:\n if node not in self.visible_nodes[agent_id]:\n self.end_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n else:\n self.end_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n\n self.publish_perspectives()\n self.previously_visible_nodes = self.visible_nodes", "def next_scene(self):\n if self.current_scene == len(self.scenes) - 1:\n self.current_scene = 0\n else:\n self.current_scene += 1\n print 'Scene: {}'.format(self.current_scene)\n self.redraw()", "def update_state(self):\n self.reset_state()\n for piece in self.pieces:\n coordinates = piece.get_block_positions()\n for coor in coordinates:\n x, y = coor\n self.state[y][x] = piece", "def _recompute_indexes(self, first_index=0, free_index=None):\n if free_index is None:\n free_index = self.index + 1\n\n # Cleanup the linkable_vars for all the pulses which will be reindexed.\n linked_vars = self.root.linkable_vars\n for var in linked_vars[:]:\n if var[0].isdigit() and int(var[0]) >= free_index:\n linked_vars.remove(var)\n\n for item in self.items[first_index:]:\n\n item.index = free_index\n prefix = '{}_'.format(free_index)\n linkable_vars = [prefix + var for var in item.linkable_vars]\n linked_vars.extend(linkable_vars)\n\n if 
isinstance(item, Sequence):\n item.unobserve('_last_index', self._item_last_index_updated)\n item._recompute_indexes()\n item.observe('_last_index', self._item_last_index_updated)\n free_index = item._last_index + 1\n\n # We have a non indexed item (pulse or template).\n else:\n free_index += 1\n\n self._last_index = free_index - 1", "def next_scene(self):\n if self.current_scene == len(self.scenes) - 1:\n self.current_scene = 0\n else:\n self.current_scene += 1\n self.redraw()", "def update(self, delta_time):\n\n #scene_dico = self.scene.__dict__\n\n #print(\"\\n\\n *******\")\n\n #for k, v in scene_dico.items():\n # print(k,\" \",v)\n # print(\"------------\")\n \n\n #self.scene[sprite_lists].update()\n\n #print(type(self.scene))\n #self.scene.sprite_lists.update()\n\n for spl in self.scene.sprite_lists:\n spl.update()\n\n \n\n self.frame_count += 1\n self.player_list.update()\n\n for i in range (self.lives):\n \n\n\n self.life_list[i].center_x = (self.player_sprite.center_x - SCREEN_WIDTH // 2) + i * self.life_list[i].width\n self.life_list[i].center_y = (self.player_sprite.center_y - SCREEN_HEIGHT // 2) \n\n\n self.crosshair_sprite.center_x = self.player_sprite.center_x + self.crosshair_relative_xoffset\n self.crosshair_sprite.center_y = self.player_sprite.center_y + self.crosshair_relative_yoffset\n\n \n\n\n\n \n\n \n\n self.enemy_list.update()\n \n\n # Move the player with the physics engine\n #self.physics_engine_walls.update()\n #self.stairs_list.update()\n\n\n\n \n\n\n\n\n\n #for medikit in ammo_medikit_hit_list:\n # medikit.remove_from_sprite_lists()\n \n # self.player_sprite.cur_health += MEDIKIT_HEALTH_BOOST\n\n\n\n\n\n # Generate a list of all sprites that collided with the player.\n #stairs_hit_list = arcade.check_for_collision_with_list(self.player_sprite,\n # self.stairs_list)\n\n #for stairs in stairs_hit_list:\n # self.level += 1\n # self.is_smoked = False\n # Load the next level\n # self.setup(self.level) # .............?????????.........\n\n # Set the camera to the start\n # self.view_left = 0\n # self.view_bottom = 0\n # changed_viewport = True\n\n\n\n \n\n\n # Loop through each enemy that we have\n for enemy in self.enemy_list:\n\n \n start_x = enemy.center_x\n start_y = enemy.center_y\n\n \n dest_x = self.player_sprite.center_x\n dest_y = self.player_sprite.center_y\n \n x_diff = dest_x - start_x\n y_diff = dest_y - start_y\n angle = math.atan2(y_diff, x_diff)\n\n # Set the enemy to face the player.\n enemy.angle = math.degrees(angle)-90\n\n # Shoot every 60 frames change of shooting each frame\n if self.frame_count % 60 == 0:\n bullet = arcade.Sprite(\":resources:images/space_shooter/laserBlue01.png\") \n \n bullet.center_x = start_x\n bullet.center_y = start_y\n\n # Angle the bullet sprite\n bullet.angle = math.degrees(angle)\n\n # Taking into account the angle, calculate our change_x\n # and change_y. 
Velocity is how fast the bullet travels.\n bullet.change_x = math.cos(angle) * BULLET_SPEED\n bullet.change_y = math.sin(angle) * BULLET_SPEED\n\n #self.bullet_list.append(bullet) -------------------------\n self.terro_bullet_list.append(bullet)\n\n\n \n\n\n\n # --- Manage Scrolling ---\n\n # Scroll left\n\n \"\"\"\n left_boundary = self.view_left + LEFT_VIEWPORT_MARGIN\n if self.player_sprite.left < left_boundary:\n self.view_left -= left_boundary - self.player_sprite.left\n changed_viewport = True\n\n # Scroll right\n right_boundary = self.view_left + SCREEN_WIDTH - RIGHT_VIEWPORT_MARGIN\n if self.player_sprite.right > right_boundary:\n self.view_left += self.player_sprite.right - right_boundary\n changed_viewport = True\n\n # Scroll up\n top_boundary = self.view_bottom + SCREEN_HEIGHT - TOP_VIEWPORT_MARGIN\n if self.player_sprite.top > top_boundary:\n self.view_bottom += self.player_sprite.top - top_boundary\n changed_viewport = True\n\n # Scroll down\n bottom_boundary = self.view_bottom + BOTTOM_VIEWPORT_MARGIN\n if self.player_sprite.bottom < bottom_boundary:\n self.view_bottom -= bottom_boundary - self.player_sprite.bottom\n changed_viewport = True\n\n if changed_viewport:\n # Only scroll to integers. Otherwise we end up with pixels that\n # don't line up on the screen\n self.view_bottom = int(self.view_bottom)\n self.view_left = int(self.view_left)\n\n # Do the scrolling\n arcade.set_viewport(self.view_left,\n SCREEN_WIDTH + self.view_left,\n self.view_bottom,\n SCREEN_HEIGHT + self.view_bottom)\n\n \"\"\"\n\n\n if self.player_sprite.right >= self.end_of_map:\n if self.level < self.max_level:\n self.level += 1\n self.load_level(self.level)\n self.player_sprite.center_x = 128\n self.player_sprite.center_y = 64\n self.player_sprite.change_x = 0\n self.player_sprite.change_y = 0\n else:\n self.game_over = True\n\n\n\n\n self.pan_camera_to_user()\n\n\n\n # ///////// joy\n\n joy_dico = self.window.joys[0]\n\n BTN_A = joy_dico.button_controls[0]\n BTN_B = joy_dico.button_controls[1]\n BTN_X = joy_dico.button_controls[2]\n BTN_Y = joy_dico.button_controls[3]\n BTN_TL = joy_dico.button_controls[4]\n BTN_TR = joy_dico.button_controls[5]\n BTN_SELECT = joy_dico.button_controls[6]\n BTN_START = joy_dico.button_controls[7]\n BTN_MODE = joy_dico.button_controls[8]\n BTN_THUMBL = joy_dico.button_controls[9]\n BTN_THUMBR = joy_dico.button_controls[10]\n\n\n print(f\"\\n BTN_A ----> {BTN_A}\")\n\n\n BTN_list = [BTN_A,BTN_B,BTN_X,BTN_Y, BTN_TL, BTN_TR, BTN_SELECT, BTN_START, BTN_MODE, BTN_THUMBL, BTN_THUMBR]\n\n BTN_fn_list = [self.joy_A, self.joy_B, self.joy_X, self.joy_Y, self.joy_TL, self.joy_TR, self.joy_SELECT, self.joy_START, self.joy_MODE, self.joy_THUMBL, self.joy_THUMBR]\n\n for BTN in BTN_list:\n if BTN._value == 1:\n print(f\"=====> >=====> ====> {BTN.raw_name}\")\n\n idx = BTN_list.index(BTN)\n\n BTN_fn_list[idx]()", "def __getitem__(self, section_id):", "def iterate():\n # States are of the form (coordinates, word so far, used spots)\n # Load the initial states into the stack\n global theStack\n for r,layer in enumerate(honeycomb):\n for e,el in enumerate(layer):\n theStack.append( ((e,r), [el],set([(e,r)])) )\n \n while (len(theStack) != 0):\n #pop the next run\n (e,r),soFar,used=theStack[-1]\n theStack=theStack[:-1]\n #run it!\n step((e,r),soFar,used)", "def updateScene_(self):\r\n self.scene().clear()\r\n\r\n self._setup_background()\r\n self._add_sockets()\r\n self._add_rooms()\r\n self._add_fuses()\r\n self._add_switchs()\r\n self._add_lamp_outlets()", "def results(self):\n page = 
[]\n\n for i, item in enumerate(super(VideoCarouselTile, self).results()):\n page.append(item)\n if (i + 1) % 3 == 0:\n yield page\n page = []\n if page:\n yield page", "def __getitem__(self, index):\n path, name, txt = self.imgs[index]\n img = self.loader(path)\n\n # img_size = (400, 400)\n img_size = (self.img_size, self.img_size)\n \n\n loader = loadjson\n\n data = loader(txt, self.objectsofinterest, img)\n\n pointsBelief = data['pointsBelief'] \n objects_centroid = data['centroids']\n points_all = data['points']\n points_keypoints = data['keypoints_2d']\n translations = torch.from_numpy(np.array(\n data['translations'])).float()\n rotations = torch.from_numpy(np.array(\n data['rotations'])).float()\n\n if len(points_all) == 0:\n # points_all = torch.zeros(1, 10, 2).double()\n points_all = torch.zeros(1)\n\n # self.save == true assumes there is only\n # one object instance in the scene.\n if translations.size()[0] > 1:\n translations = translations[0].unsqueeze(0)\n rotations = rotations[0].unsqueeze(0)\n\n # If there are no objects, still need to return similar shape array\n if len(translations) == 0:\n translations = torch.zeros(1, 3).float()\n rotations = torch.zeros(1, 4).float()\n\n # Camera intrinsics\n path_cam = path.replace(name, '_camera_settings.json')\n with open(path_cam) as data_file:\n data = json.load(data_file)\n # Assumes one camera\n cam = data['camera_settings'][0]['intrinsic_settings']\n\n matrix_camera = np.zeros((3, 3))\n matrix_camera[0, 0] = cam['fx']\n matrix_camera[1, 1] = cam['fy']\n matrix_camera[0, 2] = cam['cx']\n matrix_camera[1, 2] = cam['cy']\n matrix_camera[2, 2] = 1\n\n # Load the cuboid sizes\n path_set = path.replace(name, '_object_settings.json')\n with open(path_set) as data_file:\n data = json.load(data_file)\n\n cuboid = torch.zeros(1)\n\n if self.objectsofinterest is None:\n cuboid = np.array(data['exported_objects'][0]['cuboid_dimensions'])\n else:\n for info in data[\"exported_objects\"]:\n if self.objectsofinterest in info['class']:\n cuboid = np.array(info['cuboid_dimensions'])\n\n img_original = img.copy()\n\n def Reproject(points, tm, rm):\n \"\"\"\n Reprojection of points when rotating the image\n \"\"\"\n proj_cuboid = np.array(points)\n\n rmat = np.identity(3)\n rmat[0:2] = rm\n tmat = np.identity(3)\n tmat[0:2] = tm\n\n new_cuboid = np.matmul(\n rmat, np.vstack((proj_cuboid.T, np.ones(len(points)))))\n new_cuboid = np.matmul(tmat, new_cuboid)\n new_cuboid = new_cuboid[0:2].T\n\n return new_cuboid\n\n # Random image manipulation, rotation and translation with zero padding\n dx = round(np.random.normal(0, 2) * float(self.random_translation[0]))\n dy = round(np.random.normal(0, 2) * float(self.random_translation[1]))\n angle = round(np.random.normal(0, 1) * float(self.random_rotation))\n\n tm = np.float32([[1, 0, dx], [0, 1, dy]])\n rm = cv2.getRotationMatrix2D(\n (img.size[0] / 2, img.size[1] / 2), angle, 1)\n\n for i_objects in range(len(pointsBelief)):\n points = pointsBelief[i_objects]\n new_cuboid = Reproject(points, tm, rm)\n pointsBelief[i_objects] = new_cuboid.tolist()\n objects_centroid[i_objects] = tuple(new_cuboid.tolist()[-1])\n pointsBelief[i_objects] = list(map(tuple, pointsBelief[i_objects]))\n\n for i_objects in range(len(points_keypoints)):\n points = points_keypoints[i_objects]\n new_cuboid = Reproject(points, tm, rm)\n points_keypoints[i_objects] = new_cuboid.tolist()\n points_keypoints[i_objects] = list(map(tuple, points_keypoints[i_objects]))\n\n image_r = cv2.warpAffine(np.array(img), rm, img.size)\n result = 
cv2.warpAffine(image_r, tm, img.size)\n img = Image.fromarray(result)\n\n # Note: All point coordinates are in the image space, e.g., pixel value.\n # This is used when we do saving --- helpful for debugging\n if self.save or self.test:\n # Use the save to debug the data\n if self.test:\n draw = ImageDraw.Draw(img_original)\n else:\n draw = ImageDraw.Draw(img)\n\n # PIL drawing functions, here for sharing draw\n def DrawKeypoints(points):\n for key in points:\n DrawDot(key, (12, 115, 170), 7)\n\n def DrawLine(point1, point2, lineColor, lineWidth):\n if not point1 is None and not point2 is None:\n draw.line([point1, point2], fill=lineColor, width=lineWidth)\n\n def DrawDot(point, pointColor, pointRadius):\n if not point is None:\n xy = [point[0] - pointRadius, point[1] - pointRadius, point[0] + pointRadius,\n point[1] + pointRadius]\n draw.ellipse(xy, fill=pointColor, outline=pointColor)\n\n def DrawCube(points, which_color=0, color=None):\n '''Draw cube with a thick solid line across the front top edge.'''\n lineWidthForDrawing = 2\n lineColor1 = (255, 215, 0) # yellow-ish\n lineColor2 = (12, 115, 170) # blue-ish\n lineColor3 = (45, 195, 35) # green-ish\n if which_color == 3:\n lineColor = lineColor3\n else:\n lineColor = lineColor1\n\n if not color is None:\n lineColor = color\n\n # draw front\n DrawLine(points[0], points[1], lineColor, 8) # lineWidthForDrawing)\n DrawLine(points[1], points[2], lineColor, lineWidthForDrawing)\n DrawLine(points[3], points[2], lineColor, lineWidthForDrawing)\n DrawLine(points[3], points[0], lineColor, lineWidthForDrawing)\n\n # draw back\n DrawLine(points[4], points[5], lineColor, lineWidthForDrawing)\n DrawLine(points[6], points[5], lineColor, lineWidthForDrawing)\n DrawLine(points[6], points[7], lineColor, lineWidthForDrawing)\n DrawLine(points[4], points[7], lineColor, lineWidthForDrawing)\n\n # draw sides\n DrawLine(points[0], points[4], lineColor, lineWidthForDrawing)\n DrawLine(points[7], points[3], lineColor, lineWidthForDrawing)\n DrawLine(points[5], points[1], lineColor, lineWidthForDrawing)\n DrawLine(points[2], points[6], lineColor, lineWidthForDrawing)\n\n # draw dots\n DrawDot(points[0], pointColor=(255, 255, 255), pointRadius=3)\n DrawDot(points[1], pointColor=(0, 0, 0), pointRadius=3)\n\n # Draw all the found objects.\n for points_belief_objects in pointsBelief:\n DrawCube(points_belief_objects)\n for keypoint in points_keypoints:\n DrawKeypoints(keypoint)\n\n img = self.transform(img)\n\n return {\n \"img\": img,\n \"translations\": translations,\n \"rot_quaternions\": rotations,\n 'pointsBelief': np.array(points_all[0]),\n 'matrix_camera': matrix_camera,\n 'img_original': np.array(img_original),\n 'cuboid': cuboid,\n 'file_name': name,\n }\n\n # Create the belief map\n beliefsImg = CreateBeliefMap( #TODO: Investigate generating belief maps\n img,\n pointsBelief=pointsBelief,\n nbpoints=9,\n sigma=self.sigma)\n\n # Create the image maps for belief\n transform = transforms.Compose([transforms.Resize(min(img_size))])\n totensor = transforms.Compose([transforms.ToTensor()])\n\n for j in range(len(beliefsImg)):\n beliefsImg[j] = self.target_transform(beliefsImg[j])\n # beliefsImg[j].save('{}.png'.format(j))\n beliefsImg[j] = totensor(beliefsImg[j])\n\n beliefs = torch.zeros((len(beliefsImg), beliefsImg[0].size(1), beliefsImg[0].size(2)))\n for j in range(len(beliefsImg)):\n beliefs[j] = beliefsImg[j][0]\n\n # Create affinity maps\n scale = 8\n if min(img.size) / 8.0 != min(img_size) / 8.0:\n # print (scale)\n scale = min(img.size) / 
(min(img_size) / 8.0)\n\n affinities = GenerateMapAffinity(img, 8, pointsBelief, objects_centroid, scale)\n img = self.transform(img)\n\n # Transform the images for training input\n w_crop = np.random.randint(0, img.size[0] - img_size[0] + 1)\n h_crop = np.random.randint(0, img.size[1] - img_size[1] + 1)\n transform = transforms.Compose([transforms.Resize(min(img_size))])\n totensor = transforms.Compose([transforms.ToTensor()])\n\n # if not self.normal is None:\n # normalize = transforms.Compose([transforms.Normalize\n # ((self.normal[0],self.normal[0],self.normal[0]),\n # (self.normal[1],self.normal[1],self.normal[1])),\n # AddNoise(self.noise)])\n\n if not self.normal is None:\n normalize = transforms.Compose([transforms.Normalize\n ((self.normal[0][0], self.normal[0][1], self.normal[0][2]),\n (self.normal[1][0], self.normal[1][1], self.normal[1][2])),\n AddNoise(self.noise)])\n else:\n normalize = transforms.Compose([AddNoise(0.0001)])\n\n img = crop(img, h_crop, w_crop, img_size[1], img_size[0])\n img = totensor(img)\n\n img = normalize(img)\n\n w_crop = int(w_crop / 8)\n h_crop = int(h_crop / 8)\n\n affinities = affinities[:, h_crop:h_crop + int(img_size[1] / 8), w_crop:w_crop + int(img_size[0] / 8)]\n beliefs = beliefs[:, h_crop:h_crop + int(img_size[1] / 8), w_crop:w_crop + int(img_size[0] / 8)]\n\n if affinities.size()[1] == 49 and not self.test:\n affinities = torch.cat([affinities, torch.zeros(16, 1, 50)], dim=1)\n\n if affinities.size()[2] == 49 and not self.test:\n affinities = torch.cat([affinities, torch.zeros(16, 50, 1)], dim=2)\n\n return {\n 'img': img,\n \"affinities\": affinities,\n 'beliefs': beliefs,\n }", "def targets(self):\n self.renderer.begin_rendering(\"targets\")\n for target in self.targets:\n self.renderer.draw_rect_3d(target, 10, 10, True, self.renderer.blue())\n self.renderer.end_rendering()", "def get_scenes(self, buffer_size=0.1, update=False):\n # Returns start and end date of a 10 day interval, last day is the date of event\n start_date, end_date = ReMasFrame.date_interval(self.event_date_str, delta_minus=10)\n \n # Obtain info for scene collection\n product_id = self.products[self.current_cat][self.current_prod]['id']\n aoi = self.event_geometry.buffer(buffer_size).envelope\n \n # if cfs and date <= 2011-03-31 => change id and res to old cfs\n current_is_cfs = self.current_prod == 'cfs'\n date_is_cfs_v1 = parse(self.event_date_str) <= parse('2011-03-31')\n if current_is_cfs and date_is_cfs_v1:\n product_id = product_id.replace('v2:', 'v1:')\n self.current_deg_res += 0.1\n \n print(self.current_deg_res)\n scenes, ctx = ReMasFrame.search_scenes(\n aoi,\n product_id,\n start_date=start_date,\n end_date=end_date\n )\n if not scenes:\n print(len(scenes), start_date, end_date)\n if current_is_cfs and date_is_cfs_v1:\n self.current_deg_res -= 0.1\n error_str = f\"El conjunto de escenas está vacía para {product_id}, {self.event_id}\"\n raise IndexError(error_str)\n \n new_ctx = ctx.assign(resolution=self.current_deg_res)\n \n if current_is_cfs and date_is_cfs_v1: # restore deg_res\n self.current_deg_res -= 0.1\n \n if update:\n self.current_scenes = scenes\n self.current_ctx = new_ctx\n\n return (scenes, new_ctx), (start_date, end_date)", "def __getitem__(self, index):\n idx = self.available_idx[index]\n qn = self.questions[idx]\n qid = qn['id']\n house = qn['house']\n attr, room_attr = self.question_to_attribute(qn)\n\n # encode question and answer\n qe = self.encoded_questions[qn['h5_id']]\n ae = self.encoded_questions[qn['h5_id']]\n\n # choose path_ix\n 
path_ix = random.choice(range(qn['num_paths'])) if self.split == 'train' else 0 \n path_feats_h5 = h5py.File(osp.join(self.path_feats_dir, qn['path_name']+'.h5'), 'r')\n raw_ego_feats = path_feats_h5['ego_rgb%s' % path_ix][...].reshape(-1, 3200) # (L, 32, 10, 10)\n raw_path_len = raw_ego_feats.shape[0]\n raw_actions = qn['path_actions'][path_ix] # (L, )\n raw_pos_queue = qn['path_positions'][path_ix] # list of L positions\n if self.requires_imgs:\n path_images_h5 = h5py.File(osp.join(self.path_images_dir, qn['path_name']+'.h5'), 'r')\n raw_ego_imgs = path_images_h5['ego_rgb%s' % path_ix] # (L, 224, 224, 3)\n nav_ego_imgs = []\n\n # nav_phrases, nav_phrase_embs\n nav_pgs = [pg for pg in qn['program'] if 'nav' in pg['function']]\n nav_ids = [pg['id'][0] for pg in qn['program'] if 'nav' in pg['function']]\n nav_types = [pg['function'][4:] for pg in qn['program'] if 'nav' in pg['function']]\n nav_phrases = [pg['value_inputs'][0] for pg in qn['program'] if 'nav' in pg['function']]\n nav_phrase_embs = []\n for phrase in nav_phrases:\n nav_phrase_embs.append(np.array([self.wtov[wd] for wd in phrase.split()]).mean(0).astype(np.float32)) # (300, )\n nav_phrase_embs = np.array(nav_phrase_embs) # (#targets, 300)\n\n # For each segment path: feats + actions + pos_queue\n raw_key_ixs = qn['key_ixs_set'][path_ix]\n nav_ego_feats = []\n nav_action_inputs = []\n nav_action_outputs = []\n nav_pos_queues = []\n for i, key_ix in enumerate(raw_key_ixs):\n start_ix = 0 if i == 0 else raw_key_ixs[i-1] # we use last key_ix moment as start (spawn location)\n end_ix = raw_key_ixs[i]+1\n ego_feats = raw_ego_feats[start_ix:end_ix]\n action_inputs = np.array([4] + raw_actions[start_ix:end_ix][:-1], dtype=np.int64)\n action_outputs = np.array(raw_actions[start_ix:end_ix-1] + [3], dtype=np.int64)\n pos_queue = raw_pos_queue[start_ix:end_ix]\n assert ego_feats.shape[0] == len(pos_queue) == action_inputs.shape[0]\n # add to list\n nav_ego_feats.append(ego_feats)\n nav_action_inputs.append(action_inputs)\n nav_action_outputs.append(action_outputs)\n nav_pos_queues.append(pos_queue)\n if self.requires_imgs:\n nav_ego_imgs.append(raw_ego_imgs[start_ix:end_ix])\n\n # cache\n if self.to_cache and index not in self.img_data_cache:\n self.img_data_cache[index] = True # TODO: replace with ego_feats \n\n # private variable\n self.episode_house = self.env_loaded[house]\n self.nav_pos_queues = nav_pos_queues\n self.path_len = raw_path_len\n\n # return\n data = {}\n data['idx'] = idx\n data['qid'] = qid\n data['house'] = qn['house']\n data['question'] = qn['question']\n data['answer'] = qn['answer']\n data['type'] = qn['type']\n data['attr'] = attr\n data['qe'] = qe\n data['ae'] = ae\n data['path_name'] = qn['path_name']\n data['path_ix'] = path_ix\n data['nav_ids'] = nav_ids\n data['nav_types'] = nav_types\n data['nav_phrases'] = nav_phrases\n data['nav_phrase_embs'] = nav_phrase_embs\n data['nav_ego_feats'] = nav_ego_feats\n data['nav_action_inputs'] = nav_action_inputs\n data['nav_action_outputs'] = nav_action_outputs\n if self.requires_imgs:\n data['nav_ego_imgs'] = nav_ego_imgs\n return data", "def scenes_to_frames():\n # Scene 001 from frames 1-150\n cmd.scene('001', animate=0)\n cmd.mview('store', 1)\n cmd.mview('store', 150)\n # Scene 002 from frames 250-400\n cmd.scene('002', animate=0)\n cmd.mview('store', 250)\n cmd.mview('store', 400)", "def __getitem__(self, index):\n image_id = self.image_ids[index]\n\n filename = self.image_id_to_filename[image_id]\n image_path = os.path.join(self.image_dir, filename)\n\n with 
open(image_path, 'rb') as f:\n with PIL.Image.open(f) as image:\n WW, HH = image.size\n image = self.transform(image.convert('RGB'))\n\n H, W = self.image_size\n objs, boxes, masks = [], [], []\n\n for object_data in self.image_id_to_objects[image_id]:\n # objs.append(object_data['category_id'])\n objs.append(int(object_data.find('name').get(\"id\")))\n\n bndbox = object_data.findall('bndbox')[0]\n xmin = int(bndbox.find('xmin').text)\n ymin = int(bndbox.find('ymin').text)\n xmax = int(bndbox.find('xmax').text)\n ymax = int(bndbox.find('ymax').text)\n w = xmax - xmin\n h = ymax - ymin\n\n boxes.append(torch.FloatTensor([xmin, ymin, xmax, ymax]))\n\n # This will give a numpy array of shape (HH, WW)\n mask = torch.zeros(1, H, W)\n # mask = seg_to_mask(object_data['segmentation'], WW, HH)\n mask[:, round(ymin * H):max(round(ymin * H) + 1, round(ymax * H)),\n round(xmin * W):max(round(xmin * W) + 1, round(xmax * W))] = 1\n masks.append(mask)\n # shuffle objs\n O = len(objs)\n rand_idx = list(range(O))\n random.shuffle(rand_idx)\n\n objs = [objs[i] for i in rand_idx]\n boxes = [boxes[i] for i in rand_idx]\n masks = [masks[i] for i in rand_idx]\n\n objs = torch.LongTensor(objs)\n boxes = torch.stack(boxes, dim=0)\n masks = torch.stack(masks, dim=0)\n\n # print(image_path)\n\n return image, objs, boxes, masks", "def place_items(self):\n for item in self.item_kit:\n coords = self.maze.random_coordinates()\n item(coords, self.scale)", "def show(self):\n for x in range(0,3):\n for y in range(0,3):\n item = self[x,y]\n print(f\"({x},{y}): {item.id}, {item.cw}\")", "def render(self):\n step = 1\n while step < self.number_steps and self.update():\n step += 1", "def __getitem__(self, item):\n c_ex = self.examples[item]\n # randomly select ref mic\n mics = [x for x in c_ex.keys()]\n if self.train:\n np.random.shuffle(mics) # randomly permute during training to change ref mics\n\n mixtures = []\n sources = []\n for i in range(len(mics)):\n c_mic = c_ex[mics[i]]\n\n if self.segment:\n offset = 0\n if c_mic[\"length\"] > int(self.segment * self.sample_rate):\n offset = np.random.randint(\n 0, c_mic[\"length\"] - int(self.segment * self.sample_rate)\n )\n\n # we load mixture\n mixture, fs = sf.read(\n c_mic[\"mixture\"],\n start=offset,\n stop=offset + int(self.segment * self.sample_rate),\n dtype=\"float32\",\n )\n spk1, fs = sf.read(\n c_mic[\"spk1\"],\n start=offset,\n stop=offset + int(self.segment * self.sample_rate),\n dtype=\"float32\",\n )\n spk2, fs = sf.read(\n c_mic[\"spk2\"],\n start=offset,\n stop=offset + int(self.segment * self.sample_rate),\n dtype=\"float32\",\n )\n else:\n mixture, fs = sf.read(c_mic[\"mixture\"], dtype=\"float32\") # load all\n spk1, fs = sf.read(c_mic[\"spk1\"], dtype=\"float32\")\n spk2, fs = sf.read(c_mic[\"spk2\"], dtype=\"float32\")\n\n mixture = torch.from_numpy(mixture).unsqueeze(0)\n spk1 = torch.from_numpy(spk1).unsqueeze(0)\n spk2 = torch.from_numpy(spk2).unsqueeze(0)\n\n assert fs == self.sample_rate\n mixtures.append(mixture)\n sources.append(torch.cat((spk1, spk2), 0))\n\n mixtures = torch.cat(mixtures, 0)\n sources = torch.stack(sources)\n # we pad till max_mic\n valid_mics = mixtures.shape[0]\n if mixtures.shape[0] < self.max_mics:\n dummy = torch.zeros((self.max_mics - mixtures.shape[0], mixtures.shape[-1]))\n mixtures = torch.cat((mixtures, dummy), 0)\n sources = torch.cat((sources, dummy.unsqueeze(1).repeat(1, sources.shape[1], 1)), 0)\n return mixtures, sources, valid_mics", "def _get_visible_idxs_and_update_texs(self, estack):\n layer_stack = 
self.layer_stack\n if layer_stack.examine_layer_mode_enabled:\n idx = layer_stack.focused_layer_idx\n visible_idxs = [] if idx is None or layer_stack.layers[idx].image is None else [idx]\n elif layer_stack.layers:\n visible_idxs = [idx for idx, layer in enumerate(layer_stack.layers) if layer.visible and layer.image is not None]\n else:\n visible_idxs = []\n for tex_unit, idx in enumerate(visible_idxs):\n layer = layer_stack.layers[idx]\n image = layer.image\n image.async_texture.bind(tex_unit, estack)\n # The following generateMipMaps call completes in microseconds as mipmaps were already auto-generated on an _AsyncTextureUploadThread. In fact, we should not have to call\n # generateMipMaps at this point. However, OS X needs this call in order to see mipmaps generated on another thread. Without it, all mip levels aside from base are black\n # on OS X.\n image.async_texture.tex.generateMipMaps()\n return visible_idxs", "def animate_linearized_modes(self ):\n \n from pyro.dynamic.statespace import linearize\n \n linearized_sys = linearize( self )\n \n animations = []\n \n for i in range(self.n):\n ani = linearized_sys.animate_eigen_mode( i , self.is_3d )\n \n animations.append( ani )\n \n return linearized_sys , animations", "def __getitem__(self, item):\n return self.cube[item]", "def draw(self):\n for section in self.sections:\n canvas_reset(self.canvas)\n section.draw(self.canvas)", "def draw(self):\n\n for item in self.vis:\n item.undraw()\n self.render()\n for item in self.vis:\n item.draw(self.win)\n self.drawn = True", "def __getitem__(self, idx) :\n\n return self.getitem_all(idx * settings.WORLD_SIZE + settings.RANK)", "def test_sceneImport24281(self):\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n #\r\n # first, get some data\r\n #\r\n self.delayDisplay(\"Getting Data\")\r\n import SampleData\r\n head = SampleData.downloadSample(\"MRHead\")\r\n\r\n #\r\n # create a label map and set it for editing\r\n #\r\n self.delayDisplay(\"Setting up LabelMap\")\r\n volumesLogic = slicer.modules.volumes.logic()\r\n headLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, head, head.GetName() + '-label' )\r\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\r\n selectionNode.SetActiveVolumeID( head.GetID() )\r\n selectionNode.SetActiveLabelVolumeID( headLabel.GetID() )\r\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\r\n\r\n #\r\n # got to the editor and do some drawing\r\n #\r\n self.delayDisplay(\"Setting up Editor and drawing\")\r\n parameterNode = EditUtil.getParameterNode()\r\n lm = slicer.app.layoutManager()\r\n paintEffectOptions = EditorLib.PaintEffectOptions()\r\n paintEffectOptions.setMRMLDefaults()\r\n paintEffectOptions.__del__()\r\n\r\n self.delayDisplay('Paint radius is %s' % parameterNode.GetParameter('PaintEffect,radius'))\r\n sliceWidget = lm.sliceWidget('Red')\r\n size = min(sliceWidget.width,sliceWidget.height)\r\n step = int(size / 12)\r\n center = int(size / 2)\r\n parameterNode.SetParameter('PaintEffect,radius', '20')\r\n paintTool = EditorLib.PaintEffectTool(sliceWidget)\r\n self.delayDisplay('Paint radius is %s, tool radius is %d' % (parameterNode.GetParameter('PaintEffect,radius'),paintTool.radius))\r\n for label in range(1,5):\r\n EditUtil.setLabel(label)\r\n pos = center - 2*step + (step * label)\r\n self.delayDisplay('Painting %d, at (%d,%d)' % (label,pos,pos),200)\r\n paintTool.paintAddPoint(pos,pos)\r\n paintTool.paintApply()\r\n paintTool.cleanup()\r\n paintTool = None\r\n\r\n #\r\n # now build:\r\n # create a model 
using the command line module\r\n # based on the current editor parameters\r\n # - make a new hierarchy node\r\n #\r\n\r\n self.delayDisplay( \"Building...\" )\r\n\r\n parameters = {}\r\n parameters[\"InputVolume\"] = headLabel.GetID()\r\n # create models for all labels\r\n parameters[\"JointSmoothing\"] = True\r\n parameters[\"StartLabel\"] = -1\r\n parameters[\"EndLabel\"] = -1\r\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\r\n outHierarchy.SetScene( slicer.mrmlScene )\r\n outHierarchy.SetName( \"sceneImport2428Hierachy\" )\r\n slicer.mrmlScene.AddNode( outHierarchy )\r\n parameters[\"ModelSceneFile\"] = outHierarchy\r\n\r\n modelMaker = slicer.modules.modelmaker\r\n self.CLINode = None\r\n self.CLINode = slicer.cli.runSync(modelMaker, self.CLINode, parameters, delete_temporary_files=False)\r\n\r\n self.delayDisplay(\"Models built\")\r\n\r\n success = self.verifyModels()\r\n\r\n success = success and (slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" ) > 3)\r\n\r\n self.delayDisplay(\"Test finished\")\r\n\r\n if success:\r\n self.delayDisplay(\"Ahh... test passed.\")\r\n else:\r\n self.delayDisplay(\"!$!$!#!@#!@!@$%! Test Failed!!\")\r\n\r\n self.assertTrue(success)", "def render(self):\n\n # self.screen.blit(self.background, (0, 0))\n\n \"\"\"work with ever game state\"\"\"\n if self.state == FIRST_ENTER:\n \"\"\"welcome screen))\"\"\"\n text = self.font.render(\"PETERSBURG VAMPIRES\", True, (255, 0, 0))\n self.screen.blit(text, (SCREEN_WIDTH / 5, SCREEN_HEIGHT / 3))\n text = self.font.render(\"press ENTER to start\", True, (255, 0, 0))\n self.screen.blit(text, (SCREEN_WIDTH / 4, SCREEN_HEIGHT / 2))\n elif self.state == GAME:\n \"\"\"rendering enemies and player\"\"\"\n old_time = time.time()\n self.levels[self.curient_level].render(self.screen)\n # self.levels[self.curient_level].render(self.screen)\n for obj in self.levels[self.curient_level].objects:\n \"\"\"debug mode\"\"\"\n # text = self.debug_font.render(\"koor:\" + str(obj.position_np), True, (255, 0, 0))\n # self.screen.blit(text, (obj.position_np[X], obj.position_np[Y]))\n\n # text = self.debug_font.render(\"x\", True, (255, 0, 0))\n # self.screen.blit(text, (obj.position_np[X]+obj.image_size[X], obj.position_np[Y]))\n #\n # text = self.debug_font.render(\"x\", True, (255, 0, 0))\n # self.screen.blit(text, (obj.position_np[X], obj.position_np[Y]+obj.image_size[Y]))\n #\n # text = self.debug_font.render(\"x\", True, (255, 0, 0))\n # self.screen.blit(text, (obj.position_np[X]+obj.image_size[X], obj.position_np[Y]+obj.image_size[Y]))\n #\n # x, y = get_grid_xy(obj.position_np, ZOMBIE_SIZE)\n # text = self.debug_font.render(\"x_grid: \" + str(x) + \",y_grid: \" + str(y), True, (255, 0, 0))\n # self.screen.blit(text, (obj.position_np[X], obj.position_np[Y]+20))\n\n obj.render(self.screen)\n \"\"\"rendering HP status\"\"\"\n text = self.font.render(\"HP: \" + str(self.player.HP), True, (255, 0, 0))\n self.screen.blit(text, (0, 10))\n new_time = time.time()\n self.render_time.append(new_time - old_time)\n elif self.state == END:\n \"\"\"The end screen\"\"\"\n self.screen.blit(pygame.image.load(END_BUTTON_PATH), (SCREEN_WIDTH / 5, SCREEN_HEIGHT / 8))\n elif self.state == WIN:\n text = self.font.render(\"YOU WIN, REPEAT? \", True, (255, 0, 0))\n self.screen.blit(text, (SCREEN_HEIGHT / 2, SCREEN_WIDTH / 2))\n\n pygame.display.flip()", "def simulate_scenes(scenes, features, step_size, params):\n instances = []\n\n for scene in scenes:\n # We simulate a sweep at each starting position. 
Note that we are \n # assuming that the initial three focus measures were obtained \n # by moving right.\n for lens_pos in range(step_size * 2, scene.step_count):\n\n # Going right.\n simulate_sweep(scene, features, instances, lens_pos, \n Direction(\"right\"), get_move_right_classification, params)\n\n # Going left. \n simulate_sweep(scene, features, instances, lens_pos, \n Direction(\"left\"), get_move_left_classification, params)\n\n # Balance datasets.\n if params.outlierHandling == OutlierHandling.SAMPLING:\n instances = balance_dataset_sampling(instances)\n assert_balanced_sampling(instances)\n elif params.outlierHandling == OutlierHandling.WEIGHTING:\n instances = balance_dataset_weighting(instances)\n assert_balanced_weighting(instances)\n else:\n assert False\n\n return instances", "def positions(self, tileID, numSamples):", "def do_scenes(self, line):\n\n print 'List of Scenes \\n'\n print 'ID\\tName'\n\n for index, scene in enumerate(self.huuey.scenes):\n print u\"{index}\\t{unique}\".format(index=index+1, unique=scene)", "def moveItems(rectList, enemiesList, mushrooms, goombas, spinys):\n X, Y, DELAY, MOVEUP, MUSHVX, MUSHVY, INFLOOR = 0, 1, 4, 5, 6, 7, 8\n ENMYVX, ENMYVY, ENMYIDLE, ENMYINFLOOR = 4, 5, 6, 7\n # Making sure all mushrooms are activated\n for mushroom in mushrooms: # Going through each mushroom\n if mushroom[DELAY] > 0: # Checking if it's being delayed and progressing the counter\n mushroom[DELAY] -= 1\n elif mushroom[MOVEUP] > 0: # Checking if it's animating and progressing the animation\n mushroom[MOVEUP] -= 1\n mushroom[1] -= 1\n else: # If delay and animation are done, check for its collision\n itemCollide(mushroom, rectList, [X, Y, MUSHVX, MUSHVY, INFLOOR])\n for goomba in goombas: # Going through each goomba\n if goomba[ENMYIDLE] == 1: # Checking if the goomba is active\n itemCollide(goomba, rectList, [X, Y, ENMYVX, ENMYVY, ENMYINFLOOR], enemiesList[:2]) # Checking collision\n if goomba[ENMYIDLE] == 2: # Checking if the goomba is dying\n goomba[ENMYINFLOOR] -=1 # Using the INFLOOR as a counter for removal\n for spiny in spinys: # Going through each spiny\n if spiny[ENMYIDLE] == 1: # Checking if it's active\n itemCollide(spiny, rectList, [X, Y, ENMYVX, ENMYVY, ENMYINFLOOR], enemiesList[:2]) # Checking collision", "def ROOMSELECTION_LOOP():\n pass", "def test_scenes_get(self):\n pass", "def read_from_ses3d_block(directory):\n\n # Initialise arrays of Cartesian coordinates.\n\n x=[]\n y=[]\n z=[]\n\n # Read yaml file containing information on the ses3d submodel.\n with io.open(os.path.join(directory,'modelinfo.yml'), 'rt') as fh:\n model_info = yaml.load(fh)\n\n rot_vec = np.array([model_info['geometry']['rot_x'], model_info['geometry']['rot_y'], model_info['geometry']['rot_z']])\n rot_angle = model_info['geometry']['rot_angle']\n\n # Read block files.\n\n fid_x = open(os.path.join(directory,'block_x'), 'r')\n fid_y = open(os.path.join(directory,'block_y'), 'r')\n fid_z = open(os.path.join(directory,'block_z'), 'r')\n\n dx = np.array(fid_x.read().strip().split('\\n'), dtype=float)\n dy = np.array(fid_y.read().strip().split('\\n'), dtype=float)\n dz = np.array(fid_z.read().strip().split('\\n'), dtype=float)\n\n fid_x.close()\n fid_y.close()\n fid_z.close()\n\n # Setup of coordinate lines.\n\n nsubvol = int(dx[0])\n\n idx = np.ones(nsubvol, dtype=int)\n idy = np.ones(nsubvol, dtype=int)\n idz = np.ones(nsubvol, dtype=int)\n\n for k in np.arange(1, nsubvol, dtype=int):\n idx[k] = int(dx[idx[k - 1]]) + idx[k - 1] + 1\n idy[k] = int(dy[idy[k - 1]]) + idy[k - 1] + 1\n 
idz[k] = int(dz[idz[k - 1]]) + idz[k - 1] + 1\n\n for k in np.arange(nsubvol, dtype=int):\n\n # Coordinates of the box corners.\n colat = dx[(idx[k] + 1):(idx[k] + 1 + int(dx[idx[k]]))]\n lon = dy[(idy[k] + 1):(idy[k] + 1 + int(dy[idy[k]]))]\n rad = dz[(idz[k] + 1):(idz[k] + 1 + int(dz[idz[k]]))]\n \n # Coordinates of the box centroids.\n colat_c = (np.array(colat[0:-1])+np.array(colat[1:]))/2.0\n lon_c = (np.array(lon[0:-1]) + np.array(lon[1:]))/2.0\n rad_c = (np.array(rad[0:-1]) + np.array(rad[1:]))/2.0\n \n # Compute Cartesian coordinates for all grid points.\n for c in colat_c:\n for l in lon_c:\n xx=np.cos(l*np.pi/180.0)*np.sin(c*np.pi/180.0)\n yy=np.sin(l*np.pi/180.0)*np.sin(c*np.pi/180.0)\n zz=np.cos(c*np.pi/180.0)\n for r in rad_c:\n x.append(r*xx)\n y.append(r*yy)\n z.append(r*zz)\n \n\n # Rotate, if needed.\n\n if (rot_angle!=0.0):\n rot_mat = get_rot_matrix(rot_angle*np.pi/180.0, *rot_vec)\n x, y, z = rotate(x, y, z, rot_mat)\n\n # Return.\n\n return x, y, z", "def _collect_scene_data(self, config):\n\n self._config = config\n self.scenes_root_path = config['scenes_root_path']\n assert(os.path.isdir(self.scenes_root_path))\n\n self._scene_dict = dict()\n # each one is a list of scenes\n self._all_image_paths = {\"train\": [], \"test\": []}\n\n for key, val in self._all_image_paths.items():\n for scene_collection_name in config[key]:\n scene_collection_dir = os.path.join(self.scenes_root_path, scene_collection_name)\n assert os.path.isdir(scene_collection_dir), scene_collection_dir\n # Scan all scenes in this scene dir\n for scene_name in os.listdir(scene_collection_dir):\n full = os.path.join(scene_collection_dir, scene_name)\n if os.path.isdir(full):\n val += self._get_all_rgb_image_paths_in_scene_dir(full)", "def get_items(self):\n\n self.logger.info(\"Site-Descriptors Builder Started\")\n\n self.logger.info(\"Setting indexes\")\n\n # All relevant materials that have been updated since site-descriptors\n # were last calculated\n\n q = dict(self.mat_query)\n all_task_ids = list(self.materials.distinct(self.materials.key, q))\n q.update(self.materials.lu_filter(self.site_descriptors))\n new_task_ids = list(self.materials.distinct(self.materials.key, q))\n self.logger.info(\n \"Found {} entirely new materials for site-descriptors data\".format(\n len(new_task_ids)))\n for task_id in all_task_ids:\n if task_id in new_task_ids:\n any_piece = True\n\n else: # Any piece of info missing?\n data_present = self.site_descriptors.query(\n properties=[self.site_descriptors.key, \"site_descriptors\", \"statistics\"],\n criteria={self.site_descriptors.key: task_id}).limit(1)[0]\n any_piece = False\n for k, v in self.all_output_pieces.items():\n if k not in list(data_present.keys()):\n any_piece = True\n break\n else:\n any_piece = False\n for e in v:\n if e not in data_present[k]:\n any_piece = True\n break\n if not any_piece:\n for l in self.sds['csf'].feature_labels():\n for fpi in data_present['site_descriptors']['csf']:\n if l not in fpi.keys():\n any_piece = True\n break\n if any_piece:\n yield self.materials.query(\n properties=[self.materials.key, \"structure\"],\n criteria={self.materials.key: task_id}).limit(1)[0]", "def deleteAllModelsFromScene(self):\n #productive #onButton\n profprint()\n while slicer.util.getNodes('python-catch-round_*') != {}:\n nodes = slicer.util.getNodes('python-catch-round_*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n while slicer.util.getNodes('manual-seg_*') != {}:\n nodes = slicer.util.getNodes('manual-seg_*')\n for node in 
nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n while slicer.util.getNodes('obturator-seg_*') != {}:\n nodes = slicer.util.getNodes('obturator-seg_*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n #while slicer.mrmlScene.GetNodesByClass('vtkMRMLAnnotationFiducialNode') !={}:\n # nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLAnnotationFiducialNode')\n # for node in nodes.values():\n # slicer.mrmlScene.RemoveNode(node)\n while slicer.util.getNodes('template slice position*') != {}:\n nodes = slicer.util.getNodes('template slice position*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeYellow\")\n if sYellow ==None :\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode2\")\n sYellow.SetSliceVisible(0)\n reformatLogic = slicer.vtkSlicerReformatLogic()\n reformatLogic.SetSliceNormal(sYellow,1,0,0)\n tempFidNodes = slicer.mrmlScene.GetNodesByName('Temp')\n for i in range(tempFidNodes.GetNumberOfItems()):\n node = tempFidNodes.GetItemAsObject(i)\n if node:\n slicer.mrmlScene.RemoveNode(node)\n sYellow.Modified()", "def event_loop(self):\n for event in pygame.event.get():\n self.scene.get_event(event)", "def _get_observations(self):\n food = np.array(self.game.state.data.food.data)\n walls = np.array(self.game.state.data.layout.walls.data)\n map_shape = walls.shape\n capsules = self.game.state.data.capsules\n pacman_pos = self.game.state.data.agentStates[0].configuration.pos\n\n gosts_pos = list(map(lambda agent: agent.configuration.pos,\n self.game.state.data.agentStates[1:]))\n gosts_scared = list(\n map(lambda agent: agent.scaredTimer > 0, self.game.state.data.agentStates[1:]))\n\n \"\"\"\n 0: empty,\n 1: wall,\n 2: food,\n 3: capsules,\n 4: ghost,\n 5: scared ghost,\n 6: pacman\n \"\"\"\n\n view_slices = ((max(pacman_pos[0]-self.view_distance[0], 0), min(pacman_pos[0]+self.view_distance[0]+1, map_shape[0])),\n (max(pacman_pos[1]-self.view_distance[1], 0), min(pacman_pos[1]+self.view_distance[1]+1, map_shape[1])))\n\n def select(l):\n return l[view_slices[0][0]:view_slices[0][1], view_slices[1][0]:view_slices[1][1]]\n\n obs = np.vectorize(lambda v: 1 if v else 0)(select(walls))\n obs = obs + np.vectorize(lambda v: 2 if v else 0)(select(food))\n\n def pos_to_relative_pos(pos):\n if (pos[0] < view_slices[0][0] or view_slices[0][1] <= pos[0]\n or pos[1] < view_slices[1][0] or view_slices[1][1] <= pos[1]):\n return None\n else:\n return pos[0]-view_slices[0][0], pos[1]-view_slices[1][0]\n\n for c_relative_pos in filter(lambda x: x is not None, map(pos_to_relative_pos, capsules)):\n obs[c_relative_pos[0], c_relative_pos[1]] = 3\n\n for i, g_relative_pos in enumerate(map(pos_to_relative_pos, gosts_pos)):\n if (g_relative_pos is not None):\n obs[int(g_relative_pos[0]), int(g_relative_pos[1])\n ] = 5 if gosts_scared[i] else 4\n\n pacman_relative_pos = pos_to_relative_pos(pacman_pos)\n\n obs[pacman_relative_pos[0], pacman_relative_pos[1]] = 6\n\n obs[0, 0] = 2 if np.any(\n food[0:pacman_pos[0]+1, 0:pacman_pos[1]+1]) else 0\n obs[obs.shape[0]-1,\n 0] = 2 if np.any(food[pacman_pos[0]:map_shape[0], 0:pacman_pos[1]+1])else 0\n\n obs[0, obs.shape[1] -\n 1] = 2 if np.any(food[0:pacman_pos[0]+1, pacman_pos[1]:map_shape[0]]) else 0\n obs[obs.shape[0]-1, obs.shape[1]-1] = 2 if np.any(\n food[pacman_pos[0]:map_shape[0], pacman_pos[1]:map_shape[0]]) else 0\n\n # print(np.transpose(obs)[::-1, :])\n\n return obs", "def step(self, memories):\n return", "def _load( self, i ):\n if 
ir.config.verbosity_level >= 2: print(\"[observation] Lazy loading raster\")\n self._raster_data[i] = raster_cube( self._raster_files, line=self._line_info['description'][i], keep_null=self._keep_null )", "def test_sections_json_spider_three_levels(self):\n\n title = (\"Taking Action for the Social and Emotional Health of \"\n\t \"Young Children: A Report to the Community from the \" \n \"Denver Early Childhood Council\")\n summary = (\"Now, Denver has a plan of action to make it easier \"\n \"for families to access early childhood mental health \"\n \"information, intervention and services.\")\n byline = \"Denver Early Childhood Council\"\n story = create_story(title=title, summary=summary, byline=byline)\n layout = SectionLayout.objects.get(sectionlayouttranslation__name=\"Side by Side\")\n section1 = create_section(\"We're ready to take action. Are you?\",\n story=story, layout=layout, weight=7)\n section2 = create_section(\"Ricardo's Story\",\n\t\t\t story=story, layout=layout, weight=2)\n section3 = create_section(\"Meeting the need for better child mental health services\",\n\t\t\t story=story, layout=layout, root=True,\n weight=1)\n section4 = create_section(\"Healthy Minds Support Strong Futures\",\n story=story, layout=layout, weight=5) \n section5 = create_section(\"Community Voices\",\n\t\t\t story=story, layout=layout, weight=3)\n section6 = create_section(\"Our Vision: That All Children in Denver are Valued, Healthy and Thriving\",\n\t\t\t story=story, layout=layout, weight=4)\n section7 = create_section(\"Defining a \\\"Framework for Change\\\" with Actionable Goals and Strategies\",\n\t\t\t story=story, layout=layout, weight=5) \n section8 = create_section(\"How Can the Plan Make a Difference?\",\n\t\t\t story=story, layout=layout, weight=5)\n section9 = create_section(\"Impact\", story=story, layout=layout,\n weight=6)\n SectionRelation.objects.create(parent=section6, child=section8,\n weight=0)\n SectionRelation.objects.create(parent=section7, child=section9,\n weight=0)\n SectionRelation.objects.create(parent=section6, child=section7,\n weight=0)\n SectionRelation.objects.create(parent=section3, child=section1,\n weight=0)\n SectionRelation.objects.create(parent=section3, child=section6,\n weight=0)\n SectionRelation.objects.create(parent=section3, child=section4,\n weight=0)\n SectionRelation.objects.create(parent=section3, child=section5,\n weight=0)\n SectionRelation.objects.create(parent=section3, child=section2,\n weight=0)\n json_sections = simplejson.loads(story.structure.sections_json(\n include_summary=False, include_call_to_action=False))\n self.assertIn(section8.section_id,\n self._get_section(\n json_sections, section6.section_id)['children'])\n self.assertIn(section9.section_id,\n self._get_section(json_sections, section7.section_id)['children'])\n self.assertIn(section7.section_id,\n self._get_section(json_sections, section6.section_id)['children'])\n self.assertIn(section1.section_id,\n self._get_section(json_sections, section3.section_id)['children'])\n self.assertIn(section6.section_id,\n self._get_section(json_sections, section3.section_id)['children'])\n self.assertIn(section4.section_id,\n self._get_section(json_sections, section3.section_id)['children'])\n self.assertIn(section5.section_id,\n self._get_section(json_sections, section3.section_id)['children'])\n self.assertIn(section2.section_id,\n self._get_section(json_sections, section3.section_id)['children'])", "def __getitem__(self, index):\n training_data_structure = {\n 'session1': list(range(1, 
12 + 1)),\n 'session2': list(range(1, 9 + 1)),\n 'session3': list(range(1, 9 + 1)),\n }\n\n # 30 x 3 x 32 x 64\n def random_si_idx():\n if self.is_train_data:\n si_idx = np.random.choice(self.train_subjects)\n labels = self.train_subjects.index(si_idx)\n else:\n\n si_idx = np.random.choice(self.test_subjects)\n labels = self.test_subjects.index(si_idx)\n return si_idx, labels\n\n def random_vi_idx(si):\n\n if si in list(range(1, 147 + 1)):\n if si in [1, 2, 4, 7, 8, 12, 13, 17, 31, 40, 48, 77]:\n reading_dir = random.choice(['session1', 'session3'])\n else:\n reading_dir = 'session1'\n else:\n reading_dir = 'session2'\n\n vi_idx = np.random.choice(training_data_structure[reading_dir])\n\n return reading_dir, vi_idx\n\n def random_length(dirt, length):\n files = sorted(os.listdir(dirt))\n num = len(files)\n if num - length < 2:\n return None\n start = np.random.randint(1, num - length)\n end = start + length\n return files[start:end]\n\n def read_frames(frames_pth, file_names):\n # frames = np.zeros(self.im_shape, np.float32)\n frames = []\n for f in file_names:\n frame = np.asarray(Image.open(os.path.join(frames_pth, f)))\n frame = self.transform(frame)\n frames.append(frame)\n frames = torch.stack(frames)\n return frames\n\n si, labels = random_si_idx()\n session_dir1, vi1 = random_vi_idx(si)\n session_dir2, vi2 = random_vi_idx(si)\n frames_pth1 = os.path.join(self.data_root, session_dir1, '%03d_%02d' % (si, vi1))\n frames_pth2 = os.path.join(self.data_root, session_dir2, '%03d_%02d' % (si, vi2))\n file_names1 = random_length(frames_pth1, self.clip_len)\n file_names2 = random_length(frames_pth2, self.clip_len)\n\n while True:\n if file_names1 == None or file_names2 == None:\n session_dir1, vi1 = random_vi_idx(si)\n session_dir2, vi2 = random_vi_idx(si)\n frames_pth1 = os.path.join(self.data_root, session_dir1, '%03d_%02d' % (si, vi1))\n frames_pth2 = os.path.join(self.data_root, session_dir2, '%03d_%02d' % (si, vi2))\n file_names1 = random_length(frames_pth1, self.clip_len)\n file_names2 = random_length(frames_pth2, self.clip_len)\n else:\n break\n\n data1 = read_frames(frames_pth1, file_names1)\n data2 = read_frames(frames_pth2, file_names2)\n\n return data1, data2, labels", "def walk(self):\n context, active_obj, actual_mode, mode = self.get_context()\n if not mode: return\n \n container = self.container\n sel_map = {False: container(), True: container((\"select\",))}\n \n if mode != 'EDIT_MESH': self.bmesh = None\n \n if mode == 'OBJECT':\n total = len(context.selected_objects)\n item = active_obj\n yield ([], item, total)\n \n select = sel_map[True] # selected by definition\n for item in context.selected_objects:\n if not (item and item.name): return # object deleted (state disrupted)\n yield (item, select)\n elif mode == 'EDIT_MESH':\n mesh = active_obj.data\n elem_types = self.elem_types\n if actual_mode == 'EDIT_MESH':\n if self.copy_bmesh:\n self.bmesh = bmesh.from_edit_mesh(mesh).copy()\n else:\n if not (self.bmesh and self.bmesh.is_valid):\n self.bmesh = bmesh.from_edit_mesh(mesh)\n bm = self.bmesh\n \n item = bm.faces.active\n \n if mesh.total_vert_sel == 0: # non-0 only in Edit mode\n yield ([], item, 0)\n return\n \n # No, by default all selected elements should be returned!\n #if not elem_types:\n # elem_types = bm.select_mode\n \n colls = []\n if (not elem_types) or ('FACE' in elem_types):\n colls.append(bm.faces)\n if (not elem_types) or ('EDGE' in elem_types):\n colls.append(bm.edges)\n if (not elem_types) or ('VERT' in elem_types):\n colls.append(bm.verts)\n \n total = 
sum(len(items) for items in colls)\n if bm.select_history:\n yield (list(bm.select_history), item, total)\n else:\n yield ([], item, total)\n \n for items in colls:\n for item in items:\n if not item.is_valid:\n self.bmesh = None\n return\n yield (item, sel_map[item.select])\n else:\n self.bmesh = None\n \n colls = []\n if (not elem_types) or ('FACE' in elem_types):\n colls.append(mesh.polygons)\n if (not elem_types) or ('EDGE' in elem_types):\n colls.append(mesh.edges)\n if (not elem_types) or ('VERT' in elem_types):\n colls.append(mesh.vertices)\n \n total = sum(len(items) for items in colls)\n item = None\n if mesh.polygons.active >= 0:\n item = mesh.polygons[mesh.polygons.active]\n yield ([], item, total)\n \n for items in colls:\n for item in items:\n yield (item, sel_map[item.select])\n elif mode in {'EDIT_CURVE', 'EDIT_SURFACE'}:\n total = sum(len(spline.bezier_points) + len(spline.points)\n for spline in active_obj.data.splines)\n yield ([], None, total)\n \n bezier_sel_map = {\n (False, False, False): container(),\n (True, False, False): container((\"select_left_handle\",)),\n (False, True, False): container((\"select_control_point\",)),\n (False, False, True): container((\"select_right_handle\",)),\n (True, True, False): container((\"select_left_handle\", \"select_control_point\")),\n (False, True, True): container((\"select_control_point\", \"select_right_handle\")),\n (True, False, True): container((\"select_left_handle\", \"select_right_handle\")),\n (True, True, True): container((\"select_left_handle\", \"select_control_point\", \"select_right_handle\")),\n }\n \n # It seems like the only way the validity of spline can be determined\n # is to check if path_from_id() returns empty string.\n # However, it also seems that Blender does not crash when trying to\n # access deleted splines or their points.\n for spline in active_obj.data.splines:\n for item in spline.bezier_points:\n yield (item, bezier_sel_map[(item.select_left_handle, item.select_control_point, item.select_right_handle)])\n \n for item in spline.points:\n yield (item, sel_map[item.select])\n elif mode == 'EDIT_METABALL':\n total = 1 # only active is known in current API\n item = active_obj.data.elements.active\n yield ([], item, total)\n \n # We don't even know if active element is actually selected\n # Just assume it is, to have at least some information\n #yield (item, container())\n yield (item, sel_map[True])\n elif mode == 'EDIT_LATTICE':\n total = len(active_obj.data.points)\n yield ([], None, total)\n \n for item in active_obj.data.points:\n yield (item, sel_map[item.select])\n elif mode == 'EDIT_ARMATURE':\n total = len(active_obj.data.edit_bones)\n item = active_obj.data.edit_bones.active\n yield ([], item, total)\n \n editbone_sel_map = {\n (False, False, False): container(),\n (True, False, False): container((\"select_head\",)),\n (False, True, False): container((\"select\",)),\n (False, False, True): container((\"select_tail\",)),\n (True, True, False): container((\"select_head\", \"select\")),\n (False, True, True): container((\"select\", \"select_tail\")),\n (True, False, True): container((\"select_head\", \"select_tail\")),\n (True, True, True): container((\"select_head\", \"select\", \"select_tail\")),\n }\n \n for item in active_obj.data.edit_bones:\n if not (item and item.name): return # object deleted (state disrupted)\n yield (item, editbone_sel_map[(item.select_head, item.select, item.select_tail)])\n elif mode == 'POSE':\n total = len(active_obj.data.bones)\n item = 
active_obj.data.bones.active\n \n if self.pose_bones:\n pose_bones = active_obj.pose.bones\n \n pb = (pose_bones.get(item.name) if item else None)\n yield ([], pb, total)\n \n for item in active_obj.data.bones:\n if not (item and item.name): return # object deleted (state disrupted)\n yield (pose_bones.get(item.name), sel_map[item.select])\n else:\n yield ([], item, total)\n \n for item in active_obj.data.bones:\n if not (item and item.name): return # object deleted (state disrupted)\n yield (item, sel_map[item.select])\n elif mode == 'PARTICLE':\n # Theoretically, particle keys can be selected,\n # but there seems to be no API for working with this\n pass\n else:\n pass # no selectable elements in other modes", "def run(self):\n print('Running test of the markups in different views')\n\n #\n # first load the data\n #\n import SampleData\n sampleDataLogic = SampleData.SampleDataLogic()\n print(\"Getting MR Head Volume\")\n mrHeadVolume = sampleDataLogic.downloadMRHead()\n\n #\n # link the viewers\n #\n sliceLogic = slicer.app.layoutManager().sliceWidget('Red').sliceLogic()\n compositeNode = sliceLogic.GetSliceCompositeNode()\n compositeNode.SetLinkedControl(1)\n\n #\n # MR Head in the background\n #\n sliceLogic.StartSliceCompositeNodeInteraction(1)\n compositeNode.SetBackgroundVolumeID(mrHeadVolume.GetID())\n sliceLogic.EndSliceCompositeNodeInteraction()\n\n #\n # switch to conventional layout\n #\n lm = slicer.app.layoutManager()\n lm.setLayout(2)\n\n # create a fiducial list\n displayNode = slicer.vtkMRMLMarkupsDisplayNode()\n slicer.mrmlScene.AddNode(displayNode)\n fidNode = slicer.vtkMRMLMarkupsFiducialNode()\n slicer.mrmlScene.AddNode(fidNode)\n fidNode.SetAndObserveDisplayNodeID(displayNode.GetID())\n\n # make it active\n selectionNode = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSelectionNodeSingleton\")\n if (selectionNode is not None):\n selectionNode.SetReferenceActivePlaceNodeID(fidNode.GetID())\n\n # add some known points to it\n eye1 = [33.4975, 79.4042, -10.2143]\n eye2 = [-31.283, 80.9652, -16.2143]\n nose = [4.61944, 114.526, -33.2143]\n index = fidNode.AddFiducialFromArray(eye1)\n fidNode.SetNthFiducialLabel(index, \"eye-1\")\n index = fidNode.AddFiducialFromArray(eye2)\n fidNode.SetNthFiducialLabel(index, \"eye-2\")\n # hide the second eye as a test of visibility flags\n fidNode.SetNthFiducialVisibility(index, 0)\n index = fidNode.AddFiducialFromArray(nose)\n fidNode.SetNthFiducialLabel(index, \"nose\")\n\n self.logicDelayDisplay(\"Placed 3 fiducials\")\n\n # self.printViewAndSliceNodes()\n\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # switch to 2 3D views layout\n #\n lm.setLayout(15)\n self.logicDelayDisplay(\"Switched to 2 3D views\")\n # self.printViewAndSliceNodes()\n\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0 or self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1 and 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # show only in view 2\n #\n displayNode.AddViewNodeID(\"vtkMRMLViewNode2\")\n self.logicDelayDisplay(\"Showing only in view 2\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 1:\n self.logicDelayDisplay(\"Test failed: widget is not supposed to be visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n 
self.logicDelayDisplay(\"Test failed: widget is not visible in view 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # remove it so show in all\n #\n displayNode.RemoveAllViewNodeIDs()\n self.logicDelayDisplay(\"Showing in both views\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0 or self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1 and 2\")\n self.printViewNodeIDs(displayNode)\n return False\n\n #\n # show only in view 1\n #\n displayNode.AddViewNodeID(\"vtkMRMLViewNode1\")\n self.logicDelayDisplay(\"Showing only in view 1\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 1:\n self.logicDelayDisplay(\"Test failed: widget is not supposed to be visible in view 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n # switch back to conventional\n lm.setLayout(2)\n self.logicDelayDisplay(\"Switched back to conventional layout\")\n # self.printViewAndSliceNodes()\n\n # test of the visibility in slice views\n displayNode.RemoveAllViewNodeIDs()\n\n # jump to the last fiducial\n slicer.modules.markups.logic().JumpSlicesToNthPointInMarkup(fidNode.GetID(), index, 1)\n # refocus the 3D cameras as well\n slicer.modules.markups.logic().FocusCamerasOnNthPointInMarkup(fidNode.GetID(), index)\n\n # show only in red\n displayNode.AddViewNodeID('vtkMRMLSliceNodeRed')\n self.logicDelayDisplay(\"Show only in red slice\")\n if self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed') != 1:\n self.logicDelayDisplay(\"Test failed: widget not displayed on red slice\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n # remove all, add green\n # print 'before remove all, after added red'\n # self.printViewNodeIDs(displayNode)\n displayNode.RemoveAllViewNodeIDs()\n # print 'after removed all'\n # self.printViewNodeIDs(displayNode)\n displayNode.AddViewNodeID('vtkMRMLSliceNodeGreen')\n self.logicDelayDisplay('Show only in green slice')\n if self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed') != 0 or self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeGreen') != 1:\n self.logicDelayDisplay(\"Test failed: widget not displayed only on green slice\")\n print '\\tred = ',self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed')\n print '\\tgreen =',self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeGreen')\n self.printViewNodeIDs(displayNode)\n return False\n\n return True", "def _init_components(self):\n for line in self:\n if line.is_kit_invoice_line:\n comp_obj = line.env['account.invoice.line.comp'].search([('invoice_line_id','=',self.id),('children_loaded','=',False)])\n if line.pricing == 'dynamic':\n hide_prices = False\n else:\n hide_prices = True \n for comp in comp_obj:\n comp.load_under_components(True,hide_prices)", "def select_items(self):\n\n self.listWidget.currentItem().setSelected(True)\n self.items_selected = self.listWidget.selectedItems()\n\n if self.frame_ordering == \"quality\":\n self.indices_selected = [self.quality_sorted_indices[self.listWidget.row(item)] for item\n in self.items_selected]\n self.frame_index = self.indices_selected[0]\n self.quality_index = self.rank_indices[self.frame_index]\n else:\n self.indices_selected = [self.listWidget.row(item) for item in self.items_selected]\n self.frame_index = self.indices_selected[0]\n self.quality_index = 
self.rank_indices[self.frame_index]\n\n self.synchronize_slider()", "def run(self):\n for msr in self.msrs:\n # find state transition matrix\n phi_p, state_prop = self._compute_stm(msr.time)\n\n # use stm to propagate perturbation and covariance\n cov_m = np.matmul(phi_p, np.matmul(self.cov_list[-1],\n np.transpose(phi_p)))\n\n # compute observation deviation, obs_state matrix\n y_i, h_tilde = self._msr_resid(msr, state_prop)\n\n # calculate kalman gain\n k_gain = self._calc_k_gain(cov_m, h_tilde, msr.cov)\n\n # measurement update\n cov_p, state_est = self._measurement_update(y_i,\n h_tilde,\n k_gain,\n cov_m,\n state_prop)\n\n # update the state lists\n self.residuals.append(y_i)\n self.prop_state_list.append(state_est)\n self.estimates.append(state_est)\n self.cov_list.append(cov_p)\n self.times.append(msr.time)", "def refreshStates(self):\n # Update the comboboxes\n self.updateLayersComboboxes()\n # Update the edit mode buttons\n self.updateEditState_pairsLayer()\n self.updateEditState_toBendLayer()\n # Update the transformation type\n self.updateTransformationType()", "def create_playthrough(world):\n # get locations containing progress items\n prog_locations = {location for location in world.get_filled_locations() if location.item.advancement}\n state_cache = [None]\n collection_spheres = []\n state = CollectionState(world)\n sphere_candidates = set(prog_locations)\n logging.debug('Building up collection spheres.')\n while sphere_candidates:\n\n # build up spheres of collection radius.\n # Everything in each sphere is independent from each other in dependencies and only depends on lower spheres\n\n sphere = {location for location in sphere_candidates if state.can_reach(location)}\n\n for location in sphere:\n state.collect(location.item, True, location)\n\n sphere_candidates -= sphere\n collection_spheres.append(sphere)\n state_cache.append(state.copy())\n\n logging.debug('Calculated sphere %i, containing %i of %i progress items.', len(collection_spheres), len(sphere),\n len(prog_locations))\n if not sphere:\n logging.debug('The following items could not be reached: %s', ['%s (Player %d) at %s (Player %d)' % (\n location.item.name, location.item.player, location.name, location.player) for location in\n sphere_candidates])\n if any([world.accessibility[location.item.player] != 'minimal' for location in sphere_candidates]):\n raise RuntimeError(f'Not all progression items reachable ({sphere_candidates}). 
'\n f'Something went terribly wrong here.')\n else:\n world.spoiler.unreachables = sphere_candidates\n break\n\n # in the second phase, we cull each sphere such that the game is still beatable,\n # reducing each range of influence to the bare minimum required inside it\n restore_later = {}\n for num, sphere in reversed(tuple(enumerate(collection_spheres))):\n to_delete = set()\n for location in sphere:\n # we remove the item at location and check if game is still beatable\n logging.debug('Checking if %s (Player %d) is required to beat the game.', location.item.name,\n location.item.player)\n old_item = location.item\n location.item = None\n if world.can_beat_game(state_cache[num]):\n to_delete.add(location)\n restore_later[location] = old_item\n else:\n # still required, got to keep it around\n location.item = old_item\n\n # cull entries in spheres for spoiler walkthrough at end\n sphere -= to_delete\n\n # second phase, sphere 0\n removed_precollected = []\n for item in (i for i in chain.from_iterable(world.precollected_items.values()) if i.advancement):\n logging.debug('Checking if %s (Player %d) is required to beat the game.', item.name, item.player)\n world.precollected_items[item.player].remove(item)\n world.state.remove(item)\n if not world.can_beat_game():\n world.push_precollected(item)\n else:\n removed_precollected.append(item)\n\n # we are now down to just the required progress items in collection_spheres. Unfortunately\n # the previous pruning stage could potentially have made certain items dependant on others\n # in the same or later sphere (because the location had 2 ways to access but the item originally\n # used to access it was deemed not required.) So we need to do one final sphere collection pass\n # to build up the correct spheres\n\n required_locations = {item for sphere in collection_spheres for item in sphere}\n state = CollectionState(world)\n collection_spheres = []\n while required_locations:\n state.sweep_for_events(key_only=True)\n\n sphere = set(filter(state.can_reach, required_locations))\n\n for location in sphere:\n state.collect(location.item, True, location)\n\n required_locations -= sphere\n\n collection_spheres.append(sphere)\n\n logging.debug('Calculated final sphere %i, containing %i of %i progress items.', len(collection_spheres),\n len(sphere), len(required_locations))\n if not sphere:\n raise RuntimeError(f'Not all required items reachable. 
Unreachable locations: {required_locations}')\n\n def flist_to_iter(node):\n while node:\n value, node = node\n yield value\n\n def get_path(state, region):\n reversed_path_as_flist = state.path.get(region, (region, None))\n string_path_flat = reversed(list(map(str, flist_to_iter(reversed_path_as_flist))))\n # Now we combine the flat string list into (region, exit) pairs\n pathsiter = iter(string_path_flat)\n pathpairs = zip_longest(pathsiter, pathsiter)\n return list(pathpairs)\n\n world.spoiler.paths = {}\n topology_worlds = (player for player in world.player_ids if world.worlds[player].topology_present)\n for player in topology_worlds:\n world.spoiler.paths.update(\n {str(location): get_path(state, location.parent_region) for sphere in collection_spheres for location in\n sphere if location.player == player})\n if player in world.get_game_players(\"A Link to the Past\"):\n # If Pyramid Fairy Entrance needs to be reached, also path to Big Bomb Shop\n # Maybe move the big bomb over to the Event system instead?\n if any(exit_path == 'Pyramid Fairy' for path in world.spoiler.paths.values() for (_, exit_path) in path):\n if world.mode[player] != 'inverted':\n world.spoiler.paths[str(world.get_region('Big Bomb Shop', player))] = \\\n get_path(state, world.get_region('Big Bomb Shop', player))\n else:\n world.spoiler.paths[str(world.get_region('Inverted Big Bomb Shop', player))] = \\\n get_path(state, world.get_region('Inverted Big Bomb Shop', player))\n\n # we can finally output our playthrough\n world.spoiler.playthrough = {\"0\": sorted([str(item) for item in\n chain.from_iterable(world.precollected_items.values())\n if item.advancement])}\n\n for i, sphere in enumerate(collection_spheres):\n world.spoiler.playthrough[str(i + 1)] = {str(location): str(location.item) for location in sorted(sphere)}\n\n # repair the world again\n for location, item in restore_later.items():\n location.item = item\n\n for item in removed_precollected:\n world.push_precollected(item)", "def items(self):", "def _get_next_state(self):\n self.string_level_blocks.popleft()\n self.sprite_level_blocks.popleft()\n self._generate_next_blocks()\n self.is_start = False", "def reset(self):\n self.objects = []\n hero = GameObject(self.__new_position(), 1, 1, 2, None, \"hero\")\n self.objects.append(hero)\n goal = GameObject(self.__new_position(), 1, 1, 1, 1, \"goal\")\n self.objects.append(goal)\n fire = GameObject(self.__new_position(), 1, 1, 0, -1, \"fire\")\n self.objects.append(fire)\n goal2 = GameObject(self.__new_position(), 1, 1, 1, 1, \"goal\")\n self.objects.append(goal2)\n fire2 = GameObject(self.__new_position(), 1, 1, 0, -1, \"fire\")\n self.objects.append(fire2)\n goal3 = GameObject(self.__new_position(), 1, 1, 1, 1, \"goal\")\n self.objects.append(goal3)\n goal4 = GameObject(self.__new_position(), 1, 1, 1, 1, \"goal\")\n self.objects.append(goal4)\n # print(self.objects)\n self.state = self.render_env()\n return self.state", "def setup_scenes(self, scene_dict, start_scene):\n self._scene_dict = scene_dict\n self._scene_name = start_scene\n self._scene = self._scene_dict[self._scene_name]", "def anim():\n i = 0\n while 1:\n\n for r in Reprs:\n r.draw(i)\n i = i+ 1\n i = i % len(t)\n yield", "def update(self, dt):\n store = self.entity_manager.get_all_components_of_type(tilemap.Tilemap)\n\n if store:\n for entity, component in store.iteritems():\n trender = self.entity_manager.get_component(entity, tilemap_render.TilemapRender)\n \n if trender and trender.need_to_update:\n \"\"\" Lets calculate the tiles. 
\"\"\"\n tiles_to_draw = ((int(math.floor(trender.world_y/component.tileheight)), int((trender.world_y+trender.view_height)/component.tileheight + 2)),\n (int(math.floor(-trender.world_x/component.tilewidth)), int((-trender.world_x+trender.view_width)/component.tilewidth + 2)))\n\n vertex_data = []\n texture_data = []\n color_data = []\n vertices = 0\n\n for y in range(tiles_to_draw[0][0], tiles_to_draw[0][1]):\n # 720 is screen height!\n y1 = (720 - trender.view_y) + component.tileheight * -y\n y2 = y1 - component.tileheight\n \n for x in range(tiles_to_draw[1][0], tiles_to_draw[1][1]):\n x1 = trender.view_x + component.tilewidth * x\n x2 = x1 + component.tilewidth\n \n \n for layer in reversed(component.layers):\n if (x,y) in layer.tiles:\n \n vertex_data.extend([x1, y2, x2, y2, x2, y1, x1, y1])\n texture_data.extend(component.tileset_bin.tiles[layer.tiles[(x,y)].gid].tex_coords)\n color_data.extend((255, 255, 255, 255)*4)\n \n vertices = vertices + 1\n \n trender.batch = pyglet.graphics.Batch()\n trender.batch.add(vertices*4, \n pyglet.gl.GL_QUADS, \n pyglet.graphics.TextureGroup(component.tileset_bin.atlas.texture),\n ('v2i', vertex_data),\n ('t3f', texture_data),\n ('c4B', color_data))\n \n trender.need_to_update = False", "def getGameState(self):\n ### Student code goes here\n\n ask_tile_11 = parse_input(\"fact: (located ?X pos1 pos1)\")\n ask_tile_12 = parse_input(\"fact: (located ?X pos2 pos1)\")\n ask_tile_13 = parse_input(\"fact: (located ?X pos3 pos1)\")\n ask_tile_21 = parse_input(\"fact: (located ?X pos1 pos2)\")\n ask_tile_22 = parse_input(\"fact: (located ?X pos2 pos2)\")\n ask_tile_23 = parse_input(\"fact: (located ?X pos3 pos2)\")\n ask_tile_31 = parse_input(\"fact: (located ?X pos1 pos3)\")\n ask_tile_32 = parse_input(\"fact: (located ?X pos2 pos3)\")\n ask_tile_33 = parse_input(\"fact: (located ?X pos3 pos3)\")\n\n bindings_11 = self.kb.kb_ask(ask_tile_11)\n bindings_12 = self.kb.kb_ask(ask_tile_12)\n bindings_13 = self.kb.kb_ask(ask_tile_13)\n bindings_21 = self.kb.kb_ask(ask_tile_21)\n bindings_22 = self.kb.kb_ask(ask_tile_22)\n bindings_23 = self.kb.kb_ask(ask_tile_23)\n bindings_31 = self.kb.kb_ask(ask_tile_31)\n bindings_32 = self.kb.kb_ask(ask_tile_32)\n bindings_33 = self.kb.kb_ask(ask_tile_33)\n\n row1_list = []\n row2_list = []\n row3_list = []\n\n row1_list.append(bindings_11.list_of_bindings[0][0].bindings[0].constant.element)\n row1_list.append(bindings_12.list_of_bindings[0][0].bindings[0].constant.element)\n row1_list.append(bindings_13.list_of_bindings[0][0].bindings[0].constant.element)\n\n row2_list.append(bindings_21.list_of_bindings[0][0].bindings[0].constant.element)\n row2_list.append(bindings_22.list_of_bindings[0][0].bindings[0].constant.element)\n row2_list.append(bindings_23.list_of_bindings[0][0].bindings[0].constant.element)\n\n row3_list.append(bindings_31.list_of_bindings[0][0].bindings[0].constant.element)\n row3_list.append(bindings_32.list_of_bindings[0][0].bindings[0].constant.element)\n row3_list.append(bindings_33.list_of_bindings[0][0].bindings[0].constant.element)\n\n counter = 0\n for tile in row1_list:\n if tile == \"empty\":\n row1_list[counter] = -1\n else:\n row1_list[counter] = int(tile[4:])\n counter += 1\n\n counter = 0\n for tile in row2_list:\n if tile == \"empty\":\n row2_list[counter] = -1\n else:\n row2_list[counter] = int(tile[4:])\n counter += 1\n\n counter = 0\n for tile in row3_list:\n if tile == \"empty\":\n row3_list[counter] = -1\n else:\n row3_list[counter] = int(tile[4:])\n counter += 1\n\n gamestate = 
(tuple(row1_list), tuple(row2_list), tuple(row3_list))\n return gamestate", "def __getitem__(self, item):\n # loader for x0 = sentinel1 and x00 = sentinel2\n\n x0 = np.load(os.path.join(self.folder, 'DATA', '{}.npy'.format(self.pid[item])))\n x00 = np.load(os.path.join(self.folder.replace('s1_data', 's2_data'), 'DATA', '{}.npy'.format(self.pid[item])))\n y = self.target[item]\n \n s1_item_date = self.date_positions_s1[item] \n s2_item_date = self.date_positions_s2[item] \n \n \n # sample S2 using minimum sampling\n if self.minimum_sampling is not None:\n indices = list(range(self.minimum_sampling))\n random.shuffle(indices)\n indices = sorted(indices)\n x00 = x00[indices, :,:]\n \n # subset dates using sampling idx.\n s2_item_date = [s2_item_date[i] for i in indices] \n \n \n if x0.shape[-1] > self.npixel:\n idx = np.random.choice(list(range(x0.shape[-1])), size=self.npixel, replace=False)\n x = x0[:, :, idx]\n x2 = x00[:, :, idx]\n mask1, mask2 = np.ones(self.npixel), np.ones(self.npixel)\n\n elif x0.shape[-1] < self.npixel:\n\n if x0.shape[-1] == 0:\n x = np.zeros((*x0.shape[:2], self.npixel))\n x2 = np.zeros((*x00.shape[:2], self.npixel))\n mask1, mask2 = np.zeros(self.npixel), np.zeros(self.npixel)\n mask1[0], mask2[0] = 1, 1\n else:\n x = np.zeros((*x0.shape[:2], self.npixel))\n x2 = np.zeros((*x00.shape[:2], self.npixel))\n \n x[:, :, :x0.shape[-1]] = x0\n x2[:, :, :x00.shape[-1]] = x00\n \n x[:, :, x0.shape[-1]:] = np.stack([x[:, :, 0] for _ in range(x0.shape[-1], x.shape[-1])], axis=-1)\n x2[:, :, x00.shape[-1]:] = np.stack([x2[:, :, 0] for _ in range(x00.shape[-1], x2.shape[-1])], axis=-1)\n mask1 = np.array(\n [1 for _ in range(x0.shape[-1])] + [0 for _ in range(x0.shape[-1], self.npixel)])\n mask2 = np.array(\n [1 for _ in range(x00.shape[-1])] + [0 for _ in range(x00.shape[-1], self.npixel)])\n else:\n x = x0\n x2 = x00\n mask1, mask2 = np.ones(self.npixel), np.ones(self.npixel)\n\n if self.norm is not None:\n m, s = self.norm\n m = np.array(m)\n s = np.array(s)\n\n if len(m.shape) == 0:\n x = (x - m) / s\n elif len(m.shape) == 1: # Normalise channel-wise\n x = (x.swapaxes(1, 2) - m) / s\n x = x.swapaxes(1, 2) # Normalise channel-wise for each date\n elif len(m.shape) == 2:\n x = np.rollaxis(x, 2) # TxCxS -> SxTxC\n x = (x - m) / s\n x = np.swapaxes((np.rollaxis(x, 1)), 1, 2)\n \n x = x.astype('float')\n x2 = x2.astype('float')\n\n if self.jitter is not None:\n sigma, clip = self.jitter\n x = x + np.clip(sigma * np.random.randn(*x.shape), -1 * clip, clip)\n x2 = x2 + np.clip(sigma * np.random.randn(*x2.shape), -1 * clip, clip)\n\n mask1 = np.stack([mask1 for _ in range(x.shape[0])], axis=0) # Add temporal dimension to mask\n mask2 = np.stack([mask2 for _ in range(x2.shape[0])], axis=0)\n\n\n # interpolate s1 at s2 date\n if self.fusion_type == 'early' or self.fusion_type == 'pse':\n \n if self.interpolate_method == 'nn':\n output_doy = self.similar_sequence(input_s1 = s1_item_date, input_s2 = s2_item_date)\n\n # get index of subset sequence\n x_idx = [i for i in range(len(s1_item_date)) if self.date_positions_s1[i] in output_doy]\n x = x[x_idx, :, :]\n mask1 = mask1[x_idx,:]\n \n elif self.interpolate_method == 'linear':\n x = self.interpolate_s1(arr_3d = x, s1_date = s1_item_date, s2_date = s2_item_date)\n mask1 = mask1[:len(s2_item_date), :] # slice to length of s2_sequence\n\n \n # create tensor from numpy\n data = (Tensor(x), Tensor(mask1))\n data2 = (Tensor(x2), Tensor(mask2))\n\n if self.extra_feature is not None:\n ef = (self.extra[str(self.pid[item])] - self.extra_m) / 
self.extra_s\n ef = torch.from_numpy(ef).float()\n\n ef = torch.stack([ef for _ in range(data[0].shape[0])], dim=0)\n data = (data, ef)\n\n if self.return_id :\n return data, data2, torch.from_numpy(np.array(y, dtype=int)), (Tensor(s1_item_date), Tensor(s2_item_date)), self.pid[item]\n #return data, data2 , torch.from_numpy(np.array(y, dtype=int)),self.pid[item]\n else:\n return data, data2, torch.from_numpy(np.array(y, dtype=int)), (Tensor(s1_item_date), Tensor(s2_item_date)) \n #return data, data2, torch.from_numpy(np.array(y, dtype=int))", "def prepare_raw_data(self, idx: int):\n info = super().prepare_raw_data(idx)\n if self.cache_reader is not None:\n self.human_data = self.cache_reader.get_item(idx)\n idx = idx % self.cache_reader.slice_size\n\n if 'smplx' in self.human_data:\n smplx_dict = self.human_data['smplx']\n info['has_smplx'] = 1\n else:\n smplx_dict = {}\n info['has_smplx'] = 0\n if 'global_orient' in smplx_dict:\n info['smplx_global_orient'] = smplx_dict['global_orient'][idx]\n info['has_smplx_global_orient'] = 1\n else:\n info['smplx_global_orient'] = np.zeros((3), dtype=np.float32)\n info['has_smplx_global_orient'] = 0\n\n if 'body_pose' in smplx_dict:\n info['smplx_body_pose'] = smplx_dict['body_pose'][idx]\n info['has_smplx_body_pose'] = 1\n else:\n info['smplx_body_pose'] = np.zeros((21, 3), dtype=np.float32)\n info['has_smplx_body_pose'] = 0\n\n if 'right_hand_pose' in smplx_dict:\n info['smplx_right_hand_pose'] = smplx_dict['right_hand_pose'][idx]\n info['has_smplx_right_hand_pose'] = 1\n else:\n info['smplx_right_hand_pose'] = np.zeros((15, 3), dtype=np.float32)\n info['has_smplx_right_hand_pose'] = 0\n\n if 'left_hand_pose' in smplx_dict:\n info['smplx_left_hand_pose'] = smplx_dict['left_hand_pose'][idx]\n info['has_smplx_left_hand_pose'] = 1\n else:\n info['smplx_left_hand_pose'] = np.zeros((15, 3), dtype=np.float32)\n info['has_smplx_left_hand_pose'] = 0\n\n if 'jaw_pose' in smplx_dict:\n info['smplx_jaw_pose'] = smplx_dict['jaw_pose'][idx]\n info['has_smplx_jaw_pose'] = 1\n else:\n info['smplx_jaw_pose'] = np.zeros((3), dtype=np.float32)\n info['has_smplx_jaw_pose'] = 0\n\n if 'betas' in smplx_dict:\n info['smplx_betas'] = smplx_dict['betas'][idx]\n info['has_smplx_betas'] = 1\n else:\n info['smplx_betas'] = np.zeros((self.num_betas), dtype=np.float32)\n info['has_smplx_betas'] = 0\n\n if 'expression' in smplx_dict:\n info['smplx_expression'] = smplx_dict['expression'][idx]\n info['has_smplx_expression'] = 1\n else:\n info['smplx_expression'] = np.zeros((self.num_expression),\n dtype=np.float32)\n info['has_smplx_expression'] = 0\n\n return info", "def __getitem__(self, item):\r\n for old_r, new_r in self.random_streams.random_state_variables:\r\n if item is old_r:\r\n container = self.memo[item].value\r\n return container.value\r\n raise KeyError(item)", "def scrollPoint(self):\n #productive #onButton\n profprint()\n self.changeValue()\n widget = slicer.modules.NeedleFinderWidget\n needle = widget.editNeedleTxtBox.value\n #print self.ptNumber\n #print needle\n coord = [0,0,0]\n ptName = '.'+str(needle)+'-'+str(self.ptNumber)\n #print ptName\n modelNode = slicer.util.getNode(ptName)\n if modelNode != None:\n self.ptNumber = self.ptNumber+1\n if modelNode.GetAttribute(\"ValidationNeedle\") == \"1\":\n modelNode.GetFiducialCoordinates(coord)\n X = coord[0]\n Y = coord[1]\n Z = coord[2]\n \n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeRed\")\n if sRed ==None :\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode1\")\n\n sYellow = 
slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeYellow\")\n if sYellow ==None :\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode2\")\n \n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\n if sGreen ==None :\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\") \n\n mYellow= sYellow.GetSliceToRAS()\n mYellow.SetElement(0,3,X)\n sYellow.Modified()\n sYellow.UpdateMatrices()\n\n mGreen= sGreen.GetSliceToRAS()\n mGreen.SetElement(1,3,Y)\n sGreen.Modified()\n sGreen.UpdateMatrices()\n\n mRed= sRed.GetSliceToRAS()\n mRed.SetElement(2,3,Z)\n sRed.Modified()\n sRed.UpdateMatrices()\n elif self.ptNumber!=0:\n self.ptNumber=0\n self.scrollPoint()", "def _next_test(self):\n idx = self.it\n self.it = (self.it + 1) % self.n_examples\n\n if self.render_path:\n target_view = data_types.Views(\n rays=jax.tree_map(lambda r: r[idx], self.render_rays),)\n else:\n target_view = data_types.Views(\n rays=jax.tree_map(lambda r: r[idx], self.rays), rgb=self.images[idx])\n\n #--------------------------------------------------------------------------------------\n # Get the reference data\n batch_near_cam_idx = self.sorted_near_cam[idx]\n ref_images = self.train_images[batch_near_cam_idx]\n ref_images = ref_images.reshape(ref_images.shape[0], self.h, self.w, 3)\n\n ref_cameratoworld = self.train_camtoworlds[batch_near_cam_idx]\n ref_worldtocamera = self.train_worldtocamera[batch_near_cam_idx]\n\n #--------------------------------------------------------------------------------------\n # Replicate these so that they may be distributed onto several devices for\n # parallel computaion.\n l_devices = jax.local_device_count()\n reference_views = data_types.ReferenceViews(\n rgb=np.tile(ref_images, (l_devices, 1, 1, 1)),\n ref_worldtocamera=np.tile(ref_worldtocamera, (l_devices, 1, 1)),\n ref_cameratoworld=np.tile(ref_cameratoworld, (l_devices, 1, 1)),\n intrinsic_matrix=np.tile(self.intrinsic_matrix[None, :],\n (l_devices, 1, 1)),\n idx=np.tile(batch_near_cam_idx[None, :], (jax.local_device_count(), 1)),\n )\n\n return_batch = data_types.Batch(\n target_view=target_view, reference_views=reference_views)\n\n return return_batch", "def get_items(self, start, stop, next_position=None):", "def on_enter(self):\n\n super(BaseScene, self).on_enter()\n\n self.load_map()\n self.load_players()\n self.load_enemies()\n self.load_status_bar()\n\n self.enemies_layer.next_wave()", "def prep_naves(self):\r\n self.naves = Group()\r\n for nave_num in range(self.stats.nave_left):\r\n nave = Nave(self.ik_game)\r\n nave.image = pygame.transform.rotate(pygame.transform.scale(\r\n pygame.image.load(os.path.join('complemento/aventine_ship.bmp')), (50, 20)), 90)\r\n nave.rect.x = 10 + nave_num * nave.rect.width\r\n nave.rect.y = 10\r\n self.naves.add(nave)", "def setUp(self):\n slicer.mrmlScene.Clear(0)", "def multistorey_house(nStorey):\n\tdef renderWindows(XWindow, YWindow, occurrencyWindow, windowModel = False):\n\t\t\"\"\"\n\t\trenderWindows accept the window's cells and the occurrency, and optionally a window generating function \n\t\t\"\"\"\n\t\tdef renderDoors(XDoor, YDoor, occurrencyDoor, doorModel = False):\n\t\t\t\"\"\"\n\t\t\trenderWindows accept the door's cells and the occurrency, and optionally a door generating function \n\t\t\t\"\"\"\n\t\t\tdef renderRoof(vertices, pitchAngle, height):\n\t\t\t\t\"\"\"\n\t\t\t\trenderRoof accept the vertices of the base roof, a pitch angle and the desired height \n\t\t\t\tof the roof\n\t\t\t\t\"\"\"\n\t\t\t\tdef renderLadder(ladderHeight, interStep, 
riser):\n\t\t\t\t\t\"\"\"\n\t\t\t\t\trenderLadder is the inner function used to assembly all together, it takes the \n\t\t\t\t\tdesired height of the ladder, an interstep between two step and a riser for the single\n\t\t\t\t\tstep.\n\t\t\t\t\t\"\"\"\n\n\t\t\t\t\t#building the ladder model and the ladder box\n\t\t\t\t\tladderModel = ladder.make_ladder(ladderHeight, interStep, riser)\n\t\t\t\t\twith open(\"lines/ladder.lines\", \"rb\") as ladderFile:\n\t\t\t\t\t\treader = csv.reader(ladderFile, delimiter=\",\")\n\t\t\t\t\t\trow = next(reader)\n\t\t\t\t\t\tladderModel = T([1,2])([float(row[0])*xfactor, float(row[1])*yfactor])(ladderModel)\n\t\t\t\t\tladderBOX = CUBOID([SIZE([1])(ladderModel)[0]/xfactor,SIZE([2])(ladderModel)[0]/yfactor, SIZE([3])(ladderModel)[0]/zfactor])\n\t\t\t\t\tladderBOX = T([1,2])([float(row[0])-SIZE([1])(ladderBOX)[0]/2., float(row[1])-SIZE([2])(ladderBOX)[0]/2.])(ladderBOX)\n\n\t\t\t\t\t#building roof model\n\t\t\t\t\tif isinstance(vertices, basestring):\n\t\t\t\t\t\twith open(\"lines/\" + vertices + \".lines\", \"rb\") as file:\n\t\t\t\t\t\t\treader = csv.reader(file, delimiter=\",\")\n\t\t\t\t\t\t\tnewVertices = []\n\t\t\t\t\t\t\tfor row in reader:\n\t\t\t\t\t\t\t\tnewVertices.append([float(row[0]), float(row[1])])\n\t\t\t\t\tif newVertices:\n\t\t\t\t\t\troofModel = roof.roofBuilder(newVertices, pitchAngle, height)\n\t\t\t\t\telse:\n\t\t\t\t\t\troofModel = roof.roofBuilder(vertices, pitchAngle, height)\n\t\t\t\t\troofModel = T([3])([nStorey*3/zfactor])(roofModel)\n\t\t\t\t\troofModel = S([1,2,3])([xfactor*1.09,yfactor*1.09,zfactor])(roofModel)\n\t\t\t\t\troofModel = T([1,2])([-SIZE([1])(roofModel)[0]*0.05,-SIZE([2])(roofModel)[0]*0.05]) (roofModel)\n\n\t\t\t\t\t#building full house model with windows and doors\n\t\t\t\t\tfullHouse = []\n\t\t\t\t\tfor story in range(nStorey):\n\t\t\t\t\t\thouseModel = house.build_house(story, windowModel, doorModel, ladderBOX)\n\t\t\t\t\t\tfullHouse.append(houseModel)\n\t\t\t\t\t\tfullHouse.append(T([3])([3]))\n\t\t\t\t\tfullHouse = STRUCT(fullHouse)\n\n\t\t\t\t\t#returning the result\n\t\t\t\t\treturn STRUCT([roofModel, ladderModel, fullHouse])\n\n\t\t\t\treturn renderLadder\n\n\t\t\treturn renderRoof\n\n\t\treturn renderDoors\n\n\treturn renderWindows", "def __handle_view_tile(self, gamestate_component):", "def _ui_content(self):\n\n # Cleat the tree\n self.clear()\n\n # Set the font\n font = QtGui.QFont()\n font.setPointSize(11)\n\n # Add the id sets and set items\n for id_set, id_dict in sorted(self.scnData.items()):\n tree_item = QtGui.QTreeWidgetItem(self)\n\n tree_item.setText(0, id_set)\n tree_item.setFont(0, font)\n\n icon_folder = os.path.dirname(os.path.abspath(__file__))\n icon_path = os.path.join(icon_folder, \"icons\", \"IdSet.png\")\n\n tree_item.setIcon(0, QtGui.QIcon(icon_path))\n\n tree_item.setData(0, QtCore.Qt.UserRole, \"set\")\n tree_item.setData(1, QtCore.Qt.UserRole, id_set)\n\n for id_color, id_objects in sorted(id_dict.items()):\n if id_color != \"Holdout\":\n self._add_id_color(id_objects,\n id_color,\n tree_item)\n\n self._add_id_color(id_dict[\"Holdout\"], \"Holdout\", tree_item)\n\n return", "def refresh_view(self):\n if self._step_number % 2 == 0:\n self._view.draw_enemies(self._game.enemies)\n self._view.draw_towers(self._game.towers)\n self._view.draw_obstacles(self._game.obstacles)", "def populateMasteredAssets(*args):\n #clear the lists first\n cmds.textScrollList(widgets[\"shotAssRigCharListTSL\"], e=True, ra=True)\n cmds.textScrollList(widgets[\"shotAssRigPropListTSL\"], e=True, ra=True)\n 
cmds.textScrollList(widgets[\"shotAssRigSetListTSL\"], e=True, ra=True)\n cmds.textScrollList(widgets[\"shotAnmMstListTSL\"], e=True, ra=True)\n\n chars, props, sets = cFuncs.getProjectAssetList(pi.assetFolder)\n\n #check for rig masters\n for char in chars:\n cMstr = cFuncs.getAssetMaster(char, cFuncs.fixPath(os.path.join(pi.assetFolder, \"characters\", char)), \"rig\")\n if cMstr:\n cmds.textScrollList(widgets[\"shotAssRigCharListTSL\"], e=True, a=char, dcc=showAssetImage)\n for prop in props:\n pMstr = cFuncs.getAssetMaster(prop, cFuncs.fixPath(os.path.join(pi.assetFolder, \"props\", prop)), \"rig\") \n if pMstr:\n cmds.textScrollList(widgets[\"shotAssRigPropListTSL\"], e=True, a=prop, dcc=showAssetImage)\n for sett in sets:\n sMstr = cFuncs.getAssetMaster(sett, cFuncs.fixPath(os.path.join(pi.assetFolder, \"sets\", sett)), \"rig\") \n if sMstr:\n cmds.textScrollList(widgets[\"shotAssRigSetListTSL\"], e=True, a=sett, dcc=showAssetImage)\n\n #check for anim variants and masters\n varAnm = []\n shots = cFuncs.getProjectShotList(pi.currentProject)\n # print \"shotWin.populateMasteredAssets (line 937): shots =\", shots\n if shots:\n for shot in shots:\n shotVars = cFuncs.getShotVariantDict(os.path.join(pi.currentProject, \"shots\", shot))\n if shotVars[\"anm\"]:\n for anm in shotVars[\"anm\"]:\n aMstr = cFuncs.getVarMaster(cFuncs.fixPath(os.path.join(pi.currentProject, \"shots\", shot, \"anm\", anm)))\n #print cFuncs.fixPath(os.path.join(pi.currentProject, \"shots\", shot, \"anm\", anm))\n if aMstr: \n varAnm.append(\"{0}.{1}\".format(anm, shot))\n\n for av in varAnm:\n cmds.textScrollList(widgets[\"shotAnmMstListTSL\"], e=True, a=av)\n\n populateSceneRefs()", "def _load_item(self, idx):\n ret = dict()\n try:\n for group_id, per_processors_group in enumerate(self.processors):\n per_sample_features = get_per_sample_features(\n modality_features=getattr(self, f\"modality_features_{group_id}\"),\n modality_types=getattr(self, f\"modality_types_{group_id}\"),\n idx=idx,\n id_mappings=self.id_mappings,\n )\n per_ret = apply_data_processor(\n per_sample_features=per_sample_features,\n data_processors=per_processors_group,\n feature_modalities=getattr(self, f\"modality_types_{group_id}\"),\n is_training=self.is_training,\n load_only=True,\n )\n ret.update(per_ret)\n except Exception as e:\n logger.debug(f\"Skipping sample {idx} due to '{e}'\")\n self._consecutive_errors += 1\n if self._consecutive_errors < GET_ITEM_ERROR_RETRY:\n return self.__getitem__((idx + 1) % self.__len__())\n else:\n raise e\n self._consecutive_errors = 0\n\n return ret", "def __getitem__(self, index):\n inputs = {}\n\n do_color_aug = self.is_train and random.random() > 0.5\n do_flip = self.is_train and random.random() > 0.5\n\n for i in self.frame_idxs:\n if i=='s':\n filename = os.path.join('cam1', 'data', self.filenames[index])\n else:\n filename = os.path.join('cam0', 'data', self.filenames[index+i])\n\n inputs[(\"color\", i, -1)] = self.get_color(filename, do_flip)\n\n # adjusting intrinsics to match each scale in the pyramid\n K = self.K.copy()\n K[0, :] *= self.width\n K[1, :] *= self.height\n inv_K = np.linalg.pinv(K)\n\n inputs[(\"K\")] = torch.from_numpy(K)\n inputs[(\"inv_K\")] = torch.from_numpy(inv_K)\n\n if do_color_aug:\n color_aug = transforms.ColorJitter.get_params(self.brightness, self.contrast, self.saturation, self.hue)\n else:\n color_aug = (lambda x: x)\n\n self.preprocess(inputs, color_aug)\n\n for i in self.frame_idxs:\n del inputs[(\"color\", i, -1)]\n\n if \"s\" in self.frame_idxs:\n stereo_T = 
np.eye(4, dtype=np.float32)\n baseline_sign = -1 if do_flip else 1\n side_sign = -1\n stereo_T[0, 3] = side_sign * baseline_sign * 0.1\n inputs[\"stereo_T\"] = torch.from_numpy(stereo_T)\n\n return inputs" ]
[ "0.5440126", "0.5405451", "0.5320529", "0.5313562", "0.5288161", "0.52768517", "0.52676857", "0.52513534", "0.52176017", "0.521186", "0.5171025", "0.5155609", "0.5147708", "0.51323056", "0.51169014", "0.5096112", "0.5093142", "0.5089115", "0.50864947", "0.5068929", "0.50373554", "0.5031787", "0.50227576", "0.5019718", "0.5002441", "0.49724245", "0.49591285", "0.49512327", "0.49497825", "0.4947155", "0.49445894", "0.49421948", "0.49334458", "0.4884433", "0.48771033", "0.4872794", "0.4871317", "0.48554578", "0.48553342", "0.48451343", "0.48428944", "0.48398674", "0.48381734", "0.48325348", "0.48255116", "0.48190683", "0.48179337", "0.48089755", "0.4798573", "0.4798462", "0.47829387", "0.47808447", "0.4772404", "0.4761862", "0.4760724", "0.4757588", "0.47469652", "0.4737753", "0.47305664", "0.47219712", "0.47206008", "0.4719918", "0.47154182", "0.4711105", "0.47029555", "0.47001407", "0.46988124", "0.46952838", "0.4685335", "0.46843597", "0.46829507", "0.46789646", "0.46785903", "0.4672212", "0.46714452", "0.46665445", "0.4664017", "0.46607426", "0.46596056", "0.4657328", "0.46502373", "0.46499446", "0.46492872", "0.46347326", "0.46334276", "0.4627596", "0.4626083", "0.46229354", "0.46210226", "0.46166265", "0.46163464", "0.46127653", "0.46126407", "0.46124932", "0.46120477", "0.46116358", "0.46098205", "0.46080086", "0.4606309", "0.46015936" ]
0.56032324
0
Given a state dict in the form of
def render_state_dict(self, target_state, dmx_universe_target):
    if not target_state:
        return
    # Copy the alias over this bytearray
    if isinstance(target_state, str):
        target_state = {'use': target_state}
    alias_name = target_state.get('use')
    if alias_name:
        assert alias_name in self.dmx_universe_alias, "alias '{0}' not defined".format(alias_name)
        dmx_universe_target[:] = self.dmx_universe_alias[alias_name]
    # Render items
    for dmx_device_name, color_value in target_state.items():
        self.config.render_device(dmx_universe_target, dmx_device_name, color_value)
    # Mute items
    for dmx_device_name in self.mute_devices:
        self.config.render_device(dmx_universe_target, dmx_device_name, None)
    # Add an alias for this state if a name is provided
    if target_state.get('name'):
        self.dmx_universe_alias[target_state.get('name')] = dmx_universe_target
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fromState(state):", "def load_state_dict(self, state_dict: Dict[str, torch.Tensor]):\n pass", "def load_from_state_dict(self, state_dict):\n raise NotImplementedError", "def load_state_dict(\n self,\n state_dict: Mapping[str, Any],\n *args,\n **kwargs,\n ) -> NamedTuple:\n return super().load_state_dict(state_dict, *args)", "def _load_state_dict(self, state: dict):\n for o, dct in zip(self.optimizers, state.get('optimizers', [])):\n o.load_state_dict(dct)\n for s, dct in zip(self.schedulers, state.get('schedulers', [])):\n s.load_state_dict(dct)", "def _load_state_dict(self, state: dict):\n for o, dct in zip(self.optimizers, state.get('optimizers', [])):\n o.load_state_dict(dct)\n for s, dct in zip(self.schedulers, state.get('schedulers', [])):\n s.load_state_dict(dct)", "def load_state_dict(self, state_dict):\n own_state = self.state_dict()\n new_state = OrderedDict()\n for name, param in state_dict.items():\n if name in own_state:\n new_state[name] = param\n\n super(EncoderImagePrecomp, self).load_state_dict(new_state)", "def upgrade_state_dict(self, state_dict):\n return state_dict", "def upgrade_state_dict(self, state_dict):\n return state_dict", "def upgrade_state_dict(self, state_dict):\n self.upgrade_state_dict_named(state_dict, \"\")", "def state_dict(self, *args, **kwargs):\n return self.module.state_dict(*args, **kwargs)", "def convert_state_dict(self, state_dict):\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n return new_state_dict", "def convert_state_dict(state_dict):\r\n new_state_dict = OrderedDict()\r\n for k, v in state_dict.items():\r\n name = k[7:] # remove `module.`\r\n new_state_dict[name] = v\r\n return new_state_dict", "def toState(attrs=ALL):", "def state(self, state: str) -> None:", "def convert_state_dict(state_dict):\n new_state_dict = OrderedDict()\n\n for k, v in state_dict.items():\n name = k[7:] # remove `module`\n new_state_dict[name] = v\n return new_state_dict", "def create_state_dict(self):\n return {\n 'resting': self.resting,\n 'moving': self.moving,\n 'animated resting': self.animated_resting,\n 'autoresting': self.auto_resting,\n 'automoving': self.auto_moving,\n 'battle resting': self.battle_resting,\n 'attack': self.attack,\n 'enemy attack': self.enemy_attack,\n c.RUN_AWAY: self.run_away,\n c.VICTORY_DANCE: self.victory_dance,\n c.KNOCK_BACK: self.knock_back,\n c.FADE_DEATH: self.fade_death\n }", "def setup_states(self, state_dict, start_state):\n self.state_dict = state_dict\n self.state_name = start_state\n self.state = self.state_dict[self.state_name]()", "def convert_state_dict(state_dict):\n\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n state_dict[name] = v\n del state_dict[k]\n return state_dict", "def convert_state_dict(state_dict):\n if not next(iter(state_dict)).startswith(\"module.\"):\n return state_dict # abort if dict is not a DataParallel model_state\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n return new_state_dict", "def load_state_dict(self, state_dict):\n self.XY_net.load_state_dict(state_dict['XY_net'])\n self.XY_optimizer_minee.load_state_dict(\n state_dict['XY_optimizer_minee'])\n self.X_net.load_state_dict(state_dict['X_net'])\n self.X_optimizer_minee.load_state_dict(state_dict['X_optimizer_minee'])\n self.Y_net.load_state_dict(state_dict['Y_net'])\n self.Y_optimizer_minee.load_state_dict(state_dict['Y_optimizer_minee'])\n self.X = 
state_dict['X']\n self.Y = state_dict['Y']\n if 'lr' in state_dict:\n self.lr = state_dict['lr']\n if 'batch_size' in state_dict:\n self.batch_size = state_dict['batch_size']\n if 'ref_batch_factor' in state_dict:\n self.ref_batch_factor = state_dict['ref_batch_factor']", "def map(s,dic):\n state=s.getstate()\n if not state in dic:raise Exception(\"the current state \"+str(state)+\" is not available to map to using the dictionary \"+str(dic))\n val=dic[state]\n if callable(val):\n return val()\n states=s.getstates()\n if val in states:\n return s.setstate(val)\n raise Exception(\"I dont know how to use this \"+str(state)+\" since it maps to a type of \"+str(type(val))+\" namely \"+str(val))", "def _extract_state(self, state):\n raise NotImplementedError", "def _stateDict(self):\n\n data = {}\n # if self.currentState[4]:\n # data['action'] = 'BRAK'\n # else:\n data['action'] = 'MCTL'\n data['speed'] = float(self.speed)\n data['steerAngle'] = float(self.steering_angle)\n\n return data", "def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)", "def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)", "def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)", "def add_to_state_dict(all_states_dict: dict, state: str) -> None:\n if state not in all_states_dict:\n url = 'https://www.vaccinespotter.org/api/v0/states/' +\\\n f'{state}.json'\n all_states_dict[state] = get_json_dict(url)", "def verify_state_dict(state_dict, state_name_list, exp_param_specs_dict):\n\n def _verify_content(state_content_list):\n \"\"\"Checks that a state content list specification is valid.\"\"\"\n CONTENT_ITEM_SCHEMA = [\n ('type', basestring), ('value', basestring)]\n ALLOWED_CONTENT_TYPES = ['text', 'image', 'video']\n\n for content_item in state_content_list:\n utils.verify_dict_keys_and_types(content_item, CONTENT_ITEM_SCHEMA)\n if content_item['type'] not in ALLOWED_CONTENT_TYPES:\n raise Exception('Unsupported content type %s.' %\n content_item['type'])\n\n def _verify_param_changes(param_changes, exp_param_specs_dict):\n \"\"\"Checks that a param_changes specification is valid.\"\"\"\n\n PARAM_CHANGE_SCHEMA = [\n ('name', basestring), ('generator_id', basestring),\n ('customization_args', utils.ANY_TYPE)]\n\n generator_registry = value_generators_domain.Registry\n\n for pc in param_changes:\n utils.verify_dict_keys_and_types(pc, PARAM_CHANGE_SCHEMA)\n if pc['name'] not in exp_param_specs_dict:\n raise Exception('Undeclared param name: %s' % pc['name'])\n\n value_generator = generator_registry.get_generator_class_by_id(\n pc['generator_id'])\n\n for arg_name in pc['customization_args']:\n if not isinstance(arg_name, basestring):\n raise Exception('Invalid param change customization arg '\n 'name: %s' % arg_name)\n\n # TODO(sll): Find a way to verify the customization args when they\n # depend on context parameters. 
Can we get sample values for the\n # reader's answer and these parameters by looking at states that\n # link to this one?\n\n ATOMIC_RULE_DEFINITION_SCHEMA = [\n ('inputs', dict), ('name', basestring), ('rule_type', basestring),\n ('subject', basestring)]\n COMPOSITE_RULE_DEFINITION_SCHEMA = [\n ('children', list), ('rule_type', basestring)]\n DEFAULT_RULE_DEFINITION_SCHEMA = [('rule_type', basestring)]\n ALLOWED_COMPOSITE_RULE_TYPES = [\n rule_domain.AND_RULE_TYPE, rule_domain.OR_RULE_TYPE,\n rule_domain.NOT_RULE_TYPE]\n\n def _verify_rule_definition(rule_definition, exp_param_specs_dict):\n \"\"\"Verify a rule definition.\"\"\"\n\n if 'rule_type' not in rule_definition:\n raise Exception('Rule definition %s contains no rule type.'\n % rule_definition)\n\n rule_type = rule_definition['rule_type']\n\n if rule_type == rule_domain.DEFAULT_RULE_TYPE:\n utils.verify_dict_keys_and_types(\n rule_definition, DEFAULT_RULE_DEFINITION_SCHEMA)\n elif rule_type == rule_domain.ATOMIC_RULE_TYPE:\n utils.verify_dict_keys_and_types(\n rule_definition, ATOMIC_RULE_DEFINITION_SCHEMA)\n\n if (rule_definition['subject'] not in exp_param_specs_dict\n and rule_definition['subject'] != 'answer'):\n raise Exception('Unrecognized rule subject: %s' %\n rule_definition['subject'])\n else:\n if rule_type not in ALLOWED_COMPOSITE_RULE_TYPES:\n raise Exception('Unsupported rule type %s.' % rule_type)\n\n utils.verify_dict_keys_and_types(\n rule_definition, COMPOSITE_RULE_DEFINITION_SCHEMA)\n for child_rule in rule_definition['children']:\n _verify_rule_definition(child_rule, exp_param_specs_dict)\n\n STATE_DICT_SCHEMA = [\n ('content', list), ('name', basestring), ('param_changes', list),\n ('widget', dict)]\n WIDGET_SCHEMA = [\n ('widget_id', basestring), ('customization_args', dict),\n ('handlers', list), ('sticky', bool)]\n HANDLER_SCHEMA = [('name', basestring), ('rule_specs', list)]\n RULE_SCHEMA = [\n ('definition', dict), ('dest', basestring), ('feedback', list),\n ('param_changes', list)]\n\n utils.verify_dict_keys_and_types(state_dict, STATE_DICT_SCHEMA)\n _verify_content(state_dict['content'])\n _verify_param_changes(state_dict['param_changes'], exp_param_specs_dict)\n utils.verify_dict_keys_and_types(state_dict['widget'], WIDGET_SCHEMA)\n\n curr_state_name = state_dict['name']\n\n for handler in state_dict['widget']['handlers']:\n utils.verify_dict_keys_and_types(handler, HANDLER_SCHEMA)\n\n if not handler['rule_specs']:\n raise Exception('There must be at least one rule.')\n\n for rule in handler['rule_specs']:\n utils.verify_dict_keys_and_types(rule, RULE_SCHEMA)\n\n _verify_rule_definition(rule['definition'], exp_param_specs_dict)\n\n if rule['dest'] not in state_name_list + [feconf.END_DEST]:\n raise Exception('Destination %s is invalid.' % rule['dest'])\n\n # Check that there are no feedback-less self-loops. \n # NB: Sometimes it makes sense for a self-loop to not have\n # feedback, such as unreachable rules in a ruleset for multiple-\n # choice questions. This should be handled in the frontend so\n # that a valid dict with feedback for every self-loop is always\n # saved to the backend.\n if (rule['dest'] == curr_state_name and not rule['feedback']\n and not state_dict['widget']['sticky']):\n raise Exception('State \"%s\" has a self-loop with no feedback. '\n 'This is likely to frustrate the reader.' 
%\n curr_state_name)\n\n _verify_param_changes(rule['param_changes'], exp_param_specs_dict)\n\n for wp_name, wp_value in (\n state_dict['widget']['customization_args'].iteritems()):\n if not isinstance(wp_name, basestring):\n raise Exception('Invalid widget customization arg name: %s'\n % wp_name)\n\n try:\n widget = widget_domain.Registry.get_widget_by_id(\n feconf.INTERACTIVE_PREFIX, state_dict['widget']['widget_id'])\n except Exception as e:\n raise Exception(\n '%s; widget id: %s' % (e, state_dict['widget']['widget_id']))\n\n widget_param_names = [wp.name for wp in widget.params]\n if wp.name not in widget_param_names:\n raise Exception('Parameter %s for widget %s is invalid.' % (\n wp_name, state_dict['widget']['widget_id']))\n\n # Get the object class used to normalize the value for this param.\n for wp in widget.params:\n if wp.name == wp_name:\n obj_class = obj_services.get_object_class(wp.obj_type)\n if obj_class is None:\n raise Exception('No obj_class specified.' % obj_class)\n break\n\n # TODO(sll): Find a way to verify that the widget parameter values\n # have the correct type. Can we get sample values for the context\n # parameters?", "def set_state(canvas, state):\n for key, value in state.items():\n set_attribute(canvas, key, value)", "def _localSetState(self,pdict):\n self.mapping = pdict.pop('mapping')\n self.values = pdict.pop('values')", "def _state_convert(self, raw_state):\n variables_dict = dict()\n variables_dict[\"s_t\"] = np.hstack((0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.0, 0.0, 1.0))\n variables_dict[\"v_t\"] = np.hstack((0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n variables_dict[\"add_s_t\"] = np.hstack((0.8, 0.8))\n variables_dict[\"add_v_t\"] = np.hstack((0.0, 0.0))\n variables_dict[\"flag_t\"] = 0.0\n variables_dict[\"add_dist_min\"] = np.hstack((1000.0, 1000.0, 1000.0, 1000.0))\n variables_dict[\"dist_min\"] = np.hstack((1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0))\n variables_dict[\"ego_lane\"] = raw_state[16]\n variables_dict[\"lane_ids\"] = raw_state[18]\n variables_dict[\"ego_lane\"] = variables_dict[\"lane_ids\"].index(variables_dict[\"ego_lane\"])\n if variables_dict[\"ego_lane\"] == 0 or variables_dict[\"ego_lane\"] == 2:\n variables_dict[\"s_t\"][variables_dict[\"ego_lane\"]] = 1.0\n variables_dict[\"s_t\"][variables_dict[\"ego_lane\"] + 3] = 1.0\n variables_dict[\"flag_t\"] = 1 if variables_dict[\"ego_lane\"] == 0 else -1\n\n variables_dict[\"ego_raw_speed\"] = raw_state[3]\n variables_dict[\"filter_speed\"] = (variables_dict[\"ego_raw_speed\"]\n if variables_dict[\"ego_raw_speed\"] >= 10.0 else 10.0)\n variables_dict[\"s_t\"][6] = variables_dict[\"ego_raw_speed\"] / SPEED_RANGE\n objects = raw_state[-1]\n # print(\"ego_speed\",ego_raw_speed,\"ego_lane\",ego_lane)\n if objects[0] is not None:\n # for i in range(len(objects)):\n for i, _object in enumerate(objects):\n lane_id = objects[i][0]\n dist = abs(objects[i][1]) * np.sign(objects[i][1])\n speed = objects[i][2]\n pre_post = np.sign(dist)\n flag = 0 if pre_post == 1.0 else 1\n\n if abs(dist) < VIEW_RANGE:\n for j in range(3):\n adjacent_lane = variables_dict[\"ego_lane\"] - 1 + j\n dist_index = j + flag * 3\n if (lane_id == adjacent_lane and abs(dist) < variables_dict[\"dist_min\"][dist_index]):\n self.min_dist(\n variables_dict[\"v_t\"],\n variables_dict[\"s_t\"],\n dist_index,\n speed,\n dist,\n variables_dict[\"filter_speed\"],\n )\n variables_dict[\"dist_min\"][dist_index] = abs(dist)\n\n if abs(dist) < variables_dict[\"add_dist_min\"][flag]:\n if (variables_dict[\"ego_lane\"] == 0 and lane_id == 
variables_dict[\"ego_lane\"] + 2\n or variables_dict[\"ego_lane\"] == len(variables_dict[\"lane_ids\"]) - 1\n and lane_id == variables_dict[\"ego_lane\"] - 2):\n self.min_dist(\n variables_dict[\"add_v_t\"],\n variables_dict[\"add_s_t\"],\n flag,\n speed,\n dist,\n variables_dict[\"filter_speed\"],\n )\n\n state = np.hstack((\n variables_dict[\"s_t\"],\n variables_dict[\"v_t\"],\n variables_dict[\"add_s_t\"],\n variables_dict[\"add_v_t\"],\n variables_dict[\"flag_t\"],\n ))\n return state", "def clone_state_dict(state_dict):\n return OrderedDict([(name, clone(param)) for name, param in state_dict.items()])", "def set_state(self, state_dict):\n for key, target_object in self._map.items():\n self.set_single_state(target_object,\n value=state_dict.get(key, None))", "def make_state_dict(self):\r\n state_dict = {c.TRANSITION_IN: self.transition_in,\r\n c.TRANSITION_OUT: self.transition_out,\r\n c.NORMAL: self.normal_update}\r\n\r\n return state_dict", "def set_classy_state(self, state: Dict[str, Any]) -> None:\n return self.load_state_dict(state)", "def set_state(self, state_dict):\n self.set_script_output(state_dict.get('script_text', ''))\n for key, target_object in self._map.items():\n self.set_single_state(target_object,\n value=state_dict.get(key, None))", "def load_state_dict(model, src_state_dict):\n from torch.nn import Parameter\n dest_state_dict = model.state_dict()\n for name, param in src_state_dict.items():\n ### CHANGED HERE FOR FINE TUNING\n if name not in dest_state_dict:\n continue\n if isinstance(param, Parameter):\n # backwards compatibility for serialized parameters\n param = param.data\n try:\n dest_state_dict[name].copy_(param)\n except Exception as e:\n print(\"Warning: Error occurs when copying '{}': {}\"\n .format(name, str(e)))\n\n # src_missing = set(dest_state_dict.keys()) - set(src_state_dict.keys())\n # if len(src_missing) > 0:\n # print(\"Keys not found in source state_dict: \")\n # for n in src_missing:\n # print('\\t', n)\n\n # dest_missing = set(src_state_dict.keys()) - set(dest_state_dict.keys())\n # if len(dest_missing) > 0:\n # print(\"Keys not found in destination state_dict: \")\n # for n in dest_missing:\n # print('\\t', n)", "def __getstate__(self):\n exclude_keys = ['_is_zero', '_is_positive', '_is_nonneg']\n state = {}\n for key,value in self.__dict__.items():\n if key in exclude_keys:\n continue\n state[key] = value\n return state", "def _sharded_state_dict(self, *args: Any, **kwargs: Any) -> Any:\n with self.set_state_dict_type(StateDictType.SHARDED_STATE_DICT):\n return self.state_dict(self, *args, **kwargs)", "def __setstate__(self, state):\n exclude_keys = ['_is_zero', '_is_positive', '_is_nonneg']\n for key,value in state.items():\n if key in exclude_keys:\n continue\n if key == '_field':\n self._init_field(value)\n continue\n self.__dict__[key] = value\n return state", "def state_dict(self):\n return {k: getattr(self, k) for k in self.VARS}", "def state_encod_arch2(self, state, action):", "def __setstate__(self, statedict):\n for k, v in list(statedict.items()):\n setattr(self, k, v)", "def __setstate__(self, statedict):\n for k, v in list(statedict.items()):\n setattr(self, k, v)", "def _extract_state(self, state):\n extracted_state = {}\n\n legal_actions = [self.actions.index(a) for a in state[\"legal_actions\"]]\n extracted_state[\"legal_actions\"] = legal_actions\n\n public_card = state[\"public_card\"]\n hand = state[\"hand\"]\n obs = np.zeros(36)\n obs[self.card2index[hand]] = 1\n if public_card:\n obs[self.card2index[public_card] + 3] = 
1\n obs[state[\"my_chips\"] + 6] = 1\n obs[state[\"all_chips\"][1] + 20] = 1\n extracted_state[\"obs\"] = obs\n\n if self.allow_raw_data:\n extracted_state[\"raw_obs\"] = state\n extracted_state[\"raw_legal_actions\"] = [a for a in state[\"legal_actions\"]]\n if self.record_action:\n extracted_state[\"action_record\"] = self.action_recorder\n\n return extracted_state", "def make_state_dict(self):\n state_dict = {c.TRANSITION_IN: self.transition_in,\n c.TRANSITION_OUT: self.transition_out,\n c.NORMAL: self.normal_update}\n\n return state_dict", "def __getstate__(self):\n return dict(self.items())", "def __getstate__(self):\n return dict(self.items())", "def __setstate__(self, _state : dict):\n self.__init__(**_state)", "def _state_actions(self) -> dict:\n return {}", "def state_dict(self) -> dict:\n _state_dict: dict[str, Any] = super().state_dict\n _state_dict[\"rng_state\"] = self.rng.get_state()\n _state_dict[\"seed\"] = self.seed\n _state_dict[\"strategy\"] = self.strategy.state_dict\n return _state_dict", "def make_init_state(self,state,start_case = None):\n # Init state dict\n d = {}\n\n # Init first population values in the first state\n if self.granularity is not None:\n for dimensions in self.dimensions_product:\n N,s = self.get([\"N\",state],dimensions)\n d[s] = N\n else:\n d[state] = self.N\n \n # Start the propagation with the first case\n if start_case is not None:\n\n # If there is a granularity (age / region)\n if self.granularity is not None:\n\n # If input is a tuple (only case in one category)\n if isinstance(start_case,tuple):\n assert len(start_case) == 2\n start_state,value = start_case\n modified_state = f\"{state}_{start_state.split('_',1)[1]}\"\n d[modified_state] -= value\n d[start_state] = value\n\n # If input is a list of tuples (cases in several categories)\n elif isinstance(start_case,list):\n for case in start_case:\n assert len(case) == 2\n start_state,value = case\n modified_state = f\"{state}_{start_state.split('_',1)[1]}\"\n d[modified_state] -= value\n d[start_state] = value\n\n # Without granularity\n else:\n assert isinstance(start_case,tuple)\n assert len(start_case) == 2\n start_state,value = start_case\n d[state] -= value\n d[start_state] = value\n\n return d", "def _get_state_dict(self) -> dict:\n return {\n 'optimizers': [o.state_dict() for o in self.optimizers],\n 'schedulers': [s.state_dict() for s in self.schedulers],\n }", "def _get_state_dict(self) -> dict:\n return {\n 'optimizers': [o.state_dict() for o in self.optimizers],\n 'schedulers': [s.state_dict() for s in self.schedulers],\n }", "def __setstate__(self, state):\n if len(state) != 1:\n raise TypeError('Invalid state length, expected 1; received %i' %\n len(state))\n kwargs = state[0]\n if not isinstance(kwargs, dict):\n raise TypeError('Key accepts a dict of keyword arguments as state; '\n 'received %r' % kwargs)\n self.__reference = None\n self.__pairs = tuple(kwargs['pairs'])\n self.__app = kwargs['app']\n self.__namespace = kwargs['namespace']", "def _encode_state_dict(\n state_dict: Optional[StateMap[str]],\n) -> Optional[List[Tuple[str, str, str]]]:\n if state_dict is None:\n return None\n\n return [(etype, state_key, v) for (etype, state_key), v in state_dict.items()]", "def update_state(state, **kwargs):\n # TODO(b/129569441): Support Struct as well.\n if not (py_typecheck.is_named_tuple(state) or py_typecheck.is_attrs(state) or\n isinstance(state, collections.abc.Mapping)):\n raise TypeError('state must be a structure with named fields (e.g. 
'\n 'dict, attrs class, collections.namedtuple), '\n 'but found {}'.format(type(state)))\n if py_typecheck.is_named_tuple(state):\n # In Python 3.8 and later `_asdict` no longer return OrdereDict, rather a\n # regular `dict`.\n d = collections.OrderedDict(state._asdict())\n elif py_typecheck.is_attrs(state):\n d = attr.asdict(state, dict_factory=collections.OrderedDict)\n else:\n for key in kwargs:\n if key not in state:\n raise KeyError(\n 'state does not contain a field named \"{!s}\"'.format(key))\n d = state\n d.update(kwargs)\n if isinstance(state, collections.abc.Mapping):\n return d\n return type(state)(**d)", "def parse_state(self, state: str):\r\n state = state.strip()\r\n state = state.split(';')\r\n\r\n if len(state) < 2:\r\n print(state)\r\n return\r\n\r\n for field in state:\r\n split = field.split(':')\r\n if len(split) < 2:\r\n continue\r\n\r\n key = split[0]\r\n value = split[1]\r\n\r\n if key in Tello.state_field_converters:\r\n try:\r\n value = Tello.state_field_converters[key](value)\r\n except Exception as e:\r\n print('Error parsing state value for {}: {} to {}'\r\n .format(key, value, Tello.state_field_converters[key]))\r\n self.state[key] = value\r\n return", "def state_dict(self):\n return self._prbm.state_dict", "def __getstate__(self) -> Dict[str, Any]:\n return {\"name\": self.name}", "def state_dict(self):\n if ADVERSARIAL_FLAG:\n return {'Net': self.net.state_dict(),\n 'AdvNet': self.adv_net.state_dict(),\n 'Optimizer': self.optimizer.state_dict(),\n 'AdvOptimizer': self.adv_optimizer.state_dict(),\n 'History': self.history,\n 'Stats': self.stats}\n return {'Net': self.net.state_dict(),\n 'Optimizer': self.optimizer.state_dict(),\n 'History': self.history,\n 'Stats': self.stats}", "def upgrade_state_dict_named(self, state_dict, name):\n return state_dict", "def __init__(self,ParamFunctionStateTuples):\n self.mDict = dict()\n for stateInit,param,func,stateFinal in ParamFunctionStateTuples:\n assert param not in stateInit\n self.mDict[param] = StateDict.EmitObj(stateInit,func,stateFinal)", "def get_state_dict(self):\n raise NotImplemented()", "def __getstate__(self):\n return {}", "def _from_state_dict(self, state_dict):\n root = DictNode(None, '')\n stack = [(value, key, root) for key, value in state_dict.items()]\n while len(stack) > 0:\n value, name, parent = stack.pop()\n if isinstance(value, dict):\n node = DictNode(parent, name)\n for name in value.keys():\n stack.append((value[name], name, node))\n elif '.' 
in name:\n chunks = name.split('.')\n for chunk in chunks[:-1]:\n next = parent.child(chunk)\n if next is None:\n next = PartialNode(parent, chunk)\n parent.add_child(next)\n else:\n assert isinstance(next, PartialNode)\n parent = next\n node = ValueNode(parent, chunks[-1], value)\n else:\n node = ValueNode(parent, name, value)\n parent.add_child(node)\n return root", "def get_state(self):\n state_dict = OrderedDict()\n for key, target_object in self._map.items():\n state = self._get_single_state(target_object)\n if state is not None:\n # pushbuttons for example are not defined in the get function\n state_dict[key] = state\n return state_dict", "def __getstate__(self):\n state = {\n 'connector_keys' : self.connector_keys,\n 'metric_key' : self.metric_key,\n 'location_key' : self.location_key,\n 'parameters' : self.parameters,\n 'mrsm_instance' : self.instance_keys,\n }\n return state", "def _get_state(self):", "def __setstate__(self, state):\n self.__dict__ = dict(state)\n self._init_compiled()", "def load_state_from_dict(self, dictionary):\n # _Context object should be empty\n if self.__dict__:\n log.warning(\"useful.logs.context should be empty before loading a \"\n \"new state into it\",\n extra={\"current_state\": self.__dict__})\n for key, value in dictionary.items():\n self.__setattr__(key, value)", "def _load_state_dict(optimizer, state: dict) -> None:\n if is_scheduler(optimizer):\n optimizer.load_state_dict(state[\"scheduler\"])\n optimizer.optimizer.load_state_dict(state[\"optimizer\"])\n else:\n optimizer.load_state_dict(state)", "def load_state(self, dictionary):\n self.log_formatstr = dictionary['log_formatstr']\n self.backend_interval = dictionary['backend_interval']", "def getstate(self):\n return {}", "def get_classy_state(self) -> Dict[str, Any]:\n return self.state_dict()", "def state_(state):\n return tuple( [ tuple( row ) for row in state ] )", "def _localSetState(self,pdict):\n pass", "def state(self):\n return {self._reverse_mapping[k]: v for k, v in enumerate(self._state)}", "def test_state(self, api, state):\n stream = AdsInsights(api=api, start_date=datetime(2010, 1, 1), end_date=datetime(2011, 1, 1), insights_lookback_window=28)\n\n assert stream.state == {}\n\n stream.state = state\n actual_state = stream.state\n actual_state[\"slices\"] = sorted(actual_state.get(\"slices\", []))\n state[\"slices\"] = sorted(state.get(\"slices\", []))\n state[\"time_increment\"] = 1\n\n assert actual_state == state", "def getState():\n # TODO: this isn't nearly as meaningful as it used to be", "def _localSetState(self,pdict):\n super()._localSetState(pdict)\n self.transition = pdict.pop('transition')\n self.steadyStatePb = pdict.pop('steadyStatePb')", "def get_state_walk(self, state_description, class_=State):\n states = {}\n names = state_description.keys()\n for dt in self.data.index:\n name_value_mapping = {name: self.get(name, dt) for name in names}\n states[dt] = state_description.to_state(class_,\n **name_value_mapping)\n\n return states", "def load_state_dict(module, state_dict, strict=False):\n unexpected_keys = []\n own_state = module.state_dict()\n for name, param in state_dict.items():\n if name not in own_state:\n unexpected_keys.append(name)\n continue\n if isinstance(param, torch.nn.Parameter):\n # backwards compatibility for serialized parameters\n param = param.data\n\n try:\n own_state[name].copy_(param)\n except Exception:\n Log.warn('While copying the parameter named {}, '\n 'whose dimensions in the model are {} and '\n 'whose dimensions in the checkpoint are 
{}.'\n .format(name, own_state[name].size(),\n param.size()))\n \n missing_keys = set(own_state.keys()) - set(state_dict.keys())\n\n err_msg = []\n if unexpected_keys:\n err_msg.append('unexpected key in source state_dict: {}\\n'.format(', '.join(unexpected_keys)))\n if missing_keys:\n # we comment this to fine-tune the models with some missing keys.\n err_msg.append('missing keys in source state_dict: {}\\n'.format(', '.join(missing_keys)))\n err_msg = '\\n'.join(err_msg)\n if err_msg:\n if strict:\n raise RuntimeError(err_msg)\n else:\n Log.warn(err_msg)", "def updateSimState(self):\n self.sim_state = {k: v for k,v in self.state.iteritems()}", "def _decode_state_dict(\n input: Optional[List[Tuple[str, str, str]]]\n) -> Optional[StateMap[str]]:\n if input is None:\n return None\n\n return frozendict({(etype, state_key): v for etype, state_key, v in input})", "def _load_sharded_state_dict(\n self,\n state_dict: Union[Dict[str, torch.Tensor], \"OrderedDict[str, torch.Tensor]\"],\n strict: bool = True,\n ) -> NamedTuple:\n with self.set_state_dict_type(StateDictType.SHARDED_STATE_DICT):\n return self.load_state_dict(state_dict, strict)", "def to_state_json(self) -> Dict[str, Any]:\n return self.state_to_json()", "def setState(self):\n\t\tself.stateDict = {'playerlist': self.playerList[:], \\\n\t\t\t\t\t\t\t'comcards': self.communityCards[:], \\\n\t\t\t\t\t\t\t'pots':\t\tself.pots[:], \\\n\t\t\t\t\t\t\t'curbet':\tself.currentBet[:], \\\n\t\t\t\t\t\t\t'turn':\t\tself.turn, \\\n\t\t\t\t\t\t\t'isGameEnd': self.isGameEnd}", "def represent_state(state):\n return tuple(state[0]), tuple(state[1]), tuple(state[2])", "def split_state_dict(state_dict):\n\n optimizer_keys = ['Moment_1_', 'Moment_2_', 'Update_Count_', 'Step']\n split_sd = {'optimizer': {}, 'fp32_param': {}, 'fp16_param': {}}\n for k, v in state_dict.items():\n mode = 'fp32_param'\n for optim_key in optimizer_keys:\n if k.startswith(optim_key):\n mode = 'optimizer'\n break\n if k.endswith('_fp16'):\n mode = 'fp16_param'\n split_sd[mode][k] = v\n return split_sd", "def set_states(self, state_dict):\n self.trainer.get_model().load_state_dict(state_dict)", "def state_dict(self):\n return {\n 'XY_net': self.XY_net.state_dict(),\n 'XY_optimizer_minee': self.XY_optimizer_minee.state_dict(),\n 'X_net': self.X_net.state_dict(),\n 'X_optimizer_minee': self.X_optimizer_minee.state_dict(),\n 'Y_net': self.Y_net.state_dict(),\n 'Y_optimizer_minee': self.Y_optimizer_minee.state_dict(),\n 'X': self.X,\n 'Y': self.Y,\n 'lr': self.lr,\n 'batch_size': self.batch_size,\n 'ref_batch_factor': self.ref_batch_factor\n }", "def _localSetState(self,pdict):\n self.n = pdict.pop('n')\n self.p = pdict.pop('p')", "def load_state_dict(self, state_dict):\n if self._lr_scheduler is not None:\n self._lr_scheduler.load_state_dict(state_dict)\n else: # here we store the state_dict until we instantiate the optimizer\n self._state_dict = state_dict", "def cpu_state_dict(state_dict):\n for k, v in state_dict.items():\n state_dict[k] = v.cpu()\n return state_dict", "def __init__(self, n_states: int, n_actions: int):\n self._p = {s: {a: [] for a in range(n_actions)} for s in range(n_states)}", "def load_module_state_dict(model, state_dict):\n import warnings\n from torch.nn import Parameter\n\n own_state = model.state_dict()\n for name, param in state_dict.items():\n if name not in own_state:\n warnings.warn('Skipping unexpected key \"{}\" in state_dict'.format(name))\n continue\n if isinstance(param, Parameter):\n # backwards compatibility for serialized parameters\n param = 
param.data\n try:\n own_state[name].copy_(param)\n except Exception, msg:\n warnings.warn(\"Error occurs when copying from state_dict['{}']: {}\"\n .format(name, str(msg)))\n\n missing = set(own_state.keys()) - set(state_dict.keys())\n if len(missing) > 0:\n warnings.warn(\n \"Keys not found in state_dict and thus not overwritten: '{}'\"\n .format(missing))", "def _localSetState(self,pdict):\n self.low = pdict.pop('low' )\n self.high = pdict.pop('high' )\n self.alpha = pdict.pop('alpha')\n self.beta = pdict.pop('beta' )", "def state_transition(self, curr_state, curr_action):\n curr_state[curr_action[0]] = curr_action[1]\n return curr_state", "def load_state_dict(model, src_state_dict, fold_bnt=True):\n from torch.nn import Parameter\n\n dest_state_dict = model.state_dict()\n for name, param in src_state_dict.items():\n if name not in dest_state_dict:\n continue\n if isinstance(param, Parameter):\n # backwards compatibility for serialized parameters\n param = param.data\n try:\n dest_state_dict[name].copy_(param)\n except (Exception, msg):\n print(\"Warning: Error occurs when copying '{}': {}\".format(name, str(msg)))\n\n # New version of BN has buffer `num_batches_tracked`, which is not used\n # for normal BN, so we fold all these missing keys into one line\n def _fold_nbt(keys):\n nbt_keys = [s for s in keys if s.endswith('.num_batches_tracked')]\n if len(nbt_keys) > 0:\n keys = [s for s in keys if not s.endswith('.num_batches_tracked')] + ['num_batches_tracked x{}'.format(len(nbt_keys))]\n return keys" ]
[ "0.77548754", "0.7265476", "0.6977761", "0.69326776", "0.69324243", "0.69324243", "0.693159", "0.68787336", "0.68787336", "0.6834486", "0.6827123", "0.6820761", "0.6802007", "0.6770729", "0.6729137", "0.6722943", "0.6710804", "0.6681896", "0.6674843", "0.66132194", "0.6602126", "0.660181", "0.6593994", "0.65590245", "0.6552003", "0.6552003", "0.6552003", "0.6538589", "0.65351516", "0.65327126", "0.6524001", "0.6523319", "0.6508271", "0.6505032", "0.6504496", "0.6497112", "0.64851743", "0.6478223", "0.64670134", "0.6449392", "0.6443862", "0.6434615", "0.64266366", "0.63929033", "0.63929033", "0.6391568", "0.6374287", "0.6368772", "0.6368772", "0.63572323", "0.6331497", "0.6328505", "0.6321949", "0.632146", "0.632146", "0.6309207", "0.63090706", "0.63060015", "0.629172", "0.6282743", "0.62767494", "0.6270483", "0.6269773", "0.62653166", "0.62639856", "0.62638044", "0.62619936", "0.62614906", "0.62527096", "0.62500435", "0.6240604", "0.6234313", "0.6230693", "0.61973053", "0.61965525", "0.6180526", "0.6174383", "0.61679226", "0.61643356", "0.61596215", "0.61455774", "0.61451834", "0.61441666", "0.61423653", "0.61404", "0.61399287", "0.61141276", "0.6109626", "0.6106216", "0.6105114", "0.60881937", "0.6083561", "0.6082212", "0.6078591", "0.6072678", "0.6072063", "0.60709786", "0.6070798", "0.60698986", "0.60687226", "0.60684204" ]
0.0
-1
Given a list of parsed scene_items (a plain list of dicts) Provide methods for rendering that data timesigniture is only used for debug printing
def __init__(self, scene_items, timesigniture=DEFAULT_TIMESIGNITURE_): self.scene_items = scene_items self.total_beats = sum(scene_item['duration'] for scene_item in self.scene_items) self.timesigniture = timesigniture
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_items_function(self):\n ars = self.ar[2009][11]['general']\n self.assertEqual(list(ars.items()), [('LastLine', ['20091202000343', '1011585', '206082338', '54716901457']), ('FirstTime', ['20091101000237']), ('LastTime', ['20091130234113']), ('LastUpdate', ['20091201094510', '1011585', '0', '886950', '70062', '54572']), ('TotalVisits', ['1475']), ('TotalUnique', ['547']), ('MonthHostsKnown', ['397']), ('MonthHostsUnknown', ['196'])])", "def parse_items(self):", "def process_items():\n global HAS_WATCH\n global HAS_FIRST_AID_KIT\n global HAS_FLASHLIGHT\n global HAS_RAINCOAT\n global HAS_COMPASS\n global HAS_BEARTRAP\n\n if \"Watch\" in ITEMS:\n HAS_WATCH = True\n if \"First Aid Kit\" in ITEMS:\n HAS_FIRST_AID_KIT = True\n if \"Flashlight\" in ITEMS:\n HAS_FLASHLIGHT = True\n if \"Raincoat\" in ITEMS:\n HAS_RAINCOAT = True\n if \"Compass\" in ITEMS:\n HAS_COMPASS = True\n if \"Bear Trap\" in ITEMS:\n HAS_BEARTRAP = True\n\n # Stupid little hack to provide 'immediate updates/effect' of having the below items\n if HAS_WATCH:\n update_title_area(\" Day: %d Time: %d:00 \" % (DAY, TIME))\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS] = \"Y\"", "def _parse_audit_items(self, items, function_name):\n for item in items:\n yield {\n \"snippet\": item[\"node\"][\"snippet\"],\n \"selector\": item[\"node\"][\"selector\"],\n \"colors\": self._extract_hex_codes(item[\"node\"][\"explanation\"]),\n \"pipeline\": [function_name],\n # path is in the format \"1,HTML,1,BODY,0,DIV,...\"\n # we only need to keep the numbers (as integers)\n \"path\": tuple(int(i) for i in item[\"node\"][\"path\"].split(\",\")[::2]),\n }", "def parse_scene_order(self, data, timesigniture):\n if not data:\n return ()\n\n num_scenes = len(data)\n\n def attempt_parse_key_timecode(value):\n if not value:\n return value\n try:\n return float(value)\n except (ValueError, TypeError):\n pass\n try:\n return timecode_to_beat(value, timesigniture)\n except (AssertionError, ValueError, AttributeError):\n pass\n return value\n # Surface the original key value in the dict (useful for debugging)\n for key, value in data.items():\n if value:\n value['key'] = key\n data_float_indexed = {attempt_parse_key_timecode(k): v for k, v in data.items()}\n assert len(data_float_indexed) == num_scenes\n sorted_keys = sorted(data_float_indexed.keys())\n assert len(sorted_keys) == num_scenes\n\n def normalise_duration(index):\n \"\"\"\n Convert any time code or alias to a linear float value. 
e.g.\n '1.2' parses to -> 1.5\n 'match_next' resolves to -> 4.0\n \"\"\"\n key = sorted_keys[index]\n item = data_float_indexed[key]\n if not item:\n item = {'duration': 'auto'}\n data_float_indexed[key] = item\n duration = attempt_parse_key_timecode(item.get('duration'))\n if duration == 'match_next':\n duration = normalise_duration(index+1)\n if duration == 'match_prev':\n duration = normalise_duration(index-1)\n if isinstance(duration, str) and duration.startswith('match '):\n duration = normalise_duration(sorted_keys.index(float(duration.strip('match '))))\n if (not duration or duration == 'auto') and index < len(sorted_keys)-1:\n duration = sorted_keys[index+1] - key\n if not isinstance(duration, float):\n #log.info('Unparsed duration: {0}'.format(duration))\n duration = self.DEFAULT_DURATION\n if duration != item.get('duration'):\n item['duration'] = duration\n return duration\n for index in range(len(sorted_keys)):\n normalise_duration(index)\n scene_items = []\n for key in sorted_keys:\n scene_item = data_float_indexed[key]\n assert scene_item and scene_item.get('duration') >= 0, \"All scene must have durations. Something has failed in parsing. {0}:{1}\".format(key, scene_item)\n scene_items.append(scene_item)\n return scene_items", "def log_items(items):\n\tif len(items) < max_print:\n\t\tlogging.info(\"ITEMS : %s\", json.dumps(items))", "def items():", "def report_dump_runinfo(dump_items):\n runinfo_lines = [\"name:%s; status:%s; updated:%s\" %\n (item.name(), item.status(), item.updated())\n for item in dump_items]\n runinfo_lines.reverse()\n txt_content = \"\\n\".join(runinfo_lines)\n content = {}\n content['txt'] = txt_content + \"\\n\"\n # {\"jobs\": {name: {\"status\": stuff, \"updated\": stuff}}, othername: {...}, ...}\n content_json = {\"jobs\": {}}\n for item in sorted(dump_items, reverse=True, key=lambda job: job.name()):\n content_json[\"jobs\"][item.name()] = {'status': item.status(), 'updated': item.updated()}\n content['json'] = json.dumps(content_json)\n return content", "def rapl_timeline():\n\n return [{ \"timestamp\": \"2021-10-05T09:14:58.226\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 5558763520.0, \"time_enabled\": 1000770053.0, \"time_running\": 1000770053.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:14:59.226\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 4777050112.0, \"time_enabled\": 2001065535.0, \"time_running\": 2001065535.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:15:00.227\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 6847987712.0, \"time_enabled\": 3001449088.0, \"time_running\": 3001449088.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:15:01.227\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 5054922752.0, \"time_enabled\": 4001882359.0, \"time_running\": 4001882359.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:15:02.228\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 5434507264.0, \"time_enabled\": 5002352709.0, \"time_running\": 5002352709.0 } } } } }\n ]", "def process_item(self, item):\n entries = self.compat.process_entries(item)\n try:\n pd = PhaseDiagram(entries)\n analyzer = PDAnalyzer(pd)\n\n docs = []\n\n for e in entries:\n (decomp, ehull) = \\\n analyzer.get_decomp_and_e_above_hull(e)\n\n d = {\"material_id\": 
e.entry_id}\n d[\"thermo\"] = {}\n d[\"thermo\"][\"formation_energy_per_atom\"] = pd.get_form_energy_per_atom(e)\n d[\"thermo\"][\"e_above_hull\"] = ehull\n d[\"thermo\"][\"is_stable\"] = e in pd.stable_entries\n if d[\"thermo\"][\"is_stable\"]:\n d[\"thermo\"][\"eq_reaction_e\"] = analyzer.get_equilibrium_reaction_energy(e)\n d[\"thermo\"][\"decomposes_to\"] = [{\"material_id\": de.entry_id,\n \"formula\": de.composition.formula,\n \"amount\": amt}\n for de, amt in decomp.items()]\n d[\"thermo\"][\"entry\"] = e.as_dict()\n d[\"thermo\"][\"explanation\"] = self.compat.get_explanation_dict(e)\n docs.append(d)\n except PhaseDiagramError as p:\n self.__logger.warning(\"Phase diagram error: {}\".format(p))\n return []\n\n return docs", "def get_info(data):\n # type: (dict) -> dict\n item = data.get(\"item\", {})\n plot = item.get(\"summary\", data.get(\"description\"))\n if plot and isinstance(plot, list):\n plot = plot[0]\n # TODO : some-kind of duration calculation...\n return {\n \"title\": item.get(\"title\", \"\").title(),\n \"plot\": plot,\n \"year\": extract_year(item.get(\"date\", \"\")),\n \"genre\": item.get(\"genre\")\n }", "def scene_to_text(scenes):\n scene_text_dict = []\n scene_text_list = []\n for i, scene in enumerate(scenes):\n if len(scene['frame_data']) == 0:\n break\n scene_image = Image.fromarray(scene['frame_data'])\n str_text = pytesseract.image_to_string(scene_image)\n #list_text = list(filter(('').__ne__, re.split(\" |\\n|, |. |:|.\\n|\\x0c\", str_text)))\n list_text = list(filter(('').__ne__, re.split(\" |\\n\", str_text)))\n bag_of_word = collections.Counter(list_text)\n scene_text_dict.append(\n {'start': scene['start'], \n 'end': scene['end'], \n 'bag_of_word': dict(bag_of_word)\n })\n scene_text_list.append(list_text)\n return scene_text_dict, scene_text_list", "def serializeItemsData(items, highlight=False):\n from debra.models import ProductModelShelfMap\n #items = items.filter(added_datetime__gte=datetime.date.today()-datetime.timedelta(days=30))\n # unordered_pair = list(items.values_list('added_datetime', 'id'))\n unordered_pair = []\n\n for item in items:\n unordered_pair.append((item.added_datetime, item.id))\n\n unordered_pair.sort()\n unordered_pair.reverse()\n ids = [x[1] for x in unordered_pair[:60]]\n items = ProductModelShelfMap.objects.select_related(\n 'product_model__brand').filter(id__in=ids)\n items_data = []\n prod_model_existing = set()\n for item in items:\n if item.product_model.name in prod_model_existing:\n continue\n prod_model_existing.add(item.product_model.name)\n item_data = {\n \"name\": item.product_model.name,\n \"img_url_feed_view\": item.product_model.img_url,\n \"img_url_panel_view\": item.img_url_panel_view,\n }\n if highlight:\n item_data[\"highlight\"] = True\n if item.product_model.brand:\n item_data[\"brand\"] = item.product_model.brand.name\n items_data.append(item_data)\n return items_data", "def _item_to_elements_parser(self, item):\n elements = {}\n\n ####### Sad solution - look for better one. 
#######\n items = [\"data\", \"img\", \"title\", \"link\", \"price\"]\n values = (\"item.p.string.strip()\", 'item.img[\"src\"]', 'item.img[\"alt\"]',\n '''item.find(\"a\", {\"class\":\"detailsLink\"})['href']''',\n '''item.find('strong').string.strip()''')\n for key, value in zip(items, values):\n\n # CONVERT TIME\n # if key == \"data\":\n # try:\n # print (time.strptime(eval(value), \"%d %b\"))\n # except Exception as error:\n # print (error) # time data '5 paz' does not match format '%d %b'\n\n try:\n elements.update({key:eval(value)})\n except (TypeError, AttributeError):\n elements.update({key:None})\n\n\n # print()\n # for key, val in elements.items():\n # print (key, val)\n # print()\n ###################################################\n return elements", "def extract_scene_info(self) -> None:\n records = [\n (self.level5data.get(\"sample\", rec[\"first_sample_token\"])[\"timestamp\"], rec)\n for rec in self.level5data.scene\n ]\n\n entries = []\n for start_time, record in sorted(records):\n start_time = (\n self.level5data.get(\"sample\", record[\"first_sample_token\"])[\"timestamp\"]\n / 1000000\n )\n\n token = record[\"token\"]\n name = record[\"name\"]\n date = datetime.utcfromtimestamp(start_time)\n host = \"-\".join(record[\"name\"].split(\"-\")[:2])\n first_sample_token = record[\"first_sample_token\"]\n\n entries.append((host, name, date, token, first_sample_token))\n\n self.df = pd.DataFrame(\n entries,\n columns=[\"host\", \"scene_name\", \"date\", \"scene_token\", \"first_sample_token\"],\n )\n host_count_df = self.df.groupby(\"host\")[\"scene_token\"].count()\n print(\"the number of host\", host_count_df)", "def transform_item(item):\n\n if isinstance(item, dict):\n new = {}\n for k, v in item.items():\n # Replace hyphens with underscores for BigQuery compatibility\n k = k.replace(\"-\", \"_\")\n\n # Get inner array for date parts\n if k == \"date_parts\":\n v = v[0]\n if None in v:\n # \"date-parts\" : [ [ null ] ]\n v = []\n elif k == \"award\":\n if isinstance(v, str):\n v = [v]\n elif k == \"date_time\":\n try:\n datetime.strptime(v, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n v = \"\"\n\n new[k] = transform_item(v)\n return new\n elif isinstance(item, list):\n return [transform_item(i) for i in item]\n else:\n return item", "def test_items(self):\n obs = self.tester.items()\n self.assertTrue(isinstance(obs, Iterable))\n exp = [('1.SKB1.640202', PrepSample('1.SKB1.640202', self.tester)),\n ('1.SKB2.640194', PrepSample('1.SKB2.640194', self.tester)),\n ('1.SKB3.640195', PrepSample('1.SKB3.640195', self.tester)),\n ('1.SKB4.640189', PrepSample('1.SKB4.640189', self.tester)),\n ('1.SKB5.640181', PrepSample('1.SKB5.640181', self.tester)),\n ('1.SKB6.640176', PrepSample('1.SKB6.640176', self.tester)),\n ('1.SKB7.640196', PrepSample('1.SKB7.640196', self.tester)),\n ('1.SKB8.640193', PrepSample('1.SKB8.640193', self.tester)),\n ('1.SKB9.640200', PrepSample('1.SKB9.640200', self.tester)),\n ('1.SKD1.640179', PrepSample('1.SKD1.640179', self.tester)),\n ('1.SKD2.640178', PrepSample('1.SKD2.640178', self.tester)),\n ('1.SKD3.640198', PrepSample('1.SKD3.640198', self.tester)),\n ('1.SKD4.640185', PrepSample('1.SKD4.640185', self.tester)),\n ('1.SKD5.640186', PrepSample('1.SKD5.640186', self.tester)),\n ('1.SKD6.640190', PrepSample('1.SKD6.640190', self.tester)),\n ('1.SKD7.640191', PrepSample('1.SKD7.640191', self.tester)),\n ('1.SKD8.640184', PrepSample('1.SKD8.640184', self.tester)),\n ('1.SKD9.640182', PrepSample('1.SKD9.640182', self.tester)),\n ('1.SKM1.640183', 
PrepSample('1.SKM1.640183', self.tester)),\n ('1.SKM2.640199', PrepSample('1.SKM2.640199', self.tester)),\n ('1.SKM3.640197', PrepSample('1.SKM3.640197', self.tester)),\n ('1.SKM4.640180', PrepSample('1.SKM4.640180', self.tester)),\n ('1.SKM5.640177', PrepSample('1.SKM5.640177', self.tester)),\n ('1.SKM6.640187', PrepSample('1.SKM6.640187', self.tester)),\n ('1.SKM7.640188', PrepSample('1.SKM7.640188', self.tester)),\n ('1.SKM8.640201', PrepSample('1.SKM8.640201', self.tester)),\n ('1.SKM9.640192', PrepSample('1.SKM9.640192', self.tester))]\n # Creating a list and looping over it since unittest does not call\n # the __eq__ function on the objects\n for o, e in zip(sorted(list(obs)), sorted(exp)):\n self.assertEqual(o, e)", "def do_scenes(self, line):\n\n print 'List of Scenes \\n'\n print 'ID\\tName'\n\n for index, scene in enumerate(self.huuey.scenes):\n print u\"{index}\\t{unique}\".format(index=index+1, unique=scene)", "def process(data):\n items = data.get('items', [])\n logging.info('- processing %d items', len(items))\n return [_flatten_dimensions(t['properties']['dimensions']) for t in items]", "def test_items(self):\n obs = self.tester.items()\n self.assertTrue(isinstance(obs, Iterable))\n exp = [('1.SKB1.640202', Sample('1.SKB1.640202', self.tester)),\n ('1.SKB2.640194', Sample('1.SKB2.640194', self.tester)),\n ('1.SKB3.640195', Sample('1.SKB3.640195', self.tester)),\n ('1.SKB4.640189', Sample('1.SKB4.640189', self.tester)),\n ('1.SKB5.640181', Sample('1.SKB5.640181', self.tester)),\n ('1.SKB6.640176', Sample('1.SKB6.640176', self.tester)),\n ('1.SKB7.640196', Sample('1.SKB7.640196', self.tester)),\n ('1.SKB8.640193', Sample('1.SKB8.640193', self.tester)),\n ('1.SKB9.640200', Sample('1.SKB9.640200', self.tester)),\n ('1.SKD1.640179', Sample('1.SKD1.640179', self.tester)),\n ('1.SKD2.640178', Sample('1.SKD2.640178', self.tester)),\n ('1.SKD3.640198', Sample('1.SKD3.640198', self.tester)),\n ('1.SKD4.640185', Sample('1.SKD4.640185', self.tester)),\n ('1.SKD5.640186', Sample('1.SKD5.640186', self.tester)),\n ('1.SKD6.640190', Sample('1.SKD6.640190', self.tester)),\n ('1.SKD7.640191', Sample('1.SKD7.640191', self.tester)),\n ('1.SKD8.640184', Sample('1.SKD8.640184', self.tester)),\n ('1.SKD9.640182', Sample('1.SKD9.640182', self.tester)),\n ('1.SKM1.640183', Sample('1.SKM1.640183', self.tester)),\n ('1.SKM2.640199', Sample('1.SKM2.640199', self.tester)),\n ('1.SKM3.640197', Sample('1.SKM3.640197', self.tester)),\n ('1.SKM4.640180', Sample('1.SKM4.640180', self.tester)),\n ('1.SKM5.640177', Sample('1.SKM5.640177', self.tester)),\n ('1.SKM6.640187', Sample('1.SKM6.640187', self.tester)),\n ('1.SKM7.640188', Sample('1.SKM7.640188', self.tester)),\n ('1.SKM8.640201', Sample('1.SKM8.640201', self.tester)),\n ('1.SKM9.640192', Sample('1.SKM9.640192', self.tester))]\n # Creating a list and looping over it since unittest does not call\n # the __eq__ function on the objects\n for o, e in zip(sorted(list(obs)), sorted(exp)):\n self.assertEqual(o, e)", "def draw_items_value(content):\n draw_data = {}\n ####\n #Not want to get the last running time data. 
That makes fault\n if \":\" in content[0]:\n return draw_data\n ####\n draw_data[\"CONFIG\"] = content[0]\n for line in content:\n if \":\" in line:\n item_and_value = strip_data(line)\n draw_data[item_and_value[0]] = item_and_value[1]\n return draw_data", "def joinData(item_list):\n\n t_1 = datetime.now()\n news_dict = {}\n ln_item_list = len(item_list)\n for i, r in enumerate(item_list):\n str_date = r[\"date\"].strftime(\"%Y-%m\")\n if str_date not in news_dict:\n news_dict[str_date] = \"\"\n news_dict[str_date] += \" %s\" % r[\"text\"]\n print (i * 100.) / ln_item_list, datetime.now() - t_1\n return news_dict", "def _iter_items(data_sequence):\n for time, element in data_sequence:\n for item in element:\n yield time, item", "def process_scene_data(self, scene, data, tmp_dir):\n pass", "def find_sequence_items(data):\n results = []\n cnt = 1\n seqs = SeqIO.parse(StringIO(data), 'fasta')\n for seq in seqs:\n results.append({\n 'idx': cnt,\n 'name': seq.name,\n 'sequence': str(seq.seq)\n })\n cnt += 1\n SequenceListItems.verify_unique_names(results)\n return results", "def print_items(items): \n print(items)", "def on_new_json_items(self, items_params, new_items=None):\n if self.json_progress_message_bar:\n self.json_progress.setValue(self.json_progress.value() + 1)\n if new_items:\n if KEY_ESRI_GEOMETRY_POLYGON in self.geometries and self.geometries[KEY_ESRI_GEOMETRY_POLYGON].is_checked:\n for polygon in new_items[KEY_POLYGON]:\n self.write_to_file(FILE_POLYGON, u\"\\n\")\n if self.written_first_polygon:\n self.write_to_file(FILE_POLYGON, u\",\")\n else:\n self.written_first_polygon = True\n self.write_to_file(FILE_POLYGON, polygon)\n if KEY_ESRI_GEOMETRY_POLYLINE in self.geometries and self.geometries[KEY_ESRI_GEOMETRY_POLYLINE].is_checked:\n for line in new_items[KEY_LINE]:\n self.write_to_file(FILE_LINE, u\"\\n\")\n if self.written_first_line:\n self.write_to_file(FILE_LINE, u\",\")\n else:\n self.written_first_line = True\n self.write_to_file(FILE_LINE, line)\n if KEY_ESRI_GEOMETRY_POINT in self.geometries and self.geometries[KEY_ESRI_GEOMETRY_POINT].is_checked:\n for point in new_items[KEY_POINT]:\n self.write_to_file(FILE_POINTS, u\"\\n\")\n if self.written_first_point:\n self.write_to_file(FILE_POINTS, u\",\")\n else:\n self.written_first_point = True\n self.write_to_file(FILE_POINTS, point)\n if KEY_ESRI_GEOMETRY_MULTI_POINT in self.geometries and\\\n self.geometries[KEY_ESRI_GEOMETRY_MULTI_POINT].is_checked:\n for point in new_items[KEY_MULTI_POINT]:\n self.write_to_file(FILE_POINTS, u\"\\n\")\n if self.written_first_point:\n self.write_to_file(FILE_POINTS, u\",\")\n else:\n self.written_first_point = True\n self.write_to_file(FILE_POINTS, point)\n\n self.on_new_json_task_complete()", "def GetItemsAtTime(self, time_elapsed):\n items = []\n\n if self.data == None:\n raise Exception('TimelineData: Trying to GetState when data==None')\n\n # Go through each of our items\n for item in self.data:\n # Ignore items that cant be retrieved by time_elapsed\n if 'start' not in item or 'duration' not in item:\n #print 'TimelineData: Skipping Item: %s: %s' % (self.path, item)\n continue\n\n # If time_elapsed is between start and end of this item\n if time_elapsed >= item['start'] and time_elapsed < item['start'] + item['duration']:\n print 'TimelineData: Found Item: %s: %s' % (self.path, item)\n items.append(item)\n else:\n #print 'TimelineData: Unmatched Item: %s: %s' % (self.path, item)\n pass\n\n return items", "def test_filter_data_by_race():\n data = 
race.filter_data_by_race(random.randint(1, 3))\n assert len(data) == 11\n assert type(data) == list\n for datum in data:\n assert type(datum) == dict", "def read_item(data: DataModel) -> Dict:\n convertor = Convertor(data)\n return {'output': convertor.get_humanized_data()}", "def test_train_slice(train_items: List[JSONDict]) -> None:\n validated = TrainCollection(items=train_items)\n\n assert validated[0:2] == validated", "def print_entry(item):\n print('Date: ', item[\"Date\"])\n print('Task: ', item[\"Task\"])\n print('Time Spent: ', item[\"Time\"])\n print('Notes: ', item[\"Notes\"], '\\n')", "def populateSceneRefs(*args):\n pi.referenceDictionary = {}\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, ra=True)\n\n #get reference paths\n refs = cmds.file(q=True, r=True)\n\n buff = []\n # loaded = []\n for ref in refs:\n #get the associated namespace\n ns = cmds.file(ref, q=True, ns=True)\n pi.referenceDictionary[ns] = ref\n\n # put files in buffer list to sort\n for g in pi.referenceDictionary.keys():\n buff.append(g)\n buff.sort()\n\n # now put the sorted namespaces in the list\n for b in buff:\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, append=b, dcc = selectRefs)\n\n # if ref is deferred(not loaded), change it's font\n for ref in refs:\n if cmds.file(ref, q=True, deferReference=True):\n ns = cmds.file(ref, q=True, ns=True) # get the namespace in order to get the item name\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, selectItem=ns) # sel the item in order to query it\n index = cmds.textScrollList(widgets[\"shotAssListTSL\"], q=True, selectIndexedItem=True)[0] # query the index of sel\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, lineFont = [index, \"obliqueLabelFont\"])\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, deselectAll=True)\n\n # if we're in a lgt file, look through current refs and for each one of type \"anm\", check the frame rates, etc. 
and give option to change\n curr = paths.PathManager(cmds.file(q=True, sn=True))\n if curr.shotType == \"lgt\":\n for ref in refs:\n p=paths.PathManager(ref)\n if p.shotType == \"anm\":\n dict = cFuncs.getFileFrameInfo(cFuncs.fixPath(ref))\n csi.compareSceneInfo(dict)", "def test_items(self):\n obs = self.tester.items()\n self.assertTrue(isinstance(obs, Iterable))\n exp = {('physical_location', 'ANL'), ('has_physical_specimen', True),\n ('has_extracted_data', True), ('sample_type', 'ENVO:soil'),\n ('required_sample_info_status', 'completed'),\n ('collection_timestamp', datetime(2011, 11, 11, 13, 00, 00)),\n ('host_subject_id', '1001:M7'),\n ('description', 'Cannabis Soil Microbiome'),\n ('season_environment', 'winter'), ('assigned_from_geo', 'n'),\n ('texture', '64.6 sand, 17.6 silt, 17.8 clay'),\n ('taxon_id', '1118232'), ('depth', 0.15),\n ('host_taxid', '3483'), ('common_name', 'root metagenome'),\n ('water_content_soil', 0.164), ('elevation', 114), ('temp', 15),\n ('tot_nitro', 1.41), ('samp_salinity', 7.15), ('altitude', 0),\n ('env_biome',\n 'ENVO:Temperate grasslands, savannas, and shrubland biome'),\n ('country', 'GAZ:United States of America'), ('ph', 6.94),\n ('anonymized_name', 'SKB8'), ('tot_org_carb', 5),\n ('description_duplicate', 'Burmese root'),\n ('env_feature', 'ENVO:plant-associated habitat'),\n ('latitude', 74.0894932572),\n ('longitude', 65.3283470202)}\n self.assertEqual(set(obs), exp)", "def test_list(self, items: list) -> None:\r\n if not isinstance(items, list):\r\n raise ValueError(f'Expected list, but received {type(items)}')\r\n for item in items:\r\n if isinstance(item, dict):\r\n self.test_dict(dictionary=item)\r\n elif isinstance(item, list):\r\n self.test_list(items=item)", "def make_scene_folders_0001(jsonFile, rootDir):\n sceneShotList = [[1, ['A', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AJ', 'AK',\n 'C', 'D', 'E', 'E_v2', 'G', 'H', 'N']],\n [8, ['D', 'DA', 'DB', 'DC', 'D_v2']],\n [9, ['A', 'A_v2', 'B', 'B_v2']],\n [11, ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'G_v2',\n 'K', 'K_v2', 'K_v3', 'K_v4', 'K_v5', 'K_v6',\n 'M', 'M_v2', 'M_v3', 'M_v4', 'M_v5', 'M_v6', 'M_v7',\n 'N', 'N_v2', 'N_v3', 'N_v4', 'N_v5', 'R', 'R_v2', 'R_v3', 'R_v4', 'R_v5']],\n [12, ['A', 'A_v2', 'B', 'C', 'C_v2', 'C_v3', 'C_v4', 'D',\n 'E', 'E_v2']],\n [13, ['A', 'C', 'D', 'E', 'F']],\n [14, ['A', 'B', 'B_v2', 'B_v3', 'B_v4', 'B_v5', 'B_v6',\n 'C', 'C_v2', 'C_v3', 'C_v4', 'D', 'F', 'FF', 'H',\n 'H_v2', 'J', 'JJ', 'JJ_v2', 'K', 'L', 'M', 'M_v2',\n 'M_v3', 'M_v4', 'M_v5', 'N', 'P', 'P_v2', 'P_v3',\n 'R', 'R_v2', 'T', 'T_v2']],\n [15, ['A', 'J', 'J_v2', 'K', 'K_v2', 'L']]]\n\n for i, l in enumerate(sceneShotList):\n shotFolder = \"s\" + str(l[0]).zfill(3) # gives padding of 4\n shotFolderFinal = shotFolder\n if len(l) == 1 or l[1] == []:\n shotFolderFinal = os.path.join(rootDir, shotFolder)\n make_tree_from_dict(jsonFile, shotFolderFinal)\n else:\n for shot in l[1]:\n shotFolderFinal = shotFolder + shot\n shotFolderFinal = os.path.join(rootDir, shotFolderFinal)\n make_tree_from_dict(jsonFile, shotFolderFinal)", "def patient_view_post(items):\n\n now = datetime.datetime.now()\n iter_dict = {'True': 'num_views_details_list', 'False': 'num_views_match_list'}\n\n db = database.get_db()\n for item in items:\n\n nav = iter_dict[str(item['from_details'])]\n not_nav = iter_dict[str(not item['from_details'])]\n user = get_current_user(settings.NO_AUTH, app)\n\n # from CTI-mode filter matches\n if 'filter_match' in item and item['filter_match'] is True:\n patient_view = 
db.patient_view.find_one({\n 'user_id': user['_id'],\n 'mrn': item['mrn'],\n 'filter_label': item['filter_label'],\n 'filter_protocol_no': item['filter_protocol_no']\n })\n\n item['view_date'] = now\n item['requires_manual_review'] = True\n if patient_view is not None:\n db.patient_view.remove({'_id': patient_view['_id']})\n item['user_id'] = patient_view['user_id']\n item['user_first_name'] = patient_view['user_first_name']\n item['user_last_name'] = patient_view['user_last_name']\n item['user_email'] = patient_view['user_email']\n item[nav] = patient_view[nav] + 1\n item[not_nav] = patient_view[not_nav]\n\n if 'user_user_name' not in patient_view:\n item['user_user_name'] = user['user_name']\n else:\n item['user_user_name'] = patient_view['user_user_name']\n\n else:\n item['user_id'] = user['_id']\n item['user_first_name'] = user['first_name']\n item['user_last_name'] = user['last_name']\n item['user_email'] = user['email']\n item['user_user_name'] = user['user_name']\n item[nav] = 1\n item[not_nav] = 0\n\n else:\n # from patient search\n patient_view = db.patient_view.find_one({\n 'user_id': user['_id'],\n 'mrn': item['mrn'],\n 'protocol_no': item['protocol_no']\n })\n\n if patient_view is not None:\n db.patient_view.remove({'_id': patient_view['_id']})\n item['user_id'] = patient_view['user_id']\n item['user_first_name'] = patient_view['user_first_name']\n item['user_last_name'] = patient_view['user_last_name']\n item['user_email'] = patient_view['user_email']\n item[nav] = patient_view[nav] + 1\n item[not_nav] = patient_view[not_nav]\n item['view_date'] = now\n\n if 'user_user_name' not in patient_view:\n item['user_user_name'] = user['user_name']\n else:\n item['user_user_name'] = patient_view['user_user_name']\n\n else:\n item['user_id'] = user['_id']\n item['user_first_name'] = user['first_name']\n item['user_last_name'] = user['last_name']\n item['user_email'] = user['email']\n item['user_user_name'] = user['user_name']\n item[nav] = 1\n item[not_nav] = 0\n item['view_date'] = now\n\n if 'from_details' in item:\n del item['from_details']\n if 'filter_match' in item:\n del item['filter_match']", "def items(self):", "def _read(self, openf=None, stepfilter=None):\n itemstack = []\n current = None\n result = {}\n xkeys = None\n timeskip = False\n laststep = False\n\n if openf is None:\n f = open(self.filepath)\n else:\n f = openf\n\n line = 'start'\n while line != '':\n lastpos = f.tell()\n line = f.readline()\n if line == '':\n continue\n \n if itemstack is not None and len(itemstack) > 0: \n cast = itemstack.pop()\n raw = line.split()\n values = [t(r) for t, r in zip(cast, raw)]\n if len(values) == 1:\n values = values[0]\n\n if current == \"time\":\n if stepfilter is not None and values not in stepfilter:\n timeskip = True\n elif (self.index is not None and values != self.index):\n if values > self.index:\n if openf is None:\n return {}\n else:\n timeskip = True\n laststep = True\n else:\n timeskip = True\n elif self.index is None:\n self.index = values\n else:\n timeskip = False\n \n if len(itemstack) == 0 and current not in result:\n result[current] = values\n else:\n if current not in result:\n result[current] = []\n result[current].append(values)\n continue\n elif itemstack is None and current == \"atoms\":\n if \"ITEM\" in line:\n current = None\n if openf is not None:\n f.seek(lastpos)\n break\n else:\n #E.g. 
line: 1 4 -65.9625 1.54915 1.46824 5 30.976 \n vals = line.split()\n sid, atype = tuple(map(int, vals[0:2]))\n result[\"type\"].append(atype)\n result[\"id\"].append(sid)\n x, y, z = tuple(map(float, vals[2:5]))\n result[\"xyz\"].append((x, y, z))\n if len(vals) > 5 and xkeys is not None:\n for ikey, v in enumerate(vals[5:]):\n result[xkeys[ikey]].append(eval(v))\n continue # pragma: no cover\n \n if \"ITEM: TIMESTEP\" in line:\n if laststep:\n f.seek(lastpos)\n break\n itemstack.append((int,))\n current = \"time\"\n timeskip = False\n elif not timeskip:\n if \"ITEM: NUMBER OF ATOMS\" in line:\n itemstack.append((int,))\n current = \"natoms\"\n elif \"ITEM: BOX BOUNDS\" in line:\n period = line.strip().split(\"BOX BOUNDS\")\n if len(period) == 2 and period[1] != '':\n result[\"periodic\"] = period[1].strip().split()\n else:\n result[\"periodic\"] = (\"ss\", \"ss\" ,\"ss\")\n \n\t\t # Changes by JPRIEDS to accommodate triclinic boxes\n\t\t # Written 170719\n\t\t if len(result[\"periodic\"]) == 6:\n\t\t\titemstack.extend([(float, float, float)]*3)\n\t\t\tcurrent = \"box\"\n\t\t\tresult[\"periodic\"] = result[\"periodic\"][3:]\n\t\t elif len(result[\"periodic\"]) == 3:\n\t\t\titemstack.extend([(float, float)]*3)\n\t\t\tcurrent = \"box\"\n\t\t else:\n emsg = \"Could not classify periodic bounds: {}\"\n raise ValueError(emsg.format(result[\"periodic\"]))\n elif \"ITEM: ATOMS\" in line:\n itemstack = None\n current = \"atoms\"\n result[\"type\"] = []\n result[\"id\"] = []\n result[\"xyz\"] = []\n \n #The first two headings in the line have \"ITEM: ATOMS\", the\n #rest are usuall id, type, x, y, z, rest...\n headings = line.split()\n extras = len(headings) > 7\n if extras:\n xkeys = []\n xheadings = headings[7:]\n for xhead in xheadings:\n key = \"atom:{}\".format(xhead)\n result[key] = []\n xkeys.append(key)\n \n if openf is None:\n #Close the file since we opened it.\n f.close()\n \n return result", "def parse_drawer(drawer: OrgDrawer.Element, headings: List[OrgNode.Element],\n filepath: str) -> List[LogItem]:\n items = []\n current_item_lines = []\n for line in drawer.content:\n if line.startswith(CLOCK_PREFIX):\n continue\n\n if not line.startswith('- '):\n current_item_lines.append(line)\n continue\n\n time = try_parse_datetime(line)\n if not time:\n current_item_lines.append(line)\n continue\n\n if current_item_lines:\n item = LogItem(current_item_lines, headings, filepath)\n items.append(item)\n\n current_item_lines = [line]\n\n if current_item_lines:\n item = LogItem(current_item_lines, headings, filepath)\n items.append(item)\n\n return items", "def main(cls, abcnodes):\n abclist = []\n\n for node in abcnodes:\n logger = getLogger(str(node))\n trans = node.getParent()\n data = {}\n \n # maya\n data['transformNode'] = str(trans)\n data['shapeNode'] = str(node)\n\n # live scene assignments\n data[\"shadersAssignationAttr\"] = pm.getAttr(\"%s.shadersAssignation\" % node)\n data[\"attributesAttr\"] = pm.getAttr(\"%s.attributes\" % node)\n data[\"displacementsAssignationAttr\"] = pm.getAttr(\"%s.displacementsAssignation\" % node)\n data[\"layersOverrideAttr\"] = pm.getAttr(\"%s.layersOverride\" % node)\n data[\"shadersNamespaceAttr\"] = pm.getAttr(\"%s.shadersNamespace\" % node)\n\n # external files\n data[\"abcShadersAttr\"] = pm.getAttr(\"%s.abcShaders\" % node)\n data[\"jsonFileAttr\"] = pm.getAttr(\"%s.jsonFile\" % node)\n data[\"cacheFileNameAttr\"] = str(pm.getAttr(\"%s.cacheFileNames[0]\" % node))\n\n # transform attributes\n data[\"translateXAttr\"] = pm.getAttr(\"%s.translateX\" % 
trans)\n data[\"translateYAttr\"] = pm.getAttr(\"%s.translateY\" % trans)\n data[\"translateZAttr\"] = pm.getAttr(\"%s.translateZ\" % trans)\n\n data[\"rotateXAttr\"] = pm.getAttr(\"%s.rotateX\" % trans)\n data[\"rotateYAttr\"] = pm.getAttr(\"%s.rotateY\" % trans)\n data[\"rotateZAttr\"] = pm.getAttr(\"%s.rotateZ\" % trans)\n \n data[\"scaleXAttr\"] = pm.getAttr(\"%s.scaleX\" % trans)\n data[\"scaleYAttr\"] = pm.getAttr(\"%s.scaleY\" % trans)\n data[\"scaleZAttr\"] = pm.getAttr(\"%s.scaleZ\" % trans)\n\n if not cls:\n abclist.append(data)\n else:\n abclist.append(alembicHolderClass(data, logger))\n\n return abclist", "def test_get_results_by_race():\n data = race.get_results_by_race(random.randint(1, 3))\n assert type(data) == list\n for lines in data:\n assert type(lines) == dict\n assert len(lines) == 7\n assert \"Points\" in lines.keys() and \"Place\" in lines.keys()\n assert len(data) == 11", "def extract_key_item_data(item_data):\n extracted_item_data = {}\n\n for item_id in item_data:\n key_data = {}\n key_data[\"id\"] = item_id\n key_data[\"name\"] = item_data[item_id][\"name\"]\n key_data[\"image\"] = item_data[item_id][\"image\"][\"full\"]\n key_data[\"gold\"] = item_data[item_id][\"gold\"][\"total\"]\n key_data[\"tags\"] = item_data[item_id][\"tags\"]\n extracted_item_data[item_id] = key_data\n \n return extracted_item_data", "def fullLoad(f_list):\n\n t_1 = datetime.now()\n\n id_list = []\n data_dict = {}\n\n for i, f in enumerate(f_list):\n \n i_t_1 = datetime.now()\n\n r_f = open(f, \"rb\")\n r_data = r_f.read().split(\"\\n\")\n\n ln_r_data = len(r_data)\n\n for j, r in enumerate(r_data[:-1]):\n j_r = json.loads(r)\n if j_r[\"_id\"] not in id_list:\n id_list.append(j_r[\"_id\"])\n date = datetime.strptime(j_r[\"pub_date\"][0:10], \"%Y-%m-%d\")\n if date not in data_dict:\n data_dict[date] = \"\"\n data_dict[date] += \" %s\" % j_r[\"lead_paragraph\"]\n print (j * 100.) 
/ ln_r_data, datetime.now() - t_1, datetime.now() - i_t_1, i \n return data_dict", "def parse_items(lines):\n # pozice bodu\n anchors = [idx for idx, line in enumerate(lines) if PROGRAMME_ITEM_RE.match(line)]\n\n # syrove bloky po jednotlivych bodech\n blocks = []\n for idx in range(len(anchors)-1):\n blocks.append(lines[anchors[idx]:anchors[idx+1]])\n blocks.append(lines[anchors[idx+1]:])\n\n # preciznejsi vyparsovani udaju z bloku\n out = []\n for block in blocks:\n data = [i.strip() for i in block if i.strip()]\n if not len(data):\n continue\n item = {'proposer':None, 'number':None, 'title':None}\n\n # predkladatel\n proposer_found = False\n m = PROGRAMME_PROPOSER_RE.match(data[-1])\n if m:\n item['proposer'] = m.group(1).strip()\n proposer_found = True\n\n # cislo bodu\n m = PROGRAMME_ITEM_RE.match(data[0])\n item['number'] = m.group(2).strip()\n\n # titulek bodu\n title = [m.group(3)]\n if proposer_found:\n title.extend(data[1:-1])\n else:\n title.extend(data[1:])\n item['title'] = u\" \".join([i.strip() for i in title])\n out.append(item)\n\n return out", "def format(self) -> list[dict]: #type: ignore\r\n\r\n msgs: list = []\r\n mtype: str = self.data[\"type\"]\r\n\r\n #Requests from obs-websockets version 4.8.0\r\n\r\n #Configurable Interactions\r\n if mtype == \"showSource\":\r\n msg: dict = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = \"SetSceneItemRender\"\r\n msg[\"source\"] = self.data[\"target\"]\r\n msg[\"render\"] = True\r\n msgs.append(msg)\r\n\r\n elif mtype == \"hideSource\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = \"SetSceneItemRender\"\r\n msg[\"source\"] = self.data[\"target\"]\r\n msg[\"render\"] = False\r\n msgs.append(msg)\r\n\r\n elif mtype == \"toggleSource\":\r\n source = self.obs.getSource(self.data[\"target\"])\r\n if source != None:\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = \"SetSceneItemRender\"\r\n msg[\"source\"] = self.data[\"target\"]\r\n msg[\"render\"] = not source.isVisible() #type: ignore\r\n msgs.append(msg)\r\n\r\n elif mtype == \"showAllSources\":\r\n if self.obs.currentScene != None:\r\n for source in self.obs.currentScene.sources: #type: ignore\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = \"SetSceneItemRender\"\r\n msg[\"source\"] = source.name\r\n msg[\"render\"] = True\r\n msgs.append(msg)\r\n\r\n elif mtype == \"hideAllSources\":\r\n if self.obs.currentScene != None:\r\n for source in self.obs.currentScene.sources: #type: ignore\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = \"SetSceneItemRender\"\r\n msg[\"source\"] = source.name\r\n msg[\"render\"] = False\r\n msgs.append(msg)\r\n\r\n elif mtype == \"transitionToScene\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = \"SetCurrentScene\"\r\n msg[\"scene-name\"] = self.data[\"target\"]\r\n msgs.append(msg)\r\n\r\n elif mtype == \"transitionToPreviousScene\":\r\n if self.obs.previousScene != None:\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = \"SetCurrentScene\"\r\n msg[\"scene-name\"] = self.obs.previousScene.name #type: ignore\r\n msgs.append(msg)\r\n\r\n elif mtype == \"showFilter\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = \"SetSourceFilterVisibility\"\r\n msg[\"sourceName\"] = self.data[\"targetSource\"]\r\n msg[\"filterName\"] = self.data[\"targetFilter\"]\r\n msg[\"filterEnabled\"] = True\r\n msgs.append(msg)\r\n\r\n elif mtype == \"hideFilter\":\r\n msg = {\"message-id\": next(self.id)}\r\n 
msg[\"request-type\"] = \"SetSourceFilterVisibility\"\r\n msg[\"sourceName\"] = self.data[\"targetSource\"]\r\n msg[\"filterName\"] = self.data[\"targetFilter\"]\r\n msg[\"filterEnabled\"] = False\r\n msgs.append(msg)\r\n\r\n elif mtype == \"toggleFilter\":\r\n source = self.obs.getSource(self.data[\"targetSource\"])\r\n if source != None:\r\n _filter = source.getFilter(self.data[\"targetFilter\"]) #type: ignore\r\n if _filter != None:\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = \"SetSourceFilterVisibility\"\r\n msg[\"sourceName\"] = self.data[\"targetSource\"]\r\n msg[\"filterName\"] = self.data[\"targetFilter\"]\r\n msg[\"filterEnabled\"] = not _filter.isVisible() #type: ignore\r\n msgs.append(msg)\r\n\r\n elif mtype == \"editFilter\":\r\n #TODO: Better arbitrary filter support\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = \"SetSourceFilterSettings\"\r\n msg[\"sourceName\"] = self.data[\"targetSource\"]\r\n msg[\"filterName\"] = self.data[\"targetFilter\"]\r\n value = self.data[\"data\"]\r\n if self.data[\"targetSetting\"] == \"hue_shift\":\r\n value = (((value - 0) * (180 - -180)) / (127 - 0)) + -180\r\n msg[\"filterSettings\"] = {self.data[\"targetSetting\"]: value}\r\n msgs.append(msg)\r\n\r\n #General Requests\r\n elif mtype == \"GetVersion\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetAuthRequired\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n msgs.append(msg)\r\n\r\n elif mtype == \"Authenticate\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n msg[\"auth\"] = self.data[\"auth\"]\r\n msgs.append(msg)\r\n\r\n elif mtype == \"SetHeartbeat\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetFilenameFormatting\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetFilenameFormatting\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetStats\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"BroadcastCustomMessage\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetVideoInfo\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"OpenProjector\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"TriggerHotkeyByName\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"TriggerHotkeyBySequence\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n #Media Control\r\n elif mtype == \"PlayPauseMedia\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"RestartMedia\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"StopMedia\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == 
\"NextMedia\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"PreviousMedia\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetMediaDuration\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetMediaTime\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetMediaTime\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"ScrubMedia\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetMediaState\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n #Sources\r\n elif mtype == \"GetMediaSourcesList\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetSourcesList\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetSourceTypesList\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetVolume\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetVolume\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetMute\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetMute\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"ToggleMute\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetAudioActive\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetSourceName\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetSyncOffset\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetSyncOffset\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetSourceSettings\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetSourceSettings\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetTextGDIPlusProperties\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetTextGDIPlusProperties\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetTextFreetype2Properties\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == 
\"SetTextFreetype2Properties\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetBrowserSourceProperties\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetBrowserSourceProperties\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetSpecialSources\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetSourceFilters\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n msg[\"sourceName\"] = self.data[\"target\"]\r\n msgs.append(msg)\r\n\r\n elif mtype == \"GetSourceFilterInfo\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"AddFilterToSource\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"RemoveFilterFromSource\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"ReorderSourceFilter\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"MoveSourceFilter\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetSourceFilterSettings\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetSourceFilterVisibility\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetAudioMonitorType\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetAudioMonitorType\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"TakeSourceScreenshot\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n #Outputs\r\n elif mtype == \"ListOutputs\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetOutputInfo\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"StartOutput\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"StopOutput\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n #Profiles\r\n elif mtype == \"SetCurrentProfile\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetCurrentProfile\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"ListProfiles\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n #Recording\r\n elif mtype == \"GetRecordingStatus\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"StartStopRecording\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n 
elif mtype == \"StartRecording\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"StopRecording\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"PauseRecording\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"ResumeRecording\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetRecordingFolder\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetRecordingFolder\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n #Replay Buffer\r\n elif mtype == \"GetReplayBufferStatus\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"StartStopReplayBuffer\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"StartReplayBuffer\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"StopReplayBuffer\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SaveReplayBuffer\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n #Scene Collections\r\n elif mtype == \"SetCurrentSceneCollection\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetCurrentSceneCollection\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"ListSceneCollections\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n #Scene Items\r\n elif mtype == \"GetSceneItemList\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetSceneItemProperties\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetSceneItemProperties\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"ResetSceneItem\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetSceneItemRender\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetSceneItemPosition\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetSceneItemTransform\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetSceneItemCrop\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"DeleteSceneItem\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"AddSceneItem\":\r\n #Unreleased\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n 
#msgs.append(msg)\r\n\r\n elif mtype == \"DuplicateSceneItem\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n #Scenes\r\n elif mtype == \"SetCurrentScene\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetCurrentScene\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n msgs.append(msg)\r\n\r\n elif mtype == \"GetSceneList\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n msgs.append(msg)\r\n\r\n elif mtype == \"CreateScene\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"ReorderSceneItems\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetSceneTransitionOverride\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"RemoveSceneTransitionOverride\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetSceneTransitionOverride\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n #Streaming\r\n elif mtype == \"GetStreamingStatus\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"StartStopStreaming\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"StartStreaming\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"StopStreaming\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetStreamSettings\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetStreamSettings\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SaveStreamSettings\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SendCaptions\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n #Studio Mode\r\n elif mtype == \"GetStudioModeStatus\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetPreviewScene\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetPreviewScene\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"TransitionToProgram\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"EnableStudioMode\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"DisableStudioMode\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"ToggleStudioMode\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n #Transitions\r\n elif mtype == 
\"GetTransitionList\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetCurrentTransition\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetCurrentTransition\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"SetTransitionDuration\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetTransitionDuration\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n elif mtype == \"GetTransitionPosition\":\r\n msg = {\"message-id\": next(self.id)}\r\n msg[\"request-type\"] = mtype\r\n #msgs.append(msg)\r\n\r\n else:\r\n print(f\"Unknown request with type {mtype}.\")\r\n\r\n return msgs", "def test_unique_train_items(train_items: List[JSONDict]) -> None:\n validated = TrainCollection(items=train_items)\n assert validated.dict() == train_items", "def loopitems(items, db):\n for item in items:\n if 'error' in item.keys():\n saveimage(item, db, 'collection_image_status')\n else:\n if checkfieldunicity(db, item['md5']):\n saveimage(item, db, 'collection_image')\n saveimage(itemstatus(item, error='RAS'),\n db,\n 'collection_image_status')\n else:\n saveimage(itemstatus(item), db, 'collection_image_status')", "def test_items(self):\n obs = self.tester.items()\n self.assertTrue(isinstance(obs, Iterable))\n exp = {('center_name', 'ANL'), ('center_project_name', None),\n ('emp_status', 'EMP'), ('barcodesequence', 'AGCGCTCACATC'),\n ('library_construction_protocol',\n 'This analysis was done as in Caporaso et al 2011 Genome '\n 'research. The PCR primers (F515/R806) were developed against '\n 'the V4 region of the 16S rRNA (both bacteria and archaea), '\n 'which we determined would yield optimal community clustering '\n 'with reads of this length using a procedure similar to that '\n 'of ref. 15. [For reference, this primer pair amplifies the '\n 'region 533_786 in the Escherichia coli strain 83972 sequence '\n '(greengenes accession no. prokMSA_id:470367).] 
The reverse '\n 'PCR primer is barcoded with a 12-base error-correcting Golay '\n 'code to facilitate multiplexing of up to 1,500 samples per '\n 'lane, and both PCR primers contain sequencer adapter '\n 'regions.'), ('linkerprimersequence', 'GTGCCAGCMGCCGCGGTAA'),\n ('target_subfragment', 'V4'), ('target_gene', '16S rRNA'),\n ('run_center', 'ANL'), ('run_prefix', 's_G1_L001_sequences'),\n ('run_date', '8/1/12'), ('experiment_center', 'ANL'),\n ('experiment_design_description',\n 'micro biome of soil and rhizosphere of cannabis plants '\n 'from CA'), ('experiment_title', 'Cannabis Soil Microbiome'),\n ('platform', 'Illumina'), ('samp_size', '.25,g'),\n ('sequencing_meth', 'Sequencing by synthesis'),\n ('illumina_technology', 'MiSeq'), ('sample_center', 'ANL'),\n ('pcr_primers',\n 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT'),\n ('study_center', 'CCME')}\n self.assertEqual(set(obs), exp)", "def procfs_timeline():\n\n return [\n {\n \"timestamp\": \"2021-09-14T12:37:37.168817\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:37.669237\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.170142\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.670338\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.171321\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.671572\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.172503\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.672693\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", 
\"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.173552\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.673815\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.174560\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.674690\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.175441\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.675743\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.176551\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.677307\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.178049\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.678310\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n 
\"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.179120\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.679308\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.180223\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.680468\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.181316\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.681683\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.182522\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.682731\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.183680\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.683812\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n 
\"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.184792\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.685027\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.185709\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.686065\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.186929\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.687190\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.188031\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.688674\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.189489\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.690299\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 
27.59000000000001,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:56.191124\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n ]", "def parseDynamoFeed(data):\n\tdynamoResult = []\n\titems = data['Items']\n\tif items:\n\t\tfor item in items:\n\t\t\tdate = item['date']\n\t\t\tdate = dateutil.parser.parse(date).isoformat(' ').split('+')[0]\n\t\t\tdate = datetime.datetime.strptime( date, \"%Y-%m-%d %H:%M:%S\" )\n\t\t\tname = item['name']\n\t\t\ttext = item['text']\n\t\t\titemResult = copy.deepcopy(templateResult)\n\t\t\titemResult['message'] = text\n\t\t\titemResult['author'] = name\n\t\t\titemResult['datetime'] = date\n\t\t\titemResult['source'] = 'DynamoDB'\n\t\t\tdynamoResult.append(itemResult)\n\treturn dynamoResult", "def test_list(self, array: dict) -> None:\r\n item = read_items(array)\r\n if read_type(item) == 'object':\r\n logger.debug('list -> dict')\r\n self.test_dict(obj=item)\r\n elif read_type(item) == 'array':\r\n logger.debug('list -> list')\r\n self.test_list(array=item)", "def process_item(self, _item: dict):\n _item['coordinates'] = self.process_coordinates(\n _item['coordinates']\n )\n _item['countryName'] = self.process_country_name(\n _item['countryName']\n )\n _item['portName'] = self.process_port_name(\n _item['portName']\n )\n _item['unlocode'] = self.process_unlocode(\n _item['unlocode']\n )\n return _item", "def test_multiple_data():\n response = test_app.post(\"/bkt_service/unwind\", params='''[{\n \"event\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/OutcomeEvent\",\n \"actor\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"student-1462300421838-1\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/lis/Person\",\n \"roles\": [\n \"urn:lti:instrole:ims/lis/Learner\"\n ]\n },\n \"action\": \"http://purl.imsglobal.org/vocab/caliper/v1/action#Graded\",\n \"object\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"attempt-1462300421838-4\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Attempt\",\n \"extensions\": {\n \"assessmentType\": \"Diagnostic Assessment\",\n \"assessmentId\": \"assessment-1462300421838-4\"\n },\n \"count\": 1,\n \"startedAtTime\": \"2016-05-03T21:33:41.844Z\",\n \"endedAtTime\": \"2016-05-03T22:03:41.844Z\"\n },\n \"generated\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"result-1462300421838-4\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"assignableId\": \"assessment-1462300421838-4\",\n \"normalScore\": 80,\n \"totalScore\": 100,\n \"itemResults\": [\n {\n \"@id\": \"item-result-1462300421838-4-1\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"question_type\": \"mcq\",\n \"automarkable\": 1,\n \"score\": 7,\n \"max_score\": 10,\n \"question_reference\": \"c0a3f0c8-eac7-4795-8c7a-adf98e336a7b\",\n \"item_reference\": \"Adaptive_Item2_extract_USMOs\",\n \"sequenceNumber\": 1\n },\n {\n \"@id\": \"item-result-1462300421838-4-2\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"question_type\": \"mcq\",\n \"automarkable\": 0,\n \"score\": 8,\n \"max_score\": 10,\n \"question_reference\": \"5ee295ad-5e8b-413f-9fe6-87038e8e6e42\",\n 
\"item_reference\": \"Adaptive_Item4_extract_USMOs\",\n \"sequenceNumber\": 2\n },\n {\n \"@id\": \"item-result-1462300421838-4-3\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"question_type\": \"mcq\",\n \"automarkable\": 1,\n \"score\": 7,\n \"max_score\": 10,\n \"question_reference\": \"047c4139-a64b-4596-8169-7a294d0c69d7\",\n \"item_reference\": \"Adaptive_Item3_extract_USMOs\",\n \"sequenceNumber\": 3\n },\n {\n \"@id\": \"item-result-1462300421838-4-4\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"question_type\": \"mcq\",\n \"automarkable\": 0,\n \"score\": 10,\n \"max_score\": 10,\n \"question_reference\": \"b7cc7839-63d4-4e12-93ce-f25fad380aaa\",\n \"item_reference\": \"Adaptive_Item1_extract_USMOs\",\n \"sequenceNumber\": 4\n }\n ]\n },\n \"group\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"class-01\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/lis/CourseOffering\",\n \"name\": null,\n \"description\": null,\n \"extensions\": {\n \"pageNumber\": null,\n \"courseCode\": \"course-01\",\n \"calmsCourseId\": \"calms-course-01\",\n \"lessonId\": \"lesson-01\",\n \"platform\": \"D2L\",\n \"classroomTypeId\": \"3500.0\",\n \"activityId\": \"10\",\n \"gradeLevel\": \"8\",\n \"CourseOfferingId\": \"1200.0\",\n \"adaptivewrapperId\": \"\",\n \"schoolYear\": \"2015-20116\",\n \"unitId\": \"3201.0\",\n \"moduleId\": \"1110.0\",\n \"courseId\": \"2550.0\",\n \"assessmentId\": \"4520.0\",\n \"originSystemId\": \"sams\",\n \"businessLineId\": \"1300.0\",\n \"contextId\": \"587279312bf9a9afd947ddab\"\n },\n \"dateCreated\": null,\n \"dateModified\": null,\n \"courseNumber\": null,\n \"academicSession\": null,\n \"subOrganizationOf\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"1001.0\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/w3c/Organization\",\n \"name\": null,\n \"description\": null,\n \"extensions\": {},\n \"dateCreated\": null,\n \"dateModified\": null,\n \"subOrganizationOf\": null\n }\n },\n \"eventTime\": \"2017-01-09T14:21:00Z\"\n }\n }\n ]''')\n assert response.status == '200 OK'\n assert len(response.json) == 4", "def process_metadata_items(self):\n for item_id, item in self.metadata.items():\n assert item_id not in self.processed_metadata, 'Item {} presents twice'.format(item_id)\n self.processed_metadata[item_id] = {}\n for field, field_vals in item['metadata'].items():\n curr_field = ''\n # availability field is always empty\n if field == 'availability' or field == 'url':\n continue\n values = field_vals\n if field == 'availableSizes' and not isinstance(values, list,):\n values = self.repair_size_list(values)\n\n #field_tokens = tokenizer.tokenize(field)\n field_tokens = re.split('_|\\s', field)\n for tok in field_tokens:\n cleaned_tok = self._ATTR2STR[tok.lower()] if tok.lower() in self._ATTR2STR else tok.lower()\n curr_field += cleaned_tok + ' '\n curr_field = curr_field[:-1]\n \n curr_val = ''\n proc_values = []\n if isinstance(values, list,):\n for val in values:\n curr_val = ''\n #value_tokens = tokenizer.tokenize(val)\n value_tokens = re.split('_|\\s', val)\n proc_values.append(' '.join(value_tokens))\n else:\n value_tokens = re.split('_|\\s', values)\n proc_values.append(' '.join(value_tokens))\n\n #metadata JSON files contains different samples having hemLenght field twice.\n # In this case just discard the one with no values.\n if curr_field == 'hem length' and curr_field in self.processed_metadata[item_id]:\n if not 
len(self.processed_metadata[item_id][curr_field]):\n self.processed_metadata[item_id][curr_field] = proc_values\n continue\n assert curr_field not in self.processed_metadata[item_id], 'Field {} presents twice in item {}. Please remove one of them (preferably the empty one)'.format(curr_field, item_id)\n self.processed_metadata[item_id][curr_field] = proc_values", "def on_cam_list(self, data):\n json_data = json.loads(data)\n for k in json_data:\n print (json_data[k])", "def test_sort_data_by_time():\n data = race.read_file_to_list()\n sorted_data = race.sort_data_by_time(data)\n assert data != sorted_data\n assert len(data) == len(sorted_data)\n assert type(sorted_data) == list\n for lines in sorted_data:\n assert type(lines) == dict", "def _format_zone_list(self, instance_list):\n result = []\n if instance_list is not None:\n if \"items\" in instance_list:\n items = instance_list[\"items\"]\n for item in items:\n result.append(self._process_instance(item))\n return result", "def parse_data(paints_list,data):\n paints_list.extend(data)", "def print_all_items_in_dict(all_items):\n if config.output.csv:\n print_all_items_in_dict_for_csv(all_items)\n else:\n print_all_items_in_dict_for_human(all_items)", "def infotodict(seqinfo):\n\n # data = create_key('run{item:03d}')\n # info = {data: []}\n # last_run = len(seqinfo)\n\n \"\"\"\n The namedtuple `s` contains the following fields:\n\n * total_files_till_now\n * example_dcm_file\n * series_id\n * dcm_dir_name\n * unspecified2\n * unspecified3\n * dim1\n * dim2\n * dim3\n * dim4\n * TR\n * TE\n * protocol_name\n * is_motion_corrected\n * is_derived\n * patient_id\n * study_description\n * referring_physician_name\n * series_description\n * image_type\n \"\"\"\n\n t1w = create_key('sub-{subject}/{session}/anat/sub-{subject}_{session}_T1w')\n t2w = create_key('sub-{subject}/{session}/anat/sub-{subject}_{session}_T2w')\n func_rest = create_key('sub-{subject}/{session}/func/sub-{subject}_{session}_task-rest_bold')\n dwi_ap = create_key('sub-{subject}/{session}/dwi/sub-{subject}_{session}_acq-AP_dwi')\n dwi_pa = create_key('sub-{subject}/{session}/dwi/sub-{subject}_{session}_acq-PA_dwi')\n t2star = create_key('sub-{subject}/{session}/dwi/sub-{subject}_{session}_T2star')\n t2w_fatsat = create_key('sub-{subject}/{session}/anat/sub-{subject}_{session}_acq-fatsat_T2w')\n \n info = {t1w: [], t2w: [], func_rest: [], dwi_ap: [], dwi_pa: [], t2star: [], t2w_fatsat: []}\n\n for idx, s in enumerate(seqinfo):\n if (s.example_dcm_file == 'mp_rage_1_mm-00001.dcm'):\n info[t1w].append(s.series_id)\n if ('edti_2mm_cdif45_AP' in s.series_description):\n info[dwi_ap].append(s.series_id)\n if ('edti_2mm_cdif45_PA' in s.series_description):\n info[dwi_pa].append(s.series_id)\n if (s.series_description == 'Sag CUBE T2'):\n info[t2w].append(s.series_id)\n if (s.series_description == 'ORIG Sag CUBE T2'):\n info[t2w_orig].append(s.series_id)\n if ('T2_1.7mm_fat_sat' in s.series_description): \n info[t2w_fatsat].append(s.series_id)\n if (s.series_description == 'Reverse blip EPI 3mm iso'):\n info[t2star].append(s.series_id) \n if (s.series_description == 'Resting EPI 3mm iso RS') and (s.dim3 == 12300):\n info[func_rest].append(s.series_id)\n return info", "def export_json(data):\n keep_keys = ['ability', 'level', 'set', 'slot', 'rarity']\n stats = [\"Atk\", \"AtkP\", \"CChance\", \"CDmg\", \"Def\", \"DefP\", \"Eff\", \"HP\", \"HPP\", \"Res\", \"Spd\"]\n my_inventory = {'processVersion': '1', 'heroes': [], 'items': []}\n for item in data:\n try:\n gear = {}\n i = 1\n for 
key, val in item.items():\n if key in keep_keys:\n gear.update({key: val})\n\n elif key in stats:\n substat = f\"subStat{i}\"\n gear.update({substat: [key, int(val)]})\n i += 1\n\n gear.update({\"mainStat\": [item['main'], item['value']]})\n\n lettersAndDigits = string.ascii_lowercase + string.digits\n item_id = 'dt' + ''.join(random.choice(lettersAndDigits) for l in range(6))\n gear.update({\"locked\": False, \"efficiency\": 0, \"id\": item_id})\n my_inventory['items'].append(gear)\n except Exception as err:\n print(err)\n continue\n return my_inventory", "def run_attributes (ins, exp, run) :\n t0_sec = time()\n list_of_dicts = experiment_info.run_attributes(ins, exp, run)\n #print 'run_attributes for %s %s run:%d, t(sec) = %f' % (ins, exp, run, time()-t0_sec)\n return list_of_dicts", "def print_items(items):\n for item in items:\n print(item)", "def test_pipeline_data(self, doc_list):\n summaries = []\n for item in doc_list:\n summaries.append(self.preprocess(item))\n return summaries", "def display_lists(self):\n for ob in self.scenelist[:]:\n try:\n ob.roster.roster.refresh_from_db()\n ob.roster.refresh_from_db()\n ob.refresh_from_db()\n if ob.roster.roster.name != \"Active\":\n self.caller.player_ob.db.random_scenelist.remove(ob)\n except (AttributeError, TypeError, ValueError):\n pass\n if self.need_to_generate_lists:\n self.generate_lists()\n scenelist = self.scenelist\n claimlist = self.claimlist\n validated = self.validatedlist\n gms = self.gms\n newbies = [ob for ob in self.newbies if ob not in claimlist]\n msg = \"{w@Randomscene Information for this week:{n \"\n if \"online\" in self.switches:\n msg += \"{yOnly displaying online characters.{n\"\n scenelist = [ob for ob in scenelist if ob.show_online(self.caller.player)]\n newbies = [ob for ob in newbies if ob.show_online(self.caller.player)]\n if scenelist:\n msg += \"\\n{wRandomly generated RP partners:{n \"\n msg += list_to_string([ob.key for ob in scenelist])\n if newbies:\n msg += \"\\n{wNew players who can be also RP'd with for credit:{n \"\n msg += list_to_string([ob.key for ob in newbies])\n if gms:\n msg += \"\\n{wGMs for events here that can be claimed for credit:{n \"\n msg += list_to_string(gms)\n if not any((scenelist, newbies, gms)):\n msg += \"\\n{wNo players remain to be claimed.{n\"\n else:\n msg += \"\\n{yReminder: Please only /claim those you have interacted with significantly in a scene.{n\"\n if claimlist:\n msg += \"\\n{wThose you have already RP'd with:{n \"\n msg += list_to_string([ob.key for ob in claimlist])\n if validated:\n msg += \"\\n{wThose you have validated scenes for:{n \"\n masked = dict(self.masked_validated_list)\n msg += list_to_string(\n [ob.key if ob not in masked else masked[ob] for ob in validated]\n )\n if not any((scenelist, newbies, gms, claimlist, validated)):\n msg = \"No characters qualify for @randomscene information to be displayed.\"\n # random RP Tool!\n if (\n not self.caller.db.random_rp_command_this_week\n and not self.caller.db.rp_command_used\n ):\n self.generate_random_command()\n msg += (\n \"\\n|wRandomly chosen Roleplay Tool:|n %s\"\n % self.caller.db.random_rp_command_this_week\n )\n if self.caller.db.rp_command_used:\n msg += \"|y (Already used)|n\"\n self.msg(msg)", "def scraping_episodes(self, serie_data, episodes_list):\n episodes_data = []\n for episode in episodes_list:\n # Se arma este dict para localizar los campos\n # en el json y que sea mas facil procesarlos mas adelante\n epi_details = episode[0]['body']['details']\n epi_dict = {\n 'ParentId': 
serie_data.id,\n 'ParentTitle': serie_data.clean_title,\n 'Id': episode[0]['id'],\n 'Title': epi_details['title'],\n 'Type': 'episode',\n 'JSON': {\n 'Synopsis': epi_details['description'],\n 'Metadata': epi_details['metadata'].replace('\\xa0', ''),\n 'Rating': epi_details['localizedRating']['value'],\n 'Image': epi_details,\n 'Groups': episode[1]['body']['groups'],\n 'SeasonAndNumber': episode[2]['body']['metadata'],\n 'isFree': episode[0]['body']['isFree']\n }\n }\n payload_epi = self.build_payload(epi_dict)\n # Si la serie es original sus episodios también\n payload_epi.is_original = serie_data.is_original\n episodes_data.append(payload_epi)\n payload_epi = payload_epi.payload_episode()\n Datamanager._checkDBandAppend(\n self, payload_epi, self.scraped_epi, self.payloads_epi,\n isEpi=True\n )\n return episodes_data", "def process_item(self, item, spider):\n tmp_dict = {}\n tmp_dict['comments'] = item['comments']\n tmp_dict['referenceName'] = item['referenceName']\n tmp_dict['referenceTime'] = item['referenceTime']\n tmp_dict['productColor'] = item['productColor']\n tmp_dict['productSize'] = item['productSize']\n self.savefile.write(u\"{0}\\n\".format(json.dumps(tmp_dict)))\n #raise DropItem()", "def test_msg15():\n f = Level2File(get_test_data('KTLX20130520_201643_V06.gz', as_file_obj=False))\n data = f.clutter_filter_map['data']\n assert isinstance(data[0][0], list)\n assert f.clutter_filter_map['datetime'] == datetime(2013, 5, 19, 5, 15, 0, 0)", "def debug_print(*items):\n\t\n\tif items and app.config['DEBUG']:\n\t\tprint ' '.join([str(item) for item in items])", "def dictagdur2(kind, fname):\n\n #x = ['movie', 'moviecode', 'offset', 'well', 'agmin', 'agsec', 'agdur', \n #'agtype', 'agcomm', 'escmin', 'escsec', 'escdur', 'esctype', 'escbeh', \n #'esccomm', 'gen', 'date', 'assay', 'fps', 'flyid', 'side', 'moviepart']\n\n d = {}\n y = '1'\n b = []\n dur = []\n \n with open(fname) as f:\n for l in f:\n #print(l)\n adict = agline2(l)\n \n if adict['well'] != y:\n if len(dur) > 0:\n agdurcmd(kind, b, dur, d[gen])\n b = []\n dur = []\n \n if adict['agtype'] != '-' and adict['agtype'] != 'x' and \\\n adict['agdur'] != '':\n b.append(adict['agtype'])\n dur.append(adict['agdur'])\n \n if adict['esctype'] != '' and adict['escdur'] != '':\n b.append(adict['esctype'])\n dur.append(adict['escdur'])\n\n gen = adict['gen']\n #print(gen)\n if gen not in d:\n d[gen] = []\n \n y = adict['well']\n \n agdurcmd(kind, b, dur, d[gen])\n\n return(d)", "def serialize_items_data_v2(items, highlighted_ids=[]):\n items_data = []\n prod_model_existing = set()\n for item in items:\n if item.product_model.name in prod_model_existing:\n continue\n prod_model_existing.add(item.product_model.name)\n item_data = {\n \"name\": item.product_model.name,\n \"img_url_feed_view\": item.product_model.img_url,\n \"img_url_panel_view\": item.img_url_panel_view,\n }\n if item.id in highlighted_ids:\n item_data[\"highlight\"] = True\n if item.product_model.brand:\n item_data[\"brand\"] = item.product_model.brand.name\n items_data.append(item_data)\n return items_data", "def parse_item(self, response):\n self.check_Tor_time()\n print(\"Looking\", response.url)\n # Create the loader using the response\n l = ItemLoader(item=PropertiesItem(), response=response)\n l.default_output_processor = TakeFirst()\n try:\n self.fill_from_Json(l)\n except Exception as e:\n print('exception->', e)\n print('1')\n for node in response.css('div.padding-phone-only > .padding-small-top'):\n try:\n title = node.xpath('div[1]/h6/text()').extract()\n 
except Exception as e:\n print 1, e\n print('title:', title)\n try:\n val = node.xpath('div[2]/text()').extract()\n except Exception as e:\n print 2, e\n try:\n if \"code\" in title[0]:\n l.add_value('unique_id', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Bedrooms\" in title[0]:\n l.add_value('property_rooms_num', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Construction\" in title[0]:\n l.add_value('construction_num', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Modified\" in title[0]:\n l.add_value('on_site_date', node.xpath('div[2]/time/text()').extract()[0],\n MapCompose(\n lambda i: parse(i, fuzzy=True)))\n print(node.xpath('div[2]/time/text()').extract())\n except Exception as e:\n print 3, e\n print('2')\n # Housekeeping fields\n l.add_value('url', response.url)\n # l.add_value('spider', self.name)\n l.add_value('source', self.allowed_domains[0])\n l.add_value('imported_date', datetime.now())\n l.add_value('asset_type', 'realestate')\n l.add_value('transaction_type', 'commercial')\n tp = response.xpath(\n '//*[@id=\\\"breadCrumbs\\\"]/a[1]/text()').extract()[0]\n print('3')\n if \"Sales\" in tp:\n l.replace_value('property_buy_or_rent', \"sale\")\n else:\n l.replace_value('property_buy_or_rent', \"rent\")\n if \"residential\" in tp:\n l.add_value('category_major', \"residential\")\n elif \"commercial\" in tp:\n l.add_value('category_major', \"commercial\")\n else:\n l.add_value('category_major', \"land\")\n # a = l.load_item()\n # print(a)\n # return\n print('4')\n\n print(l)\n return l.load_item()", "def mixed_train_items(train_items: List[JSONDict]) -> List[JSONDict]:\n train_items[1][\"categoryid\"] = 9107252648\n return train_items", "def additional_data_dict(titles: list) -> dict or str:\n try:\n additional_data = {}\n for title in titles:\n url = \"http://www.omdbapi.com/?i=tt3896198&apikey=6b513db6&t=\" + title\n headers = {\"Accept\": \"application/json\"}\n req = requests.get(url, headers=headers)\n api_content = json.loads(req.content.decode('utf-8'))\n # Because of no BoxOffice key in API for movie 'Ben Hur' (ID 68 in db):\n api_content.setdefault('BoxOffice', 'N/A')\n additional_data[title] = {}\n if api_content['imdbRating']:\n additional_data[title]['imdb_rating'] = float(api_content['imdbRating'])\n else:\n additional_data[title]['imdb_rating'] = -1\n if api_content['Runtime'] == 'N/A':\n additional_data[title]['runtime'] = -1\n else:\n additional_data[title]['runtime'] = int(re.sub(r'[^0-9]', '', api_content['Runtime']))\n if api_content['BoxOffice'] == 'N/A':\n additional_data[title]['box_office'] = -1\n else:\n additional_data[title]['box_office'] = int(re.sub(r'[^0-9]', '', api_content['BoxOffice']))\n nominations_oscars = re.search(r'Nominated for (.+?) Oscar', api_content['Awards'])\n if nominations_oscars:\n additional_data[title]['nominations_oscars'] = int(nominations_oscars.group(1))\n else:\n additional_data[title]['nominations_oscars'] = 0\n oscars = re.search(r'Won (.+?) 
Oscar', api_content['Awards'])\n if oscars:\n additional_data[title]['oscars'] = int(oscars.group(1))\n else:\n additional_data[title]['oscars'] = 0\n nominations_others = re.search(r'(\\d+) nomination', api_content['Awards'])\n if nominations_others:\n additional_data[title]['nominations_others'] = int(nominations_others.group(1))\n else:\n additional_data[title]['nominations_others'] = 0\n wins_others = re.search(r'(\\d+) win', api_content['Awards'])\n if wins_others:\n additional_data[title]['wins_others'] = int(wins_others.group(1))\n else:\n additional_data[title]['wins_others'] = 0\n return additional_data\n except KeyError:\n return \"No data about some movie(s). Check data source.\"\n except requests.exceptions.ConnectionError:\n return \"No access. Check internet connection or API is down.\"", "def makeTimeSeriesData(self,cluster,server,items):\n start = 0\n end = len(items)\n step = 1\n values = []\n for key,value in items.iteritems():\n values.append(value)\n \n name = cluster+\",\"+server+\",alert\"\n series = TimeSeries(name, start, end, step, values)\n #for key,value in items:\n return series", "def filter_var_timesteps(list_of_msg_dicts, grib_var_name, grib_level,\n include_analysis=True):\n\n list_of_i = []\n analysis = 'unknown'\n skip_6 = False\n for j, msg_dict in enumerate(list_of_msg_dicts):\n cfsr_var = CFSRVariable(msg_dict)\n if cfsr_var.name != grib_var_name:\n continue\n if cfsr_var.level != grib_level:\n continue\n if (msg_dict['startStep'] == 0) and (msg_dict['endStep'] == 0):\n # assume this is the analysis, expect another instance afterward\n if analysis == 'unknown':\n analysis = 'candidate'\n elif analysis == 'candidate':\n # all is well, we have the 3min spinup afterward, which we\n # ignore. Reset analysis to unknown.\n if include_analysis:\n list_of_i.append(j - 1)\n skip_6 = True\n analysis = 'unknown'\n elif (msg_dict['startStep'] == 1) and (msg_dict['endStep'] == 1):\n if analysis == 'candidate':\n # We are in a typical 0,1,2,3,4,5 timeseries, add the analysis\n list_of_i.append(j - 1)\n analysis = 'unknown'\n elif analysis == 'candidate':\n # the candidate is not the analysis, reset analysis\n analysis = 'unknown'\n if (msg_dict['startStep'] == 6) and (msg_dict['endStep'] == 6):\n if not skip_6:\n list_of_i.append(j)\n elif (msg_dict['startStep'] != 0) or (msg_dict['endStep'] != 0):\n list_of_i.append(j)\n return list_of_i, skip_6", "def parse_data(data, activity_id, activity_start_date):\n data_dict = {}\n final_dict = {}\n for i in data:\n data_dict[i['type']] = i['data']\n\n counter = 1\n nrange = len(data_dict['time'])\n for item in range(1, nrange + 1):\n final_dict[item] = {}\n\n for key, value in data_dict.items():\n counter = 1\n for i in value:\n final_dict[counter][key] = i\n final_dict[counter]['activity_id'] = activity_id\n\n if 'time' in key:\n final_dict[counter]['time'] = final_dict[counter]['time'] + activity_start_date\n final_dict[counter]['time'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(final_dict[counter]['time']))\n\n if 'latlng' in key:\n final_dict[counter]['lat'] = final_dict[counter]['latlng'][0]\n final_dict[counter]['lon'] = final_dict[counter]['latlng'][1]\n final_dict[counter].pop('latlng')\n counter += 1\n\n result_list = [value for key, value in final_dict.items()]\n\n for event in result_list:\n write_to_splunk(index=helper.get_output_index(), sourcetype='strava:activities:stream', data=json.dumps(event))\n\n helper.log_info(f'Added activity stream {activity_id} for {athlete_id}.')\n return True", "def 
animationsToTxt( allAnims, astrListToExport = [] ):\n import motiontools # to avoid cyclic import\n strOut = \"\";\n allAnimExported = [];\n for k,v in allAnims.iteritems():\n if( len(astrListToExport) > 0 ):\n for strExport in astrListToExport:\n if( strExport in k ):\n break;\n else:\n print( \"INF: animationsToTxt: skipping animation '%s'\" % k.encode(\"utf-8\") ); # even if the box is named \"sendPaté\", we can so print it.\n continue;\n allAnimExported.append( k );\n rTotalLength = motiontools.getTimelineDuration( v[1] );\n strOut += \"animation_%s=[\\n\" % k;\n strOut += \" # duration: %5.2fs\\n\" % rTotalLength;\n strOut += \" # Names (%d joint(s)):\\n\" % len(v[0]);\n strOut +=\" \" + str( v[0] ) + \",\\n\";\n strOut += \" # Times:\\n\";\n if( True ):\n # output key info\n strOut += \" # KeyInfo:\";\n for idx, timeArray in enumerate(v[1]):\n if( idx < len(v[0]) and len( timeArray ) > 0 ):\n strOut += \" %s: %d key(s), from: %5.2fs to %5.2fs;\" % (v[0][idx], len(timeArray), timeArray[0], timeArray[-1]);\n else:\n print( \"WRN: animationsToTxt: this case should not been found if exporter has removed empty joint\" );\n if( idx < len(v[0]) ):\n strOut += \" %s: %d key(s), no keyframe!\" % (v[0][idx], len(timeArray) );\n else:\n strOut += \" nothing !?!\";\n strOut += \"\\n\"\n \n strOut +=\" \" + stringtools.floatArrayToText( v[1], 2 ) + \",\\n\";\n strOut += \" # Values:\\n\";\n strOut +=\" \" + stringtools.floatArrayToText( v[2], 2 ) + \",\\n\";\n strOut += \"];\\n\";\n strOut += \"allAnims = [\";\n for k in allAnimExported:\n strOut += \" animation_%s,\" % k;\n strOut += '];\\n';\n strOut += \"dictAnims = {\";\n for k in allAnimExported:\n strOut += \"\\\"%s\\\": animation_%s,\" % (k,k);\n strOut += '};\\n'; \n return strOut;", "def create_scenes_timestamps_csv():\n with open(scenes_json, 'r') as k, open(\"scenes_timestamps.csv\", 'w') as outfile:\n w = csv.writer(outfile)\n data = json.load(k)\n for ep in data[\"episodes\"]:\n s = ep[\"seasonNum\"]\n epNum = ep[\"episodeNum\"]\n scene_num = 0\n for scene in ep[\"scenes\"]:\n start_ts = scene[\"sceneStart\"]\n end_ts = scene[\"sceneEnd\"]\n characters = scene[\"characters\"]\n w.writerow([s, epNum, scene_num, start_ts, end_ts, characters])\n scene_num += 1", "def test_valid_data():\n response = test_app.post(\"/bkt_service/unwind\", params='''[{\n \"event\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/OutcomeEvent\",\n \"actor\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"student-1462300421838-1\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/lis/Person\",\n \"roles\": [\n \"urn:lti:instrole:ims/lis/Learner\"\n ]\n },\n \"action\": \"http://purl.imsglobal.org/vocab/caliper/v1/action#Graded\",\n \"object\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"attempt-1462300421838-4\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Attempt\",\n \"extensions\": {\n \"assessmentType\": \"Diagnostic Assessment\",\n \"assessmentId\": \"assessment-1462300421838-4\"\n },\n \"count\": 1,\n \"startedAtTime\": \"2016-05-03T21:33:41.844Z\",\n \"endedAtTime\": \"2016-05-03T22:03:41.844Z\"\n },\n \"generated\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"result-1462300421838-4\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"assignableId\": \"assessment-1462300421838-4\",\n \"normalScore\": 80,\n \"totalScore\": 100,\n 
\"itemResults\": [\n {\n \"@id\": \"item-result-1462300421838-4-1\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"question_type\": \"mcq\",\n \"automarkable\": 1,\n \"score\": 7,\n \"max_score\": 10,\n \"question_reference\": \"c0a3f0c8-eac7-4795-8c7a-adf98e336a7b\",\n \"item_reference\": \"Adaptive_Item2_extract_USMOs\",\n \"sequenceNumber\": 1\n }\n ]\n },\n \"group\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"class-01\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/lis/CourseOffering\",\n \"name\": null,\n \"description\": null,\n \"extensions\": {\n \"pageNumber\": null,\n \"courseCode\": \"course-01\",\n \"calmsCourseId\": \"calms-course-01\",\n \"lessonId\": \"lesson-01\",\n \"platform\": \"D2L\",\n \"classroomTypeId\": \"3500.0\",\n \"activityId\": \"10\",\n \"gradeLevel\": \"8\",\n \"CourseOfferingId\": \"1200.0\",\n \"adaptivewrapperId\": \"\",\n \"schoolYear\": \"2015-20116\",\n \"unitId\": \"3201.0\",\n \"moduleId\": \"1110.0\",\n \"courseId\": \"2550.0\",\n \"assessmentId\": \"4520.0\",\n \"originSystemId\": \"sams\",\n \"businessLineId\": \"1300.0\",\n \"contextId\": \"587279312bf9a9afd947ddab\"\n },\n \"dateCreated\": null,\n \"dateModified\": null,\n \"courseNumber\": null,\n \"academicSession\": null,\n \"subOrganizationOf\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"1001.0\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/w3c/Organization\",\n \"name\": null,\n \"description\": null,\n \"extensions\": {},\n \"dateCreated\": null,\n \"dateModified\": null,\n \"subOrganizationOf\": null\n }\n },\n \"eventTime\": \"2017-01-09T14:21:00Z\"\n }\n }\n ]''')\n assert response.status == '200 OK'\n assert len(response.json) == 1\n assert response.json[0][\"error\"][\"code\"] == 0", "def print_items(items):\n for item in items:\n print(item)", "def visualise(cut_list): \r\n\tcutlist = json.load(cut_list)\r\n\tmodified_list =[]\r\n\tz_set = 0\r\n\tc_set = 0\r\n\ta_set = 0\r\n\tcut_num = 0\r\n\tfor a in cutlist:\r\n\t\tif a[0] == \"jump\" or a[0] == \"mark\":\r\n\t\t\ta.pop(0)\r\n\t\t\ta = list(map(float,a)) + [z_set]\r\n\t\t\t\r\n\t\t\tif a_set != 0 or c_set != 0:\r\n\t\t\t\ta = rotate_a(a_set,a)\r\n\t\t\t\ta = rotate_c(c_set,a_set,a)\r\n\r\n\t\t\ta = a +[f\"a_set {a_set} c_set {c_set} z_set {z_set:.1f} cut_num {cut_num}\"]\r\n\t\t\tmodified_list.append(a)\r\n\r\n\t\telif a[0] == \"z_abs\":\r\n\t\t\tz_set = float(a[1])\r\n\t\t\tcut_num += 1\r\n\t\telif a[0] == \"c_abs\":\r\n\t\t\tc_set = float(a[1])\r\n\t\telif a[0] == \"a_abs\":\r\n\t\t\ta_set = float(a[1])\r\n\r\n\t\telif a[0] == \"z_rel\" or a[0] == \"z_step\":\r\n\t\t\tz_set = z_set + float(a[1])\r\n\t\telif a[0] == \"c_rel\" or a[0] == \"c_step\":\r\n\t\t\tc_set = c_set + float(a[1])\r\n\t\telif a[0] == \"a_rel\" or a[0] == \"a_step\":\r\n\t\t\ta_set = a_set + float(a[1])\r\n\t\telse:\r\n\t\t\tpass\r\n\tdf = pd.DataFrame(modified_list, columns = [\"x\",\"y\",\"z\",\"layer\"])\r\n\tfig = px.line_3d(df,\"x\",\"y\",\"z\",color=\"layer\")\r\n\t#fig.update_layout(scene_aspectmode = \"data\")\r\n\tfig.show()", "def test_correct_train_item(train_items: List[JSONDict]) -> None:\n validated = TrainItem(**train_items[0])\n assert validated.dict() == train_items[0]", "def loadData(self, actions):\n # begin to clear the scene\n self.scene.clear()\n self.scene.drawGrid()\n \n # and draw all items\n maxItemId = self.itemId\n for graphicalItem in actions:\n\n # extract item info\n itemType = int(graphicalItem['item-type'])\n 
itemId = graphicalItem['item-id']\n if sys.version_info > (3,): # py3 support\n graphicalItem['item-text'] = graphicalItem['item-text']\n else:\n graphicalItem['item-text'] = graphicalItem['item-text'].decode('utf8')\n itemText = graphicalItem['item-text']\n posX = float(graphicalItem['pos-x'])\n posY = float(graphicalItem['pos-y'])\n itemData = graphicalItem['item-data']\n\n\n # define the color of the item\n color = self.getItemColor(itemType=itemType)\n \n # add item in first\n self.addItem( itemType=itemType, itemId=itemId, itemText=itemText, \n itemColor=QBrush(color), itemPos=QPointF(posX,posY), itemData=itemData )\n \n # kept the max id\n if int(itemId) > maxItemId:\n maxItemId = int(itemId)\n \n self.itemId = maxItemId\n\n # endly draw all arrows\n for curItem in self.scene.items():\n for saveItem in actions:\n if not isinstance(curItem, DiagramItem):\n continue\n if curItem.itemId == int(saveItem['item-id']):\n if 'item-links' in saveItem:\n if isinstance(saveItem['item-links'], dict):\n saveItem['item-links'] = [saveItem['item-links']]\n for lnk in saveItem['item-links']:\n itemId = lnk['next-item-id']\n toHotspotId = lnk['to-hotspot-id']\n fromHotspotId = lnk['from-hotspot-id']\n \n endItem = self.findItem(id=itemId)\n if endItem is not None:\n self.trace( \"Arrow: %s -> %s\" % (fromHotspotId,toHotspotId) )\n arrow = Arrow(curItem, endItem, toHotspotId=toHotspotId, fromHotspotId=fromHotspotId)\n arrow.setColor(self.scene.myLineColor)\n curItem.addArrow(arrow)\n endItem.addArrow(arrow)\n arrow.setZValue(-1000.0)\n self.scene.addItem(arrow)\n arrow.updatePosition()", "def transform_data(titles, people):\n \n movies = list()\n\n for t in titles:\n title = t['title']\n year = t['year']\n director = random.choice(people)\n producer = random.choice(people)\n actor = random.choice(people)\n castcrew = {'director' : director, 'producer': producer, 'actor': actor}\n movies.append((title, year, json.dumps(castcrew)))\n\n return movies", "def test_scenes_get(self):\n pass", "def parseArray(self, data):\n self.title = data[0]\n self.director = data[1]\n self.cast = data[2]\n self.producer = data[3]\n self.writer = data[4]\n self.country = data[5]\n self.language = data[6]\n self.year = data[7]\n self.genres = data[8]\n self.votes = data[9]\n self.rating = float(data[10])\n self.runtime = data[11]\n self.plot = data[12]\n self.coverUrl = data[13]", "def print_room_items(room):\r\n room_items = room[\"items\"]\r\n if (len(room_items) != 0):\r\n return \" There is \" + list_of_objects(room_items) + \" here.\"\r\n else:\r\n return \" There are no items here.\"", "def dictionary_of_metrics(items):\n \n np_list = np.array(items) #create an array of list to use numpy functions on list\n metric_dict = {'mean': np.mean(np_list).round(2),\n 'median': np.median(np_list).round(2),\n 'var': np.var(np_list, ddof=1).round(2),\n 'std': np.std(np_list, ddof=1).round(2),\n 'min': np.min(np_list).round(2),\n 'max': np.max(np_list).round(2),} #create a dictionary that calculates the five metrics\n \n return metric_dict #return result as a dictionary", "def read_data(in_f):\n with io.open(in_f, 'r', encoding='utf-8') as json_data:\n data = json.load(json_data)\n for show in data:\n show_id = show['id']\n for id_s, scene in enumerate(show['scenes']):\n for id_t, talk in enumerate(scene):\n if 'meta' in talk: continue\n text = talk['text']\n if text.startswith('['):\n continue\n yield (text, show_id, id_s, id_t)", "def infotodict(seqinfo):\n\n t1 = create_key('anat/sub-{subject}_run-{item:02d}_T1w')\n 
rest_fmri_ap = create_key('func/sub-{subject}_dir-ap_task-rest_run-{item:02d}_bold')\n rest_topup_ap = create_key('func/sub-{subject}_dir-ap_run-{item:02d}_bold')\n rest_topup_pa = create_key('func/sub-{subject}_dir-pa_run-{item:02d}_bold')\n fmap_rest_magnitude1 = create_key('fmap/sub-{subject}_run-{item:02d}_magnitude1')\n fmap_rest_phasediff = create_key('fmap/sub-{subject}_run-{item:02d}_phasediff')\n\n # Create an empty dictionary called info for each key\n\n info = {t1: [],\n rest_fmri_ap: [],\n rest_topup_ap: [],\n rest_topup_pa: [],\n fmap_rest_magnitude1: [],\n fmap_rest_phasediff: [],\n }\n\n # Loop over each sequence. Use if statements to determine which sequences should be linked to which key\n\n for idx, s in enumerate(seqinfo):\n\n if (('MPRAGE_GRAPPA2' in s.series_id) and\n ('tfl3d1_16ns' in s.sequence_name) and\n (s.dim3 == 192) and\n (s.dim4 == 1)):\n info[t1] = [s.series_id]\n\n if (('BOLD_resting 4X4X4 A>>P' in s.series_id) and\n ('epfid2d1_64' in s.sequence_name) and\n (s.dim3 == 35) and\n (s.dim4 == 190)):\n info[rest_fmri_ap] = [s.series_id]\n\n if (('rest_topup_A>>P' in s.series_id) and\n ('epse2d1_64' in s.sequence_name) and\n (s.dim3 == 140) and\n (s.dim4 == 1)):\n info[rest_topup_ap] = [s.series_id]\n\n if (('rest_topup_P>>A' in s.series_id) and\n ('epse2d1_64' in s.sequence_name) and\n (s.dim3 == 140) and\n (s.dim4 == 1)):\n info[rest_topup_pa] = [s.series_id]\n\n if (('Field_mapping 4X4X4 A>>P' in s.series_id) and\n ('fm2d2r' in s.sequence_name) and\n (s.dim3 == 35) and\n (s.dim4 == 1) and\n (s.TE == 4.92)):\n info[fmap_rest_magnitude1] = [s.series_id]\n\n if (('Field_mapping 4X4X4 A>>P' in s.series_id) and\n ('fm2d2r' in s.sequence_name) and\n (s.dim3 == 35) and\n (s.dim4 == 1) and\n (s.TE == 7.38)):\n info[fmap_rest_phasediff] = [s.series_id]\n\n return info", "def printable_item(item):\n printable = {}\n printable['validation'] = item['validation']\n printable['name'] = item['name']\n printable['optional'] = item['optional']\n if 'define' in item:\n printable['define'] = item['define']\n return printable", "def test_format_of_generated_msgs():\n for msg in it.islice(generate_msgs(), 0, 5):\n message = Message.parse(msg) # checks the json keys have the right names\n assert type(message.timestamp) is datetime\n assert type(message.power) is int", "def get_items(data, requisites, formatted):\n returndata = \"\"\n traits = requisites['trait']\n allergens = requisites['allergens']\n\n if formatted:\n prefix = '\\t'\n suffix = '\\n'\n else:\n prefix = ''\n suffix = ', '\n\n for course in data['menu']['meal']['course']:\n item_data = []\n datatype = type(course['menuitem'])\n\n if datatype is list:\n item_data += course['menuitem']\n else:\n item_data.append(course['menuitem'])\n\n for item in item_data:\n if check_item_specifications(item, traits, allergens) and 'No Service at this Time' not in item['name']:\n returndata += (prefix + (item['name']).rstrip(', ') + suffix)\n\n return returndata", "def test_load_jsonlines():\n\n res_file_name = 'test_fooofgroup_res'\n\n for data in load_jsonlines(res_file_name, TEST_DATA_PATH):\n assert data", "def get_shot_items(self, context: bpy.types.Context) -> List[Tuple[str, str, str]]:\n result = []\n self._ensure_shot_data(context)\n sequences: Dict[str, List[Shot]] = defaultdict(list)\n for shot in self.shots:\n if not shot.is_valid():\n continue\n sequences[shot.sequence_code].append(shot)\n\n sorted_sequences = sorted(sequences.keys())\n for sequence in sorted_sequences:\n result.append((\"\", sequence, sequence))\n for 
shot in sorted(sequences[sequence], key=lambda x: x.name):\n result.append((shot.name, self.__format_shot_name(\n shot), shot.name))\n\n return result", "def parse_menu_items(menu_items: List[dict]) -> List[str]:\n\n def parse_menu_item(menu_item: dict) -> str:\n text = menu_item.get(\"text\").strip()\n\n allergens = menu_item.get(\"allergens\")\n allergen_emojis = []\n if allergens:\n for allergen in allergens:\n allergen_key = allergen.get(\"slug\")\n emoji = ALLERGEN_MAP.get(allergen_key)\n if emoji:\n allergen_emojis.append(emoji)\n\n return f'{text} {\" \".join(allergen_emojis)}'.strip()\n\n handlers = {\n \"menu_title\": lambda text: f\"\\n*{text}*\",\n \"menu_description\": lambda text: f\"*{text}*\",\n \"menu_item\": parse_menu_item,\n }\n\n parsed_items = []\n\n for item in menu_items:\n # acf_fc_layout conveniently tells us how to format each row\n item_type = item.get(\"acf_fc_layout\")\n item_text = item.get(item_type)\n parsed_item = prettify(handlers[item_type](item_text))\n parsed_items.append(parsed_item)\n\n return parsed_items", "def data_load(json_data: Dict) -> Tuple[AuxEffects, str]:\n auxleds = AuxEffects()\n warning = \"\"\n for led in json_data.get(\"LedGroups\", []):\n try:\n ledgroup = LedGroup(**led)\n ledgroup.Name = str(ledgroup.Name)\n ledgroup.Leds = [str(led) for led in ledgroup.Leds]\n auxleds.LedGroups.append(ledgroup)\n LedGroup.verify_length(led)\n except Exception:\n warning += LedGroup.creation_error(led, sys.exc_info()[1].args[0])\n for sequencer in json_data.get(\"Sequencers\", []):\n try:\n name, group, sequence = sequencer.values()\n auxleds.Sequencers.append(Sequencer(Name=str(name), Group=str(group)))\n except Exception:\n warning += Sequencer.creation_error(sequencer, sys.exc_info()[1].args[0])\n\n for step in sequencer.get(\"Sequence\", []):\n current_sequence = auxleds.Sequencers[-1].Sequence\n if \"Repeat\" not in step:\n try:\n new_step = Step(**step)\n new_step.Name = str(new_step.Name)\n current_sequence.append(new_step)\n except Exception:\n warning += Step.creation_error(step, sys.exc_info()[1].args[0])\n else:\n try:\n new_repeat = Repeater(**step['Repeat'])\n new_repeat.StartingFrom = str(new_repeat.StartingFrom)\n current_sequence.append(new_repeat)\n except Exception:\n warning += Repeater.creation_error(step, sys.exc_info()[1].args[0])\n auxleds.Sequencers[-1].remove_duplicates()\n return auxleds, warning", "def extract_movies(dom):\n\n # extract data per movie\n movies = dom.find_all('div', class_ = 'lister-item mode-advanced')\n\n # list to store scraped data\n movielist = []\n\n for movie in movies:\n\n # append extracted data to this dict\n moviedict = {}\n\n # scrape titles and add to dict\n moviedict['title'] = movie.h3.a.text\n\n # scrape ratings and add to dict\n moviedict['rating'] = float(movie.strong.text)\n\n # scrape year of release and add to dict\n year = movie.h3.find('span', class_ = 'lister-item-year text-muted unbold')\n moviedict['year'] = re.findall('\\d+', year.text.strip('()'))[0]\n\n # scrape actors and add to dict\n actors = movie.find_all(href=re.compile(\"adv_li_st\"))\n actorlist = []\n for actor in actors:\n actorlist.append(actor.text)\n actorstring = ', '.join(actorlist)\n moviedict['actors'] = actorstring\n\n # scrape runtime and add to dict\n moviedict['runtime'] = movie.p.find('span', class_ = 'runtime').text.split(' ')[0]\n movielist.append(moviedict)\n\n\n # ADD YOUR CODE HERE TO EXTRACT THE ABOVE INFORMATION ABOUT THE\n # HIGHEST RATED MOVIES\n # NOTE: FOR THIS EXERCISE YOU ARE ALLOWED (BUT NOT 
REQUIRED) TO IGNORE\n # UNICODE CHARACTERS AND SIMPLY LEAVE THEM OUT OF THE OUTPUT.\n\n return movielist # REPLACE THIS LINE AS WELL IF APPROPRIATE" ]
[ "0.5740542", "0.56621575", "0.5524638", "0.5478715", "0.54087716", "0.53677016", "0.53536004", "0.52064437", "0.51765895", "0.5169679", "0.51359755", "0.51294976", "0.5126739", "0.5117502", "0.51156247", "0.511032", "0.49845767", "0.49424937", "0.49424642", "0.49300858", "0.48953164", "0.48901466", "0.48895273", "0.4884608", "0.4816748", "0.48146847", "0.48141533", "0.48104206", "0.48055044", "0.48018357", "0.47936383", "0.47929245", "0.47897688", "0.4782784", "0.4781078", "0.4752238", "0.47443053", "0.47442904", "0.47345287", "0.47331196", "0.47281495", "0.47277862", "0.47258022", "0.4694716", "0.46922803", "0.4692115", "0.4691755", "0.46907014", "0.4688784", "0.46774563", "0.4673838", "0.4655309", "0.46536347", "0.46428013", "0.46362406", "0.46279526", "0.46271127", "0.4622944", "0.46109527", "0.46065423", "0.4605658", "0.45966816", "0.45942962", "0.45908818", "0.45853543", "0.45840937", "0.45817304", "0.45778358", "0.45765597", "0.4571795", "0.4571031", "0.4570772", "0.45686162", "0.45626962", "0.45566404", "0.45554602", "0.455539", "0.45533255", "0.4550383", "0.45500904", "0.4543565", "0.45429704", "0.45347905", "0.45199108", "0.4514316", "0.44992656", "0.44969615", "0.44941097", "0.44885668", "0.44881514", "0.44827315", "0.4478914", "0.44784385", "0.44718412", "0.4470968", "0.44665524", "0.4463642", "0.446243", "0.446075", "0.44568416" ]
0.64365894
0
Return a list of all live Python objects, not including the list itself.
def get_all_objects():
    gc.collect()
    gcl = gc.get_objects()
    olist = []
    seen = {}
    # Just in case:
    seen[id(gcl)] = None
    seen[id(olist)] = None
    seen[id(seen)] = None
    # _getr does the real work.
    _getr(gcl, olist, seen)
    return olist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist", "def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist", "def _get_all_tracked_objects(self):\n all = []\n for obj in gc.get_objects():\n if any([mod.is_module_object(obj) for mod in self.tracked_modules]):\n all.append(TrackedObject(obj))\n return all", "def obj_list(self):\n return self._obj_list", "def objects (self):\n return InternalObjectList (self)", "def objects(self):\n\t\treturn self._objects", "def getinstances(cls):\n\t\t\tdead = set()\n\t\t\tfor ref in cls._instances:\n\t\t\t\tobj = ref()\n\t\t\t\tif obj is not None:\n\t\t\t\t\tyield obj\n\t\t\t\telse:\n\t\t\t\t\tdead.add(ref)\n\t\t\tcls._instances -= dead", "def all_objects(self) -> List[StorageObject]:\n return [item for item in self._store.values()]", "def get_downstream_objects(obj):\n # gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n # seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr([obj], olist, seen)\n return olist", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def objects(self):\r\n return self._objects", "def all(self):\n return (self.__objects)", "def get_all_cached_instances(cls):\n return list(cls.__dbclass__.__instance_cache__.values())", "def get_objects(self):\n return self._objects", "def list(self):\n return self.cell.objects+self.cell.tempObjects", "def list(self) -> list:\n return list(self)", "def all(self):\n return list(self)", "def get_greenlets(cls):\n return { obj for obj in gc.get_objects() if isinstance(obj, greenlet) and not obj.dead }", "def hbObjects(self):\r\n return self.__hbObjs", "def list(self):\n return self._observe_list", "def list_objects(self, path):\n return [x for x in self.list_objects_generator(path)]", "def zombies(self):\r\n # replace with an actual generator\r\n return (zombie for zombie in self._zombie_list)", "def get_leaks(self):\n _run_garbage_collection()\n\n remaining_objects = self._get_all_tracked_objects()\n remaining_objects = self._remove_initial_objects_from_list(remaining_objects)\n\n return remaining_objects", "def GetObjects(self): \r\n return self.model.GetObjects()", "def watch_list(self) -> list:\n return []", "def dataObjects(self):\n\t\treturn self._objects", "def get_all_refobjs(self, ):\n return cmds.ls(type=\"jb_reftrack\")", "def list(self):\r\n return List(self)", "def list(self):\r\n return List(self)", "def list(self):\r\n return List(self)", "def getListOfAllInstantiatedElements(self):\n return _libsbml.Submodel_getListOfAllInstantiatedElements(self)", "def objects(self):", "def get_object (self) :\n\n # object is a weak_ref, and may have been garbage collected - we simply\n # return 'None' then\n return self._object ()", "def active_objects(self):\n return self._active_objects", "def list_objects(remote):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_ListObjects()\n remote.runCommand(cmd1)\n objects = mmapi.vectori()\n 
cmd1.GetSceneCommandResult_ListObjects(key1, objects)\n return vectori_to_list(objects)", "def fetch_all(self):\n return list(iter(self))", "def get(self):\r\n\t\treturn list(self)", "def get_mem(self) -> list:\n return self.__mem", "def _get_embedded_objects(self):\n return [getattr(self, name) for name, field in self._get_fields().items() if isinstance(field, fields.Object)]", "def list():", "def list():", "def zombies(self):\n return (_ for _ in self._zombie_list)", "def list(cls):\n return [cls.__dict__.get(name) for name in dir(cls) if (\n not callable(getattr(cls, name)) and not name.startswith(\"_\")\n )]", "def rtsobjects():\n pass", "def getAll(self):\n return self.__lst", "def iter_context_objects(self):\n use_gevent = is_gevent_enabled()\n use_context = is_context_enabled()\n\n if use_context:\n tid = context_get_ident()\n elif use_gevent:\n tid = greenlet_get_ident()\n else:\n tid = thread_get_ident()\n\n objects = self._cache.get(tid)\n if objects is None:\n if len(self._cache) > _MAX_CONTEXT_OBJECT_CACHE:\n self._cache.clear()\n objects = self._global[:]\n objects.extend(getattr(self._thread_context, \"stack\", ()))\n\n if use_gevent:\n objects.extend(getattr(self._greenlet_context, \"stack\", ()))\n\n if use_context:\n objects.extend(self._context_stack.get([]))\n\n objects.sort(reverse=True)\n objects = [x[1] for x in objects]\n self._cache[tid] = objects\n return iter(objects)", "def objects_rst(self):\n return [_.as_rst for _ in self.objects]", "def getActiveObjects(doc):\n lst = list()\n op = doc.GetFirstObject()\n while op:\n if op.GetBit(c4d.BIT_ACTIVE) == True: \n lst.append(op)\n op = Helpers.getHNext(op)\n return lst", "def list() -> List:\n pass", "def zombies(self):\n return (zombie for zombie in self._zombie_list)", "def all(cls):\n return []", "def list(self):\n return self._get_list()", "def objects_in_use(self):\n return set()", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def list(self):", "def get_list(self):\n a = []\n l = self\n while l.is_block():\n a.append(l.field(0))\n l = l.field(1)\n return a", "def get_pyweakrefs(obj: typing.Any) -> list[ReferenceType]:\r\n seq = _reference_id_registry.get(id(obj), [])\r\n return [seq[0] for item in seq]", "def object_here(obj=None): #py:object_here\n if obj is not None:\n ans = RUR._object_here_(obj)\n else:\n ans = RUR._object_here_()\n return list(ans) # convert from JS list-like object to proper Python list", "def __iter__(self):\n return (x for x in vars(self))", "def objects():\n subclasses = StorableObject.descendants()\n return {subclass.__name__: subclass for subclass in subclasses\n if not subclass.__module__.startswith(\n 'openpathsampling.experimental.storage'\n )}", "def get_all(self):\n return [_ for _ in self]", "def to_list(self):\n _return = []\n pointer = self.first\n while pointer is not None:\n _return.append(pointer.data)\n pointer = pointer.next\n return _return", "def dump_objects():\n pass", "def managed_objects(self):\n return self._managed_object_list", "def objects(self):\n if not self._objects:\n id_set = {}\n for x in self.addition_events():\n if 'id' in x: id_set[UUID(x['id'])] = 1\n self._objects = id_set.keys()\n\n return self._objects", "def list_instances(self):\n # list instances\n self._list_instances()", "def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list", "def _unpickle_cached_list(cls, *args, 
**kwargs):\n new_list = cls(*args, **kwargs)\n new_list._unpack = True\n return new_list", "def collect_all(self) -> list:\n raise NotImplementedError()", "def getList(self):\n\treturn self.list", "def get_all_object_classes(cls) -> Dict[str, Type[objects.BaseObject]]:\n cls._refresh_registry()\n return copy.deepcopy(cls.objects_dict)", "def iter(self):\n return []", "def _get_subobjects(self) -> Iterable[SymbolicObject]:\n\n return self._subobjects", "def query_all(cls)->List:\n database.cursor.execute(\"SELECT * FROM {}\".format(cls.table_name))\n items = database.cursor.fetchall()\n return [cls.to_object(item) for item in items]", "def all(self):\n return list(self.iterator())", "def get_all_objects(self, table_name: str) -> list:\n print(f\"INFO Get all objects from table: {table_name}\")\n\n if not self.mydb:\n print(f\"INFO Conn ong is empty. Opening new connection...\")\n self.open_connection()\n\n mycursor = self.mydb.cursor()\n\n mycursor.execute(f\"SELECT * FROM {table_name}\")\n\n myresult = mycursor.fetchall()\n\n res = []\n for x in myresult:\n res.append(x)\n print(f\"INFO Got items: {len(res)}\")\n return res", "def recall_objects(self):\n statement = self.ask(\"neo\", \"sql_statement\")\n conn = sqlite3.connect('neo_test.db')\n\n cursor = conn.cursor()\n\n cursor.execute(statement)\n\n self.short_term_memory = cursor.fetchall()\n conn.close()", "def getList(self):\n pass", "def __iter__(self):\n return iter(vars(self.obj))", "def list():\n\n return cache.codeTableList()", "def getFrameList(self):\n with self.frameLock:\n return list(self.frameList)", "def get_all(self):\n\n return self._items[:]", "def all(self):\n return FileStorage.__objects", "def all(self):\n return FileStorage.__objects", "def all(self):\n return FileStorage.__objects", "def _list(self):\n raise NotImplementedError", "def list_refs(self):\n pass", "def get_class_list(self):\n t = []\n for cls in self.classes:\n if not self.is_opaque(cls.classobj):\n t.append(cls)\n elif cls.parents or cls.childs:\n t.append(cls)\n \n return t", "async def as_list(self):\n l = []\n async for item in self:\n l.append(item)\n return l", "def list() -> Iterator['Wall']:\n pass", "def all(self):\n return self[:]", "def ole_objects(self):\n return self.container['ole_objects']", "def get_non_inheriting_objects(self):\n return get_non_inheriting_objects(self)", "def getEntireASGList( self ):\r\n return self.__trackASG.keys()", "def dump_objects(self):\n #print 'Object Count: ', self.object_store.len()\n \n for item in self.object_store:\n print 'Object Name: ', item.__dict__['Name'], ' LocalID: ', item.__dict__['LocalID']" ]
[ "0.736436", "0.729643", "0.6785282", "0.6629329", "0.65524584", "0.6406", "0.6404724", "0.63662046", "0.6298751", "0.62828684", "0.62828684", "0.62828684", "0.62828684", "0.62828684", "0.62828684", "0.626183", "0.6255329", "0.6242195", "0.61547345", "0.6152571", "0.6122982", "0.6087435", "0.6062149", "0.6041642", "0.6034878", "0.60320884", "0.5966505", "0.5945022", "0.5939678", "0.5932139", "0.5890966", "0.58778864", "0.58620405", "0.58620405", "0.58620405", "0.58594996", "0.5853227", "0.5849852", "0.5827", "0.58219624", "0.58153903", "0.5813434", "0.58132106", "0.5789247", "0.5787983", "0.5787983", "0.57859606", "0.57778066", "0.5755923", "0.5753423", "0.573601", "0.57293206", "0.5722558", "0.571294", "0.5693527", "0.56932133", "0.56861526", "0.5685928", "0.5677742", "0.56744176", "0.56656563", "0.5653068", "0.56408864", "0.5636291", "0.5630084", "0.56252307", "0.56233555", "0.56026584", "0.55965334", "0.55950373", "0.5594602", "0.5587549", "0.5587212", "0.55803365", "0.55719936", "0.5562401", "0.5558042", "0.55539006", "0.55391455", "0.5536995", "0.55241853", "0.5521396", "0.55138123", "0.551199", "0.5505615", "0.5501971", "0.5483231", "0.54825854", "0.54825854", "0.54825854", "0.5479929", "0.5477785", "0.5476038", "0.5470062", "0.5464833", "0.5463903", "0.54621625", "0.54517996", "0.54489696", "0.5446637" ]
0.7565201
0
Convert um to m.
def convert_units(self):
    for prod in ("ier", "ier_inc_rain"):
        self.data[prod].data[:] /= 1e6
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def NM2m(NM):\n return NM * 1852", "def m_to_mm(meters):\n return meters * 1000.0", "def nm2m(self):\n return self._nm2m", "def m_to_mm(): \n # Set blender unit in mm\n bpy.context.scene.unit_settings.scale_length = 0.001\n bpy.context.scene.unit_settings.length_unit = 'MILLIMETERS'", "def mm_to_m(millimeters):\n return millimeters / 1000.0", "def km2m(km):\n return km * 1000", "def human_m(v):\n if v < 1e-2:\n return (v*1.0e3, 'mm')\n if v < 1:\n return (v*1.0e2, 'cm')\n if v < 1000:\n return (v, 'm')\n return (v/1.0e3, 'km')", "def distance2m(d, unit):\n if unit == UOM_M:\n d_m = d\n elif unit == UOM_KM:\n d_m = d * 1000\n elif unit == UOM_F:\n d_m = d * F_FEET2M\n elif unit == UOM_SM:\n d_m = d * F_SM2M\n elif unit == UOM_NM:\n d_m = d * F_NM2M\n \n return d_m", "def nu_to_M(nu, ecc):\n if ecc > 1:\n F = nu_to_F(nu, ecc)\n M = F_to_M(F, ecc)\n else:\n E = nu_to_E(nu, ecc)\n M = E_to_M(E, ecc)\n return M", "def vm(x):\r\n return str(x) + 'mm'", "def inches_to_mm(inches):\n\tmm=inches*25.4\n\treturn mm", "def inches_to_mm(inches):\n mm = inches * 25.4\n return mm", "def molar_mass_amu():\n return Equivalency([(si.g / si.mol, misc.u)], \"molar_mass_amu\")", "def SM2m(sm):\n return sm * 1609.344", "def mi_to_m(radius):\n return int(float(radius) * 1609.34)", "def yymm(self) -> str:\n if self.is_old_style:\n numeric_part = self.split('/', 1)[1]\n yy = numeric_part[0:2]\n mm = numeric_part[2:4]\n else:\n yy = self[:2]\n mm = self[2:4]\n return f'{yy}{mm}'", "def mm(self):\n return '%02d' % self._month", "def convert_mu_Tau(mu,mef):\n m_el = 9.109e-31 #kg\n meff = m_el*mef #kg\n q = 1.6e-19 #C\n g = q/(meff*mu) # en s-1 (use E=mc2 : [C][V]=[M][L]2[T]2)\n mu_cm = mu/(3e8*1e2)\n return mu_cm", "def __convert_to_mm(self, value, in_devunit=True):\r\n if in_devunit:\r\n value /= self.DEVUNIT_RATIO\r\n return value", "def feet2m(feet):\n return feet * 0.3048", "def to_ms(self, bpm):\n raise NotImplementedError", "def getM(self):\r\n return self.M", "def _parse_unit(unit: str) -> str:\n if unit == 'm_v':\n return 'm'\n return unit", "def M(self):\n return self._properties['M']", "def unit_converter(val, from_u, to_u):\n\tconverter = {'b':0, 'k':1, 'm':2, 'g':3, 't':4}\n\tif converter[from_u] < converter[to_u]:\n\t\tval = float(val)\n\t\tfor _ in range(converter[to_u] - converter[from_u]):\n\t\t\tval = val/1024\n\telse:\n\t\tfor _ in range(converter[from_u] - converter[to_u]):\n\t\t\tval = val * 1024\n\t\t\t\n\treturn val", "def clean_unit(unit):\n return 'M' if unit.lower() == 'month' else unit[0].lower()", "def to_meters(d, d_unit):\n if d_unit == UOM_M:\n dm = d\n elif d_unit == UOM_KM:\n dm = d * 1000\n elif d_unit == UOM_FEET:\n dm = feet2m(d)\n elif d_unit == UOM_SM:\n dm = SM2m(d)\n elif d_unit == UOM_NM:\n dm = NM2m(d)\n return dm", "def uom(self):\n return self.__uom", "def _mol_to_mal(mol):\n mal = []\n for mode_number, mode_occupation in enumerate(mol):\n mal += [mode_number] * mode_occupation\n return tuple(sorted(mal))", "def read_cm(self):\n\n reading = self.read()\n if not reading:\n return None\n\n if reading.unit == \"mm\":\n return reading.value * 10\n if reading.unit == \"in\":\n return reading.value * 2.54\n\n # Unlikely, but future proof.\n raise \"Reading has unknown unit: %s\" % reading.unit", "def galactic_to_MS():\n return MS_MATRIX", "def fnu(self, m):\n return 10**(-0.4*(m -23.9))", "def convert_mass(self, event):\n try:\n #Compare other unit to one unit(kilograms)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Earth 
masses\": 5.97219e+24, \"Solar masses\": 1.9890000000000002e+30, \"carats\": 0.0002, \"cental\": 45.359237, \"decagrams\": 0.01, \"femtograms\": 1e-18, \"grains\": 6.479891000000001e-05, \"grams\": 0.001, \"hectograms\": 0.1, \"hundredweights\": 50.802345, \"kilograms\": 1.0, \"kilotonnes\": 1000000.0, \"megatonnes\": 1000000000.0, \"micrograms\": 1e-09, \"milligrams\": 1e-06, \"nanograms\": 1e-12, \"ounces(US & UK)\": 0.02835, \"ounces(precious metals)\": 0.031103, \"picograms\": 1e-15, \"pounds(US & UK)\": 0.453592, \"pounds(precious metals)\": 0.373242, \"slugs\": 14.593903, \"stones\": 6.350293, \"tonnes(metric)\": 1000.0, \"tons(UK)\": 1016.046909, \"tons(US)\": 907.18474}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def get_unit(scale):\n scale2unit = { 1e-9: 'nm',\n 1e-6: u'\\N{MICRO SIGN}m', #or hex id (lookup): u'\\u00B5'\n 1e-3: 'mm',\n 0.01: 'cm',\n 0.1:'dm',\n 1:'m',\n 1000:'km',\n # time\n 8.6400e4:'day',\n 3.1536e7:'yr',\n 3.1536e10:'ka',\n 3.1536e13:'Ma',\n #Pressure\n 1e9: 'GPa',\n 1e6: 'MPa',\n }\n return scale2unit[scale]", "def momentToMatrix(m):\n angle = vectorops.norm(m)\n axis = vectorops.div(m,angle)\n return so3.rotation(axis,angle)", "def millify(n):\n if math.isnan(n):\n return n\n millnames = ['', ' K', ' M', ' B', ' T']\n n = float(n)\n millidx = max(0,min(len(millnames)-1,int(math.floor(0 if n == 0 else math.log10(abs(n))/3))))\n return '{:.2f}{}'.format(n / 10**(3 * millidx), millnames[millidx])", "def _momentum_unit(eq):\n mp=1.66e-27; A=2;\n q=1.602e-19; Z=1\n B = np.abs(eq.B0EXP)\n R = eq.R0EXP\n\n mom_unit= Z*q*B*R**2 #pphi=pphi[SI]*pphi_unit\n energy_unit = mp*A/(Z*Z*q*q*R**2*B**2) #E=E[J]*energy_unit\n mu_unit = mp*A/(Z*Z*q*q*R**2*B) #mu=mu[SI]*mu_unit\n return mom_unit, energy_unit, mu_unit", "def Int2Lum(distance_in_pc, cm_or_m='cm'):\n if cm_or_m == 'm':\n return 4 * math.pi * (distance_in_pc * const.parsec_in_m_1) ** 2\n if cm_or_m == 'cm':\n return 4 * math.pi * ( distance_in_pc * const.parsec_in_cm) ** 2", "def get_unc_m(self):\n return self.uncm", "def msqrd_mu_to_enunu(momenta):\n pe = momenta[:, 0]\n pve = momenta[:, 1]\n pvmu = momenta[:, 2]\n\n pmu = np.sum(momenta, axis=1)\n\n return 64.0 * GF**2 * ldot(pe, pvmu) * ldot(pmu, pve)", "def convert_pct_hu_to_umap(pct_file,\n structural_mri_file,\n ute_echo2_file):\n import os\n # Convert pseudoCT in HU to pseudoCT_mmrumap in 10000*cm-1\n upet_w = 0.096\n b = 0.7445\n cmd1 = 'seg_maths %s -thr 0 -sub 1024 -thr 0 -div 1000 -mul %s mmrumap.nii.gz' % (pct_file, str(b))\n os.system(cmd1)\n cmd2 = 'seg_maths %s -thr 0 -sub 1024 -mul -1 -thr 0 -mul -1 -div 1000 -add mmrumap.nii.gz -add 1 ' % pct_file +\\\n '-mul %s -thr 0 -uthr 0.4095 -mul 10000 -scl -range -odt ushort mmrumap.nii.gz' % str(upet_w)\n os.system(cmd2)\n cmd3 = 'reg_aladin -voff -rigOnly -ref %s -flo %s -aff affine2UTE.txt -res unused.nii.gz' %\\\n (ute_echo2_file, structural_mri_file)\n os.system(cmd3)\n cmd4 = 'reg_resample -ref %s -flo mmrumap.nii.gz -trans affine2UTE.txt -pad 0 -res mmrumap.nii.gz' % ute_echo2_file\n os.system(cmd4)\n\n return os.path.abspath('mmrumap.nii.gz')", "def step2mm(step):\n return 
step / KST101.STEPS_PER_MM", "def to_meme(self):\n motif_id = self.id.replace(\" \", \"_\")\n m = \"MOTIF %s\\n\" % motif_id\n m += \"BL MOTIF %s width=0 seqs=0\\n\"% motif_id\n m += \"letter-probability matrix: alength= 4 w= %s nsites= %s E= 0\\n\" % (len(self), np.sum(self.pfm[0]))\n m +=\"\\n\".join([\"\\t\".join([\"%s\" % x for x in row]) for row in self.pwm])\n return m", "def unit_mo(self):\n return (((self.time_base * 60.0) * 24.0) * 365.0) / 12", "def getMiriadUTStamp() :\n def maybeAddAZero(xS) :\n if (xS < 10) : return '0'+str(xS)\n else : return str(xS)\n calendar = dict()\n calendar['1'] = 'jan'\n calendar['2'] = 'feb'\n calendar['3'] = 'mar'\n calendar['4'] = 'apr'\n calendar['5'] = 'may'\n calendar['6'] = 'jun'\n calendar['7'] = 'jul'\n calendar['8'] = 'aug'\n calendar['9'] = 'sep'\n calendar['10'] = 'oct'\n calendar['11'] = 'nov'\n calendar['12'] = 'dec'\n utStamp = time.gmtime()\n utYear = str(utStamp[0])[2:]\n utMon = str(utStamp[1])\n utMonU = calendar[utMon]\n utDay = maybeAddAZero(utStamp[2])\n utHour = maybeAddAZero(utStamp[3])\n utMin = maybeAddAZero(utStamp[4])\n utSec = maybeAddAZero(utStamp[5])\n return ('%s%s%s:%s:%s:%s' % (utYear,utMonU,utDay,utHour,utMin,utSec) )", "def M_to_nu(M, ecc):\n if ecc > 1:\n F = M_to_F(M, ecc)\n nu = F_to_nu(F, ecc)\n else:\n E = M_to_E(M, ecc)\n nu = E_to_nu(E, ecc)\n return nu", "def get_mass(self):\n return self.m", "def _get_um_dm(cls, data):\n\t\thd = data['High'] - data['Open']\n\t\tdata['um'] = (hd + hd.abs()) / 2\n\t\tld = -data['Low'] - data['Open']\n\t\tdata['dm'] = (ld + ld.abs()) / 2", "def fuel_from_mass(m):\n return (m/3).astype(int) - 2", "def get_M(self):\n return 1.0", "def toisomonth(month):\n mes = {'JAN': '01', 'FEV': '02', 'FEB': '02', 'MAR': '03',\n 'ABR': '04', 'APR': '04', 'MAI': '05', 'MAY': '05',\n 'JUN': '06', 'JUL': '07', 'AGO': '08', 'AUG': '08',\n 'SET': '09', 'SEP': '09', 'OUT': '10', 'OCT': '10',\n 'NOV': '11', 'DEZ': '12', 'DEC': '12'}\n\n try:\n return mes[month.upper()[:3]]\n except KeyError:\n return month", "def convert_to_minutes(s):\r\n m = math.floor(s / 60)\r\n s -= m * 60\r\n return '%dm %ds' % (m, s)", "def ml(milliliters):\n return ul(milliliters*1000)", "def ul(microliters):\n if isinstance(microliters,str) and ':' in microliters:\n return Unit(microliters).to('microliter') \n return Unit(microliters,\"microliter\")", "def m1(self):\n return self.mass[0]", "def pt2m(x):\n return x * pt_size", "def get_molec_uc_to_mg_g(isot_dict):\n if 'conversion_factor_molec_uc_to_gr_gr' in isot_dict.get_dict():\n molec_uc_to_mg_g = isot_dict['conversion_factor_molec_uc_to_gr_gr']\n elif 'conversion_factor_molec_uc_to_mg_g' in isot_dict.get_dict():\n molec_uc_to_mg_g = isot_dict['conversion_factor_molec_uc_to_mg_g']\n return molec_uc_to_mg_g", "def get_m1(self):\n\n pass", "def mm_to_inches(rainfall_in_mm):\r\n rainfall_in_inches = rainfall_in_mm * 0.0393701\r\n return rainfall_in_inches", "def convert_to_minutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)", "def _unit_mo(self):\n return (((self.time_base * 60.0) * 24.0) * 365.0) / 12", "def set_unc_m(self, unc):\n self.uncm = unc", "def normalize_m11(x):\n return x / 127.5 - 1", "def mc2ms(mc,eta):\n root = np.sqrt(0.25-eta)\n fraction = (0.5+root) / (0.5-root)\n invfraction = 1/fraction\n\n m2= mc * np.power((1+fraction),0.2) / np.power(fraction,0.6)\n\n m1= mc* np.power(1+invfraction,0.2) / np.power(invfraction,0.6)\n return (m1,m2)", "def _convert_meta(m):\n # Decode Pascal style string with 4 bytes length field\n l = 
struct.unpack(\"<I\", m[:4])[0]\n return m[4:4+l]", "def convert_uM_to_kJ_per_mole(amount_in_uM: float, experiment_temp: float = 298.15) -> float:\n return convert_uIC50_to_kJ_per_mole(amount_in_uM, experiment_temp=experiment_temp)", "def mu(self):\n return self.mass * G", "def _convert_unit(self, unit):\n if unit in self.units:\n return self.units[unit]\n elif unit in unit_map:\n return unit_map[unit]\n else:\n raise SBMLError('Unit not recognized: ' + str(unit))", "def convertir_interes_efectivo_anula_a_mensual(ea):\n return (1 + ea)**(1/12) - 1", "def convert_eV_kJmol(en_eV):\n return en_eV/kJmol_eV", "def get_U(self):\n if self.U is not None:\n return self.U\n return self.calc_Uiso()", "def convert(self):\n return _libsbml.SBMLUnitsConverter_convert(self)", "def parse_MoMC_output(out):\n for line in out.splitlines():\n temp = str(line.decode())\n if temp.startswith('M'):\n return [int(x)-1 for x in temp[2:].split()]", "def chave_uf_ano_mes_de_lista(elemento):\n data,mm,uf = elemento\n anomes = '-'.join(data.split('-')[:2])\n chave = f'{uf}-{anomes}'\n if float(mm) < 0:\n mm = 0.0\n else:\n mm = float(mm)\n return chave,mm", "def convert_mev_microns(toto):\n hb=1.05458e-34\n ev=1.60218e-19\n c= 3e8\n return 1e6*hb*2*np.pi*c/(toto*1e-3*ev)", "def get_M_as_string(self):\n return '\\n'.join(['M({})={}'.format(p.name, p.M) for p in self.P])", "def convert_eV_kcalmol(en_eV):\n return en_eV/kcalmol_eV", "def convert_kcalmol_kJmol(en_kcalmol):\n return en_kcalmol/kJmol_kcalmol", "def minimum(self):\n return self.cleaning('Mínimo')", "def to_motevo(self):\n m = \"//\\n\"\n m += \"NA {}\\n\".format(self.id)\n m += \"P0\\tA\\tC\\tG\\tT\\n\"\n for i, row in enumerate(self.pfm):\n m += \"{}\\t{}\\n\".format(i, \"\\t\".join([str(int(x)) for x in row]))\n m += \"//\"\n return m", "def get_units(self):\r\n msg = struct.pack('>2B', 56, 14)\r\n response = self.query(msg)\r\n\r\n if response[1] == 2:\r\n units = 'A'\r\n to_nm_multiplier = 1 / 10\r\n elif response[1] == 1:\r\n units = 'nm'\r\n to_nm_multiplier = 1\r\n elif response[1] == 0:\r\n units = 'um'\r\n to_nm_multiplier = 1000\r\n else:\r\n raise ValueError('Units not recognised.')\r\n\r\n # Save results locally too for quick re-use\r\n self._current_units = units\r\n self._current_to_nm_multiplier = to_nm_multiplier\r\n\r\n return units, to_nm_multiplier", "def convert_to_mm_per_day(maps, units='kg m-2 s-1'):\n if units == 'kg m-2 s-1':\n return np.multiply(maps, 86400)\n else:\n raise ValueError('Conversion for units=%s not supported' % units)", "def km_to_mi(r: Number, scale: int = 6) -> float:\n if not isinstance(r, (float, int)):\n raise ValueError(\"Float or integer value expected.\")\n return round(r / KM_PER_MI, scale)", "def mjdToUT(mjd=None, use_metool=True, prec=6):\n if mjd is None:\n mjdsec = getCurrentMJDSec()\n else:\n mjdsec = mjd*86400\n utstring = mjdSecondsToMJDandUT(mjdsec, use_metool, prec=prec)[1]\n return(utstring)", "def get_mom(data):\n if data is None:\n raise EmptyDataError('[!] 
Invalid data value')\n\n result = TA.MOM(data)\n if result is None:\n raise IndicatorException\n return result", "def yymm(self) -> str:\n return self.arxiv_id.yymm", "def rpm_to_mps(self, rpm):\n\n mps = rpm / 60 * MuleBot.CIRCUM_M\n return mps", "def parse_unit(self, unitelem) -> Unit:\n u = Unit()\n\n u.unitid = unitelem.attrib['id'].strip()\n div = unitelem.find('{*}divide')\n if div is not None:\n nom = div.find('{*}unitNumerator').find('{*}measure')\n denom = div.find('{*}unitDenominator').find('{*}measure')\n u.nom = re.sub('.*:', '', nom.text).lower()\n u.denom = re.sub('.*:', '', denom.text).lower()\n else:\n m = unitelem.find('{*}measure')\n u.nom = re.sub('.*:', '', m.text).lower()\n\n return u", "def convertDistance(self, mm):\n\t\treturn mm/(self.microstep)", "def doconvert(self):\n if self.amt < 0:\n raise ValueError('Amount must be a positive number')\n conv = (self.amt * self.getuval(self.ufrom)) / self.getuval(self.uto)\n return conv", "def mapMaufromByte(self, date, bytes):\n sMonth = date.strftime(self.config.MONTH_FORMAT)\n reKey = self.config.dau_keys_conf['mau'].format(month=sMonth)\n redis_cli = self.get_redis_cli()\n logging.debug('Save mau from bytes: %s' % reKey)\n redis_cli.set(reKey, bytes)", "def Mi(m):\n return 700 * (numpy.exp(m/1127.0) - 1)", "def u2fkn( self , u ):", "def dpi_to_dpmm(dpi):\n return dpi / 25.4", "def mass(self, element):\n return self.m(element)", "def mtms_str(self):\n return self.machine_type + '-' + self.model + '*' + self.serial", "def typej_to_mv(degc):\n tab1 = [\n 0.000000000000E+00,\n 0.503811878150E-01,\n 0.304758369300E-04,\n -0.856810657200E-07,\n 0.132281952950E-09,\n -0.170529583370E-12,\n 0.209480906970E-15,\n -0.125383953360E-18,\n 0.156317256970E-22,\n ]\n\n tab2 = [\n 0.296456256810E+03,\n -0.149761277860E+01,\n 0.317871039240E-02,\n -0.318476867010E-05,\n 0.157208190040E-08,\n -0.306913690560E-12,\n ]\n\n if -210 <= degc <= 760:\n c = tab1\n elif 760 < degc <= 1200:\n c = tab2\n else:\n raise ValueError(\"Temperature specified is out of range for Type J thermocouple\")\n\n e = 0\n for p in range(0, len(c)):\n e += c[p] * math.pow(degc, p)\n return e", "def unit_of_measurement(self) -> str:\n return MS", "def month(self) -> int:\n if self.is_old_style:\n return int(self.split('/', 1)[1][2:4])\n return int(self[2:4])", "def M(self):\n return self._M", "def mu(self):\n return self.generic_getter(get_chemical_potential, \"mu\", \"convert_energy\")" ]
[ "0.69458026", "0.68633324", "0.68530375", "0.66572696", "0.65932775", "0.65664166", "0.6551626", "0.65338874", "0.63150936", "0.62804407", "0.6278391", "0.62339175", "0.62174183", "0.6206542", "0.61768055", "0.61204433", "0.6057365", "0.60438204", "0.59712124", "0.5944185", "0.5846866", "0.5737418", "0.5711883", "0.5696782", "0.56335825", "0.5610527", "0.56012136", "0.5571705", "0.55555147", "0.5539175", "0.5519546", "0.5509795", "0.54948735", "0.54407245", "0.5409868", "0.5400411", "0.53842455", "0.5362379", "0.53496766", "0.53481096", "0.53429794", "0.5330505", "0.5330012", "0.53260386", "0.53191185", "0.5304124", "0.5298795", "0.5288271", "0.52665764", "0.52540594", "0.5253771", "0.5249397", "0.5246295", "0.5243921", "0.5234951", "0.52296805", "0.5229649", "0.52144444", "0.5212847", "0.52006406", "0.5193662", "0.51914454", "0.51873404", "0.51849306", "0.51847154", "0.5177225", "0.51693356", "0.5159887", "0.51534456", "0.513596", "0.51295435", "0.512737", "0.5127118", "0.51234704", "0.51222426", "0.51172316", "0.51101065", "0.5106071", "0.5100784", "0.50910395", "0.50842416", "0.50822425", "0.5081694", "0.5071371", "0.5064281", "0.506288", "0.5062781", "0.5059399", "0.50589293", "0.50561696", "0.5054212", "0.50453645", "0.50427544", "0.50412154", "0.5040799", "0.5040326", "0.50229216", "0.5018844", "0.50180835", "0.5013931", "0.50138044" ]
0.0
-1
Adds 8% tax to a restaurant bill.
def tax(bill):
    bill *= 1.08
    print "With tax: %.2f" % bill
    return bill
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_tax(self,tax):\n return self.price + (self.price * tax)", "def tax(bill):\r\n bill *= 1.08\r\n print(\"With tax: %f\" % bill)\r\n return bill", "def tax(bill):\n bill *= 1.08\n print \"With tax: %f\" % bill\n return bill", "def tax(bill):\n bill *= 1.08\n print \"With tax: %f\" % bill\n return bill", "def _compute_tax(self):\n for line in self:\n line.tax = (line.amount_untaxed * 14) / 100", "def tax(subtotal, discount):\n return (subtotal - discount) * 0.12", "def calculate_tax(subtotal):\n return \"TAX: \"+format_usd(0.0875*subtotal)", "def tax(rate, total):\n taxes = rate * total\n return taxes", "def tax_rate(self) -> float:\n return round((self.total / self.income) * 100, 2)", "def tax_calculator(tax, cost):\n return float(tax * cost)", "def pay_tax(self):\n\t\t# the money comes from nowhere, settlers seem to have an infinite amount of money.\n\t\t# see http://wiki.unknown-horizons.org/index.php/DD/Economy/Settler_taxing\n\t\thappiness_tax_modifier = (float(self.happiness)-50)/200 + 1\n\t\ttaxes = self.tax_base * happiness_tax_modifier * self.inhabitants * self.settlement.tax_setting\n\t\ttaxes = int(round(taxes))\n\t\tself.settlement.owner.inventory.alter(RES.GOLD_ID, taxes)\n\t\tself.last_tax_payed = taxes\n\n\t\t# decrease happiness\n\t\thappiness_decrease = taxes + self.tax_base + ((self.settlement.tax_setting-1)*10)\n\t\thappiness_decrease = int(round(happiness_decrease))\n\t\tself.inventory.alter(RES.HAPPINESS_ID, -happiness_decrease)\n\t\tself._changed()\n\t\tself.log.debug(\"%s: pays %s taxes, -happy: %s new happiness: %s\", self, taxes, \\\n\t\t\t\t\t\t\t\t\t happiness_decrease, self.happiness)", "def tax(self):\n\n self.x = self.a\n self.set_zn(self.x)", "def base_tax_amount(self, base_tax_amount):\n\n self._base_tax_amount = base_tax_amount", "def base_price_incl_tax(self, base_price_incl_tax):\n\n self._base_price_incl_tax = base_price_incl_tax", "def price_incl_tax(self, price_incl_tax):\n\n self._price_incl_tax = price_incl_tax", "def tax_amount(self, tax_amount):\n\n self._tax_amount = tax_amount", "def tax_amount(self, tax_amount):\n\n self._tax_amount = tax_amount", "def tax_rate(self, income):\n if income <= 1500:\n rate = 0.03\n elif income <= 4500:\n rate = 0.1\n elif income <= 9000:\n rate = 0.2\n elif income <= 35000:\n rate = 0.25\n elif income <= 55000:\n rate = 0.3\n elif income <= 80000:\n rate = 0.35\n else:\n rate = 0.45\n return rate", "def calculate_total_price(total, taxes):\n total_price = total + taxes\n return total_price", "def get_total(self):\n\n base_price = 5\n total = (1 + int(self.tax)) * int(self.qty) * base_price\n\n return total", "def test_tax_age_bracket_45(self):\n net_pay_age = elijah.total_calc_tax()\n self.assertEqual(91, net_pay_age)", "def get_total(self):\n\n base_price=5\n if self.species == \"Christmas\":\n base_price=1.5*base_price\n \n total = (1 + self.tax) * self.qty * base_price\n\n if self.order_type==\"international\" and self.qty<10:\n total+=3\n\n return total", "def base_row_total_incl_tax(self, base_row_total_incl_tax):\n\n self._base_row_total_incl_tax = base_row_total_incl_tax", "def base_currency_tax_amount(self, base_currency_tax_amount):\n\n self._base_currency_tax_amount = base_currency_tax_amount", "def test_add_taxation_strategy_to_rate_plan(self):\n pass", "def withholding_tax_rate(self, withholding_tax_rate):\n\n self._withholding_tax_rate = withholding_tax_rate", "def apply_tax(order_obj):\n tax_rule = taxes.get()\n all_credits = order_obj.credits\n other_credit = filter(lambda x: x[\"coll_name\"] 
!= taxes.TaxRule.coll_name(), all_credits)\n\n if tax_rule is not None:\n order_obj.credits = other_credit + [{\n \"obj_id\": tax_rule._id,\n \"coll_name\": taxes.TaxRule.coll_name(),\n \"amount\": taxes.amount(tax_rule, order_obj),\n }]\n else:\n order_obj.credits = other_credit", "def total_cost_w_tax(tax_rate, state, cost_amount):\n state.upper()\n default_tax_rate = 0.05\n if state == 'CA':\n total_cost = (cost_amount * .07) + cost_amount\n elif tax_rate != 0.07 or tax_rate != 0.05:\n total_cost = (cost_amount * tax_rate) + cost_amount\n else:\n total_cost = (cost_amount * default_tax_rate) + cost_amount\n return total_cost, state.upper()", "def withholding_tax_amount(self, withholding_tax_amount):\n\n self._withholding_tax_amount = withholding_tax_amount", "def base_tax_amount(self):\n return self._base_tax_amount", "def get_total(self):\n\n base_price = 5\n \n if self.species == \"Christmas melon\":\n base_price = base_price * 1.5 \n\n total = (1 + self.tax) * self.qty * base_price \n\n if self.order_type == \"international\" and self.qty>10:\n total += 3\n\n\n return total", "def base_currency_withholding_tax_amount(self, base_currency_withholding_tax_amount):\n\n self._base_currency_withholding_tax_amount = base_currency_withholding_tax_amount", "def update_taxes(origin_matrix, tax_percent_fees, tax_percent_interest):\n new_taxes = np.zeros(len(origin_matrix[0]))\n fees_paid = origin_matrix[FEES_IDX]\n interest_paid_arr = origin_matrix[INTEREST_PAID_IDX]\n taxes_on_fee = fees_paid * tax_percent_fees\n taxes_on_interest = interest_paid_arr * tax_percent_interest\n new_taxes = taxes_on_fee + taxes_on_interest\n return new_taxes", "def tip(bill):\r\n bill *= 1.15\r\n print(\"With tip: %f\" % bill)\r\n return bill", "def calculateSingleTax(monthlyIncome):\n pass", "def total(self) -> float:\n\n remained_to_be_taxed = self.income\n # taxed = list()\n self.tax_amounts = []\n start_tax_range = 0\n end_tax_range = self.bracket\n\n for i, b in enumerate(self.bracket):\n\n amount_to_tax = b.end - start_tax_range\n t = Taxed(min(amount_to_tax, remained_to_be_taxed), b.rate,\n min(amount_to_tax, remained_to_be_taxed) * b.rate)\n self.tax_amounts.append(t)\n # print(i, start_t ax_range, b.end, amount_to_tax, b.rate)\n\n remained_to_be_taxed -= amount_to_tax\n # print(remained_to_be_taxed)\n\n if b.end > self.income:\n break\n\n start_tax_range = b.end\n\n # print(taxed)\n return sum([t.tax for t in self.tax_amounts])", "def row_total_incl_tax(self, row_total_incl_tax):\n\n self._row_total_incl_tax = row_total_incl_tax", "def taxes(self) -> float:\n return self.total", "def tip(bill):\n bill *= 1.15\n print \"With tip: %f\" % bill\n return bill", "def tip(bill):\n bill *= 1.15\n print \"With tip: %f\" % bill\n return bill", "def tip(bill):\n bill *= 1.15\n print \"With tip: %f\" % bill\n return bill", "def _compute_amount(self, base_amount, price_unit, quantity=1.0, product=None, partner=None):\n\n self.ensure_one()\n\n if self.amount_type != 'margin':\n return super(AccountTax, self)._compute_amount(\n base_amount,\n price_unit,\n quantity=quantity,\n product=product,\n partner=partner\n )\n\n return base_amount - (base_amount / (1 + self.amount / 100))", "def tip(bill):\n bill *= 1.15\n print \"With tip: %.2f\" % bill\n return bill", "def basket_total_incl_tax(self):\n return self.total_incl_tax - self.shipping_incl_tax - self.surcharge_incl_tax", "def base_price_incl_tax(self):\n return self._base_price_incl_tax", "def amount(self):\n return self.subtotal + self.tax_subtotal + self.shipping", 
"def tax_amount(self):\n return self._tax_amount", "def calculate_tax_for_new_regime(income, **kwargs):\n tax_amount = 0\n init_income = income\n\n slab_breaks = sorted(new_tax_regime.keys(), reverse=True)\n\n for upper_tax_slab in slab_breaks:\n if income > upper_tax_slab:\n curr_tax = new_tax_regime[upper_tax_slab] * (income - upper_tax_slab)\n tax_amount += curr_tax\n print(\"Tax slab: {}\\tAmount in the slab: {}\\tTax amount: {}\".format(upper_tax_slab, (income - upper_tax_slab), curr_tax))\n income = upper_tax_slab\n\n if init_income <= 500000:\n rebate = -1 * tax_amount\n print(\"Rebate: {}\".format(rebate))\n tax_amount += rebate\n\n health_ed_cess = 0.04 * tax_amount # 4% health and education cess on the tax amount\n print(\"Cess: {}\".format(health_ed_cess))\n tax_amount += health_ed_cess\n\n return tax_amount", "def get_quote_taxation(self):\n if ProductInfo.taxation:\n total, discount = self.get_total_quote_price(), self.get_quote_discount()\n return (total - discount) * 0.09\n else:\n return 0", "def total_discount_incl_tax(self):\n discount = D(\"0.00\")\n for line in self.lines.all():\n discount += line.discount_incl_tax\n return discount", "def test_tax_age_bracket_65(self):\n net_pay_age = elijah.total_calc_tax()\n self.assertNotEqual(95, net_pay_age)", "def update_totals(self, commit=False):\n sub_total = 0.0\n tax = 0.0\n for item in self.invoice_items:\n sub_total += float(item.total if item.total else 0)\n tax += float(item.tax if item.tax else 0)\n\n self.tax = tax\n self.sub_total = sub_total\n self.total = float(self.tax) + float(self.sub_total)\n\n if commit:\n db.session.add(self)\n db.session.commit()\n return True", "def calculateMarriedTax(husbandIncome, wifeIncome):\n pass", "def set_total_amount_with_taxes(self, amount):\n self.set_value_into_input_field(self.total_amount_with_taxes_textbox_locator, amount, True)", "def addTaxon(self, taxon_id, genopart_id, genopart_category=None):\n self.graph.addTriple(genopart_id, self.globaltt['in taxon'], taxon_id)", "def add_rent_to_balance(amount: float):\n\n date_cell, description_cell, payment_received_cell, rent_due_cell = find_empty_cell()\n\n try:\n worksheet.update_value((date_cell.row, date_cell.col), current_date)\n worksheet.update_value((description_cell.row, description_cell.col), \"Rent Due\")\n worksheet.update_value((rent_due_cell.row, rent_due_cell.col), amount)\n\n except pygsheets.exceptions.PyGsheetsException:\n return \"Your update was not successful. Please try again.\"\n else:\n return f\"${amount:.2f} was added to the balance of the account.\"", "def get_total(self):\n\n self.base_price = self.get_base_price()\n\n if self.species == \"christmas melon\":\n self.base_price = self.base_price * 1.5\n\n total = (1 + self.tax) * self.qty * self.base_price\n return total", "def calc_tax(state: str, income: float, federal_tax: float = 10.0):\n states_taxes = {\n \"AL\": (\"Alabama\", 5),\n \"AK\": (\"Alaska\", 3),\n \"FL\": (\"Florida\", 4),\n \"IL\": (\"Illinois\", 8),\n }\n\n state = state.upper()\n\n if state not in states_taxes:\n raise AssertionError(\"Taxes calculation of '{}' is not available. 
List: {}\"\n .format(state, states_taxes.keys()))\n\n net = income - (income * federal_tax / 100)\n print(\"Net after Federal Taxes:\", net)\n\n tax_to_deduct = net * states_taxes[state][1] / 100.0\n net = net - tax_to_deduct\n print(\"Net after {} Taxes: {}\".format(states_taxes[state][0], net))", "def base_row_total_incl_tax(self):\n return self._base_row_total_incl_tax", "def tax_override(self, tax_override):\n\n self._tax_override = tax_override", "def save_tax(request):\n if request.method == \"POST\":\n initial_data, data = process_request(request)\n return render(request,\n \"invoice/invoice_create.html\",\n {\n \"form\": InvoiceForm,\n \"stage\": \"3\",\n \"prev_data\": data,\n \"initial_data\": initial_data\n })", "def add_shipping_rate(self, country, shipping_company_id, description, base_rate, addon_rate, product_ids):\n self.login()\n self.form_request('shipping/create/', data={\n 'country': country,\n 'shipping_company': shipping_company_id,\n 'description': description,\n 'base_rate': \"%.2f\" % base_rate,\n 'addon_rate': \"%.2f\" % addon_rate,\n 'products': product_ids,\n })", "def calculate_tip(meal_base, tip_rate):", "def gold_card(subtotal):\n return subtotal * 0.05", "def get_total(self):\n\n base_price = self.get_base_price()\n\n if self.species == \"Christmas\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def get_total(self):\n\n base_price = self.get_base_price()\n if self.species == \"christmas melon\":\n base_price = base_price * 1.5\n\n total = ((1 + self.tax) * self.qty * base_price)\n\n return total", "def withdraw(self, currency, amount, address):\n pass", "def tax(self, tax_dict: dict):\n if tax_dict:\n self._tax = SalesTransactionTaxModel(**tax_dict)\n else:\n self._tax = None", "def calculate_tax(taxinfo):\n if isinstance(taxinfo, int):\n raise ValueError('Invalid input of type int not allowed')\n people = taxinfo.keys()\n for peo in people:\n if isinstance(taxinfo[peo], str):\n raise ValueError('Allow only numeric input')\n earning = taxinfo[peo]\n if earning <= 1000:\n taxinfo[peo] = (earning * 0)\n elif earning in range(1001, 10001):\n taxinfo[peo] = (earning - 10000) * 0.1\n elif earning in range(10001, 20201):\n tax1 = 1000 * 0\n tax2 = 9000 * 0.1\n tax3 = (earning - 10000) * 0.15\n taxinfo[peo] = (tax1 + tax2 + tax3)\n elif earning in range(20201, 30750):\n tax1 = 1000 * 0\n tax2 = 9000 * 0.1\n tax3 = 10200 * 0.15\n tax4 = (earning - 20200) * 0.20\n taxinfo[peo] = (tax1 + tax2 + tax3 + tax4)\n elif earning in range(30751, 50001):\n tax1 = 1000 * 0\n tax2 = 9000 * 0.1\n tax3 = 10200 * 0.15\n tax4 = (30750 - 20200) * 0.20\n tax5 = (earning - 30750) * 0.25\n taxinfo[peo] = (tax1 + tax2 + tax3 + tax4 + tax5)\n elif earning >= 50001:\n tax1 = 1000 * 0\n tax2 = 9000 * 0.1\n tax3 = 10200 * 0.15\n tax4 = (30750 - 20200) * 0.20\n tax5 = (50000 - 30750) * 0.25\n tax6 = (earning - 50000) * 0.30\n taxinfo[peo] = (tax1 + tax2 + tax3 + tax4 + tax5 + tax6)\n return taxinfo", "def calculate_price(self):\n\n cargo_weight = self.cargo.weight\n tax_rate = Decimal(0.18)\n\n untaxed_total = Decimal(cargo_weight) * Decimal(self.price_per_unit_weight)\n\n total_price = (untaxed_total * tax_rate) + untaxed_total\n\n return total_price", "def tax_id(self, tax_id):\n\n self._tax_id = tax_id", "def price_incl_tax(self):\n return self._price_incl_tax", "def total_item_cost(state, cost_before_tax, tax = .05):\n\n\tif state == \"CA\":\n\t\ttax = .07\n\t\n\ttotal_cost = cost_before_tax + (cost_before_tax * tax)\n\n\treturn 
total_cost", "def AttendanceRewardExcelAddRewardAmount(builder, RewardAmount):\n return AddRewardAmount(builder, RewardAmount)", "def pay(self, amt: float):\n self._money += amt", "def get_total(self):\n # method on the class DomesticMelonOrder\n base_price = 5\n\n if self.species == \"Christmas melons\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def addMoney(self, deposit_amount):\r\n self.balance_amt = self.balance_amt + deposit_amount", "def add_discount(self, bill):\n\n discounts_queryset = Discount.objects.prefetch_related('product')\n\n total_discount = 0\n\n for discount in discounts_queryset:\n discount_products = discount.product.all()\n if self.order.product in discount_products:\n bill['discounts'].append({'discount_title': discount.title,\n 'discount_size': discount.size})\n\n total_discount += discount.size\n if total_discount > 100:\n total_discount = 100\n\n bill['total'] = bill['total'] - bill['total'] / 100 * total_discount\n\n return bill", "def calculate_meal_costs(meal_base, tax_rate, tip_rate):\n tax_value = calculate_rate(meal_base, tax_rate)\n meal_with_tax = tax_value + meal_base\n tip_value = calculate_rate(meal_with_tax, tip_rate)\n total = meal_with_tax + tip_value\n meal_info = dict(meal_base=meal_base,\n tax_rate=tax_rate,\n tip_value=tip_value,\n tax_value=tax_value,\n total = total)\n return meal_info", "def count_tax_neuron(self,quantity):\n\t\tassert quantity > 0\n\t\tif quantity == 1:\n\t\t\treturn self.__smallPunishment\n\t\telif quantity == 2:\n\t\t\treturn self.__smallPunishment ** 2\n\t\telse:\n\t\t\treturn self.__alpha*exp(quantity)", "def tax(self) -> SalesTransactionTaxModel:\n return self._tax", "def apply_20percent_shipping_offer(self):\n product_range = Range.objects.create(\n name=\"All products range\", includes_all_products=True\n )\n benefit = Benefit.objects.create(\n range=product_range, type=Benefit.SHIPPING_PERCENTAGE, value=20\n )\n offer = factories.create_offer(product_range=product_range, benefit=benefit)\n Applicator().apply_offers(self.basket, [offer])\n return offer", "def test_tax_age_bracket_above_65(self):\n net_pay_age = elijah.total_calc_tax()\n self.assertNotEqual(97, net_pay_age)", "def withdraw(self, amount):\n self.balance -= amount\n if self.balance < 10:\n self.balance -= 5\n self.fees += 5", "def fee_base(self, fee_base):\n\n self._fee_base = fee_base", "def base_weee_tax_applied_amount(self, base_weee_tax_applied_amount):\n\n self._base_weee_tax_applied_amount = base_weee_tax_applied_amount", "def test_tax_net_pay_45(self):\n net_pay_age = tc.total_calc_tax(100, 45)\n self.assertEqual(91, net_pay_age)", "def give_raise(self,amount=5000):\n self.salary += amount", "def get_total(self):\n\n base_price = self.get_base_price()\n\n # Christmas Melons are more x1.5 expensive than other melons\n if self.species == \"Christmas Melon\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def _compute_amount(self):\n for line in self:\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n new_price = price\n if line.lot_id and line.product_id.tracking in ['lot','serial']:\n lot_id = self.env['stock.production.lot'].search([('name', '=', line.lot_id), ('product_id', '=', line.product_id.id)])\n if lot_id.tax_ids.filtered(lambda tax: tax.amount_type == 'based_on_margin'):\n if lot_id.cost_price:\n new_price -= lot_id.cost_price\n sh_tax = line.tax_id.filtered(lambda tax: tax.amount_type 
=='based_on_margin').compute_all(new_price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n taxes = line.tax_id.filtered(lambda tax: tax.amount_type !='based_on_margin').compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n print(taxes)\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])) + sum(t.get('amount', 0.0) for t in sh_tax.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n if self.env.context.get('import_file', False) and not self.env.user.user_has_groups('account.group_account_manager'):\n line.tax_id.invalidate_cache(['invoice_repartition_line_ids'], [line.tax_id.id])", "def test_tax_net_pay_65(self):\n net_pay_age = tc.total_calc_tax(100, 65)\n self.assertEqual(95, net_pay_age)", "def add_fuel(self, amount):\n if (self.fuel_level + amount <= self.fuel_capacity):\n self.fuel_level += amount\n print(\"Added fuel to \"+ self.make + \".\")\n else:\n print(\"The tank won't hold that much.\")", "def deposit(self, amount, budget):\r\n if budget != \"Total Balance\":\r\n assert budget in self.budgets, \"Specified budget doesn't exist\"\r\n self.budgets[budget] += float(amount)\r\n self.balance += float(amount)", "def base_weee_tax_applied_amount(self):\n return self._base_weee_tax_applied_amount", "def calculate_taxes(self, proforma, technologies):\n tax_calcs = copy.deepcopy(proforma)\n # 1) Redistribute capital cost according to the DER's MACRS value to get depreciation\n for der_inst in technologies:\n tax_contribution = der_inst.tax_contribution(self.macrs_depreciation,\n tax_calcs.index, self.start_year)\n if tax_contribution is not None:\n tax_calcs = pd.concat([tax_calcs, tax_contribution], axis=1)\n # 2) calculate yearly_net (taking into account the taxable contribution of each technology\n # asset)\n yearly_net = tax_calcs.sum(axis=1)\n tax_calcs['Taxable Yearly Net'] = yearly_net\n\n # 3) Calculate State tax based on the net cash flows in each year\n tax_calcs['State Tax Burden'] = yearly_net * -self.state_tax_rate\n\n # 4) Calculate Federal tax based on the net cash flow in each year minus State taxes\n # from that year\n yearly_net_post_state_tax = yearly_net + tax_calcs['State Tax Burden']\n tax_calcs['Federal Tax Burden'] = yearly_net_post_state_tax * -self.federal_tax_rate\n\n # 5) Add the overall tax burden (= state tax + federal tax) to proforma\n tax_calcs['Overall Tax Burden'] = tax_calcs['State Tax Burden'] + tax_calcs['Federal Tax Burden']\n proforma['State Tax Burden'] = tax_calcs['State Tax Burden']\n proforma['Federal Tax Burden'] = tax_calcs['Federal Tax Burden']\n proforma['Overall Tax Burden'] = tax_calcs['Overall Tax Burden']\n self.tax_calculations = tax_calcs\n return proforma", "def give_raise(self, amount=5000):\n self.salary += amount", "def VoiceCommonExcelAddRate(builder, Rate):\n return AddRate(builder, Rate)", "def test_client_tax_information_update(self):\n pass", "def tax_id(self, tax_id: str):\n\n self._tax_id = tax_id", "def calculateMarriedTax(husbandIncome, wifeIncome):\r\n pass\r\n a = husbandIncome + wifeIncome\r\n if a == 0:\r\n return 0 \r\n elif 1 <= a <= 19050:\r\n return 10\r\n elif 19051 <= a <= 77400:\r\n return 12\r\n elif 77401 <= a <= 165000:\r\n return 22\r\n elif 165001 <= a <= 315000:\r\n return 24\r\n elif 315001 <= a <= 400000:\r\n return 32\r\n elif 400001 <= a <= 600000:\r\n 
return 35\r\n else:\r\n return 37" ]
[ "0.7644522", "0.758284", "0.7505996", "0.7505996", "0.7157527", "0.7034859", "0.70066726", "0.69643134", "0.65901643", "0.65687346", "0.64239335", "0.6421261", "0.6313586", "0.62876344", "0.62859863", "0.6272636", "0.6272636", "0.6179959", "0.605302", "0.60120994", "0.59760493", "0.5955941", "0.5920652", "0.59197927", "0.5902741", "0.58887225", "0.58672494", "0.5866327", "0.58464783", "0.58368725", "0.58253485", "0.5813147", "0.58048445", "0.57370096", "0.57168406", "0.567852", "0.5677728", "0.56704175", "0.56432956", "0.56432956", "0.56432956", "0.56358653", "0.56065273", "0.5600792", "0.55880606", "0.5585525", "0.5573022", "0.5559816", "0.5557035", "0.55524415", "0.5527604", "0.55035806", "0.55008", "0.549054", "0.54501384", "0.5448096", "0.544271", "0.5431974", "0.5419146", "0.5415887", "0.5410491", "0.54090905", "0.5383982", "0.5381096", "0.5368887", "0.536475", "0.535666", "0.53558725", "0.5353785", "0.53292614", "0.53221166", "0.53145915", "0.5300605", "0.52715296", "0.5262153", "0.5261056", "0.5251301", "0.5241198", "0.5199526", "0.51898575", "0.5187223", "0.51863307", "0.51834804", "0.51830745", "0.51736915", "0.51640785", "0.5149678", "0.51400685", "0.513263", "0.512784", "0.5127346", "0.51247644", "0.51235574", "0.5109936", "0.5109854", "0.5096292", "0.50925404", "0.5089245", "0.50861865", "0.5085994" ]
0.7485274
4
Adds 15% tip to a restaurant bill.
def tip(bill):
    bill *= 1.15
    print "With tip: %.2f" % bill
    return bill
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tip(bill):\r\n bill *= 1.15\r\n print(\"With tip: %f\" % bill)\r\n return bill", "def tip(bill):\n bill *= 1.15\n print \"With tip: %f\" % bill\n return bill", "def tip(bill):\n bill *= 1.15\n print \"With tip: %f\" % bill\n return bill", "def tip(bill):\n bill *= 1.15\n print \"With tip: %f\" % bill\n return bill", "def calculate_tip(meal_base, tip_rate):", "def tip_calulator(total, people, tip):\n tip = tip / 100\n total = total / people\n tip_amount = total * tip\n new_total = total + tip_amount\n\n return tip_amount, new_total\n # pass", "def add_tax(self,tax):\n return self.price + (self.price * tax)", "def tax(bill):\r\n bill *= 1.08\r\n print(\"With tax: %f\" % bill)\r\n return bill", "def calc_tip(bill, pct):\n tip = bill * (pct * .01) # convert pct to a decimal and calculate\n tip = round(tip, 2) # round the tip to 2 decimal places\n return tip", "def tax(bill):\n bill *= 1.08\n print \"With tax: %f\" % bill\n return bill", "def tax(bill):\n bill *= 1.08\n print \"With tax: %f\" % bill\n return bill", "def tax(bill):\n bill *= 1.08\n print \"With tax: %.2f\" % bill\n return bill", "def bill_call(self, call: Call) -> None:\n duration = ceil(call.duration / 60.0)\n self.bill.add_billed_minutes(duration)\n self.balance += (PREPAID_MINS_COST * duration)", "def tax(subtotal, discount):\n return (subtotal - discount) * 0.12", "def calculate_tax(subtotal):\n return \"TAX: \"+format_usd(0.0875*subtotal)", "def gold_card(subtotal):\n return subtotal * 0.05", "def tax(rate, total):\n taxes = rate * total\n return taxes", "def _compute_tax(self):\n for line in self:\n line.tax = (line.amount_untaxed * 14) / 100", "def apply_20percent_shipping_offer(self):\n product_range = Range.objects.create(\n name=\"All products range\", includes_all_products=True\n )\n benefit = Benefit.objects.create(\n range=product_range, type=Benefit.SHIPPING_PERCENTAGE, value=20\n )\n offer = factories.create_offer(product_range=product_range, benefit=benefit)\n Applicator().apply_offers(self.basket, [offer])\n return offer", "def test_tax_age_bracket_45(self):\n net_pay_age = elijah.total_calc_tax()\n self.assertEqual(91, net_pay_age)", "def withdraw(self, amount):\n self.balance -= amount\n if self.balance < 10:\n self.balance -= 5\n self.fees += 5", "def lot_leverage(self): \n return 20", "def use(self):\n if self.credit < self.price_of_trip:\n print(\"Your credit is not enough, please increase your credit\")\n else:\n self.credit -= self.price_of_trip\n print(\"Done\")", "def addTN(self, num=1):\n self.tn += num", "def updatefrom155(self, newballot, pctnumber, ballotstyle):\n self._foundin155 = True\n if '*' == newballot:\n self._votescast155 += 1\n self._votescast155bypct[pctnumber] += 1\n\n if pctnumber < '0750':\n self._notabsentee = True\n else:\n self._absentee = True\n\n self._pctnumbers.add(pctnumber)\n self._ballotstyles.add(ballotstyle)", "def get_total(self):\n\n base_price=5\n if self.species == \"Christmas\":\n base_price=1.5*base_price\n \n total = (1 + self.tax) * self.qty * base_price\n\n if self.order_type==\"international\" and self.qty<10:\n total+=3\n\n return total", "def addMoney(self, deposit_amount):\r\n self.balance_amt = self.balance_amt + deposit_amount", "def handle_tip_request(author, body, comment):\n\n recipient = get_tip_recipient(comment)\n amount = parse_tip_amount(body=body, botname=helper.botname)\n\n if recipient is None or amount is None:\n reply = \"Nothing interesting happens.\\n\\n*In case you were trying to tip, I didn't understand you.*\"\n elif Decimal(amount) < 
Decimal(0.0001):\n reply = helper.get_below_threshold_message()\n else:\n tipper_logger.log(f'{author} is sending {recipient} {amount} XMR.')\n generate_wallet_if_doesnt_exist(recipient.lower())\n\n res = tip(sender=author, recipient=recipient, amount=amount)\n\n reply = f'{res[\"response\"]}'\n tipper_logger.log(\"The response is: \" + reply)\n\n if res[\"message\"] is not None:\n helper.praw.redditor(author).message(subject=\"Your tip\", message=f\"Regarding your tip here: {comment.context}\\n\\n\" + res[\"message\"] + get_signature())\n\n helper.praw.comment(str(comment)).reply(reply + get_signature())", "def get_total(self):\n\n base_price = 5\n \n if self.species == \"Christmas melon\":\n base_price = base_price * 1.5 \n\n total = (1 + self.tax) * self.qty * base_price \n\n if self.order_type == \"international\" and self.qty>10:\n total += 3\n\n\n return total", "def get_total(self):\n\n base_price = 5\n total = (1 + int(self.tax)) * int(self.qty) * base_price\n\n return total", "def bill_call(self, call: Call) -> None:\n self.bill.add_billed_minutes(ceil(call.duration / 60.0))", "def silver_card(subtotal):\n return subtotal * 0.02", "def test_add_taxation_strategy_to_rate_plan(self):\n pass", "def bill_call(self, call: Call) -> None:\n duration = ceil(call.duration / 60.0)\n if self._carried_term is False:\n if (self.bill.free_min + duration) <= TERM_MINS:\n self.bill.add_free_minutes(duration)\n else:\n self.bill.add_free_minutes(\n TERM_MINS - self.bill.free_min)\n self.bill.add_billed_minutes(\n self.bill.free_min + duration - TERM_MINS)\n else:\n self.bill.add_billed_minutes(duration)", "def get_total(self):\n\n total = super().get_total()\n if self.qty < 10:\n total += 3.00\n return total", "def buy_cost(self, buy_price, count):\n fee = 20 if math.floor(count*buy_price*1000*self.fee_count*self.handling_fee) <= 20 else math.ceil(count*buy_price*1000*self.fee_count*self.handling_fee)\n return int(buy_price*1000*count+fee)", "def add_tip(self, tip_length=None, at_start=False):\n tip = self.create_tip(tip_length, at_start)\n self.reset_endpoints_based_on_tip(tip, at_start)\n self.asign_tip_attr(tip, at_start)\n self.add(tip)\n return self", "def withdraw(self, amount):\n self.withdrw = amount\n \n if (self.balance-self.withdrw) < 0:\n self.balance = self.balance - 5 - self.withdrw\n self.fee += 5\n else:\n self.balance -= self.withdrw", "def update_total(self):\r\n likes = 0.00\r\n\r\n if self.get_hamburger.get():\r\n likes += 1.35\r\n\r\n if self.get_cheeseburger.get():\r\n likes += 1.59\r\n\r\n if self.get_bacon.get():\r\n likes += 1.79\r\n\r\n if self.get_drink.get():\r\n likes += 0.49\r\n \r\n self.result_txt.delete(0.0, END)\r\n self.result_txt.insert(0.0, likes)", "def tax(self):\n\n self.x = self.a\n self.set_zn(self.x)", "def deposit(self, deposit_money):\r\n self.balance += deposit_money", "def give_raise(self, amount=5000):\n self.salary += amount", "def tax_calculator(tax, cost):\n return float(tax * cost)", "def deposit(self, amount):\n self.balance += amount\n self.transactions.append((\"Deposit\", amount))\n print \"Your new balance is $%d.\" % self.balance", "def give_raise(self,amount=5000):\n self.salary += amount", "def show_results(bill, tip, pct):\n \n total = tip + bill\n\n print(\"Bill amount: $\" + str(bill))\n print(\"Tip percentage: \" + str(pct) + \"%\")\n print(\"Tip amount due: $\" + str(tip))\n print(\"Total with tip: $\" + str(total))\n\n print(\"\"\"\n-----------------------------------\n GOOD BYE \n-----------------------------------\n\"\"\")", "def 
addTP(self, num=1):\n self.tp += num", "def GachaCraftNodeExcelAddTier(builder, Tier):\n return AddTier(builder, Tier)", "def update_total_price():\n tk_total_price.set('Total: {0:>6}'.format(str(total_price)))\n print(total_price)", "def add_cash(self, num):\r\n self.cash += num", "def revenue(tips, n_players, price_per_player):\n\n total_revenue = tips + (n_players * price_per_player)\n return total_revenue", "def add_rent_to_balance(amount: float):\n\n date_cell, description_cell, payment_received_cell, rent_due_cell = find_empty_cell()\n\n try:\n worksheet.update_value((date_cell.row, date_cell.col), current_date)\n worksheet.update_value((description_cell.row, description_cell.col), \"Rent Due\")\n worksheet.update_value((rent_due_cell.row, rent_due_cell.col), amount)\n\n except pygsheets.exceptions.PyGsheetsException:\n return \"Your update was not successful. Please try again.\"\n else:\n return f\"${amount:.2f} was added to the balance of the account.\"", "def do_fee(self,args):\n totalamount,fee,howmanyto,nexttier = bitstamp.fee_schedule()\n print \"Your 30 day volume is: %.5f. Your trade fee is: %.2f%%\" % (totalamount,fee)\n print \"You are $%s away from the next tier of: $%s\" % (howmanyto,nexttier)", "def addToolTip(self, tip: str):\n self.setToolTip(tip)\n self.setFont(qtawesome.font('fa', 13))\n self.setText(self.text() + ' ' + chr(0xf059))", "def calculate_total_price(total, taxes):\n total_price = total + taxes\n return total_price", "def get_total(self):\n total = super().get_total()\n\n if self.qty < 10:\n total += 3\n\n return total", "def add(self, amount):\n self.amount += amount", "def main(price, service, vat):\n service = (price * 10)/100\n if service < 50:\n service = 50\n elif service > 1000:\n service = 1000\n price += service\n vat = (price * 7)/100\n price += vat\n print(\"%.2f\" % (price))", "def add_fuel(self, amount):\n if (self.fuel_level + amount <= self.fuel_capacity):\n self.fuel_level += amount\n print(\"Added fuel to \"+ self.make + \".\")\n else:\n print(\"The tank won't hold that much.\")", "def withdraw(self, currency, amount, address):\n pass", "def get_total(self):\n\n total = super(InternationalMelonOrder, self).get_total()\n if self.qty < 10:\n total = total + 3\n return total", "def use(self):\n if self.price_of_trip == 0:\n print(\"Sorry your card has been used\")\n else:\n self.price_of_trip -= self.price_of_trip\n print(\"Done\")", "def update_balance(self, multiplier: int) -> int:\n self.user.balance += DEFAULT_BET * multiplier\n return self.user.balance", "def pay_tax(self):\n\t\t# the money comes from nowhere, settlers seem to have an infinite amount of money.\n\t\t# see http://wiki.unknown-horizons.org/index.php/DD/Economy/Settler_taxing\n\t\thappiness_tax_modifier = (float(self.happiness)-50)/200 + 1\n\t\ttaxes = self.tax_base * happiness_tax_modifier * self.inhabitants * self.settlement.tax_setting\n\t\ttaxes = int(round(taxes))\n\t\tself.settlement.owner.inventory.alter(RES.GOLD_ID, taxes)\n\t\tself.last_tax_payed = taxes\n\n\t\t# decrease happiness\n\t\thappiness_decrease = taxes + self.tax_base + ((self.settlement.tax_setting-1)*10)\n\t\thappiness_decrease = int(round(happiness_decrease))\n\t\tself.inventory.alter(RES.HAPPINESS_ID, -happiness_decrease)\n\t\tself._changed()\n\t\tself.log.debug(\"%s: pays %s taxes, -happy: %s new happiness: %s\", self, taxes, \\\n\t\t\t\t\t\t\t\t\t happiness_decrease, self.happiness)", "def increment_amount(self, add_amount=1):\n new_amount = self.amount + add_amount\n if new_amount < self.min:\n 
new_amount = self.min\n if new_amount > self.max:\n new_amount = self.max\n self.amount = new_amount\n self.build_bar()", "def set_prix(self, annonce):\n p = annonce.find_element_by_class_name(\"price\")\n self.prix = p.text", "async def admin_credit(self, ctx, target: discord.Member, sum: int = 100):\n if is_registered(target.id):\n \n inventories = get_file(\"inventories\")\n inventories[str(target.id)][\"balance\"] += sum\n update_file(\"inventories\", inventories)\n\n embed = discord.Embed(color=admin_color)\n embed.set_author(name=\"🛠️ Admin\")\n embed.add_field(name=\"💰 Credit\",\n value=f\"{ctx.author.mention}, {target.mention} a été crédité de `{sum}` PO (pièces d'or)\")\n embed = set_footer(embed, ctx)\n await ctx.send(embed=embed)", "def total_item_cost(state, cost_before_tax, tax = .05):\n\n\tif state == \"CA\":\n\t\ttax = .07\n\t\n\ttotal_cost = cost_before_tax + (cost_before_tax * tax)\n\n\treturn total_cost", "def deposit(self, amount):\n self.balance += amount", "def deposit(self, amount):\n self.balance += amount", "def addToPot(self, amount, index):\n\t\tself.pots[index] = self.pots[index] + amount", "def AddTE(self, te):\n self._totalte += te", "def amount(self):\n return self.subtotal + self.tax_subtotal + self.shipping", "def luk_plus_five(unit: ActiveUnit) -> None:\n unit.mod_luk += 5", "def update_total(self):\n self.objects[self.ids.AMOUNT].setText(\"Total Spend: \\xA3%.2f\" % (self.owner.total_price() / 100))", "def calculate_bonuses (the_sum_of_current_purchase):\n the_sum_of_previous_purchases = 0\n blue_card_percent = 0.05\n silver_card_percent = 0.07\n gold_card_percent = 0.1\n the_sum_of_previous_purchases = the_sum_of_previous_purchases + the_sum_of_current_purchase\n\n if the_sum_of_previous_purchases <1000:\n bonus_for_purchase = 0\n if 1000 <= the_sum_of_previous_purchases <= 15_000:\n bonus_for_purchase = the_sum_of_current_purchase * blue_card_percent\n\n if 15001 <= the_sum_of_previous_purchases < 150_000:\n bonus_for_purchase = the_sum_of_current_purchase * silver_card_percent\n\n if the_sum_of_previous_purchases >= 150_000:\n bonus_for_purchase = the_sum_of_current_purchase * gold_card_percent\n\n return bonus_for_purchase", "def addqty(b, name, fn, *args, **kwargs):\n if b is None or brevity < b:\n with _timed_block(name, formatStr='{:45}', printer=printer, verbosity=2):\n qtys[name] = fn(*args, **kwargs)", "def addqty(b, name, fn, *args, **kwargs):\n if b is None or brevity < b:\n with _timed_block(name, formatStr='{:45}', printer=printer, verbosity=2):\n qtys[name] = fn(*args, **kwargs)", "def test_tax_net_pay_45(self):\n net_pay_age = tc.total_calc_tax(100, 45)\n self.assertEqual(91, net_pay_age)", "def create_deposit_bonus(sender, instance, created, **kwargs):\n if created:\n instance.wallet.value += Decimal(instance.value)\n instance.wallet.save()\n if instance.value >= Decimal('100.00'):\n user = instance.wallet.user\n bonus_wallet = BonusWallet.objects.filter(user=user)\n if not bonus_wallet.exists():\n bonus_wallet = BonusWallet.objects.create(user=user)\n bonus_wallet.save()\n else:\n bonus_wallet = bonus_wallet[0]\n\n deposit_bonus = DepositBonus.objects.create(wallet=bonus_wallet)\n bonus_wallet.value += Decimal(deposit_bonus.value)\n bonus_wallet.save()", "def add_fuel(self, amount):\n if (self.fuel_level + amount\n <= self.fuel_capacity):\n self.fuel_level += amount\n print(\"Added fuel.\")\n else:\n print(\"The tank won't hold that much.\")", "def restock(self):\n self.money = 9999", "def get_total(self):\n\n self.base_price = 
self.get_base_price()\n\n if self.species == \"christmas melon\":\n self.base_price = self.base_price * 1.5\n\n total = (1 + self.tax) * self.qty * self.base_price\n return total", "def add_to_excess(self) -> None:\n if self.msg.value <= 0:\n revert(\"No amount added to excess\")\n self._treasury_balance.set(self.icx.get_balance(self.address))\n self.FundReceived(self.msg.sender, self.msg.value, f\"{self.msg.value} added to excess\")", "def withdrawMoney(self, withdraw_amount):\r\n self.balance_amt = self.balance_amt - withdraw_amount", "def add_spot_dust(self, tran_id: str, time: int, asset: str, asset_amount: float, bnb_amount: float, bnb_fee: float,\n auto_commit: bool = True):\n\n row = (tran_id, time, asset, asset_amount, bnb_amount, bnb_fee)\n self.add_row(tables.SPOT_DUST_TABLE, row, auto_commit=auto_commit)", "def withdraw(self, amount):\r\n self.balance = self.balance - amount\r\n self.transactions.append(-amount)\r\n return amount", "def earnCoin(self, amount):\n self.coins += amount", "def compute_total(price):\n\n quantity = 20\n return price * quantity", "def get_base_price(self):\n base_price = random.randint(5,9)\n print(base_price)\n\n # see if the order was placed during rush hour\n now = datetime.datetime.now()\n\n dow = now.weekday() # Mon is 0, Sun is 6\n hour = now.hour\n\n if hour >= 8 and hour < 11 and dow >= 0 and dow < 5:\n base_price += 4\n\n return base_price", "def add_discount(self, bill):\n\n discounts_queryset = Discount.objects.prefetch_related('product')\n\n total_discount = 0\n\n for discount in discounts_queryset:\n discount_products = discount.product.all()\n if self.order.product in discount_products:\n bill['discounts'].append({'discount_title': discount.title,\n 'discount_size': discount.size})\n\n total_discount += discount.size\n if total_discount > 100:\n total_discount = 100\n\n bill['total'] = bill['total'] - bill['total'] / 100 * total_discount\n\n return bill", "def short(self, amount):", "def calculate_meal_costs(meal_base, tax_rate, tip_rate):\n tax_value = calculate_rate(meal_base, tax_rate)\n meal_with_tax = tax_value + meal_base\n tip_value = calculate_rate(meal_with_tax, tip_rate)\n total = meal_with_tax + tip_value\n meal_info = dict(meal_base=meal_base,\n tax_rate=tax_rate,\n tip_value=tip_value,\n tax_value=tax_value,\n total = total)\n return meal_info", "def add_to_water_level(self, amount):\n LandCell.add_to_water_level(self, amount)\n if self.water_level > 0:\n self.reset_food_level()", "def add_comment_silver_award(user, kb):\n #section for sliver \n reach_top = MoneyComm.reachCommentTopMoney(user) \n if not reach_top: \n MoneyComm.awardComment(user, kb)", "def update_total(self):\n # the 'or 0' sets order_total as 0 instead of None,\n # preventing an error when calculating delivery_costs\n self.order_total = self.lineitems.aggregate(\n Sum('lineitem_total'))['lineitem_total__sum'] or 0\n if self.order_total < settings.FREE_DELIVERY_THRESHOLD:\n sdp = settings.STANDARD_DELIVERY_PERCENTAGE\n self.delivery_cost = self.order_total * sdp / 100\n else:\n self.delivery_cost = 0\n self.grand_total = self.order_total + self.delivery_cost\n self.save()", "def main():\n \n welcome()\n myBill = get_bill_amt()\n pct = get_tip_pct()\n tip = calc_tip(myBill, pct)\n show_results(myBill, tip, pct)", "def bonus(self, value=0.0, feedback=None):\n self.hit.generate_connection()\n\n self.hit.connection.grant_bonus(\n self.worker_id,\n self.mturk_id,\n bonus_price=boto.mturk.price.Price(amount=value),\n reason=feedback)\n self.update()", "def 
pay(self, amt: float):\n self._money += amt", "def line_cost(self):\r\n return self.qty * self.unit_cost" ]
[ "0.7132613", "0.70187575", "0.70187575", "0.70187575", "0.6926353", "0.63815933", "0.6208463", "0.6160494", "0.6047981", "0.6044608", "0.6044608", "0.601777", "0.58578914", "0.5761912", "0.5701724", "0.56948024", "0.565889", "0.55814856", "0.5531272", "0.55295736", "0.550463", "0.54616636", "0.54476863", "0.5408935", "0.5390254", "0.5347043", "0.5338784", "0.5316994", "0.5299725", "0.529723", "0.5280277", "0.52411956", "0.52167994", "0.5211925", "0.52106005", "0.51946515", "0.518469", "0.51842475", "0.51822495", "0.5170324", "0.5162772", "0.5159994", "0.5151348", "0.51441234", "0.51440847", "0.5142432", "0.5141496", "0.5135224", "0.51322275", "0.5125968", "0.5125498", "0.5119717", "0.5104651", "0.5070422", "0.5066453", "0.50525546", "0.50496024", "0.50484467", "0.50441843", "0.5043183", "0.50429547", "0.5037659", "0.502868", "0.49951932", "0.4994535", "0.49846682", "0.49775186", "0.49751666", "0.49679336", "0.49679336", "0.4967382", "0.4966576", "0.4960041", "0.49553022", "0.4952399", "0.49473122", "0.49453408", "0.49453408", "0.49442172", "0.49413335", "0.49411994", "0.49386814", "0.4933548", "0.49330807", "0.49182293", "0.49149343", "0.4909925", "0.48918357", "0.4882617", "0.48674694", "0.48664322", "0.4865005", "0.48526958", "0.48517916", "0.48463553", "0.48455325", "0.48421812", "0.4841975", "0.4840283", "0.48401234" ]
0.6996032
4
Returns the square of a number.
def square(n):
    squared = n**2
    print "%d squared is %d." % (n, squared)
    return squared
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def square(num):\n square = num ** 2\n return square", "def square_number(number: int) -> int:\n return number * number", "def square(num):\n return num * num", "def square(value):\n return value ** 2", "def my_square(x):\n return x ** 2", "def square(n: int) -> int:\n return int(n ** 2)", "def square(n):\n squared = n ** 2\n print \"%d squared is %d.\" % (n, squared)\n return squared", "def square(num1):\n squared = num1*num1\n return squared", "def square(x):\n\n\treturn x * x", "def square(n):\n squared = n**2\n print \"%d squared is %d.\" % (n, squared) # %d is used for decimals instead of %s for strings\n return squared", "def square(x):\n return x**2", "def square( x ):\n return x * x", "def square(n):\r\n squared = n ** 2\r\n print (\"%d squared is %d.\" % (n, squared)) ## გიო: შეცდომას აგდებდა სანამ ფრჩხილებში არ ჩავსვი\r\n return squared", "def square(x):\n return x * x", "def square(x):\n return x*x", "def my_square(y):\n\treturn (y ** 2)", "def square_value(s):\n return s ** 2", "def my_square(y):\n\treturn (y **2)", "def my_square(y):\n\treturn(y ** 2)", "def square(numbers):\n\n # Needs only one argument\n\n return numbers[0] ** 2", "def square(a):\n return a ** 2", "def Sqr(num):\n return math.sqrt(float(num))", "def square(a):\n return(a**2)", "def square(x: float) -> float:\n return x * x", "def squareOfSum(num):\n return sum(range(1, num + 1)) ** 2", "def my_square2(x):\n\treturn(x * x)", "def square2(x):\n return x * x", "def squareroot(number):\n return math.sqrt(number)", "def sqr(x):\n return x ** 2", "def square(n):\r\n try:\r\n assert(type(n) is int)\r\n if n == 1:\r\n return 1\r\n s = square(n - 1) + 2*(n - 1) + 1\r\n return s\r\n except:\r\n return None", "def print_square(num):\n print(\"Square: {}\".format(num * num))", "def print_square(num):\n print(\"Square: {}\".format(num * num))", "def sqr(x):\n return x * x", "def square(x):\n if type(x) not in (int, float, int, complex):\n raise TypeError(\"argument must be a number\")\n\n return x*x", "def square(n):\n\n result = [num * num for num in range(n)]\n\n return result[1:]", "def is_square(n):\r\n m = int(sqrt(n))\r\n return m * m == n", "def squared(x=2):\n return x ** 2", "def my_square(d):asaasasassssssssssssssssssssssssss\n\t return (d ** 3)", "def sqr(a):\n return a * a", "def square_nums(number_list):", "def sq(x):\n\n return x ** x", "def square(original_number):\n running_total = 0\n for counter in range(original_number):\n running_total = running_total + original_number\n return running_total", "def sqrt(number):\n assert number >= 0\n candidates = [x for x in range(number + 1)]\n return sqrtHelper(candidates, 0, len(candidates) - 1, number)", "def my_sqrt(x):\n square_root = x**(0.5)\n return square_root", "def square_of_sum(n):\n return ((n * (n+1)) / 2)**2", "def sqrt(n):\n pass", "def sqrt(number):\n if number < 2:\n return number\n \n left = 0\n right = number\n \n while left<right:\n mid = (left + right) // 2\n square = mid*mid\n \n if square == number:\n return mid\n \n elif square < number:\n left = mid + 1\n \n else:\n right = mid\n \n return left-1", "def sqrt(number):\n number = abs(number)\n bit = 1 << 62 # second to top of 64 bit is 62, 32 bit would be 1 << 30\n result = 0\n\n # Start with the highest power of 4 that is less than the number\n while bit > number:\n bit >>= 2\n\n while bit != 0:\n if number >= result + bit:\n number -= result + bit\n result = (result >> 1) + bit\n else:\n result >>= 1\n bit >>= 2\n\n return result", "def is_square(number): \n s = number * number\n return 
is_palindrome(s)", "def sqrt(number):\n if number is None or number < 0:\n return None\n \n start = 0\n end = number\n \n while start <= end:\n mid = (start + end) // 2\n square = mid * mid\n if square == number:\n return mid\n \n if square > number:\n end = mid - 1\n elif square < number:\n start = mid + 1\n return start - 1", "def sqrt(number):\n if number == 0 or number == 1:\n return number\n if number < 0:\n return None\n s = 1\n e = number/2\n while s <= e:\n mid = (s+e)//2\n if (mid*mid == number):\n return mid\n if mid*mid < number:\n s = mid+1\n res = mid\n else:\n e = mid - 1\n return res", "def sumOfSquares(num):\n sum = 0\n for i in range(1, num + 1):\n sum += i ** 2\n return sum", "def nearest_square(num):\n\n answer = 0\n while (answer+1)**2 < num:\n answer += 1\n return answer**2", "def is_square(N):\n return N == round(N**(0.5))**2", "def get_squares(n):\n\n return sum([i * i for i in range(n)])", "def sq(self, x):\n\t\treturn x * x", "def do_sqrt(num):\n if num < 0:\n from cmath import sqrt\n return sqrt(num)\n from math import sqrt\n return sqrt(num)", "def square_digits(num):\n nums = ''\n for n in str(num):\n nums += str(int(n)* int(n))\n return int(nums)", "def sum_squares(num):\n sum = 0\n while (num != 0):\n sum += math.pow((num % 10), 2)\n num = num/10\n return int(sum)", "def is_square(x):\n\n if x < 0:\n return False\n if math.pow(int(math.sqrt(x)), 2) == x:\n return True", "def sqrt(number):\n if number < 0 or number is None:\n return None\n elif number == 0:\n return 0\n elif number == 1:\n return 1\n\n previous = 0\n n = number // 2\n\n while True:\n if n * n == number:\n return n\n elif n * n < number:\n return previous - 1\n else:\n previous = n\n n = n // 2", "def _square_rooted(x):\n return sqrt(sum([(a * a) for a in x]))", "def sqrt(number):\n if number==0 or number==1:\n return number\n \n start=1\n end=number\n\n while start<=end:\n mid=(start+end)//2\n if mid*mid==number:\n return mid\n elif mid*mid<number:\n start=mid+1\n sqrt=mid\n else:\n end=mid-1\n \n return sqrt", "def sqrt_recursive(number):\n if number < 0:\n raise ValueError('Value must be greater than 0')\n return _sqrt_recursive(0, number, number)", "def find_square_pr(a,b):\n return a * b", "def sqrt(a):", "def square_factor(a):\n f = a if isinstance(a, dict) else factorint(a)\n return Mul(*[p**(e//2) for p, e in f.items()])", "def sum_of_squares(n):\n result = i = 0\n while i < n:\n result += i\n i += 1\n return result", "def square_digit_sum(number):\n return sum(precomputed_digit_squares[digit] for digit in str(number))", "def square_circumference(a):\n return (4*a)", "def sum_of_squares(n):\n sum = 0\n\n for i in range(0,n):\n sum += i*i\n\n return sum", "def sqrt(x: int):\n pass", "def is_square(n):\n if type(n) is not int:\n raise ValueError(\"Wrong given type, should be integer instead\")\n return n > -1 and math.sqrt(n) == int(math.sqrt(n))", "def squared(num_list):\n new_list=[]\n for num in num_list:\n sq_num=pow(num,2)\n new_list.append(sq_num)\n return new_list", "def double(n):\n return 2 * n", "def sqrt(x):\n return 0.0", "def perfect_square(num: int) -> bool:\n return math.sqrt(num) * math.sqrt(num) == num", "def sqrt_round(num):\n out = math.ceil(math.sqrt(num))\n return out", "def is_square(N):\n if N < 0:\n print(\"N is negative number @is_square in ModulesFactorization.\")\n sys.exit()\n\n sqrt_N=round(math.sqrt(N))\n if N == sqrt_N*sqrt_N:\n return True\n else:\n return False", "def sqrt(number):\n # check for negative inputs\n if number < 0:\n return None\n # square root 
of 1 and 0 is 1 and 0\n elif number in [1, 0]:\n return number\n\n # initialise upper and lower bound\n high = number\n low = 0\n\n while low < high:\n # mid is the average of high and low\n mid = (high + low) // 2\n # if mid ** 2 is the number, return the mid value\n # OR, if mid ** 2 is smaller than the number and (mid + 1) ** 2 is larger than the number,\n # return the mid number as it's the floor value\n if mid**2 <= number < (mid+1)**2:\n return mid\n # mid is too high, change high var to mid\n elif mid**2 > number:\n high = mid\n # mid is too low, change low var to mid\n else:\n low = mid", "def sum_of_squares(n):\n return (n * (n+1) * (2*n + 1)) / 6", "def sqrt(n, one):\n # Use floating point arithmetic to make an initial guess\n floating_point_precision = 10**16\n n_float = float((n * floating_point_precision) // one) / floating_point_precision\n x = (int(floating_point_precision * math.sqrt(n_float)) * one) // floating_point_precision\n n_one = n * one\n while 1:\n x_old = x\n x = (x + n_one // x) // 2\n if x == x_old:\n break\n return x", "def sqrt(x):\r\n # see decorator for function body\r", "def square(self):\n return self.x * self.x + self.y * self.y", "def square_value(a):\n try:\n out = a*a\n except TypeError:\n raise TypeError(\"Input should be a string:\")\n\n return out", "def generate_square_number(square_limit):\n for i in range(0,square_limit):\n yield i**2", "def square(numbers):\n\n # Needs only one argument\n newlist = []\n for num in numbers:\n newlist.append(num*num)\n return newlist", "def sqrt(number):\n # return number if its 0 or 1 since 0*0=0 and 1*1=1\n if number == 0 or number ==1:\n return number\n #square root negative number is not real number\n if number < 0:\n return None\n\n #using binary search, if s is square root of y that means 0<=s<=y. That is\n #s*s = y. if for a given number x, x*x > y that means square root s should lie\n #between 0<=s<x. 
if for a given number x, x*x < y that means square root s should lie\n #between x<s<=y\n start = 1\n end = number\n floor_sqrt = 0\n iterate=0\n while(start <= end):\n iterate+=1\n #start with middle elemment\n mid = (start + end )//2\n #check if mid * mid = number, then mid is answer\n if mid * mid == number:\n return mid\n #if mid * mid < number, check between range (mid,end]\n #also maintain mid value since, we have to return floor value\n if mid * mid < number:\n start = mid + 1\n floor_sqrt = mid\n #if mid * mid > number, check between range [start,end)\n else:\n end = mid - 1\n return floor_sqrt", "def mySqrt(self, x: int) -> int:\n if x == 0:\n return 0\n d = 0.1\n y = x / 2\n z = (y + x/y) / 2\n e = abs(z-y)\n while e > d:\n y = z\n z = (y + x/y) / 2\n e = abs(z - y)\n return int(z)", "def squares(s):\n\n \"*** YOUR CODE HERE ***\"\n return [int(x**(1/2)) for x in s if x**(1/2) == round(x**(1/2))]", "def sum_of_squares(x):\r\n return dot(x, x)", "def times2(number):\n\tnumber = number * 2\n\treturn number", "def square(i, j):\n return map(sq_start, [i, j, i + 1, j + 1])", "def summation_i_squared(n):\n if (type(n) is not int) or (n is None) or (n < 1):\n return None\n else:\n numbers = range(1, n + 1)\n result = 0\n result = map(lambda i: i ** 2, numbers)\n return sum(result)", "def is_square(apositiveint):\n x = apositiveint // 2\n seen = set([x])\n while x * x != apositiveint:\n x = (x + (apositiveint // x)) // 2\n if x in seen: return False\n seen.add(x)\n return True", "def squares(s):\n \"*** YOUR CODE HERE ***\"\n result = []\n for num in s:\n sr = round(math.sqrt(num))\n if sr * sr == num:\n result.append(sr)\n return result", "def d(n):\n rt = math.sqrt(n)\n i = 2\n result = 1\n while i < rt:\n if n % i == 0:\n result += i\n result += n // i\n i += 1\n\n # i == rt implies that n is a square number\n if i == rt and n % i == 0:\n result += i\n return result", "def square_norm(x):\n return np.linalg.norm(x) ** 2" ]
[ "0.86342794", "0.8617322", "0.8552498", "0.80486435", "0.80462384", "0.79968894", "0.7947229", "0.79286706", "0.7911237", "0.7883522", "0.78671736", "0.78318316", "0.78140074", "0.78127563", "0.7788222", "0.77676326", "0.7688305", "0.7677109", "0.76735294", "0.763642", "0.76268077", "0.7580315", "0.75691795", "0.74651915", "0.7454181", "0.7434435", "0.73860633", "0.7330594", "0.7270361", "0.725276", "0.72418386", "0.72418386", "0.7181147", "0.7173479", "0.7080809", "0.6984464", "0.69581455", "0.69329464", "0.6926902", "0.6917082", "0.68823344", "0.6857497", "0.6853538", "0.68145514", "0.6811526", "0.67958075", "0.67935467", "0.6779376", "0.67439103", "0.6722509", "0.67167985", "0.6712913", "0.6652715", "0.66508", "0.66478765", "0.6645134", "0.662597", "0.6584951", "0.65812606", "0.6573875", "0.65474355", "0.65438855", "0.6501044", "0.65001273", "0.6499959", "0.6450393", "0.64375937", "0.6435262", "0.64322674", "0.6430152", "0.63782346", "0.6378087", "0.63664174", "0.6340777", "0.6340147", "0.6275641", "0.6272328", "0.62602466", "0.62418354", "0.62315047", "0.6223405", "0.62175363", "0.6206211", "0.6201819", "0.61754274", "0.61718833", "0.6162493", "0.61453944", "0.6109177", "0.6103648", "0.6095594", "0.60897917", "0.60791296", "0.6062002", "0.6047688", "0.60175645", "0.60041803", "0.6000633" ]
0.78975934
11
The constructor for the Particle class.
def __init__(self, position=np.array([0,0,0], dtype=float), velocity=np.array([0,0,0], dtype=float), acceleration=np.array([0,0,0], dtype=float), name='A Particle', restMass=1.0, charge=const.elementary_charge):
    self.name = name
    self.position = np.array(position, dtype=float)
    self.velocity = np.array(velocity, dtype=float)
    self.acceleration = np.array(acceleration, dtype=float)
    self.restMass = restMass
    self.charge = charge
    self.electricField = PointElectricFieldClass(sourceParticle=self, name='Field from %s' % (self.name))
    self.magneticField = PointMagneticFieldClass(sourceParticle=self, name='Field from %s' % (self.name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, particles):\n self.particles = particles", "def __init__(self,particle):\n self.par = particle", "def particle(self) -> Particle:\n return Particle()", "def particle(self) -> Particle:\n return Particle()", "def __init__(self, init_pos_1, init_pos_2, M_1, M_2, spring_cos, equi_len):\n self.p1 = Particle(init_pos_1, M_1)\n self.p2 = Particle(init_pos_2, M_2)\n self.k = spring_cos\n self.L0 = equi_len", "def __init__(self,E,px,py,pz):\n Particle.__init__(self)\n self.E=float(E)\n self.px=float(px)\n self.py=float(py)\n self.pz=float(pz)\n self.cal_pt()\n self.cal_phi()\n self.cal_eta()\n #self.cal_mass()\n #print self.E,self.px,self.py,self.pz\n #print self.pt,self.phi,self.eta", "def particle(self) -> Particle:\n return Particle(diameter=self.diameter)", "def __init__(self,mass,time,**kwargs):\n self.setTime(time)\n ParticlePhaseCoordinates.__init__(self,mass,**kwargs)", "def __init__(self, *fname):\n # Atom positions, types and form factor table\n self.atom_pos = None # atom position -> N x 3 array, sorted based on atom type id\n # Index array saving indices that split atom_pos to get pos for each atom type\n # More specifically, let m = split_idx[i] and n = split_idx[i+1], then\n # atom_pos[m:n] contains all atoms for the ith atom type.\n self.split_idx = None\n self.num_atom_types = None # number of atom types\n self.ff_table = None # form factor table -> atom_type x qSample\n\n # Scattering\n self.q_sample = None # q vector sin(theta)/lambda\n self.num_q_samples = None # number of q samples\n # Compton scattering\n self.compton_q_sample = None # Compton: q vector sin(theta)/lambda\n self.num_compton_q_samples = 0 # number of Compton q samples\n self.sBound = None # Compton: static structure factor S(q)\n self.nFree = None # Compton: number of free electrons\n if len(fname) != 0:\n # read from pmi file to get info about radiation damage at a certain time slice\n if len(fname) == 1:\n datasetname = 'data/snp_0000001' # default dataset name -> set to be initial time\n self.read_h5file(fname[0], datasetname)\n elif len(fname) == 2:\n # both pmi file and the time slice (dataset) are provided\n self.read_h5file(fname[0], fname[1])\n else:\n raise ValueError('Wrong number of parameters to construct the particle object!')", "def __init__(self,nparticles,initial_condition):\n self.nparticles = nparticles\n self.particles = np.array([Particle(mass,x,y) for x,y,mass in initial_condition])\n self.mass = np.array([self.particles[i].mass for i in range(len(self.particles))])\n self.position = np.array([self.particles[i].position for i in range(len(self.particles))])\n self.momentum = np.array([self.particles[i].momentum for i in range(len(self.particles))])", "def __init__(self, label, mass, position, velocity, prev_force=[0,0,0], current_force=[0,0,0], prev_pos=[0,0,0], angle_traversed = 0):\n try:\n self.label = str(label)\n self.mass = float(mass)\n self.position = array(position, dtype=np.float64)\n self.prev_pos = array(prev_pos, dtype=np.float64)\n self.velocity = array(velocity, dtype=np.float64)\n self.prev_force = array(prev_force, dtype=np.float64)\n self.current_force = array(current_force, dtype=np.float64)\n self.angle_traversed = float(angle_traversed)\n except:\n raise ValueError('Something went wrong initialising a Particle3D')", "def __init__(self,mass,x=None,p=None,v=None):\n self.setPosition(x)\n self.setMass(mass)\n if p is not None and v is not None:\n raise CoordinateException(\"Initializing a particle can only have momentum or velocity, not both.\")\n elif 
p is None:\n self.setVelocity(v)\n self.calcMomentumFromVelocity()\n elif v is None:\n self.setMomentum(p)\n self.calcVelocityFromMomentum()", "def __init__(self, T_e, n_e, Z=None, particle=\"p\"):\n self.T_e = T_e\n self.n_e = n_e\n self.particle = particle\n self.Z = _grab_charge(particle, Z)\n # extract mass from particle\n self.ionMass = particle_mass(self.particle)", "def init_particles(self):\n \n # Each particle is a dimension-K vector. We generate each particle \n # uniformly at random from the space [0,1]^K. \n self.Particles = np.random.uniform(0, 1, (self.Npar, self.K))\n #print(\"Particles: \", self.Particles) \n return None", "def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.__iteration_number = kwargs['iteration_number']\n self.__particles = [\n PSOParticle(**kwargs, bit_generator=self._random)\n for _ in range(kwargs['particles'])\n ]\n\n # The library stores particles in the visualizer .... groan\n positions = [particle.position for particle in self.__particles]\n self._visualizer = NoVisualizer(**kwargs)\n self._visualizer.add_data(positions=positions)", "def __init__(self,nparticles,size, mass=1, G=1, boundary_periodic = True,early_universe=False, softner=1, position = [], momentum = []):\n self.softner = softner\n self.G = G\n self.boundary_periodic = boundary_periodic\n self.nparticles = nparticles\n self.size = size\n self.mass = np.ones(nparticles)*mass\n #If the boundary condition are not periodic, the grid_size is double but particle kept in the first quadrant so \n #that the particles cannot feel the effect of the particles closed to the opposite boundary when we take the convolution\n if boundary_periodic==True:\n self.grid_size = size\n else:\n self.grid_size = 2*size\n #Initialize the partticle grid\n # if early_universe == True:\n # self.ptclgrid.early_universe_grid(softner)\n # self.mass = self.ptclgrid.mass\n self.ptclgrid = ParticleGrid(nparticles,self.grid_size,self.size, mass=self.mass, soft=softner, early_universe=early_universe)\n #If initial position are givem, place the particle to the right place on the grid\n if len(position) != 0:\n self.ptclgrid.update_position(position, mass)\n\n self.grid = self.ptclgrid.grid\n self.grid_pos = self.ptclgrid.grid_pos\n x0,y0 = self.ptclgrid.position.transpose()\n initial_condition = np.array([x0,y0, self.mass]).transpose()\n #Initialize the Particle list containing the position and momentum of the particles\n self.particles = ParticleList(nparticles, initial_condition)\n #If initial mometa are given, intialize it \n if len(momentum) != 0:\n self.particles.momentum = momentum\n #Computes the green function on the grid\n self.compute_green_function(self.grid_size)\n #Initialize the array with the acceleration of the particles\n self.acc = np.zeros((len(self),2))", "def __init__(self, dim: tuple, count: int):\n self.surface = pygame.Surface(dim)\n # initialize\n self.particles = []\n # initialize\n for counter in range(count):\n pos = pygame.Vector2(random.randint(0, self.surface.get_width()), random.randint(0, self.surface.get_height()))\n direction = pygame.Vector2(10 * (random.random() - 0.5), 10 * (random.random() - 0.5))\n color = pygame.Color(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), 255)\n size = 5 + random.randint(0, 10)\n particle = Particle(self.surface, pos, direction, size, color)\n self.particles.append(particle)", "def __init__(self, mass, x, y,px=0.0,py=0.0):\n self.mass = mass\n self.position = np.array([x,y])\n self.momentum = np.array([px,py])", "def 
__init__(self, Nparticles,Nkicks,GAMMA, KAPPA):\n\n\n\t\tself.__Nparticles = Nparticles\n\t\tself.__Nkicks = Nkicks\n\n\t\t\n\t\tself.__kappa = KAPPA\n\t\tself.__gamma = GAMMA\n\t\tself.__omega = 1\n\t\tself.__dt = 0.0005\n\t\tself.__Kbt = 0\n\t\tself.__q = 4\n\t\tself.__tk = 2*np.pi/self.__q\n\n\n\t\t#Fa is the matrix to solve the Langevin equation using the Euler's method.\n\t\tself.__Fa = np.array([[0,-self.__omega**2],[1,-self.__gamma]])\n\t\tself.__eta = 0.1\n\n\t\t#self.__XPinit = np.random.random((self.__Nparticles,2))*10\n\t\tself.__XPinit = np.random.normal(0,3.5,(self.__Nparticles,2))\n\t\tself.__XPEnsembleBefore, self.__XPEnsembleAfter = self.__trajectories()", "def __init__(self):\n self.center = Point()\n self.velocity = Velocity()", "def __init__(self, init_pos, init_stdev, num_particles, sense_noise):\n self.particles = np.random.multivariate_normal(\n init_pos, [[init_stdev**2, 0], [0, init_stdev**2]], num_particles)\n self.weights = np.array(\n [1. / num_particles for _ in range(num_particles)])\n self.n = num_particles\n self.sense_noise = sense_noise", "def __init__(self, func, init_pos, n_particles):\n self.func = func\n self.n_particles = n_particles\n self.init_pos = np.array(init_pos)\n self.particle_dim = len(init_pos)\n # Initialize particle positions using a uniform distribution\n self.particles_pos = np.random.uniform(size=(n_particles, self.particle_dim) ) \\\n * self.init_pos\n # Initialize particle velocities using a uniform distribution\n self.velocities = np.random.uniform(size=(n_particles, self.particle_dim))\n\n # Initialize the best positions\n self.g_best = init_pos\n self.p_best = self.particles_pos\n self.phi = 2", "def __init__(self, p=0.5):\n self.p = p", "def __init__(self, total_args):\n\t\tself.alpha = 0.0\n\t\tself.salida = 0.0\n\t\tself.bias = pseudoaleatorio(-1.0, 1.0)\n\t\tself.pesos = []\n\t\tfor i in range(total_args):\n\t\t\tself.pesos.append(pseudoaleatorio(-1.0, 1.0))", "def __init__(self, *args, **kwargs):\n\n try:\n self._color = kwargs['color']\n except KeyError:\n self._color = 'white'\n\n self._color_rgb = convert.to_rgb(self._color)\n #Now we use same approach as in VisualizationFrame\n #for setting reference_frame and origin\n i = 0\n #If first arg is not str, name the visualization frame 'unnamed'\n if isinstance(args[i], str):\n self._name = args[i]\n i += 1\n else:\n self._name = 'unnamed'\n\n try:\n self._reference_frame = args[i].get_frame()\n self._origin = args[i].get_masscenter()\n\n except AttributeError:\n #It is not a rigidbody, hence this arg should be a\n #reference frame\n try:\n dcm = args[i]._dcm_dict\n self._reference_frame = args[i]\n i += 1\n except AttributeError:\n raise TypeError(''' A ReferenceFrame is to be supplied\n before a Particle/Point. 
''')\n\n #Now next arg can either be a Particle or point\n try:\n self._origin = args[i].get_point()\n except AttributeError:\n self._origin = args[i]\n\n #basic thing required, transform matrix\n self._transform = Identity(4).as_mutable()", "def __init__(self, probability: float):\n super().__init__()\n\n # store input parameters\n self.probability = probability", "def __init__(self, _x, _y, _z):\n self.position = Position3d(int(_x), int(_y), int(_z))\n self.velocity = Velocity3d(0, 0, 0)", "def __init__(self, position, momentum, mass):\n self.position = position\n self.momentum = momentum\n self.mass = mass", "def __init__(self, mass, position, velocity):\n self.mass = mass\n self.position = position\n self.velocity = velocity", "def __init__(self,SO,d_dimp,rd_dimp,N_e,N_a):\n self.sphere = SO;\n self.d_dimp = d_dimp;\n self.rd_dimp = rd_dimp;\n self.N_e = N_e;\n self.N_a = N_a;", "def __init__(self, func, init_pos, n_particles):\n self.func = func\n self.n_particles = n_particles\n self.init_pos = np.array(init_pos)\n self.particle_dim = len(init_pos)\n # Initialize particle positions using a uniform distribution\n self.particles_pos = np.random.uniform(size=(n_particles, self.particle_dim)) \\\n * self.init_pos\n # Initialize particle velocities using a uniform distribution\n self.velocities = np.random.uniform(size=(n_particles, self.particle_dim))\n\n # Initialize the best positions\n self.g_best = init_pos\n self.p_best = self.particles_pos", "def __init__(self, Position, M):\n self.pos = Position # Sets x position\n self.m = M # Sets mass\n # Initial velocity and acceleration set to be zero\n self.vel = np.zeros((2,))\n self.acc = np.zeros((2,))", "def particle(*args, attribute: Union[AnyStr, bool]=\"\", cache: bool=True, conserve: Union[float,\n bool]=0.0, count: bool=True, deleteCache: bool=True, dynamicAttrList: bool=True,\n floatValue: float=0.0, gridSpacing: Union[float, List[float], bool]=0.0, inherit:\n Union[float, bool]=0.0, jitterBasePoint: Union[List[float, float, float],\n List[List[float, float, float]], bool]=None, jitterRadius: Union[float,\n List[float], bool]=0.0, lowerLeft: Union[List[float, float, float],\n List[List[float, float, float]], bool]=None, name: Union[AnyStr, bool]=\"\",\n numJitters: Union[int, List[int], bool]=0, order: Union[int, bool]=0, particleId:\n Union[int, bool]=0, perParticleDouble: bool=True, perParticleVector: bool=True,\n position: Union[List[float, float, float], List[List[float, float, float]]]=None,\n shapeName: Union[AnyStr, bool]=\"\", upperRight: Union[List[float, float, float],\n List[List[float, float, float]], bool]=None, vectorValue: List[float, float,\n float]=None, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def new_gas_particle():\n function = LegacyFunctionSpecification()\n function.must_handle_array = True\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT, description =\n \"\"\"\n An index assigned to the newly created particle.\n This index is supposed to be a local index for the code\n (and not valid in other instances of the code or in other codes)\n \"\"\"\n )\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, direction=function.IN, \n description = \"The initial position vector of the particle\")\n function.addParameter('radius', dtype='float64', unit=generic_unit_system.length, direction=function.IN, description = \"The radius of the particle\")\n for par in [\"red\", \"green\", 
\"blue\"]:\n function.addParameter(par, dtype='float64', direction=function.IN, \n description = \"The RGB color of the particle\")\n function.addParameter(\"alpha\", dtype='float64', direction=function.IN, description = \"The opacity of the particle\", default = 1.0)\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n return function", "def __init__(self,x,y,width,height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.velocity_x = 0.0", "def define_particle(self,line):\n\n pattern=re.compile(r'''^\\s*\n (?P<pid>-?\\d+)\\s+ #PID\n (?P<status>1)\\s+ #status (1 for output particle)\n (?P<mother>-?\\d+)\\s+ #mother\n (?P<dum3>-?\\d+)\\s+ #mother\n (?P<color1>[+-e.\\d]*)\\s+ #color1\n (?P<color2>[+-e.\\d]*)\\s+ #color2\n (?P<px>[+-e.\\d]*)\\s+ #px\n (?P<py>[+-e.\\d]*)\\s+ #py\n (?P<pz>[+-e.\\d]*)\\s+ #pz\n (?P<E>[+-e.\\d]*)\\s+ #E\n (?P<mass>[+-e.\\d]*)\\s+ #mass\n (?P<dum1>[+-e.\\d]*)\\s+ #dummy1\n (?P<dum2>[+-e.\\d]*)\\s* #dummy2\n $ #end of string\n ''',66) #verbose+ignore case\n if pattern.search(line):\n obj=pattern.search(line)\n E=obj.group('E')\n px=obj.group('px')\n py=obj.group('py')\n pz=obj.group('pz')\n particle=part_quadvec(E,px,py,pz)\n particle.def_mass(obj.group('mass'))\n particle.def_pid(obj.group('pid'))\n return particle\n else:\n return 0", "def __init__(self, Pnt, FID):\n self.x = Pnt.X\n self.y = Pnt.Y\n self.fid = FID", "def __init__(self, x, y, width, height):\r\n self.position = Vector(x, y)\r\n vec = (np.random.rand(2) - 0.5)*10 # generating random velocity vector\r\n self.velocity = Vector(*vec)\r\n self.x = x\r\n self.y = y\r\n\r\n vec = (np.random.rand(2) - 0.5)/2 # generating random acceleration vector\r\n self.acceleration = Vector(*vec)\r\n # initialization of speed and force limits\r\n self.max_force = 0.3\r\n self.max_speed = 5\r\n self.perception = 100\r\n\r\n self.width = width\r\n self.height = height", "def __init__(self, x=0, y=0, a=2, b=1, ang=0):\n self.center = (x, y)\n self.lengths = (a, b)\n self.angle = ang\n # self.eps = create_ellipse(self.center, self.lengths, self.angle)", "def __init__(self,pforce,beta,D):\n \n self.pforce = pforce\n\n # we use another method to set D and beta\n # bc we are going to do it a lot\n\n self._set_beta_D(beta,D)", "def __init__(self):\n self.ascii_filename = None\n\n # name and calc_id\n self.name = None\n self.calc_id = None\n self.i_ts = None\n self.time = None\n # data members\n self.x = None\n self.p = None\n # particle number density normalized to nGJ: n/n_{GJ} \n self.fmci_XP = None\n # PSR parameters\n self.PSR_P = None\n self.PSR_B12 = None\n self.PSR_Lcm = None\n self.PSR_Theta = None\n self.PSR_Chi = None", "def __init__(self, position, intensity):\n self.position = position\n self.intensity = intensity", "def __init__(self,\r\n vertices: List[Vector],\r\n sensor: bool = False,\r\n bounciness: float = 0,\r\n friction: float = 0,\r\n density: float = 1,\r\n loop: bool = False\r\n ):\r\n super().__init__(sensor, Vector.Zero(), bounciness, friction, density)\r\n\r\n # the vertices\r\n self._vertices = vertices # type: List[Vector]\r\n self.loop = loop", "def __init__(self):\n super().__init__()\n self.mu = 0.0\n self.type = 'Poisson'\n self.hasInfiniteBound = True\n self.distType = 'Discrete'\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def test_particle_obj():\n # Set up the base parameters describing a particle object\n T = 273.15 + 15.\n P = 
150e5\n Sa = 35.\n Ta = 273.15 + 4.\n composition = ['methane', 'ethane', 'propane', 'oxygen']\n yk = np.array([0.85, 0.07, 0.08, 0.0])\n de = 0.005\n K = 1.\n Kt = 1.\n fdis = 1e-6\n\n # Compute a few derived quantities\n bub = dbm.FluidParticle(composition)\n m0 = bub.masses_by_diameter(de, T, P, yk)\n\n # Create a `SingleParticle` object\n bub_obj = dispersed_phases.SingleParticle(bub, m0, T, K, fdis=fdis,\n K_T=Kt)\n\n # Check if the initial attributes are correct\n for i in range(len(composition)):\n assert bub_obj.composition[i] == composition[i]\n assert_array_almost_equal(bub_obj.m0, m0, decimal=6)\n assert bub_obj.T0 == T\n assert bub_obj.cp == seawater.cp() * 0.5\n assert bub_obj.K == K\n assert bub_obj.K_T == Kt\n assert bub_obj.fdis == fdis\n for i in range(len(composition)-1):\n assert bub_obj.diss_indices[i] == True\n assert bub_obj.diss_indices[-1] == False\n\n # Check if the values returned by the `properties` method match the input\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = bub_obj.properties(m0, T, P,\n Sa, Ta, 0.)\n us_ans = bub.slip_velocity(m0, T, P, Sa, Ta)\n rho_p_ans = bub.density(m0, T, P)\n A_ans = bub.surface_area(m0, T, P, Sa, Ta)\n Cs_ans = bub.solubility(m0, T, P, Sa)\n beta_ans = bub.mass_transfer(m0, T, P, Sa, Ta)\n beta_T_ans = bub.heat_transfer(m0, T, P, Sa, Ta)\n assert us == us_ans\n assert rho_p == rho_p_ans\n assert A == A_ans\n assert_array_almost_equal(Cs, Cs_ans, decimal=6)\n assert_array_almost_equal(beta, beta_ans, decimal=6)\n assert beta_T == beta_T_ans\n assert T == T_ans\n\n # Check that dissolution shuts down correctly\n m_dis = np.array([m0[0]*1e-10, m0[1]*1e-8, m0[2]*1e-3, 1.5e-5])\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = bub_obj.properties(m_dis, T, P,\n Sa, Ta, 0)\n assert beta[0] == 0.\n assert beta[1] == 0.\n assert beta[2] > 0.\n assert beta[3] > 0.\n m_dis = np.array([m0[0]*1e-10, m0[1]*1e-8, m0[2]*1e-7, 1.5e-16])\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = bub_obj.properties(m_dis, T, P,\n Sa, Ta, 0.)\n assert np.sum(beta[0:-1]) == 0.\n assert us == 0.\n assert rho_p == seawater.density(Ta, Sa, P)\n\n # Check that heat transfer shuts down correctly\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = bub_obj.properties(m_dis, Ta, P,\n Sa, Ta, 0)\n assert beta_T == 0.\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = bub_obj.properties(m_dis, T, P,\n Sa, Ta, 0)\n assert beta_T == 0.\n\n # Check the value returned by the `diameter` method\n de_p = bub_obj.diameter(m0, T, P, Sa, Ta)\n assert_approx_equal(de_p, de, significant=6)\n\n # Check functionality of insoluble particle\n drop = dbm.InsolubleParticle(isfluid=True, iscompressible=True)\n m0 = drop.mass_by_diameter(de, T, P, Sa, Ta)\n\n # Create a `Particle` object\n drop_obj = dispersed_phases.SingleParticle(drop, m0, T, K, fdis=fdis,\n K_T=Kt)\n\n # Check if the values returned by the `properties` method match the input\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = drop_obj.properties(\n np.array([m0]), T, P, Sa, Ta, 0)\n us_ans = drop.slip_velocity(m0, T, P, Sa, Ta)\n rho_p_ans = drop.density(T, P, Sa, Ta)\n A_ans = drop.surface_area(m0, T, P, Sa, Ta)\n beta_T_ans = drop.heat_transfer(m0, T, P, Sa, Ta)\n assert us == us_ans\n assert rho_p == rho_p_ans\n assert A == A_ans\n assert beta_T == beta_T_ans\n\n # Check that heat transfer shuts down correctly\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = drop_obj.properties(m_dis, Ta, P,\n Sa, Ta, 0)\n assert beta_T == 0.\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = drop_obj.properties(m_dis, T, P,\n Sa, Ta, 0)\n assert beta_T == 0.\n\n # Check the 
value returned by the `diameter` method\n de_p = drop_obj.diameter(m0, T, P, Sa, Ta)\n assert_approx_equal(de_p, de, significant=6)", "def __init__(self, temperatures, daytypes, consumptions, nb_days, nb_particles, sigma2, kappa, u_heat):\n self.temperatures = temperatures\n self.daytypes = daytypes\n self.consumptions = consumptions\n self.nb_days = nb_days\n self.nb_particles = nb_particles\n self.sigma2 = sigma2\n self.kappa = kappa\n self.u_heat = u_heat\n #Var init\n self.s = np.zeros((nb_days, nb_particles)) \n self.g_heat = np.zeros((nb_days, nb_particles))\n #sigma_s and sigma_g are fixed\n self.sigma_s_star_2 = np.zeros((1, nb_particles)) \n self.sigma_g_star_2 = np.zeros((1, nb_particles))\n self.x_season = np.zeros((1, nb_particles))\n self.x_heat = np.zeros((1, nb_particles))\n self.x = np.zeros((1, nb_particles))\n self.w = np.zeros((1, nb_particles))", "def __init__(self, sigma):\n self.sigma = float(sigma)\n super().__init__(domain=functional.domain,\n range=functional.domain, linear=False)\n\n # Setting up parameters\n self.const = 1 / (functional.epsilon * sigma)", "def __init__(self, calc_id, particle_name, xp_partition):\n tdc_FMCI_XP_Data_Base.__init__(self)\n # name and calc_id\n self.name = particle_name\n self.calc_id = calc_id\n # setup XP_Data --------------------\n sample_dict = dict(name='regular', n_reduce=1, n_min=1)\n self.xp = tdc_XP_Data(calc_id, particle_name, sample_dict, get_weight=True)\n # interface to timetable -----------\n self.timetable = self.xp.timetable\n # setup properties -----------------\n setup_props = tdc_Setup_Props(calc_id)\n # normalization parameters\n self.W0 = setup_props.get_papam('FMPProps/W0')\n self.L = setup_props.get_papam('/GridProps/L')\n # physical parameters from \"setup_properties.h5\"\n self.PSR_P = setup_props.get_papam('/PulsarGapProps/P')\n self.PSR_B12 = setup_props.get_papam('/PulsarGapProps/B_12')\n self.PSR_Lcm = setup_props.get_papam('/GridProps/L_cm')\n # physical parameters from \"cascade.input\": THETA and CHI\n infile=AT.FileInput()\n infile.ReadFile(tdc_Filenames.get_full_filename(calc_id, 'cascade.input'))\n infile.ChangeGroup('GEOMETRY')\n self.PSR_Theta = infile.get_param('THETA')\n infile.ChangeGroup() \n infile.ChangeGroup('DIMENSIONAL_CONSTANTS::PSR_ConstsInitializer')\n self.PSR_Chi = infile.get_param('CHI')\n infile.ChangeGroup() \n # set xp_partition =================\n self.set_xp_partition(xp_partition)", "def __init__(self):\n\n self._P = 0 # number of pixels\n self._x = 0.0 # x-coordinate of center of mass, i.e.\n # the avg x-coordinate\n self._y = 0.0 # y-coordinate of center of mass, i.e.\n # the avg y-coordinate", "def __init__(self, pos=np.zeros((3,)), vel=np.zeros((3,)),\n acc=np.zeros((3,)), rpy=np.zeros((3,)),\n airVel=np.zeros((3,)), thrust=0):\n self.pos = pos\n self.vel = vel\n self.acc = acc\n self.rpy = rpy\n self.airVel = airVel\n self.thrust = thrust", "def __init__(self):\n self.position = Vector2()\n self.velocity = Vector2()\n self.update_parameters()\n self.mass = 0.18 # Mass of Sphero robot in kilograms", "def __init__(self, name):\r\n\r\n # Define attributes\r\n self.name = name\r\n self.pos = []\r\n self.Pn = []\r\n self.flux = []\r\n self.pointCloud = []\r\n self.readpil3d()", "def new_marker_particle():\n function = LegacyFunctionSpecification()\n function.must_handle_array = True\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT, description =\n \"\"\"\n An index assigned to the newly created particle.\n This index is supposed to be a 
local index for the code\n (and not valid in other instances of the code or in other codes)\n \"\"\"\n )\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, direction=function.IN, \n description = \"The initial position vector of the particle\")\n function.addParameter('radius', dtype='float64', unit=generic_unit_system.length, direction=function.IN, description = \"The radius of the particle\")\n for par in [\"red\", \"green\", \"blue\"]:\n function.addParameter(par, dtype='float64', direction=function.IN, \n description = \"The RGB color of the particle\")\n function.addParameter(\"alpha\", dtype='float64', direction=function.IN, description = \"The opacity of the particle\", default = 1.0)\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n return function", "def __init__(\n self,\n oxygen: MeasurementStatistics,\n pulse: MeasurementStatistics,\n blood_pressure_systolic: MeasurementStatistics,\n blood_pressure_diastolic: MeasurementStatistics,\n ):\n self.oxygen = oxygen\n self.pulse = pulse\n self.blood_pressure_systolic = blood_pressure_systolic\n self.blood_pressure_diastolic = blood_pressure_diastolic", "def __init__(self, *args):\n this = _CompuCell.new_Potts3D(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, x, y, th):\n self.x = x\n self.y = y\n self.th = th", "def __init__(self, pos):\r\n self.pos = pos\r\n self.vel = [0, 0]\r\n self.acc = [0, 0]\r\n self.heading = math.pi\r\n self.screen = [0, 0]", "def __init__(self, **params):\n # Dimension of the true signal x\n self.N = params.get('N', 1024)\n\n # Dimension of the measurement vector y\n self.M = params.get('M', 256)\n\n # Number of timesteps\n self.T = params.get('T', 4)\n\n # Type of the random measurement matrix to generate\n # (1) : normalized Gaussian matrix\n self.A_type = params.get('A_type', 1)\n\n # Active support probability\n self.lambda_ = params.get('lambda_', 0.08) # high sparsity default\n\n # Amplitude mean\n self.zeta = params.get('zeta', 0)\n\n # Amplitude variance\n self.sigma2 = params.get('sigma2', 1)\n\n # Amplitude innovation rate\n self.alpha = params.get('alpha', 0.10)\n\n # Active-to-inactive transition probability\n self.p01 = params.get('p01', 0.10)\n\n # Desired signal-to-noise ratio, in dB\n self.desired_SNR = params.get('desired_SNR', 25)", "def __init__(self, r=1, p=3):\n self.p = p\n self.r = r", "def __init__(self, p, multiplier=1e-1):\r\n super().__init__(multiplier=multiplier)\r\n self.p = p", "def __init__(self, p=1.5, eps=1e-8):\n assert 1 < p < 2, \"make sure 1 < p < 2\" \n self.p, self.eps = p, eps", "def __init__(self, values, p=None):\n self.values = values\n self.p = p", "def __init__(self, values, p=None):\n self.values = values\n self.p = p", "def __init__(self, values, p=None):\n self.values = values\n self.p = p", "def __init__(self, values, p=None):\n self.values = values\n self.p = p", "def __init__(\n self,\n oxygen: int = None,\n pulse: int = None,\n blood_pressure_systolic: int = None,\n blood_pressure_diastolic: int = None,\n ):\n self.oxygen = oxygen\n self.pulse = pulse\n self.blood_pressure_systolic = blood_pressure_systolic\n self.blood_pressure_diastolic = blood_pressure_diastolic", "def __init__(self, parant):\n pass", "def __init__(self, x):\n self.x = np.array(x)", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All motion starts at (0,0).\n self.x_values 
= [0]\n self.y_values = [0]", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Bernoulli'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self,x_pos, y_pos, velocity, kind, fillcolor = 'red'):\n self._velocity = velocity\n self._kind = kind\n super().__init__(x = x_pos, y=y_pos, width = BOLT_WIDTH, \\\n height = BOLT_HEIGHT, fillcolor=fillcolor)", "def __init__(self, number_of_particles, restitution_coefficient, initial_positions, initial_velocities, masses,\n radii, pbc):\n self.N = number_of_particles # amount of particles\n self.restitution_coefficient = restitution_coefficient # coefficient determining the energy lost in collisions\n # initialize variables used in the class\n self.positions = np.zeros((self.N, 3)) # positions of particles\n self.initial_positions = np.zeros((self.N, 3)) # help variable to compute mean square displacement\n self.velocities = np.zeros((self.N, 3)) # velocities of particles\n self.masses = np.zeros(self.N) # mass of each particle\n self.radii = np.zeros(self.N) # radius of each particle\n self.collision_count_particles = np.zeros(self.N) # array keeping track of the number of collisions\n\n # set parameters equal to the input to the class. Use .copy() such that the parameters can be used in outer loop\n self.positions = initial_positions.copy()\n self.initial_positions = initial_positions.copy()\n self.velocities = initial_velocities.copy()\n self.masses = masses\n self.radii = radii\n # a priority queue / heap queue of tuples of (time_collision, collision_entities, collision_count when\n # computing the collision, box number of the particles). The collision count at computation is used to\n # ignore non-valid collisions due to the involved particles being in other collisions between computation and\n # collision. Box number is needed for the pbc.\n self.collision_queue = [] # heap queue needs list structure to work\n\n # In order to create 27 copies for pbc in three dimensions one need to known their relation to the original\n # box. These are given by offsets. Offsets is also used to correct positions of particles colliding in\n # different boxes (due to the pbc).\n self.offsets = [(-1, 1, 1), (0, 1, 1), (1, 1, 1), (-1, 0, 1), (0, 0, 1), (1, 0, 1), (-1, -1, 1), (0, -1, 1),\n (1, -1, 1), (-1, 1, 0), (0, 1, 0), (1, 1, 0), (-1, 0, 0), (0, 0, 0), (1, 0, 0), (-1, -1, 0),\n (0, -1, 0), (1, -1, 0), (-1, 1, -1), (0, 1, -1), (1, 1, -1), (-1, 0, -1), (0, 0, -1),\n (1, 0, -1), (-1, -1, -1), (0, -1, -1), (1, -1, -1)]\n # Crossings is used to compute current positions due to the periodic boundary conditions. 
It essentially get\n # updated every time a particle cross the edge in the x-, y- or z-direction.\n self.crossings = np.zeros((self.N, 3))\n\n self.pbc = pbc # periodic boundary conditions", "def __init__(self):\n\n\t\tself.position = np.array([0, 0])", "def __init__(self, X, y):\n pass", "def __init__(self, spawner: Spawner, data: Any, pt: Sequence[float]):\n self.spawner = spawner\n self.data = data\n self.pt = pt # pylint: disable=invalid-name", "def __init__(self,x=0, y=0):\n self.x = x\n self.y = y", "def __init__(\n self,\n model,\n particles: int,\n resampling: Callable[[torch.Tensor], torch.Tensor] = systematic,\n proposal: Union[str, Proposal] = \"auto\",\n ess=0.9,\n **kwargs\n ):\n\n super().__init__(model, **kwargs)\n\n self.register_buffer(\"_particles\", torch.tensor(particles, dtype=torch.int))\n self._resample_threshold = ess\n self._resampler = resampling\n\n if proposal == \"auto\":\n try:\n proposal = _PROPOSAL_MAPPING[self._model.__class__.__name__]()\n except KeyError:\n proposal = Bootstrap()\n\n self._proposal = proposal.set_model(self._model) # type: Proposal", "def __init__(self, x = 0, y = 0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, particle_id, size):\r\n self.values = list()\r\n self.velocities = list()\r\n self.personal_best = list()\r\n \r\n self.particle_id = particle_id\r\n self.size = size\r\n self.graph = None\r\n self.best_coloring = None\r\n self.sync = False\r\n \r\n # Initializes the fitness as an arbitrary bad value. 
\r\n self.best_fitness = -(2**63)\r\n self.current_fitness = self.best_fitness\r\n \r\n # Initialise values to random numbers within the range.\r\n for index in range(self.size):\r\n self.values.append(random.uniform(\r\n Particle.MIN_VALUE, Particle.MAX_VALUE)\r\n )\r\n self.velocities.append(0);\r\n \r\n # Since there is no previous values, the current value is the best\r\n self.personal_best = self.values[:]", "def __init__(self,):\r\n self.g = 9.81\r\n self.l = 0.5\r\n self.m1 = 1.0\r\n self.m2 = 1.0\r\n self.m3 = 1.0\r\n self.r1 = 1.0\r\n self.r2 = 1.0\r\n self.tau = 0.001\r\n self.theta1 = 1.0\r\n self.theta2 = 1.0\r\n self.theta3 = 1.0", "def __init_primitive(self, point_x, point_y, point_z):\n self.x = point_x\n self.y = point_y\n self.z = point_z", "def initialise_particle_cloud(self, initialpose):\n # ----- Initialize the particle cloud as an empty array\n self.particlecloud = PoseArray()\n\n \"\"\"Create the noise to multiply by the random Gaussian number that will\n get added to each of the Poses, that are set to a random position\n and orientation around the initial pose\"\"\"\n sensorSigma=3 #variance\n sensorMu=0 #mean\n noise=sensorSigma * numpy.random.randn() + sensorMu\n\n \"\"\"Create a range for the ammount of random Gaussian values to generate \"\"\"\n randomGauss = 10*self.NUMBER_PREDICTED_READINGS\n\n gaussianRandomNumX = []\n gaussianRandomNumY = []\n randomYawArray = []\n\n for i in range (0,randomGauss):\n gaussianRandomNumX.append(random.gauss(0,1))\n gaussianRandomNumY.append(random.gauss(0,1))\n x=random.randint(1,180)\n randomYaw=(math.pi/x)\n randomYawArray.append(randomYaw)\n\n iterator = 0\n\n \"\"\"\n\t Set the particles to a random position and orientation around the initial pose\n \"\"\"\n particleNumber = 10**2 # 10**3 # 10**4 # 10**5 experiment with different ammounts of particles\n\n while iterator < particleNumber:\n particle = Pose()\n particle.position.x = initialpose.pose.pose.position.x + (gaussianRandomNumX[iterator] * noise)\n particle.position.y = initialpose.pose.pose.position.y + (gaussianRandomNumY[iterator] * noise)\n particle.position.z = initialpose.pose.pose.position.z\n particle.orientation = rotateQuaternion(initialpose.pose.pose.orientation, randomYawArray[iterator])\n\n self.particlecloud.poses.append(particle)\n iterator += 1\n\n return self.particlecloud", "def __init__(self, pts=[]):\n self.set_points(pts)", "def __init__(self, graph):\r\n self.population = list()\r\n self.leader = -1\r\n self.iteration = 0\r\n \r\n for index in range(POPULATION_SIZE):\r\n p = Particle(index, PARTICLE_SIZE)\r\n p.graph = copy.deepcopy(graph)\r\n self.population.append(p)\r\n \r\n self.find_leader()", "def __init__(self, x_pos, y_pos, radius, colour, moving = False):\n\t\t\n\t\tself.x_pos = x_pos\n\t\tself.y_pos = y_pos\n\t\tself.radius = radius\n\t\tself.diameter = 2*radius\n\t\tself.colour = colour\n\t\tself.moving = moving\n\t\tself.x_vec = 0\n\t\tself.y_vec = 0", "def create_particles(self):\n # xf, yf = create_fluid_with_solid_cube()\n xf, yf = create_fluid()\n uf = np.zeros_like(xf)\n vf = np.zeros_like(xf)\n m = initialize_mass(xf, yf)\n rho = initialize_density_fluid(xf, yf)\n h = np.ones_like(xf) * self.hdx * self.dx\n fluid = get_particle_array_wcsph(x=xf, y=yf, h=h, m=m, rho=rho, u=uf,\n v=vf, name=\"fluid\")\n\n xt, yt = create_boundary(self.dx / 2.)\n ut = np.zeros_like(xt)\n vt = np.zeros_like(xt)\n m = np.ones_like(xt) * 1500 * self.dx * self.dx\n rho = np.ones_like(xt) * 1000\n h = np.ones_like(xt) * self.hdx * self.dx / 2.\n tank 
= get_particle_array_wcsph(x=xt, y=yt, h=h, m=m, rho=rho, u=ut,\n v=vt, name=\"tank\")\n\n return [fluid, tank]", "def __init__(self):\n self.pidDict = { # particle_name, pid\n \"total\" : 0,\n \"charged\" : 1,\n \"charged_eta\" : 2,\n \"pion\" : 6, # sum(7, 8, -7)\n \"pion_p\" : 7,\n \"pion_0\" : 8,\n \"pion_m\" : -7,\n \"kaon\" : 11, # sum(12, 13)\n \"kaon_p\" : 12,\n \"kaon_0\" : 13,\n \"anti_kaon\" : -11, # sum(-12, -13)\n \"kaon_m\" : -12,\n \"anti_kaon_0\" : -13,\n \"nucleon\" : 16, # sum(17, 18)\n \"proton\" : 17,\n \"neutron\" : 18,\n \"anti_nucleon\" : -16, # sum(-17, -18)\n \"anti_proton\" : -17,\n \"anti_neutron\" : -18,\n \"sigma\" : 21, # sum(22, 23, 24)\n \"sigma_p\" : 22,\n \"sigma_0\" : 23,\n \"sigma_m\" : 24,\n \"anti_sigma\" : -21,\n \"anti_sigma_p\" : -22,\n \"anti_sigma_0\" : -23,\n \"anti_sigma_m\" : -24,\n \"xi\" : 26, # sum(27, 28)\n \"xi_0\" : 27,\n \"xi_m\" : 28,\n \"anti_xi\" : -26,\n \"anti_xi_0\" : -27,\n \"anti_xi_m\" : -28,\n \"lambda\" : 31,\n \"anti_lambda\" : -31,\n \"omega\" : 36,\n \"anti_omega\" : -36,\n \"phi\" : 41,\n \"rho\" : 46, #sum(47, 48, -47)\n \"rho_p\" : 47,\n \"rho_0\" : 48,\n \"rho_m\" : -47,\n \"eta\" : 51,\n \"eta_prime\" : 52,\n \"gamma\" : 61,\n \"omega782\" : 65,\n \"eta\" : 71,\n \"etap\" : 72,\n }\n\n for aParticle in self.pidDict.keys():\n if self.pidDict[aParticle]>=0:\n self.pidDict[aParticle+\"_hydro\"] = self.pidDict[aParticle]+1000\n else:\n self.pidDict[aParticle+\"_hydro\"] = self.pidDict[aParticle]-1000\n if self.pidDict[aParticle]>=0:\n self.pidDict[aParticle+\"_thermal\"] = self.pidDict[aParticle]+2000\n else:\n self.pidDict[aParticle+\"_thermal\"] = self.pidDict[aParticle]-2000\n\n self.pidDict.update({\n \"photon_total\" : 9000,\n \"photon_total_eq\" : 9001,\n \"photon_QGP_tot\" : 9002,\n \"photon_QGP_eq\" : 9003,\n \"photon_HG_tot\" : 9004,\n \"photon_HG_eq\" : 9005,\n \"direct_gamma_shortdecay_hydro\" : 9006,\n \"decay_gamma_pi0_hydro\" : 9007,\n \"decay_gamma_eta_hydro\" : 9008,\n \"decay_gamma_omega_hydro\" : 9009,\n \"decay_gamma_phi_hydro\" : 9010,\n \"decay_gamma_etap_hydro\" : 9011,\n \"decay_gamma_Sigma0_hydro\" : 9012,\n })\n\n #UrQMD pid Dictionary, name conversion defined as in binUtility\n self.UrQMDpidDict = { #particle name, UrQMD id# : isospin*2000 + pid\n 2101 : \"pion_p\",\n -1899 : \"pion_m\",\n 101 : \"pion_0\",\n 1106 : \"kaon_p\",\n -894 : \"kaon_0\",\n -1106 : \"kaon_m\",\n 894 : \"anti_kaon_0\",\n 1001 : \"proton\",\n -999 : \"neutron\",\n -1001 : \"anti_proton\",\n 999 : \"anti_neutron\",\n 2040 : \"sigma_p\",\n -1960 : \"sigma_m\",\n 40 : \"sigma_0\",\n -2040 : \"anti_sigma_p\",\n 1960 : \"anti_sigma_m\",\n -40 : \"anti_sigma_0\",\n 1049 : \"xi_0\",\n -951 : \"xi_m\",\n -1049 : \"anti_xi_0\",\n 951 : \"anti_xi_m\",\n 27 : \"lambda\",\n -27 : \"anti_lambda\",\n 55 : \"omega\",\n -55 : \"anti_omega\",\n 109 : \"phi\",\n 102 : \"eta\",\n 107 : \"eta_prime\",\n 100 : \"gamma\",\n }\n\n #pdg pid Dictionary\n self.PDGpidDict = { #pdg id#, particle name\n 211 : \"pion_p\",\n -211 : \"pion_m\",\n 111 : \"pion_0\",\n 321 : \"kaon_p\",\n 311 : \"kaon_0\",\n -321 : \"kaon_m\",\n -311 : \"anti_kaon_0\",\n 2212 : \"proton\",\n 2112 : \"neutron\",\n -2212 : \"anti_proton\",\n -2112 : \"anti_neutron\",\n 3222 : \"sigma_p\",\n 3112 : \"sigma_m\",\n 3212 : \"sigma_0\",\n -3222 : \"anti_sigma_p\",\n -3112 : \"anti_sigma_m\",\n -3212 : \"anti_sigma_0\",\n 3322 : \"xi_0\",\n 3312 : \"xi_m\",\n -3322 : \"anti_xi_0\",\n -3312 : \"anti_xi_m\",\n 3122 : \"lambda\",\n -3122 : \"anti_lambda\",\n 3334 : \"omega\",\n -3334 
: \"anti_omega\",\n 333 : \"phi\",\n 221 : \"eta\",\n 331 : \"eta_prime\",\n 22 : \"gamma\",\n }\n\n #particle mass Dictionary (unit in GeV)\n self.masspidDict = {\n \"pion\" : 0.13957,\n \"pion_p\" : 0.13957,\n \"pion_0\" : 0.13498,\n \"pion_m\" : 0.13957,\n \"kaon\" : 0.49368,\n \"kaon_p\" : 0.49368,\n \"kaon_0\" : 0.49765,\n \"anti_kaon\" : 0.49368,\n \"kaon_m\" : 0.49368,\n \"anti_kaon_0\" : 0.49765,\n \"nucleon\" : 0.93827,\n \"proton\" : 0.93827,\n \"neutron\" : 0.93957,\n \"anti_nucleon\" : 0.93827,\n \"anti_proton\" : 0.93827,\n \"anit_neutron\" : 0.93957,\n \"sigma\" : 1.18937,\n \"sigma_p\" : 1.18937,\n \"sigma_0\" : 1.19264,\n \"sigma_m\" : 1.19745,\n \"anti_sigma\" : 1.18937,\n \"anti_sigma_p\" : 1.18937,\n \"anti_sigma_0\" : 1.19264,\n \"anti_sigma_m\" : 1.19745,\n \"xi\" : 1.31483,\n \"xi_0\" : 1.31483,\n \"xi_m\" : 1.32131,\n \"anti_xi\" : 1.31483,\n \"anti_xi_0\" : 1.31483,\n \"anti_xi_m\" : 1.32131,\n \"lambda\" : 1.11568,\n \"anti_lambda\" : 1.11568,\n \"omega\" : 1.67243,\n \"anti_omega\" : 1.67243,\n \"rho\" : 0.77580,\n \"rho_p\" : 0.77580,\n \"rho_0\" : 0.77580,\n \"rho_m\" : 0.77580,\n \"phi\" : 1.01946,\n \"eta\" : 0.54775,\n \"eta_prime\" : 0.95778,\n \"gamma\" : 0.0,\n }\n for aParticle in self.masspidDict.keys():\n self.masspidDict[aParticle+\"_hydro\"] = self.masspidDict[aParticle]\n self.masspidDict[aParticle+\"_thermal\"] = self.masspidDict[aParticle]\n\n # charged hadrons list\n self.charged_hadron_list = [\n \"pion_p\", \"pion_m\", \"kaon_p\", \"kaon_m\", \"proton\", \"anti_proton\",\n \"sigma_p\", \"sigma_m\", \"anti_sigma_p\", \"anti_sigma_m\",\n \"xi_m\", \"anti_xi_m\"]", "def new_star_particle():\n function = LegacyFunctionSpecification()\n function.must_handle_array = True\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT, description =\n \"\"\"\n An index assigned to the newly created particle.\n This index is supposed to be a local index for the code\n (and not valid in other instances of the code or in other codes)\n \"\"\"\n )\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, direction=function.IN, \n description = \"The initial position vector of the particle\")\n function.addParameter('radius', dtype='float64', unit=generic_unit_system.length, direction=function.IN, description = \"The radius of the particle\")\n for par in [\"red\", \"green\", \"blue\"]:\n function.addParameter(par, dtype='float64', direction=function.IN, \n description = \"The RGB color of the particle\")\n function.addParameter(\"alpha\", dtype='float64', direction=function.IN, description = \"The opacity of the particle\", default = 1.0)\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n return function", "def __init__(self, stars_x, stars_y, stars_f):\n self.xpos = stars_x\n self.ypos = stars_y\n self.flux = stars_f\n\n return" ]
[ "0.846487", "0.8411835", "0.7966551", "0.7966551", "0.7707181", "0.76299787", "0.74913836", "0.7429638", "0.73224574", "0.72718245", "0.7198867", "0.699789", "0.6965769", "0.6918702", "0.6917369", "0.68837965", "0.68785137", "0.68018293", "0.67048806", "0.6683209", "0.6675657", "0.66272813", "0.6595465", "0.6592495", "0.6568835", "0.65675277", "0.65499866", "0.654518", "0.65245897", "0.6470143", "0.6462984", "0.6451504", "0.643961", "0.64152795", "0.6401495", "0.64000136", "0.640001", "0.6390891", "0.6361036", "0.634712", "0.634297", "0.63397676", "0.6335422", "0.6329078", "0.6324455", "0.6316157", "0.6308525", "0.6307478", "0.6304348", "0.628092", "0.6280218", "0.62748426", "0.6266774", "0.6266764", "0.6260895", "0.6246678", "0.62385553", "0.62327844", "0.6225411", "0.62185895", "0.62003154", "0.6200066", "0.6200066", "0.6200066", "0.6200066", "0.61977595", "0.6173828", "0.61714154", "0.61703116", "0.6165774", "0.61581457", "0.6155874", "0.6154586", "0.6147693", "0.6143731", "0.6142938", "0.6134307", "0.61325735", "0.6131517", "0.6131517", "0.6131517", "0.6131517", "0.6131517", "0.6131517", "0.6131517", "0.6131517", "0.6131517", "0.6131517", "0.6131517", "0.6128285", "0.61252207", "0.6120408", "0.61201847", "0.6112784", "0.6110927", "0.6110673", "0.6108878", "0.61035204", "0.60976005", "0.60957354" ]
0.7450836
7
Method that returns the rest energy of the particle.
def RestEnergy(self):
    return (self.restMass * const.speed_of_light * const.speed_of_light)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy", "def TotalEnergy(self):\n return (math.sqrt((Particle.RestEnergy(self) ** 2)\n + (np.linalg.norm(Particle.Momentum(self)) * const.speed_of_light) ** 2))", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)", "def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)", "def get_energy(self):\r\n return self._energy", "def getEnergy(self):\n if not hasattr(self,\"energy\"):\n self.energy = self.calcEnergy()\n return self.energy", "def energy(self):\n return self._energy", "def energy(self):\n return self.mc.energy(self.chain)", "def KineticEnergy(self):\n return Particle.TotalEnergy(self) - Particle.RestEnergy(self)", "def energy(self) -> Union[int, float]:\n return self.proto.energy", "def energy(self) -> Union[int, float]:\n return self.proto.energy", "def get_total_energy_produced (self):\n return self.pre_intertie_generation[:self.actual_project_life]", "def E(self):\n return self.generic_getter(get_energy, \"E\", \"convert_energy\")", "def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))", "def energy(self):\n return self._accelerator.energy", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def energy(self, r):\n sigma = self.params['sigma']\n epsilon = self.params['epsilon']\n s = sigma / r\n s6 = s**6; s12 = s6 * s6\n pot = 4.0 * epsilon * (s12 - s6)\n return pot", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def getEnergy(self):\n energy = 0.0\n\n for i in range(0, self.nPoints):\n energy += self.tDomain[i] ** 2\n\n energy /= self.nPoints\n return energy", "def getEnergyEvolution(self):\n\n\t\tEBefore = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleBefore]\n\t\tEAfter = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleAfter]\n\n\t\treturn EBefore, EAfter", "def get_energy():\n\n # open the psi4 log file\n with open('output.dat', 'r') as log:\n lines = log.readlines()\n\n # find the total converged energy\n for line in lines:\n if 'Total Energy =' in line:\n energy = float(line.split()[3])\n break\n else:\n raise EOFError('Cannot find energy in output.dat file.')\n\n return energy", "def get_energy():\n\n # open the psi4 log file\n with open(\"output.dat\", \"r\") as log:\n for line in log:\n if \"Total Energy =\" in line:\n return float(line.split()[3])\n\n raise EOFError(\"Cannot find energy in output.dat file.\")", "def 
compute_energy(self):\n energy = 0.5 * self.masses * np.sum(self.velocities * self.velocities, axis=1)\n avg_energy = np.mean(energy) # average kinetic energy of all particles\n return avg_energy", "def get_e(self):\n return self.e_min + self.e_ * self.e_range", "def potentialEnergy(self):\n return 0.5*(pdist(self.positions)**2).sum()", "def total_energy(self):\n return self._total_energy", "def energy(ps):\n return kinetic_energy(ps) + potential_energy(ps)", "def get_energy(self):\n return self.bot_client.send_command(_Command.GetEnergy)", "def energy(self):\n e = 0\n\n restoration = RestorationModel(self.graph_damaged)\n restoration.run(self.state)\n restoration_graphs = restoration.get_restoration_graphs()\n restoration_times = restoration.get_restoration_times()\n restoration_costs = restoration.get_restoration_costs()\n\n damaged = []\n damaged.append(get_delta(self.no_damage, self.initial_damage))\n\n sim_results = Parallel(n_jobs=4)(delayed(parallel_model)(\n graph, self.od_graph, self.od_matrix) for graph in restoration_graphs[:-1])\n for values in sim_results:\n damaged.append(get_delta(self.no_damage, values))\n\n for idx, values in enumerate(damaged):\n dt = restoration_times[idx] if idx == 0 else restoration_times[idx] - \\\n restoration_times[idx-1]\n e += sum(restoration_costs[idx]) + dt * (self.day_factor * values[2] * np.sum(self.mu*self.xi) +\n values[3] * np.sum(self.mu * (self.nu * self.F_w + self.rho)) + values[4] * self.upsilon)\n with open(self.fdir+'energy.csv', 'a') as f:\n f.write('\\n'+str(e))\n\n return e", "def energy_func(self):\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI))", "def block_energy():\r\n\r\n my_block = q.get()\r\n my_block = my_block.flatten()\r\n energy = np.sum(my_block ** 2)\r\n return energy, my_block", "def energy(self):\n nocc, ntot, gmo, e = self.nocc, self.ntot, self.gmo, self.e\n\n Ec = 0.0\n for i in range(nocc):\n for j in range(nocc):\n for a in range(nocc, ntot):\n for b in range(nocc, ntot):\n Ec += gmo[i, a, j, b]*(2*gmo[i, a, j, b] - gmo[i, b, j, a])/\\\n (e[i] + e[j] - e[a] - e[b])\n\n self.Ec = Ec\n self.E_mp2 = Ec + self.E_scf\n\n print('@MP2 correlation energy: {:15.10f}\\n'.format(self.Ec))\n print('@Total MP2 energy: {:15.10f}\\n'.format(self.E_mp2))\n\n return self.E_mp2", "def energy(self):\n nocc, gmo, e = self.nocc, self.gmo, self.e\n\n Ec = 0.0\n for i in range(nocc):\n for j in range(nocc):\n for a in range(nocc, len(e)):\n for b in range(nocc, len(e)):\n Ec += (1/4.0) * gmo[i, j, a, b]**2 / (e[i]+e[j]-e[a]-e[b])\n\n self.Ec = Ec\n self.E_mp2 = Ec + self.E_scf\n\n print('@MP2 correlation energy: {:15.10f}\\n'.format(self.Ec))\n print('@Total MP2 energy: {:15.10f}\\n'.format(self.E_mp2))\n\n return self.E_mp2", "def ComputeEnergyConsumption(self):\r\n pass", "def computeEnergy(self):\n _cgco.gcoComputeEnergy(self.handle, self.energyTempArray)\n return self._convertEnergyBack(self.energyTempArray[0])", "def get_energies(self):\n N = len(self.particles)\n\n # Use C++ version if cppenabled\n if(self.cppenabled):\n energies = np.zeros(3) # Initialises Energy output array\n accelerate_lib.c_getenergies(self.get_positions(), self.get_velocities(), \\\n energies, self.boxdim, self.LJ_cutoff)\n return np.array(energies)\n\n # Python calculation if cppenabled = False:\n pot = Total_PE(self.particles, self.LJ_cutoff, self.boxdim)\n kin = Total_KE(self.get_velocities())\n\n return np.array([pot, kin, pot+kin])", "def 
energy_function(self):\n E = 0\n for i in range(len(self.config)):\n for j in range(len(self.config)):\n s = self.config[i,j]\n #Calculate the impact of neighboring particle pairs\n neighbors = (self.config[(i+1)%L, j] +\n self.config[i, (j+1)%L] + \n self.config[(i-1)%L, j] + \n self.config[i, (j-1)%L])\n E += -J*s*neighbors\n #fix for extra neighbors\n return E/4", "def energy(energy_name: str) -> float:\n pass", "def energy(self):\n sum_energy = 0.0\n for i in range(0,self.natoms-1):\n for j in range(i+1,self.natoms):\n rij = (self.atoms[i].xyz - self.atoms[j].xyz)\n rij = rij - self.pbc_correction(rij)\n mag_rij = la.norm(rij)\n sum_energy = sum_energy + self.pair_energy(self.epsilon, self.sigma, mag_rij) \n return sum_energy", "def energyplus_its(self):\n if self._energyplus_its is None:\n self._energyplus_its = 0\n return self._energyplus_its", "def getEnergy(self, normalized=True, mask=None):\n psi = self.psi.get()\n n = self.n.get()\n density = np.absolute(psi) ** 2\n gradx = np.gradient(psi)[0]\n normFactor = density.sum() if normalized else 1.0\n return np.ma.array(-(0.25 * np.gradient(\n np.gradient(density)[0])[0]\n - 0.5 * np.absolute(gradx) ** 2\n - (self.g_C * density + self.g_R * n)\n * density), mask=mask).sum() / normFactor", "def getEnergyExpended(self, sample):\r\n if sample is not None:\r\n if len(sample._data) > self.ENERGY_EXPENDED_INDEX:\r\n ee = sample._data[self.ENERGY_EXPENDED_INDEX]\r\n if ee is not None:\r\n return int(ee)\r\n return -1", "def energy_yield(self):\n return self['kwh_per_kw']", "def energy(e: float) -> float:\n\n return (1/np.sqrt(2))*(gamma(-e/2+1/2)/(gamma(-e/2+3/4)))", "def energy_atom(atom,layer):\n global r,c,h\n backval= r*((atom**2/layer**2))\n return float('%.2E' % Decimal(str(backval)))", "def _get_mean(self):\n return self._get_conditional_negative_energy()", "def energy_pfu(self):\n return self._energy_pfu", "def kinetic_energy(self):\r\n return self.mass * np.dot(self.vel, self.vel) / 2", "def internalenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n u = g - temp*g_t - pres*g_p\n return u", "def kinetic_energy(self):\r\n position, velocity, escaped_particles,impact, wall_collision,mom = self.box_collision_info()\r\n for j in xrange(1,self.n):\r\n abs_velocity = np.sqrt(velocity[:,0]**2+velocity[:,1]**2\r\n + velocity[:,2]**2)\r\n KE = 0.5*self.m*abs_velocity**2\r\n total_KE = np.sum(KE)\r\n invid_KE = total_KE/self.Npart\r\n\r\n return total_KE, invid_KE", "def energy(p,m):\n return math.sqrt(p*p + m*m)", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def get_energy(self, position):\n\n # update the positions of the system\n self.simulation.context.setPositions(position)\n\n # Get the energy from the new state\n state = self.simulation.context.getState(getEnergy=True)\n\n energy = state.getPotentialEnergy().value_in_unit(unit.kilocalories_per_mole)\n\n return energy", "def etol(self) -> PotentialEnergy:\n return self._etol", "def energy_tot(P,F,H,molecule):\n return energy_el(P,F,H) + energy_n(molecule)", "def finalE(self, file, path, chunk=100):\n reader = IO(file, path)\n tail = reader.tail(chunk)\n marker = 'FINAL SINGLE POINT ENERGY'\n energyline = [s for s in tail if marker in s]\n if chunk > 1000:\n self.logfile.appendline(file + ': cannot find final energy.')\n return np.nan\n elif energyline == []:\n return self.finalE(file, path, chunk+100)\n else:\n return float(energyline[-1].split()[-1])", "def 
incident_energy(self):\n return self._incident_energy", "def last_energy_change(self) -> PotentialEnergy:", "def last_energy_change(self) -> PotentialEnergy:", "def total_KE(particles):\r\n return sum([particle.kinetic_energy() for particle in particles])", "def energy(n):\n return (n * pi * hbar / (2 * a)) ** 2 / (2 * m)", "def get_total_energy_produced (self):\n return self.net_generation_wind", "def computeEnergy(self):\n\t\tGmo = self.Gmo\n\t\te = self.e\n\t\tself.Ec = 0.0\n\n\t\tfor i in range( self.nocc ):\n\t\t\tfor j in range( self.nocc ):\n\t\t\t\tfor a in range( self.nocc,self.norb ):\n\t\t\t\t\tfor b in range( self.nocc,self.norb ):\n\t\t\t\t\t\tself.Ec += 0.25*(Gmo[i,j,a,b]*Gmo[a,b,i,j])/(e[i]+e[j]-e[a]-e[b])\n\n\t\treturn self.E0 + self.Ec", "def intEnergy_mass(self):\n return _cantera.reactor_intEnergy_mass(self.__reactor_id)", "def energy(nx,ny):\n return 1+nx+ny", "def send_energy(self) -> float:\n # Ensure that the molecule currently passes validation\n if not self.molecule_validated:\n raise Exception(\"MDI attempting to compute energy on an unvalidated molecule\")\n self.run_energy()\n properties = self.compute_return.properties.dict()\n energy = properties[\"return_energy\"]\n MDI_Send(energy, 1, MDI_DOUBLE, self.comm)\n return energy", "def energy(data):\n return sum(pow(data, 2))", "def self_energy(gf_imp0, gf_imp):\n return 1/gf_imp0 - 1/gf_imp", "def _UpdateEnergy(self):\n self.mol.GetEnergy('nokinetic')", "def current_energy_produced(self):\n return self.df.exp.sum()", "def delta_energy(atom,layer1,layer2):\n global r,c,h\n return float('%.2E' % Decimal(str(r*((atom**2/layer1**2)-(atom**2/layer2**2)))))", "def getEnergyAdded(self):\n return self.json_state.get(\"charging\").get(\"wh_energy\")", "def _energy(self, X, y):\n yhat = self.evaluate(X)\n loss = ((y - yhat) ** 2).sum() / 2\n return loss", "def absorption_energy_eV(self):\n return self._absorption_energy_eV.copy()", "def getEnergy(self, normalized=True, mask=None):\n if self.gpu:\n psi = self.psi.get()\n V = self.Vdt.get() / self.dt\n else:\n psi = self.psi\n V = self.Vdt.get() / self.dt\n density = np.absolute(psi) ** 2\n gradx = np.gradient(psi)[0]\n normFactor = density.sum() if normalized else 1.0\n return np.ma.array(-(0.25 * np.gradient(\n np.gradient(density)[0])[0]\n - 0.5 * np.absolute(gradx) ** 2\n - (self.g_C * density + V)\n * density), mask=mask).sum() / normFactor", "def energies(self) -> np.ndarray:\n return np.array([item.energy for item in self])", "def energy(self, state):\n return _modeller.mod_state_optimizer_energy(self._modpt,\n self.__edat.modpt,\n state, self.__libs.modpt)", "def energy(data):\n\n return np.real(np.mean(np.abs(data)**2, axis=1))", "def find_local_energy(self):\n state = self.current_state\n (mat_elements, spin_flip_sites) = self.hamiltonian.find_nonzero_elements(state)\n\n flipped_states = [np.copy(state) for _ in spin_flip_sites]\n for i, site in enumerate(spin_flip_sites):\n flipped_states[i][0][site] *= -1\n\n energies = [self.amplitude_ratio(state, flipped_states[i])* element for (i, element) in enumerate(mat_elements)]\n return sum(energies)", "def compute_energy(self):\n\n # radiation energy\n Qsqrd = self.omega_coords[:,:,1]*self.omega_coords[:,:,1]\n Psqrd = self.omega_coords[:,:,0]*self.omega_coords[:,:,0]\n\n e_rad = (Psqrd/self.mode_mass + (self.mode_mass*self.omega**2)*Qsqrd)*.5\n\n # space charge energy\n Dsqrd = self.dc_coords[:,:,0]*self.dc_coords[:,:,0]\n\n e_drft = Dsqrd/(2.*self.mode_mass)\n\n energy = e_rad+e_drft\n\n return energy", "def 
calculate_energy(self):\n temp_e = 0\n\n for i in range(0,self.neuron_count):\n for j in range(0, self.neuron_count):\n if i != j:\n temp_e += self.get_weight(i, j) * self.current_state[i] * \\\n self.current_state[j]\n return -1 * temp_e / 2", "def energy_percentage(self) -> Union[int, float]:\n if not self.proto.energy_max:\n return 0\n return self.proto.energy / self.proto.energy_max", "def energy_percentage(self) -> Union[int, float]:\n if not self.proto.energy_max:\n return 0\n return self.proto.energy / self.proto.energy_max", "def calc_gravitational_energy(self):\n\n star = self.star\n\n K, N = star.mesh_size\n mu = star.mu_coords\n r = star.r_coords\n\n def S1(j):\n return np.sum((mu[2::2] - mu[:-2:2]) * (star.rho[:-2:2, j] * star.Phi[:-2:2, j] +\n 4 * star.rho[1:-1:2, j] * star.Phi[1:-1:2, j] +\n star.rho[2::2, j] * star.Phi[2::2, j])) / 6\n\n W = 0\n\n for j in range(0, N - 2, 2):\n W += (r[j + 2] - r[j]) * (r[j]**2 * S1(j) +\n 4 * r[j + 1]**2 * S1(j + 1) +\n r[j + 2]**2 * S1(j + 2))\n\n return -1 / 3 * np.pi * W", "def computeDataEnergy(self):\n _cgco.gcoComputeDataEnergy(self.handle, self.energyTempArray)\n return self._convertEnergyBack(self.energyTempArray[0])", "def free_energy(self, T=0, P=0, mus={}):\n # global environment\n return self.energy", "def ee_radius_diffraction(self, energy=FIRST_AIRY_ENCIRCLED):\n return _inverse_analytic_encircled_energy(self.fno, self.wavelength, energy)", "def stEnergy(frame):\n return numpy.sum(frame ** 2) / numpy.float64(len(frame))", "def ee_radius(self, energy=FIRST_AIRY_ENCIRCLED):\n k, v = list(self._ee.keys()), list(self._ee.values())\n if energy in v:\n idx = v.index(energy)\n return k[idx]\n\n def optfcn(x):\n return (self.encircled_energy(x) - energy) ** 2\n\n # golden seems to perform best in presence of shallow local minima as in\n # the encircled energy\n return optimize.golden(optfcn)", "def _calc_energy( self, V_a, eos_d ):\n pass", "def electronicMinusHalfEntropy(self):\n\t\tif self.entropy is None:\n\t\t\toutVal = None\n\t\telse:\n\t\t\toutVal = self.electronicTotalE - (0.5*self.entropy)\n\t\treturn outVal", "def get_econs(self):\n eham = self.beads.vpath*self.nm.omegan2 + self.nm.kin + self.forces.pot\n eham += self.bias.pot # bias\n for e in self._elist:\n eham += e.get()\n\n return eham + self.eens", "def stEnergy(frame):\n return np.sum(frame ** 2) / np.float64(len(frame))", "def compute_energy(self, protein): \n return utils.score_pose(protein.pose, self.scorefxn)", "def load_energy(self, zpe_scale_factor=1.):\n e_elect = None\n with open(self.path, 'r') as f:\n for line in f:\n if 'FINAL SINGLE POINT ENERGY' in line: # for all methods in Orca\n e_elect = float(line.split()[-1])\n if e_elect is None:\n raise LogError('Unable to find energy in Orca output file.')\n return e_elect * constants.E_h * constants.Na", "def E(self):\n return self._properties['E']", "def get_total_energy(parameters):\n return orm.Float(parameters.get_attribute('energy'))", "def mass_energy(particle: Particle, mass_numb: Optional[Integral] = None) -> u.J:\n return particle.mass_energy", "def get_data_term(self):\n \n if self.num_hidden == 0:\n \n data_term = -self.compute_energy(self.x, self.batch_size)\n \n else:\n \n data_term = -self.compute_free_energy(self.x)\n \n return T.sum(T.exp(-data_term))", "def energy(self):\n E = sum([1 for c in self.constraints if self._is_constraint_violated(c)])\n if E == 0:\n self._save_solution()\n print(\"exiting...\")\n exit()\n return E" ]
[ "0.7668576", "0.7595888", "0.7482029", "0.7454896", "0.74329084", "0.74117565", "0.7405934", "0.7287761", "0.7283499", "0.70380706", "0.70380706", "0.6973054", "0.69607884", "0.6895652", "0.67723006", "0.6750274", "0.67392176", "0.67377317", "0.6689237", "0.6675212", "0.66727716", "0.662599", "0.6617034", "0.65936416", "0.6591", "0.6571902", "0.65373886", "0.6530975", "0.6525246", "0.6520253", "0.6493072", "0.6484672", "0.64761317", "0.6466534", "0.64517367", "0.6449745", "0.6434459", "0.6414185", "0.6394232", "0.63889843", "0.6388255", "0.6375405", "0.63644403", "0.63588965", "0.63365793", "0.63231593", "0.63146126", "0.6313592", "0.6310332", "0.6301231", "0.6297854", "0.62960476", "0.6279603", "0.62734455", "0.627314", "0.6259053", "0.62433743", "0.6209346", "0.6209346", "0.62072456", "0.6199067", "0.61947715", "0.61922383", "0.6182214", "0.6178504", "0.61755854", "0.616519", "0.61554873", "0.6117812", "0.61029243", "0.6090343", "0.6071089", "0.6055177", "0.60493064", "0.60490096", "0.60472304", "0.60408616", "0.6037621", "0.60150695", "0.60106516", "0.59695804", "0.5952028", "0.5952028", "0.5947439", "0.59328693", "0.5921908", "0.5914834", "0.59069264", "0.590101", "0.5898622", "0.5895646", "0.58884233", "0.5886166", "0.5884835", "0.5843008", "0.5842345", "0.58417636", "0.5837123", "0.58337224", "0.5828887" ]
0.7802598
0
Method that returns Beta (velocity/speed of light) as a float
def BetaVelocity(self):
    return np.linalg.norm(self.velocity) / const.speed_of_light
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def betaT(self):\n if self.maTail > 1:\n return 0\n else:\n return sqrt(1 - self.maTail**2)", "def getBeta(self, alpha):\n return 2.0*(2.0-alpha) + -4.0*np.sqrt(1.0-alpha)", "def beta(self):\n eTheta = self.eTheta()\n cosOmg = np.cos(self.omega())\n return self.a1()/c.c*(1-eTheta**2)**0.5*cosOmg", "def B(alpha: float, beta: float) -> float:\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)", "def _beta(self):\n return _handle_ab(self.solution, self.use_const)[1]", "def beta(self):\n return self._beta", "def beta(self):\n return self._beta", "def B(alpha, beta):\n return math.gamma(apha) * math.gamma(beta) / math.gamma(alpha + beta)", "def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)", "def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)", "def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)", "def betaW(self):\n if self.maCruise > 1:\n return 0\n else:\n return sqrt(1 - self.maCruise**2)", "def get_beta(self):\n\n return np.matmul(self.rotation_x, self.beta_z)", "def getBeta(self):\n\t\treturn self.relativistic_beta", "def beta_r(r):\n return 0.", "def pvalue_beta(self):\n return self._pvalue_beta", "def _get_alpha_beta(self, a, b):\n beta = a / b\n alpha = a * beta\n return alpha, beta", "def beta_factor(mol_data, ephemobj):\n # imported here to avoid circular dependency with activity.gas\n from .core import photo_timescale\n from ...data import Ephem\n\n if not isinstance(ephemobj, Ephem):\n raise ValueError('ephemobj must be a `sbpy.data.ephem` instance.')\n if not isinstance(mol_data, Phys):\n raise ValueError('mol_data must be a `sbpy.data.phys` instance.')\n\n orb = ephemobj\n delta = (orb['delta']).to('m')\n r = (orb['r'])\n\n if not isinstance(mol_data['mol_tag'][0], str):\n cat = JPLSpec.get_species_table()\n mol = cat[cat['TAG'] == mol_data['mol_tag'][0]]\n name = mol['NAME'].data[0]\n\n else:\n name = mol_data['mol_tag'][0]\n\n timescale = photo_timescale(name)\n\n if timescale.ndim != 0:\n # array\n timescale = timescale[0]\n\n beta = (timescale) * r**2\n\n return beta", "def beta(self):\n return self[1::2]", "def _tstat_beta(self):\n return _handle_ab(self._tstat_all, self.use_const)[1]", "def beta_r(r):\n return 1.", "def beta(theta, a, b):\n B = math.gamma(a) * math.gamma(b) / math.gamma(a + b)\n return (theta ** (a - 1)) * ((1 - theta) ** (b - 1)) / B", "def getGamma(self, alpha, beta):\n return np.power(beta,2.0)/2.0/alpha", "def brate(self):\n try:\n return self.pos / self.runtime\n except ZeroDivisionError:\n return 0", "def tstat_beta(self):\n return self._tstat_beta", "def getB(self):\n return ((self.bPlusbStar() / self.nPos) + (self.bMinusbStar / self.nNeg)) / 2", "def _pvalue_beta(self):\n return _handle_ab(self._pvalues_all, self.use_const)[1]", "def _get_alpha_beta(self):\n alpha = tf.nn.softplus(self.alpha_prime)\n beta = -alpha + tf.nn.softplus(self.beta_prime)\n return alpha, beta", "def get_b(self):\n return ((self.b_plus_bstar / self.n_pos) + (self.b_minus_bstar / self.n_neg)) / 2", "def B(param):\n return (param.delta + param.nu + param.mu0 - param.beta) * param.A + (param.beta - param.nu) * (param.delta + param.nu + param.mu1) * param.b", "def gradient_descent_beta(self):\n return self._gradient_descent_beta", "def getEta(self):\n self.__eta = 3./8.*(1. - self.__alpha0 - self.__alpha1 - 2.*self.__beta)\n if self.__eta<0.: self.__eta=0. 
# erreur d'arrondi\n return self.__eta", "def litBetaAlpha(inc,wave,m,d):\n psi = blazeYaw(inc,wave,m,d)\n beta1 = cos(inc)*cos(psi)\n alpha1 = cos(inc)*sin(psi)-m*wave/d\n return beta1,alpha1", "def num_beta(self) -> int:\n return self._num_beta", "def calc_beta(Phi):\n beta = betaFunction(-Phi)\n #Note: the - is because betaFunction uses -Phi when calculating the Phase Function\n #This is because PchipInterpolator used requires monotonically increasing function\n return beta", "def get_beta(self,df,tick,ind):\n cov = get_cov(df,tick,ind)\n var = df[ind].var()\n beta = cov / var\n return beta", "def beta(alpha, aw, ap):\n if alpha == 0:\n return np.zeros_like(aw)\n elif alpha == 1:\n return np.ones_like(aw)\n else:\n return 1-(1 / (ap - aw) * (-aw + np.sqrt((1-alpha)*ap**2 + alpha*aw**2)))", "def _se_beta(self):\n return _handle_ab(self._se_all, self.use_const)[1]", "def get_alpha_beta(self,n=50):\n return self.tau(self.f0(self.rho),n),self.tau_plus(self.f1(self.rho),n)", "def get_b(self):\n return ((self.s_pos / self.n_pos) + (self.s_neg / self.n_neg)) / 2", "def beta_r(r, r_ani):\n return 1./2 * r / (r + r_ani)", "def nbeta(self) -> int:\n return self._core.nbeta()", "def beta_r(r, r_ani):\n return r**2/(r_ani**2 + r**2)", "def B(self, t):\n return np.sqrt((3 * self.eta_B * self.snr.L0 *\n YEAR_TO_SEC * self.pulsar.tau_0) /\n (self.r(t) * PC_TO_CM) ** 3 *\n (1 - (1 + (t / self.pulsar.tau_0)) ** (-1)))", "def beta_Sonoi2015(self):\n return 10.0**(-3.86*self.string_to_param(\"log_Teff\") + 0.235*self.string_to_param(\"log_g\")+14.2)", "def beta_star(self):\n return self.reciprocal_lattice_parameters[4]", "def boost(beta):\n if abs(beta) >= 1:\n print('***ERROR in SpecialRelativity.boost, beta is {:.3f} .'.format(beta) )\n return( np.array( [ [1.0, 0], [0, 1.0] ] ) )\n gamma = 1.0/np.sqrt( 1.0-beta*beta )\n ch = gamma\n sh = gamma*beta\n return( np.array( [ [ch, -sh], [-sh, ch] ] ) )", "def beta_r(r, r_ani, beta_inf):\n return beta_inf * r**2/(r_ani**2 + r**2)", "def lightSpeed():\n return const.c.value", "def beta_r(r, beta):\n return beta", "def LorentzFactor(self):\n # Use of abs() and x ** 0.5 provides a more stable calculation of lorentz\n # factor than math.sqrt() at high velocities.\n return 1 / abs( 1 - Particle.BetaVelocity(self) * Particle.BetaVelocity(self))**0.5", "def getETA():", "def getETA():", "def beta(p, m):\r\n return p/np.sqrt(p**2 + m**2)", "def beta_func(eps, E, theta):\n\n def parameter_s(eps, E, theta):\n\n \"\"\"\n Return the parameter s (formula 3 from Gould's article)\n\n Parameters:\n eps : energy of the target photon (eV)\n E : energy of the gamma photon (eV)\n theta : angle between the two momenta of the two photons (rad)\n \"\"\"\n\n s = eps*E/(2*(mc2*keV2eV)**2)*(1-np.cos(theta))\n ind = np.where(s>=1) #for pair production to occur, s>=1 and if s=1, it is the threshold condition.\n\n return s, ind\n\n s, ind = parameter_s(eps, E, theta)\n s = s[ind[0]]\n\n return np.sqrt(1-1/s), ind", "def convertGammaToLaguerre(self,y):\n return (y-self.low)*(self.beta)", "def se_beta(self):\n return self._se_beta", "def B(self) -> int:\n return self.params.B", "def beta(self, index):\n index_change = index.close.pct_change()\n beta = self.pct_change.cov(index_change) / index_change.var()\n return beta", "def get_velocity(self):\n return self.momentum/self.mass", "def boost(self):\n ch = self.gamma\n sh = self.gamma*self.beta\n return( np.array( [ [ch, -sh], [-sh, ch] ] ) )", "def _fv(self):\n return self.beta * (self.x ** self.c)", "def fbeta_measure(self, beta=1.):\n P = 
self.precision\n R = self.recall\n if P + R == 0.0:\n return 0.0\n return (1 + beta**2) * P * R / (P * beta**2 + R)", "def eta(z, x, beta2):\n return -6 * z / beta2 / (1+x)", "def betaln(alpha, beta):\n ln_beta = gammaln(alpha) + gammaln(beta) - gammaln(alpha + beta)\n return ln_beta", "def get_battery(self) -> float:\r\n resp = self.send_command(self._GET_BATTERY)\r\n try:\r\n return float(resp)\r\n except ValueError as e:\r\n print(f\"Error parsing battery voltage '{resp}':\", e)\r\n return 0.0", "def calculateGammaFactors(self):\n return (self.time/self.expectedDuration)**self.beta", "def updateBeta(self):\n\n priorBeta = np.copy(self.beta) # 返り値で更新幅を与えるので初期値を保持しておく\n W = self.__genW() # diag Matrix\n # update beta : Fisher Scoring Update\n result = np.matmul(np.matmul(self.X.T, W), self.X)\n result = np.matmul(np.linalg.inv(result), self.X.T)\n result = np.matmul(result, W)\n # claimFreq=0の人は, firstIterationでmu=0の0割が必ず発生する. 適切な対処法は+epsilonで良い?\n z = (self.Y - self.mu)/(self.mu + DoubleGLM.EPSILON) + np.log(self.mu + DoubleGLM.EPSILON)\n self.beta = np.matmul(result, z)\n\n # update current mu\n self.mu = np.exp(np.matmul(self.X, self.beta))\n # update current deviance\n d1 = self.Y * (self.Y**(1-self.p) - self.mu**(1-self.p)) / (1-self.p)\n d2 = (self.Y**(2-self.p) - self.mu**(2-self.p)) / (2-self.p)\n self.d = 2*self.w * (d1 - d2)\n\n return np.abs(priorBeta - self.beta)", "def _j_beta(r, s, r_ani, beta_inf):\n return ((s**2 + r_ani**2) / (r**2 + r_ani**2)) ** beta_inf", "def bethe(beta, z, ne, exc_en):\n exc_en *= 1e-6 # convert to MeV\n beta_sq = beta**2\n\n if beta_sq == 0.0:\n # The particle has stopped, so the dedx should be infinite\n dedx = float('inf')\n elif beta_sq == 1.0:\n # This is odd, but then I guess dedx -> 0\n dedx = 0\n else:\n frnt = ne * z**2 * e_chg**4 / (e_mc2 * MeVtokg * c_lgt**2 * beta_sq * 4 * pi * eps_0**2)\n lnt = log(2 * e_mc2 * beta_sq / (exc_en * (1 - beta_sq)))\n dedx = frnt*(lnt - beta_sq) # this should be in SI units, J/m\n\n return dedx / e_chg * 1e-6 # converted to MeV/m", "def _get_ucb_beta_th(dim, time_step):\n return np.sqrt(0.5 * dim * np.log(2 * dim * time_step + 1))", "def beta(self,k1,k2,c):\n if (k1*k2 == 0.):\n return 0.\n else:\n return c*(k1/k2 + k2/k1)/2. 
+ c*c", "def get_beta(self, epoch, beta_start=None, beta_end=None):\r\n if beta_start is None:\r\n beta_start = self.beta_start\r\n if beta_end is None:\r\n beta_end = self.beta_end\r\n\r\n if epoch < beta_start:\r\n return self.beta0\r\n elif beta_start <= epoch <= beta_end:\r\n if self.schedule_type == 'constant':\r\n return self.beta0\r\n elif self.schedule_type == 'linear':\r\n return (self.beta0\r\n + (self.beta1 - self.beta0) * (epoch - beta_start)\r\n / (beta_end - beta_start))\r\n elif self.schedule_type == 'cosine':\r\n return self.cosine_annealing(epoch, beta_start, beta_end)\r\n elif self.schedule_type == 'cosine-restarts':\r\n cycle_len = (beta_end - beta_start) // self.cycles\r\n cycle_idx = (epoch - beta_start) // cycle_len\r\n start_epoch = beta_start + cycle_len * cycle_idx\r\n end_epoch = beta_start + cycle_len * (cycle_idx + 1)\r\n return self.cosine_annealing(epoch, start_epoch, end_epoch)\r\n else:\r\n raise ValueError('Unsupported schedule type {}'\r\n .format(self.schedule_type))\r\n else:\r\n return self.beta1", "def _get_ucb_beta_th(dim, time_step):\n return np.sqrt(5 * dim * np.log(2 * dim * time_step + 1))", "def get_optimal_beta(self):\n if self.annealing:\n # find the epoch/index that had the highest NDCG@k value\n index_max_ndcg = np.argmax(self.val_ndcg)\n\n # using this index find the value that beta had at this epoch\n return self.ls_beta[index_max_ndcg]\n else:\n return self.beta", "def Bo_Bosol_calc(self):\n self.Bosol = (self.g*self.alpha * self.srflx ) \n #ZEROS FOR T3W APPLICATION\n self.Bo = np.zeros([self.b.shape[0]])", "def get_beta0(self):\n\n beta0 = np.zeros((self.nresp,))\n beta = self.beta\n beta0 = beta0 + self.mu_y if self.mu_y is not None else beta0\n beta0 = beta0 - np.matmul(beta.T, self.mu_x) if self.mu_x is not None else beta0\n return beta0", "def get_data(self):\n return self._beta", "def test_rb_decay(self, a, b, alpha):\n x = np.arange(1, 100, 5)\n y = a * alpha**x + b\n\n alpha_guess = guess.rb_decay(x, y, b=b)\n\n self.assertAlmostEqual(alpha, alpha_guess, delta=alpha * 0.1)", "def beta_fct(M_p, F_xuv, R_p):\n\n M_EARTH= const.M_earth.cgs.value\n R_EARTH = const.R_earth.cgs.value\n\n if (type(F_xuv) == float) or (type(F_xuv) == np.float64):\n # if F_xuv is single value\n grav_pot = -const.G.cgs.value * (M_p*M_EARTH) / (R_p*R_EARTH)\n log_beta = max(0.0, -0.185 * np.log10(-grav_pot)\n \t\t\t\t\t+ 0.021 * np.log10(F_xuv) + 2.42)\n beta = 10**log_beta\n return beta\n\n elif len(F_xuv) > 1:\n # if F_xuv is a list\n betas = []\n for i in range(len(F_xuv)):\n grav_pot_i = -const.G.cgs.value \\\n * (M_p[i]*M_EARTH) / (R_p[i]*R_EARTH)\n log_beta_i = max(0.0, -0.185 * np.log10(-grav_pot_i)\n \t\t\t\t\t + 0.021 * np.log10(F_xuv[i]) + 2.42)\n beta_i = 10**log_beta_i\n betas.append(beta_i)\n betas = np.array(betas)\n return betas", "def goal_velocity(self):\n return self._read(MX_GOAL_VELOCITY)", "def v(self):\n\n # TODO This translation formula works, but needs simplified.\n\n # PWM duration can go from 0 to 4095 with 4095 representing max rpm\n# print(\"MuleBot.v MuleBot.dcMotorPWMDurationLeft:\", MuleBot.dcMotorPWMDurationLeft)\n speed_percentage = float(MuleBot.dcMotorPWMDurationLeft) / 4095.0\n# print(\"speed_percentage: \", speed_percentage)\n\n rpm = speed_percentage * self.motorMaxRPM\n# print(\"rpm: \", rpm)\n\n secondsPerMinute = 60\n revs_per_second = rpm / secondsPerMinute\n# print(\"--revs_per_second\", revs_per_second)\n\n inches_per_rev = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n INCHES_PER_METER = 39.3701\n meters_per_rev = 
inches_per_rev / INCHES_PER_METER\n# print(\"--meters_per_rev\", meters_per_rev)\n\n meters_per_second = meters_per_rev * revs_per_second\n\n# print(\"--meters_per_second: \", meters_per_second)\n return meters_per_second", "def eta(self, total):\n try:\n return (total - self.pos) / self.brate\n except ZeroDivisionError:\n return 0.0", "def thermal_state(self, beta: float = .1) -> numpy.ndarray:\n rho = numpy.exp(-beta * self.cost)\n return rho / numpy.sum(rho)", "def compute_vel(self, state, goal):\n\n print(state)\n print(goal)\n dx = goal[0] - state[0]\n dy = goal[1] - state[1]\n theta = state[2]\n rho = np.sqrt(dx**2+dy**2)\n #alpha = np.minimum(-theta + np.arctan2(dy,dx), - theta + np.arctan2(dy,dx)+2*np.pi)\n #beta = theta + alpha\n beta = np.arctan2(dy,dx)\n alpha = beta - theta\n print(\"beta\", beta, \"theta\", theta, \"alpha1\", alpha)\n\tif (alpha > np.pi):\n alpha = alpha - 2*np.pi\n \telse:\n\t if (alpha < -np.pi):\n \talpha = alpha + 2*np.pi\n \n v = self.kp*rho\n omega = self.ka*alpha+self.kb*beta\n print(alpha)\n if (np.cos(alpha)*np.cos(alpha) < 0.2):\n v = 0.1\n else:\n v = np.cos(alpha)*rho\n\n if (v < 0):\n #exit()\n\t v = 0.1\n\t \n if (v > self.MAX_SPEED):\n v = self.MAX_SPEED\n \n omega = 4*alpha\n if (omega > self.MAX_OMEGA):\n omega = self.MAX_OMEGA\n #if (omega < -self.MAX_OMEGA):\n # omega = -self.MAX_OMEGA\n\n done = False\n if (np.absolute(dx) < 0.01 and np.absolute(dy) < 0.01):\n done = True\n \n\treturn (v,omega,done)", "def powBeta( n ):\n return (1-alphaval)*Fib(n) + Fib(n-1)\n #return Fib(n+1) - Fib(n) * alphaval", "def hern_bulge_mass(r,b):\n rb = r/b\n return ((rb*rb)/(2*(1+rb)**2.))", "def getParam(self):\n return self.__alpha0, self.__alpha1, self.__beta, self.__eta", "def B_func(Th33, Th1500):\n\n D = ln(Th33) - ln(Th1500)\n B = (ln(1500) - ln(33)) / D\n\n def lbd_func(C):\n \"\"\"return the slope of logarithmic tension-moisture curve\"\"\"\n if C == 0:\n return 0.0\n lbd = 1 / C\n return lbd\n\n return lbd_func(B)", "def convertLaguerreToGamma(self,x):\n return x/self.beta+self.low", "def betabinom_B(k,n, alpha,beta, precision=50):\n mp.dps = precision # Set precision\n #return float( mpmath.gamma(k+alpha) * mpmath.gamma(n-k+beta) / mpmath.gamma(alpha+n+beta) )\n return float(mpmath.beta(k+alpha, n-k+beta))", "def get_beta_z(self):\n\n return np.matmul(self.sigma_zinv, self.sigma_zw)", "def get_xray_delta_beta_intrinsic(self, energy=13.0):\n \n delta, beta = self.get_xray_delta_beta(energy)\n delta *= self.atomic_weight/self.density\n beta *= self.atomic_weight/self.density\n \n return delta, beta", "def value(self) -> float:", "def vel(self, time):\n if (time < self.ti):\n t = 0\n elif (time > self.tf):\n t = self.tf - self.ti\n else:\n t = time - self.ti\n return self.a1 + 2.0 * self.a2 * t + 3.0 * self.a3 * pow(t, 2) + 4.0 * self.a4 * pow(t, 3) + 5.0 * self.a5 * pow(t, 4)", "def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):\n\n def alpha_bar(time_step):\n return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2\n\n betas = []\n for i in range(num_diffusion_timesteps):\n t1 = i / num_diffusion_timesteps\n t2 = (i + 1) / num_diffusion_timesteps\n betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))\n return torch.tensor(betas, dtype=torch.float32)", "def bias(self):\n return self.mbmod.bias", "def getBat():\n ina = INA219(address=int('0x41', 16))\n bat_bus_v = ina.getBusVoltage_V()\n bat_shunt_mv = ina.getShuntVoltage_mV()\n bat_curr_ma = ina.getCurrent_mA()\n bat_volt_v = (ina.getBusVoltage_V() + 
ina.getShuntVoltage_mV() / 1000)\n bat_power_mw = ina.getPower_mW()\n return bat_volt_v, bat_curr_ma", "def Fx_case_B(z, x, gamma):\n \n if z == 0 and x == 0:\n return 0\n \n beta2 = 1-1/gamma**2\n beta = sqrt(beta2)\n \n alp = alpha(z, x, beta2)\n sin2a = sin(2*alp)\n cos2a = cos(2*alp) \n\n kap = 2*(alp - z)/beta\n #kap = sqrt(x**2 + 4*(1+x)*sin(alp)**2) # kappa for case B\n \n N1 = sin2a - beta*(1+x)*kap\n N2 = (1+x)*sin2a - beta*kap\n D = kap - beta*(1+x)*sin2a\n \n return N1*N2/D**3", "def alpha(self) -> float:\n return self._alpha" ]
[ "0.7552084", "0.754992", "0.7535426", "0.7439897", "0.74009764", "0.7362183", "0.7362183", "0.71039945", "0.707216", "0.707216", "0.707216", "0.7020982", "0.69366413", "0.6877576", "0.6866696", "0.67877996", "0.6773138", "0.6760468", "0.6758375", "0.67385536", "0.670362", "0.67025906", "0.6696022", "0.66733295", "0.66308504", "0.6612742", "0.6595469", "0.6531038", "0.65085256", "0.6470471", "0.64633876", "0.6463078", "0.642669", "0.6419045", "0.6409368", "0.6393864", "0.63898003", "0.6337302", "0.63084346", "0.6269883", "0.6250577", "0.6247466", "0.6209574", "0.61954665", "0.61941856", "0.61494875", "0.6121731", "0.6096687", "0.6077243", "0.60716105", "0.6040074", "0.60236835", "0.60236835", "0.6021308", "0.60161", "0.59980756", "0.59951407", "0.5964739", "0.5964441", "0.5959524", "0.59593236", "0.5955266", "0.59535867", "0.59495705", "0.5915316", "0.59152", "0.59149575", "0.59034634", "0.58995104", "0.588749", "0.58874565", "0.5876289", "0.5849159", "0.58476114", "0.58385706", "0.5838104", "0.5825148", "0.58250916", "0.5816935", "0.5814285", "0.58053607", "0.5804134", "0.580123", "0.5799669", "0.5792415", "0.5790235", "0.57852703", "0.5770424", "0.5769299", "0.57483655", "0.5725717", "0.57238024", "0.57135665", "0.57103133", "0.5701564", "0.57003045", "0.5697845", "0.5684247", "0.5677956", "0.56778926" ]
0.8313839
0
Method that returns the Lorentz Factor of the particle.
def LorentzFactor(self):
    # Use of abs() and x ** 0.5 provides a more stable calculation of lorentz
    # factor than math.sqrt() at high velocities.
    return 1 / abs( 1 - Particle.BetaVelocity(self) * Particle.BetaVelocity(self))**0.5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def L(self) -> float:\n return self._L", "def Lorentz(x, x0, A, B, d):\n return B + A / (((x - x0) / d) ** 2 + 1)", "def lorentz(x, gamma):\n return 1 / cs.pi * 0.5 * gamma / ((0.5 * gamma**2) + x**2)", "def relu(z: float) -> float:\n return z if z > 0 else 0.01 * z", "def lorentz(x, x0, gamma): \n return (0.5/pi) * gamma / ((x-x0)**2 + 0.25 * gamma**2)", "def lp_factor(self):\n num = 1 + np.cos(2 * self.angle) ** 2\n den = np.cos(self.angle) * np.sin(self.angle) ** 2\n return num / den", "def lfn(self):\n if self.precision:\n return self.evaluations.exposedWing.edges[1].point1.z - self.evaluations.chordIntersected.edges[1].length\n else:\n return (self.acW + self.longPosW) / 2 # first guess for a faster evaluation", "def Luminosity(self, z, f=1., dnu=1000.):\n ld = self.Luminosity_Distance(z)\n ld2 = ld * ld\n lum = f * self.Jy2CGS * dnu * self.MHz2Hz * 4 * np.pi * ld2\n return lum", "def lorentzian(self, params):\n height, width, c_freq = params\n return height / (1.0+ (4.0 / width**2)*(self.freqs - c_freq)**2)", "def lorentzian(self, params):\n height, width, frequency = params\n\n return height / (1.0+ (4.0 / width**2)*(self.freq - frequency)**2)", "def calc_lamb(self, x_surface, geom):\n\n return self.rfl", "def get_lz(self):\r\n return self.dz * self.nz - self.oz", "def fun_lorentzian(p,r):\n return p[1] / ((r/p[0])**2 + 1)", "def relu_prime(z: float) -> float:\n return 1.0 if z > 0 else 0.0", "def L(self, t):\n return (3e-16 * self.pulsar.tau_0 * self.pulsar.L_0 *\n (t / (t + self.pulsar.tau_0)))", "def calcLorentzGammaFromMomentum(self,direction):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the lorentz gamma.\")\n if direction not in self.x.order: \n raise CoordinateVector(\"The direction, \"+str(direction)+ \" needs to be one of \" +\",\".join(self.x.order) + \" to calculated the lorentz gamma.\")\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n return math.sqrt(1 + (getattr(self.p,direction)/(self.mass*speed_light))**2)", "def get_coeff(self):\n return bernoulli(self.degree+1) / factorial(self.degree + 1)", "def omLz(self,z):\n return self.omL/(self.omL + self.omR*(1.0 + z)**2 + self.om0*(1.0 + z)**3)", "def lorentz_func(x, center, width):\n return 1/np.pi*width/2 *1/((x-center)**2+(x/width)**2)", "def lorentzian(x, x0=0.0, fwhm=1.0, ampl=1.0):\n return ampl * (1 + 4 * ((x - x0) / fwhm) ** 2) ** (-1)", "def lvec(self):\n lv = ROOT.TLorentzVector()\n# if self.pt < 0 or abs(self.eta) > 6:\n# raise Exception(\"Invalid values for TLorentzVector\")\n lv.SetPtEtaPhiM(self.pt, self.eta, self.phi, self.mass)\n# if abs(lv.Pt()) > 100000 or abs(lv.Eta()) > 100000:\n# raise Exception(\"Invalid values for TLorentzVector\")\n return lv", "def log_likelihood_z_lognormal(self, std=1.0):\n #return self.log_det_Jxz - self.dim * tf.log(std) - (0.5 / (std**2)) * tf.reduce_sum(self.output_z**2, axis=1)\n from deep_boltzmann.util import logreg\n logz = logreg(self.output_z, a=0.001, tf=True)\n ll = self.log_det_Jxz \\\n - (0.5 / (std**2)) * tf.reduce_sum(logz**2, axis=1) \\\n - tf.reduce_sum(logz, axis=1)\n return ll", "def z_score(self, x):\n return (x - self.n) / self.p", "def lnprobability(self):\n return", "def loevinger_coeff(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n cov = self.covar()\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n # only one (diagonal) cell is non-zero\n return 0.5\n elif cov == 0.0:\n return 0.0\n 
else:\n return _div(cov, min(p1 * q2, p2 * q1))", "def get_Lf(self):\n return 0", "def get_z(self) -> int:\n return self.__z", "def luminosity_distance(self, z):\n da = self.angular_diameter_distance(z)\n dl = da*(1.+z)**2.\n return(dl)", "def calcLorentzGammaFromVelocity(self,direction):\n if direction not in self.v.order: \n raise CoordinateVector(\"The direction, \"+str(direction)+ \" needs to be one of \" +\",\".join(self.x.order) + \" to calculated the lorentz gamma.\")\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n return math.sqrt(1 /(1 - (getattr(self.v,direction)/speed_light)**2))", "def L(n):\n\tif (n==0):\n\t\treturn lambda x: 1.0\n\n\telif (n==1):\n\t\treturn lambda x: x\n\n\telse:\n\t\treturn lambda x: ( (2.0*n-1.0) * x * L(n-1)(x)-(n-1) * L(n-2)(x) ) / n", "def distance_modulus(self, z):\n return(5.0*np.log10(self.luminosity_distance(z))+25.0)", "def lam(E):\n return (12398.4/E)*1e-10", "def relu(self, z):\n return np.maximum(0, z)", "def get_llss(self):\n return float(self.input.get_text(liquidaciones_historicas_catalog.LLSS).replace(\".\", \"\").replace(\",\", \".\"))", "def Lorenz(s):\n x = s[0]\n y = s[1]\n z = s[2]\n \n # constants for the equations\n sigma = 10.0\n rho = 28.0\n beta = 8.0/3.0\n \n # Return the state derivatives.\n return [sigma * (y-x), (rho-z)*x -y, x*y - beta*z]", "def Lq(self):\n if not self.isVaild():\n pass\n temp = ((self.r()**self.C)*self.Rho()) / \\\n (math.factorial(self.C)*((1 - self.Rho())**2))\n return temp*self.P0()", "def lorentz(self, X, xm, amp, w):\n return amp / (1 + ((X - xm) / (w / 2)) ** 2)", "def _getLaplaceCovar(self):\n assert self.init, 'GP not initialised'\n assert self.fast is False, 'Not supported for fast implementation'\n\n if self.cache['Sigma'] is None:\n self.cache['Sigma'] = sp.linalg.inv(self._getHessian())\n return self.cache['Sigma']", "def get_duct_linear_heat_loss_coefficient() -> float:\n return 0.49", "def _calculate_helium_nlte(self, level_boltzmann_factor,\n ion_number_density, levels, partition_function,\n helium_population_updated):\n level_number_density = self._calculate_dilute_lte(\n level_boltzmann_factor, ion_number_density, levels,\n partition_function)\n if helium_population_updated is not None:\n level_number_density.ix[2].update(helium_population_updated)\n return level_number_density", "def get_lx(self):\r\n return int(self.dx * self.nx - self.ox)", "def Luminosity(self):\n try:\n L = (self.E*self.Weight).sum()\n N = self.E.count()\n except:\n L = self.E.sum()\n N = self.E.count()\n return L, L/np.sqrt(N)", "def n_z(self, level):\n resolution = self.resolution(level)\n return (self.z_extent // resolution + 63) // 64", "def RelativisticMass(self):\n return Particle.LorentzFactor(self) * self.restMass", "def lorentzian(mu, wid, x): \n return np.ones(len(x) ) / (1 + ( (x - mu) / (0.5 * wid) )**2)", "def lvar (inlist):\r\n n = len(inlist)\r\n mn = mean(inlist)\r\n deviations = [0]*len(inlist)\r\n for i in range(len(inlist)):\r\n deviations[i] = inlist[i] - mn\r\n return ss(deviations)/float(n-1)", "def get_L(self, X):\n if issparse(X):\n return slinalg.norm(X, axis=1) ** 2\n else:\n return norm(X, axis=1) ** 2", "def pr(self, vertex):\n log_pr = self.log_pr(vertex)\n return np.exp(log_pr - self.logZ)", "def coefficient(self) -> float:\n ...", "def log_likelihood_z_normal(self, std=1.0):\n #return self.log_det_Jxz - self.dim * tf.log(std) - (0.5 / (std**2)) * tf.reduce_sum(self.output_z**2, axis=1)\n return self.log_det_Jxz - (0.5 / (std**2)) * 
tf.reduce_sum(self.output_z**2, axis=1)", "def log_likelihood_z_uniform(self, std=1.0):\n return self.log_det_Jxz - self.dim * (tf.log(std) + 0.5*tf.log(12.0))", "def Luminosity_Distance(self, z):\n dl = (1 + z) * self.Comoving_Distance(z)\n return dl", "def nu3(self):\n n = self.level()\n if (n % 9 == 0):\n return ZZ(0)\n return prod([ 1 + kronecker_symbol(-3, p) for p, _ in n.factor()])", "def scale_factor(self, z = 0.):\n return 1./(1.+z)", "def factor_carga(self):\r\n return self.nelementos() / self.n", "def getLiters(self):\n return self._count/self._ppl", "def N_z(self) -> int:\n return self.params.N_z", "def nfactors(self):\n return self.L.nnz", "def coeff(self):\n return self._coeff", "def z(self) -> float:\n return self.A[3] if self.scalar_vector else self.A[2]", "def llh(self):\n return Station._ellipsoid.geodetic(self.xyz())", "def _nelec(self):\n pd = self.particle_distribution(self._gam * mec2)\n return pd.to(1/mec2_unit).value", "def get_luminosity(self):\n\n if self.no_dist is False and self.no_flux is False:\n\n dist = self.distance\n snu = self.snu_at_1GHz\n lum = lumin(dist, snu)\n\n self.lum = lum\n else:\n self.lum = -1 # use -1 to indicate unknown luminosity\n return self.lum", "def lorentzian(f, A, fc):\n return A/(1+(2*np.pi*f/fc)**2)", "def eccentricity(self):\n return sqrt(self.f * 2 - self.f ** 2)", "def Momentum(self):\n return (np.multiply(Particle.LorentzFactor(self)\n , np.array(self.velocity,dtype=float))* self.restMass)", "def dL(n):\n\n\t#[TODO]: allow evaluation at 0\n\tif (n==0):\n\t\treturn lambda x: 0.0\n\n\telif (n==1):\n\t\treturn lambda x: 1.0\n\n\telse:\n\t\treturn lambda x: (n/(x**2-1.0))*(x*L(n)(x)-L(n-1)(x))", "def kl_divergence(self) -> Tensor:\n return torch.tensor(0.0)", "def lorentz_deriv(coord, sigma=10., beta=8./3, rho=28.0):\n x, y, z = coord # unpack coordinates\n return np.array([sigma * (y - x), x * (rho - z) - y, x * y - beta * z])", "def luminosity_distance(self, z):\n return self.proper_distance(z) * (1 + z)", "def kl_policy(self):\n r = .5 * (np.trace(np.dot(self.behavior_policy.precision, self.target_policy.noise))\n - self.behavior_policy.dim_A - np.log(np.linalg.det(self.target_policy.noise) / np.linalg.det(self.behavior_policy.noise)))\n\n dtheta = (self.behavior_policy.theta - self.target_policy.theta)\n da = np.dot(dtheta, self.mu.T)\n m = float(np.sum(\n da * np.dot(self.target_policy.precision, da))) / self.mu.shape[0]\n\n r += .5 * m\n return r", "def get_vlb(self):\n vlb = 0\n\n # First term\n # E[LN p(g | \\gamma)]\n E_ln_g = self.expected_log_g()\n vlb += Dirichlet(self.gamma[None, None, :]).negentropy(E_ln_g=E_ln_g).sum()\n\n # Second term\n # E[LN q(g | \\tilde{gamma})]\n vlb -= Dirichlet(self.mf_gamma).negentropy().sum()\n\n return vlb", "def f_o(self, z):\n\t return exp(-(z/self.MU)**self.CIRC_3)", "def f_o(self, z):\n\t return exp(-(z/self.MU)**self.CIRC_3)", "def CalcLinearForce(self):\n\t\t\n\t\tself.F = -self.s * self.X\n\t\t\n\t\treturn self.F", "def diffuse_coefficient(self):\n return self._diffuse_coefficient", "def l2_norm(self):\n return (self.x**2 + self.y**2 + self.z**2)**0.5", "def calc_rfl(self, x_surface, geom):\n\n return self.rfl", "def get_vlb(self):\n raise NotImplementedError()\n vlb = 0\n\n # First term\n # E[LN p(g | \\gamma)]\n E_ln_g = self.expected_log_g()\n vlb += Dirichlet(self.gamma[None, None, :]).negentropy(E_ln_g=E_ln_g).sum()\n\n # Second term\n # E[LN q(g | \\tilde{gamma})]\n vlb -= Dirichlet(self.mf_gamma).negentropy().sum()\n\n return vlb", "def modified_sommerfeld_number(self):\n 
return (\n self.radius_stator * 2 * self.omega * self.viscosity * (self.length ** 3)\n ) / (8 * self.load * (self.radial_clearance ** 2))", "def get_pressure_coefficient(self):\n depth = self.params[\"Measured_Pressure\"][\"depth\"]\n coef = self.params[\"Measured_Pressure\"][\"coef\"]\n pres = self.params[\"Measured_Pressure\"][\"data\"]\n if depth and not coef and pres:\n hydro = hydrostatic_pressure(self.depth,\n kelly_bushing=self.kelly_bushing,\n depth_w=self.water_depth)\n coef_data = list()\n for dp, pr in zip(depth, pres):\n idx = np.searchsorted(self.depth, dp)\n coef_data.append(pr / hydro[idx])\n log = Log()\n log.depth = depth\n log.data = coef_data\n return log\n else:\n log = Log()\n log.depth = depth\n log.data = coef\n return log", "def fcp(self, var='x'):\n return self.charpoly(var).factor()", "def E(self, z):\n return np.sqrt(self.Omega_m * np.power(1 + z, 3.0) + self.Omega_L)", "def kl(\n self,\n x: Tensor,\n covariates: Tensor,\n use_temp: bool,\n ) -> Tuple[Tensor, Tensor]:\n log_probs, ldj_sum, _ = self.compute_probabilities(x, covariates, use_temp)\n\n return -(torch.logsumexp(log_probs, dim=1) + ldj_sum).mean()", "def normal_lcdf(mu, sigma, x):\n z = (x - mu) / sigma\n return pt.switch(\n pt.lt(z, -1.0),\n pt.log(pt.erfcx(-z / pt.sqrt(2.0)) / 2.0) - pt.sqr(z) / 2.0,\n pt.log1p(-pt.erfc(z / pt.sqrt(2.0)) / 2.0),\n )", "def Z_lopass(C, L, R_L, f):\n return 1/(1/Xcap(C,f) + 1/Z_low(L, R_L, f))", "def calc_Ls(self, x_surface, geom):\n\n return np.zeros((self.n_wl,))", "def _compute_kl(self, lvl):\n kl = [] # kernal length\n for n in range(lvl):\n fct = self.scaling**n # up-sampling factor\n kl.append(fct*(self.nfreq-1)+1)\n kl.append(kl[-1]) # repeat the value of the coarsest scale for the approximation coefficient\n return kl[::-1]", "def get_ICL(self) -> float:\n assert (\n self.trained_successfully_ == True\n ), \"Model not trained successfully\"\n return (\n self.loglikelihood_\n - (self.n_row_clusters - 1) / 2 * np.log(self._nb_rows)\n - (self.n_column_clusters - 1) / 2 * np.log(self._nb_cols)\n - (self.n_column_clusters * self.n_row_clusters)\n / 2\n * np.log(self._nb_cols * self._nb_rows)\n )", "def factor(self):\n return np.real_if_close(self.scale * np.exp(1j*self.phase))", "def lherzolite():\n\n rho = 3270.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 187.4; C[0,1] = 63.71; C[0,2] = 63.87; C[0,3] = 0.78; C[0,4] = 2.02; C[0,5] = -3.2\n C[1,0] = C[0,1]; C[1,1] = 211.25; C[1,2] = 64.5; C[1,3] = -3.07; C[1,4] = 0.87; C[1,5] = -5.78\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 190.; C[2,3] = 0.38; C[2,4] = 2.38; C[2,5] = -0.12\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 67.9; C[3,4] = -2.12; C[3,5] = 1.6\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 63.12; C[4,5] = -0.55\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 66.83\n\n return C, rho", "def getZCoord(self, x, y):\n n = self.normal()\n z = (-n.x * (x - self.p0.x) - n.y * (y - self.p0.y) + n.z * self.p0.z) / n.z\n return z", "def getVelZ(self):\n return self.posvel.getZ()", "def nCr():\n return math.factorial(self.nn) / (math.factorial(self.rr) * math.factorial(self.nn - self.rr))", "def dloglam(self):\n # This number was determined using the resolution and sampling quoted on the FIRE website\n R = 6000.0 * 2.7\n dloglam = 1.0 / R / np.log(10.0)\n return dloglam", "def get_fnllh(self):\n\n def fnllh(p):\n return 0.5 * anp.sum(self.get_fres()(p) ** 2)\n\n return fnllh", "def 
growth_factor_scale_independent(self, z):\n z = np.atleast_1d(z)\n nz = len(z)\n #if self.M_nu_tot == 0. and self.w0 == -1. and self.wa==0.:\n # aa = 1./(1.+z)\n # ww = self.w0 + (1.-aa)*self.wa\n # d1 = aa*ss.hyp2f1(1/3., 1., 11/6., -aa**3/self.Omega_m*(1.-self.Omega_m))/ss.hyp2f1(1/3., 1., 11/6., -(1.-self.Omega_m)/self.Omega_m)\n #else:\n # d1 = np.zeros(nz)\n # for i in range(nz):\n # LCDM, _ = sint.quad(lambda x: (1+x)*(self.H0/self.H_massive(x))**3., z[i], np.inf)\n # d1[i] = LCDM*self.H_massive(z[i])/self.H0\n # LCDM0, _ = sint.quad(lambda x: (1+x)*(self.H0/self.H_massive(x))**3., 0., np.inf)\n # d1 = d1/LCDM0\n d1 = np.zeros(nz)\n for i in range(nz):\n LCDM, _ = sint.quad(lambda x: (1+x)*(self.H0/self.H_massive(x))**3., z[i], np.inf)\n d1[i] = LCDM*self.H_massive(z[i])/self.H0\n LCDM0, _ = sint.quad(lambda x: (1+x)*(self.H0/self.H_massive(x))**3., 0., np.inf)\n d1 = d1/LCDM0\n return d1", "def _f_lcs(llcs, m, n):\n r_lcs = llcs / m\n p_lcs = llcs / n\n beta = p_lcs / (r_lcs + 1e-12)\n num = (1 + (beta ** 2)) * r_lcs * p_lcs\n denom = r_lcs + ((beta ** 2) * p_lcs)\n f_lcs = num / (denom + 1e-12)\n return f_lcs", "def residual(us):\n return self.h_S(z0, us) - h_P", "def Laplace_evidence(self):\r\n A = self.Laplace_covariance()\r\n try:\r\n hld = np.sum(np.log(np.diag(jitchol(A)[0])))\r\n except:\r\n return np.nan\r\n return 0.5 * self._get_params().size * np.log(2 * np.pi) + self.log_likelihood() - hld" ]
[ "0.65476906", "0.65036815", "0.6469627", "0.638012", "0.6348522", "0.6347106", "0.6341828", "0.6334289", "0.6333599", "0.63280964", "0.62715966", "0.62416214", "0.6189796", "0.61773413", "0.61520237", "0.6139124", "0.6098995", "0.60558134", "0.60534275", "0.5988797", "0.59480953", "0.5939049", "0.59071845", "0.58696145", "0.58549404", "0.5834981", "0.58342516", "0.5827706", "0.5827286", "0.5825165", "0.5810524", "0.57805383", "0.57647127", "0.5763339", "0.57568485", "0.5741183", "0.5734902", "0.5706007", "0.57041836", "0.5694521", "0.5690844", "0.5689414", "0.568272", "0.5681087", "0.5677779", "0.56601423", "0.5642551", "0.56342894", "0.5630433", "0.5629502", "0.5629004", "0.56283903", "0.56180817", "0.5616676", "0.5613817", "0.5610746", "0.55970556", "0.55913013", "0.55882305", "0.55878913", "0.5587782", "0.5583108", "0.5582645", "0.5582148", "0.55805844", "0.556696", "0.5563327", "0.55628866", "0.55616915", "0.5556233", "0.55527264", "0.5544616", "0.5538365", "0.5538365", "0.5526536", "0.5525618", "0.55230635", "0.55104107", "0.5507161", "0.5504975", "0.55001014", "0.54931337", "0.5491058", "0.5485914", "0.5485183", "0.5482693", "0.54780173", "0.54750276", "0.5473535", "0.54729354", "0.5472132", "0.5471891", "0.5467143", "0.5463632", "0.54629624", "0.54620624", "0.5461564", "0.54571676", "0.54553634", "0.54462504" ]
0.8493009
0
Method that returns the relativistic mass of the particle
def RelativisticMass(self):
    return Particle.LorentzFactor(self) * self.restMass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def particleMass(self):\n return self.params['particleMass']", "def mass(self):\n\t\treturn self.volume*self.density", "def get_mass(self):\n return self.m", "def Mass(self):\n mpa = self.MassPerLength()\n if mpa == 0.0:\n return 0.\n L = self.Length()\n mass = L * mpa\n\n #try:\n #mass = (self.Rho() * self.Area() + self.Nsm()) * L\n #except TypeError:\n #msg = 'TypeError on eid=%s pid=%s:\\n' % (self.eid, self.Pid())\n #msg += 'rho = %s\\narea = %s\\nnsm = %s\\nL = %s' % (self.Rho(),\n # self.Area(),\n # self.Nsm(), L)\n #raise TypeError(msg)\n\n return mass", "def getMass(self):\n return self.mass", "def get_mass(self):\n _pal.lib.geometry_get_mass.restype = c.c_float\n return _pal.lib.geometry_get_mass(self._geometry)", "def mass(self):\n return self._P", "def cal_mass(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for mass routine)')\n\n\n \n if self.E**2-self.px**2-self.py**2-self.pz**2>1e-7: #precision problem\n self.mass=math.sqrt(self.E**2-self.px**2-self.py**2-self.pz**2)\n else:\n self.mass=0", "def Momentum(self):\n return (np.multiply(Particle.LorentzFactor(self)\n , np.array(self.velocity,dtype=float))* self.restMass)", "def mass(self):\n return self._getAttribute(Attribute.mass)", "def dist_mass(self, Mp):\r\n\r\n Mearth = np.array(Mp, ndmin=1) * u.earthMass\r\n\r\n tmp = ((Mearth >= self.Mprange[0]) & (Mearth <= self.Mprange[1])).astype(float)\r\n Mjup = Mearth.to(\"jupiterMass\").value\r\n\r\n return tmp * Mjup ** (-1.3)", "def mass(self):\n return self._mass", "def mass(self):\n return self._mass", "def get_mass(element):\n return pt.elements.isotope(element).mass", "def particleCharge(self):\n return self.params['particleCharge']", "def totalmass_comvelocity(particle_list):\r\n total_momentum = sum([particle.linear_momentum()\r\n for particle in particle_list])\r\n total_mass = sum([particle.mass for particle in particle_list])\r\n\r\n return total_mass, total_momentum / total_mass", "def mass_energy(particle: Particle, mass_numb: Optional[Integral] = None) -> u.J:\n return particle.mass_energy", "def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)", "def calcMomentumFromVelocity(self):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the particle momentum from velocity.\")\n values = {}\n for direction in self.v.order:\n gamma = self.calcLorentzGammaFromVelocity(direction)\n values[direction] = getattr(self.v,direction)*gamma*self.mass\n self.setMomentum(Cartesian3DVector(**values))\n return self.getMomentum()", "def calc_mass(self):\n\n star = self.star\n\n K, N = star.mesh_size\n mu = star.mu_coords\n r = star.r_coords\n\n def Q1(j):\n return np.sum((mu[2::2] - mu[:-2:2]) *\n (star.rho[:-2:2, j] + 4 * star.rho[1:-1:2, j] +\n star.rho[2::2, j])) / 6\n\n mass = 0\n\n for j in range(0, N - 2, 2):\n mass += (r[j + 2] - r[j]) * (r[j]**2 * Q1(j) +\n 4 * r[j + 1]**2 * Q1(j + 1) +\n r[j + 2]**2 * Q1(j + 2))\n\n return 2 / 3 * np.pi * mass", "def get_mass(elem):\n return mass[get_num(elem)]", "def dispersion(self, p):\n return p**2 / (2*self.mass)", "def molar_mass_dry_air():\n return 28.9647", "def total_mass(self):\n return self._total_mass", "def total_mass(self):\n return self._total_mass", 
"def get_center_of_mass_enemies(self,obs):", "def mu(self):\n return self.mass * G", "def omega(self, mass: float) -> float:\n return np.sqrt(self.spring_constant / mass)", "def compute_protien_mass(protien_string):\r\n\r\n p={'A':'71.03711','C':'103.00919','D':'115.02694','E':'129.04259','F':'147.06841','G':'57.02146','H':'137.05891','I':'113.08406','K':'128.09496','L':'113.08406','M':'131.04049','N':'114.04293','P':'97.05276','Q':'128.05858','R':'156.10111','S':'87.03203','T':'101.04768','V':'99.06841','W':'186.07931','Y':'163.06333'}\r\n mass=0\r\n for x in protien_string:\r\n mass=mass+float(p[x])\r\n \r\n #to change number of values after decimel point to 3\r\n mass=round(mass,3)\r\n return mass", "def mass_tot_rho(self):\n\n dm = np.zeros(self.nzon)\n dm[0] = 4. * np.pi / 3. * (self.r[0] ** 3 - self.r_cen ** 3) * self.rho[0]\n for i in range(1, self.nzon):\n dm[i] = 4. / 3. * np.pi * (self.r[i] ** 3 - self.r[i - 1] ** 3) * self.rho[i]\n # print(f' M_tot(Density) = {np.sum(dm)/phys.M_sun:.3f}')\n return np.sum(dm)", "def ComponentMass(filename,particle_type):\n # read in data from file\n # numpy function will automatically organized labelled columns into\n # an array\n alldata = np.genfromtxt(filename,dtype=None,names=True,skip_header=3)\n\n # save the row indices of all particles of our given type\n indices = np.where(alldata['type'] == particle_type)\n\n # slice an array containing the masses of these particles\n # these values are in units of 10^10 Msun\n masses = alldata['m'][indices]\n\n # calculate the sum of all these masses\n total_mass = np.sum(masses)\n\n # return this number in units of 10^12 Msun, rounded to 3 places\n # this number is already in units of 10^10 Msun\n return np.around(total_mass/1e2,3)", "def omega_plasma(number_density, mass):\n return np.sqrt(4 * np.pi * number_density * cgs.e**2 / mass)", "def getMolecularMass(self):\n dataDict = self.__dict__\n # get formula dictionary\n dd = {}\n for ca in self.chemAtoms:\n if isinstance(ca, ChemAtom):\n ss = ca.elementSymbol\n ii = dd.get(ss)\n if ii is None:\n dd[ss] = 1\n else:\n dd[ss] = ii + 1\n \n # calculate mass\n xx = self.root.currentChemElementStore\n result = sum(ii * xx.findFirstChemElement(symbol=ss).mass for (ss, ii) in dd.iteritems())\n return result", "def calcVelocityFromMomentum(self):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the particle velocity from momentum.\")\n values = {}\n for direction in self.p.order:\n gamma = self.calcLorentzGammaFromMomentum(direction)\n values[direction] = getattr(self.p,direction)/(gamma*self.mass)\n self.setVelocity(Cartesian3DVector(**values))\n return self.getVelocity()", "def mass(self):\n self.check_symbols()\n return self._tree_mass(self._tokentree())", "def linear_momentum(self):\r\n return self.mass * self.vel", "def mass_eval(self):\n # Calculate lengths\n L = np.zeros(self.m)\n for i in range(self.m):\n L[i] = np.linalg.norm(self.coord[self.con[i, 0], :] - self.coord[self.con[i, 1], :])\n\n # Calculate total mass\n self.mass = 0\n for i in range(self.m):\n self.mass += L[i]*self.WEIGHT[int(self.sizes[i])]", "def get_M(self):\n return 1.0", "def m1(self):\n return self.mass[0]", "def calculate_protein_mass(protein: str):\n result = 0\n for p in protein:\n result += monoisotopic_mass_table[p]\n return result", "def get_ppm(self):\n return self.PARA * math.pow((self.get_resistance()/ self.RZERO), -self.PARB)", "def get_ppm(self):\n return self.PARA * math.pow((self.get_resistance()/ self.RZERO), 
-self.PARB)", "def halo_mass(self, index):\n return self.data[self.data[\"hostIndex\"] == index][\n \"particleNumber\"\n ].sum()", "def halo_mass(self, index):\n return self.data[self.data[\"hostIndex\"] == index][\n \"particleNumber\"\n ].sum()", "def Mass_in_R(self, r):\n return self.int_over_density(r)", "def mass(self):\n return _cantera.reactor_mass(self.__reactor_id)", "def totalmomentum (self):\n tot_p=0.\n for planet in self.planets: #this loop takes each 'planet' momentum in 'self.planets' and sums them.\n tot_p += planet.momentum #'tot_p' is the resulting vector of all momenta vectors.\n total_mom = np.linalg.norm(tot_p) #the 'total_mom' is the total linear momentum's magnitude, which is conserved.\n return (total_mom)", "def get_mass(atomic_symbol: str) -> float:\n\n if atomic_symbol in _masses.keys():\n return _masses[atomic_symbol]\n\n else:\n return 0", "def mass(self, polymer='rna'):\n sequence = self.seq.upper()\n\n a = sequence.count('A')\n c = sequence.count('C')\n g = sequence.count('G')\n t = u = sequence.count('T') + sequence.count('U')\n\n if polymer == 'rna':\n return (a * 329.2) + (u * 306.2) + (c * 305.2) + (g * 345.2) + 159\n elif polymer == 'dna':\n return ((a + t) * 617.4) + ((g + c) * 618.4) - 124\n elif polymer == 'ssdna':\n return (a * 313.2) + (t * 304.2) + (c * 289.2) + (g * 329.2) - 62\n else:\n raise ValueError(\"unknown polymer type: '{}'\".format(polymer))", "def mass(self, star_mass=1.0):\n m_mj = 0.004920266275467775 * star_mass**(2./3) \\\n * self.P**(1./3) * self.K * np.sqrt(1-self.e**2)\n return m_mj", "def get_velocity(self):\n return self.momentum/self.mass", "def mass(self):\n\t\traise NotImplementedError", "def mass(self) -> Mass:\n return self.weight", "def force(particle1, particle2):\n position1 = particle1.position\n position2 = particle2.position\n\n distance_12 = np.sqrt((position1.x - position2.x)**2 +\n (position1.y - position2.y)**2 +\n (position1.z - position2.z)**2)\n\n return G*particle1.mass*particle2.mass/distance_12**2", "def _reduced_mass(structure) -> float:\n reduced_comp = structure.composition.reduced_composition\n num_elems = len(reduced_comp.elements)\n elem_dict = reduced_comp.get_el_amt_dict()\n\n denominator = (num_elems - 1) * reduced_comp.num_atoms\n\n all_pairs = combinations(elem_dict.items(), 2)\n mass_sum = 0\n\n for pair in all_pairs:\n m_i = Composition(pair[0][0]).weight\n m_j = Composition(pair[1][0]).weight\n alpha_i = pair[0][1]\n alpha_j = pair[1][1]\n\n mass_sum += (alpha_i + alpha_j) * (m_i * m_j) / (m_i + m_j) # type: ignore\n\n reduced_mass = (1 / denominator) * mass_sum\n\n return reduced_mass", "def totalMass(self, trunc=None):\n if trunc is None:\n trunc = self.trunc\n rVir = self.U.rVir(m, z)\n rS, rhoS, c = self.rS_rhoS_c(m, z)\n # truncation radius over scale radius\n xMax = trunc * rVir/rS\n result = 4./3. * np.pi * rS**3 * rhoS\n result = xMax - np.log(1 + xMax)\n return result", "def totalMass(self, trunc=None):\n if trunc is None:\n trunc = self.trunc\n rVir = self.U.rVir(m, z)\n rS, rhoS, c = self.rS_rhoS_c(m, z)\n # truncation radius over scale radius\n xMax = trunc * rVir/rS\n result = 4./3. 
* np.pi * rS**3 * rhoS\n result = xMax - np.log(1 + xMax)\n return result", "def get_center_of_mass_allies(self,obs):", "def calc_mass(self):\n\n star = self.star\n\n M, K, N = star.mesh_size\n ph = star.phi_coords\n mu = star.mu_coords\n r = star.r_coords\n\n def Q1(j, k):\n sum = 0\n\n for i in range(0, M - 2, 2):\n sum += (1 / 6) * (ph[i + 2] - ph[i]) * (star.rho[i, j, k] +\n 4 *\n star.rho[i + 1, j, k]\n + star.rho[i + 2, j, k])\n\n return 2 * sum\n\n def Q2(k):\n sum = 0\n\n for j in range(0, K - 2, 2):\n sum += (1 / 6) * (mu[j + 2] - mu[j]) * \\\n (Q1(j, k) + 4 * Q1(j + 1, k) + Q1(j + 2, k))\n\n return 2 * sum\n\n mass = 0\n\n for k in range(0, N - 2, 2):\n mass += (1 / 6) * (r[k + 2] - r[k]) * (r[k]**2 * Q2(k) +\n 4 * r[k + 1]**2 * Q2(k + 1) +\n r[k + 2]**2 * Q2(k + 2))\n\n return mass", "def _get_molecule_center_of_mass(self):\n center_of_mass = np.zeros([3], dtype=float)\n masses = self._prmtop[\"MASS\"]\n for atom_ind in range(len(self._crd)):\n center_of_mass += masses[atom_ind] * self._crd[atom_ind]\n total_mass = masses.sum()\n if total_mass == 0:\n raise RuntimeError(\"zero total mass\")\n return center_of_mass / total_mass", "def add_mass_energy(particles: list[Particle]) -> u.Quantity:\n total_mass_energy = 0.0 * u.J\n for particle in particles:\n total_mass_energy += particle.mass_energy\n return total_mass_energy.to(u.J)", "def mass_from_composition(composition):\n mass = 0.0\n for k, v in composition.items():\n if k == 0: # electron\n mass -= v * 5.489e-4\n else:\n mass += v * relative_atomic_masses[k - 1]\n return mass", "def getMomentum(self):\n return self.p", "def total_mass_au(self):\n return np.sum(self.atomic_mass)", "def massFraction(self, species):\n k = self.speciesIndex(species)\n return _cantera.phase_massfraction(self._phase_id,k)", "def computeMassLi(polymer,loading,density=1.1):\n M = Materials()\n m = CylinderRPM()\n area = m.calculateDetectorArea()\n massLi = area*217.0*M.GetLiMassFraction(loading,polymer)*density\n return massLi", "def density(self):\n return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3", "def get_total_mass(self) -> int:\n total_mass = 0\n for i_complex, i_abundance in self._complexes.items():\n total_mass += i_complex.get_size_of_complex() * i_abundance\n return total_mass", "def richness_to_mass(richness, norm=2.7e13, slope=1.4):\n mass = norm * ((richness / 20.) 
** slope)\n return mass", "def estimate(self):\n mu = self.mean()\n var = np.average((self.particles - mu) ** 2, weights=self.weights, axis=0)\n\n return mu, var", "def get_effective_mass():\n\n H_BAR = 6.582119514e-16 # eV*s\n M_0 = 9.10938356e-31 # kg\n N_KPTS = 6 # Number of k-points included in the parabola.\n\n spin_up = Spin(1)\n\n band_structure = Vasprun('vasprun.xml').get_band_structure()\n\n # Locations of CBM and VBM in band_structure.bands\n cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]\n cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]\n\n vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]\n vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]\n\n k = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n E = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n\n e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords\n h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords\n\n for n in range(-N_KPTS, 1):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['left'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['left'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['left'].append(e_energy)\n E['hole']['left'].append(h_energy)\n\n for n in range(1, 1 + N_KPTS):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['right'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['right'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['right'].append(e_energy)\n E['hole']['right'].append(h_energy)\n\n # 2nd order fits\n e_l_fit = np.poly1d(\n np.polyfit(k['electron']['left'], E['electron']['left'], 2))\n e_r_fit = np.poly1d(\n np.polyfit(k['electron']['right'], E['electron']['right'], 2))\n h_l_fit = np.poly1d(\n np.polyfit(k['hole']['left'], E['hole']['left'], 2))\n h_r_fit = np.poly1d(\n np.polyfit(k['hole']['right'], E['hole']['right'], 2))\n\n # Curvatures\n e_l_curvature = e_l_fit.deriv().deriv()[0]\n e_r_curvature = e_r_fit.deriv().deriv()[0]\n h_l_curvature = h_l_fit.deriv().deriv()[0]\n h_r_curvature = h_r_fit.deriv().deriv()[0]\n\n # Unit conversion\n e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0\n e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0\n h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0\n h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0\n\n return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},\n 'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}", "def energy(p,m):\n return math.sqrt(p*p + m*m)", "def gen_mass(self, n):\r\n n = self.gen_input_check(n)\r\n Mpr = 
self.Mprange.to(\"earthMass\").value\r\n Mp = (\r\n np.exp(np.random.uniform(low=np.log(Mpr[0]), high=np.log(Mpr[1]), size=n))\r\n * u.earthMass\r\n )\r\n\r\n return Mp", "def calculate_molar_mass(collector):\n avg_temp = collector.get_average_temperature()\n avg_acceleration = collector.get_average_acceleration()\n ground_pressure = collector.get_ground_pressure()\n numerator = 0\n denominator = 0\n for altitude, pressure in\\\n collector.get_iter('altitude', 'pressure'):\n try:\n numerator -= (Calculator.R * avg_temp /\n avg_acceleration / altitude *\n math.log(pressure / ground_pressure))\n except ZeroDivisionError:\n pass\n else:\n denominator += 1\n if denominator == 0:\n raise NoDataError('No altitude/pressure to calculate molar mass')\n return numerator / denominator", "def astro_dist(mass):\n value = (G.value * mass) / (c.value ** 2)\n astro_dist = u.def_unit(\"astro-m(GM/c2)\", u.m / value)\n return astro_dist", "def total_mass_amu(self):\n return np.sum(self.mass_amu)", "def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy", "def mass(self) -> Mass:\n return Mass(0.0)", "def mass(self, element):\n return self.m(element)", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)", "def enthalpy_mass(self):\n return _cantera.reactor_enthalpy_mass(self.__reactor_id)", "def mape(self) -> float:\n return float(np.mean(np.abs((self.true - self.predicted) / self.true)) * 100)", "def molarMass(matID):\n mat = goodID(matID)\n compound = xl.CompoundParser(mat)\n return atomWeight(matID) * compound['nAtomsAll']", "def _compute_mass(box_size, evo_config):\n\n # ensure format\n standard_volume = evo_config['individuals']['standard_volume']\n if isinstance(box_size, list):\n if len(box_size) == 1: # sphere\n box_size = box_size[0]\n box_size = np.asarray(box_size)\n\n if np.prod(box_size.shape) < 2: # sphere\n return 4 / 3 * np.pi * box_size**3 / standard_volume\n else: # box\n if np.ndim(box_size) == 1:\n return np.prod(box_size * 2) / standard_volume\n else:\n return np.prod(box_size * 2, axis=1) / standard_volume", "def get_stir_mass_element(tracer_id, model):\n filepath = paths.stir_filepath(tracer_id, model)\n with open(filepath, 'r') as f:\n line = f.readline()\n mass = float(line.split()[3])\n\n return mass", "def meanMolarMass(self):\n return _cantera.phase_meanmolwt(self._phase_id)", "def findCenterOfMass(self, newSampleWeights):\n # First must normalize the weights\n normedWeights = self.normalize(newSampleWeights)\n weightedAvg = 0\n for i in range(self.numParticles):\n weightedAvg += self.samples[i] * normedWeights[i]\n return weightedAvg", "def point_mass(state_vec):\n mu = set_mu(state_vec)\n x, y, z = state_vec[0:3]\n r = norm(state_vec[0:3])\n\n return [-mu * coord / r**3 for coord in state_vec[0:3]]", "def first_moment(self, mass, z=None):\n return 1.0", "def marginal(self):\n m = np.zeros(len(self.domain))\n for fnode in self.neighbors:\n m += self.received[fnode]\n return np.exp(normalize(m))", "def _nelec(self):\n pd = self.particle_distribution(self._gam * mec2)\n return pd.to(1/mec2_unit).value", "def center_of_mass(self, tolerance=1e-9):\n props = GProp_GProps()\n brepgprop_VolumeProperties(self.topods_solid(), props, tolerance)\n com = props.CentreOfMass()\n return geom_utils.gp_to_numpy(com)", "def total_charge(particles: list[Particle]) -> int:\n total_charge = 0\n for particle in particles:\n if particle.isotope:\n total_charge += 
particle.atomic_number\n elif not particle.element:\n total_charge += particle.charge_number\n return total_charge", "def effectsize_normal(self, prob=None):\n if prob is None:\n prob = self.prob1\n return stats.norm.ppf(prob) * np.sqrt(2)", "def propensity_to_move(self, animal):\n if isinstance(self, Mountain) or isinstance(self, Ocean):\n return 0\n else:\n return math.exp(animal.parameters['lambda'] *\n self.relative_abundance_fodder(animal))", "def concrete_total_moment(self,strain,na_z, newFOS=None):\r\n\t\tforce = self.concrete_stress(strain, newFOS)*(self.width*self.mesh_dz)\r\n\t\treturn sum(force*(self.mesh_center - na_z))", "def Mass_diff_005(self, rmax):\n rmax = 10**rmax\n mass_enc = self.int_over_density(rmax)\n return np.abs(mass_enc - 0.005 * self.halo_mass)", "def TotalEnergy(self):\n return (math.sqrt((Particle.RestEnergy(self) ** 2)\n + (np.linalg.norm(Particle.Momentum(self)) * const.speed_of_light) ** 2))", "def __calcMomenta(self, kinPara):\n self.physArea,self.__allMomenta,self.PreFac = self.__kinLib(kinPara)\n return 0", "def distance_metres(self):\n return self.polar.distance_metres" ]
[ "0.796206", "0.75452185", "0.7294323", "0.72193676", "0.7178756", "0.7156736", "0.7112089", "0.7013868", "0.697946", "0.69783187", "0.6944408", "0.6921542", "0.6921542", "0.6899986", "0.6871825", "0.6846594", "0.68247277", "0.6723331", "0.6718318", "0.6706135", "0.67014885", "0.6666063", "0.663915", "0.66258454", "0.66258454", "0.65694845", "0.6565071", "0.65323305", "0.65143484", "0.6500935", "0.64895314", "0.64887875", "0.64775664", "0.64749783", "0.6470169", "0.6466879", "0.64377826", "0.64375687", "0.6414829", "0.6405291", "0.64030254", "0.64030254", "0.6386832", "0.6386832", "0.6384418", "0.6370296", "0.63691705", "0.63429517", "0.6337249", "0.6304329", "0.6280729", "0.62803704", "0.6275512", "0.62735766", "0.6268308", "0.6240104", "0.6240104", "0.6237986", "0.6236348", "0.62234586", "0.6220993", "0.62158227", "0.62048733", "0.61778754", "0.61662185", "0.6154826", "0.606593", "0.60604066", "0.6025161", "0.60167396", "0.6009425", "0.600658", "0.5987617", "0.59862983", "0.5985658", "0.5976059", "0.59646094", "0.5935554", "0.593075", "0.5929369", "0.5928099", "0.5921375", "0.5912933", "0.5900443", "0.5889192", "0.5887921", "0.5882149", "0.58640665", "0.5862603", "0.58599246", "0.5855311", "0.58388543", "0.58361197", "0.581334", "0.57946706", "0.578199", "0.5764145", "0.5763439", "0.5756754", "0.5754655" ]
0.74535996
2
Method that returns the relativistic momentum of the particle
def Momentum(self): return (np.multiply(Particle.LorentzFactor(self) , np.array(self.velocity,dtype=float))* self.restMass)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getMomentum(self):\n return self.p", "def calcMomentumFromVelocity(self):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the particle momentum from velocity.\")\n values = {}\n for direction in self.v.order:\n gamma = self.calcLorentzGammaFromVelocity(direction)\n values[direction] = getattr(self.v,direction)*gamma*self.mass\n self.setMomentum(Cartesian3DVector(**values))\n return self.getMomentum()", "def linear_momentum(self):\r\n return self.mass * self.vel", "def calcVelocityFromMomentum(self):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the particle velocity from momentum.\")\n values = {}\n for direction in self.p.order:\n gamma = self.calcLorentzGammaFromMomentum(direction)\n values[direction] = getattr(self.p,direction)/(gamma*self.mass)\n self.setVelocity(Cartesian3DVector(**values))\n return self.getVelocity()", "def momentum (self):\n\n for planet in self.planets: #this loop takes a 'planet' from 'self.planets' and computes it linear momentum.\n planet.momentum = planet.mass * planet.velocity #Each body's resulting momentum is updated to the body's information defined in the Particle class.", "def get_velocity(self):\n return self.momentum/self.mass", "def RelativisticMass(self):\n return Particle.LorentzFactor(self) * self.restMass", "def totalmomentum (self):\n tot_p=0.\n for planet in self.planets: #this loop takes each 'planet' momentum in 'self.planets' and sums them.\n tot_p += planet.momentum #'tot_p' is the resulting vector of all momenta vectors.\n total_mom = np.linalg.norm(tot_p) #the 'total_mom' is the total linear momentum's magnitude, which is conserved.\n return (total_mom)", "def getFinalMomentum(self):\n return self.final_p_MeV", "def calcMomentum(self):\n # start conditions\n if not self.quiet:\n fs = u'''Calculating momentum gain.\n Peak field: {self.rf_peak_field:.3f} MV/m\n Phase: {self.phase:.1f}°'''\n print(fs.format(**locals()))\n\n # Fortran method (0.8 ms to run cf 11 ms for Python code)\n self.t_array, self.gamma_dash_array, self.gamma_array, self.beta_array, self.p_array = calcMomentum.calcmomentum(self.freq, self.phase, self.gamma_start, self.dz, self.gamma_tilde_dash, self.phase_offset)\n # print(self.gamma_dash_array)\n self.final_p_MeV = self.p_array[-1] * -1e-6 * epsilon_e\n\n if not self.quiet:\n print(u'Final momentum: {:.3f} MeV/c'.format(self.final_p_MeV))\n self.calc_level = CALC_MOM", "def angular_momentum(self):\n cart = self.represent_as(coord.CartesianRepresentation)\n return cart.pos.cross(cart.vel).xyz", "def totalmass_comvelocity(particle_list):\r\n total_momentum = sum([particle.linear_momentum()\r\n for particle in particle_list])\r\n total_mass = sum([particle.mass for particle in particle_list])\r\n\r\n return total_mass, total_momentum / total_mass", "def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)", "def get_momentum(self, iteration: int) -> float:\n cycle_perc = iteration / self.full_cycle\n\n # Normal cycle\n # Increase\n if cycle_perc <= 0.5:\n # starts @ 0 for baseline momentum\n momentum = self.init_momentum + iteration * self.momentum_decrement\n\n # Decrease\n elif 0.5 < cycle_perc <= 1:\n momentum = self.init_momentum + (\n self.full_cycle - 
iteration) * self.momentum_decrement\n\n # Tail cycle | cycle_perc > 1\n else:\n momentum = self.init_momentum\n\n return momentum", "def dispersion(self, p):\n return p**2 / (2*self.mass)", "def getMomentumMap(self):\n return self.p_array * -1e-6 * epsilon_e", "def update_moments_r(self):\n denominator = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 )\n nominator1 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 ) * self.constellation\n \n nominator2 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2) * np.power(self.constellation, 2)\n try:\n \n moment1 = nominator1.sum(axis=1) / denominator.sum(axis=1)\n moment2 = nominator2.sum(axis=1) / denominator.sum(axis=1)\n assert np.all(np.logical_not(np.isnan(moment1))) and np.all(np.logical_not(np.isnan(moment2)))\n except:\n print(\"Oops! That was no valid number. Try again...\")\n\n \n self.mu = moment1\n return moment1, moment2", "def particleMass(self):\n return self.params['particleMass']", "def gradient_descent_momentum(self):\n return self._gradient_descent_momentum", "def first_moment(self, mass, z=None):\n return 1.0", "def escaped_momentum(self):\r\n position, velocity,escaped_particles,impact,collision,mom = self.box_collision_info()\r\n\r\n for i in xrange(1,self.n):\r\n velocity[np.logical_not(impact)] = velocity[np.logical_not(\r\n impact)]\r\n momentum = self.m*velocity\r\n abs_momentum = np.sum(np.sqrt(momentum[:,0]**2 + momentum[:,1]**2\r\n + momentum[:,2]**2))/2\r\n force = abs_momentum/self.dt\r\n\r\n return abs_momentum, force", "def estimate(self):\n mu = self.mean()\n var = np.average((self.particles - mu) ** 2, weights=self.weights, axis=0)\n\n return mu, var", "def calP(self):\n N = len(self.listOfParticles)\n m = self.listOfParticles[0].m\n vsum = 0\n for particle in self.listOfParticles:\n vsum += particle.V.len()\n A = np.pi*self.R**2\n F = 0.5 * A * (2*self.R) * m * N * vsum**2\n return F", "def _velocity_position(self, particle, dim, p_nd):\n\n new_velocity = (self.w * particle.velocity[dim]) \\\n + (self.c1 *\n (particle.pbest_position[dim] - particle.position[dim])) \\\n + (self.c2 * (self.gbest_position[dim] - particle.position[dim])) \\\n + (self.c3 * (p_nd - particle.position[dim]))\n\n new_velocity = min(\n self._vmax,\n max(-self._vmax, new_velocity)\n )\n\n new_position = min(\n self._bounds[1],\n max(self._bounds[0], particle.position[dim] + new_velocity)\n )\n\n return new_velocity, new_position", "def setMomentum(self,p):\n if p is None:\n self.p = Cartesian3DVector()\n else:\n if isinstance(p,Cartesian3DVector):\n self.p = Cartesian3DVector(p.x,p.y,p.z)\n else:\n raise CoordinateVector(\"Initializing a particle with the incorrect momentum vector type.\")", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)", "def particleCharge(self):\n return self.params['particleCharge']", "def relative_velocity(self):\n return self.base_platform.relative_velocity", "def getMomentumGradient(self):\n dphi = 0.5\n orig_phase = self.phase\n p0 = self.phaseToMomentum(orig_phase - dphi / 2)\n p1 = self.phaseToMomentum(orig_phase + dphi / 2)\n self.setRFPhase(orig_phase)\n return (p1 - p0) / dphi", "def steel_total_moment(self,strain_dis,na_z):\r\n\t\ttotal_moment = 0.0\r\n\t\tfor steel in self.reinforcement:\r\n\t\t\tstrain = np.interp(steel[0], self.mesh_center,strain_dis)\r\n\t\t\tforce = 
(self.steel(strain)-self.concrete(strain))*steel[1]\r\n\t\t\ttotal_moment = total_moment + force*(steel[0]-na_z)\r\n\t\treturn total_moment", "def integrateMomentum(p, dt, fluid_v, fvolpp, mu_g, rfluid):\n\n #integration constants\n beta = 2.0/3.0 \n alpha1 = -4.0/3.0 \n alpha2 = 1.0/3.0 \n dtbeta = dt * beta \n\n vel1 = p.vel[0] \n pos1 = dtbeta * vel1 - alpha1 * p.pos[0] - alpha2 * p.pos[1] \n rp = p.get_density() \n D = p.get_diameter() \n mdot = (p.mass[0] - p.mass[1]) / dt \n \n mfluid = rfluid * fvolpp + 1e-30 # mass of fluid around particle\n fixedsrc = -alpha1 * p.vel[0] - alpha2 * p.vel[1] \n volp = math.pi * D * D * D / 6.0 \n volpp = fvolpp \n # enhance drag function for large volume fraction\n alphav = min(2.0, volp / max(volpp, 1e-30)) \n \n fp_vf = max((8.0 * alphav) ** 6.0 - 0.001, 0.0) \n\n #Integration loop\n max_iterations = 20\n for i in range(max_iterations): \n #Update fluid velocity based on delta particle momentum\n if i > 0: #Past first iteration\n fluid_v = fluid_v - ((vel1 - p.vel[0]) * p.mass[0] / mfluid ) \n\n dv = abs(fluid_v - vel1) \n Re = rfluid * D * dv / mu_g \n # blowing Reynolds number\n Re_b = abs(mdot / (D * mu_g * math.pi)) \n a = 0.09 + 0.077 * math.exp(-0.4 * Re) \n b = 0.4 + 0.77 * math.exp(-0.04 * Re) \n denom = 1.0 + a * Re_b **b \n\n fpblow = (1. + 0.0545 * Re + 0.1 * math.sqrt(Re) * (1.0 - 0.03 * Re)) / denom + fp_vf \n # Clift-Gauvin drag function (Crowe, 1998)\n fpcg = 1.0 + 0.15 * Re ** 0.687 + 0.0175 * Re / (1.0 + 4.25e4 * (Re+1e-20) **-1.16) + fp_vf \n # Choose drag function based on reynolds number. For high reynolds\n # number use Clift Gauvin, otherwise use blowing reynolds number \n if Re < 100:\n fp = fpblow\n else:\n fp = fpcg\n taup = rp * D ** 2 / (18.0 * mu_g * fp) \n vcoef = dtbeta / taup \n\n # vel1 = (vcoef*fluid_v + fixedsrc)/(1.+vcoef) \n f = (vcoef * fluid_v + fixedsrc) / (1.0 + vcoef) - vel1 \n df = -vcoef * p.mass[0] / (mfluid * (1.0 + vcoef)) - 1.0 \n vel1 -= -f/df \n pos1 = dtbeta * vel1 - alpha1 * p.pos[0] - alpha2 * p.pos[1] \n\n # If iterated at least 2 times, check for convergence\n if i > 1 and abs(f) / (abs(df) * (0.1 + abs(vel1))) < 1.0e-5 : \n break \n \n # Now advance the particle momentum in time\n p.vel[2] = p.vel[1] \n p.vel[1] = p.vel[0] \n p.vel[0] = vel1 \n p.pos[1] = p.pos[0] \n p.pos[0] = pos1", "def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy", "def __calcMomenta(self, kinPara):\n self.physArea,self.__allMomenta,self.PreFac = self.__kinLib(kinPara)\n return 0", "def prop(self, delta: float) -> tuple[float, float, float, float, float]:\n mean_motion, ecc, raan, argp, nu = \\\n MathCore.coe_update(delta, self.mean_motion, self.ndot2, self.ecc, self.eccdot, self.raan,\n self.raandot, self.argp, self.argpdot, self.mean_anom)\n\n return mean_motion, ecc, raan, argp, nu", "def get_ppm(self):\n return self.PARA * math.pow((self.get_resistance()/ self.RZERO), -self.PARB)", "def get_ppm(self):\n return self.PARA * math.pow((self.get_resistance()/ self.RZERO), -self.PARB)", "def get_start_velocity(self):\n # uniform circular motion have a start velocity of omega\n # TODO generate from start position and rotation direction\n return np.array([0, self.wz, 0])", "def TotalEnergy(self):\n return (math.sqrt((Particle.RestEnergy(self) ** 2)\n + (np.linalg.norm(Particle.Momentum(self)) * const.speed_of_light) ** 2))", "def _momentum_update(self):\n for param_ol, param_tgt in 
zip(self.online_net.parameters(),\n self.target_net.parameters()):\n param_tgt.data = param_tgt.data * self.momentum + \\\n param_ol.data * (1. - self.momentum)", "def particle_velocityV(V,F,dt,Rv,sigma,epsilon,D,N): \n V += dt/2*(particle_forceV(Rv[-1], N, sigma, epsilon, D) + particle_forceV(Rv[-2], N, sigma, epsilon, D))\n return V", "def get_optimizer_momentum(optimizer: Optimizer) -> float:\n beta = safitty.get(optimizer.param_groups, 0, \"betas\", 0)\n momentum = safitty.get(optimizer.param_groups, 0, \"momentum\")\n return beta if beta is not None else momentum", "def moments(self):", "def get_mass(self):\n return self.m", "def mass(self):\n\t\treturn self.volume*self.density", "def _interpolate_cumulative_mass(self, r):\n \n # interpolate\n #spline = interpolate.UnivariateSpline(self._cumulative_mass_r,\n # self._cumulative_mass_m)\n \n # linear interpolation is more reliable, assuming number of points\n # is large enough\n spline = interpolate.interp1d(self._cumulative_mass_r, self._cumulative_mass_m)\n \n return 10.0**spline(np.log10(r))", "def cal_mass(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for mass routine)')\n\n\n \n if self.E**2-self.px**2-self.py**2-self.pz**2>1e-7: #precision problem\n self.mass=math.sqrt(self.E**2-self.px**2-self.py**2-self.pz**2)\n else:\n self.mass=0", "def var_momentum(self):\n\n if isinstance(self.momentum, MomentumNesterov):\n return self.t\n return self.k", "def dist_mass(self, Mp):\r\n\r\n Mearth = np.array(Mp, ndmin=1) * u.earthMass\r\n\r\n tmp = ((Mearth >= self.Mprange[0]) & (Mearth <= self.Mprange[1])).astype(float)\r\n Mjup = Mearth.to(\"jupiterMass\").value\r\n\r\n return tmp * Mjup ** (-1.3)", "def molar_mass_dry_air():\n return 28.9647", "def _get_proposed_values(self):\n # Take half step in time for updating momentum\n momentum_bar = self.momentum + 0.5*self.stepsize*self.model.grad_logp(self.position)\n\n # Take full step in time for updating position position\n position_bar = self.position + self.stepsize * momentum_bar\n\n grad_log = self.model.grad_logp(position_bar)\n\n # Take remaining half step in time for updating momentum\n momentum_bar = momentum_bar + 0.5 * self.stepsize * grad_log\n\n return position_bar, momentum_bar, grad_log", "def particle_initial_velocity(fignr,N,D,T,m,dim,kb):\n V = np.zeros((3,N))\n V[0:dim,:] = np.random.normal(0, kb*T/m, (dim,N))# / np.sqrt(T/(kb*m))\n plotfunctions.velocity(fignr,N,V)\n # Typical speed for particles\n return V", "def calcLorentzGammaFromMomentum(self,direction):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the lorentz gamma.\")\n if direction not in self.x.order: \n raise CoordinateVector(\"The direction, \"+str(direction)+ \" needs to be one of \" +\",\".join(self.x.order) + \" to calculated the lorentz gamma.\")\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n return math.sqrt(1 + (getattr(self.p,direction)/(self.mass*speed_light))**2)", "def moment(self, n, mu, sigma):\n return scipy_norm.moment(n, mu, sigma)", "def get_M(self):\n return 1.0", "def omega(self, mass: float) -> float:\n return np.sqrt(self.spring_constant / mass)", "def concrete_total_moment(self,strain,na_z, newFOS=None):\r\n\t\tforce = self.concrete_stress(strain, newFOS)*(self.width*self.mesh_dz)\r\n\t\treturn sum(force*(self.mesh_center - na_z))", "def setFinalMomentum(self, momentum):\n delta_p_sq = lambda pf: 
(self.peakFieldToMomentum(pf) - momentum) ** 2\n return self.optimiseParam(delta_p_sq, 'Set final momentum', 'rf_peak_field', 'MV/m', momentum, 'MeV/c')", "def second_moment(self, mass, z=None):\n return 1.0", "def particle_pos(particle, time):\n return particle.pos + particle.dir * particle.speed * (time - particle.time)", "def motionUpdate(self, newY, deltaY):\n result = newY + deltaY + random.gauss(0, 1)\n\n # Make sure the particle is within bounds\n\n if result > self.maxValue:\n return self.maxValue\n elif result < self.minValue:\n return self.minValue\n\n return result", "def getMass(self):\n return self.mass", "def em_var(self) -> float:\n if self.__total_pulls == 0:\n raise Exception('Number of pulls is 0. No empirical variance.')\n return (self.__sum_of_square_reward -\n self.__total_rewards**2 / self.__total_pulls) / self.__total_pulls", "def _nelec(self):\n pd = self.particle_distribution(self._gam * mec2)\n return pd.to(1/mec2_unit).value", "def compute_plume(self, full_nondim=False):\n def compute_nondimensional(x):\n \"\"\" both nondim melt rate and circulation \"\"\"\n M = self.nondim_M(x)\n M.attrs = {'long_name':'dimensionless meltrate; eqn. (26)'}\n phi0 = self.phi0(x)\n phi0.attrs = {'long_name':'dimensionless circulation; eqn. (25)'}\n return M, phi0\n\n # calculations\n self.dp['M'], self.dp['phi0'] = compute_nondimensional(self.dp.dgrl_)\n\n self.dp['m'] = self.dim_M().where(self.dp.mask==3)*3600*24*365 # [m/s] -> [m/yr]\n self.dp.m.attrs = {'long_name':'dimensional meltrates; eqn. (28a)', 'units':'m/yr'}\n \n self.dp['Phi'] = self.Phi().where(self.dp.mask==3)\n self.dp.Phi.attrs = {'long_name':'dimensional circulation; eqn. (29)', 'units':'m^3/s'}\n\n if full_nondim: # compute non-dimensional 1D melt curve for full [0,1] interval\n self.dp = self.dp.assign_coords({'x_':np.linspace(0,1,51)})\n self.dp.x_.attrs['long_name'] = 'non-dimensional coordinates in [0,1]'\n self.dp['M_full'], self.dp['phi0_full'] = compute_nondimensional(self.dp.coords['x_'])\n \n return self.dp", "def _calpara(self):\n self.up = math.exp(self.sigma*math.sqrt(self.deltatime))\n self.down = math.exp(-self.sigma*math.sqrt(self.deltatime))\n self.upprob = (math.exp((self.r-self.d)*self.deltatime)-self.down)/(self.up-self.down)", "def particle(self) -> Particle:\n return self.expiration.particle", "def getVelocity(self):\n\t\tif len(self.prevPositions) < 2:\n\t\t\tself.velocity = 0\n\t\telse:\n\t\t\ttime = self.position[2] - self.prevPositions[len(self.prevPositions)-1][2]\n\t\t\txdist = self.position[0][0] - self.prevPositions[len(self.prevPositions)-1][0][0]\n\t\t\tydist = self.position[0][1] - self.prevPositions[len(self.prevPositions)-1][0][1]\n\t\t\tself.velocity = (xdist,ydist,time.total_seconds())\n\t\treturn self.velocity\n\t\t\t#speed = math.pow(math.pow(1.0*xdist,2) + math.pow(1.0*ydist,2),0.5) / (1.0*time.total_seconds())", "def calculate_task_potential(self) -> float:\n cur_xy = self.agent.get_position()[:2]\n goal_xy = np.array([1e3, 0])\n return -np.linalg.norm(cur_xy - goal_xy) * 60", "def mass(self):\n return self._P", "def force(particle1, particle2):\n position1 = particle1.position\n position2 = particle2.position\n\n distance_12 = np.sqrt((position1.x - position2.x)**2 +\n (position1.y - position2.y)**2 +\n (position1.z - position2.z)**2)\n\n return G*particle1.mass*particle2.mass/distance_12**2", "def sectional_moment(self,strain,na_z):\r\n\t\treturn self.steel_total_moment(strain,na_z) + \\\r\n\t\t\tself.concrete_total_moment(strain,na_z)", "def get_mortality_rate(period, 
species):\n\n initial_value = 0.05\n mortality_decrease_rate = 0.97 # 0.97 ^ 50 .==' 0.218\n r = initial_value * (mortality_decrease_rate ** period)\n return 1 - r", "def _compute_moments(self, u):\n\n # Get the moments from the parent Gaussian Markov Chain\n #u = self.parents[0].get_moments() #message_to_child()\n\n # Send only moments <X(n)> and <X(n)X(n)> but not <X(n-1)X(n)>\n return u[:2]", "def _period( self ):\r\n\treturn 2 * pi * sqrt( self.orbital_elements[0]**3 / self.mu_central_body )\r\n\t# http://en.wikipedia.org/wiki/Orbital_period#Calculation\r", "def compute_swimming_velocity(particle, fieldset, time):\n if particle.active == 1:\n particle.u_swim = particle.vmax * (1-particle.hab) * cos(particle.theta)\n particle.v_swim = particle.vmax * (1-particle.hab) * sin(particle.theta)", "def normal_velocity(self):\n try:\n a = self.params['nct']['a']\n b = self.params['nct']['b']\n # temp_log = self.get_log('Overburden_Pressure')\n return normal(x=self.depth, a=a, b=b)\n except KeyError:\n print(\"No 'Overburden_Pressure' log found.\")", "def delta(self):\n return (self._stages[EStage.CURRENT] - self._stages[EStage.START]) \\\n / (self._stages[EStage.END] - self._stages[EStage.START])", "def initial_velocity(self) -> float:\n return self._initial_velocity", "def get_momentum(self) -> List[float]:\n _, momentum = self._get_steps_lr_momentum(self.last_epoch)\n return [momentum] * self.total_groups", "def p0(self):\n return self.lerp(0)", "def mass_energy(particle: Particle, mass_numb: Optional[Integral] = None) -> u.J:\n return particle.mass_energy", "def get_mass(self):\n _pal.lib.geometry_get_mass.restype = c.c_float\n return _pal.lib.geometry_get_mass(self._geometry)", "def long_range_fraction_deposited(self) -> _VectorisedFloat:\n return self.concentration_model.infected.particle.fraction_deposited(\n self.concentration_model.evaporation_factor)", "def momentum(self, k):\n self._momentum = k\n self._energy = self.dispersion(k)", "def p() -> float:\n return 0.9", "def update_variables_momentum(alpha, beta1, var, grad, v):\n v = beta1 * v + (1-beta1) * grad\n var = var - alpha * v\n return var, v", "def get_momentum(note, prev_note, slider_len):\n v1 = np.array([note[\"x\"], note[\"y\"]])\n v0 = get_end_point(prev_note)\n v = v1 - v0\n if note[\"time\"] - get_end_time(prev_note) == 0 or note[\"time\"] - prev_note[\"time\"] == 0:\n # it has the same time the previous note ends. 
either a bugged sliderend or a double note\n return 0\n end_type_momentum = np.sqrt(\n v.dot(v)) / (note[\"time\"] - get_end_time(prev_note)) / slider_len\n\n # Since slider jumps in maps cause parameters to be learned too high\n # we try to deal with slider leniency by using the beginning of slider\n v2 = np.array([prev_note[\"x\"], prev_note[\"y\"]])\n v3 = v1 - v2\n start_type_momentum = np.sqrt(\n v3.dot(v3)) / (note[\"time\"] - prev_note[\"time\"]) / slider_len\n return np.min([end_type_momentum, start_type_momentum])", "def computeAmbient(self):\n maxMDP = -(1 + 1e-10)\n array_MDP = minkowskiArrayDot(self.examples, self.centroid)\n array_MDP[array_MDP > maxMDP] = maxMDP\n\n # multiplies last column of examples by-1\n dMDP_dcent = np.copy(self.examples)\n #dMDP_dcent[:, -1] *= -1\n\n distances = np.arccosh(-array_MDP)\n scales = (-2/len(distances)) * distances / np.sqrt((array_MDP ** 2) - 1)\n for row in range(len(dMDP_dcent)):\n dMDP_dcent[row, :] *= scales[row]\n grad_temp = np.sum(dMDP_dcent, axis=0)\n return grad_temp.reshape((grad_temp.shape[0], 1))\n # return np.matmul(dMDP_dcent.T, scales)", "def DeltaGmPrime(self):\n dg0_prime = self.DeltaG0Prime()\n correction = self._GetConcentrationCorrectionMilliMolar()\n return dg0_prime + correction", "def particle(self) -> Particle:\n return Particle(diameter=self.diameter)", "def mu(self):\n return self.mass * G", "def EulerN(particle,dt):\n particle.acc = particle.acc\n particle.vel = particle.vel + particle.acc*dt\n particle.pos = particle.pos + particle.vel*dt\n\n return particle", "def momentum_op(x, wfunc, h_bar = 6.626e-34/(2*np.pi), finitediff_scheme = 'central'):\n return -1j*h_bar*first_derivative(x, wfunc, finitediff_scheme = finitediff_scheme)", "def mass_tot_rho(self):\n\n dm = np.zeros(self.nzon)\n dm[0] = 4. * np.pi / 3. * (self.r[0] ** 3 - self.r_cen ** 3) * self.rho[0]\n for i in range(1, self.nzon):\n dm[i] = 4. / 3. 
* np.pi * (self.r[i] ** 3 - self.r[i - 1] ** 3) * self.rho[i]\n # print(f' M_tot(Density) = {np.sum(dm)/phys.M_sun:.3f}')\n return np.sum(dm)", "def mape(self) -> float:\n return float(np.mean(np.abs((self.true - self.predicted) / self.true)) * 100)", "def spring_motion(par, data):\n mo = _mo.model_optim_extras()\n spring = _sd.spring(par, data, mo.sig_mod2)\n motion = spring.calc_dynamics()['x']\n return motion", "def moment(self, p, q):\n\n def combin(n, r):\n # compute number of combinations of size r from set n\n def prod(values):\n try:\n return reduce(lambda x, y: x * y, values)\n except TypeError:\n return 1\n\n return prod(range(n - r + 1, n + 1)) / prod(range(1, r + 1))\n\n vertices = self.vertices(closed=True)\n x = vertices[0, :]\n y = vertices[1, :]\n\n m = 0.0\n n = len(x)\n for l in range(n):\n l1 = (l - 1) % n\n dxl = x[l] - x[l1]\n dyl = y[l] - y[l1]\n Al = x[l] * dyl - y[l] * dxl\n \n s = 0.0\n for i in range(p + 1):\n for j in range(q + 1):\n s += (-1)**(i + j) \\\n * combin(p, i) \\\n * combin(q, j) / ( i+ j + 1) \\\n * x[l]**(p - i) * y[l]**(q - j) \\\n * dxl**i * dyl**j\n m += Al * s\n\n return m / (p + q + 2)", "def Mass(self):\n mpa = self.MassPerLength()\n if mpa == 0.0:\n return 0.\n L = self.Length()\n mass = L * mpa\n\n #try:\n #mass = (self.Rho() * self.Area() + self.Nsm()) * L\n #except TypeError:\n #msg = 'TypeError on eid=%s pid=%s:\\n' % (self.eid, self.Pid())\n #msg += 'rho = %s\\narea = %s\\nnsm = %s\\nL = %s' % (self.Rho(),\n # self.Area(),\n # self.Nsm(), L)\n #raise TypeError(msg)\n\n return mass", "def M(self,v):\n assert len(self.d_moments)==1, \"For now only supporting 1 dimension at a time\"\n d = int(list(self.d_moments.keys())[0])\n *start,n,a = v.shape \n N = n//(d+1) # number of extended bodies\n v_reshaped = v.reshape(*start,N,d+1,a) # (*, # separate bodies, # internal body nodes, a) \n moments = torch.exp(self.d_moments[str(d)])\n masses = moments[:,:1]\n if d==0: return (masses.unsqueeze(-1)*v_reshaped).reshape(*v.shape) # no inertia for point masses\n a00 = (masses + moments[:,1:].sum(-1,keepdims=True)).unsqueeze(-1) #(N,1,1)\n ai0 = a0i = -moments[:,1:].unsqueeze(-1) #(N,d,1)\n p0 = a00*v[...,:1,:] + (a0i*v[...,1:,:]).sum(-2,keepdims=True)\n aii = moments[:,1:].unsqueeze(-1) # (N,d,1)\n \n pi = ai0*v[...,:1,:] +aii*v[...,1:,:]\n return torch.cat([p0,pi],dim=-2).reshape(*v.shape)", "def p1(self):\n return self.lerp(1)" ]
[ "0.74322164", "0.73959345", "0.73226726", "0.72440016", "0.69533795", "0.69061005", "0.6754555", "0.665831", "0.6585926", "0.6467245", "0.6458798", "0.64507335", "0.6449878", "0.64052224", "0.63391316", "0.63144106", "0.6260632", "0.61805636", "0.61436313", "0.6129185", "0.61285204", "0.61268336", "0.61111414", "0.6062954", "0.60131866", "0.60126275", "0.6002262", "0.5959174", "0.59279263", "0.5922599", "0.590843", "0.5905199", "0.5897426", "0.58738047", "0.58723146", "0.58723146", "0.58533955", "0.58531576", "0.58415943", "0.58215946", "0.58211756", "0.58163136", "0.5815368", "0.5814143", "0.5800749", "0.5799857", "0.5794496", "0.57839596", "0.5783097", "0.57754534", "0.57745624", "0.5767069", "0.57663906", "0.57619834", "0.5761382", "0.57539195", "0.57334226", "0.57290095", "0.5725787", "0.57256323", "0.57204115", "0.5713235", "0.57013935", "0.56766963", "0.56682086", "0.56484246", "0.5644048", "0.5639512", "0.56376433", "0.563588", "0.56333673", "0.5616934", "0.5615792", "0.5611925", "0.5606579", "0.5600369", "0.55879706", "0.5572151", "0.55677915", "0.55649096", "0.5560929", "0.555992", "0.55583847", "0.5552432", "0.55521834", "0.55456805", "0.55386215", "0.55316347", "0.55280244", "0.5523175", "0.5519669", "0.5519102", "0.55120915", "0.55099124", "0.55047685", "0.55016536", "0.5499686", "0.54995155", "0.5497474", "0.54952097" ]
0.75053847
0
Method that returns the total energy of the particle
def TotalEnergy(self): return (math.sqrt((Particle.RestEnergy(self) ** 2) + (np.linalg.norm(Particle.Momentum(self)) * const.speed_of_light) ** 2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy", "def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)", "def compute_energy(self):\n energy = 0.5 * self.masses * np.sum(self.velocities * self.velocities, axis=1)\n avg_energy = np.mean(energy) # average kinetic energy of all particles\n return avg_energy", "def energy_tot(P,F,H,molecule):\n return energy_el(P,F,H) + energy_n(molecule)", "def total_KE(particles):\r\n return sum([particle.kinetic_energy() for particle in particles])", "def total_energy(self):\n return self._total_energy", "def energy(ps):\n return kinetic_energy(ps) + potential_energy(ps)", "def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))", "def energy(self):\n return self.mc.energy(self.chain)", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)", "def KineticEnergy(self):\n return Particle.TotalEnergy(self) - Particle.RestEnergy(self)", "def potentialEnergy(self):\n return 0.5*(pdist(self.positions)**2).sum()", "def energy(self):\n return self._energy", "def energy(p,m):\n return math.sqrt(p*p + m*m)", "def energy(self):\n sum_energy = 0.0\n for i in range(0,self.natoms-1):\n for j in range(i+1,self.natoms):\n rij = (self.atoms[i].xyz - self.atoms[j].xyz)\n rij = rij - self.pbc_correction(rij)\n mag_rij = la.norm(rij)\n sum_energy = sum_energy + self.pair_energy(self.epsilon, self.sigma, mag_rij) \n return sum_energy", "def get_energy(self):\r\n return self._energy", "def getEnergy(self):\n energy = 0.0\n\n for i in range(0, self.nPoints):\n energy += self.tDomain[i] ** 2\n\n energy /= self.nPoints\n return energy", "def energy(self) -> Union[int, float]:\n return self.proto.energy", "def energy(self) -> Union[int, float]:\n return self.proto.energy", "def energy(data):\n return sum(pow(data, 2))", "def energy(nx,ny):\n return 1+nx+ny", "def energy_function(self):\n E = 0\n for i in range(len(self.config)):\n for j in range(len(self.config)):\n s = self.config[i,j]\n #Calculate the impact of neighboring particle pairs\n neighbors = (self.config[(i+1)%L, j] +\n self.config[i, (j+1)%L] + \n self.config[(i-1)%L, j] + \n self.config[i, (j-1)%L])\n E += -J*s*neighbors\n #fix for extra neighbors\n return E/4", "def kinetic_energy(self):\r\n position, velocity, escaped_particles,impact, wall_collision,mom = self.box_collision_info()\r\n for j in xrange(1,self.n):\r\n abs_velocity = np.sqrt(velocity[:,0]**2+velocity[:,1]**2\r\n + velocity[:,2]**2)\r\n KE = 0.5*self.m*abs_velocity**2\r\n total_KE = np.sum(KE)\r\n invid_KE = total_KE/self.Npart\r\n\r\n return total_KE, invid_KE", "def getEnergy(self):\n if not hasattr(self,\"energy\"):\n self.energy = self.calcEnergy()\n return self.energy", "def kinetic_energy(self):\r\n return self.mass * np.dot(self.vel, self.vel) / 2", "def energy(n):\n return (n * pi * hbar / (2 * a)) ** 2 / (2 * m)", "def get_total_energy_produced (self):\n return self.pre_intertie_generation[:self.actual_project_life]", "def energy(e: float) -> float:\n\n return (1/np.sqrt(2))*(gamma(-e/2+1/2)/(gamma(-e/2+3/4)))", "def energy(self):\n return self._accelerator.energy", "def get_total_energy(parameters):\n 
return orm.Float(parameters.get_attribute('energy'))", "def get_energy():\n\n # open the psi4 log file\n with open('output.dat', 'r') as log:\n lines = log.readlines()\n\n # find the total converged energy\n for line in lines:\n if 'Total Energy =' in line:\n energy = float(line.split()[3])\n break\n else:\n raise EOFError('Cannot find energy in output.dat file.')\n\n return energy", "def get_energy():\n\n # open the psi4 log file\n with open(\"output.dat\", \"r\") as log:\n for line in log:\n if \"Total Energy =\" in line:\n return float(line.split()[3])\n\n raise EOFError(\"Cannot find energy in output.dat file.\")", "def energy(energy_name: str) -> float:\n pass", "def compute_energy(self, protein): \n return utils.score_pose(protein.pose, self.scorefxn)", "def total_energy(sign, FS):\n time = compute_time(sign, FS)\n\n return np.sum(np.array(sign)**2)/(time[-1]-time[0])", "def energy(self, r):\n sigma = self.params['sigma']\n epsilon = self.params['epsilon']\n s = sigma / r\n s6 = s**6; s12 = s6 * s6\n pot = 4.0 * epsilon * (s12 - s6)\n return pot", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def _calc_energy( self, V_a, eos_d ):\n pass", "def ComputeEnergyConsumption(self):\r\n pass", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def energy_func(self):\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI))", "def energy(data):\n\n return np.real(np.mean(np.abs(data)**2, axis=1))", "def get_total_energy_produced (self):\n return self.net_generation_wind", "def energy(self):\n nocc, ntot, gmo, e = self.nocc, self.ntot, self.gmo, self.e\n\n Ec = 0.0\n for i in range(nocc):\n for j in range(nocc):\n for a in range(nocc, ntot):\n for b in range(nocc, ntot):\n Ec += gmo[i, a, j, b]*(2*gmo[i, a, j, b] - gmo[i, b, j, a])/\\\n (e[i] + e[j] - e[a] - e[b])\n\n self.Ec = Ec\n self.E_mp2 = Ec + self.E_scf\n\n print('@MP2 correlation energy: {:15.10f}\\n'.format(self.Ec))\n print('@Total MP2 energy: {:15.10f}\\n'.format(self.E_mp2))\n\n return self.E_mp2", "def computeEnergy(self):\n\t\tGmo = self.Gmo\n\t\te = self.e\n\t\tself.Ec = 0.0\n\n\t\tfor i in range( self.nocc ):\n\t\t\tfor j in range( self.nocc ):\n\t\t\t\tfor a in range( self.nocc,self.norb ):\n\t\t\t\t\tfor b in range( self.nocc,self.norb ):\n\t\t\t\t\t\tself.Ec += 0.25*(Gmo[i,j,a,b]*Gmo[a,b,i,j])/(e[i]+e[j]-e[a]-e[b])\n\n\t\treturn self.E0 + self.Ec", "def energy(self):\n nocc, gmo, e = self.nocc, self.gmo, self.e\n\n Ec = 0.0\n for i in range(nocc):\n for j in range(nocc):\n for a in range(nocc, len(e)):\n for b in range(nocc, len(e)):\n Ec += (1/4.0) * gmo[i, j, a, b]**2 / (e[i]+e[j]-e[a]-e[b])\n\n self.Ec = Ec\n self.E_mp2 = Ec + self.E_scf\n\n print('@MP2 correlation energy: 
{:15.10f}\\n'.format(self.Ec))\n print('@Total MP2 energy: {:15.10f}\\n'.format(self.E_mp2))\n\n return self.E_mp2", "def add_mass_energy(particles: list[Particle]) -> u.Quantity:\n total_mass_energy = 0.0 * u.J\n for particle in particles:\n total_mass_energy += particle.mass_energy\n return total_mass_energy.to(u.J)", "def delta_energy(atom,layer1,layer2):\n global r,c,h\n return float('%.2E' % Decimal(str(r*((atom**2/layer1**2)-(atom**2/layer2**2)))))", "def getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N, 3],\n mass: dc.float64[N], G: dc.float64):\n # Kinetic Energy:\n # KE = 0.5 * np.sum(np.sum( mass * vel**2 ))\n # KE = 0.5 * np.sum( mass * vel**2 )\n KE = 0.5 * np.sum(np.reshape(mass, (N, 1)) * vel**2)\n\n # Potential Energy:\n\n # positions r = [x,y,z] for all particles\n x = pos[:, 0:1]\n y = pos[:, 1:2]\n z = pos[:, 2:3]\n\n # matrix that stores all pairwise particle separations: r_j - r_i\n # dx = x.T - x\n # dy = y.T - y\n # dz = z.T - z\n # dx = np.transpose(x) - x\n # dy = np.transpose(y) - y\n # dz = np.transpose(z) - z\n dx = np.add.outer(-x, x)\n dy = np.add.outer(-y, y)\n dz = np.add.outer(-z, z)\n\n # matrix that stores 1/r for all particle pairwise particle separations\n inv_r = np.sqrt(dx**2 + dy**2 + dz**2)\n # inv_r[inv_r>0] = 1.0/inv_r[inv_r>0]\n I = inv_r > 0\n np.divide(1.0, inv_r, out=inv_r, where=I)\n\n # sum over upper triangle, to count each interaction only once\n # PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1)))\n # PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1))\n tmp = -np.multiply.outer(mass, mass) * inv_r\n PE = 0.0\n for j in range(N):\n for k in range(j + 1, N):\n PE += tmp[j, k]\n PE *= G\n\n return KE, PE", "def estimated_energy(self):\n energy = 0j\n for pauli_string, coef in self._pauli_coef_terms:\n a = self._zeros[pauli_string]\n b = self._ones[pauli_string]\n if a + b:\n energy += coef * (a - b) / (a + b)\n energy = complex(energy)\n if energy.imag == 0:\n energy = energy.real\n energy += self._identity_offset\n return energy", "def Total_energy(angles):\n energy = 0\n \n for i in range(0,4):\n energy += Single_spin_energy(i,angles)\n return energy", "def energy(self):\n e = 0\n\n restoration = RestorationModel(self.graph_damaged)\n restoration.run(self.state)\n restoration_graphs = restoration.get_restoration_graphs()\n restoration_times = restoration.get_restoration_times()\n restoration_costs = restoration.get_restoration_costs()\n\n damaged = []\n damaged.append(get_delta(self.no_damage, self.initial_damage))\n\n sim_results = Parallel(n_jobs=4)(delayed(parallel_model)(\n graph, self.od_graph, self.od_matrix) for graph in restoration_graphs[:-1])\n for values in sim_results:\n damaged.append(get_delta(self.no_damage, values))\n\n for idx, values in enumerate(damaged):\n dt = restoration_times[idx] if idx == 0 else restoration_times[idx] - \\\n restoration_times[idx-1]\n e += sum(restoration_costs[idx]) + dt * (self.day_factor * values[2] * np.sum(self.mu*self.xi) +\n values[3] * np.sum(self.mu * (self.nu * self.F_w + self.rho)) + values[4] * self.upsilon)\n with open(self.fdir+'energy.csv', 'a') as f:\n f.write('\\n'+str(e))\n\n return e", "def energy_atom(atom,layer):\n global r,c,h\n backval= r*((atom**2/layer**2))\n return float('%.2E' % Decimal(str(backval)))", "def compute_energy(self):\n\n # radiation energy\n Qsqrd = self.omega_coords[:,:,1]*self.omega_coords[:,:,1]\n Psqrd = self.omega_coords[:,:,0]*self.omega_coords[:,:,0]\n\n e_rad = (Psqrd/self.mode_mass + (self.mode_mass*self.omega**2)*Qsqrd)*.5\n\n # space charge 
energy\n Dsqrd = self.dc_coords[:,:,0]*self.dc_coords[:,:,0]\n\n e_drft = Dsqrd/(2.*self.mode_mass)\n\n energy = e_rad+e_drft\n\n return energy", "def kinetic_energy(vel):\r\n return 0.5 * (vel ** 2).sum(axis=1)", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def mass_energy(particle: Particle, mass_numb: Optional[Integral] = None) -> u.J:\n return particle.mass_energy", "def total_kin_energy (self):\n total = 0. \n for planet in self.planets: #this loop takes each planet's kinetic energy and sums it with the others.\n total += planet.kenergy # the sum of the kinetic energies\n total_kin= total # system's kinetic energy\n \n return(total_kin)", "def kinetic_energy(ps):\n return sum([p.kinetic_energy() for p in ps])", "def energyplus_its(self):\n if self._energyplus_its is None:\n self._energyplus_its = 0\n return self._energyplus_its", "def energy(self, solution, var_type, params=None):\n dict_solution = self._parse_solution(solution, var_type)\n qubo, offset = self.to_qubo(params=params)\n s = 0.0\n for (label1, label2), value in qubo.items():\n s += dict_solution[label1] * dict_solution[label2] * value\n return s + offset", "def total_energy(positions: pd.DataFrame,\n velocities: pd.DataFrame\n ) -> int:\n pot = positions.abs().sum(axis=1)\n kin = velocities.abs().sum(axis=1)\n return sum(pot*kin)", "def get_energies(self):\n N = len(self.particles)\n\n # Use C++ version if cppenabled\n if(self.cppenabled):\n energies = np.zeros(3) # Initialises Energy output array\n accelerate_lib.c_getenergies(self.get_positions(), self.get_velocities(), \\\n energies, self.boxdim, self.LJ_cutoff)\n return np.array(energies)\n\n # Python calculation if cppenabled = False:\n pot = Total_PE(self.particles, self.LJ_cutoff, self.boxdim)\n kin = Total_KE(self.get_velocities())\n\n return np.array([pot, kin, pot+kin])", "def send_energy(self) -> float:\n # Ensure that the molecule currently passes validation\n if not self.molecule_validated:\n raise Exception(\"MDI attempting to compute energy on an unvalidated molecule\")\n self.run_energy()\n properties = self.compute_return.properties.dict()\n energy = properties[\"return_energy\"]\n MDI_Send(energy, 1, MDI_DOUBLE, self.comm)\n return energy", "def calculate_energy(self):\n temp_e = 0\n\n for i in range(0,self.neuron_count):\n for j in range(0, self.neuron_count):\n if i != j:\n temp_e += self.get_weight(i, j) * self.current_state[i] * \\\n self.current_state[j]\n return -1 * temp_e / 2", "def incident_energy(self):\n return self._incident_energy", "def block_energy():\r\n\r\n my_block = q.get()\r\n my_block = my_block.flatten()\r\n energy = np.sum(my_block ** 2)\r\n return energy, my_block", "def E(self):\n return self.generic_getter(get_energy, \"E\", \"convert_energy\")", "def energy(self):\n E = sum([1 for c in self.constraints if self._is_constraint_violated(c)])\n if E == 0:\n self._save_solution()\n print(\"exiting...\")\n exit()\n return E", "def energy(self, state):\n energy = 0.0\n if isinstance(state, dict):\n # convert to array\n state = [state[elem] for elem in self.indices]\n\n state = np.array(state)\n for coeff in self.interactions[1:]:\n for _inds, value in coeff.items():\n energy += value * np.prod(state[list(_inds)])\n for i, hi in self.interactions[0].items():\n energy += hi * state[i]\n\n return energy", "def current_energy_produced(self):\n return self.df.exp.sum()", "def get_energy(self, position):\n\n # update the positions of the system\n 
self.simulation.context.setPositions(position)\n\n # Get the energy from the new state\n state = self.simulation.context.getState(getEnergy=True)\n\n energy = state.getPotentialEnergy().value_in_unit(unit.kilocalories_per_mole)\n\n return energy", "def potential_energy(self, PE):\n # Creating an axis for the time steps\n x = np.linspace(0, self.t, self.N*self.t+1)\n # Initializing the figure\n plt.figure(figsize=(10, 10))\n # Creating the plot\n plt.plot(x, PE)\n # Decorating the plot\n plt.suptitle('Total potential energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yrs]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE'])", "def computeEnergy(self):\n _cgco.gcoComputeEnergy(self.handle, self.energyTempArray)\n return self._convertEnergyBack(self.energyTempArray[0])", "def total_charge(particles: list[Particle]) -> int:\n total_charge = 0\n for particle in particles:\n if particle.isotope:\n total_charge += particle.atomic_number\n elif not particle.element:\n total_charge += particle.charge_number\n return total_charge", "def estimate(self):\n mu = self.mean()\n var = np.average((self.particles - mu) ** 2, weights=self.weights, axis=0)\n\n return mu, var", "def total_energy(self, KE, PE):\n TOT = KE+PE\n # Printing the amplitude to command line\n amplitude = max(TOT)-min(TOT)\n print('Amplitude of total energy during %i year(s): %g[AU²*kg/yr²]' \\\n %(self.t, amplitude))\n # Creating an axis for the time steps\n x = np.linspace(0, self.t, self.N*self.t+1)\n # Initializing the second figure\n plt.figure(figsize=(10, 10))\n # Creating the plot\n plt.plot(x, KE, x, PE, x, KE+PE)\n # Decorating the plot\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE', 'PE', 'KE+PE'], loc=2)", "def RestEnergy(self):\n return (self.restMass * const.speed_of_light * const.speed_of_light)", "def energy(density, coeff=1.0):\n # implementation goes here\n energy = 0\n for n_i in density:\n \tif type(n_i) != int:\n \t\traise TypeError('Wrong type!')\n \tenergy += n_i * (n_i - 1)\n\n return energy", "def _get_mean(self):\n return self._get_conditional_negative_energy()", "def energy_percentage(self) -> Union[int, float]:\n if not self.proto.energy_max:\n return 0\n return self.proto.energy / self.proto.energy_max", "def energy_percentage(self) -> Union[int, float]:\n if not self.proto.energy_max:\n return 0\n return self.proto.energy / self.proto.energy_max", "def local_energy(self):\n state = self.current_state\n (matrix_elements, transitions) = \\\n self.hamiltonian.find_matrix_elements(state)\n energy_list = [self.nqs.amplitude_ratio(state, transitions[i]) * mel\n for (i, mel) in enumerate(matrix_elements)]\n return sum(energy_list)", "def energy_pfu(self):\n return self._energy_pfu", "def kinetic_energy(self, sys):\n v = sys.velocities\n m = sys.mass\n return 0.5*np.dot(m, np.multiply(v, v))", "def calc_energy(n: int) -> float:\n E = -2 / (n + 1) ** 2\n x, ψ = h_static(E)\n\n # Calculate potential between the e- field and the nucleus point by integrating.\n # todo: Let's do this manually first, then try to apply a scipy.integrate approach.\n\n dx = 1\n\n result = 0\n\n ψ2 = np.conj(ψ) * ψ\n\n sample_pts = np.arange(x[0], x[-1], dx)\n for pt in sample_pts:\n k = 1\n Q = 1\n V = k * Q / x\n\n q = 1\n E = V * q * np.interp([pt], x, ψ2)[0]\n\n result += E / dx\n\n return result", "def energy(self, mass = 100):\n energy = [self.proteins, 
self.fats, self.carbs, self.calories]\n energy = map(lambda x: x / 100 * mass, energy)\n\n return energy", "def n_e(self) -> u.m**-3:\n return np.sum(self._n_elem * self.ionic_fractions * self.charge_numbers)", "def _UpdateEnergy(self):\n self.mol.GetEnergy('nokinetic')", "def stEnergy(frame):\n return numpy.sum(frame ** 2) / numpy.float64(len(frame))", "def stEnergy(frame):\n return np.sum(frame ** 2) / np.float64(len(frame))", "def energy_Photon(freq=1.0,energy=0.0):\n global r,c,h\n if freq ==0:\n print(\"enerji yok...\")\n return 0\n if energy != 0:\n energy =energy\n else:\n energy = h*freq\n getit =str(input(\"frekans bulmak istiyorsaniz f,yoksa bos gecin.\"))\n if getit ==\"f\":\n return ('%.2E' % Decimal(str(energy/h)))\n return float('%.2E' % Decimal(str(energy)))", "def particleCharge(self):\n return self.params['particleCharge']", "def E(self, state):\n \n if state==0: # Invalid state has no energy\n return 0\n return sum([self.calcDistance(state[i+1], state[i]) for i in range(len(state)-1)])", "def get_energy(self):\n return self.bot_client.send_command(_Command.GetEnergy)", "def getEnergy(self, normalized=True, mask=None):\n psi = self.psi.get()\n n = self.n.get()\n density = np.absolute(psi) ** 2\n gradx = np.gradient(psi)[0]\n normFactor = density.sum() if normalized else 1.0\n return np.ma.array(-(0.25 * np.gradient(\n np.gradient(density)[0])[0]\n - 0.5 * np.absolute(gradx) ** 2\n - (self.g_C * density + self.g_R * n)\n * density), mask=mask).sum() / normFactor", "def intEnergy_mass(self):\n return _cantera.reactor_intEnergy_mass(self.__reactor_id)", "def _compute_epera(self, units='erg'):\n if 'a' not in self:\n raise ValueError('Photons must have effective area data to permit the computation of fluxes.')\n\n energy = _const.h * _const.c / self['w']\n energy = energy.to(units).value\n epera = energy / self['a']\n return epera", "def kinetic_energies(self):\n return sum([body.kinetic_energy\n for body in self.bodies])", "def tot_pot_energy (self):\n\n for planet_a in self.planets: #this loop takes a 'planet_a' in 'self.planets'.\n pot_energy = 0.\n for planet_b in self.planets: #this loop takes a 'planet_b' in 'self.planets'.\n if planet_a == planet_b: #This is a condition to avoid to find the potential energy of a body shared with itself, a physical nonsense\".\n continue\n G=6.67408e-11 #gravitational constant\n energy = ((-G) * (planet_a.mass*planet_b.mass))/(np.linalg.norm((planet_a.position-planet_b.position))) #potential energy of planet_a with each other body.\n pot_energy += energy #all the potential energies acting on planet_a summed together.\n\n return(pot_energy)" ]
[ "0.84132123", "0.8266951", "0.7991302", "0.7920243", "0.7864987", "0.7718018", "0.76988596", "0.76899403", "0.76686484", "0.76565665", "0.76121676", "0.7568719", "0.7546202", "0.75326943", "0.75047314", "0.7501236", "0.74609464", "0.7411283", "0.7411283", "0.74109715", "0.7385151", "0.7369838", "0.7365993", "0.7322846", "0.7257673", "0.7238203", "0.7230174", "0.7141704", "0.7084969", "0.7079791", "0.70477885", "0.70441824", "0.7037636", "0.7028822", "0.70261854", "0.70201254", "0.70127106", "0.69985896", "0.6993407", "0.6992334", "0.6978767", "0.69442546", "0.6915835", "0.69076794", "0.68868196", "0.6878857", "0.6873729", "0.6872049", "0.6855177", "0.6832286", "0.6826464", "0.68056923", "0.6775663", "0.67723584", "0.6760109", "0.6752084", "0.67357796", "0.67057055", "0.6682076", "0.66817504", "0.66608167", "0.66582423", "0.66546965", "0.66340864", "0.66072834", "0.65988773", "0.6595239", "0.6585034", "0.65844625", "0.6583544", "0.6540835", "0.6539571", "0.6534751", "0.6501487", "0.6497187", "0.64821273", "0.6480613", "0.6460508", "0.6447916", "0.6446096", "0.64459676", "0.64459676", "0.6443627", "0.64394337", "0.6426513", "0.6418928", "0.6417619", "0.6416761", "0.64090836", "0.63981676", "0.63948506", "0.6392767", "0.63875794", "0.6376095", "0.63702446", "0.63701695", "0.6361123", "0.6354879", "0.634965", "0.63460886" ]
0.821929
2
Method that returns the kinetic energy of the particle
def KineticEnergy(self):
    return Particle.TotalEnergy(self) - Particle.RestEnergy(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kineticEnergy(self):\n return self.params['kinetic']", "def kinetic_energy(self):\r\n return self.mass * np.dot(self.vel, self.vel) / 2", "def kinetic_energy(self):\r\n position, velocity, escaped_particles,impact, wall_collision,mom = self.box_collision_info()\r\n for j in xrange(1,self.n):\r\n abs_velocity = np.sqrt(velocity[:,0]**2+velocity[:,1]**2\r\n + velocity[:,2]**2)\r\n KE = 0.5*self.m*abs_velocity**2\r\n total_KE = np.sum(KE)\r\n invid_KE = total_KE/self.Npart\r\n\r\n return total_KE, invid_KE", "def total_KE(particles):\r\n return sum([particle.kinetic_energy() for particle in particles])", "def kinetic_energy(self, sys):\n v = sys.velocities\n m = sys.mass\n return 0.5*np.dot(m, np.multiply(v, v))", "def compute_energy(self):\n energy = 0.5 * self.masses * np.sum(self.velocities * self.velocities, axis=1)\n avg_energy = np.mean(energy) # average kinetic energy of all particles\n return avg_energy", "def kinetic_energy(vel):\r\n return 0.5 * (vel ** 2).sum(axis=1)", "def kinetic_energies(self):\n return sum([body.kinetic_energy\n for body in self.bodies])", "def kinetic_energy(ps):\n return sum([p.kinetic_energy() for p in ps])", "def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy", "def energy(ps):\n return kinetic_energy(ps) + potential_energy(ps)", "def kinetic_energy(self, units = 'si'):\n if units == 'si':\n return 0.5 * self.mass * (linalg.norm(self.velocity) ** 2)\n if units == 'au':\n return 0.5 * self.mass * (linalg.norm(self.velocity * (1.496e11) * 86400) ** 2)", "def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)", "def _UpdateEnergy(self):\n self.mol.GetEnergy('nokinetic')", "def energy(self):\n return self.mc.energy(self.chain)", "def get_energy(self):\r\n return self._energy", "def energy(self):\n return self._energy", "def energyK(k):\r\n C1 = 9.7846113e-07\r\n C2 = 12.263868e0 \r\n E = (-1.0 + np.sqrt(1.0 + 4.0 * C1 * C2**2 * k**2))/(2.0 * C1)\r\n return E", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)", "def total_kin_energy (self):\n total = 0. 
\n for planet in self.planets: #this loop takes each planet's kinetic energy and sums it with the others.\n total += planet.kenergy # the sum of the kinetic energies\n total_kin= total # system's kinetic energy\n \n return(total_kin)", "def potentialEnergy(self):\n return 0.5*(pdist(self.positions)**2).sum()", "def energy(self) -> Union[int, float]:\n return self.proto.energy", "def energy(self) -> Union[int, float]:\n return self.proto.energy", "def TotalEnergy(self):\n return (math.sqrt((Particle.RestEnergy(self) ** 2)\n + (np.linalg.norm(Particle.Momentum(self)) * const.speed_of_light) ** 2))", "def getEnergy(self):\n energy = 0.0\n\n for i in range(0, self.nPoints):\n energy += self.tDomain[i] ** 2\n\n energy /= self.nPoints\n return energy", "def getEnergy(self):\n if not hasattr(self,\"energy\"):\n self.energy = self.calcEnergy()\n return self.energy", "def energy(self):\n return self._accelerator.energy", "def getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N, 3],\n mass: dc.float64[N], G: dc.float64):\n # Kinetic Energy:\n # KE = 0.5 * np.sum(np.sum( mass * vel**2 ))\n # KE = 0.5 * np.sum( mass * vel**2 )\n KE = 0.5 * np.sum(np.reshape(mass, (N, 1)) * vel**2)\n\n # Potential Energy:\n\n # positions r = [x,y,z] for all particles\n x = pos[:, 0:1]\n y = pos[:, 1:2]\n z = pos[:, 2:3]\n\n # matrix that stores all pairwise particle separations: r_j - r_i\n # dx = x.T - x\n # dy = y.T - y\n # dz = z.T - z\n # dx = np.transpose(x) - x\n # dy = np.transpose(y) - y\n # dz = np.transpose(z) - z\n dx = np.add.outer(-x, x)\n dy = np.add.outer(-y, y)\n dz = np.add.outer(-z, z)\n\n # matrix that stores 1/r for all particle pairwise particle separations\n inv_r = np.sqrt(dx**2 + dy**2 + dz**2)\n # inv_r[inv_r>0] = 1.0/inv_r[inv_r>0]\n I = inv_r > 0\n np.divide(1.0, inv_r, out=inv_r, where=I)\n\n # sum over upper triangle, to count each interaction only once\n # PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1)))\n # PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1))\n tmp = -np.multiply.outer(mass, mass) * inv_r\n PE = 0.0\n for j in range(N):\n for k in range(j + 1, N):\n PE += tmp[j, k]\n PE *= G\n\n return KE, PE", "def useKineticEnergy(self):\n return self.params['useKinetic']", "def kin_energy (self):\n\n for planet in self.planets:\n planet.kenergy = 0.5*planet.mass*((np.linalg.norm(planet.velocity))**2) # every 'kenergy' depends by the body's mass and velocity", "def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))", "def kinetic_energy(self, KE):\n # Creating an axis for the time steps\n x = np.linspace(0, self.t, self.N*self.t+1)\n # Initializing the figure\n plt.figure(figsize=(10, 10))\n # Creating the plot\n plt.plot(x, KE)\n # Decorating the plot\n plt.suptitle('Total kinetic energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE'])", "def energy_function(self):\n E = 0\n for i in range(len(self.config)):\n for j in range(len(self.config)):\n s = self.config[i,j]\n #Calculate the impact of neighboring particle pairs\n neighbors = (self.config[(i+1)%L, j] +\n self.config[i, (j+1)%L] + \n self.config[(i-1)%L, j] + \n self.config[i, (j-1)%L])\n E += -J*s*neighbors\n #fix for extra neighbors\n return E/4", "def energy(p,m):\n return math.sqrt(p*p + m*m)", "def energy(nx,ny):\n return 1+nx+ny", "def total_energy(self):\n return self._total_energy", "def energy(energy_name: str) -> float:\n pass", "def energy(e: float) -> float:\n\n 
return (1/np.sqrt(2))*(gamma(-e/2+1/2)/(gamma(-e/2+3/4)))", "def energy(data):\n return sum(pow(data, 2))", "def ComputeEnergyConsumption(self):\r\n pass", "def energy(self):\n sum_energy = 0.0\n for i in range(0,self.natoms-1):\n for j in range(i+1,self.natoms):\n rij = (self.atoms[i].xyz - self.atoms[j].xyz)\n rij = rij - self.pbc_correction(rij)\n mag_rij = la.norm(rij)\n sum_energy = sum_energy + self.pair_energy(self.epsilon, self.sigma, mag_rij) \n return sum_energy", "def get_E(J,k):\n E = -2 * J * np.cos(k) # energyeigenvalue \n return E", "def get_energies(self):\n N = len(self.particles)\n\n # Use C++ version if cppenabled\n if(self.cppenabled):\n energies = np.zeros(3) # Initialises Energy output array\n accelerate_lib.c_getenergies(self.get_positions(), self.get_velocities(), \\\n energies, self.boxdim, self.LJ_cutoff)\n return np.array(energies)\n\n # Python calculation if cppenabled = False:\n pot = Total_PE(self.particles, self.LJ_cutoff, self.boxdim)\n kin = Total_KE(self.get_velocities())\n\n return np.array([pot, kin, pot+kin])", "def energy(n):\n return (n * pi * hbar / (2 * a)) ** 2 / (2 * m)", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def energy(self, r):\n sigma = self.params['sigma']\n epsilon = self.params['epsilon']\n s = sigma / r\n s6 = s**6; s12 = s6 * s6\n pot = 4.0 * epsilon * (s12 - s6)\n return pot", "def mass_energy(particle: Particle, mass_numb: Optional[Integral] = None) -> u.J:\n return particle.mass_energy", "def energy_yield(self):\n return self['kwh_per_kw']", "def calc_gravitational_energy(self):\n\n star = self.star\n\n K, N = star.mesh_size\n mu = star.mu_coords\n r = star.r_coords\n\n def S1(j):\n return np.sum((mu[2::2] - mu[:-2:2]) * (star.rho[:-2:2, j] * star.Phi[:-2:2, j] +\n 4 * star.rho[1:-1:2, j] * star.Phi[1:-1:2, j] +\n star.rho[2::2, j] * star.Phi[2::2, j])) / 6\n\n W = 0\n\n for j in range(0, N - 2, 2):\n W += (r[j + 2] - r[j]) * (r[j]**2 * S1(j) +\n 4 * r[j + 1]**2 * S1(j + 1) +\n r[j + 2]**2 * S1(j + 2))\n\n return -1 / 3 * np.pi * W", "def energy(data):\n\n return np.real(np.mean(np.abs(data)**2, axis=1))", "def get_total_energy_produced (self):\n return self.net_generation_wind", "def incident_energy(self):\n return self._incident_energy", "def E(self):\n return self.generic_getter(get_energy, \"E\", \"convert_energy\")", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def compute_energy(self, protein): \n return utils.score_pose(protein.pose, self.scorefxn)", "def get_total_energy_produced (self):\n return self.pre_intertie_generation[:self.actual_project_life]", "def energy_tot(P,F,H,molecule):\n return energy_el(P,F,H) + energy_n(molecule)", "def computeEnergy(self):\n _cgco.gcoComputeEnergy(self.handle, self.energyTempArray)\n return self._convertEnergyBack(self.energyTempArray[0])", 
"def total_kinetic_energy(V,M):\r\n N = V.shape[0] # number of bodies\r\n K = 0 # initialize kinetic energy\r\n V0 = np.zeros(3) # initialize center of mass velocity\r\n # find refernce velocity\r\n for n in range(N):\r\n V0 = V0 + V[n,:]*M[n]\r\n V0 = V0/np.sum(M)\r\n # find kinetic energy\r\n for n in range(N):\r\n K = K + ((util.enod(V[n,:],V0))**2)* 0.5 * M[n]\r\n \r\n return K", "def computeEnergy(self):\n\t\tGmo = self.Gmo\n\t\te = self.e\n\t\tself.Ec = 0.0\n\n\t\tfor i in range( self.nocc ):\n\t\t\tfor j in range( self.nocc ):\n\t\t\t\tfor a in range( self.nocc,self.norb ):\n\t\t\t\t\tfor b in range( self.nocc,self.norb ):\n\t\t\t\t\t\tself.Ec += 0.25*(Gmo[i,j,a,b]*Gmo[a,b,i,j])/(e[i]+e[j]-e[a]-e[b])\n\n\t\treturn self.E0 + self.Ec", "def self_energy(gf_imp0, gf_imp):\n return 1/gf_imp0 - 1/gf_imp", "def e_kinetic(self, q = np.zeros(2) , dq = np.zeros(2) , R_index = 0 ): \n \n Ha = self.H_all( q , R_index ) \n \n e_k = 0.5 * np.dot( dq , np.dot( Ha , dq ) )\n \n return e_k", "def e_kinetic(self, q = np.zeros(2) , dq = np.zeros(2) , R_index = 0 ): \n \n Ha = self.H_all( q , R_index ) \n \n e_k = 0.5 * np.dot( dq , np.dot( Ha , dq ) )\n \n return e_k", "def _get_mean(self):\n return self._get_conditional_negative_energy()", "def get_energy(self, position):\n\n # update the positions of the system\n self.simulation.context.setPositions(position)\n\n # Get the energy from the new state\n state = self.simulation.context.getState(getEnergy=True)\n\n energy = state.getPotentialEnergy().value_in_unit(unit.kilocalories_per_mole)\n\n return energy", "def measure_kinetic_energy(self, t_measure=None):\n assert self.data is not None\n time = self.get_time()\n N = self.get_N()\n m0 = self.sim_chain.m0\n #print 'm0', m0\n \n if t_measure is not None:\n # find index for time closest to t_measure\n idx = self._find_index_for_time(t_measure)\n \n # calculate kinetic energy\n velocities = self.data[idx,N:2*N]\n masses = self.sim_chain.m\n kinetic_energy = 0.5*np.sum( np.multiply(masses/m0, velocities**2.0) )\n return kinetic_energy\n else:\n kinetic_energies = np.zeros(len(time))\n ctr = 0\n for tau in time:\n kinetic_energies[ctr] = self.measure_kinetic_energy(tau)\n ctr += 1\n return kinetic_energies", "def getEnergy(self, normalized=True, mask=None):\n psi = self.psi.get()\n n = self.n.get()\n density = np.absolute(psi) ** 2\n gradx = np.gradient(psi)[0]\n normFactor = density.sum() if normalized else 1.0\n return np.ma.array(-(0.25 * np.gradient(\n np.gradient(density)[0])[0]\n - 0.5 * np.absolute(gradx) ** 2\n - (self.g_C * density + self.g_R * n)\n * density), mask=mask).sum() / normFactor", "def energyplus_its(self):\n if self._energyplus_its is None:\n self._energyplus_its = 0\n return self._energyplus_its", "def calc_gravitational_energy(self):\n\n star = self.star\n\n M, K, N = star.mesh_size\n ph = star.phi_coords\n mu = star.mu_coords\n r = star.r_coords\n\n def S1(j, k):\n sum = 0\n\n for i in range(0, M - 2, 2):\n sum += (1 / 6) * (ph[i + 2] - ph[i]) * (star.rho[i, j, k] * star.Phi[i, j, k] + 4 *\n star.rho[i + 1, j, k] * star.Phi[i + 1, j, k] +\n star.rho[i + 2, j, k] * star.Phi[i + 2, j, k])\n return 2 * sum\n\n def S2(k):\n sum = 0\n\n for j in range(0, K - 2, 2):\n sum += (1 / 6) * (mu[j + 2] - mu[j]) * \\\n (S1(j, k) + 4 * S1(j + 1, k) + S1(j + 2, k))\n\n return 2 * sum\n\n W = 0\n\n for k in range(0, N - 2, 2):\n W -= 0.5 * (1 / 6) * (r[k + 2] - r[k]) * (r[k]**2 * S2(k) +\n 4 * r[k + 1]**2 * S2(k + 1) +\n r[k + 2]**2 * S2(k + 2))\n\n return W", "def particleCharge(self):\n return 
self.params['particleCharge']", "def energy(self):\n nocc, ntot, gmo, e = self.nocc, self.ntot, self.gmo, self.e\n\n Ec = 0.0\n for i in range(nocc):\n for j in range(nocc):\n for a in range(nocc, ntot):\n for b in range(nocc, ntot):\n Ec += gmo[i, a, j, b]*(2*gmo[i, a, j, b] - gmo[i, b, j, a])/\\\n (e[i] + e[j] - e[a] - e[b])\n\n self.Ec = Ec\n self.E_mp2 = Ec + self.E_scf\n\n print('@MP2 correlation energy: {:15.10f}\\n'.format(self.Ec))\n print('@Total MP2 energy: {:15.10f}\\n'.format(self.E_mp2))\n\n return self.E_mp2", "def energy_func(self):\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI))", "def intEnergy_mass(self):\n return _cantera.reactor_intEnergy_mass(self.__reactor_id)", "def compute_energy_density(kT):\n h=u.planck\n c=u.speed_of_light\n pi=np.pi\n return (8*pi/(h*c)**3)*((pi*kT)**4/15)", "def block_energy():\r\n\r\n my_block = q.get()\r\n my_block = my_block.flatten()\r\n energy = np.sum(my_block ** 2)\r\n return energy, my_block", "def delta_energy(atom,layer1,layer2):\n global r,c,h\n return float('%.2E' % Decimal(str(r*((atom**2/layer1**2)-(atom**2/layer2**2)))))", "def current_energy_produced(self):\n return self.df.exp.sum()", "def energy_atom(atom,layer):\n global r,c,h\n backval= r*((atom**2/layer**2))\n return float('%.2E' % Decimal(str(backval)))", "def energy_pfu(self):\n return self._energy_pfu", "def energy(self):\n nocc, gmo, e = self.nocc, self.gmo, self.e\n\n Ec = 0.0\n for i in range(nocc):\n for j in range(nocc):\n for a in range(nocc, len(e)):\n for b in range(nocc, len(e)):\n Ec += (1/4.0) * gmo[i, j, a, b]**2 / (e[i]+e[j]-e[a]-e[b])\n\n self.Ec = Ec\n self.E_mp2 = Ec + self.E_scf\n\n print('@MP2 correlation energy: {:15.10f}\\n'.format(self.Ec))\n print('@Total MP2 energy: {:15.10f}\\n'.format(self.E_mp2))\n\n return self.E_mp2", "def particle_energies_Nashgyro(xyv, NL, KL, BM_rest, OmK, Omg):\n # Split xyv\n xy = xyv[:, 0:2]\n v = xyv[:, 2:4]\n\n # Potential energy\n BL = NL2BL(NL, KL)\n bo = BM2bL(NL, BM_rest, BL)\n bL = bond_length_list(xy, BL)\n kL = KL2kL(NL, OmK, BL)\n U = 0.5 * abs(kL) * (bL - bo) ** 2\n # # Check\n # print 'KL = ', KL\n # print 'BL = ', BL\n # print 'bo = ', bo\n # print 'kL = ', kL\n # print 'U = ', U\n\n # Kinetic energy\n speed_squared = v[:, 0] ** 2 + v[:, 1] ** 2\n KE = 0.5 * (abs(Omg) * speed_squared)\n\n # Check\n if (U < 0).any() or (KE < 0).any():\n print 'KE = ', KE\n print 'U = ', U\n print 'kL*(bL-bo)**2 = ', kL * (bL - bo) ** 2\n print 'kL = ', kL\n raise RuntimeError('NEGATIVE ENERGY!')\n\n return U, KE", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def internalenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n u = g - temp*g_t - pres*g_p\n return u", "def get_energy(self):\n return self.bot_client.send_command(_Command.GetEnergy)", "def calculate_energy(self):\n temp_e = 0\n\n for i in range(0,self.neuron_count):\n for j in range(0, self.neuron_count):\n if i != j:\n temp_e += self.get_weight(i, j) * self.current_state[i] * \\\n self.current_state[j]\n return -1 * temp_e / 2", "def energy(self, state):\n energy = 0.0\n if isinstance(state, dict):\n # convert to array\n state = [state[elem] for elem in self.indices]\n\n state = np.array(state)\n for coeff in self.interactions[1:]:\n for _inds, value in coeff.items():\n energy += value * np.prod(state[list(_inds)])\n for i, hi in 
self.interactions[0].items():\n energy += hi * state[i]\n\n return energy", "def energy(self):\n E = sum([1 for c in self.constraints if self._is_constraint_violated(c)])\n if E == 0:\n self._save_solution()\n print(\"exiting...\")\n exit()\n return E", "def compute_energy(self):\n\n # radiation energy\n Qsqrd = self.omega_coords[:,:,1]*self.omega_coords[:,:,1]\n Psqrd = self.omega_coords[:,:,0]*self.omega_coords[:,:,0]\n\n e_rad = (Psqrd/self.mode_mass + (self.mode_mass*self.omega**2)*Qsqrd)*.5\n\n # space charge energy\n Dsqrd = self.dc_coords[:,:,0]*self.dc_coords[:,:,0]\n\n e_drft = Dsqrd/(2.*self.mode_mass)\n\n energy = e_rad+e_drft\n\n return energy", "def send_energy(self) -> float:\n # Ensure that the molecule currently passes validation\n if not self.molecule_validated:\n raise Exception(\"MDI attempting to compute energy on an unvalidated molecule\")\n self.run_energy()\n properties = self.compute_return.properties.dict()\n energy = properties[\"return_energy\"]\n MDI_Send(energy, 1, MDI_DOUBLE, self.comm)\n return energy", "def get_energy():\n\n # open the psi4 log file\n with open('output.dat', 'r') as log:\n lines = log.readlines()\n\n # find the total converged energy\n for line in lines:\n if 'Total Energy =' in line:\n energy = float(line.split()[3])\n break\n else:\n raise EOFError('Cannot find energy in output.dat file.')\n\n return energy", "def kinetic_energy_rigidbody(theta, phi, vX, vY, vtheta, vphi, vpsi, Mm, params):\n l = params['l']\n I3 = params['I3']\n I1star = params['I1'] + Mm * l ** 2\n\n # gw3 = vpsi + vphi* np.cos(theta)\n w3 = params['w3']\n\n v_sq = vX ** 2 + vY ** 2\n vXprod = vX * (vtheta * np.cos(theta) * np.cos(phi) - vphi * np.sin(theta) * np.sin(phi))\n vYprod = vY * (vtheta * np.cos(theta) * np.sin(phi) + vphi * np.sin(theta) * np.cos(phi))\n T1 = 0.5 * Mm * (v_sq)\n T2 = Mm * l * (vXprod + vYprod)\n T3 = 0.5 * I1star * (vphi ** 2 * np.sin(theta) ** 2 + vtheta ** 2)\n T4 = 0.5 * I3 * w3 ** 2\n\n KEvec = T1 + T2 + T3 + T4\n KE = sum(KEvec)\n if 'BIND' in params:\n if len(params['BIND']) > 0:\n KEnonboundary = KE - sum(KEvec[params['BIND']])\n else:\n KEnonboundary = 0 * KE\n else:\n KEnonboundary = 0 * KE\n\n return KE, KEvec, KEnonboundary, sum(T1), sum(T2), sum(T3), sum(T4)", "def get_energy():\n\n # open the psi4 log file\n with open(\"output.dat\", \"r\") as log:\n for line in log:\n if \"Total Energy =\" in line:\n return float(line.split()[3])\n\n raise EOFError(\"Cannot find energy in output.dat file.\")", "def getEnergy(self, normalized=True, mask=None):\n if self.gpu:\n psi = self.psi.get()\n V = self.Vdt.get() / self.dt\n else:\n psi = self.psi\n V = self.Vdt.get() / self.dt\n density = np.absolute(psi) ** 2\n gradx = np.gradient(psi)[0]\n normFactor = density.sum() if normalized else 1.0\n return np.ma.array(-(0.25 * np.gradient(\n np.gradient(density)[0])[0]\n - 0.5 * np.absolute(gradx) ** 2\n - (self.g_C * density + V)\n * density), mask=mask).sum() / normFactor", "def total_energy(state, k=1, m=1):\n return 0.5*k*state[..., 0]*state[..., 0]+0.5*m*state[..., 1]*state[..., 1]", "def _J(self):\n pd = self.particle_distribution(self._Ep * u.GeV)\n return pd.to('1/GeV').value", "def kinetic_energy(v, Mm=1.):\n speed_squared = v[:, 0] ** 2 + v[:, 1] ** 2\n # timeit.timeit('vt[:,0]**2+vt[:,1]**2', setup='import numpy as np; vt = np.random.rand(10000,2)', number=1000)\n KE = 0.5 * sum(Mm * speed_squared)\n return KE", "def _energy(self, X, y):\n yhat = self.evaluate(X)\n loss = ((y - yhat) ** 2).sum() / 2\n return loss", "def energy(self, 
state):\n return _modeller.mod_state_optimizer_energy(self._modpt,\n self.__edat.modpt,\n state, self.__libs.modpt)", "def getEnergyAdded(self):\n return self.json_state.get(\"charging\").get(\"wh_energy\")", "def _calc_energy( self, V_a, eos_d ):\n pass" ]
[ "0.85219467", "0.85176307", "0.84117717", "0.7932918", "0.7827377", "0.7754195", "0.7666235", "0.7638704", "0.74971205", "0.7490171", "0.74387", "0.7403423", "0.7370556", "0.72361696", "0.7233435", "0.7216185", "0.7176531", "0.7157923", "0.715126", "0.70478064", "0.7043319", "0.69634485", "0.69634485", "0.6958873", "0.6943801", "0.69312835", "0.68825376", "0.6831241", "0.6771883", "0.6698421", "0.6692308", "0.66866934", "0.6683666", "0.66637677", "0.666342", "0.6580406", "0.6575394", "0.65664285", "0.65606755", "0.6534673", "0.64716494", "0.64556473", "0.64125067", "0.6407887", "0.64058256", "0.6385434", "0.6384101", "0.637545", "0.6371406", "0.63627183", "0.6353022", "0.63199663", "0.63031644", "0.6297718", "0.62962794", "0.6280796", "0.62715465", "0.626381", "0.62429845", "0.62402225", "0.6234947", "0.6224938", "0.6224938", "0.6221994", "0.621512", "0.61853737", "0.61662877", "0.61531717", "0.6152436", "0.61454713", "0.61425066", "0.61323714", "0.61316013", "0.6125603", "0.6120472", "0.6117994", "0.6105819", "0.6104246", "0.610134", "0.609703", "0.60965407", "0.60918415", "0.6090605", "0.60742813", "0.6067524", "0.60596734", "0.60420173", "0.603952", "0.6037307", "0.6035532", "0.6022898", "0.60150677", "0.6011539", "0.60052526", "0.59887886", "0.59875643", "0.59850377", "0.59536934", "0.5945202", "0.59248054" ]
0.8440398
2
Method that updates the particle's velocity and position with the Euler-Cromer method
def UpdateCromer(self, deltaT):
    self.velocity += self.acceleration * deltaT
    self.position += self.velocity * deltaT
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def EulerN(particle,dt):\n particle.acc = particle.acc\n particle.vel = particle.vel + particle.acc*dt\n particle.pos = particle.pos + particle.vel*dt\n\n return particle", "def update(self, delta):\n # Computes new positions\n for part in self.particles:\n part.set_xyvxvy(self.runge_kutta(part.to_y(), 0, delta))", "def update_euler(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n kap = (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)\n\n pos = self.pos+delta_t*self.vel\n vel = self.vel+delta_t*kap[1]\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n\n try:\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=True)\n except Collision.CollisionException as col:\n vel = self.vel+col.delta_t*kap[1]\n C, fvel = self.drag_coefficient(col.pos, vel, self.time+col.delta_t, nearest = True)\n col.vel = (self.vel+col.delta_t*(kap[1]+C*fvel))/(1.0+col.delta_t*C)\n raise col\n \n self.time += delta_t\n\n return kap", "def update_position(self):\n self.position[0] += self.velocity[0]\n self.position[1] += self.velocity[1]", "def apply_velocity(self):\n self.position.data += self.velocity.data", "def particle_velocityV(V,F,dt,Rv,sigma,epsilon,D,N): \n V += dt/2*(particle_forceV(Rv[-1], N, sigma, epsilon, D) + particle_forceV(Rv[-2], N, sigma, epsilon, D))\n return V", "def update(self, timestep):\n # force_x, force_y = self.pending_force_update\n # vel_x, vel_y = self.velocity\n # vel_x += force_x / self.mass * TIMESTEP\n # vel_y += force_y / self.mass * TIMESTEP\n # # Update positions\n # pos_x, pos_y = self.position\n # pos_x += vel_x * TIMESTEP\n # pos_y += vel_y * TIMESTEP\n # # vel_abs_old = math.sqrt(self.velocity.x ** 2 +\n # # self.velocity.y ** 2)\n # # vel_abs_new = math.sqrt(vel_x ** 2 + vel_y ** 2)\n # # if self.name == \"earth\":\n # # print(math.sqrt(vel_x ** 2 + vel_y ** 2))\n # # multiplicator = (vel_abs_old / vel_abs_new)**2\n # # if self.name == \"earth\": print(multiplicator)\n # self.position = Position(pos_x, pos_y)\n # self.velocity = Direction(vel_x, vel_y)\n # # body.goto(body.px*SCALE, body.py*SCALE)\n # # body.dot(3)\n\n self.velocity += self.pending_force_update / self.mass * timestep\n self.position += self.velocity * timestep\n self.pending_force_update = None", "def update(self, dt):\n for p in self.listOfParticles:\n if self.willcollide(p, can.C, can.R):\n p1 = p.P\n pc, u = p.findpc()\n vp = p1.vec(p1.proj(pc.vec(can.C)))\n p1m = p1.__add__(vp.scale(vp.len()))\n v2 = pc.vec(p1m).scale(p.V.len())\n p.V = v2\n p.P = pc + p.V.scale(p.V.len()*(1-u))\n else:\n p.P = p.step(dt)", "def update(self, delta_time):\n self.velocity = (sin(self.sin_val), cos(self.sin_val))\n self.sin_val += delta_time*self.speed\n translate_indexes(self.velocity, self.points[0], self.top_idx)", "def update(self):\r\n # change in position -> velocity\r\n self.position += self.velocity\r\n # change in celocity -> acceleration\r\n self.velocity += self.acceleration\r\n \r\n # if velocity magnitude is higher than the defined limit set the velocity \r\n # magnitude to max speed\r\n if np.linalg.norm(self.velocity) > self.max_speed:\r\n self.velocity = self.velocity / np.linalg.norm(self.velocity) * self.max_speed\r\n \r\n # reset the acceleration\r\n self.acceleration = Vector(*np.zeros(2))", "def main():\n \n # Particle in SHO - c.f. Mocz & Succi (2015) Fig. 
2\n # parameters\n n = 100 # number of particles\n dt = 0.02 # timestep\n nt = 100 # number of timesteps\n nt_setup = 400 # number of timesteps to set up simulation\n n_out = 25 # plot solution every nout steps\n b = 4 # velocity damping for acquiring initial condition\n m = 1/n # mass of SPH particle ( m * n = 1 normalizes |wavefunction|^2 to 1)\n h = 40/n # smoothing length\n t = 0. # time\n\n # plot potential\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n fig = plt.plot(xx, 0.5*xx**2, linewidth=5, color=[0.7, 0.7, 0.9])\n \n # initialize\n x = np.linspace(-3.0, 3.0, num=n)\n x = np.reshape(x,(n,1))\n u = np.zeros((n,1))\n \n rho = density( x, m, h )\n P = pressure( x, rho, m, h )\n a = acceleration( x, u, m, rho, P, b, h )\n\n # get v at t=-0.5*dt for the leap frog integrator using Euler's method\n u_mhalf = u - 0.5 * dt * a\n\n # main loop (time evolution)\n for i in np.arange(-nt_setup, nt): # negative time (t<0, i<0) is used to set up initial conditions\n\n # leap frog\n u_phalf = u_mhalf + a*dt\n x = x + u_phalf*dt\n u = 0.5*(u_mhalf+u_phalf)\n u_mhalf = u_phalf\n if (i >= 0):\n t = t + dt\n print(\"%.2f\" % t)\n \n if (i == -1 ): # switch off damping before t=0\n u = np.zeros((n,1)) + 1.0\n u_mhalf = u\n b = 0 # switch off damping at time t=0\n \n # update densities, pressures, accelerations\n rho = density( x, m, h )\n P = pressure( x, rho, m, h )\n a = acceleration( x, u, m, rho, P, b, h)\n \n # plot solution every n_out steps\n if( (i >= 0) and (i % n_out) == 0 ):\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n rr = probeDensity(x, m, h, xx)\n rr_exact = 1./np.sqrt(np.pi) * np.exp(-(xx-np.sin(t))**2/2.)**2\n fig = plt.plot(xx, rr_exact, linewidth=2, color=[.6, .6, .6])\n fig = plt.plot(xx, rr, linewidth=2, color=[1.*i/nt, 0, 1.-1.*i/nt], label='$t='+\"%.2f\" % t +'$')\n # plot the t<0 damping process for fun\n if( i==-nt_setup or i==-nt_setup*3/4 or i==-nt_setup/2 ):\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n rr = probeDensity(x, m, h, xx)\n fig = plt.plot(xx, rr, linewidth=1, color=[0.9, 0.9, 0.9])\n \n plt.legend()\n plt.xlabel('$x$')\n plt.ylabel('$|\\psi|^2$')\n plt.axis([-2, 4, 0, 0.8])\n plt.savefig('solution.pdf', aspect = 'normal', bbox_inches='tight', pad_inches = 0)\n plt.close()", "def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))", "def update(self):\n self.velocity = [math.cos(self.angle), - math.sin(self.angle)]\n self.velocity = [self.speed * i for i in self.velocity]\n\n super().update()", "def call_odeint(particle_object,dt,omega):\n \n# print \n# print '##########################'\n# \n r = particle_object.position\n v = particle_object.velocity \n beta = particle_object.beta \n# \n# print 'Initial parameters'\n# print 'r', r\n# print 'v', v\n# print 'beta',beta\n# print \n# beta = 0.15\n #particle_radius = particle_object.radius\n \n# ### updating the acceleration due to planet history with the position value before it is moved so that it is the acceleration due to the planet at the position that led to this nett acceleration. 
\n# particle_object.update_planet_acceleration_history(AccelerationDueToPlanet(r))\n \n u0 = np.array([r[0],r[1],r[2],v[0],v[1],v[2]])\n \n \n sol = odeint(acceleration_for_odeint, u0, np.array([0,dt]), args=(beta, omega,planet_pos)) ## The solution in the from [rx,ry,rz,vx,vy,vz]\n \n r_new = np.array([sol[1,0],sol[1,1],sol[1,2]])\n v_new = np.array([sol[1,3],sol[1,4],sol[1,5]]) \n \n# print 'New parameters'\n# print 'r', r_new\n# print 'v', v_new\n#\n# print \n \n #particle_object.update_position(r_new) \n #particle_object.update_velocity(v_new)\n \n ### add one to the particle's age \n particle_object.update_age() \n particle_object.update_timestep_history(timestep)\n \n #last_uvect = sol[1,:]\n \n #return last_uvect\n \n #return 0,0\n return r_new,v_new", "def setVelocity(self,v):\n if v is None:\n self.v = Cartesian3DVector()\n else:\n if isinstance(v,Cartesian3DVector):\n self.v = Cartesian3DVector(v.x,v.y,v.z)\n else:\n raise CoordinateVector(\"Initializing a particle with the incorrect velocity vector type.\")", "def update(self):\n\n\t\tfor i, particle in enumerate(self.particles):\n\t\t\tparticle.move()\n\t\t\t#if self.acceleration:\n\t\t\t#\tparticle.accelerate(self.acceleration)\n\t\t\tself.bounce(particle)\n\t\t\tfor particle2 in self.particles[i+1:]:\n\t\t\t\tcollide(particle, particle2)", "def change_velocity(self, delta):\n self.velocity += delta", "def change_velocity(self, delta):\n self.velocity += delta", "def _update_position(self, delta_t):\n\n self.pos += self.vel*delta_t", "def new_velocity(self):\n self.velocity = self.vafter", "def update_E(self):\n self.grid.E[self.loc] += (\n self.grid.courant_number\n * self.grid.inverse_permittivity[self.loc]\n * self.phi_E\n )", "def do_physics(self, delta_time):\n self.x += (self.vel_x * delta_time)\n self.y += (self.vel_y * delta_time)", "def _update_positions(self):\n self._velocities += self._accelerations * self.time_step\n self._positions += self._velocities * self.time_step", "def AddEarthVelocity(self, ds):\n self.IsEarthVelocity = True\n self.EarthVelocity = ds", "def __init__(self,E,px,py,pz):\n Particle.__init__(self)\n self.E=float(E)\n self.px=float(px)\n self.py=float(py)\n self.pz=float(pz)\n self.cal_pt()\n self.cal_phi()\n self.cal_eta()\n #self.cal_mass()\n #print self.E,self.px,self.py,self.pz\n #print self.pt,self.phi,self.eta", "def calcVelocityFromMomentum(self):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the particle velocity from momentum.\")\n values = {}\n for direction in self.p.order:\n gamma = self.calcLorentzGammaFromMomentum(direction)\n values[direction] = getattr(self.p,direction)/(gamma*self.mass)\n self.setVelocity(Cartesian3DVector(**values))\n return self.getVelocity()", "def update(self):\r\n t = 1 # time step\r\n # Calculate new position\r\n pos_x = self.pos[0] + self.vel[0]*t + self.acc[0]*t*t\r\n pos_y = self.pos[1] + self.vel[1]*t + self.acc[1]*t*t\r\n if self.screen[0]:\r\n pos_x = pos_x % self.screen[0]\r\n if self.screen[1]:\r\n pos_y = pos_y % self.screen[1]\r\n # Bound velocities by -MAX_VEL and MAX_VEL\r\n vel_x = max(min(self.vel[0] + self.acc[0]*t, self.MAX_VEL), -self.MAX_VEL)\r\n vel_y = max(min(self.vel[1] + self.acc[1]*t, self.MAX_VEL), -self.MAX_VEL)\r\n # Don't update position and velocity until end so all values used in\r\n # calculations are previous values\r\n self.pos = [pos_x, pos_y]\r\n self.vel = [vel_x, vel_y]", "def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in 
vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)", "def move(self):\r\n for index in range(self.size):\r\n self.values[index] = self.values[index] + self.velocities[index]\r\n \r\n # Adjust values to keep particle inside boundaries.\r\n if self.values[index] < Particle.MIN_VALUE:\r\n self.values[index] = (-self.values[index] % Particle.MAX_VALUE)\r\n elif self.values[index] > Particle.MAX_VALUE:\r\n self.values[index] = (self.values[index] % Particle.MAX_VALUE)", "def particle_initial_velocity(fignr,N,D,T,m,dim,kb):\n V = np.zeros((3,N))\n V[0:dim,:] = np.random.normal(0, kb*T/m, (dim,N))# / np.sqrt(T/(kb*m))\n plotfunctions.velocity(fignr,N,V)\n # Typical speed for particles\n return V", "def Verlet2(particle_object, dt,omega):\n \n r = particle_object.position\n v = particle_object.velocity \n beta = particle_object.beta \n particle_radius = particle_object.radius\n particle_mass_kg = ((4.0/3.0)*np.pi*particle_radius**3)*particle_density ## Assuming spherical particles \n \n ### updating the acceleration due to planet history with the position value before it is moved so that it is the acceleration due to the planet at the position that led to this nett acceleration. \n particle_object.update_planet_acceleration_history(AccelerationDueToPlanet(r))\n \n (r_new, v_new) = VerletHope2(r,v,beta,dt,particle_radius,particle_mass_kg)\n \n particle_object.update_position(r_new) \n \n particle_object.update_velocity(v_new)\n \n ### add one to the particle's age \n particle_object.update_age() \n \n particle_object.update_timestep_history(timestep)\n \n return None", "def add_velocity(self, Mextra=0, period=0, model=1):\n \n if self.npart == 0:\n self.vel = transpose(array([[],[]]))\n return\n \n print(\" Adding velocities...\")\n \n if model==0: vel = zeros((self.npart, 2))\n \n elif model in [1,2]:\n print(\" Setting keplerian velocities...\")\n pos = self.pos - self.center\n radii = norm(pos, axis=1)\n self.v_kep = sqrt(Mextra * G / radii)\n if model==2: Mextra += sum(self.mass)\n v_kep = sqrt(Mextra * G / radii)\n vel = matmul(pos / radii[:, newaxis], array([[0, 1], [-1, 0]])) * v_kep[:, newaxis]\n \n\n elif model==3:\n print(\" Setting velocities from binary period...\")\n if period==0:\n print(\" Incorrect period for setting disk velocities.\")\n print(\" Disk velocities are set to zero.\")\n vel = zeros((self.npart, 2))\n \n else:\n pos = self.pos - self.center\n v_ang = 1 / float(period) \n vel = v_ang * matmul(pos, array([[0, 1], [-1, 0]]))\n \n else:\n print(\"Model must be 0, 1, 2 or 3.\")\n print(\" {:d} was given. Exiting.\".format(model))\n exit()\n \n \n self.vel = vel", "def fv(X,Y,dx,dy,r2,i,append,L,N,U,dt,close_list,Nlist,vel_verlet_on,R,menu,submenu,n1,grid,G,wallcount,X2):\r\n\r\n \"\"\"JV: append is a boolean. 
If it's true, adds the energy to our list, if it isn't, it doesn't.\r\n We do that because in some cases we will call the algorithm more times than the actual step number (and\r\n we only want to sum the value T/dt times), this is needed in the velocity-Verlet algorithm, that we call the fv()\r\n function one more time than needed just to start the loop.\"\"\"\r\n\r\n# L = self.param[2]\r\n#\r\n# N = self.particles.size\r\n\r\n #For computing all the distances I use a trick with the meshgrid function,\r\n #see the documentation on how this works if you dont see it.\r\n\r\n \"\"\"JV: X is an array that contains each position, mx is an nxn array that each column is the position of one particle (so it's a matrix\r\n that has n X rows) and mxt is the same but tranposed (so it's a matrix of n X columns)\"\"\"\r\n\r\n \"\"\"\r\n UPDATE: This block of code is commented because now it's done in a loop inside solve_verlet() (due to Numba...).\r\n Looks a little bit messy but if Numba allowed me to call the np.meshgrid() function we would do this here. Sorry, but I like to keep the comment to remind me that.\r\n \"\"\"\r\n # MX, MXT = np.meshgrid(X,X,copy=False)\r\n # MY, MYT = np.meshgrid(Y,Y,copy=False)\r\n\r\n #JV: So dx is a nxn simetric array with 0 in the diagonal, and each position is the corresponding distance between the particles,\r\n # so the position [1,2] is the distance between partcle 1 and 2 (x1-x2), and so on\r\n # dx = MXT - MX\r\n # dx = dx\r\n\r\n # dy = MYT - MY\r\n # dy = dy\r\n\r\n # r2 = np.square(dx)+np.square(dy)\r\n\r\n # if(menu == \"Free!\"):\r\n # #JV: We do this to get the actual distance in the case of the \"Free!\" simulation, in which there is no elastic collision between the particles and the boundaries\r\n # dx_v2 = (np.abs(dx.copy())-1*L)\r\n # r2_v2 = dx_v2**2+dy**2\r\n # dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n # dy_v2 = (np.abs(dy.copy())-1*L)\r\n # r2_v2 = dx**2+dy_v2**2\r\n # dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n # r2_v2 = dx_v2**2+dy_v2**2\r\n # dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n # dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n\r\n dUx = 0.\r\n dUy = 0.\r\n utot = np.zeros((N))\r\n f = np.zeros((N,2))\r\n\r\n for j in range(0,N):\r\n dUx = 0.\r\n dUy = 0.\r\n u = 0.\r\n\r\n #JV: we now calculate the force with only the Nlist closest particles\r\n for k in range(0,Nlist):\r\n c = int(close_list[j][k])\r\n\r\n #In the force computation we include the LJ and the walls (JV: in the verlet case). 
I truncate the interaction at self.R units of lenght,\r\n #I also avoid distances close to 0 (which only should affect the diagonal in the matrix of distances)\r\n #All these conditions are included using the numpy.where function.\r\n #If you want to include more forces you only need to add terms to these lines.\r\n\r\n if(vel_verlet_on == True):\r\n if((r2[j,c] < 4*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n dUx = dUx + dLJverlet(dx[j,c],r2[j,c],R[j],R[c])\r\n dUy = dUy + dLJverlet(dy[j,c],r2[j,c],R[j],R[c])\r\n # print(dUx,dUy,dx[j,c],r2[j,c],R[j],R[c])\r\n#JV: COMMENTED PART BECAUSE NUMBA HAS PROBLEMS WITH THIS BLOCK OF CODE THAT DOES THE CALCULATION IN THE VERLET ALGORITHM, NOW IT ONLY WORKS WITH THE VELOCITY VERLET, TO FIX\"\r\n# else:\r\n# if((r2[j,c] < 4*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n# dUx = dUx + dLJverlet(dx[j,c],r2[j,c],R[j],R[c]) - dwalls([X[j],Y[j]],param)\r\n# dUy = dUy + dLJverlet(dy[j,c],r2[j,c],R[j],R[c]) - dwalls([X[j],Y[j]],param)\r\n\r\n #JV: We add the energy in the corresponding array in both cases, remember that the verlet algorithm will include the energy from the walls\r\n # and that will be visible in fluctuations on the energy\r\n if(vel_verlet_on == True):\r\n if((r2[j,c] < 2*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n u = u + LJverlet(r2[j,c],R[c],R[j])\r\n# else:\r\n# u = u + walls([X[j],Y[j]])#JV: TO CHANGE; NOW ONLY WORKS WITH VEL_VERLET_ON\r\n# else:\r\n# if((r2[j,c] < 2*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n# u = u + LJverlet(r2[j,c],R[c],R[j],param)\r\n#\r\n# if((X[j]**2+Y[j]**2) > (0.8*L)**2):\r\n# u = u + walls([X[j],Y[j]],param)\r\n #JV: COMMENTED FOR NOW\r\n\r\n #JV: If the argument it's True, we will append the energy to our corresponding array\r\n if(append == True):\r\n utot[j] = u\r\n\r\n f[j,:] = f[j,:]+np.array([dUx,dUy])\r\n\r\n if(append == True):\r\n U[int(i)] = np.sum(utot) #JV: Finally, we add the total energy so we have the global energy in a step of time\r\n\r\n return f", "def updatePosition(self):\n\n #For this update, a time-step of 1 is assumed ->Change Code if not true\n self.position = [self.position[0] + self.velocity[0], self.position[1]+self.velocity[1]]", "def run(self):\r\n\r\n self.tick = self.tick + 1\r\n print 'Particle tick=:', self.tick", "def __init__(self, mass, radius, position, velocity):\n self.mass = mass\n self.radius = radius\n self.position = position\n self.velocity = velocity\n print(self.velocity)\n self.vafter = np.copy(velocity) # temp storage for velocity of next step\n self.delete = False", "def __init__(self, temperature=298 * simtk.unit.kelvin, collision_rate=91.0 / simtk.unit.picoseconds, timestep=1.0 * simtk.unit.femtoseconds):\n super(AndersenVelocityVerletIntegrator, self).__init__(timestep)\n\n #\n # Integrator initialization.\n #\n kT = kB * temperature\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addGlobalVariable(\"p_collision\", timestep * collision_rate) # per-particle collision probability per timestep\n self.addPerDofVariable(\"sigma_v\", 0) # velocity distribution stddev for Maxwell-Boltzmann (computed later)\n self.addPerDofVariable(\"collision\", 0) # 1 if collision has occured this timestep, 0 otherwise\n self.addPerDofVariable(\"x1\", 0) # for constraints\n\n #\n # Update velocities from Maxwell-Boltzmann distribution for particles that collide.\n #\n self.addComputePerDof(\"sigma_v\", \"sqrt(kT/m)\")\n self.addComputePerDof(\"collision\", \"step(p_collision-uniform)\") # if collision has occured this timestep, 0 otherwise\n 
self.addComputePerDof(\"v\", \"(1-collision)*v + collision*sigma_v*gaussian\") # randomize velocities of particles that have collided\n\n #\n # Velocity Verlet step\n #\n self.addUpdateContextState()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x+dt*v\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m+(x-x1)/dt\")\n self.addConstrainVelocities()", "def update(self):\n self.setVector(0.15, 0.0)", "def _update_vel(self) -> None:\n self.state[:, :, Boids.Attr.VEL] += self.state[:, :, Boids.Attr.ACC]\n self.state[:, :, Boids.Attr.VEL] = maglim(\n self.state[:, :, Boids.Attr.VEL], self.max_vel)", "def update(self):\n self.x += self.vx\n self.y += self.vy", "def ball_increase_velocity():\n global ball_vel\n ball_vel[0] = ball_vel[0] * 1.10\n ball_vel[1] = ball_vel[1] * 1.10", "def velocity_step(self, dt, force):\r\n self.vel += dt * force / self.mass", "def update(self,time,mposition):\n max_speed=100\n self.circlePos += self.circleVel * time\n if mposition!=None:\n mouseDir = mposition - self.circlePos\n self.circleVel += mouseDir.normalized() * 80 * time #This is the terminal velocity being applied\n t=self.circleVel.magnitude()\n if t>=max_speed:\n self.circleVel=self.circleVel.normalized()*max_speed", "def update_vel(self, forces, dt):\n\n for particle, force in zip(self.particles, forces):\n particle.leap_velocity(dt, force)\n return None", "def particle_positionV(R,V,dt,F,D):\n R += (dt*V+dt**2/2*F) \n p = 1\n mn = 0\n R = check_bc(R, D, p, mn)\n return R", "def velocity(self, X, Y):\n self.u = (self.strength / (2 * np.pi) *\n (X - self.xc) / ((X - self.xc)**2 + (Y - self.yc)**2))\n self.v = (self.strength / (2 * np.pi) *\n (Y - self.yc) / ((X - self.xc)**2 + (Y - self.yc)**2))", "def velocity(self, X, Y):\n self.u = self.Vinf * np.ones_like(X)\n self.v = np.zeros_like(X)", "def settling_velocity(self, evaporation_factor: float=0.3) -> _VectorisedFloat:\n if self.diameter is None:\n return 1.88e-4\n else:\n return 1.88e-4 * (self.diameter*evaporation_factor / 2.5)**2", "def evolve_system(self,dt, energy_file = None):\n phi = self.compute_field()\n force_m = self.compute_forces_mesh()\n self.acc_new = np.zeros([len(self),2])\n #Computes the force felt by each particles and deduce the acceleration\n for i in range(len(self)):\n x,y = self.ptclgrid.ixy[i]\n x = int(x)\n y = int(y)\n self.acc_new[i][0] += (1/self.mass[i]*force_m[0][x,y])\n self.acc_new[i][1] += (1/self.mass[i]*force_m[1][x,y])\n #Evolve the position and momenta of the particle in the list\n self.particles.evolve(self.acc,self.acc_new,dt,self.size, boundary_periodic=self.boundary_periodic)\n #For non-periodic condition, deletes the particles that leave the grid from the list\n if self.boundary_periodic!=True: \n index = np.argwhere((self.particles.position>self.size-1))\n index2 = np.argwhere((self.particles.position<0))\n index = {a for a in np.append(index,index2)}\n index = list(index)\n self.particles.momentum = np.delete(self.particles.momentum,index,axis=0)\n self.acc = np.delete(self.acc,index,axis=0)\n self.acc_new = np.delete(self.acc_new,index,axis=0)\n self.mass = np.delete(self.mass,index,axis=0)\n self.particles.position = np.delete(self.particles.position,index,axis=0)\n self.acc = self.acc_new.copy()\n #Update the position of the particles on the grid\n self.ptclgrid.update_position(self.particles.position,self.mass)\n self.grid = self.ptclgrid.grid\n self.grid_pos = self.ptclgrid.grid_pos\n #Write the energy 
in a file if on is given\n if energy_file != None:\n energy_file.write(f'{self.energy()}\\n')\n energy_file.flush()\n return self.grid_pos", "def update_particles_with_odom(self, msg):\n new_odom_xy_theta = convert_pose_to_xy_and_theta(self.odom_pose.pose)\n # compute the change in x,y,theta since our last update\n if self.current_odom_xy_theta:\n old_odom_xy_theta = self.current_odom_xy_theta\n delta = (new_odom_xy_theta[0] - self.current_odom_xy_theta[0],\n new_odom_xy_theta[1] - self.current_odom_xy_theta[1],\n new_odom_xy_theta[2] - self.current_odom_xy_theta[2])\n\n self.current_odom_xy_theta = new_odom_xy_theta\n else:\n self.current_odom_xy_theta = new_odom_xy_theta\n return\n\n for particle in self.particle_cloud:\n r1 = math.atan2(delta[1], delta[0]) - old_odom_xy_theta[2]\n d = math.sqrt((delta[0] ** 2) + (delta[1] ** 2))\n\n particle.theta += r1 % 360\n particle.x += d * math.cos(particle.theta) + normal(0, 0.1)\n particle.y += d * math.sin(particle.theta) + normal(0, 0.1)\n particle.theta += (delta[2] - r1 + normal(0, 0.1)) % 360\n # For added difficulty: Implement sample_motion_odometry (Prob Rob p 136)", "def compute_coll(self, particle, step):\r\n m1, m2 = self.mass, particle.mass\r\n r1, r2 = self.radius, particle.radius\r\n v1, v2 = self.velocity, particle.velocity\r\n x1, x2 = self.position, particle.position\r\n di = x2-x1\r\n norm = np.linalg.norm(di)\r\n if norm-(r1+r2)*1.1 < step*abs(np.dot(v1-v2, di))/norm:\r\n self.velocity = v1 - 2. * m2/(m1+m2) * np.dot(v1-v2, di) / (np.linalg.norm(di)**2.) * di\r\n particle.velocity = v2 - 2. * m1/(m2+m1) * np.dot(v2-v1, (-di)) / (np.linalg.norm(di)**2.) * (-di)", "def setMomentum(self,p):\n if p is None:\n self.p = Cartesian3DVector()\n else:\n if isinstance(p,Cartesian3DVector):\n self.p = Cartesian3DVector(p.x,p.y,p.z)\n else:\n raise CoordinateVector(\"Initializing a particle with the incorrect momentum vector type.\")", "def update_velocity(self):\n # Set thruster (up/down) movement\n if self.thrusters:\n self.velocity_y -= self.gravity\n else:\n self.velocity_y += self.velocity_slowing\n\n # Set left movement\n if self.moving_left:\n self.velocity_x -= self.gravity\n else:\n if self.velocity_x < 0:\n self.velocity_x += self.velocity_slowing\n \n # Set right movement\n if self.moving_right:\n self.velocity_x += self.gravity\n else:\n if self.velocity_x > 0:\n self.velocity_x -= self.velocity_slowing", "def advanceVelocity(self,time,acceleration):\n if not isinstance(acceleration,Cartesian3DVector):\n raise CoordinateException(\"Advancing particle momentum with the incorrect acceleration type.\")\n return self.v + time*acceleration", "def cmd_velocity(self, vn, ve, vd, heading):\n pass", "def update(self, time_step):\n a = [0,0]\n F = self.force()\n for i in [0,1]: # We have to update x and y\n a[i] = self.force()[i] / self.mass\n self.velocity[i] = self.velocity[i] + a[i]*time_step\n self.position[i] = self.position[i] + self.velocity[i]*time_step # I'm lazy\n self.turtle.goto(self.position) # Comment out the goto if you need the simulation to run really fast; you won't get the animation", "def update(self):\n if self.dir == \"r\":\n self.vx = 10\n self.vy = 0\n elif self.dir == \"l\":\n self.vx = -10\n self.vy = 0\n elif self.dir == \"u\":\n self.vx = 0\n self.vy = -10\n elif self.dir == \"d\":\n self.vx = 0\n self.vy = 10\n elif self.dir == \"None\":\n self.vx = 0\n self.vy = 0\n self.x += self.vx\n self.y += self.vy", "def update_particles(self):\n for particle in self.particles:\n 
particle.update_coordinates(self.bounds)", "def test_particle_velocity():\n\ttest_orientation = o_gen_instance.generate_orientation_vector()\n\ttest_speed = path_instance.generate_velocity().dot(geom_instance.source_direction)\n\tdetector_length = 0.3 + 5*0.5 # hard-coded for now\n\tassert test_speed*trial_samples > detector_length", "def update(self, dt):\n\n self.velocity += self.acceleration * dt\n self.position += self.velocity * dt \n\n self.ship.rotation = atan2(-self.velocity[1], self.velocity[0]) * 180 / pi #self.angularPosition", "def particle_pos(particle, time):\n return particle.pos + particle.dir * particle.speed * (time - particle.time)", "def compute_swimming_velocity(particle, fieldset, time):\n if particle.active == 1:\n particle.u_swim = particle.vmax * (1-particle.hab) * cos(particle.theta)\n particle.v_swim = particle.vmax * (1-particle.hab) * sin(particle.theta)", "def vel(self, *args, **kwargs) -> Any:\n pass", "def set_velocity(self):\r\n if self.direction == 'left':\r\n self.x_vel = -2\r\n else:\r\n self.x_vel = 2\r\n\r\n self.y_vel = 0", "def getVelocity(self):\n return self.v", "def momentum (self):\n\n for planet in self.planets: #this loop takes a 'planet' from 'self.planets' and computes it linear momentum.\n planet.momentum = planet.mass * planet.velocity #Each body's resulting momentum is updated to the body's information defined in the Particle class.", "def velocity_field(xt,yt,x0,y0,Vinf,dia,rot,chord,B,param=None,veltype='all',integration='simp',m=220,n=200):\n rad = dia/2.\n tsr = rad*fabs(rot)/Vinf\n solidity = (chord*B)/rad\n\n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n\n coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9 = coef_val()\n\n # Calculating EMG distribution parameters (based on polynomial surface fitting)\n if param is None:\n loc1 = _parameterval(tsr,solidity,coef0)\n loc2 = _parameterval(tsr,solidity,coef1)\n loc3 = _parameterval(tsr,solidity,coef2)\n spr1 = _parameterval(tsr,solidity,coef3)\n spr2 = _parameterval(tsr,solidity,coef4)\n skw1 = _parameterval(tsr,solidity,coef5)\n skw2 = _parameterval(tsr,solidity,coef6)\n scl1 = _parameterval(tsr,solidity,coef7)\n scl2 = _parameterval(tsr,solidity,coef8)\n scl3 = _parameterval(tsr,solidity,coef9)\n\n else:\n # Reading in EMG distribution parameters\n loc1 = param[0]\n loc2 = param[1]\n loc3 = param[2]\n spr1 = param[3]\n spr2 = param[4]\n skw1 = param[5]\n skw2 = param[6]\n scl1 = param[7]\n scl2 = param[8]\n scl3 = param[9]\n\n ###################################\n if veltype == 'vort':\n # VORTICITY CALCULATION (NO INTEGRATION)\n if x0t < 0.:\n vel = 0.\n else:\n vel = _vawtwake.vorticitystrength(x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)/rot\n ###################################\n else:\n # Integration of the vorticity profile to calculate velocity\n if integration == 'simp':\n # SIMPSON'S RULE INTEGRATION (must use polynomial surface coefficients from VAWTPolySurfaceCoef.csv)\n inte = 1 # Simpson's Rule\n # inte = 2 # Trapezoidal Rule (optional ability of the code-- faster but less accurate)\n\n if param is not None:\n print \"**** Using polynomial surface coefficients from VAWTPolySurfaceCoef.csv for Simpson's rule integration ****\"\n\n vel_xs,vel_ys = _vawtwake.vel_field(xt,yt,x0,y0,dia,rot,chord,B,Vinf,coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9,m,n,inte)\n\n if veltype == 'all':\n vel = sqrt((vel_xs*Vinf + Vinf)**2 + (vel_ys*Vinf)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs*Vinf + Vinf)/Vinf\n elif veltype 
== 'y':\n vel = vel_ys\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])\n ###################################\n elif integration == 'gskr':\n # 21-POINT GAUSS-KRONROD RULE QUADRATURE INTEGRATION\n xbound = (scl3+5.)*dia\n argval = (x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)\n if veltype == 'all' or veltype == 'x' or veltype == 'ind':\n vel_x = _dblquad(_vawtwake.integrandx,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_xs = (vel_x[0]*fabs(rot))/(2.*pi)\n if veltype == 'all' or veltype == 'y' or veltype == 'ind':\n vel_y = _dblquad(_vawtwake.integrandy,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_ys = (vel_y[0]*fabs(rot))/(2.*pi)\n\n if veltype == 'all':\n vel = sqrt((vel_xs + Vinf)**2 + (vel_ys)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys/Vinf\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])/Vinf\n ###################################\n\n return vel", "def cmd_vel_callback(self, msg):\n # Just store the desired velocity. The actual control runs on odometry callbacks\n v_l = msg.linear\n v_a = msg.angular\n self.v_linear_des = numpy.array([v_l.x, v_l.y, v_l.z])\n self.v_angular_des = numpy.array([v_a.x, v_a.y, v_a.z])", "def update(self, env, u, z, marker_id):\n # YOUR IMPLEMENTATION HERE\n\n new_particles_bar = np.zeros((self.num_particles, 3))\n importance_weights = np.ones(self.num_particles)\n ita = 0\n for m in range(self.num_particles):\n u_noisy = env.sample_noisy_action(u, self.alphas)\n xt = env.forward(self.particles[m,:].reshape(-1, 1), u_noisy)\n zt_hat = env.observe(xt, marker_id)\n importance_weights[m] = env.likelihood(minimized_angle(z - zt_hat), self.beta)\n new_particles_bar[m,:] = xt.reshape(1, -1)\n ita += importance_weights[m]\n \n importance_weights = importance_weights/ita\n\n self.particles, self.weights = self.resample(new_particles_bar, importance_weights)\n mean, cov = self.mean_and_variance(self.particles)\n return mean, cov", "def edge_velocity(self):\n #reflext x values at x edges\n self.u[1,:,0] = -self.u[1,:,1]\n self.u[1,:,-1] = -self.u[1,:,-2]\n #mirror x values at y edges \n self.u[1,0,:] = self.u[1,1,:]\n self.u[1,-1,:] = self.u[1,-2,:]\n #mirror y values at x edges\n self.u[0,:,0] = self.u[0,:,1]\n self.u[0,:,-1] = self.u[0,:,-2]\n #mirror y values at y edges \n self.u[0,0,:] = -self.u[0,1,:]\n self.u[0,-1,:] = -self.u[0,-2,:]", "def _velocity_position(self, particle, dim, p_nd):\n\n new_velocity = (self.w * particle.velocity[dim]) \\\n + (self.c1 *\n (particle.pbest_position[dim] - particle.position[dim])) \\\n + (self.c2 * (self.gbest_position[dim] - particle.position[dim])) \\\n + (self.c3 * (p_nd - particle.position[dim]))\n\n new_velocity = min(\n self._vmax,\n max(-self._vmax, new_velocity)\n )\n\n new_position = min(\n self._bounds[1],\n max(self._bounds[0], particle.position[dim] + new_velocity)\n )\n\n return new_velocity, new_position", "def VelPlanet (self, deltaT):\n\n for planet in self.planets:\n velocity = planet.velocity + (planet.acceleration * deltaT)\n planet.velocity = velocity #Each body's resulting velocity is updated to the body's information defined in the Particle class.", "def evolve(self, dt):\n TIMESTEP = 1e-5\n num_steps = int(dt / TIMESTEP)\n\n for _ in range(num_steps):\n for particle in self.particles:\n # Calculates direction\n norm = (particle.x ** 2 + particle.y ** 2) ** 0.5\n v_x, v_y = -particle.y / norm, particle.x / norm\n\n # Calculates displacement\n dx = TIMESTEP * 
particle.angular_velocity * v_x\n dy = TIMESTEP * particle.angular_velocity * v_y\n\n particle.x += dx\n particle.y += dy", "def get_velocity(self):\n return self.momentum/self.mass", "def calcVelocity(self, iteration):\n self.setIteration(iteration)\n if self.parallelization_mode == \"serial\":\n # calculate and set coordinates\n for this_bin in iteration:\n for this_segment in this_bin:\n self.calcSegmentVelocities(this_segment)\n \n else:\n sys.stderr.write(\"ERROR: Velocity calculation is only available in serial mode\")\n sys.exit(-1)", "def compute_step(self, step):\n #print(self.velocity)\n self.position += step * self.velocity\n #print(self.position)", "def vel(z, c = cp.cc.c_light_cm_s/1e5):\n # return z*c/(1+z)\n return c*((1+z)**2-1)/((1+z)**2+1)", "def physics(self):\n\n self.v_y += self.a_y * self.dt # v =at\n dy = self.v_y * self.dt # x = vt\n self.rect.move_ip(0, -dy)", "def define_ufl_velocity_equation(self):\n\n if hasattr(self, 'f1'):\n return None\n\n if self.config['material']['type'] == 'viscous':\n self.f1 = 0\n return None\n\n if not self.config['formulation']['time']['unsteady']:\n self.f1 = 0\n return None\n\n theta = self.config['formulation']['time']['theta']\n dt = self.config['formulation']['time']['dt']\n f1 = self.displacement - self.displacement0 \\\n - dt*(theta*self.velocity + (1.0 - theta)*self.velocity0)\n\n self.f1 = dlf.dot(self.test_vector, f1)*dlf.dx\n\n return None", "def update_E(self):", "def advance(self): \n self.center.x = self.center.x + self.velocity.dx\n self.center.y = self.center.y + self.velocity.dy", "def box_collision_info(self):\r\n position = np.zeros((self.Npart,3)) # antall part, dim, iterasjoner\r\n position[:,:] = np.random.uniform(0,1e-6, size = (self.Npart,3))\r\n velocity = np.zeros((self.Npart,3))\r\n velocity[:,:] = np.random.normal(0,self.sigma,size = (self.Npart,3))\r\n\r\n part_collided = 0\r\n part_escaped = 0\r\n momentum = 0\r\n\r\n print 'engine started'\r\n for i in xrange(1,self.n):\r\n #collision\r\n position += velocity*dt\r\n l_hole = position[:,0:2] > self.L/4\r\n h_hole = position[:,0:2] < (3*self.L)/4\r\n pos_xy = np.logical_and(l_hole, h_hole)\r\n pos_xy = np.logical_and(pos_xy[:,0], pos_xy[:,1])\r\n pos_z = position[:,2] < 0\r\n esc_part = np.logical_and(pos_z, pos_xy)\r\n\r\n #velocity[esc_part] = velocity[esc_part]\r\n part_escaped += np.sum(esc_part)\r\n\r\n for j in xrange(0,3):\r\n impact_wall_pos = np.logical_and(position[:,j] > 0,\r\n position[:,j] < self.L)\r\n velocity[np.logical_not(impact_wall_pos),j] = -velocity[\r\n np.logical_not(impact_wall_pos),j]\r\n\r\n\r\n if j == 0:\r\n part_collided += np.sum(np.logical_not(impact_wall_pos),j)\r\n momentum += np.sum(2*self.m*abs(velocity[np.logical_not(\r\n impact_wall_pos),j]))\r\n\r\n\r\n\r\n position[position < 0] = 0\r\n position[position >self.L] = self.L\r\n\r\n particle_collided = part_collided/2\r\n return position, velocity,part_escaped, impact_wall_pos, particle_collided, momentum", "def verletIntegration(self):\n for atom in range(0, self.numAtoms):\n \n # Update velocities\n self.atoms[atom].vx += (self.atoms[atom].fx/self.m)*self.dt\n self.atoms[atom].vy += (self.atoms[atom].fy/self.m)*self.dt\n self.atoms[atom].vz += (self.atoms[atom].fz/self.m)*self.dt\n \n \n # Update positions\n newX = self.atoms[atom].x + self.atoms[atom].vx*self.dt\n newY = self.atoms[atom].y + self.atoms[atom].vy*self.dt\n newZ = self.atoms[atom].z + self.atoms[atom].vz*self.dt\n\n # Update current positions (applying PBC)\n if newX < 0:\n self.atoms[atom].x = newX + 
self.lbox\n elif newX > self.lbox:\n self.atoms[atom].x = newX - self.lbox\n else:\n self.atoms[atom].x = newX\n \n if newY < 0:\n self.atoms[atom].y = newY + self.lbox\n elif newY > self.lbox:\n self.atoms[atom].y = newY - self.lbox\n else:\n self.atoms[atom].y = newY\n \n if newZ < 0:\n self.atoms[atom].z = newZ + self.lbox\n elif newZ > self.lbox:\n self.atoms[atom].z = newZ - self.lbox\n else:\n self.atoms[atom].z = newZ", "def updatePos(self):\n self.timeDriving +=1\n self.pos[0] += self.vx\n self.pos[1] += self.vy", "def compute_step(self, step):\r\n self.position += step * self.velocity\r\n self.solpos.append(np.copy(self.position)) \r\n self.solvel.append(np.copy(self.velocity)) \r\n self.solvel_mag.append(np.linalg.norm(np.copy(self.velocity)))", "def updateHUD(self, x, y, pDeg, vMag, vDeg, aMag, aDeg, components, fps):\n self._xPosition = x\n self._yPosition = y\n self._positionDegree = pDeg\n self._velocityMag = vMag\n self._velocityDegree = vDeg\n self._accelerationMag = aMag\n self._accelerationDegree = aDeg\n self.thrusters = filter(lambda c: isinstance(c, Thruster), components)\n self.SASmodules = filter(lambda c: isinstance(c, SAS), components)\n\n graph.drawText((10, 10), \"X Position: \"\n + str(\"{:10.4f}\".format(self._xPosition))\n + \" m\", self._font, (255, 0, 0))\n graph.drawText((10, 30), \"Y Position: \"\n + str(\"{:10.4f}\".format(self._yPosition))\n + \" m\", self._font, (255, 0, 0))\n graph.drawText((10, 50), \"Nose Degree: \"\n + str(\"{:10.4f}\".format(self._positionDegree))\n + \" degrees\", self._font, (255, 0, 0))\n graph.drawText((10, 70), \"Velocity Magnitude: \"\n + str(\"{:10.4f}\".format(self._velocityMag))\n + \" m/s\", self._font, (255, 0, 0))\n graph.drawText((10, 90), \"Velocity Degree: \"\n + str(\"{:10.4f}\".format(self._velocityDegree))\n + \" degrees\", self._font, (255, 0, 0))\n graph.drawText((10, 110), \"Acceleration Magnitude: \"\n + str(\"{:10.4f}\".format(self._accelerationMag))\n + \" m/s^2\", self._font, (255, 0, 0))\n graph.drawText((10, 130), \"Acceleration Degree: \"\n + str(\"{:10.4f}\".format(self._accelerationDegree))\n + \" degrees\", self._font, (255, 0, 0))\n\n numThruster = 0\n for thruster in self.thrusters:\n numThruster = numThruster + 1\n graph.drawText((10, 130 + numThruster*20), \"Thruster Module \"\n + str(numThruster) + \" Fuel Remaining: \"\n + str(\"{:10.0f}\".format(thruster.fuel))\n + \" Liters\", self._font, (255, 0, 0))\n\n numSAS = 0\n for sas in self.SASmodules:\n numSAS = numSAS + 1\n graph.drawText((10, 130 + numThruster*20 + numSAS*20),\n \"SAS Module \" + str(numSAS) + \" Fuel Remaining: \"\n + str(\"{:10.0f}\".format(sas.fuel))\n + \" Liters\", self._font, (255, 0, 0))\n\n graph.drawText((10, 150 + numThruster*20 + numSAS*20),\n \"FPS: \"\n + \"{:0.3f}\".format(fps), self._font, (255, 0, 0))", "def _velocity_verlet(vel_update, pos_update=update.PositionUpdate()):\n return Symmetric([vel_update, pos_update])", "def _compute_solar_torque(self):\n pass", "def cal_pt(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for pt routine)')\n\n self.pt =math.sqrt(self.px**2+self.py**2)", "def calcMomentumFromVelocity(self):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the particle momentum from velocity.\")\n values = {}\n for direction in self.v.order:\n gamma = self.calcLorentzGammaFromVelocity(direction)\n values[direction] = getattr(self.v,direction)*gamma*self.mass\n 
self.setMomentum(Cartesian3DVector(**values))\n return self.getMomentum()", "def __init__(self):\n self.position = Vector2()\n self.velocity = Vector2()\n self.update_parameters()\n self.mass = 0.18 # Mass of Sphero robot in kilograms", "def update(self):\n # Update the decimal position of the beam. \n self.x += self.settings.laser_speed\n # Update the rect position.\n self.rect.x = self.x", "def update(self, u_vector, increment = True):\n if increment:\n # Move the prototype closer to input vector\n self.p_vector = self.p_vector + self.epsilon * (u_vector - self.p_vector)\n else:\n # Move the prototype away from input vector\n self.p_vector = self.p_vector - self.epsilon * (u_vector - self.p_vector)", "def update_current_velocity(self, target_velocity, n):\r\n a=3/float(n)\r\n b=1-a\r\n self.current_velocity=[a*tv+b*cv for (tv,cv) in zip(target_velocity, self.current_velocity)]", "def E(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating R\", file=self.logfile)\n\n\n TAE = toeplitz(self.A*self.e2[:self.P+1], np.zeros(self.P+1))\n TA = toeplitz(self.A, np.zeros(self.P+1))\n M = np.dot(TAE.transpose(), TA)\n res = toeplitz(np.concatenate([M[:,0], np.zeros((self.L_h-self.P-1))]),\n np.concatenate([M[0,:], np.zeros((self.L_h-self.P-1))]))\n res[-self.P:, -self.P:] = M[1:,1:]\n res = res*np.array([self.e2]).transpose()\n self.R = self.la*self.sigma2*np.linalg.inv(self.la*np.eye(self.L_h) + self.sigma2*res)\n\n\n\n print(\"\", file=self.logfile)\n print(\"Updating mu\", file=self.logfile)\n self.mu = np.dot(self.R, self.h)/self.sigma2\n\n\n # Propagate\n self._propagate_mu()\n self._propagate_R()", "def cal_eta(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for eta routine)')\n \n theta=math.acos(self.pz/math.sqrt(self.px**2+self.py**2+self.pz**2))\n self.eta=-math.log(math.tan(theta/2.0))", "def __init__(self):\n self.center = Point()\n self.velocity = Velocity()", "def updateVelocity(self, glob, latency):\n # Velocity equation parameters that control efficacy and behavior of the PSO, selected arbitrarily by us \n omega= .40 # coefficient for influence of currect velocity\n psi_loc= 0.30 # coefficient for influence of local best\n psi_glob= 0.30 # coefficient for influence of global best \n #calculates random weights between 0 and 1 (non-inclusive) for influence of individual (local) or social (global) best\n randLocalWeight= .01*random.randrange(-100,100,1)\n randGlobalWeight= .01*random.randrange(-100,100,1)\n \n #multiplies weights with best vectors (glob is the Swarm object) and current velocity to get new velocity\n #function below comes from wikipedia.org/wiki/Particle_swarm_optimization\n self.velocity= (omega*self.velocity + psi_loc*(self.bestXYZ[0:2] - self.position)*randLocalWeight + psi_glob*(np.array(glob.overBestPos)-self.position)*randGlobalWeight)*latency\n # latency multiplies velocity by #time-steps until update", "def create_particle(self,r,v=(0.0,0.0,0.0)):\n self.r[self.n] = r\n self.m[self.n] = self.m[self.n-1] \n self.v[self.n] = v\n self.n = self.n+1\n self.rebuild_lists()", "def accelerate(self):\n\t\tself.velocity += self.direction * self.ACCELERATION" ]
[ "0.72924954", "0.6944585", "0.66634274", "0.65291256", "0.6407229", "0.640443", "0.6364964", "0.6361303", "0.6322322", "0.63013875", "0.627813", "0.62754977", "0.62680924", "0.626102", "0.62548554", "0.6244272", "0.62382376", "0.62382376", "0.62364995", "0.61982566", "0.61757505", "0.61459625", "0.61380273", "0.6116767", "0.6103645", "0.60913616", "0.60899705", "0.60511184", "0.60479236", "0.6041662", "0.6023521", "0.6020801", "0.602067", "0.6014098", "0.5998692", "0.5986179", "0.5985838", "0.59855306", "0.5970554", "0.59656334", "0.59638286", "0.5950824", "0.5941412", "0.59224373", "0.5914769", "0.59142125", "0.59037733", "0.5900259", "0.58804095", "0.5877925", "0.5867588", "0.58661", "0.5860639", "0.58591163", "0.5858563", "0.5841062", "0.58394116", "0.58292013", "0.5817663", "0.5800447", "0.579224", "0.5765289", "0.57590026", "0.5755484", "0.5739347", "0.573627", "0.5735436", "0.57021296", "0.56838167", "0.5680009", "0.5676024", "0.5674384", "0.56705284", "0.5666485", "0.56547296", "0.56440425", "0.56334364", "0.5628806", "0.5624036", "0.56189907", "0.5613758", "0.56095344", "0.5606799", "0.56061494", "0.5604489", "0.5603398", "0.5602622", "0.5601356", "0.5600733", "0.56004", "0.55970556", "0.5590147", "0.55900913", "0.5585148", "0.558248", "0.5581609", "0.5578668", "0.5578481", "0.55684036", "0.5563389" ]
0.64788157
4
Method that updates the particle's velocity and position with the Euler Forward method
def UpdateForward(self, deltaT): self.position += self.velocity * deltaT self.velocity += self.acceleration * deltaT
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def EulerN(particle,dt):\n particle.acc = particle.acc\n particle.vel = particle.vel + particle.acc*dt\n particle.pos = particle.pos + particle.vel*dt\n\n return particle", "def update_position(self):\n self.position[0] += self.velocity[0]\n self.position[1] += self.velocity[1]", "def apply_velocity(self):\n self.position.data += self.velocity.data", "def update(self):\r\n # change in position -> velocity\r\n self.position += self.velocity\r\n # change in celocity -> acceleration\r\n self.velocity += self.acceleration\r\n \r\n # if velocity magnitude is higher than the defined limit set the velocity \r\n # magnitude to max speed\r\n if np.linalg.norm(self.velocity) > self.max_speed:\r\n self.velocity = self.velocity / np.linalg.norm(self.velocity) * self.max_speed\r\n \r\n # reset the acceleration\r\n self.acceleration = Vector(*np.zeros(2))", "def forward(self):\n #print('forward\\r')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def _update_positions(self):\n self._velocities += self._accelerations * self.time_step\n self._positions += self._velocities * self.time_step", "def forward(self):\n print('forward')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def updatePosition(self):\n\n #For this update, a time-step of 1 is assumed ->Change Code if not true\n self.position = [self.position[0] + self.velocity[0], self.position[1]+self.velocity[1]]", "def update(self, delta):\n # Computes new positions\n for part in self.particles:\n part.set_xyvxvy(self.runge_kutta(part.to_y(), 0, delta))", "def update(self, timestep):\n # force_x, force_y = self.pending_force_update\n # vel_x, vel_y = self.velocity\n # vel_x += force_x / self.mass * TIMESTEP\n # vel_y += force_y / self.mass * TIMESTEP\n # # Update positions\n # pos_x, pos_y = self.position\n # pos_x += vel_x * TIMESTEP\n # pos_y += vel_y * TIMESTEP\n # # vel_abs_old = math.sqrt(self.velocity.x ** 2 +\n # # self.velocity.y ** 2)\n # # vel_abs_new = math.sqrt(vel_x ** 2 + vel_y ** 2)\n # # if self.name == \"earth\":\n # # print(math.sqrt(vel_x ** 2 + vel_y ** 2))\n # # multiplicator = (vel_abs_old / vel_abs_new)**2\n # # if self.name == \"earth\": print(multiplicator)\n # self.position = Position(pos_x, pos_y)\n # self.velocity = Direction(vel_x, vel_y)\n # # body.goto(body.px*SCALE, body.py*SCALE)\n # # body.dot(3)\n\n self.velocity += self.pending_force_update / self.mass * timestep\n self.position += self.velocity * timestep\n self.pending_force_update = None", "def update(self):\n self.velocity = [math.cos(self.angle), - math.sin(self.angle)]\n self.velocity = [self.speed * i for i in self.velocity]\n\n super().update()", "def _update_position(self, delta_t):\n\n self.pos += self.vel*delta_t", "def main():\n \n # Particle in SHO - c.f. Mocz & Succi (2015) Fig. 2\n # parameters\n n = 100 # number of particles\n dt = 0.02 # timestep\n nt = 100 # number of timesteps\n nt_setup = 400 # number of timesteps to set up simulation\n n_out = 25 # plot solution every nout steps\n b = 4 # velocity damping for acquiring initial condition\n m = 1/n # mass of SPH particle ( m * n = 1 normalizes |wavefunction|^2 to 1)\n h = 40/n # smoothing length\n t = 0. 
# time\n\n # plot potential\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n fig = plt.plot(xx, 0.5*xx**2, linewidth=5, color=[0.7, 0.7, 0.9])\n \n # initialize\n x = np.linspace(-3.0, 3.0, num=n)\n x = np.reshape(x,(n,1))\n u = np.zeros((n,1))\n \n rho = density( x, m, h )\n P = pressure( x, rho, m, h )\n a = acceleration( x, u, m, rho, P, b, h )\n\n # get v at t=-0.5*dt for the leap frog integrator using Euler's method\n u_mhalf = u - 0.5 * dt * a\n\n # main loop (time evolution)\n for i in np.arange(-nt_setup, nt): # negative time (t<0, i<0) is used to set up initial conditions\n\n # leap frog\n u_phalf = u_mhalf + a*dt\n x = x + u_phalf*dt\n u = 0.5*(u_mhalf+u_phalf)\n u_mhalf = u_phalf\n if (i >= 0):\n t = t + dt\n print(\"%.2f\" % t)\n \n if (i == -1 ): # switch off damping before t=0\n u = np.zeros((n,1)) + 1.0\n u_mhalf = u\n b = 0 # switch off damping at time t=0\n \n # update densities, pressures, accelerations\n rho = density( x, m, h )\n P = pressure( x, rho, m, h )\n a = acceleration( x, u, m, rho, P, b, h)\n \n # plot solution every n_out steps\n if( (i >= 0) and (i % n_out) == 0 ):\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n rr = probeDensity(x, m, h, xx)\n rr_exact = 1./np.sqrt(np.pi) * np.exp(-(xx-np.sin(t))**2/2.)**2\n fig = plt.plot(xx, rr_exact, linewidth=2, color=[.6, .6, .6])\n fig = plt.plot(xx, rr, linewidth=2, color=[1.*i/nt, 0, 1.-1.*i/nt], label='$t='+\"%.2f\" % t +'$')\n # plot the t<0 damping process for fun\n if( i==-nt_setup or i==-nt_setup*3/4 or i==-nt_setup/2 ):\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n rr = probeDensity(x, m, h, xx)\n fig = plt.plot(xx, rr, linewidth=1, color=[0.9, 0.9, 0.9])\n \n plt.legend()\n plt.xlabel('$x$')\n plt.ylabel('$|\\psi|^2$')\n plt.axis([-2, 4, 0, 0.8])\n plt.savefig('solution.pdf', aspect = 'normal', bbox_inches='tight', pad_inches = 0)\n plt.close()", "def update(self):\n self.x += self.vx\n self.y += self.vy", "def particle_velocityV(V,F,dt,Rv,sigma,epsilon,D,N): \n V += dt/2*(particle_forceV(Rv[-1], N, sigma, epsilon, D) + particle_forceV(Rv[-2], N, sigma, epsilon, D))\n return V", "def update(self, delta_time):\n self.velocity = (sin(self.sin_val), cos(self.sin_val))\n self.sin_val += delta_time*self.speed\n translate_indexes(self.velocity, self.points[0], self.top_idx)", "def update_euler(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n kap = (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)\n\n pos = self.pos+delta_t*self.vel\n vel = self.vel+delta_t*kap[1]\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n\n try:\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=True)\n except Collision.CollisionException as col:\n vel = self.vel+col.delta_t*kap[1]\n C, fvel = self.drag_coefficient(col.pos, vel, self.time+col.delta_t, nearest = True)\n col.vel = (self.vel+col.delta_t*(kap[1]+C*fvel))/(1.0+col.delta_t*C)\n raise col\n \n self.time += delta_t\n\n return kap", "def update(self):\r\n t = 1 # time step\r\n # Calculate new position\r\n pos_x = self.pos[0] + self.vel[0]*t + self.acc[0]*t*t\r\n pos_y = self.pos[1] + self.vel[1]*t + self.acc[1]*t*t\r\n if self.screen[0]:\r\n pos_x = pos_x % self.screen[0]\r\n if self.screen[1]:\r\n pos_y = pos_y % self.screen[1]\r\n # Bound velocities by -MAX_VEL and 
MAX_VEL\r\n vel_x = max(min(self.vel[0] + self.acc[0]*t, self.MAX_VEL), -self.MAX_VEL)\r\n vel_y = max(min(self.vel[1] + self.acc[1]*t, self.MAX_VEL), -self.MAX_VEL)\r\n # Don't update position and velocity until end so all values used in\r\n # calculations are previous values\r\n self.pos = [pos_x, pos_y]\r\n self.vel = [vel_x, vel_y]", "def move(self):\r\n for index in range(self.size):\r\n self.values[index] = self.values[index] + self.velocities[index]\r\n \r\n # Adjust values to keep particle inside boundaries.\r\n if self.values[index] < Particle.MIN_VALUE:\r\n self.values[index] = (-self.values[index] % Particle.MAX_VALUE)\r\n elif self.values[index] > Particle.MAX_VALUE:\r\n self.values[index] = (self.values[index] % Particle.MAX_VALUE)", "def velocity_step(self, dt, force):\r\n self.vel += dt * force / self.mass", "def update(self):\n\n\t\tfor i, particle in enumerate(self.particles):\n\t\t\tparticle.move()\n\t\t\t#if self.acceleration:\n\t\t\t#\tparticle.accelerate(self.acceleration)\n\t\t\tself.bounce(particle)\n\t\t\tfor particle2 in self.particles[i+1:]:\n\t\t\t\tcollide(particle, particle2)", "def euler_forward(self, u, v, I):\n # The constants 0.04, 5, 140 are the one fits all parameters from Simple Model of Spiking Neurons E. Izhikevich.\n # These constants are justified when simulating large networks of neurons.\n # TODO: Parameterise the four constants 0.04, 5, 140.\n _v = v + self.dt * (0.04 * v ** 2 + 5 * v + 140 - u + I)\n _u = u + self.dt * (self.a * (self.b * v - u))\n return _v, _u", "def velocity(self, X, Y):\n self.u = self.Vinf * np.ones_like(X)\n self.v = np.zeros_like(X)", "def update(self, time_step):\n a = [0,0]\n F = self.force()\n for i in [0,1]: # We have to update x and y\n a[i] = self.force()[i] / self.mass\n self.velocity[i] = self.velocity[i] + a[i]*time_step\n self.position[i] = self.position[i] + self.velocity[i]*time_step # I'm lazy\n self.turtle.goto(self.position) # Comment out the goto if you need the simulation to run really fast; you won't get the animation", "def advance(self): \n self.center.x = self.center.x + self.velocity.dx\n self.center.y = self.center.y + self.velocity.dy", "def update(self, u_vector, increment = True):\n if increment:\n # Move the prototype closer to input vector\n self.p_vector = self.p_vector + self.epsilon * (u_vector - self.p_vector)\n else:\n # Move the prototype away from input vector\n self.p_vector = self.p_vector - self.epsilon * (u_vector - self.p_vector)", "def update(self, dt):\n\n self.velocity += self.acceleration * dt\n self.position += self.velocity * dt \n\n self.ship.rotation = atan2(-self.velocity[1], self.velocity[0]) * 180 / pi #self.angularPosition", "def update(self, force=False) -> None:\n self.acceleration = self.calculate_acceleration()\n self.velocity += self.acceleration\n if np.linalg.norm(self.velocity) > 1:\n self.velocity = self.velocity / np.linalg.norm(self.velocity)\n\n if force:\n self.position += self.velocity\n else:\n self.new_position = self.position + self.velocity", "def do_physics(self, delta_time):\n self.x += (self.vel_x * delta_time)\n self.y += (self.vel_y * delta_time)", "def new_velocity(self):\n self.velocity = self.vafter", "def accelerate(self):\n\t\tself.velocity += self.direction * self.ACCELERATION", "def change_velocity(self, delta):\n self.velocity += delta", "def change_velocity(self, delta):\n self.velocity += delta", "def update(self):\n self.setVector(0.15, 0.0)", "def run(self):\r\n\r\n self.tick = self.tick + 1\r\n print 'Particle tick=:', self.tick", "def 
forwardEuler(self,un, tn):\n return un + self.dt*self.f(un, tn)", "def update(self, dt):\n for p in self.listOfParticles:\n if self.willcollide(p, can.C, can.R):\n p1 = p.P\n pc, u = p.findpc()\n vp = p1.vec(p1.proj(pc.vec(can.C)))\n p1m = p1.__add__(vp.scale(vp.len()))\n v2 = pc.vec(p1m).scale(p.V.len())\n p.V = v2\n p.P = pc + p.V.scale(p.V.len()*(1-u))\n else:\n p.P = p.step(dt)", "def updatePos(self):\n self.timeDriving +=1\n self.pos[0] += self.vx\n self.pos[1] += self.vy", "def call_odeint(particle_object,dt,omega):\n \n# print \n# print '##########################'\n# \n r = particle_object.position\n v = particle_object.velocity \n beta = particle_object.beta \n# \n# print 'Initial parameters'\n# print 'r', r\n# print 'v', v\n# print 'beta',beta\n# print \n# beta = 0.15\n #particle_radius = particle_object.radius\n \n# ### updating the acceleration due to planet history with the position value before it is moved so that it is the acceleration due to the planet at the position that led to this nett acceleration. \n# particle_object.update_planet_acceleration_history(AccelerationDueToPlanet(r))\n \n u0 = np.array([r[0],r[1],r[2],v[0],v[1],v[2]])\n \n \n sol = odeint(acceleration_for_odeint, u0, np.array([0,dt]), args=(beta, omega,planet_pos)) ## The solution in the from [rx,ry,rz,vx,vy,vz]\n \n r_new = np.array([sol[1,0],sol[1,1],sol[1,2]])\n v_new = np.array([sol[1,3],sol[1,4],sol[1,5]]) \n \n# print 'New parameters'\n# print 'r', r_new\n# print 'v', v_new\n#\n# print \n \n #particle_object.update_position(r_new) \n #particle_object.update_velocity(v_new)\n \n ### add one to the particle's age \n particle_object.update_age() \n particle_object.update_timestep_history(timestep)\n \n #last_uvect = sol[1,:]\n \n #return last_uvect\n \n #return 0,0\n return r_new,v_new", "def update_pos(self, forces, dt):\n\n for particle, force in zip(self.particles, forces):\n particle.leap_position(dt, force)\n return None", "def _update_position(self):\r\n for tstep in range(0, self.MAX_VELOCITY + 1):\r\n t = tstep / self.MAX_VELOCITY\r\n pos = self.position + np.round(self.velocity * t).astype(np.int16)\r\n if self._is_wall(pos):\r\n self._random_start_position()\r\n self.velocity = np.array([0, 0], dtype=np.int16)\r\n return\r\n if self._is_finish(pos):\r\n self.position = pos\r\n self.velocity = np.array([0, 0], dtype=np.int16)\r\n return\r\n self.position = pos", "def update_vel(self, forces, dt):\n\n for particle, force in zip(self.particles, forces):\n particle.leap_velocity(dt, force)\n return None", "def compute_step(self, step):\n #print(self.velocity)\n self.position += step * self.velocity\n #print(self.position)", "def define_ufl_velocity_equation(self):\n\n if hasattr(self, 'f1'):\n return None\n\n if self.config['material']['type'] == 'viscous':\n self.f1 = 0\n return None\n\n if not self.config['formulation']['time']['unsteady']:\n self.f1 = 0\n return None\n\n theta = self.config['formulation']['time']['theta']\n dt = self.config['formulation']['time']['dt']\n f1 = self.displacement - self.displacement0 \\\n - dt*(theta*self.velocity + (1.0 - theta)*self.velocity0)\n\n self.f1 = dlf.dot(self.test_vector, f1)*dlf.dx\n\n return None", "def step(self, f):\n\n NVTBerendsen.scale_velocities(self)\n self.scale_positions_and_cell()\n\n #one step velocity verlet\n atoms = self.atoms\n p = self.atoms.get_momenta()\n p += 0.5 * self.dt * f\n\n if self.fixcm:\n # calculate the center of mass\n # momentum and subtract it\n psum = p.sum(axis=0) / float(len(p))\n p = p - psum\n\n 
self.atoms.set_positions(self.atoms.get_positions() +\n self.dt * p / self.atoms.get_masses()[:,np.newaxis])\n\n # We need to store the momenta on the atoms before calculating\n # the forces, as in a parallel Asap calculation atoms may\n # migrate during force calculations, and the momenta need to\n # migrate along with the atoms. For the same reason, we\n # cannot use self.masses in the line above.\n\n self.atoms.set_momenta(p)\n f = self.atoms.get_forces()\n atoms.set_momenta(self.atoms.get_momenta() + 0.5 * self.dt * f)\n\n\n return f", "def update_velocity(self):\n # Set thruster (up/down) movement\n if self.thrusters:\n self.velocity_y -= self.gravity\n else:\n self.velocity_y += self.velocity_slowing\n\n # Set left movement\n if self.moving_left:\n self.velocity_x -= self.gravity\n else:\n if self.velocity_x < 0:\n self.velocity_x += self.velocity_slowing\n \n # Set right movement\n if self.moving_right:\n self.velocity_x += self.gravity\n else:\n if self.velocity_x > 0:\n self.velocity_x -= self.velocity_slowing", "def update_current(self):\n velocity, horizontal_angle, vertical_angle = self.current_function()\n self.set_current_velocity(velocity, horizontal_angle, vertical_angle)", "def update(self):\n if self.dir == \"r\":\n self.vx = 10\n self.vy = 0\n elif self.dir == \"l\":\n self.vx = -10\n self.vy = 0\n elif self.dir == \"u\":\n self.vx = 0\n self.vy = -10\n elif self.dir == \"d\":\n self.vx = 0\n self.vy = 10\n elif self.dir == \"None\":\n self.vx = 0\n self.vy = 0\n self.x += self.vx\n self.y += self.vy", "def setVelocity(self,v):\n if v is None:\n self.v = Cartesian3DVector()\n else:\n if isinstance(v,Cartesian3DVector):\n self.v = Cartesian3DVector(v.x,v.y,v.z)\n else:\n raise CoordinateVector(\"Initializing a particle with the incorrect velocity vector type.\")", "def update(self):\n\t\tself.x += (self.ai_settings.alien_speed_factor * \n\t\t\t\t\t\t\t\t\tself.ai_settings.fleet_direction)\n\t\tself.rect.x = self.x", "def position_step(self, dt, force):\r\n self.pos += dt * self.vel + dt * dt * force / (2 * self.mass)", "def update_current_velocity(self, target_velocity, n):\r\n a=3/float(n)\r\n b=1-a\r\n self.current_velocity=[a*tv+b*cv for (tv,cv) in zip(target_velocity, self.current_velocity)]", "def edge_velocity(self):\n #reflext x values at x edges\n self.u[1,:,0] = -self.u[1,:,1]\n self.u[1,:,-1] = -self.u[1,:,-2]\n #mirror x values at y edges \n self.u[1,0,:] = self.u[1,1,:]\n self.u[1,-1,:] = self.u[1,-2,:]\n #mirror y values at x edges\n self.u[0,:,0] = self.u[0,:,1]\n self.u[0,:,-1] = self.u[0,:,-2]\n #mirror y values at y edges \n self.u[0,0,:] = -self.u[0,1,:]\n self.u[0,-1,:] = -self.u[0,-2,:]", "def advanceVelocity(self,time,acceleration):\n if not isinstance(acceleration,Cartesian3DVector):\n raise CoordinateException(\"Advancing particle momentum with the incorrect acceleration type.\")\n return self.v + time*acceleration", "def forward(self) -> Vec:\n return (self.emitters[0][1] - self.emitters[0][0]).norm()", "def update_position(self, time_step):\n if not self.gpu:\n self.psi_hat[...] = fft.fftn(self.psi)\n update_position(self.psi_hat, time_step, self.m)\n self.psi[...] 
= fft.ifftn(self.psi_hat)\n else:\n cufft.cufftExecZ2Z(self.psi_plan, self.g_psi.ptr, self.g_psi_hat.ptr, cufft.CUFFT_FORWARD)\n self.g_psi_hat /= self.N\n self.g_mom_func(self.g_psi_hat, np.float64(time_step), np.int64(self.psi_hat.shape[0]), np.int64(self.psi_hat.shape[1]), np.int64(self.psi_hat.shape[2]), block=(8,8,8), grid=tuple([(i+7)/8 for i in self.psi_hat.shape]))\n cufft.cufftExecZ2Z(self.psi_plan, self.g_psi_hat.ptr, self.g_psi.ptr, cufft.CUFFT_INVERSE)", "def velocity(self, X, Y):\n self.u = (self.strength / (2 * np.pi) *\n (X - self.xc) / ((X - self.xc)**2 + (Y - self.yc)**2))\n self.v = (self.strength / (2 * np.pi) *\n (Y - self.yc) / ((X - self.xc)**2 + (Y - self.yc)**2))", "def Verlet2(particle_object, dt,omega):\n \n r = particle_object.position\n v = particle_object.velocity \n beta = particle_object.beta \n particle_radius = particle_object.radius\n particle_mass_kg = ((4.0/3.0)*np.pi*particle_radius**3)*particle_density ## Assuming spherical particles \n \n ### updating the acceleration due to planet history with the position value before it is moved so that it is the acceleration due to the planet at the position that led to this nett acceleration. \n particle_object.update_planet_acceleration_history(AccelerationDueToPlanet(r))\n \n (r_new, v_new) = VerletHope2(r,v,beta,dt,particle_radius,particle_mass_kg)\n \n particle_object.update_position(r_new) \n \n particle_object.update_velocity(v_new)\n \n ### add one to the particle's age \n particle_object.update_age() \n \n particle_object.update_timestep_history(timestep)\n \n return None", "def cal_eta(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for eta routine)')\n \n theta=math.acos(self.pz/math.sqrt(self.px**2+self.py**2+self.pz**2))\n self.eta=-math.log(math.tan(theta/2.0))", "def apply_velocity(self, angles, velocity, phase, x):\r\n \r\n # VX\r\n v=velocity[0]*self.parameters[\"vx_amplitude\"]\r\n d=(x*2-1)*v\r\n if phase:\r\n angles[\"l_thigh_joint\"]+=d\r\n angles[\"l_ankle_joint\"]+=d\r\n angles[\"r_thigh_joint\"]+=d\r\n angles[\"r_ankle_joint\"]+=d\r\n else:\r\n angles[\"l_thigh_joint\"]-=d\r\n angles[\"l_ankle_joint\"]-=d\r\n angles[\"r_thigh_joint\"]-=d\r\n angles[\"r_ankle_joint\"]-=d\r\n\r\n # VY\r\n v=velocity[1]*self.parameters[\"vy_amplitude\"]\r\n d=(x)*v\r\n d2=(1-x)*v\r\n if v>=0:\r\n if phase:\r\n angles[\"l_hip_joint\"]-=d\r\n angles[\"l_foot_joint\"]-=d\r\n angles[\"r_hip_joint\"]+=d\r\n angles[\"r_foot_joint\"]+=d\r\n else:\r\n angles[\"l_hip_joint\"]-=d2\r\n angles[\"l_foot_joint\"]-=d2\r\n angles[\"r_hip_joint\"]+=d2\r\n angles[\"r_foot_joint\"]+=d2\r\n else:\r\n if phase:\r\n angles[\"l_hip_joint\"]+=d2\r\n angles[\"l_foot_joint\"]+=d2\r\n angles[\"r_hip_joint\"]-=d2\r\n angles[\"r_foot_joint\"]-=d2\r\n else:\r\n angles[\"l_hip_joint\"]+=d\r\n angles[\"l_foot_joint\"]+=d\r\n angles[\"r_hip_joint\"]-=d\r\n angles[\"r_foot_joint\"]-=d\r\n \r\n ## VT\r\n #v=velocity[2]*self.parameters[\"vt_amplitude\"]\r\n #d=(x)*v\r\n #d2=(1-x)*v\r\n #if v>=0:\r\n #if phase:\r\n #angles[\"j_pelvis_l\"]=-d\r\n #angles[\"j_pelvis_r\"]=d\r\n #else:\r\n #angles[\"j_pelvis_l\"]=-d2\r\n #angles[\"j_pelvis_r\"]=d2\r\n #else:\r\n #if phase:\r\n #angles[\"j_pelvis_l\"]=d2\r\n #angles[\"j_pelvis_r\"]=-d2\r\n #else:\r\n #angles[\"j_pelvis_l\"]=d\r\n #angles[\"j_pelvis_r\"]=-d\r", "def UpdateCromer(self, deltaT):\n self.velocity += self.acceleration * deltaT\n self.position += self.velocity * deltaT", "def move(self, surface):\n\t\tself.position = 
self.position + self.velocity", "def update(self):\n self.updateCount = self.updateCount + 1\n if self.updateCount > self.updateCountMax:\n\n # update previous positions\n for i in range(self.length - 1, 0, -1):\n self.x[i] = self.x[i - 1]\n self.y[i] = self.y[i - 1]\n\n # update position of player : party lead\n if self.direction == 0:\n self.x[0] = self.x[0] + self.step\n if self.direction == 1:\n self.x[0] = self.x[0] - self.step\n if self.direction == 2:\n self.y[0] = self.y[0] - self.step\n if self.direction == 3:\n self.y[0] = self.y[0] + self.step\n\n self.updateCount = 0", "def propagate(self, t, pos, vel):\n \n if str(t)=='inf':\n print('---------------Warning! (inf value is first element)---------------')\n else:\n # Simple propagation of particles\n pos = pos + vel*t\n \n if self.periodicSideWalls==True:\n # If lateral boundary conditions are periodic we need to\n # calculate where the particle needs to appear after leaving\n # the box (only coordinate x in this case)\n pos[:,0] = pos[:,0] - np.floor(pos[:,0]/self.size_X)*self.size_X\n # This is to see how many times it has crossed the box:\n # np.floor(pos[:,0]/self.size_X)*self.size_X\n # Then this is to account for particles leaving both walls:\n # -np.sign(vel[:,0]) \n \n if self.periodicWalls==True:\n # If boundary conditions are periodic we need to\n # calculate where the particle needs to appear after leaving\n # the box\n pos[:,0] = pos[:,0] - np.floor(pos[:,0]/self.size_X)*self.size_X\n pos[:,1] = pos[:,1] - np.floor(pos[:,1]/self.size_Y)*self.size_Y\n \n return pos", "def update(self):\n self.x = self.x + (self.speed * self.x_direction)\n if self.x > self.unit or self.x < 0:\n self.x_direction = (-1) * self.x_direction\n self.x = self.x + self.x_direction\n self.y = self.y + self.y_direction\n\n if self.y >= unit or self. 
y <= 0:\n self.y_direction = (-1) * self.y_direction", "def advance(self):\r\n #if see if the UFO is almost at the edge of the screen\r\n if (self.center.x >= SCREEN_WIDTH-20 or self.center.y >= SCREEN_HEIGHT-20):\r\n #if it is change the velocity to negative to reverse direction\r\n self.velocity.dx *= -2\r\n self.velocity.dy *= -2\r\n \r\n # set x equal to x plus dx\r\n self.center.x += self.velocity.dx\r\n # set y equal to y plus dy\r\n self.center.y += self.velocity.dy\r\n #draw the flying object at its new point.\r\n self.draw()", "def set_velocity(self):\r\n if self.direction == 'left':\r\n self.x_vel = -2\r\n else:\r\n self.x_vel = 2\r\n\r\n self.y_vel = 0", "def set_velocity_x(self):\n self.__dx *= -1", "def update(self):\n self.x += (self.ai_settings.alien_speed_factor *\n self.ai_settings.fleet_direction)\n self.rect.x = self.x", "def tick(self, dt: float):\n self.x_pos += dt * self.x_velocity\n self.y_pos += dt * self.y_velocity\n\n self.x_velocity += dt * self.x_acceleration\n self.y_velocity += dt * self.y_velocity", "def F(self, points):\n\n # Perform calculation in the frame parallel to the ellipse\n # and then rotate it back\n rotatedP = self.rotatePoint(points)\n\n surface_pot = np.sqrt((rotatedP[..., 0]/self.axisA[:, None])**2 + (rotatedP[..., 1]/self.axisB[:, None])**2)\n\n # Force = -grad potential\n # swapaxes is necesary to make the created\n # force array of shape [N_samples, N-particles, 2]\n\n surface_force = - 1/surface_pot[..., None]*np.array([\n rotatedP[..., 0]/self.axisA[:, None]**2, \n rotatedP[..., 1]/self.axisB[:, None]**2\n ]).swapaxes(0, 2).swapaxes(0, 1) \n\n # Rotate back to the original frame\n surface_force = self.rotatePoint(surface_force, -1)\n\n # Force is scaled with A_conf and applied only if the particle is outside\n return self.A_conf*surface_force*self.isOutside(points)[:, :, None]", "def update(self):\n # Update the decimal position of the beam. \n self.x += self.settings.laser_speed\n # Update the rect position.\n self.rect.x = self.x", "def compute_step(self, step):\r\n self.position += step * self.velocity\r\n self.solpos.append(np.copy(self.position)) \r\n self.solvel.append(np.copy(self.velocity)) \r\n self.solvel_mag.append(np.linalg.norm(np.copy(self.velocity)))", "def update(self):\r\n self.x += (self.invasion_settings.alien_speed *\r\n self.invasion_settings.fleet_direction)\r\n self.rect.x = self.x", "def tick(self, dt):\n self.pos += self.vel * dt", "def physics(self):\n\n self.v_y += self.a_y * self.dt # v =at\n dy = self.v_y * self.dt # x = vt\n self.rect.move_ip(0, -dy)", "def fv(X,Y,dx,dy,r2,i,append,L,N,U,dt,close_list,Nlist,vel_verlet_on,R,menu,submenu,n1,grid,G,wallcount,X2):\r\n\r\n \"\"\"JV: append is a boolean. 
If it's true, adds the energy to our list, if it isn't, it doesn't.\r\n We do that because in some cases we will call the algorithm more times than the actual step number (and\r\n we only want to sum the value T/dt times), this is needed in the velocity-Verlet algorithm, that we call the fv()\r\n function one more time than needed just to start the loop.\"\"\"\r\n\r\n# L = self.param[2]\r\n#\r\n# N = self.particles.size\r\n\r\n #For computing all the distances I use a trick with the meshgrid function,\r\n #see the documentation on how this works if you dont see it.\r\n\r\n \"\"\"JV: X is an array that contains each position, mx is an nxn array that each column is the position of one particle (so it's a matrix\r\n that has n X rows) and mxt is the same but tranposed (so it's a matrix of n X columns)\"\"\"\r\n\r\n \"\"\"\r\n UPDATE: This block of code is commented because now it's done in a loop inside solve_verlet() (due to Numba...).\r\n Looks a little bit messy but if Numba allowed me to call the np.meshgrid() function we would do this here. Sorry, but I like to keep the comment to remind me that.\r\n \"\"\"\r\n # MX, MXT = np.meshgrid(X,X,copy=False)\r\n # MY, MYT = np.meshgrid(Y,Y,copy=False)\r\n\r\n #JV: So dx is a nxn simetric array with 0 in the diagonal, and each position is the corresponding distance between the particles,\r\n # so the position [1,2] is the distance between partcle 1 and 2 (x1-x2), and so on\r\n # dx = MXT - MX\r\n # dx = dx\r\n\r\n # dy = MYT - MY\r\n # dy = dy\r\n\r\n # r2 = np.square(dx)+np.square(dy)\r\n\r\n # if(menu == \"Free!\"):\r\n # #JV: We do this to get the actual distance in the case of the \"Free!\" simulation, in which there is no elastic collision between the particles and the boundaries\r\n # dx_v2 = (np.abs(dx.copy())-1*L)\r\n # r2_v2 = dx_v2**2+dy**2\r\n # dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n # dy_v2 = (np.abs(dy.copy())-1*L)\r\n # r2_v2 = dx**2+dy_v2**2\r\n # dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n # r2_v2 = dx_v2**2+dy_v2**2\r\n # dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n # dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n\r\n dUx = 0.\r\n dUy = 0.\r\n utot = np.zeros((N))\r\n f = np.zeros((N,2))\r\n\r\n for j in range(0,N):\r\n dUx = 0.\r\n dUy = 0.\r\n u = 0.\r\n\r\n #JV: we now calculate the force with only the Nlist closest particles\r\n for k in range(0,Nlist):\r\n c = int(close_list[j][k])\r\n\r\n #In the force computation we include the LJ and the walls (JV: in the verlet case). 
I truncate the interaction at self.R units of lenght,\r\n #I also avoid distances close to 0 (which only should affect the diagonal in the matrix of distances)\r\n #All these conditions are included using the numpy.where function.\r\n #If you want to include more forces you only need to add terms to these lines.\r\n\r\n if(vel_verlet_on == True):\r\n if((r2[j,c] < 4*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n dUx = dUx + dLJverlet(dx[j,c],r2[j,c],R[j],R[c])\r\n dUy = dUy + dLJverlet(dy[j,c],r2[j,c],R[j],R[c])\r\n # print(dUx,dUy,dx[j,c],r2[j,c],R[j],R[c])\r\n#JV: COMMENTED PART BECAUSE NUMBA HAS PROBLEMS WITH THIS BLOCK OF CODE THAT DOES THE CALCULATION IN THE VERLET ALGORITHM, NOW IT ONLY WORKS WITH THE VELOCITY VERLET, TO FIX\"\r\n# else:\r\n# if((r2[j,c] < 4*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n# dUx = dUx + dLJverlet(dx[j,c],r2[j,c],R[j],R[c]) - dwalls([X[j],Y[j]],param)\r\n# dUy = dUy + dLJverlet(dy[j,c],r2[j,c],R[j],R[c]) - dwalls([X[j],Y[j]],param)\r\n\r\n #JV: We add the energy in the corresponding array in both cases, remember that the verlet algorithm will include the energy from the walls\r\n # and that will be visible in fluctuations on the energy\r\n if(vel_verlet_on == True):\r\n if((r2[j,c] < 2*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n u = u + LJverlet(r2[j,c],R[c],R[j])\r\n# else:\r\n# u = u + walls([X[j],Y[j]])#JV: TO CHANGE; NOW ONLY WORKS WITH VEL_VERLET_ON\r\n# else:\r\n# if((r2[j,c] < 2*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n# u = u + LJverlet(r2[j,c],R[c],R[j],param)\r\n#\r\n# if((X[j]**2+Y[j]**2) > (0.8*L)**2):\r\n# u = u + walls([X[j],Y[j]],param)\r\n #JV: COMMENTED FOR NOW\r\n\r\n #JV: If the argument it's True, we will append the energy to our corresponding array\r\n if(append == True):\r\n utot[j] = u\r\n\r\n f[j,:] = f[j,:]+np.array([dUx,dUy])\r\n\r\n if(append == True):\r\n U[int(i)] = np.sum(utot) #JV: Finally, we add the total energy so we have the global energy in a step of time\r\n\r\n return f", "def step_velocity(self, force, timestep):\n self.velocity = self.velocity + (force * timestep)/self.mass", "def _update_vel(self) -> None:\n self.state[:, :, Boids.Attr.VEL] += self.state[:, :, Boids.Attr.ACC]\n self.state[:, :, Boids.Attr.VEL] = maglim(\n self.state[:, :, Boids.Attr.VEL], self.max_vel)", "def update(self):\n\n self.accelerate(0, GRAVITY)\n if (not self.ground_detector.is_on_ground()):\n self.velocity.x *= AIR_RESISTANCE\n else:\n self.velocity.x *= FRICTION\n\n # Updating physics object attributes\n self.move()", "def __init__(self,E,px,py,pz):\n Particle.__init__(self)\n self.E=float(E)\n self.px=float(px)\n self.py=float(py)\n self.pz=float(pz)\n self.cal_pt()\n self.cal_phi()\n self.cal_eta()\n #self.cal_mass()\n #print self.E,self.px,self.py,self.pz\n #print self.pt,self.phi,self.eta", "def AddEarthVelocity(self, ds):\n self.IsEarthVelocity = True\n self.EarthVelocity = ds", "def calcVelocityFromMomentum(self):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the particle velocity from momentum.\")\n values = {}\n for direction in self.p.order:\n gamma = self.calcLorentzGammaFromMomentum(direction)\n values[direction] = getattr(self.p,direction)/(gamma*self.mass)\n self.setVelocity(Cartesian3DVector(**values))\n return self.getVelocity()", "def update_position(self, dt):\n self._x += self._vx * dt\n self._y += self._vy * dt\n\n # TODO: Add timer countdown if infected and logic for dying/recovering.", "def update(self):\r\n #calling animate function\r\n 
self.animate()\r\n #set vertical acceleration according to gravity constant\r\n self.acc = vec(0,PLAYERGRAV)\r\n \r\n #setting acceleration based on direction\r\n if self.moveLeft and self.rect.left > 0:\r\n self.acc.x = -PLAYERACC\r\n elif self.moveRight and self.rect.right < WINDOWWIDTH:\r\n self.acc.x = PLAYERACC\r\n\r\n #accelerate in x direction and slow down due to friction\r\n self.acc.x += self.vel.x*PLAYERFRIC\r\n self.vel += self.acc\r\n self.pos += self.vel + 0.5 *self.acc\r\n\r\n #position referenced from bottom of player\r\n self.rect.midbottom = self.pos", "def update(self):\n self.x += (self.settings.alien_speed * self.settings.fleet_direction)\n self.rect.x = self.x", "def particle_initial_velocity(fignr,N,D,T,m,dim,kb):\n V = np.zeros((3,N))\n V[0:dim,:] = np.random.normal(0, kb*T/m, (dim,N))# / np.sqrt(T/(kb*m))\n plotfunctions.velocity(fignr,N,V)\n # Typical speed for particles\n return V", "def __init__(self, mass, radius, position, velocity):\n self.mass = mass\n self.radius = radius\n self.position = position\n self.velocity = velocity\n print(self.velocity)\n self.vafter = np.copy(velocity) # temp storage for velocity of next step\n self.delete = False", "def setMomentum(self,p):\n if p is None:\n self.p = Cartesian3DVector()\n else:\n if isinstance(p,Cartesian3DVector):\n self.p = Cartesian3DVector(p.x,p.y,p.z)\n else:\n raise CoordinateVector(\"Initializing a particle with the incorrect momentum vector type.\")", "def update(self):\n if self.hasMovementAngX():\n self.rotateX(self.angvel.getX())\n if self.hasMovementAngY():\n self.rotateY(self.angvel.getY())\n if self.hasMovementAngZ():\n self.rotateZ(self.angvel.getZ())\n if self.hasMovementX():\n self.moveX(self.posvel.getX())\n if self.hasMovementY():\n self.moveY(self.posvel.getY())\n if self.hasMovementZ():\n self.moveZ(self.posvel.getZ())\n f_count = 0\n for func in self.functions:\n if self.functionUpdate[f_count]:\n func(*self.functionArguments[f_count])\n f_count += 1", "def cmd_velocity(self, vn, ve, vd, heading):\n pass", "def ball_increase_velocity():\n global ball_vel\n ball_vel[0] = ball_vel[0] * 1.10\n ball_vel[1] = ball_vel[1] * 1.10", "def update_position(self, dt=1):\n #Lets suppose that space is cubic for now...\n #If position is not in the given space, set it to the minimum or maximum\n for dim in range(len(self.position)):\n if (self.position[dim] + self.velocity[dim] * dt) > x_end:\n self.position[dim] = x_end\n\n if (self.position[dim] + self.velocity[dim] * dt) < x_begin:\n self.position[dim] = x_begin\n\n else:\n self.position[dim] += self.velocity[dim] * dt", "def move_euler(point, uvw_func, dt):\n return move(point, uvw_func(point), dt)", "def move(self, surface):\n\t\tself.position = wrap_position(self.position + self.velocity, surface)", "def vel(self, *args, **kwargs) -> Any:\n pass", "def move(self):\n t = self.get_time_step() # Get the timestep\n\n for body in self.bodies:\n body.update_pos_revs(t, self.time)\n\n # Once all the positions are updated, update all velocities and\n # accelerations.\n for body in self.bodies:\n body.update_vel_acc(t, self.bodies)\n\n self.get_energies() # Get the total energy\n self.time += t # Increase the time by time step.", "def verletIntegration(self):\n for atom in range(0, self.numAtoms):\n \n # Update velocities\n self.atoms[atom].vx += (self.atoms[atom].fx/self.m)*self.dt\n self.atoms[atom].vy += (self.atoms[atom].fy/self.m)*self.dt\n self.atoms[atom].vz += (self.atoms[atom].fz/self.m)*self.dt\n \n \n # Update positions\n newX = self.atoms[atom].x 
+ self.atoms[atom].vx*self.dt\n newY = self.atoms[atom].y + self.atoms[atom].vy*self.dt\n newZ = self.atoms[atom].z + self.atoms[atom].vz*self.dt\n\n # Update current positions (applying PBC)\n if newX < 0:\n self.atoms[atom].x = newX + self.lbox\n elif newX > self.lbox:\n self.atoms[atom].x = newX - self.lbox\n else:\n self.atoms[atom].x = newX\n \n if newY < 0:\n self.atoms[atom].y = newY + self.lbox\n elif newY > self.lbox:\n self.atoms[atom].y = newY - self.lbox\n else:\n self.atoms[atom].y = newY\n \n if newZ < 0:\n self.atoms[atom].z = newZ + self.lbox\n elif newZ > self.lbox:\n self.atoms[atom].z = newZ - self.lbox\n else:\n self.atoms[atom].z = newZ", "def update_E(self):\n self.grid.E[self.loc] += (\n self.grid.courant_number\n * self.grid.inverse_permittivity[self.loc]\n * self.phi_E\n )", "def update(self,time,mposition):\n max_speed=100\n self.circlePos += self.circleVel * time\n if mposition!=None:\n mouseDir = mposition - self.circlePos\n self.circleVel += mouseDir.normalized() * 80 * time #This is the terminal velocity being applied\n t=self.circleVel.magnitude()\n if t>=max_speed:\n self.circleVel=self.circleVel.normalized()*max_speed" ]
[ "0.7147672", "0.71063995", "0.68057793", "0.6710489", "0.6636017", "0.6588007", "0.65872145", "0.65763974", "0.65613395", "0.6534683", "0.65088755", "0.6481874", "0.6460222", "0.64475214", "0.6444629", "0.63995236", "0.6384533", "0.63719004", "0.63658047", "0.6355781", "0.6334964", "0.6274445", "0.6272949", "0.6255004", "0.6242244", "0.6206135", "0.6205284", "0.6203857", "0.6180659", "0.61780643", "0.6169437", "0.61614096", "0.61614096", "0.61349994", "0.6095063", "0.6093058", "0.6086489", "0.60815734", "0.6079215", "0.6076145", "0.60463905", "0.6044779", "0.60444206", "0.60274243", "0.6014782", "0.60087335", "0.60066867", "0.6006685", "0.5990343", "0.59832317", "0.59799194", "0.5973953", "0.5961164", "0.59580374", "0.5931807", "0.59275997", "0.5919324", "0.58981967", "0.58975244", "0.58920836", "0.5891173", "0.5888572", "0.58879113", "0.58829844", "0.587916", "0.5877712", "0.58760846", "0.58741575", "0.5873929", "0.58652717", "0.58599085", "0.584281", "0.58353543", "0.58347845", "0.5833146", "0.5829908", "0.5823751", "0.5818789", "0.5818027", "0.58153003", "0.5804579", "0.58033675", "0.5801473", "0.5789454", "0.5789267", "0.57872796", "0.5783825", "0.5780742", "0.5776553", "0.577155", "0.5766629", "0.57661855", "0.57560915", "0.5744509", "0.57420194", "0.57323086", "0.5730723", "0.5725385", "0.57249427", "0.57184845" ]
0.6623841
5
Method that returns the electric field from the particle that affects another particle.
def GenerateElectricField(self, affectedParticle): return self.electricField.GenerateField(affectedParticle)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def electric_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber*r\n ikr = 1j * kr\n\n front_term = (\n (1j * self.omega * self.mu * self.moment) / (4. * np.pi * r**2) *\n (ikr + 1) * np.exp(-ikr)\n )\n return front_term * self.cross_orientation(dxyz) / r", "def compute_electric_field(self):\n self.set_grid()\n rho = self.grid.distribute(self.bunch.positions)\n rho *= self.bunch.line_charge_density * 4 # unknown origin\n phi = self.solver.get_potential(rho, self.bunch.line_charge_density)\n Ex, Ey = self.grid.gradient(-phi)\n self.fields[:, 0] = self.grid.interpolate(Ex, self.bunch.positions)\n self.fields[:, 1] = self.grid.interpolate(Ey, self.bunch.positions)", "def electric_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber * r\n ikr = 1j * kr\n\n front_term = (\n (self.current * self.length) / (4 * np.pi * self.sigma * r**3) *\n np.exp(-ikr)\n )\n symmetric_term = (\n spatial.repeat_scalar(self.dot_orientation(dxyz)) * dxyz *\n (-kr**2 + 3*ikr + 3) / r**2\n )\n oriented_term = (\n (kr**2 - ikr - 1) *\n np.kron(self.orientation, np.ones((dxyz.shape[0], 1)))\n )\n return front_term * (symmetric_term + oriented_term)", "def electric_field(self, xyz):\n\n xyz = check_xyz_dim(xyz)\n if np.any(xyz[..., -1] > 0):\n raise ValueError(\n f\"z value must be less than or equal to 0 in a halfspace, got {(xyz[..., -1])}\"\n )\n\n e = self._primary.electric_field(xyz) + self._image.electric_field(xyz)\n return e", "def GenerateMagneticField(self, affectedParticle):\n return self.magneticField.GenerateField(affectedParticle)", "def field ( self , xyz ) :\n return self._ilhcbmagnet.fieldVector ( xyz )", "def _evaluate_electric(snapshot, params):\n positions = snapshot.particles.position\n charges = snapshot.particles.charge\n E_field = params\n energies = -charges * np.dot(positions, E_field)\n forces = np.outer(charges, E_field)\n return forces, energies", "def electric_field(self, xyz_m, xyz_n=None):\n\n xyz_m = check_xyz_dim(xyz_m)\n if np.any(xyz_m[..., -1] > 0):\n raise ValueError(\n f\"z value must be less than or equal to 0 in a halfspace, got {(xyz_m[..., -1])}\"\n )\n\n if xyz_n is not None:\n xyz_n = check_xyz_dim(xyz_n)\n if np.any(xyz_n[..., -1] > 0):\n raise ValueError(\n f\"z value must be less than or equal to 0 in a halfspace, got {(xyz_n[..., -1])}\"\n )\n\n em = self._a.electric_field(xyz_m) - self._b.electric_field(xyz_m)\n\n if xyz_n is not None:\n en = self._a.electric_field(xyz_n) - self._b.electric_field(xyz_n)\n e = em - en\n return e\n else:\n return em", "def compute_rf_field(self, r):\r\n\t\tE = np.zeros((3))\r\n\t\tfor nam, e in self.rf_electrode_list:\r\n\t\t\tE += e.compute_electric_field(r)\r\n\t\treturn E", "def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)", "def ee_radius_diffraction(self, energy=FIRST_AIRY_ENCIRCLED):\n return _inverse_analytic_encircled_energy(self.fno, self.wavelength, energy)", "def F(self):\n return self.generic_getter(get_F_potential, \"F\", \"convert_energy\")", "def external_field(self):\n # TODO: return curl(A) for non-homogeneous external_field\n A = self.external_vector_potential\n if A is not None:\n Ax, Ay = A\n # TODO: check expression 
below\n return (- np.diff(Ax, axis=1) * cfg.idy\n + np.diff(Ay, axis=0) * cfg.idx)\n else:\n return None", "def particleCharge(self):\n return self.params['particleCharge']", "def getValueFromFieldname(self,fieldname):\n if hasattr(self,fieldname): #Standard attributes.\n value = getattr(self,fieldname)\n if not isinstance(value,Cartesian3DVector):\n return value\n if fieldname == \"E\": #Interprets E as energy\n return self.getEnergy()\n momentum_direction = fieldname.replace(\"p\",\"\")\n velocity_direction = fieldname.replace(\"v\",\"\")\n if fieldname.startswith(\"p\") and momentum_direction in [\"x\",\"y\",\"z\"]:\n return getattr(self.p,momentum_direction)\n if fieldname.startswith(\"v\") and velocity_direction in [\"x\",\"y\",\"z\"]:\n return getattr(self.v,velocity_direction)\n elif fieldname in [\"x\",\"y\",\"z\"]:\n return getattr(self.x,fieldname)\n raise Exception(\"The given field, \"+fieldname+\", is not defined for the particle.\")", "def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy", "def current_density(self, xyz):\n\n j = self.electric_field(xyz) / self.rho\n return j", "def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)", "def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)", "def getEnergyEvolution(self):\n\n\t\tEBefore = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleBefore]\n\t\tEAfter = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleAfter]\n\n\t\treturn EBefore, EAfter", "def getPeakMagneticField(self):\n return self.solenoid.getPeakMagneticField()", "def value(self):\n nd1 = super().nd1()\n nd2 = super().nd2()\n f1 = nd1 * self.s\n f2 = nd2 * self.x * math.e ** (-self.rf * self.t)\n return f1 - f2", "def potentialEnergy(self):\n return 0.5*(pdist(self.positions)**2).sum()", "def energy_func(self):\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI))", "def __call__(self, x):\n\n self.dbeads.q = x\n e = self.dforces.pot # Energy\n g = -self.dforces.f # Gradient\n\n return e, g", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n return self._scalar * self._field.compute_magnetic_field(coords, params, basis)", "def get_E(self):\r\n return self.Real.E, self.Ideal.E", "def get_E(self):\r\n return self.Real.E, self.Ideal.E", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def current_density(self, xyz_m, xyz_n=None):\n\n j = self.electric_field(xyz_m, xyz_n=xyz_n) / self.rho\n return j", "def force_morse(p1,p2, D, alpha,r_e):\n\n #the magnitude of the particles' separation\n r_12=np.linalg.norm(Particle3D.separation(p1,p2))\n #the force on p1 due to p2\n coeff = (-2.0*float(alpha)*float(D)*(1.0-m.exp(-1.0*float(alpha)*(float(r_12)-float(r_e))))*(m.exp(-1.0*float(alpha)*(float(r_12)-float(r_e)))))*(1.0/(float(r_12)))\n force=float(coeff)*Particle3D.separation(p1,p2)\n return force", "def compute_energy(self):\n\n # 
radiation energy\n Qsqrd = self.omega_coords[:,:,1]*self.omega_coords[:,:,1]\n Psqrd = self.omega_coords[:,:,0]*self.omega_coords[:,:,0]\n\n e_rad = (Psqrd/self.mode_mass + (self.mode_mass*self.omega**2)*Qsqrd)*.5\n\n # space charge energy\n Dsqrd = self.dc_coords[:,:,0]*self.dc_coords[:,:,0]\n\n e_drft = Dsqrd/(2.*self.mode_mass)\n\n energy = e_rad+e_drft\n\n return energy", "def fluxonium_potential(self):\n return -0.5*(self.Ej * ((1+self.d)*cos(self.phis - 2. * pi * self.phi - 2. * pi * self.phiL) + (1-self.d)*cos(self.phis-2. * pi * self.phiL))) + self.El/2. * (self.phis) ** 2\n #return -0.5*(self.Ej * cos(self.phis - 2. * pi * self.phi) + self.Ej * cos(self.phis)) + self.El/2. * (self.phis-self.phiL)** 2", "def magnetic_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber * r\n ikr = 1j*kr\n\n front_term = (\n self.current * self.length / (4 * np.pi * r**2) * (ikr + 1) *\n np.exp(-ikr)\n )\n return -front_term * self.cross_orientation(dxyz) / r", "def energy_function(self):\n E = 0\n for i in range(len(self.config)):\n for j in range(len(self.config)):\n s = self.config[i,j]\n #Calculate the impact of neighboring particle pairs\n neighbors = (self.config[(i+1)%L, j] +\n self.config[i, (j+1)%L] + \n self.config[(i-1)%L, j] + \n self.config[i, (j-1)%L])\n E += -J*s*neighbors\n #fix for extra neighbors\n return E/4", "def total_loc_efield(i, r1, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod, nmin_sc, nmax_sc,\n R_particle, eps_particle):\n if i == 1:\n ri = r1\n rj = r2\n j = 2\n elif i == 2:\n ri = r2\n rj = r1\n j = 1\n else:\n ri = np.array([0, 0, 0])\n rj = np.array([0, 0, 0])\n j = 0\n print('ERROR: i is out of range!')\n\n k2_eps0 = k**2 / const.epsilon0\n E0i = E0_sum(ri, k, fiber_radius, eps_out, eps_in,\n E0_mod, nmin_sc, nmax_sc, case)\n\n Gsii = gff.GF_pol(k, eps_out, eps_in, fiber_radius,\n ri, ri, nmin, nmax, kzimax)\n G0ij = gfv.GF_vac_pol(ri, rj, k)\n\n Gsij = gff.GF_pol(k, eps_out, eps_in, fiber_radius,\n ri, rj, nmin, nmax, kzimax)\n Gij = G0ij + Gsij\n\n pi = dipole_moment(i, r1, r2, R_particle, eps_particle, k, eps_out, eps_in,\n fiber_radius, nmin, nmax, kzimax,\n E0_mod, nmin_sc, nmax_sc, case)\n pj = dipole_moment(j, r1, r2, R_particle, eps_particle, k, eps_out, eps_in,\n fiber_radius, nmin, nmax, kzimax,\n E0_mod, nmin_sc, nmax_sc, case)\n return(E0i + k2_eps0 * (np.dot(Gij, pj) + np.dot(Gsii, pi)))", "def HalfReactionDeltaGPrime(self):\n dg_prime = self.DeltaGPrime()\n delta_electrons = self._GetElectronDiff() \n return dg_prime + constants.F * delta_electrons * \\\n self.aq_params.e_reduction_potential", "def index_of_refraction(self):\n return self.microsphere.index_of_refraction(self.wavelength)", "def getMagneticField(self, z):\n return float(self.solenoid.B_interp(z))", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def attraction(self, other: Body) -> Vector:\n dist = self.position - other.position\n dist_modsq = dist.lensq\n dist_unit = dist / math.sqrt(dist_modsq) # Unit vector\n G = 6.674384e-11\n force_mod = G * self.mass * other.mass / dist_modsq\n return dist_unit * force_mod", "def _nelec(self):\n pd = self.particle_distribution(self._gam * 
mec2)\n return pd.to(1/mec2_unit).value", "def get_force(self):\n # @todo: Probably need to check the state of the landing gear for this (e.g. are they on the track?).\n # Note: you can get the state of the landing gear by going through self.sim \n return 0.0", "def compute_magnetic_field(self, coords, params={}, basis=\"rpz\"):", "def force(particle1, particle2):\n position1 = particle1.position\n position2 = particle2.position\n\n distance_12 = np.sqrt((position1.x - position2.x)**2 +\n (position1.y - position2.y)**2 +\n (position1.z - position2.z)**2)\n\n return G*particle1.mass*particle2.mass/distance_12**2", "def particle(self) -> Particle:\n return Particle(diameter=self.diameter)", "def potential_energies(self):\n # Create all pairs of planets\n pairs = itertools.combinations(self.bodies, 2)\n # Return the sum of all potential energies.\n return sum([-G * pair[0].mass * pair[1].mass /\n norm(pair[0].position - pair[1].position) for pair in pairs])", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def fieldGradient(self):\n return self.params['fieldGradient']", "def absorption_energy_eV(self):\n return self._absorption_energy_eV.copy()", "def add(self,particle):\n\n if not self.check_def(['E','px','py','pz']): \n sys.exit('Particle error: Quadri impulsion not define')\n if not particle.check_def(['E','px','py','pz']): \n sys.exit('Particle error: Quadri impulsion not define')\n \n neut=part_quadvec(self.E+particle.E,self.px+particle.px,self.py+particle.py,self.pz+particle.pz)\n neut.cal_mass()\n return neut", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)", "def delta_energy(atom,layer1,layer2):\n global r,c,h\n return float('%.2E' % Decimal(str(r*((atom**2/layer1**2)-(atom**2/layer2**2)))))", "def delta(self):\r\n return 1 - xl.Refractive_Index_Re(self.compound, self.energy, self.density)", "def driftRHS_3D(field,drift_velocity,t,x):\n f = field.getValue(x)\n fs = np.sqrt(f[0]**2 + f[1]**2 + f[2]**2)\n f = f/fs\n return -f*drift_velocity(fs)", "def energy(self, r):\n sigma = self.params['sigma']\n epsilon = self.params['epsilon']\n s = sigma / r\n s6 = s**6; s12 = s6 * s6\n pot = 4.0 * epsilon * (s12 - s6)\n return pot", "def Drepp(self):\n sinE = np.sin(self.E())\n cosE = np.cos(self.E())\n return -self.alpha()*cosE-(self.beta()+self.GAMMA)*sinE", "def Dre(self):\n er = self.er()\n sinE = np.sin(self.E())\n cosE = np.cos(self.E())\n return self.alpha()*(cosE- er)+ \\\n (self.beta()+self.GAMMA)*sinE", "def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))", "def TotalEnergy(self):\n return (math.sqrt((Particle.RestEnergy(self) ** 2)\n + (np.linalg.norm(Particle.Momentum(self)) * const.speed_of_light) ** 2))", "def get_nuclear_potential(self, r):\n\n return -self.nuclear_charge/r", "def particle(self) -> Particle:\n return self.expiration.particle", "def dynamics(self,x,p,dEf):\n fout = self.dynamicsf(x,p)+dEf(x,p)\n return fout", "def get_f1_f2(self, energy=13.0):\n \n try:\n f1 = self.xray_properties[energy][0]\n f2 = self.xray_properties[energy][1]\n except KeyError:\n # Interpolate instead\n #energy_close = min(self.xray_properties.keys(), key=lambda k: abs(k-energy))\n\n keys = np.sort(self.xray_properties.keys())\n idx = -1\n for i, key in enumerate(keys):\n if idx==-1 and key>energy:\n idx = i\n \n energy_low = keys[idx-1]\n energy_high = keys[idx]\n extent = (energy-energy_low)/(energy_high-energy_low)\n \n f1 = 
self.xray_properties[energy_high][0]*extent + self.xray_properties[energy_low][0]*(1.0-extent)\n f2 = self.xray_properties[energy_high][1]*extent + self.xray_properties[energy_low][1]*(1.0-extent)\n \n return f1, f2", "def value(self): \r\n c = self.nd1() * self.s * math.exp(-self.div * self.t)\r\n c -= self.nd2() * self.x * math.exp(-self.rf * self.t)\r\n \r\n return c", "def getEichFromEQ(self, ep, verbose=False):\n #assuming plasma is centered in machine here\n zMin = ep.g['ZmAxis'] - 0.25\n zMax = ep.g['ZmAxis'] + 0.25\n zWall = np.linspace(zMin, zMax, 1000)\n zLCFS = ep.g['lcfs'][:,1]\n #this prevents us from getting locations not at midplane\n idx = np.where(np.logical_and(zLCFS>zMin,zLCFS<zMax))\n Rmax = ep.g['lcfs'][:,0][idx].max()\n Rmin = ep.g['lcfs'][:,0][idx].min()\n # geometric quantities\n Rgeo = (Rmax + Rmin) / 2.0\n a = (Rmax - Rmin) / 2.0\n aspect = a/Rgeo\n\n #Regression 15\n C = 1.35\n Cp = -0.02\n Cr = 0.04\n Cb = -0.92\n Ca = 0.42\n # Evaluate Bp at outboard midplane\n Z_omp_sol = 0.0\n Bp = abs(ep.BpFunc.ev(Rmax,Z_omp_sol))\n #Evaluate lq\n self.lqEich = C * self.Psol**Cp * Rgeo**Cr * Bp**Cb * aspect**Ca # in mm\n Bt = abs(ep.BtFunc.ev(ep.g['RmAxis'],ep.g['ZmAxis']))\n if verbose==True:\n print(\"Poloidal Field at midplane: {:f}\".format(Bp))\n print(\"Toroidal Field at axis: {:f}\".format(Bt))\n print(\"Found heat flux width value of: {:f} mm\".format(self.lqEich))\n log.info(\"Found heat flux width value of: {:f} mm\".format(self.lqEich))\n return", "def getCF(self):\r\n return self.lEq;", "def energy(self):\n nocc, gmo, e = self.nocc, self.gmo, self.e\n\n Ec = 0.0\n for i in range(nocc):\n for j in range(nocc):\n for a in range(nocc, len(e)):\n for b in range(nocc, len(e)):\n Ec += (1/4.0) * gmo[i, j, a, b]**2 / (e[i]+e[j]-e[a]-e[b])\n\n self.Ec = Ec\n self.E_mp2 = Ec + self.E_scf\n\n print('@MP2 correlation energy: {:15.10f}\\n'.format(self.Ec))\n print('@Total MP2 energy: {:15.10f}\\n'.format(self.E_mp2))\n\n return self.E_mp2", "def _get_deform_field_dm(self):\n self.deformationStrength = self.deformrandomstate.rand()\n adr = [w // d + 4 for w, d in zip(self.w, self.deform)]\n deformshape = [len(self.w)] + adr\n tmp = np.zeros([4] * (len(self.w) - 1) + [len(self.w)] + self.w)\n\n if np.isscalar(self.deformSigma):\n myDeformSigma = np.array(len(self.w), self.deformSigma)\n else:\n myDeformSigma = np.asarray(self.deformSigma)\n\n strngs = [self.deformrandomstate.normal(0, myDeformSigma[i], deformshape[1:]) * self.deformationStrength\n for i in range(len(myDeformSigma))]\n tdf = np.asarray(strngs, dtype=np.float32)\n\n if self.truncated_deform:\n upperBound = 3 * myDeformSigma\n for i in range(len(myDeformSigma)):\n overshoot_coordinates = np.where(np.abs(tdf[i]) > upperBound[i])\n while len(overshoot_coordinates[0]):\n tdf[i][overshoot_coordinates] = np.float32(self.deformrandomstate.normal(0, myDeformSigma[i], len(\n overshoot_coordinates[0])) * self.deformationStrength)\n overshoot_coordinates = np.where(np.abs(tdf[i]) > upperBound[i])\n\n # logging.getLogger('data').info('truncated deformation field')\n\n def cint(x, pnm1, pn, pnp1, pnp2):\n return 0.5 * (\n x * ((2 - x) * x - 1) * pnm1 + (x * x * (3 * x - 5) + 2) * pn + x * ((4 - 3 * x) * x + 1) * pnp1 + (\n x - 1) * x * x * pnp2)\n\n r = [np.asarray([x * 1.0 / self.deform[i] - x // self.deform[i] for x in range(self.w[i])]).reshape(\n [self.w[i] if t == i + 1 else 1 for t in range(len(self.w) + 1)]) for i in range(len(self.w))]\n d = [np.asarray([x // self.deform[i] for x in range(self.w[i])]).reshape(\n 
[self.w[i] if t == i else 1 for t in range(len(self.w))]) for i in range(len(self.w))]\n\n if len(self.w) == 3:\n for i in range(4):\n for j in range(4):\n xx = d[0] + i\n yy = d[1] + j\n zz = d[2] + 1\n tmp[i, j] = cint(r[2], tdf[:, xx, yy, zz - 1], tdf[:, xx, yy, zz], tdf[:, xx, yy, zz + 1],\n tdf[:, xx, yy, zz + 2])\n for i in range(4):\n tmp[i, 0] = cint(r[1], tmp[i, 0], tmp[i, 1], tmp[i, 2], tmp[i, 3])\n return cint(r[0], tmp[0, 0], tmp[1, 0], tmp[2, 0], tmp[3, 0])\n\n elif len(self.w) == 2:\n for j in range(4):\n xx = d[0] + j\n yy = d[1] + 1\n tmp[j] = cint(r[1], tdf[:, xx, yy - 1], tdf[:, xx, yy], tdf[:, xx, yy + 1], tdf[:, xx, yy + 2])\n return cint(r[0], tmp[0], tmp[1], tmp[2], tmp[3])\n\n else:\n raise Exception('only implemented for 2d and 3d case. feel free to contribute')", "def gravitation_force(self, other):\n force = ((CONSTANTS.G * self.mass * other.mass) /\n (self.distance(other) ** 2))\n return force", "def receptive_field(self):\n return self.LocalLayer_Torso.receptive_field()", "def driftRHS(field,drift_velocity,t,x):\n f = field.getValue(x)\n fs = np.linalg.norm(f)\n f = f/fs\n return -f*drift_velocity(fs)", "def _encircled_energy_core(mtf_data, radius, nu_p, dx, dy):\n integration_fourier = special.j1(2 * np.pi * radius * nu_p) / nu_p\n dat = mtf_data * integration_fourier\n return radius * dat.sum() * dx * dy", "def get_xray_delta_beta_intrinsic(self, energy=13.0):\n \n delta, beta = self.get_xray_delta_beta(energy)\n \n delta *= self.get_molecular_weight()/self.get_density()\n beta *= self.get_molecular_weight()/self.get_density()\n \n return delta, beta", "def F(self, points):\n\n # Perform calculation in the frame parallel to the ellipse\n # and then rotate it back\n rotatedP = self.rotatePoint(points)\n\n surface_pot = np.sqrt((rotatedP[..., 0]/self.axisA[:, None])**2 + (rotatedP[..., 1]/self.axisB[:, None])**2)\n\n # Force = -grad potential\n # swapaxes is necesary to make the created\n # force array of shape [N_samples, N-particles, 2]\n\n surface_force = - 1/surface_pot[..., None]*np.array([\n rotatedP[..., 0]/self.axisA[:, None]**2, \n rotatedP[..., 1]/self.axisB[:, None]**2\n ]).swapaxes(0, 2).swapaxes(0, 1) \n\n # Rotate back to the original frame\n surface_force = self.rotatePoint(surface_force, -1)\n\n # Force is scaled with A_conf and applied only if the particle is outside\n return self.A_conf*surface_force*self.isOutside(points)[:, :, None]", "def get_ecc(self):\n mu_mass = G*(self._mm + self._sm)\n h_mom = self.sp_ang_mom()\n vel = self.getvel_xyz()\n pos = self.getpos_xyz()\n e_vec = 1.0/mu_mass*(np.cross(vel, h_mom) -\n mu_mass*pos/np.linalg.norm(pos))\n return e_vec", "def external_vector_potential(self):\n assert (self.ae is None) == (self.be is None)\n \n if self.ae is not None:\n return self.ae, self.be\n\n return None", "def __call__(self, coords, params={}, basis=\"rpz\"):\n return self.compute_magnetic_field(coords, params, basis)", "def delta(self):\n return 1 - xl.Refractive_Index_Re(self.compound, self.energy, self.density)", "def CylinderEff(cylinder):\r\n\r\n cylinder['inlet']['h'] = pt2h(cylinder['inlet']['p'], cylinder['inlet']['t'])\r\n cylinder['inlet']['s'] = pt2s(cylinder['inlet']['p'], cylinder['inlet']['t'])\r\n\r\n cylinder['outlet']['h'] = pt2h(cylinder['outlet']['p'], cylinder['outlet']['t'])\r\n cylinder['outlet']['s'] = pt2s(cylinder['outlet']['p'], cylinder['outlet']['t'])\r\n\r\n # h2s is the specific enthalpy at state 2 for the isentropic turbine\r\n h2s = ps2h(cylinder['outlet']['p'], cylinder['inlet']['s'])\r\n \r\n 
cylinder['h2s'] = h2s\r\n \r\n hds = cylinder['inlet']['h'] - h2s # isentropic specific enthalpy drop\r\n hd = cylinder['inlet']['h'] - cylinder['outlet']['h'] # specific enthalpy drop\r\n\r\n cylinder['ef'] = 100 * hd / hds\r\n\r\n return cylinder", "def particle(self) -> Particle:\n return Particle()", "def particle(self) -> Particle:\n return Particle()", "def get_E(self):\r\n return self.E", "def getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N, 3],\n mass: dc.float64[N], G: dc.float64):\n # Kinetic Energy:\n # KE = 0.5 * np.sum(np.sum( mass * vel**2 ))\n # KE = 0.5 * np.sum( mass * vel**2 )\n KE = 0.5 * np.sum(np.reshape(mass, (N, 1)) * vel**2)\n\n # Potential Energy:\n\n # positions r = [x,y,z] for all particles\n x = pos[:, 0:1]\n y = pos[:, 1:2]\n z = pos[:, 2:3]\n\n # matrix that stores all pairwise particle separations: r_j - r_i\n # dx = x.T - x\n # dy = y.T - y\n # dz = z.T - z\n # dx = np.transpose(x) - x\n # dy = np.transpose(y) - y\n # dz = np.transpose(z) - z\n dx = np.add.outer(-x, x)\n dy = np.add.outer(-y, y)\n dz = np.add.outer(-z, z)\n\n # matrix that stores 1/r for all particle pairwise particle separations\n inv_r = np.sqrt(dx**2 + dy**2 + dz**2)\n # inv_r[inv_r>0] = 1.0/inv_r[inv_r>0]\n I = inv_r > 0\n np.divide(1.0, inv_r, out=inv_r, where=I)\n\n # sum over upper triangle, to count each interaction only once\n # PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1)))\n # PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1))\n tmp = -np.multiply.outer(mass, mass) * inv_r\n PE = 0.0\n for j in range(N):\n for k in range(j + 1, N):\n PE += tmp[j, k]\n PE *= G\n\n return KE, PE", "def dcm(self, otherframe):\n\n self._check_frame(otherframe)\n flist = self._dict_list(otherframe, 0)\n outdcm = eye(3)\n for i in range(len(flist) - 1):\n outdcm = outdcm * flist[i + 1]._dcm_dict[flist[i]]\n return outdcm", "def m_field(self):\n grad = np.gradient(self.A)\n\n B_x = grad[1] - grad[2]\n B_y = - grad[2] - grad[0]\n B_z = - grad[0] - grad[1]\n return (B_x, B_y, B_z)", "def get_force(self):\n displ = self.get_displ()\n equil = displ / np.linalg.norm(displ) * self.L0\n return self.k * (displ - equil)", "def value(self):\n nd1 = super().nd1()\n nd2 = super().nd2()\n _nd1 = 1 - nd1\n _nd2 = 1 - nd2\n f1 = _nd1 * self.s\n f2 = _nd2 * self.x * math.exp(-self.rf * self.t)\n return f2 - f1", "def get_ergodic_region(self):\n return [self.f1(self.rho),self.f0(self.rho)]", "def get_e(self):\n return self.e_min + self.e_ * self.e_range", "def getfquad(element, energy=None, fwhm_ev = 0, lw = 50, f1f2 = \"f1\",\n kernel=\"cauchy\"):\n try: Z = int(element)\n except: Z = elements.Z[element]\n \n fwhm_ev = abs(fwhm_ev)\n \n if energy is None:\n energy, iedge = get_energies(Z, 1000, 10000, fwhm_ev=fwhm_ev)\n return_ene = True\n else:\n energy = np.array(energy, ndmin=1)\n return_ene = False\n \n if f1f2 == \"f2\" or f1f2 == 1:\n ind = 1\n else:\n ind = 0\n \n \n if fwhm_ev <= np.finfo(float).eps:\n result = deltaf.clcalc(Z, energy)[ind]\n else:\n \n if kernel==\"cauchy\":\n corrfac = 1./quad(cauchy, -lw, lw, args=(1,), limit=500)[0]\n integrand = lambda x, E: cauchy(x, fwhm_ev) * deltaf.clcalc(Z, E-x)[ind]\n elif kernel==\"normal\":\n corrfac = 1./quad(normal, -lw, lw, args=(1,), limit=500)[0]\n integrand = lambda x, E: normal(x, fwhm_ev) * deltaf.clcalc(Z, E-x)[ind]\n \n def ffunc(E):\n return quad(integrand, -lw*fwhm_ev, lw*fwhm_ev, args=(E,), limit=500)[0]\n \n if np.isscalar(energy) or len(energy)==1:\n result = corrfac * ffunc(energy)\n else:\n fvec = np.vectorize(ffunc)\n result = corrfac * 
fvec(energy)\n \n if return_ene:\n return energy, result\n else:\n return result", "def get_E(self):\n return self.E", "def E(self, t):\n\n\t\tE = self.E0\n\n\t\t# Gaussian pulse shape\n\t\tE *= np.exp(-2.*np.log(2.)*((t-self.t0)/self.pulse_duration)**2.)\n\n\t\t# Instantaneous phase\n\t\tif self.phase:\n\t\t\tE *= np.cos(self.omega*(t-self.t0))\n\n\t\t# Transmition\n\t\tif self.remove_reflected_part and self.domain.D == 0:\n\t\t\tmaterial = self.domain.materials[0]\n\t\t\tE *= ((1.-material.Reflectivity)/material._Drude_index.real)**0.5\n\n\t\treturn E", "def interaction(self):\n\n Fint1 = self._calc_Fint(self._mu_0, self._mu_ex, self.p_0, self.p_ex)\n Fint2 = self._calc_Fint(self._mu_ex, self._mu_0, self.p_ex, self.p_0)\n\n Iint = (self.I0 * self._mu_0 * self.V.omega *\n (np.exp(-self.V.tau / self._mu_ex) * Fint1 +\n np.exp(-self.V.tau / self._mu_0) * Fint2))\n\n return self.SRF.NormBRDF * (1. - self.bsf) * Iint", "def RestEnergy(self):\n return (self.restMass * const.speed_of_light * const.speed_of_light)", "def get_econs(self):\n eham = self.beads.vpath*self.nm.omegan2 + self.nm.kin + self.forces.pot\n eham += self.bias.pot # bias\n for e in self._elist:\n eham += e.get()\n\n return eham + self.eens", "def scalar_potential_difference(field, coord_sys, point1, point2):\n\n if not isinstance(coord_sys, CoordSys3D):\n raise TypeError(\"coord_sys must be a CoordSys3D\")\n if isinstance(field, Vector):\n # Get the scalar potential function\n scalar_fn = scalar_potential(field, coord_sys)\n else:\n # Field is a scalar\n scalar_fn = field\n # Express positions in required coordinate system\n origin = coord_sys.origin\n position1 = express(point1.position_wrt(origin), coord_sys,\n variables=True)\n position2 = express(point2.position_wrt(origin), coord_sys,\n variables=True)\n # Get the two positions as substitution dicts for coordinate variables\n subs_dict1 = {}\n subs_dict2 = {}\n scalars = coord_sys.base_scalars()\n for i, x in enumerate(coord_sys.base_vectors()):\n subs_dict1[scalars[i]] = x.dot(position1)\n subs_dict2[scalars[i]] = x.dot(position2)\n return scalar_fn.subs(subs_dict2) - scalar_fn.subs(subs_dict1)", "def dynamics(x,Earth):\r\n\r\n # precompute a few terms to reduce number of operations\r\n r = norm(x[0:3])\r\n Re_r_sqr = 1.5*Earth.J2*(Earth.R/r)**2\r\n five_z_sqr = 5*x[2]**2/(r**2)\r\n\r\n # two body and J2 acceleration together\r\n accel = (-Earth.mu/(r**3))*np.array([x[0]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[1]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[2]*(1 - Re_r_sqr*(five_z_sqr - 3))])\r\n\r\n return np.array([x[3],x[4],x[5],accel[0],accel[1],accel[2]])", "def dc_potential(trap,VMULT,E,update=None):\n ne=trap.numElectrodes\n X,Y,Z=trap.X,trap.Y,trap.Z # grid vectors\n x,y,z=np.meshgrid(X,Y,Z) \n [Ex,Ey,Ez]=E\n Vout = np.zeros((len(X),len(Y),len(Z)))\n # build up the potential from the normal DC elctrodes\n for ii in range(ne):\n Vout = Vout + VMULT[ii]*trap['EL_DC_{}'.format(ii)].potential\n #Vout = Vout -Ex*X-Ey*Y-Ez*Z ### sara didn't try to get this to work.\n # update the trapping field data structure with instance attributes\n trap.instance.DC=Vout\n trap.instance.RF=trap.EL_RF # not needed, but may be useful notation\n trap.instance.X=X\n trap.instance.Y=Y\n trap.instance.Z=Z\n trap.instance.E=E\n trap.instance.check=True\n return trap", "def last_energy_change(self) -> PotentialEnergy:" ]
[ "0.6565109", "0.6523056", "0.6255628", "0.62081003", "0.61325175", "0.6086595", "0.5954075", "0.5868522", "0.58501065", "0.5768421", "0.5700194", "0.5639009", "0.5562006", "0.55513227", "0.55293006", "0.54763085", "0.5419618", "0.53979456", "0.53979456", "0.53847796", "0.53787816", "0.53736144", "0.536788", "0.53602344", "0.53591824", "0.53238064", "0.5322031", "0.5322031", "0.5321132", "0.5288567", "0.52786905", "0.52719766", "0.526115", "0.5257814", "0.52571726", "0.52530414", "0.5242203", "0.5227023", "0.52246135", "0.5211256", "0.51841515", "0.5165499", "0.51594365", "0.5152609", "0.51518905", "0.5143304", "0.51431173", "0.5141251", "0.5135713", "0.51310086", "0.5128219", "0.5125863", "0.51235485", "0.51187474", "0.5117663", "0.51045823", "0.50990635", "0.5093751", "0.50701797", "0.5068298", "0.5068258", "0.5054859", "0.5053137", "0.504866", "0.50382125", "0.50368136", "0.5032515", "0.5029987", "0.50263137", "0.5025144", "0.5020122", "0.5018565", "0.5011534", "0.5005393", "0.5002801", "0.5001598", "0.4999863", "0.49995524", "0.49987", "0.49974078", "0.49945903", "0.49945903", "0.499021", "0.49813932", "0.49712852", "0.49559346", "0.49541473", "0.49534416", "0.49520987", "0.49408233", "0.49332163", "0.49327332", "0.49155045", "0.49084434", "0.49068663", "0.49036258", "0.49013492", "0.49002492", "0.48934555", "0.48933396" ]
0.72869897
0
Method that returns the magnetic field from the particle that affects another particle.
def GenerateMagneticField(self, affectedParticle): return self.magneticField.GenerateField(affectedParticle)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def magnetic_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber * r\n ikr = 1j*kr\n\n front_term = (\n self.current * self.length / (4 * np.pi * r**2) * (ikr + 1) *\n np.exp(-ikr)\n )\n return -front_term * self.cross_orientation(dxyz) / r", "def magnetisation(field):\n norm_field = df.Field(field.mesh, dim=1, value=(field.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n return df.integral(field * df.dV / volume, direction='xyz')", "def field ( self , xyz ) :\n return self._ilhcbmagnet.fieldVector ( xyz )", "def magnetic_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber*r\n ikr = 1j*kr\n\n front_term = self.moment / (4. * np.pi * r**3) * np.exp(-ikr)\n symmetric_term = (\n spatial.repeat_scalar(self.dot_orientation(dxyz)) * dxyz *\n (-kr**2 + 3*ikr + 3) / r**2\n )\n oriented_term = (\n (kr**2 - ikr - 1) *\n np.kron(self.orientation, np.ones((dxyz.shape[0], 1)))\n )\n\n return front_term * (symmetric_term + oriented_term)", "def attraction(self, other: Body) -> Vector:\n dist = self.position - other.position\n dist_modsq = dist.lensq\n dist_unit = dist / math.sqrt(dist_modsq) # Unit vector\n G = 6.674384e-11\n force_mod = G * self.mass * other.mass / dist_modsq\n return dist_unit * force_mod", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n return self._scalar * self._field.compute_magnetic_field(coords, params, basis)", "def getMagneticField(self, z):\n return float(self.solenoid.B_interp(z))", "def compute_magnetic_field(self, coords, params={}, basis=\"rpz\"):", "def gravitation_force(self, other):\n force = ((CONSTANTS.G * self.mass * other.mass) /\n (self.distance(other) ** 2))\n return force", "def m_field(self):\n grad = np.gradient(self.A)\n\n B_x = grad[1] - grad[2]\n B_y = - grad[2] - grad[0]\n B_z = - grad[0] - grad[1]\n return (B_x, B_y, B_z)", "def GenerateElectricField(self, affectedParticle):\n return self.electricField.GenerateField(affectedParticle)", "def getPeakMagneticField(self):\n return self.solenoid.getPeakMagneticField()", "def force(particle1, particle2):\n position1 = particle1.position\n position2 = particle2.position\n\n distance_12 = np.sqrt((position1.x - position2.x)**2 +\n (position1.y - position2.y)**2 +\n (position1.z - position2.z)**2)\n\n return G*particle1.mass*particle2.mass/distance_12**2", "def getMagneticFieldMap(self):\n return self.solenoid.B_interp(self.z_array)", "def getMagFlux(self):\n return self.magflux", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n bp = self._B0 * self._R0 / coords[:, 0]\n brz = jnp.zeros_like(bp)\n B = jnp.array([brz, bp, brz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n\n return B", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n bz = self._B0 * jnp.ones_like(coords[:, 2])\n brp = jnp.zeros_like(bz)\n B = jnp.array([brp, brp, bz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n\n return B", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n 
assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n\n R, phi, Z = coords.T\n r = jnp.sqrt((R - self._R0) ** 2 + Z**2)\n theta = jnp.arctan2(Z, R - self._R0)\n br = -r * jnp.sin(theta)\n bp = jnp.zeros_like(br)\n bz = r * jnp.cos(theta)\n bmag = self._B0 * self._iota / self._R0\n B = bmag * jnp.array([br, bp, bz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n\n return B", "def __call__(self, coords, params={}, basis=\"rpz\"):\n return self.compute_magnetic_field(coords, params, basis)", "def Force_on_aircraft_in_body_reference_frame(m, V_B, V_dot_B, omega_B):\n return m * (V_dot_B + omega_B.cross(V_B))", "def _get_deform_field_dm(self):\n self.deformationStrength = self.deformrandomstate.rand()\n adr = [w // d + 4 for w, d in zip(self.w, self.deform)]\n deformshape = [len(self.w)] + adr\n tmp = np.zeros([4] * (len(self.w) - 1) + [len(self.w)] + self.w)\n\n if np.isscalar(self.deformSigma):\n myDeformSigma = np.array(len(self.w), self.deformSigma)\n else:\n myDeformSigma = np.asarray(self.deformSigma)\n\n strngs = [self.deformrandomstate.normal(0, myDeformSigma[i], deformshape[1:]) * self.deformationStrength\n for i in range(len(myDeformSigma))]\n tdf = np.asarray(strngs, dtype=np.float32)\n\n if self.truncated_deform:\n upperBound = 3 * myDeformSigma\n for i in range(len(myDeformSigma)):\n overshoot_coordinates = np.where(np.abs(tdf[i]) > upperBound[i])\n while len(overshoot_coordinates[0]):\n tdf[i][overshoot_coordinates] = np.float32(self.deformrandomstate.normal(0, myDeformSigma[i], len(\n overshoot_coordinates[0])) * self.deformationStrength)\n overshoot_coordinates = np.where(np.abs(tdf[i]) > upperBound[i])\n\n # logging.getLogger('data').info('truncated deformation field')\n\n def cint(x, pnm1, pn, pnp1, pnp2):\n return 0.5 * (\n x * ((2 - x) * x - 1) * pnm1 + (x * x * (3 * x - 5) + 2) * pn + x * ((4 - 3 * x) * x + 1) * pnp1 + (\n x - 1) * x * x * pnp2)\n\n r = [np.asarray([x * 1.0 / self.deform[i] - x // self.deform[i] for x in range(self.w[i])]).reshape(\n [self.w[i] if t == i + 1 else 1 for t in range(len(self.w) + 1)]) for i in range(len(self.w))]\n d = [np.asarray([x // self.deform[i] for x in range(self.w[i])]).reshape(\n [self.w[i] if t == i else 1 for t in range(len(self.w))]) for i in range(len(self.w))]\n\n if len(self.w) == 3:\n for i in range(4):\n for j in range(4):\n xx = d[0] + i\n yy = d[1] + j\n zz = d[2] + 1\n tmp[i, j] = cint(r[2], tdf[:, xx, yy, zz - 1], tdf[:, xx, yy, zz], tdf[:, xx, yy, zz + 1],\n tdf[:, xx, yy, zz + 2])\n for i in range(4):\n tmp[i, 0] = cint(r[1], tmp[i, 0], tmp[i, 1], tmp[i, 2], tmp[i, 3])\n return cint(r[0], tmp[0, 0], tmp[1, 0], tmp[2, 0], tmp[3, 0])\n\n elif len(self.w) == 2:\n for j in range(4):\n xx = d[0] + j\n yy = d[1] + 1\n tmp[j] = cint(r[1], tdf[:, xx, yy - 1], tdf[:, xx, yy], tdf[:, xx, yy + 1], tdf[:, xx, yy + 2])\n return cint(r[0], tmp[0], tmp[1], tmp[2], tmp[3])\n\n else:\n raise Exception('only implemented for 2d and 3d case. 
feel free to contribute')", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n Rq, phiq, Zq = coords.T\n\n if (params is None) or (len(params) == 0):\n params = self._params\n r, p, z = coords.T\n funR = lambda x: self._potential(x, p, z, **params)\n funP = lambda x: self._potential(r, x, z, **params)\n funZ = lambda x: self._potential(r, p, x, **params)\n br = Derivative.compute_jvp(funR, 0, (jnp.ones_like(r),), r)\n bp = Derivative.compute_jvp(funP, 0, (jnp.ones_like(p),), p)\n bz = Derivative.compute_jvp(funZ, 0, (jnp.ones_like(z),), z)\n B = jnp.array([br, bp / r, bz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n return B", "def magnetometer(self):\n self._mag[X] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_X_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_X_L_M), 16)\n self._mag[Y] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Y_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Y_L_M), 16)\n self._mag[Z] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Z_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Z_L_M), 16)\n\n return vector(self._mag)", "def magnetic_flux(self, *args):\n\t\tarea = self.area(*args)\n\t\tfield = self.los_corr(*args)\n\t\tif isinstance(args[0], np.ndarray):\n\t\t\tself.mgnt_flux = area*field\n\t\treturn area*field", "def get_force(self):\n # @todo: Probably need to check the state of the landing gear for this (e.g. are they on the track?).\n # Note: you can get the state of the landing gear by going through self.sim \n return 0.0", "def get_M(self):\n return self.get_par('MORB')", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n if params is None:\n params = [None] * len(self._fields)\n if isinstance(params, dict):\n params = [params]\n B = 0\n for i, field in enumerate(self._fields):\n B += field.compute_magnetic_field(coords, params[i % len(params)], basis)\n return B", "def calcMagneticFieldMap(self):\n # Normalised b-field (note lower case)\n self.solenoid.calcMagneticFieldMap()\n self.b = lambda z: self.solenoid.B_interp(z) * -e / (2 * m * c)\n self.calc_level = CALC_B_MAP", "def magnetic_tension(self, method='spectral'):\n import numpy as np\n gradB, B = self.magnetic_gradient_tensor(method=method, return_B=True)\n F = np.zeros_like(B)\n for i in range(3):\n for j in range(3):\n F[j] += B[i] * gradB[i,j]\n return F", "def m2(self):\n return self.mass[1]", "def getFar(self):\n return self.light.node().getLens().getFar()", "def magnetic_field(date: datetime.datetime, lat, lon, alt, output_format='cartesian'):\n g = GeoMag()\n return g.GeoMag(np.array([lat, lon, alt]), date, location_format='geodetic', output_format=output_format)", "def NormalF(self,other, kn, gamman, dt):\n return (kn*self.par.overlap(other)+gamman*diff(self.par.overlap(other),EulerN(self.par,dt).overlap(other),dt))*(self.par.pos-other.pos).direction()", "def get_m_eng_body(f_eng_body, P):\n m = np.zeros(3)\n for i in range(0, P.eng_nb):\n m += np.cross(P.eng_pos[i], f_eng_body[i])\n return m", "def magnetic_flux_density(self, xyz):\n return self.mu * self.magnetic_field(xyz)", "def magnetic_flux_density(self, xyz):\n return self.mu * self.magnetic_field(xyz)", "def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, 
curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO", "def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO", "def get_mag(self):\n raise NotImplementedError", "def displacement_mag(self):\n print(3284 * math.pow(self.concentration, -0.158))\n\n return 3284 * math.pow(self.concentration, -0.158)", "def F(self):\n return self.generic_getter(get_F_potential, \"F\", \"convert_energy\")", "def electric_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber*r\n ikr = 1j * kr\n\n front_term = (\n (1j * self.omega * self.mu * self.moment) / (4. 
* np.pi * r**2) *\n (ikr + 1) * np.exp(-ikr)\n )\n return front_term * self.cross_orientation(dxyz) / r", "def make_field(self):\n uniaxial = self.u[0]*self.u[1]*self.u[2] != 0\n cubic = self.c1[0]*self.c1[1]*self.c1[2]*self.c2[0]*self.c2[1]*self.c2[2] != 0\n @nb.njit\n def field_func(m):\n heff = self.hext + field.demagnetization(m, self.Nd)\n if uniaxial:\n heff += field.uniaxial_anisotropy(m, self.u, self.hu1, self.hu2)\n if cubic:\n heff += field.cubic_anisotropy(m, self.c1, self.c2, self.c3, self.hc1, self.hc2)\n return heff\n self.field = field_func", "def force_gravity(body1, body2, rel_positions):\n r_mag = np.linalg.norm(rel_positions)\n F = (CONST_G * body1.mass * body2.mass / r_mag ** 3) * rel_positions\n \n return F", "def getMagBoundary(self):\n\n # Get the boundary of magnitude based on the filter\n lowMagnitude = nan\n highMagnitude = nan\n if (self.filter == self.FilterU):\n lowMagnitude = 7.94\n highMagnitude = 14.80\n\n elif (self.filter == self.FilterG):\n lowMagnitude = 9.74\n highMagnitude = 16.17\n\n elif (self.filter == self.FilterR):\n lowMagnitude = 9.56\n highMagnitude = 15.73\n\n elif (self.filter == self.FilterI):\n lowMagnitude = 9.22\n highMagnitude = 15.26\n\n elif (self.filter == self.FilterZ):\n lowMagnitude = 8.83\n highMagnitude = 14.68\n \n elif (self.filter == self.FilterY):\n lowMagnitude = 8.02\n highMagnitude = 13.76\n\n return lowMagnitude, highMagnitude", "def gravitational_force(position_a, mass_a, position_b, mass_b):\r\n distance = distance_between(position_a, position_b)\r\n\r\n # Calculate the direction and magnitude of the force.\r\n angle = math.atan2(position_a[1] - position_b[1], position_a[0] - position_b[0])\r\n magnitude = G * mass_a * mass_b / (distance**2)\r\n\r\n # Find the x and y components of the force.\r\n # Determine sign based on which one is the larger body.\r\n sign = -1 if mass_b > mass_a else 1\r\n x_force = sign * magnitude * math.cos(angle)\r\n y_force = sign * magnitude * math.sin(angle)\r\n return x_force, y_force", "def gravity(self, second_object):\n # Fg = F12 = F21 = G(m1)(m2)/r^2\n m1, m2 = self.mass, second_object.mass\n r = self.radial_distance(second_object)\n return (self.G*m1*m2)/pow(r, 2)", "def getTOF(mass, energy, distance):\n velocity = speedOfLight * np.sqrt(2 * energy / mass)\n tof = distance / velocity\n return tof", "def Moment_on_aircraft_in_body_reference_frame(h_B, h_dot_B, omega_B):\n return h_dot_B + omega_B.cross(h_B)", "def _forces_moments(self, delta):\n # assert delta.shape == (4,1)\n da = delta[0]\n de = delta[1]\n dt = delta[2]\n dr = delta[3]\n\n e0 = self._state[3]\n e1 = self._state[4]\n e2 = self._state[5]\n e3 = self._state[6]\n u = self._state[7]\n v = self._state[8]\n w = self._state[9]\n p = self._state[10]\n q = self._state[11]\n r = self._state[12]\n\n self._Va = np.sqrt(u**2 + v**2 + w**2)\n self._alpha = np.arctan(1.0*w/u)\n self._beta = np.arcsin(1.0*v/self._Va)\n\n\n\n Fg = self.mass*self.gravity*np.array([2*(e1*e3-e2*e0),\n 2*(e2*e3 + e1*e0),\n e3**2 + e0**2 - e1**2 - e2**2,\n ])\n\n # Fg = self.mass*self.gravity*np.array([2*(e1*e3 - e2*e0),\n # 2*(e2*e3 + e1*e0),\n # e3**2 + e0**2 - e1**2 - e2**2,\n # ])\n\n M_e = 25\n sig = lambda a: (1+np.exp(-M_e*(a-self.alpha0))+np.exp(M_e*(a+self.alpha0)))/((1+np.exp(-M_e*(a-self.alpha0)))*(1+np.exp(M_e*(a+self.alpha0))))\n cla = lambda a: (1-sig(a))*(self.C_L_0+self.C_L_alpha*a)+sig(a)*(2*np.sign(a)*np.sin(a)**2*np.cos(a))\n cda = lambda a: self.C_D_p + (self.C_L_0+self.C_L_alpha*a)**2/(np.pi*self.e*self.AR)\n\n cxa = lambda a: 
-(cda(a)) * np.cos(a) + (cla(a)) * np.sin(a)\n\n cxq = lambda a: -self.C_D_q * np.cos(a) +self.C_L_q * np.sin(a)\n\n cxde = lambda a: -self.C_D_delta_e * np.cos(a) + self.C_L_delta_e * np.sin(a)\n\n cza = lambda a: -(cda(a)) * np.sin(a) - (cla(a)) * np.cos(a)\n\n czq = lambda a: -self.C_D_q * np.sin(a) - self.C_L_q * np.cos(a)\n\n czde = lambda a: -self.C_D_delta_e * np.sin(a) - self.C_L_delta_e * np.cos(a)\n\n c = self.c/(2.0*self._Va)\n b = self.b/(2.0*self._Va)\n\n\n\n one = 0.5*self.rho*self._Va**2*self.S_wing\n # two = np.array([[1,0,0],[0,1,0],[0,0,1]])\n three = np.array([[cxa(self._alpha)+cxq(self._alpha)*c*q+cxde(self._alpha)*de],\n [self.C_Y_0+self.C_Y_beta*self._beta+self.C_Y_p*b*p+self.C_Y_r*b*r+self.C_Y_delta_a*da+self.C_Y_delta_r*dr],\n [cza(self._alpha)+czq(self._alpha)*c*q+czde(self._alpha)*de]])\n\n Fa = np.squeeze(three) * one\n # pdb.set_trace()\n Fa = Fa.reshape((3,-1))\n\n F = Fg + Fa\n #\n # print(\"Fa:\",Fa)\n\n Fp = 0.5*self.rho*self.S_prop*self.C_prop*((self.k_motor*dt)**2-self._Va**2)\n\n # print(\"FP:\", Fp)\n\n fx = F[0] + Fp\n # + 0.5*MAV.rho*self._Va**2*MAV.S_wing*(\\\n # +cxa(self._alpha)\\\n # + cxq(self._alpha)*c*q\\\n # + cxde(self._alpha)*de\n # )\n\n fy = F[1]\n fz = F[2]\n\n # Moment time!!!\n one = 0.5*self.rho*self._Va**2*self.S_wing\n two = np.array([\\\n [self.b*(self.C_ell_0+self.C_ell_beta*self._beta+self.C_ell_p*b*p+self.C_ell_r*b*r+self.C_ell_delta_a*da+self.C_ell_delta_r*dr)],\n [self.c*(self.C_m_0+(self.C_m_alpha*self._alpha)+(self.C_m_q*c*q)+(self.C_m_delta_e*de))],\n [self.b*(self.C_n_0+(self.C_n_beta*self._beta)+(self.C_n_p*b*p)+(self.C_n_r*b*r)+(self.C_n_delta_a*da)+(self.C_n_delta_r*dr))]\n ])\n Ma = one * np.squeeze(two)\n # print(\"\\nMa:\", Ma)\n # pdb.set_trace()\n Ma = Ma.reshape((3,-1))\n\n size = Ma.shape[1]\n\n Mp = np.block([[np.ones(size)*-self.kTp*(self.kOmega*dt)**2],\n [np.zeros(size)],\n [np.zeros(size)]\n ])\n\n M = Mp + Ma\n\n Mx = M[0]\n My = M[1]\n Mz = M[2]\n\n # self._forces[0] = fx\n # self._forces[1] = fy\n # self._forces[2] = fz\n # pdb.set_trace()\n # print(fx, fy, fz, Mx, My, Mz)\n\n return np.array([fx, fy, fz, Mx, My, Mz])", "def _model_dynamics(self):\n if self.acc_term:\n rne = np.ndarray(self.sim.model.nv)\n functions.mj_rne(self.sim.model, self.sim.data, True, rne)\n return rne[self.arm_index]\n else:\n return self.sim.data.qfrc_bias[self.arm_index] # stored, no need for computation", "def mag2(self) -> numbers.Number:\n mv_val = self.layout.gmt_func(self.layout.adjoint_func(self.value), self.value)\n return mv_val[0]", "def _get_omega(self, vehicle_id):\n pos = self.positions[vehicle_id]\n omega = self.frenets[vehicle_id].get_omega(\n pos[0], pos[1], pos[2], pos[3])\n\n return omega", "def material(self):\n return self._F_Mstr", "def get_virtual_force(self, sigma_list):\n f_k = []\n for sigma in sigma_list:\n if not sigma.is_robot:\n f = self.compute_fk(\n sigma.range, self.sigma_des_obj, self.C_obj, self.c_obs)\n f_k.append(f)\n else:\n # c_robot is a bias used to keep robots at a distance\n f = self.compute_fk(\n sigma.range, self.sigma_des_robot, self.C_robot, self.c_robot)\n f_k.append(f)\n sum_x, sum_y = 0, 0\n for k, f in enumerate(f_k):\n phi = (math.pi / 4) * k\n sum_x += f * math.cos(phi)\n sum_y += f * math.sin(phi)\n # current heading of the robot in rect. coords.\n a_c = self.heading_vector\n # get the heading angle of the robot\n angle_a_c = np.arctan2(a_c[1], a_c[0])\n # get the angle of the resultant force in robot fixed frame\n angle_p = np.arctan2(sum_y, sum_x)\n # get the mag. 
of the resultant force\n mag_p = linalg.norm([sum_x, sum_y])\n # transform vector to fixed frame\n angle_p_bff = angle_p + angle_a_c\n # resolve resultant force in fixed frame preserve the mag.\n p = [mag_p * math.cos(angle_p_bff), mag_p * math.sin(angle_p_bff)]\n return p", "def attraction(self,astre1,astre2):\n norm=G*astre1.mass*astre2.mass/self.distance(astre1,astre2)\n return Force.createFromPolarCoordonnates(norm,angle)", "def fg(self, b1, b2):\n\n difference = b1.position.sub(b2.position)\n distance = difference.length()\n fg = (self.G * b1.m * b2.m) / (distance*distance)\n\n return Vector2(difference.x/distance * fg, difference.y/distance * fg, difference.z/distance * fg)", "def fluxonium_potential(self):\n return -0.5*(self.Ej * ((1+self.d)*cos(self.phis - 2. * pi * self.phi - 2. * pi * self.phiL) + (1-self.d)*cos(self.phis-2. * pi * self.phiL))) + self.El/2. * (self.phis) ** 2\n #return -0.5*(self.Ej * cos(self.phis - 2. * pi * self.phi) + self.Ej * cos(self.phis)) + self.El/2. * (self.phis-self.phiL)** 2", "def directed_force(self, other):\n # if self.name == \"earth\": ipdb.set_trace()\n angle = self.angle_between_two(other)\n grav_force = self.gravitation_force(other)\n return Force(math.cos(angle) * grav_force,\n math.sin(angle) * grav_force)", "def get_min_mag_edge(self):\r\n\t\treturn self.min_mag", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n Rq, phiq, Zq = coords.T\n if self._axisym:\n BRq = interp2d(\n Rq,\n Zq,\n self._R,\n self._Z,\n self._BR[:, 0, :],\n self._method,\n (0, 0),\n self._extrap,\n (None, None),\n **self._derivs[\"BR\"],\n )\n Bphiq = interp2d(\n Rq,\n Zq,\n self._R,\n self._Z,\n self._Bphi[:, 0, :],\n self._method,\n (0, 0),\n self._extrap,\n (None, None),\n **self._derivs[\"Bphi\"],\n )\n BZq = interp2d(\n Rq,\n Zq,\n self._R,\n self._Z,\n self._BZ[:, 0, :],\n self._method,\n (0, 0),\n self._extrap,\n (None, None),\n **self._derivs[\"BZ\"],\n )\n\n else:\n BRq = interp3d(\n Rq,\n phiq,\n Zq,\n self._R,\n self._phi,\n self._Z,\n self._BR,\n self._method,\n (0, 0, 0),\n self._extrap,\n (None, self._period, None),\n **self._derivs[\"BR\"],\n )\n Bphiq = interp3d(\n Rq,\n phiq,\n Zq,\n self._R,\n self._phi,\n self._Z,\n self._Bphi,\n self._method,\n (0, 0, 0),\n self._extrap,\n (None, self._period, None),\n **self._derivs[\"Bphi\"],\n )\n BZq = interp3d(\n Rq,\n phiq,\n Zq,\n self._R,\n self._phi,\n self._Z,\n self._BZ,\n self._method,\n (0, 0, 0),\n self._extrap,\n (None, self._period, None),\n **self._derivs[\"BZ\"],\n )\n B = jnp.array([BRq, Bphiq, BZq]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n return B", "def read_magnetometer(self):\n data = self.mag.read_bytes(Register.OUT_X_L_M, 6)\n return lsm9ds1.to_vector(data)", "def get_fmu(self):\n return self.fmu", "def magnetic_pressure_gradient_force(self, method='spectral'):\n import numpy as np\n gradB, B = self.magnetic_gradient_tensor(method=method, return_B=True)\n F = np.zeros_like(B)\n for i in range(3):\n for j in range(3):\n F[i] -= B[j] * gradB[i,j]\n return F", "def magnet_limits(self):\n max_currents = self.pv_monitor.get_max_currents()\n\n strengths = [np.array([max_currents[0],\n -max_currents[1],\n max_currents[2], 0, 0]),\n np.array([0, 0, max_currents[2],\n -max_currents[3],\n max_currents[4]])]\n\n edges = [[], []]\n for s in range(2):\n edges[s] = 
np.array(self.straight.p_beam_lim(strengths[s])\n )[:, [0, 2]]\n\n beam1max = edges[0][0]\n beam2max = edges[1][1]\n\n self.ax.plot(self.straight.data.photon_coordinates[0],\n beam1max, 'r--')\n self.ax.plot(self.straight.data.photon_coordinates[1],\n beam2max, 'r--')", "def flux_hack(self):\r\n return self.planes[1].galaxies[0].light_profiles[0].flux", "def diffuse(self) -> float:\n return self.GetDiffuse()", "def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)", "def getRefinement(self):\n return self.get('patchmesh.refinement')", "def escaped_momentum(self):\r\n position, velocity,escaped_particles,impact,collision,mom = self.box_collision_info()\r\n\r\n for i in xrange(1,self.n):\r\n velocity[np.logical_not(impact)] = velocity[np.logical_not(\r\n impact)]\r\n momentum = self.m*velocity\r\n abs_momentum = np.sum(np.sqrt(momentum[:,0]**2 + momentum[:,1]**2\r\n + momentum[:,2]**2))/2\r\n force = abs_momentum/self.dt\r\n\r\n return abs_momentum, force", "def dcm(self, otherframe):\n\n self._check_frame(otherframe)\n flist = self._dict_list(otherframe, 0)\n outdcm = eye(3)\n for i in range(len(flist) - 1):\n outdcm = outdcm * flist[i + 1]._dcm_dict[flist[i]]\n return outdcm", "def getNear(self):\n return self.light.node().getLens().getNear()", "def test_field():\n\n # Using mesh with radius 10 nm (nmag ex. 1)\n mesh = from_geofile(os.path.join(MODULE_DIR, \"sphere1.geo\"))\n S3 = df.VectorFunctionSpace(mesh, \"Lagrange\", 1)\n m_function = df.interpolate(df.Constant((1, 0, 0)), S3)\n m = Field(S3, m_function)\n\n demag = Demag()\n demag.setup(m, Field(df.FunctionSpace(mesh, 'DG', 0), Ms), unit_length=1e-9)\n\n # Compute demag field\n H_demag = demag.compute_field()\n H_demag.shape = (3, -1)\n x, y, z = H_demag[0], H_demag[1], H_demag[2]\n\n print \"Max values in direction:\"\n print \"x: %g, y: %g, z: %g\" % (max(x), max(y), max(z))\n print \"Min values in direction:\"\n print \"x: %g, y: %g, z: %g\" % (min(x), min(y), min(z))\n\n x, y, z = average(x), average(y), average(z)\n print \"Average values in direction\"\n print \"x: %g, y: %g, z: %g\" % (x, y, z)\n\n # Compute relative erros\n x = abs((x + 1./3*Ms)/Ms)\n y = abs(y/Ms)\n z = abs(z/Ms)\n\n print \"Relative error:\"\n print \"x: %g, y: %g, z: %g\" % (x, y, z)\n assert x < TOL, \"x-average is %g, should be -1/3.\" % x\n assert y < TOL, \"y-average is %g, should be zero.\" % y\n assert z < TOL, \"z-average is %g, should be zero.\" % z", "def compute_forces_mesh(self):\n f = self.ptclgrid.grid[:self.size,:self.size]*self.grad_phi_mesh()\n return f", "def field(self):\n return self.reference.field", "def compute_electric_field(self):\n self.set_grid()\n rho = self.grid.distribute(self.bunch.positions)\n rho *= self.bunch.line_charge_density * 4 # unknown origin\n phi = self.solver.get_potential(rho, self.bunch.line_charge_density)\n Ex, Ey = self.grid.gradient(-phi)\n self.fields[:, 0] = self.grid.interpolate(Ex, self.bunch.positions)\n self.fields[:, 1] = self.grid.interpolate(Ey, self.bunch.positions)", "def electric_field(self, xyz_m, xyz_n=None):\n\n xyz_m = check_xyz_dim(xyz_m)\n if np.any(xyz_m[..., -1] > 0):\n raise ValueError(\n f\"z value must be less than or equal to 0 in a halfspace, got {(xyz_m[..., -1])}\"\n )\n\n if xyz_n is not None:\n xyz_n = check_xyz_dim(xyz_n)\n 
if np.any(xyz_n[..., -1] > 0):\n raise ValueError(\n f\"z value must be less than or equal to 0 in a halfspace, got {(xyz_n[..., -1])}\"\n )\n\n em = self._a.electric_field(xyz_m) - self._b.electric_field(xyz_m)\n\n if xyz_n is not None:\n en = self._a.electric_field(xyz_n) - self._b.electric_field(xyz_n)\n e = em - en\n return e\n else:\n return em", "def particleCharge(self):\n return self.params['particleCharge']", "def make_field(self):\n def field_func(m):\n return self.hext + field.demagnetization(m, self.Nd)\n self.field = field_func", "def getFov(self):\n return self.light.node().getLens().getFov()", "def reflect_ghost(self, p0):\n # Instead of self.p1, one could take any point on the line p1--p2.\n dist = self.p1 - p0\n alpha = numpy.einsum(\"ij, ij->i\", dist, self.mirror_edge)\n # q is sits at the perpendicular intersection of the reflection\n q = dist - (alpha / self.beta)[:, None] * self.mirror_edge\n return p0 + 2 * q", "def external_field(self):\n # TODO: return curl(A) for non-homogeneous external_field\n A = self.external_vector_potential\n if A is not None:\n Ax, Ay = A\n # TODO: check expression below\n return (- np.diff(Ax, axis=1) * cfg.idy\n + np.diff(Ay, axis=0) * cfg.idx)\n else:\n return None", "def get_force(self):\n displ = self.get_displ()\n equil = displ / np.linalg.norm(displ) * self.L0\n return self.k * (displ - equil)", "def value(self):\n nd1 = super().nd1()\n nd2 = super().nd2()\n f1 = nd1 * self.s\n f2 = nd2 * self.x * math.e ** (-self.rf * self.t)\n return f1 - f2", "def get_velocity(self):\n return self.momentum/self.mass", "def electric_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber * r\n ikr = 1j * kr\n\n front_term = (\n (self.current * self.length) / (4 * np.pi * self.sigma * r**3) *\n np.exp(-ikr)\n )\n symmetric_term = (\n spatial.repeat_scalar(self.dot_orientation(dxyz)) * dxyz *\n (-kr**2 + 3*ikr + 3) / r**2\n )\n oriented_term = (\n (kr**2 - ikr - 1) *\n np.kron(self.orientation, np.ones((dxyz.shape[0], 1)))\n )\n return front_term * (symmetric_term + oriented_term)", "def HalfReactionDeltaGPrime(self):\n dg_prime = self.DeltaGPrime()\n delta_electrons = self._GetElectronDiff() \n return dg_prime + constants.F * delta_electrons * \\\n self.aq_params.e_reduction_potential", "def fieldGradient(self):\n return self.params['fieldGradient']", "def getValueFromFieldname(self,fieldname):\n if hasattr(self,fieldname): #Standard attributes.\n value = getattr(self,fieldname)\n if not isinstance(value,Cartesian3DVector):\n return value\n if fieldname == \"E\": #Interprets E as energy\n return self.getEnergy()\n momentum_direction = fieldname.replace(\"p\",\"\")\n velocity_direction = fieldname.replace(\"v\",\"\")\n if fieldname.startswith(\"p\") and momentum_direction in [\"x\",\"y\",\"z\"]:\n return getattr(self.p,momentum_direction)\n if fieldname.startswith(\"v\") and velocity_direction in [\"x\",\"y\",\"z\"]:\n return getattr(self.v,velocity_direction)\n elif fieldname in [\"x\",\"y\",\"z\"]:\n return getattr(self.x,fieldname)\n raise Exception(\"The given field, \"+fieldname+\", is not defined for the particle.\")", "def collide(self, actor, other, current, collisions, floatx, floaty):\n return floatx, floaty", "def calculate_marginal(self):\n self.marginal_ray=beam_field()\n m=self.marginal_ray\n m.U=np.array([[[0,0,1]]])\n m.Q_p=np.array([[[0,self.entrance_pupil,0]]])\n m.propagate(self.surfaces)", "def compute_swimming_velocity(particle, fieldset, time):\n if 
particle.active == 1:\n particle.u_swim = particle.vmax * (1-particle.hab) * cos(particle.theta)\n particle.v_swim = particle.vmax * (1-particle.hab) * sin(particle.theta)", "def forward(self) -> Vec:\n return (self.emitters[0][1] - self.emitters[0][0]).norm()", "def mass_energy(particle: Particle, mass_numb: Optional[Integral] = None) -> u.J:\n return particle.mass_energy", "def ft_raw_force(self):\r\n return self._arm.ft_raw_force", "def magnetometer(self):\n self.com.reset_input_buffer()\n self.com.write(self.HEADER + self.MAG + self.END)\n header = self.com.read(1)\n if header != self.HEADER:\n print \"Got bad header from Arduino\"\n raise ArduinoError()\n data = ''\n while len(data) < 15:\n read_data = self.com.read(1)\n if len(read_data) != 1:\n print \"Error reading from Arduino\"\n raise ArduinoError()\n data += read_data\n if read_data == self.END:\n break\n print \"Arduino mag data:\", data\n mag_x = int(data[:data.index(',')])\n mag_y = int(data[data.index(',') + 1:-1])\n return mag_x, mag_y", "def compute_rf_field(self, r):\r\n\t\tE = np.zeros((3))\r\n\t\tfor nam, e in self.rf_electrode_list:\r\n\t\t\tE += e.compute_electric_field(r)\r\n\t\treturn E", "def molecule(self):\n return self._molecule", "def dRdE_magnetic(E, m_x, mu_x, target, vlag=232.0, sigmav=156.0, vesc=544.0):\n \n A = Avals[target]\n \n #See Eq. 62 of https://arxiv.org/pdf/1307.5955.pdf, but note\n #that we're using some different normalisations for the operators\n #so there are some extra factors of m_x and m_p lurking around...\n \n amu = 931.5e3 # keV\n q1 = np.sqrt(2*A*amu*E) #Recoil momentum in keV\n \n alpha = 0.007297\n e = np.sqrt(4*np.pi*alpha)\n m_p = 0.9315\n \n #Proton and neutron g-factors\n gp = 5.59\n gn = -3.83\n \n #Bohr Magneton\n #Tesla = 194.6*eV**2 # Tesla in natural units (with e = sqrt(4 pi alpha))\n #muB = 5.7883818e-5*eV/Tesla # Bohr magneton\n mu_B = 297.45 #GeV^-1 (in natural units (with e = sqrt(4 pi alpha)))\n\n cp = [E*0.0 for i in range(11)]\n cn = [E*0.0 for i in range(11)]\n \n #Operator 1\n cp[0] = e*(mu_x*mu_B)/(2.0*m_x)\n \n #Operator 5\n cp[4] = 2*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n \n #Operator 4\n cp[3] = gp*e*(mu_x*mu_B)/m_p\n cn[3] = gn*e*(mu_x*mu_B)/m_p\n \n #Operator 6\n cp[5] = -gp*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n cn[5] = -gn*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n\n return dRdE_NREFT(E, m_x, cp, cn, target, vlag, sigmav, vesc)", "def index_of_refraction(self):\n return self.microsphere.index_of_refraction(self.wavelength)" ]
[ "0.67325306", "0.65006876", "0.64733946", "0.64133286", "0.61525905", "0.6089795", "0.594535", "0.5899009", "0.58383423", "0.5798657", "0.5736236", "0.57189417", "0.5650898", "0.5645273", "0.5644613", "0.55965346", "0.5590425", "0.5575774", "0.5534189", "0.5524397", "0.5512045", "0.55114114", "0.5504112", "0.54844147", "0.54765743", "0.5461062", "0.54220355", "0.5379867", "0.5334562", "0.53335094", "0.5317446", "0.5313639", "0.52886415", "0.52763784", "0.5275999", "0.5275999", "0.5250649", "0.5250649", "0.52382314", "0.5212677", "0.5211973", "0.52062345", "0.51815397", "0.5127893", "0.51127416", "0.5110817", "0.51018", "0.5098571", "0.5061058", "0.5059621", "0.5058459", "0.50553006", "0.5043271", "0.50383276", "0.5029511", "0.50205636", "0.50162405", "0.5010365", "0.5009386", "0.5005185", "0.5002381", "0.50017536", "0.49970767", "0.49952197", "0.4982884", "0.49820587", "0.497771", "0.4974258", "0.49595195", "0.49534428", "0.49438527", "0.49435303", "0.49360546", "0.49355868", "0.49325663", "0.49314287", "0.49298802", "0.49297974", "0.492601", "0.49124512", "0.49119747", "0.49046606", "0.49045733", "0.49042326", "0.49012", "0.4899335", "0.4892502", "0.48866412", "0.48827815", "0.48806107", "0.48779148", "0.48717675", "0.48700088", "0.48684603", "0.48668447", "0.48657244", "0.48618808", "0.4860952", "0.4836214", "0.48358512" ]
0.6843076
0
Returns the initialized component manager. This is used as a FastAPI dependency and called for every request.
def get_component_manager( token: str = Depends(get_api_token), ) -> ComponentOperations: session = BaseUrlSession(base_url=CONTAXY_API_ENDPOINT) session.headers = {"Authorization": f"Bearer {token}"} return ComponentClient(session)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_manager():\n\n return multiprocessing.Manager()", "def GetManager(self):\r\n\r\n return self.manager", "def get_manager():\n return __manager__", "def getManager(self):\n return self._manager", "def core(self):\n return CoreManager(self)", "def manager(self):\n if not self._manager:\n self._manager = TwistedEventLoopManager()\n\n return self._manager", "def manager(self):\n if not self._manager:\n self._manager = TwistedEventLoopManager()\n\n return self._manager", "def cluster_manager(self):\n # Lazily instantiate the cluster manager the first time it is asked for.\n if not hasattr(self, '_cluster_manager'):\n if self._cluster_engine:\n self._cluster_manager = self._cluster_engine.create_manager(\n self._username,\n self._tenancy\n )\n else:\n self._cluster_manager = None\n # If there is still no cluster manager, clusters are not supported\n if not self._cluster_manager:\n raise errors.UnsupportedOperationError(\n 'Clusters are not supported for this tenancy.'\n )\n return self._cluster_manager", "def get_instance(cls):\n global FW_MANAGER_API\n if not FW_MANAGER_API:\n FW_MANAGER_API = cls()\n return FW_MANAGER_API", "def getServiceManager( cHost=\"localhost\", cPort=\"2002\" ):\n global goServiceManager\n global pythonloader\n if not goServiceManager:\n # Get the uno component context from the PyUNO runtime\n oLocalContext = uno.getComponentContext()\n # Create the UnoUrlResolver on the Python side.\n\n goServiceManager=oLocalContext.ServiceManager\n\n return goServiceManager", "def modules(self):\n return ModuleManager(self)", "def getAPIsManager(self):\n return self.apisManager", "def get_manager(api_version=None):\n from manager import get_keystone_manager\n return get_keystone_manager(get_local_endpoint(), get_admin_token(),\n api_version)", "def create_allcomponents(self):\n\n # we store all components in a list/hash which we iterate for startup/shutdown/dumps debugging, and which can be used to lookup components\n self.components = MDictList()\n\n # setup log manager helper early so that log manager can receive messages (and queue them until startup)\n self.createappendcomp('logmanager', mlogger.MewloLogManager)\n\n # now update site state (log manager should catch this)\n self.set_statelabel(mconst.DEF_SITESTATE_INITIALIZE_START)\n\n # create (non-db-persistent) site settings -- these are set by configuration at runtime\n self.settings = self.createappendcomp('settings', MewloSettings)\n\n # database manager\n self.createappendcomp('dbmanager', mdbmanager_sqlalchemy.MewloDatabaseManagerSqlA)\n\n # component registry\n self.createappendcomp('registrymanager', mregistry.MewloRegistryManager)\n\n # signal dispatcher\n self.createappendcomp('signalmanager', msignal.MewloSignalManager)\n\n # rbac permission manager\n self.createappendcomp('rbacmanager', mrbac.MewloRbacManager)\n\n # create persistent(db) pack settings\n self.createappendcomp('packsettings', mdbsettings_pack.MewloSettingsDb_Pack)\n\n # collection of mewlo addon packs\n self.createappendcomp('packmanager', mpackmanager.MewloPackManager)\n\n # site addon manager\n #self.createappendcomp('siteaddonmanager', msiteaddon.MewloSiteAddonManager)\n\n # route manager\n self.createappendcomp('routemanager', mroute.MewloRouteManager)\n\n # navnode manager\n self.createappendcomp('navnodemanager', mnav.NavNodeManager)\n\n # template manager\n self.createappendcomp('templatemanager', mtemplate.MewloTemplateManager)\n\n # asset and alias manager\n self.createappendcomp('assetmanager', massetmanager.MewloAssetManager)\n\n # 
template helper (this is available inside template/views and provides helper functions like navigation menus, etc.)\n self.createappendcomp('templatehelper', mtemplatehelper.MewloTemplateHelper)\n\n # session manager\n self.createappendcomp('sessionmanager', msessionmanager.MewloSessionManager)\n\n # verification manager\n self.createappendcomp('verificationmanager', mverificationmanager.MewloVerificationManager)\n\n # user manager\n self.createappendcomp('usermanager', musermanager.MewloUserManager)\n\n # mail manager\n self.createappendcomp('mailmanager', mmailmanager.MewloMailManager)", "def new_manager() -> SyncManager:\n return Manager()", "def petsc_manager():\n return PetscManager()", "def do_component_init(self):\n logger.debug(\"RwdtstaskletPython: do_component_init function called\")\n component_handle = RwTaskletPlugin.ComponentHandle()\n return component_handle", "def plugins_get_mgr():\n global pluginmgr\n return pluginmgr", "def get_collection_manager(self, *args, **kwargs):\n return CollectionManager(self, *args, **kwargs)", "def get_entity_manager(self):\n return self.game.entity_manager", "def _init_component(self):\n setup_info = self._serializer.read_msg()\n\n pid = os.getpid()\n self._serializer.send_msg({'pid': pid})\n self._create_pidfile(setup_info['pidDir'], pid)\n\n return StormConfig(setup_info['conf']), setup_info['context']", "def name(self):\n return \"component_manager\"", "def factory_manager():\n global _FACTORY_MANAGER\n\n if _FACTORY_MANAGER:\n return _FACTORY_MANAGER\n\n _FACTORY_MANAGER = Factories()\n\n return _FACTORY_MANAGER", "def get_instance(cls):\n global DNS_MANAGER_API\n if not DNS_MANAGER_API:\n DNS_MANAGER_API = cls()\n return DNS_MANAGER_API", "def _configure_manager(self):\n self._manager = CloudDatabaseManager(self,\n resource_class=CloudDatabaseInstance, response_key=\"instance\",\n uri_base=\"instances\")\n self._flavor_manager = BaseManager(self,\n resource_class=CloudDatabaseFlavor, response_key=\"flavor\",\n uri_base=\"flavors\")\n self._backup_manager = CloudDatabaseBackupManager(self,\n resource_class=CloudDatabaseBackup, response_key=\"backup\",\n uri_base=\"backups\")", "def get_extension_manager(self):\n return get_extension_manager()", "def pm(self) -> ControllerPropertyManager:\n return self._component_pm", "def setup_component(self):\n self.conf, self.context = self._init_component()\n self.initialize()", "def load(self):\n # Proceed only if singleton instance has been created\n if self.initialized:\n # The cache manager will work on manifest and cache tasks on an\n # in-process basis as load() is only called during startup from\n # the server process.\n if self.is_server_process:\n # Remove all existing manifest files from previous processes\n self._remove_all_manifest_files()\n\n # Start the watchdog if it's not alive, prevents redundant starts\n if not self.observer.is_alive():\n self.observer.start()\n\n # Fetch all component catalog instances and trigger their add to the\n # component cache if this is not already happening (it seems some server\n # test fixtures could be loading the server extensions multiple times).\n if not self.cache_manager.is_refreshing():\n self.refresh()", "def getServiceManager( cHost=\"localhost\", cPort=\"8100\" ):\n global goServiceManager\n if not goServiceManager:\n # Get the uno component context from the PyUNO runtime\n oLocalContext = uno.getComponentContext()\n # Create the UnoUrlResolver on the Python side.\n oLocalResolver = oLocalContext.ServiceManager.createInstanceWithContext(\n 
\"com.sun.star.bridge.UnoUrlResolver\", oLocalContext )\n # Connect to the running OpenOffice.org and get its context.\n oContext = oLocalResolver.resolve( \"uno:socket,host=\" + cHost + \",port=\" + cPort + \";urp;StarOffice.ComponentContext\" )\n # Get the ServiceManager object\n goServiceManager = oContext.ServiceManager\n return goServiceManager", "def GetAuiManager(self):\r\n\r\n return self._mgr", "def _get_tag_manager():\n\n class_path = getattr(settings, 'ES_REACT_RENDER_TAG_MANAGER', '')\n if not class_path:\n return ESModulesReactTagManager\n\n return import_string(class_path)", "def get_manager(self, name):\n\n if name == \"control\":\n manager = self._control_manager\n elif name == \"alarm\":\n manager = self._alarm_manager\n elif name == \"state\":\n manager = self._machine_manager\n else:\n manager = self._function_manager\n\n return manager", "def init_login_manager():\n login_manager = LoginManager()\n login_manager.init_app(app)\n return login_manager", "def get_task_manager(task_manager=None):\n global _task_manager\n if _task_manager is None:\n if task_manager is None:\n _task_manager = TaskManagerImpl()\n else:\n constructor = dynamic_import(task_manager)\n _task_manager = constructor()\n\n return _task_manager", "def get_data_manager(self):\n\n return self._data_manager", "def fusion_api_get_deployment_manager(self, uri=None, param='', api=None, headers=None):\n return self.dep_mgr.get(uri=uri, api=api, headers=headers, param=param)", "def auth(self):\n return AuthManager(self)", "def _setup_shared_list(self):\n self._manager = Manager().__enter__()\n self._shared_list = self._manager.list()\n return self", "def init_client_manager(ip, port, authkey):\n class ServerQueueManager(SyncManager):\n pass\n\n ServerQueueManager.register('get_trmanager_plmanager_queue')\n ServerQueueManager.register('get_player_trmanager_queue')\n\n manager = ServerQueueManager(address=(ip, port), authkey=authkey)\n print('Connecting queue to %s:%d ...' 
% (ip, port))\n manager.connect()\n\n print('Connected.')\n return manager", "def __components__():\n # Get the component registry of the active application.\n registry = context.app.component_registry\n # A shortcut: return cached components.\n if registry.components is not None:\n return registry.components\n # A list of `Component` subclasses defined in modules exported by addons.\n components = [Component]\n idx = 0\n while idx < len(components):\n for subclass in components[idx].__subclasses__():\n # Skip realizations.\n if issubclass(subclass, Realization):\n continue\n # Check if the component belongs to the current application.\n if subclass.__enabled__():\n components.append(subclass)\n idx += 1\n # Cache and return the components.\n registry.components = components\n return components", "def _configure_manager(self):\n self._manager = CloudLoadBalancerManager(self,\n resource_class=CloudLoadBalancer,\n response_key=\"loadBalancer\", uri_base=\"loadbalancers\")", "def _GetComponents(\n self,\n ) -> Dict[str, Dict[str, Union[PrimitiveSchema, EnumSchema, MessageSchema]]]:\n self._CreateSchemas()\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n # The `Components Object` `components` field of the root `OpenAPI Object`.\n return {\n \"schemas\":\n cast(Dict[str, Union[PrimitiveSchema, EnumSchema, MessageSchema]],\n self.schema_objs),\n }", "def getInstance():\n # TKC - Removed singleton since component and command instances were clashing.\n # if(GenFactory.__instance is None) :\n # GenFactory.__instance = GenFactory()\n #\n # return GenFactory.__instance\n return GenFactory()", "def _get_addon_manager(hass: HomeAssistant) -> AddonManager:\n addon_manager: AddonManager = get_addon_manager(hass)\n if addon_manager.task_in_progress():\n raise ConfigEntryNotReady\n return addon_manager", "def GetDocManager(self):\r\n return self._docManager", "def resource_manager():\n return visa.ResourceManager()", "def get_instance():\n if PersistenceManager._instance is None:\n PersistenceManager._instance = PersistenceManager()\n return PersistenceManager._instance", "def load_components(self):\n self_class = self.__class__\n\n # Lazy load component loader\n try:\n component_loader = self_class._component_loader\n\n if component_loader.component_tags != self_class.component_tags:\n raise AttributeError(\"Mismatch in component tags, reloading\")\n\n except AttributeError:\n component_loader = ComponentLoader(*self_class.component_tags)\n self_class._component_loader = component_loader\n\n class_name = self_class.__name__\n definitions = self_class._definitions\n\n # Lazy load definitions\n try:\n platform_definition = definitions[class_name]\n\n except KeyError:\n resources = ResourceManager[class_name]\n platform = ResourceManager.environment\n\n try:\n definition = resources[self_class.definition_name]\n\n except TypeError:\n raise FileNotFoundError(\"Could not find definition file for {}\".format(class_name))\n\n full_path = ResourceManager.get_absolute_path(definition)\n\n definition_sections = ConfigObj(full_path)\n platform_definition = definition_sections[platform]\n definitions[class_name] = platform_definition\n\n component_result = component_loader.load(self, platform_definition)\n\n # Load components\n for component_tag, component in component_result.components.items():\n setattr(self, component_tag, component)\n\n self._component_result = component_result", "def fusion_api_get_hypervisor_manager(self, uri=None, 
param='', api=None, headers=None):\n return self.hypervisor_mgr.get(uri=uri, api=api, headers=headers, param=param)", "def vera_component_factory():\n with patch(\"pyvera.init_controller\") as init_controller_mock:\n yield ComponentFactory(init_controller_mock)", "def _init_app(self):\n\n self._app = FastAPI(**self._app_kws)\n\n for rt, kwargs in self._app_routers:\n self._app.include_router(rt, **kwargs)\n\n self._app.dependency_overrides[get_dataset] = lambda: self._obj\n self._app.dependency_overrides[get_cache] = lambda: self.cache\n\n return self._app", "def ioserver(self) -> _IOManager:\n return self._iomanager", "def as_manager(cls):\n manager = DefaultManager.from_queryset(cls)()\n manager._built_with_as_manager = True\n return manager", "def init_mc(self) -> ManagedCluster:\n # Initialize a ManagedCluster object with mandatory parameter location.\n mc = self.models.ManagedCluster(\n location=self.context.get_location(),\n )\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc", "def global_service_collection():\n\tglobal global_lsc\n\t# If this is the first call then the object is not yet created\n\tif not global_lsc:\n\t\t# Create the global object\n\t\tglobal_lsc = LadonServiceCollection()\n\treturn global_lsc", "def Get(self):\n\n if not hasattr(self, \"_instance\"):\n self._instance = PersistenceManager()\n\n return self._instance", "def __new__(cls):\n if not MongoClientManager.__instance:\n MongoClientManager.__instance = MongoClientManager.__MongoClientManager()\n return MongoClientManager.__instance", "def init(self):\n self._service_store = ServiceStore(self.driver, self.network)\n self._emulator = NetworkEmulator(self.store, self.driver)", "def get_instance():\n if CameraManager._instance_ is None:\n CameraManager()\n return CameraManager._instance_", "def get_app():\n return ApplicationContainer()", "async def init(self):\n self.init_connection_params()\n self._pool = await self._create_pool()\n\n return self", "def _initComponent(self):\n\n self.optimizer = self._initOptimizer()\n self.scheduler = self._initScheduler()", "def GetCachedImportManager(cls, element):\n import_mngr = _CLASS_NAME_TO_IMPORT_MANAGER.get(element)\n if not import_mngr:\n # This class does not have an import manager yet. Instantiate it.\n import_mngr = cls(element)\n _CLASS_NAME_TO_IMPORT_MANAGER[element] = import_mngr\n return import_mngr", "def plugins(self):\n return PluginManager(self)", "def fusion_api_get_fabric_manager(self, uri=None, param='', api=None, headers=None):\n return self.fabricmanager.get(uri=uri, api=api, headers=headers, param=param)", "def _get_package_manager():\n\n cosmos_url = _get_cosmos_url()\n cosmos_manager = cosmospackage.Cosmos(cosmos_url)\n if cosmos_manager.enabled():\n return cosmos_manager\n else:\n msg = (\"This version of the DCOS CLI is not supported for your \"\n \"cluster. 
Please downgrade the CLI to an older version: \"\n \"https://dcos.io/docs/usage/cli/update/#downgrade\"\n )\n raise DCOSException(msg)", "def init():\n # make sure pool is initialized\n global pool\n if not pool:\n pool = aiohttp.ClientSession(\n connector=aiohttp.TCPConnector(limit=config.MAX_PARALLEL_REQUESTS),\n raise_for_status=False,\n trust_env=True,\n auth=aiohttp.BasicAuth( config.CACHE_USERNAME, config.CACHE_PASSWORD ),\n )", "def __init__(self, processManager, clientManager):\n self.processManager = processManager\n self.clientManager = clientManager\n self.engine_types = {}\n self.engine_allocations = {}\n self.engine_instances = {}", "def get(name):\r\n return componentManager.components[name]", "def _get_component(self):\n return AssemblyComponent.ByKeys(**self.db_key)", "def Initialize(self):\n return _gmat_py.EphemManager_Initialize(self)", "def setup_manager(self) -> None:\n\n #Clean out the process list.\n self.process_list.clear()\n for _ in range(self.num_processes):\n p = Process(target=self.multiprocessing_job,\n args=(self.process_job,))\n self.process_list.append(p)\n self.restart_required = False", "def __init__(self):\n self.module_params = utils.get_unity_management_host_parameters()\n self.module_params.update(get_unity_filesystem_parameters())\n\n mutually_exclusive = [['filesystem_name', 'filesystem_id'],\n ['pool_name', 'pool_id'],\n ['nas_server_name', 'nas_server_id'],\n ['snap_schedule_name', 'snap_schedule_id']]\n\n required_one_of = [['filesystem_name', 'filesystem_id']]\n\n # initialize the Ansible module\n self.module = AnsibleModule(\n argument_spec=self.module_params,\n supports_check_mode=False,\n mutually_exclusive=mutually_exclusive,\n required_one_of=required_one_of)\n\n if not HAS_UNITY_SDK:\n self.module.fail_json(msg=\"Ansible modules for Unity require the\"\n \" Unity python library to be \"\n \"installed. 
Please install the library \"\n \"before using these modules.\")\n\n if UNITY_SDK_VERSION_CHECK and not UNITY_SDK_VERSION_CHECK[\n 'supported_version']:\n err_msg = UNITY_SDK_VERSION_CHECK['unsupported_version_message']\n LOG.error(err_msg)\n self.module.fail_json(msg=err_msg)\n\n self.unity_conn = utils.get_unity_unisphere_connection(\n self.module.params, application_type)", "def setup(self):\n\n import os\n\n from drift.core import manager\n\n if not os.path.exists(self.product_directory):\n raise RuntimeError(\"Products do not exist.\")\n\n # Load ProductManager and Timestream\n pm = manager.ProductManager.from_config(self.product_directory)\n\n return pm", "def resolve_dataset_manager() -> DatasetManager:\n _dataset_manager_class = conf.getimport(\n section=\"core\",\n key=\"dataset_manager_class\",\n fallback=\"airflow.datasets.manager.DatasetManager\",\n )\n _dataset_manager_kwargs = conf.getjson(\n section=\"core\",\n key=\"dataset_manager_kwargs\",\n fallback={},\n )\n return _dataset_manager_class(**_dataset_manager_kwargs)", "def getCore(cls):\n if RegistryCore in SingletonMetaClass._instances:\n return SingletonMetaClass._instances[RegistryCore]\n else:\n dummyCore = RegistryCore() # Which will persist because it is a singleton\n return dummyCore", "def manager(self):\n if \"manager\" in self._prop_dict:\n if isinstance(self._prop_dict[\"manager\"], OneDriveObjectBase):\n return self._prop_dict[\"manager\"]\n else :\n self._prop_dict[\"manager\"] = DirectoryObject(self._prop_dict[\"manager\"])\n return self._prop_dict[\"manager\"]\n\n return None", "def load_components(self, on_initialize=False):\n for name, component_data in self.components.items():\n start_on_initialize = component_data.get('on_initialize', False)\n if start_on_initialize == on_initialize:\n component_data.pop('on_initialize', None)\n\n # inject parent module in component\n if not component_data.get('options', None):\n component_data['options'] = {}\n\n component_data['options']['module'] = self\n component = RonObject.instanceObject(component_data)\n setattr(self, name, component)", "def get_client():\n\n return MongoClientManager().client", "def load(self):\n server = Server(manager=self.manager, broker=self.broker, housekeeper=self.housekeeper)\n return server.app", "def get_device_manager(device_model: str):\n return _get_device_handler_or_manager(device_model, True)", "def components(self):\r\n return [JSONComponent(c) for c\r\n in self.container.get(\"ComponentInstances\", [])]", "def get_global_adaptation_manager():\n global adaptation_manager\n return adaptation_manager", "def initialize_registry(self):\n client = self.application.__init_blockchain_client__()\n response = client.initialize()\n client.close()\n\n return response", "def init():\n catalog = model.newCatalog()\n return catalog", "def init_services(self):\n service_prefix = rospy.get_name() + \"/\"\n\n self._request_components_serv = rospy.Service(service_prefix +\n 'list_components',\n ListComponents,\n self.get_components)\n self._request_fields_serv = rospy.Service(service_prefix +\n 'list_fields',\n ListFields,\n self.get_fields)\n self._request_values_serv = rospy.Service(service_prefix +\n 'request_values',\n RequestValues,\n self.get_values)\n self._unsubscribe_values_serv = rospy.Service(service_prefix +\n 'unsubscribe_values',\n UnsubscribeValues,\n self.unsubscribe_values)", "def _initialize_processors(self):\n # If the queue connection is TLS we need to update connection params with\n # values specified at plugin creation\n 
connection_info = self._instance.queue_info[\"connection\"]\n if \"ssl\" in connection_info:\n if self._config.ca_verify:\n connection_info[\"ssl\"][\"ca_verify\"] = self._config.ca_verify\n\n if self._config.ca_cert:\n connection_info[\"ssl\"][\"ca_cert\"] = self._config.ca_cert\n\n if self._config.client_cert:\n connection_info[\"ssl\"][\"client_cert\"] = self._config.client_cert\n\n # Each RequestProcessor needs a RequestConsumer, so start with those\n common_args = {\n \"connection_type\": self._instance.queue_type,\n \"connection_info\": connection_info,\n \"panic_event\": self._shutdown_event,\n \"max_reconnect_attempts\": self._config.mq.max_attempts,\n \"max_reconnect_timeout\": self._config.mq.max_timeout,\n \"starting_reconnect_timeout\": self._config.mq.starting_timeout,\n }\n admin_consumer = RequestConsumer.create(\n thread_name=\"Admin Consumer\",\n queue_name=self._instance.queue_info[\"admin\"][\"name\"],\n max_concurrent=1,\n **common_args\n )\n request_consumer = RequestConsumer.create(\n thread_name=\"Request Consumer\",\n queue_name=self._instance.queue_info[\"request\"][\"name\"],\n max_concurrent=self._config.max_concurrent,\n **common_args\n )\n\n # Both RequestProcessors need an updater\n updater = HTTPRequestUpdater(\n self._ez_client,\n self._shutdown_event,\n max_attempts=self._config.max_attempts,\n max_timeout=self._config.max_timeout,\n starting_timeout=self._config.starting_timeout,\n )\n\n # Finally, create the actual RequestProcessors\n admin_processor = AdminProcessor(\n target=self,\n updater=updater,\n consumer=admin_consumer,\n plugin_name=self.unique_name,\n max_workers=1,\n )\n request_processor = RequestProcessor(\n target=self._client,\n updater=updater,\n consumer=request_consumer,\n validation_funcs=[self._correct_system, self._is_running],\n plugin_name=self.unique_name,\n max_workers=self._config.max_concurrent,\n resolver=ResolutionManager(easy_client=self._ez_client),\n system=self._system,\n )\n\n return admin_processor, request_processor", "def get_init():\n return _module_init()", "def service():\n service = Mock()\n service.log_dir = ''\n service.persistent_root = ''\n service.context.globals = {\"cluster_size\": 1}\n service.log_config_file = ''\n\n return service", "def system(self):\n try:\n return self._system\n except AttributeError:\n raise AttributeError('You must initialize the system with '\n 'createSystem before accessing the cached '\n 'object.')", "def calibration_manager(self):\n return self._calibrationFileManager", "def getFeatureManager(address=None):\n return __mgr_cache__[address]", "def _retrieve_manager(provider_id):\n provider = _retrieve_provider(provider_id)\n MachineManager = provider.get_provider_manager()\n return MachineManager(provider)", "def __init__(self, helper=None):\n self.helper = helper\n self.sp_manager = SPManager(helper.handle, helper.service_profile)", "def GetManager(window):\r\n \r\n if not isinstance(wx.GetTopLevelParent(window), AuiFloatingFrame):\r\n if isinstance(window, auibar.AuiToolBar):\r\n return window.GetAuiManager()\r\n \r\n evt = AuiManagerEvent(wxEVT_AUI_FIND_MANAGER)\r\n evt.SetManager(None)\r\n evt.ResumePropagation(wx.EVENT_PROPAGATE_MAX)\r\n\r\n if not window.GetEventHandler().ProcessEvent(evt):\r\n return None\r\n\r\n return evt.GetManager()", "def mock_data_manager(components):\n dm = Mock()\n dm.components = components\n dm.fixed_components = []\n return dm", "def getProcessManager(self): \n \n return self.procmgr", "async def init(self) -> None:", "async def init(self) -> 
None:" ]
[ "0.68361783", "0.6697513", "0.66576326", "0.660257", "0.6231092", "0.61051804", "0.61051804", "0.6104129", "0.5990772", "0.5973677", "0.5936767", "0.5933692", "0.588824", "0.5797466", "0.57823783", "0.5773982", "0.5772796", "0.57626957", "0.5757743", "0.57147866", "0.5693136", "0.56847143", "0.56443584", "0.5634734", "0.5628475", "0.5609985", "0.55819297", "0.5572467", "0.557196", "0.5566214", "0.5554606", "0.55198276", "0.5517591", "0.5453152", "0.5447934", "0.5443033", "0.5419795", "0.53908485", "0.536558", "0.53576833", "0.5350365", "0.53492844", "0.5321018", "0.53103477", "0.5300616", "0.52794826", "0.5277197", "0.5271696", "0.52715427", "0.5270445", "0.52508646", "0.5246157", "0.5234938", "0.523472", "0.5227483", "0.5221426", "0.52075803", "0.5206728", "0.52028203", "0.5199973", "0.5190858", "0.51880515", "0.5188038", "0.51850194", "0.5173011", "0.5172442", "0.5151414", "0.51478606", "0.5122748", "0.508832", "0.50789815", "0.50787425", "0.50783557", "0.50780684", "0.50727355", "0.50685406", "0.5055046", "0.50469595", "0.5025914", "0.50184566", "0.5016462", "0.5015431", "0.50075525", "0.5005516", "0.5005365", "0.49998823", "0.4995954", "0.49895108", "0.49848706", "0.49768206", "0.497284", "0.49643537", "0.49634254", "0.49612838", "0.4958863", "0.4946464", "0.49419287", "0.4940719", "0.49357858", "0.49357858" ]
0.69490683
0
Return a boolean mask for a circular sector. The start/stop angles in `angle_range` should be given in clockwise order.
def sector_mask(shape, centre, radius, angle_range):
    x,y = np.ogrid[:shape[0],:shape[1]]
    cx,cy = centre
    tmin,tmax = np.deg2rad(angle_range)

    # ensure stop angle > start angle
    if tmax < tmin:
        tmax += 2*np.pi

    # convert cartesian --> polar coordinates
    r2 = (x-cx)*(x-cx) + (y-cy)*(y-cy)
    theta = np.arctan2(x-cx, y-cy) - tmin

    # wrap angles between 0 and 2*pi
    theta %= (2*np.pi)

    # circular mask
    circmask = r2 <= radius*radius

    # angular mask
    anglemask = theta <= (tmax-tmin)

    return circmask*anglemask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sector_mask(shape,centre,radius,angle_range):\n\n x,y = np.ogrid[:shape[0],:shape[1]]\n cx,cy = centre\n tmin,tmax = np.deg2rad(angle_range)\n\n # ensure stop angle > start angle\n if tmax < tmin:\n tmax += 2*np.pi\n\n # convert cartesian --> polar coordinates\n r2 = (x-cx)*(x-cx) + (y-cy)*(y-cy)\n theta = np.arctan2(x-cx,y-cy) - tmin\n\n # wrap angles between 0 and 2*pi\n theta %= (2*np.pi)\n\n # circular mask\n circmask = r2 <= radius*radius\n\n # angular mask\n anglemask = theta <= (tmax-tmin)\n\n return circmask*anglemask", "def sector_mask(shape,centre,radius,angle_range):\n\n x,y = np.ogrid[:shape[0],:shape[1]]\n cx,cy = centre\n tmin,tmax = np.deg2rad(angle_range)\n\n # ensure stop angle > start angle\n if tmax < tmin:\n tmax += 2*np.pi\n\n # convert cartesian --> polar coordinates\n r2 = (x-cx)*(x-cx) + (y-cy)*(y-cy)\n theta = np.arctan2(x-cx,y-cy) - tmin\n\n # wrap angles between 0 and 2*pi\n theta %= (2*np.pi)\n\n # circular mask\n circmask = r2 <= radius*radius\n\n # angular mask\n anglemask = theta <= (tmax-tmin)\n\n return circmask*anglemask", "def sector_mask(shape,centre,radius,angle_range):\n\n x,y = np.ogrid[:shape[0],:shape[1]]\n cx,cy = centre\n tmin,tmax = np.deg2rad(angle_range)\n\n # ensure stop angle > start angle\n if tmax < tmin:\n tmax += 2*np.pi\n\n # convert cartesian --> polar coordinates\n r2 = (x-cx)*(x-cx) + (y-cy)*(y-cy)\n theta = np.arctan2(x-cx,y-cy) - tmin\n\n # wrap angles between 0 and 2*pi\n theta %= (2*np.pi)\n\n # circular mask\n circmask = r2 <= radius*radius\n\n # angular mask\n anglemask = theta <= (tmax-tmin)\n\n return circmask*anglemask", "def sector_mask(shape,centre,radius1, radius2 = 0, angle_range = (0, np.pi)):\n\n x,y = np.ogrid[:shape[0],:shape[1]]\n cx,cy = centre\n tmin,tmax = angle_range\n\n # ensure stop angle > start angle\n if tmax < tmin:\n tmax += 2*np.pi\n\n # convert cartesian --> polar coordinates\n r2 = (x-cx)*(x-cx) + (y-cy)*(y-cy)\n theta = np.arctan2(x-cx,y-cy) - tmin\n\n # wrap angles between 0 and 2*pi\n theta %= (2*np.pi)\n\n # circular mask\n circmask = (r2 <= radius1*radius1) & (r2 >= radius2*radius2)\n\n # angular mask\n anglemask = theta <= (tmax-tmin)\n\n return circmask*anglemask", "def iscircle(a):\n if isarc(a):\n start=a[1][1] \n end=a[1][2]\n ## these are special, integer values that flag a true full\n ## circle.\n if start==0 and end==360:\n return True\n else:\n return False", "def create_circle_mask(img, center, radius):\n h, w = img.shape\n # Flipping center ordering here to account for differences in how opencv returns\n # coordinates (standard x,y) and how numpy wants them (r,c)\n act_center = np.array(center[::-1])\n r, c = np.ogrid[0:h, 0:w] - np.array(center[::-1])\n return (r ** 2 + c ** 2) < radius ** 2", "def source_arc(f, ctr, r_min, r_max, phi_min, phi_max, I0=1):\n\n phi = np.mod( 180 / np.pi * np.arctan2(f[1] - ctr[1], f[0] - ctr[0]), 360)\n \n phi_min = np.mod(phi_min, 360)\n phi_max = np.mod(phi_max, 360)\n\n if phi_min < phi_max:\n phi_mask = (phi >= phi_min) & (phi <= phi_max)\n else:\n phi_mask = (phi >= phi_min) | (phi <= phi_max)\n \n return phi_mask * source_annular(f, ctr, r_min, r_max, I0)", "def createCircularMask(shape, radius=4, center=None):\n w = shape[0]\n h = shape[1]\n if center is None: \n center = [int(w/2), int(h/2)]\n if radius is None:\n radius = min(center[0], center[1], w-center[0], h-center[1])\n X, Y = np.ogrid[:w, :h]\n dist2 = (X - center[0])**2 + (Y-center[1])**2\n mask = dist2 <= radius**2\n return mask", "def make_circle_mask(width, ratio):\n mask = 
np.zeros((width, width), dtype=np.float32)\n center = width // 2\n radius = ratio * center\n y, x = np.ogrid[-center:width - center, -center:width - center]\n mask_check = x * x + y * y <= radius * radius\n mask[mask_check] = 1.0\n return mask", "def circle_mask(width, ratio):\n # taken from Paul's code\n mask = np.zeros((width, width), dtype=np.float32)\n center = width // 2\n radius = ratio * center\n y, x = np.ogrid[-center:width - center, -center:width - center]\n mask_check = x * x + y * y <= radius * radius\n mask[mask_check] = 1.0\n return mask", "def _generate_circle_mask(center_y, center_x, radius):\n\n circle = draw.circle(center_y, center_x, radius)\n\n return circle", "def create_circular_mask(h, w, center=None, radius=None):\n\n if center is None: # use the middle of the image\n center = [int(w / 2), int(h / 2)]\n if radius is None: # use the smallest distance between the center and image walls\n radius = min(center[0], center[1], w - center[0], h - center[1])\n\n Y, X = np.ogrid[:h, :w]\n dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)\n\n mask = dist_from_center <= radius\n\n return mask", "def isinsidearcXY(c,p):\n\n x = c[0]\n r = c[1][0]\n if dist(x,p) > r:\n return False\n if iscircle(c):\n return True\n start = c[1][1]%360.0\n end = c[1][2]%360.0\n if end < start:\n end+= 360.0\n p2 = sub(p,x)\n ang = (atan2(p2[1],p2[0]) % pi2)*360/pi2\n\n if end <= 360.0:\n return (ang >= start and ang <= end)\n else:\n return ang >= start or ang <= (end-360.0)", "def create_circular_mask(\n cls,\n h: int,\n w: int,\n center: typing.Tuple = None,\n radius: int = None,\n ) -> np.array:\n\n if center is None: # use the middle of the image\n center = (int(w / 2), int(h / 2))\n # use the smallest distance between the center and image walls\n # if (radius is None):\n radius = min(center[0], center[1], w - center[0], h - center[1])\n\n Y, X = np.ogrid[:h, :w]\n dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)\n\n mask = dist_from_center <= radius\n return mask", "def circular_mask(arr_shape, r, x_offset=0, y_offset=0):\n assert len(arr_shape) == 2, 'Image is not 2-D'\n \n ny, nx = arr_shape\n assert nx > 1 and ny > 1, 'Image is too small'\n \n assert isinstance(r, (int, long)) and r > 0, 'Radius must be int > 0'\n \n xcen = np.round(0.5 * nx - 0.5 + x_offset).astype('int')\n ycen = np.round(0.5 * ny - 0.5 + y_offset).astype('int')\n \n x1, x2 = xcen - r, xcen + r\n y1, y2 = ycen - r, ycen + r\n \n assert y1 >= 0 and y2 < ny and x1 >= 0 and x2 < nx, \\\n 'Mask falls outside image bounds'\n \n y, x = np.ogrid[-r:r, -r:r]\n i = np.where(x**2 + y**2 <= r**2)\n \n a = np.zeros(arr_shape).astype('bool')\n a[y1:y2, x1:x2][i] = True\n \n return np.where(a)", "def circular(x, f, p=lambda x: True):\n y = collision_point(x, f, p)\n return p(y) and x == f(y)", "def circleMask(img, cir_x, cir_y, r, mode, filter=0):\n\n if not mode == 'interior' and not mode == 'exterior':\n print(mode, \"is not a supported mode. 
Please enter interior or exterior\")\n return 1\n\n #get the dimensions of the image\n n,m = img.shape\n\n #create an open grid for our image\n y,x = np.ogrid[0:n, 0:m]\n #operate on a copy of the image\n copyImg = img.copy()\n\n #get the x and y center points of our image\n center_x = cir_x\n center_y = cir_y\n\n #create a circle mask\n if mode == 'interior':\n circle_mask = (x-center_x)**2 + (y-center_y)**2 <= r**2\n elif mode == 'exterior':\n circle_mask = (x-center_x)**2 + (y-center_y)**2 >= r**2\n\n #black out anywhere within the circle mask\n copyImg[circle_mask] = [filter]\n copyImg[copyImg != filter] = [255-filter]\n\n return copyImg", "def in_circle(x0, y0, x, y, r):\n return ((x - x0) ** 2 + (y - y0) ** 2) <= (r ** 2)", "def draw_arc(self, center_x, center_y, radius, thickness, start_angle, end_angle, edge_shine=False):\n\n if end_angle >= start_angle:\n pass\n else:\n start_angle, end_angle = end_angle, start_angle\n\n rad = radius\n while rad <= radius + thickness:\n angle = start_angle\n while angle <= end_angle:\n x = center_x + rad * cos(radians(angle))\n y = center_y - rad * sin(radians(angle))\n if self.image_width >= x >= 0 and self.image_height >= y >= 0: # for the frames' limit protection.\n distance = int(sqrt((center_x - x) ** 2 + (center_y - y) ** 2))\n x = int(x)\n y = int(y)\n if radius <= distance <= radius + thickness:\n [b, g, r] = self.image[y, x] = numpy.array(self.image[y, x]) * numpy.array([0, 0, 1.1])\n\n # Following lines are for increase the visibility when the \"mark\" comes on the dark areas.\n if r <= 100:\n if r == 0:\n r = 1\n self.image[y, x] = [0, 0, 1]\n redness_rate = (255 / r) / 0.12\n self.image[y, x] = numpy.array(self.image[y, x]) * numpy.array([0, 0, redness_rate])\n\n if edge_shine:\n for thick in range(60, 100, 4):\n if radius + thickness * thick / 100 <= distance <= radius + thickness:\n # [b, g, r] = self.image[y, x]\n self.image[y, x] = numpy.array(self.image[y, x]) + numpy.array([thick * 0.06, thick * 0.06, 255])\n angle += 0.25\n rad += 1", "def filled_circle(shape, radius, center=None):\n\tr2 = radius*radius\n\tif center is None:\n\t\t### set to center of array\n\t\tcenter = (shape[0]-1)/2.0,(shape[1]-1)/2.0\n\tdef func(i0, i1):\n\t\tii0 = i0 - center[0]\n\t\tii1 = i1 - center[1]\n\t\trr2 = ii0**2 + ii1**2\n\t\tc = numpy.where(rr2<r2, 0.0, 1.0)\n\t\treturn c\n\treturn numpy.fromfunction(func, shape)", "def circular_levelset(shape, center, sqradius, scalerow=1.0):\n grid = np.mgrid[list(map(slice, shape))].T - center\n phi = sqradius - np.sqrt(np.sum((grid.T)**2, 0))\n u = np.float_(phi > 0)\n return u", "def circumradius(T,binary_mask):\n (x1, y1), (x2, y2), (x3, y3) = T # extracting the points. 
\n \n D=2*(x1*(y2-y3)+x2*(y3-y1)+x3*(y1-y2)) # Diameter\n if D!=0:\n #Centroid of the cicumcircle\n Ux=(((x1**2+y1**2)*(y2-y3))+((x2**2+y2**2)*(y3-y1))+((x3**2+y3**2)*(y1-y2)))/D\n Uy=(((x1**2+y1**2)*(x3-x2))+((x2**2+y2**2)*(x1-x3))+((x3**2+y3**2)*(x2-x1)))/D\n \n #radius\n r = sqrt((Ux-x2)**2+(Uy-y2)**2)\n r=r+1\n \n #Determining the sign: it is positive if the centroid of the circumcricle is in the foreground\n x=np.floor(Ux).astype(int)\n y=np.floor(Uy).astype(int)\n\n if (x >=binary_mask.shape[0] or y >=binary_mask.shape[1]):\n r=-r\n elif (x<0 or y<0):\n r=-r\n elif binary_mask[x,y]:\n r=r\n else:\n r=-r\n return r\n else:\n return False", "def source_circ(f, ctr, r, I0=1):\n\n f2 = (f[0] - ctr[0])**2 + (f[1] - ctr[1])**2\n mask = f2 < r**2\n return I0 * mask", "def check_angle_of_arcs(self):\n\n if self.thin_arc_start_angle >= 3600:\n self.thin_arc_start_angle %= 360\n self.thin_arc_start_angle += 360\n\n elif self.thin_arc_start_angle <= -3600:\n self.thin_arc_start_angle %= 360\n self.thin_arc_start_angle -= 360\n\n if self.thin_arc_end_angle >= 3600:\n self.thin_arc_end_angle %= 360\n self.thin_arc_end_angle += 360\n\n elif self.thin_arc_end_angle <= -3600:\n self.thin_arc_end_angle %= 360\n self.thin_arc_end_angle -= 360\n\n if self.thick_arc_start_angle >= 3600:\n self.thick_arc_start_angle %= 360\n self.thick_arc_start_angle += 360\n\n elif self.thick_arc_start_angle <= -3600:\n self.thick_arc_start_angle %= 360\n self.thick_arc_start_angle -= 360\n\n if self.thick_arc_end_angle >= 3600:\n self.thick_arc_end_angle %= 360\n self.thick_arc_end_angle += 360\n\n elif self.thick_arc_end_angle <= -3600:\n self.thick_arc_end_angle %= 360\n self.thick_arc_end_angle -= 360", "def strip_outside_circle(input_array, center, radius):\n\n cx, cy = center\n r = radius\n xdim, ydim = input_array.shape\n\n y, x = np.ogrid[-cx:xdim-cx,-cy:ydim-cy]\n # Small adjustment for aliasing\n r = r - 2\n mask = x*x + y*y >= r*r\n\n output_array = np.copy(input_array)\n output_array[mask] = 0\n\n return output_array", "def Arc(self, center=(0.,0.), radius=1., nrad=16, ncirc=40,\n start_angle=0., end_angle=np.pi/2., element_type=\"tri\",\n refinement=False, refinement_level=2, algorithm=\"standard\"):\n\n # CHECK FOR ANGLE\n PI = u\"\\u03C0\".encode('utf-8').strip()\n EPS = np.finfo(np.float64).eps\n if np.abs(start_angle) + EPS > 2.*np.pi:\n raise ValueError(\"The starting angle should be either in range [-2{},0] or [0,2{}]\".format(PI,PI))\n if np.abs(end_angle) + EPS > 2.*np.pi:\n raise ValueError(\"The end angle should be either in range [-2{},0] or [0,2{}]\".format(PI,PI))\n\n a1 = np.sign(start_angle) if np.sign(start_angle)!=0. else np.sign(end_angle)\n a2 = np.sign(end_angle) if np.sign(end_angle)!=0. else np.sign(start_angle)\n if a1 == a2:\n total_angle = np.abs(end_angle - start_angle)\n if np.isclose(total_angle,0.) 
or np.isclose(total_angle,2.*np.pi) or total_angle > 2.*np.pi:\n self.Circle(center=center, radius=radius, nrad=nrad, ncirc=ncirc, element_type=element_type)\n return\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center of the arc should be given in a tuple with two elements (x,y)\")\n\n self.__reset__()\n\n if algorithm == \"midpoint_subdivision\":\n from Florence.MeshGeneration.CustomMesher import SubdivisionArc\n mesh = SubdivisionArc(center=center, radius=radius, nrad=nrad, ncirc=ncirc,\n start_angle=start_angle, end_angle=end_angle,\n element_type=element_type, refinement=refinement, refinement_level=refinement_level)\n self.__update__(mesh)\n return\n\n\n if refinement:\n ndivider = refinement_level\n else:\n ndivider = 1\n\n ncirc = int(ncirc/ndivider)\n nrad = int(nrad/ndivider)\n\n if ncirc % 2 != 0 or ncirc < 2:\n ncirc = (ncirc // 2)*2 + 2\n\n radii = radius\n\n radius = np.linspace(0,radii,nrad+1)[1:]\n t = np.linspace(start_angle,end_angle,ncirc+1)\n x = radius[0]*np.cos(t)[::-1]\n y = radius[0]*np.sin(t)[::-1]\n\n points = np.zeros((ncirc+2,2),dtype=np.float64)\n points[0,:] = [0.,0.]\n points[1:,:] = np.array([x,y]).T\n\n self.elements = np.zeros((ncirc // 2,4),dtype=np.int64)\n aranger = np.arange(ncirc // 2)\n self.elements[:,1] = 2*aranger + 1\n self.elements[:,2] = 2*aranger + 2\n self.elements[:,3] = 2*aranger + 3\n\n for i in range(1,nrad):\n t = np.linspace(start_angle,end_angle,ncirc+1)\n x = radius[i]*np.cos(t)[::-1]\n y = radius[i]*np.sin(t)[::-1]\n points = np.vstack((points,np.array([x,y]).T))\n\n points[:,0] += center[0]\n points[:,1] += center[1]\n\n elements = np.zeros((ncirc,4),dtype=np.int64)\n for i in range(1,nrad):\n aranger = np.arange(1+ncirc*(i-1),ncirc*i+1)\n elements[:,0] = aranger + i - 1\n elements[:,1] = aranger + i + ncirc\n elements[:,2] = aranger + i + ncirc + 1\n elements[:,3] = aranger + i\n\n self.elements = np.concatenate((self.elements,elements),axis=0)\n\n\n makezero(points)\n self.points = points\n self.elements[:ncirc // 2,:] = self.elements[:ncirc // 2, [1,2,3,0]]\n\n self.element_type = \"quad\"\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n self.GetBoundaryEdges()\n\n if refinement:\n mesh = self.QuadrilateralProjection(points=self.points[self.elements[0,:],:], npoints=ndivider)\n for i in range(1,self.nelem):\n mesh += self.QuadrilateralProjection(points=self.points[self.elements[i,:],:], npoints=ndivider)\n self.__update__(mesh)\n\n if element_type == \"tri\":\n sys.stdout = open(os.devnull, \"w\")\n self.ConvertQuadsToTris()\n sys.stdout = sys.__stdout__", "def arc(self, x, y, radius, startangle=0, endangle=2 * pi, anticlockwise=False):\n self._impl.arc(x, y, radius, startangle, endangle, anticlockwise)", "def get_mask(fx, fy, ft, mask_exponent=mask_exponent, radius=.5):\n R = frequency_radius(fx, fy, ft, ft_0=np.inf, clean_division=False)\n mask = ((np.cos(np.pi*R/radius)+1)/2 *(R < radius))**(1./mask_exponent)\n return mask", "def get_mask(self):\n w, h = self.rect.w, self.rect.h\n colorkey = (0, 0, 0)\n surface = pg.Surface((w, h))\n surface.set_colorkey(colorkey)\n # fill the surface with the spherical object\n color, center, radius = (255, 255, 255), self.rect.center, round(self.rect.w/2)\n pg.draw.circle(surface, color, center, radius)\n mask = pg.mask.from_surface(surface)\n return mask", "def rotate(angle_range=0):\r\n a = rand_val(angle_range)*np.pi/180.0\r\n c = np.cos(a)\r\n s = np.sin(a)\r\n return np.array((( c, s, 0),\r\n (-s, c, 0),\r\n ( 0, 0, 1)), dtype=np.float)", "def 
mask_particle_circle(image, circle_center, circle_radius, color):\r\n \r\n masked_image = cv2.circle(image, circle_center, circle_radius, color, -1)\r\n return masked_image", "def has_arc(self, a, b):\n return self.matrix[a][b] != 0", "def _arcArcIntersectXY(c1,c2,inside=True,params=False):\n x1=c1[0]\n x2=c2[0]\n r1=c1[1][0]\n r2=c2[1][0]\n\n # check for sample reverse condition\n sr1 = c1[1][3]==-2\n sr2 = c2[1][3]==-2\n\n ## first check for non-intersection due to distance between the\n ## centers of the arcs, treating both arcs as circles for the moment\n\n d=dist(x1,x2) #calculate the distance d between circle centers\n\n if d > r1+r2:\n return False # too far, no possiblity of intersection\n\n if ( r1> r2 and d < r1-r2) or (r2 >= r1 and d < r2-r1):\n return False # too close, little arc is fully inside bigger arc\n\n if d < epsilon:\n return False # circle centers too close for stable calculation\n\n ## OK, we are in the goldilocks zone of intersection. this means\n ## that if boh arcs are cicles or if inside=False we are\n ## guaranteed one or two intersections. Calculate those\n ## intersections and then test to see if they fall between start\n ## and end of the respective arcs\n\n ## we start by calculating the distance id of the intersection plane\n ## from the center of arc 1, knowing that by definition id <= r1\n\n ## Math: consider the triangle with side lengths r1, r2, and d,\n ## where d is the previously calculated distance between arc\n ## centers. Consider the two right triangles with side lengths\n ## r1, id, h, and r2, h, (d-id). We know that:\n ## id^2 + h^2 = r1^2, (d-id)^2 + h^2 = r2^2\n ## solving both for h2 and then substituting, this means:\n ## r1^2 - id^2 = r2^2 - (d-id)^2\n ## collecting terms and solving for id produces:\n ## id = (r1^2-r2^2 + d^2)/2d\n\n id = (r1*r1 - r2*r2 + d*d)/(2 * d)\n\n ## compute the point on the line connecting the two arc centers\n ## that is id away from the first arc\n\n v1 = scale3(sub(x2,x1),1.0/d) # unitary direction vector pointing\n # from x1 to x2\n v2 = scale3(v1,id) # point on line between two circles in\n # coordinate space centered at x1\n\n ## compute direction vector o orthgonal to v1 -- the line that\n ## intersects point v2 and v2+o will pass through our intersection\n ## points\n\n o = orthoXY(v1)\n \n ## now, transform back into world coordinates and calculate the\n ## intersection of this line with either of our arcs, treating\n ## them as circles for now\n\n l = [add(v2,x1),add(add(v2,o),x1)]\n\n s = _lineArcIntersectXY(l,c1,False)\n\n ## as a sanity check, do the same with the other arc. Results\n ## should be within epsilon\n #ss = _lineArcIntersectXY(l,c2,False)\n #foo = list(map(lambda x, y: dist(x,y) < epsilon,s,ss))\n #print(\"sanity check: \" , foo)\n\n if not s or len(s) == 0:\n raise ValueError('no computed intersections, something is wrong')\n\n if not inside and not params:\n return s\n \n ## jump back to arc1 and arc2 space and check angles\n\n s1 = list(map(lambda x: sub(x,x1),s))\n s2 = list(map(lambda x: sub(x,x2),s))\n\n ## compute start and end angles for arcs\n start1=c1[1][1]\n end1=c1[1][2]\n if not (start1 == 0 and end1 == 360):\n start1 = start1 % 360.0\n end1 = end1 % 360.0\n if end1 < start1:\n end1 = end1 + 360.0\n \n start2=c2[1][1]\n end2=c2[1][2]\n \n if not (start2 == 0 and end2 == 360):\n start2 = start2 % 360.0\n end2 = end2 % 360.0\n if end2 < start2:\n end2 = end2 + 360.0\n \n\n ## check each intersection against angles for each arc. 
\n ss = []\n uparam1 = []\n uparam2 = []\n for i in range(len(s)):\n p1 =s1[i]\n p2 =s2[i]\n ang1 = (atan2(p1[1],p1[0]) % pi2)*360.0/pi2\n ang2 = (atan2(p2[1],p2[0]) % pi2)*360.0/pi2\n\n if params:\n u1 = 0\n u2 = 0\n if end1 <= 360.0 or ang1 >= start1 or \\\n ( end1 > 360.0 and ang1 > end1-360.0):\n u1 = (ang1-start1)/(end1-start1)\n if sr1:\n u1 = 1.0-u1\n elif end1 > 360.0:\n u1 = (ang1+360.0-start1)/(end1-start1)\n if sr1:\n u1 = 1.0-u1\n uparam1 = uparam1 + [ u1 ]\n \n if end2 <= 360.0 or ang2 >= start2 or \\\n ( end2 > 360.0 and ang2 > end1-360.0):\n u2 = (ang2-start2)/(end2-start2)\n if sr2:\n u2 = 1.0-u2\n elif end2 > 360.0:\n u2 = (ang2+360.0-start2)/(end2-start2)\n if sr2:\n u2 = 1.0-u2\n uparam2 = uparam2 + [ u2]\n \n else:\n good = False\n ## check angle against first arc\n if end1 <= 360.0 and ang1 >= start1 and ang1 <= end1:\n good = True\n elif end1 > 360.0 and (ang1 >= start1 or ang1<= end1-360.0):\n good = True\n\n ## check angle against second arc\n if end2 <= 360.0 and ang2 >= start2 and ang2 <= end2:\n good = good and True\n elif end2 > 360.0 and (ang2 >= start2 or ang2<= end2-360.0):\n good = good and True\n else:\n good = False\n\n ## only add instersection to the list if both checks were passed\n if good:\n ss = ss + [ s[i] ]\n \n if not params and len(ss) == 0:\n return False\n else:\n if params:\n return [uparam1,uparam2]\n else:\n return ss", "def HollowArc(self, center=(0.,0.), inner_radius=1., outer_radius=2., nrad=16, ncirc=40,\n start_angle=0., end_angle=np.pi/2., element_type=\"tri\", refinement=False, refinement_level=2):\n\n # CHECK FOR ANGLE\n PI = u\"\\u03C0\".encode('utf-8').strip()\n EPS = np.finfo(np.float64).eps\n if np.abs(start_angle) + EPS > 2.*np.pi:\n raise ValueError(\"The starting angle should be either in range [-2{},0] or [0,2{}]\".format(PI,PI))\n if np.abs(end_angle) + EPS > 2.*np.pi:\n raise ValueError(\"The end angle should be either in range [-2{},0] or [0,2{}]\".format(PI,PI))\n\n\n if np.sign(start_angle) == np.sign(end_angle):\n total_angle = np.abs(end_angle - start_angle)\n if np.isclose(total_angle,0.) 
or total_angle > 2.*np.pi:\n self.Circle(center=center, radius=radius, nrad=nrad, ncirc=ncirc, element_type=element_type)\n return\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center of the arc should be given in a tuple with two elements (x,y)\")\n\n self.__reset__()\n\n if refinement:\n ndivider = refinement_level\n else:\n ndivider = 1\n\n ncirc = int(ncirc/ndivider)\n nrad = int(nrad/ndivider) + 2\n\n if ncirc % 2 != 0 or ncirc < 2:\n ncirc = (ncirc // 2)*2 + 2\n\n # radius = np.linspace(inner_radius,outer_radius,nrad)\n # points = np.zeros((1,2),dtype=np.float64)\n # for i in range(nrad):\n # t = np.linspace(start_angle,end_angle,ncirc+1)\n # x = radius[i]*np.cos(t)[::-1]\n # y = radius[i]*np.sin(t)[::-1]\n # points = np.vstack((points,np.array([x,y]).T))\n # points = points[ncirc+2:,:]\n\n radius = np.linspace(inner_radius,outer_radius,nrad-1)\n points = np.zeros((1,2),dtype=np.float64)\n for i in range(nrad-1):\n t = np.linspace(start_angle,end_angle,ncirc+1)\n x = radius[i]*np.cos(t)[::-1]\n y = radius[i]*np.sin(t)[::-1]\n points = np.vstack((points,np.array([x,y]).T))\n points = points[1:,:]\n\n points[:,0] += center[0]\n points[:,1] += center[1]\n makezero(points)\n self.points = points\n\n self.elements = np.zeros((1,4),dtype=np.int64)\n elements = np.zeros((ncirc,4),dtype=np.int64)\n for i in range(nrad-2):\n aranger = np.arange(ncirc*i,ncirc*(i+1))\n elements[:,0] = aranger + i\n elements[:,1] = aranger + i + ncirc + 1\n elements[:,2] = aranger + i + ncirc + 2\n elements[:,3] = aranger + i + 1\n\n self.elements = np.concatenate((self.elements,elements),axis=0)\n self.elements = self.elements[1:,:]\n\n\n self.element_type = \"quad\"\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n self.GetBoundaryEdges()\n\n if refinement:\n mesh = self.QuadrilateralProjection(points=self.points[self.elements[0,:],:], npoints=ndivider)\n for i in range(1,self.nelem):\n mesh += self.QuadrilateralProjection(points=self.points[self.elements[i,:],:], npoints=ndivider)\n self.__update__(mesh)\n\n\n if element_type == \"tri\":\n sys.stdout = open(os.devnull, \"w\")\n self.ConvertQuadsToTris()\n sys.stdout = sys.__stdout__\n self.CheckNodeNumbering(change_order_to=\"anti-clockwise\", verbose=False)\n\n self.points = np.ascontiguousarray(self.points)", "def CircularArcPlate(self, side_length=15, radius=10, center=(0.,0.),\n start_angle=0., end_angle=np.pi/4., ncirc=5, nrad=2, element_type=\"tri\"):\n\n if np.allclose(radius,0):\n raise ValueError(\"Arc radius cannot be zero\")\n if element_type != \"tri\" and element_type != \"quad\":\n raise ValueError(\"Element type can only be tri or quad\")\n\n if not np.isclose(start_angle,0.) and not \\\n (np.isclose(end_angle,np.pi/4.) or np.isclose(end_angle,np.pi/2.) 
or np.isclose(end_angle,np.pi)):\n raise ValueError(\"Start and end angles should be 0 and 45 degrees respectively\")\n\n self.__reset__()\n\n\n tmp_end_angle = np.pi/4.\n t = np.linspace(start_angle,tmp_end_angle,ncirc+1)\n x = radius*np.cos(t)[::-1]\n y = radius*np.sin(t)[::-1]\n\n points = np.array([x,y]).T\n points = np.flipud(points)\n\n plate_wall_ys = np.linspace(0.,side_length,ncirc+1)\n plate_wall_xs = np.zeros(ncirc+1) + side_length\n wpoints = np.array([plate_wall_xs,plate_wall_ys]).T\n\n lengths = np.linalg.norm(wpoints - points, axis=1)\n xs, ys = np.zeros((ncirc+1,nrad+1)), np.zeros((ncirc+1,nrad+1))\n for j in range(ncirc+1):\n xs[j,:] = np.linspace(points[j,0],wpoints[j,0],nrad+1)\n ys[j,:] = np.linspace(points[j,1],wpoints[j,1],nrad+1)\n self.points = np.array([xs.ravel(),ys.ravel()]).T\n\n\n self.elements = np.zeros((nrad*ncirc,4),dtype=np.int64)\n node_arranger = (nrad+1)*np.arange(ncirc)\n for i in range(nrad):\n self.elements[ncirc*i:(i+1)*ncirc,0] = node_arranger + i\n self.elements[ncirc*i:(i+1)*ncirc,1] = node_arranger + i + 1\n self.elements[ncirc*i:(i+1)*ncirc,2] = node_arranger + i + nrad + 2\n self.elements[ncirc*i:(i+1)*ncirc,3] = node_arranger + i + nrad + 1\n\n self.element_type = \"quad\"\n if np.isclose(end_angle,np.pi/2.):\n # First mirror the points along 45 degree axis\n # new_points = np.copy(self.points)\n # new_elements = np.copy(self.elements)\n\n # dpoints = np.zeros((2*new_points.shape[0]-1,2))\n # dpoints[:new_points.shape[0],:] = new_points\n # dpoints[new_points.shape[0]:,0] = new_points[:-1,1][::-1]\n # dpoints[new_points.shape[0]:,1] = new_points[:-1,0][::-1]\n\n # self.points = dpoints\n # self.elements = np.vstack((new_elements,new_elements+new_elements.max()))\n\n self.elements = np.fliplr(self.elements)\n mmesh = deepcopy(self)\n mmesh.points[:,0] = self.points[:,1][::-1]\n mmesh.points[:,1] = self.points[:,0][::-1]\n mmesh.elements = np.fliplr(mmesh.elements)\n self += mmesh\n\n\n\n\n if np.isclose(end_angle,np.pi):\n # First mirror the points along 45 degree axis\n self.elements = np.fliplr(self.elements)\n mmesh = deepcopy(self)\n mmesh.points[:,0] = self.points[:,1][::-1]\n mmesh.points[:,1] = self.points[:,0][::-1]\n mmesh.elements = np.fliplr(mmesh.elements)\n self += mmesh\n\n # Mirror along Y axis\n nmesh = deepcopy(self)\n nmesh.points[:,0] *= -1.\n self += nmesh\n\n\n # If called for stetching purposes its best to keep center at (0,0)\n self.points[:,0] += center[0]\n self.points[:,1] += center[1]\n\n # self.element_type = \"quad\"\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n self.GetBoundaryEdges()\n\n if element_type == \"tri\":\n sys.stdout = open(os.devnull, \"w\")\n self.ConvertQuadsToTris()\n sys.stdout = sys.__stdout__", "def _disk(radius):\n\n coords = np.arange(-round(radius,0), round(radius,0)+1)\n X, Y = np.meshgrid(coords, coords)\n disk_out = 1*np.array((X**2 + Y**2) < (radius+0.5)**2)\n # round improves behavior with irrational radii\n return(disk_out)", "def circles_overlapping(x1, y1, x2, y2, r):\n # print(abs((x2-x1)**2 + (y2-y1)**2))\n # print((2*r)**2)\n if (abs((x2-x1)**2 + (y2-y1)**2) > (2*r)**2):\n return False\n else: return True", "def center_circle_mask(points, arrayshape=(480, 640), maskinside=True):\n mask = np.zeros(arrayshape)\n points = [float(ii) for ii in points.split(', ')]\n center_col = round(points[0] + (points[2]/2))\n center_row = round(points[1] + (points[3]/2))\n Y, X = np.ogrid[:arrayshape[0], :arrayshape[1]]\n\n dist = np.sqrt((X - center_col)**2 + (Y - 
center_row)**2)\n if maskinside is False:\n mask = dist <= points[2]/2\n else:\n mask = dist >= points[2]/2\n return mask * 1", "def circ(ori, bound = 360):\n\t\t\t# ori[ori<-360] += 720\n\t\t\t# ori[ori<0] += 360\n\t\t\t# ori[ori>360] -= 360\n\t\t\t# ori[ori>720] -= 720\n\n\n\t\t\treturn ori % bound", "def archimedean(\n radius_start,\n radius_end,\n step,\n center=None,\n close=False,\n point_start=None,\n angle_start=None,\n arc_res=None):\n\n if radius_start > radius_end:\n sign = 1\n else:\n sign = -1\n\n # the spiral constant\n # evaluated from: step = K * 2 * pi\n K = step / (np.pi * 2)\n\n # use our constant to find angular start and end\n theta_start = radius_start / K\n theta_end = radius_end / K\n\n # if not passed set angular resolution\n if arc_res is None:\n arc_res = constants.default_arc\n\n arc_count = int(np.ceil((\n abs(theta_end - theta_start)) / arc_res))\n\n # given that arcs will share points how many\n # points on the helix do we need\n arc_index, point_count = arc_indexes(arc_count)\n\n assert arc_index.max() == point_count - 1\n\n # create an array of angles\n theta = np.linspace(theta_start, theta_end, point_count)\n\n # use the spiral equation to generate radii\n radii = theta * K\n\n # make sure they match\n assert np.isclose(radii[0], radius_start)\n assert np.isclose(radii[-1], radius_end)\n\n # do offset AFTER radius calculation\n if angle_start is not None:\n theta += (angle_start - theta_start)\n\n # convert polar coordinates to 2D cartesian\n points = np.column_stack(\n (np.cos(theta), np.sin(theta))) * radii.reshape((-1, 1))\n\n if close:\n\n # get indexes of arcs required to close\n close_idx, close_ct = arc_indexes(\n int(np.ceil((np.pi * 2) / arc_res)))\n\n # the additional angles needed to close\n # we are cutting off the first point as its a duplicate\n t_close = np.linspace(theta[-1],\n theta[-1] + np.pi * 2 * sign,\n close_ct)[1:]\n\n # additional points to close the arc\n closer = np.column_stack((\n np.cos(t_close), np.sin(t_close))) * radii[-1]\n assert len(closer) == close_ct - 1\n assert len(points) == point_count\n\n # stack points with closing arc\n points = np.vstack((points, closer))\n # add the additional points to the count\n point_count += close_ct - 1\n # add the additional arc indexes\n\n arc_index = np.vstack((\n arc_index, arc_index[-1][-1] + close_idx))\n\n assert len(points) == point_count\n # max index of arcs should correspond to points\n assert arc_index[-1][-1] == point_count - 1\n\n if center is not None:\n points += center\n\n # convert sequential points into three point arcs\n arcs = points[arc_index]\n\n if constants.strict:\n # check all arcs to make sure the correspond\n for a, b in zip(arcs[:-1], arcs[1:]):\n assert np.allclose(a[2], b[0])\n\n if point_start is not None:\n a, b = np.clip(\n (point_start[:2] - center[:2]) / radius_start,\n -1.0, 1.0)\n assert np.isclose(a, np.cos(angle_start), atol=1e-3)\n assert np.isclose(b, np.sin(angle_start), atol=1e-3)\n\n return arcs", "def cylinder_intersection_check(r0, step, orientation, radius): \n A = 1 - (step[0]*orientation[0]+step[1]*orientation[1]+step[2]*orientation[2])**2\n B = 2 * (r0[0]*step[0]+r0[1]*step[1]+r0[2]*step[2] - (r0[0]*orientation[0]+r0[1]*orientation[1]+r0[2]*orientation[2]) * (step[0]*orientation[0]+step[1]*orientation[1]+step[2]*orientation[2]))\n C = r0[0]**2+r0[1]**2+r0[2]**2 - radius**2 -(r0[0]*orientation[0]+r0[1]*orientation[1]+r0[2]*orientation[2])**2 \n t = (-B + math.sqrt(B**2 - 4*A*C)) / (2*A)\n return t", "def ring_is_clockwise(ring):\n total 
= 0\n for (pt1, pt2) in pairwise(ring):\n total += (pt2[0] - pt1[0]) * (pt2[1] + pt1[1])\n return total >= 0", "def isInCircle(self,x1,y1,radius1):\r\n if(distance(self.x,x1,self.y,y1) < (self.radius+radius1)):\r\n return True\r\n return False", "def cmask(self):\n mask = np.zeros(18)\n if 'full' in self.CONS: mask[:] = 1\n if 'f0' in self.CONS: mask[0] = 1\n if 'f1' in self.CONS: mask[1:4] = 1\n if 'f2' in self.CONS: mask[4:10] = 1\n if 'vx' in self.CONS: mask[10] = 1\n if 'vy' in self.CONS: mask[11] = 1\n if 'vz' in self.CONS: mask[12] = 1\n if 'TG' in self.CONS: mask[13:18] = 1\n return mask>0", "def unsamplearc(c,p):\n x = sub(p,c[0])\n r=c[1][0]\n start=c[1][1]\n end=c[1][2]\n # if close(end-start,360.0): #rotated circles may look like this\n # start = 0\n # end = 360\n \n if start != 0 and end != 360:\n start = start % 360.0\n end = end % 360.0\n if end < start:\n end += 360.0 \n if close(start,end):\n # degenerate, zero-length arc\n return False\n if len(c) == 3:\n norm = c[2]\n if dist(norm,vect(0,0,1)) > epsilon:\n raise NotImplementedError('non x-y plane arc unsampling not yet supported')\n if abs(mag(x)-r) > epsilon:\n return False # point is not on arc circle\n ang = (atan2(x[1],x[0]) % pi2)*360/pi2\n if end > 360.0 and ang <= end-360.0:\n ang = ang + 360.0\n u = (ang-start)/(end-start)\n if c[1][3] == -2: #samplereverse\n u = 1.0-u\n return u", "def circles_collide(x1: float, y1: float, r1: float, x2: float, y2: float, r2: float) -> bool:\n return distance_between_sq(x1, y1, x2, y2) <= (r1 + r2)**2", "def arccenter(c):\n start=c[1][1] \n end=c[1][2]\n if start == 0 and end == 360:\n return c[0]\n else:\n return samplearc(c,0.5)", "def circular_nonterminating_orbit(x, f):\n return x == f(collision_point_nonterminating_orbit(x, f))", "def overlap(cir1x, cir1y, rad1, cir2x, cir2y, rad2):\n radius = rad1 + rad2\n compare = ((cir2y - cir1y)**2 + (cir2x - cir1x)**2)**0.5\n if compare > radius:\n print \"no overlapping\"\n else:\n print \"overlapping\"", "def add_circle(self, r_center, c_center, radius, color=BLUE, image=np.full((640, 480, 3), BLACK)):\n circle = np.fromfunction(lambda r, c, _: (r - r_center) ** 2 + (c - c_center) ** 2 <= radius ** 2, image.shape)\n return np.where(circle, color, image)", "def isarc(a):\n if not isinstance(a,list):\n return False\n n = len(a)\n if n < 2 or n > 3:\n return False\n if not (ispoint(a[0]) and isvect(a[1])):\n return False\n if a[1][3] not in (-1,-2): # is psuedovector marked?\n return False\n r =a[1][0] \n if r < 0:\n # is radius less than zero? if so, not a valid arc\n return False\n if n == 3 and ( not ispoint(a[2]) or abs(mag(a[2])-1.0) > epsilon):\n # if plane-definition vector is included but is non-unitary,\n # it's not a valid arc\n return False\n \n return True", "def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2", "def as_boolean_mask(self):\n bbox = self.bbox()\n zs = np.unique([c.image_z_position for c in self.contours])\n z_to_index = dict(zip(zs,range(len(zs))))\n\n # Get dimensions, initialize mask.\n nx,ny = np.diff(bbox[:2], axis=1).astype(int) + 1\n nx = int(nx); ny = int(ny)\n nz = int(zs.shape[0])\n mask = np.zeros((nx,ny,nz), dtype=np.bool)\n\n # We check if these points are enclosed within each contour \n # for a given slice. 
`test_points` is a list of image coordinate \n # points, offset by the bounding box.\n test_points = bbox[:2,0] + np.c_[ np.where(~mask[:,:,0]) ]\n\n # First we \"turn on\" pixels enclosed by inclusion contours.\n for contour in self.contours:\n if contour.inclusion:\n zi = z_to_index[contour.image_z_position]\n contour_matrix = contour.to_matrix()[:,:2]\n\n # Turn the contour closed if it's not.\n if (contour_matrix[0] != contour_matrix[-1]).all():\n contour_matrix = np.append(contour_matrix,\n contour_matrix[0].reshape(1,2),\n axis=0)\n\n # Create path object and test all pixels\n # within the contour's bounding box.\n path = mplpath.Path(contour_matrix, closed=True)\n contains_pts = path.contains_points(test_points)\n mask[:,:,zi] = contains_pts.reshape(mask.shape[:2])\n\n # Second, we \"turn off\" pixels enclosed by exclusion contours.\n for contour in self.contours:\n if not contour.inclusion:\n zi = z_to_index[contour.image_z_position]\n contour_matrix = contour.to_matrix()[:,:2]\n\n # Turn the contour closed if it's not.\n if (contour_matrix[0] != contour_matrix[-1]).all():\n contour_matrix = np.append(contour_matrix,\n contour_matrix[0].reshape(1,2),\n axis=0)\n\n path = mplpath.Path(contour_matrix, closed=True)\n not_contains_pts = ~path.contains_points(test_points)\n not_contains_pts = not_contains_pts.reshape(mask.shape[:2])\n mask[:,:,zi] = np.logical_and(mask[:,:,zi], not_contains_pts)\n\n # The first and second axes have to \n # be swapped because of the reshape.\n return mask.swapaxes(0,1), bbox[[1,0,2]]", "def LSSTPointing_circular(xc, yc, angle_rot=0., area=None, maxbound=None):\n\n #\n arr = []\n\n radius = 1.\n if maxbound is not None:\n radius = maxbound\n\n step = radius/100.\n for x in np.arange(0., radius, step):\n y = np.sqrt(radius*radius-x*x)\n arr.append([x, y])\n\n # symmetry I: y -> -y\n arrcp = list(arr)\n for val in arr[::-1]:\n if val[1] >= 0.:\n arrcp.append([val[0], -val[1]])\n\n # symmetry II: x -> -x\n arr = list(arrcp)\n for val in arrcp[::-1]:\n if val[0] > 0.:\n arr.append([-val[0], val[1]])\n\n # build polygon\n poly_orig = geometry.Polygon(arr)\n\n # set area\n if area is not None:\n poly_orig = affinity.scale(poly_orig, xfact=np.sqrt(\n area/poly_orig.area), yfact=np.sqrt(area/poly_orig.area))\n\n # set rotation angle\n if angle_rot > 0.1:\n rotated_poly = affinity.rotate(poly_orig, angle_rot)\n else:\n rotated_poly = poly_orig\n\n return affinity.translate(rotated_poly,\n xoff=xc-rotated_poly.centroid.x,\n yoff=yc-rotated_poly.centroid.y)", "def circleCirc(radius):\n radius = float(radius)\n return 2*math.pi*radius", "def circle_mask(radius,size=None,offset=None,inner=0,subsample_limit=4,center=False):\n def subsample(x,y,sz,r,lim):\n d = np.hypot(x, y)\n if lim==0: #hit recursion limit\n #return area if x,y is inside circle\n return sz**2 if d < r else 0.0\n elif d + 0.70711*sz < r: #totally inside circle\n return sz**2\n elif d - 0.70711*sz > r: #totally outside circle\n return 0.0\n else: #on edge, recurse into quadrants\n s,o = sz/2, sz/4\n return subsample(x+o,y+o,s,r,lim-1) + \\\n subsample(x+o,y-o,s,r,lim-1) + \\\n subsample(x-o,y-o,s,r,lim-1) + \\\n subsample(x-o,y+o,s,r,lim-1)\n if offset is None:\n y0,x0 = 0,0\n else:\n y0,x0 = offset\n if size is None:\n size=2*radius+1\n if np.isscalar(size):\n size = (size,size)\n if center:\n y0 += 0.5*size[0]-0.5-radius\n x0 += 0.5*size[1]-0.5-radius\n coeffs = np.empty(size)\n for r in range(size[0]):\n for c in range(size[1]):\n x,y = c-radius,r-radius\n coeffs[r,c] = 
subsample(x-x0,y-y0,1,radius,subsample_limit)\n if inner > 0: \n coeffs[r,c] -= subsample(x-x0,y-y0,1,inner,subsample_limit) \n return coeffs", "def inCircleFast(self, tri, p):\n center, radius = self.circles[tri]\n return np.sum(np.square(center - p)) <= radius", "def _getCirclePoints(self, center, from_point, to_angle, interval):\r\n\r\n # radius is needed for calculating the points\r\n radius = math.sqrt( math.pow((center[0] - from_point[0]),2) + math.pow(center[1]-from_point[1],2) )\r\n\r\n # get current angle from from_point. x,y: 1,0 = zero angle, 0,1 = -pi/2, -1,0 = pi, 0,1 as pi/2\r\n if (from_point[0] < center[0] and from_point[1] > center[1]): # angles in lower left quarter\r\n angle = math.pi - math.asin( (-(center[1] - from_point[1]) )/ radius )\r\n elif (from_point[0] < center[0] and from_point[1] <= center[1]): # angles in upper left\r\n angle = -math.pi - math.asin( (-(center[1] - from_point[1]) )/ radius )\r\n else:\r\n angle = math.asin( (-(center[1] - from_point[1]) )/ radius )\r\n\r\n # debug.brf(\"from angle deg: %s to_angle deg: %s\" % (math.degrees(angle), math.degrees(to_angle)))\r\n\r\n interval = math.radians(interval)\r\n\r\n points = [] # starting point\r\n points.append(from_point)\r\n\r\n # flag of direction the 'motion' goes to\r\n clockwise = False\r\n anticlockwise = False\r\n\r\n for i in range(60): # 60 points is the maximum\r\n\r\n if angle < to_angle: # if to_angle is bigger, length to clockwise direction is absolute value from to_angle substracted from current angle modulus the whole circle\r\n len_cw = math.fabs(angle - to_angle) % (2 * math.pi)\r\n len_acw = 2 * math.pi - math.fabs(len_cw)\r\n else: # same as above, but anticlockwise\r\n len_acw = math.fabs(angle - to_angle) % (2 * math.pi)\r\n len_cw = 2 * math.pi - math.fabs(len_acw)\r\n\r\n if len_cw > len_acw:\r\n # turn anticlockwise\r\n anticlockwise = True\r\n if clockwise == True: # if clockwise turns have been done, then no direction changing needed. break point drawing\r\n break\r\n\r\n angle = angle - interval\r\n\r\n else:\r\n # turn clockwise\r\n clockwise = True\r\n if anticlockwise == True: # if anticlockwise turns have been done, then no direction changing needed. 
break point drawing\r\n break\r\n\r\n angle = angle + interval\r\n\r\n # calculate x/y to current angle\r\n x = int(round(radius*math.cos(angle) + center[0]))\r\n y = int(round(radius*math.sin(angle) + center[1]))\r\n points.append( (x,y) )\r\n\r\n # debug.brf(\"angle %s (%s,%s) remainding: %s (deg: %s)\" % (str(math.degrees(angle)), x, y, str(angle - to_angle), str(math.degrees(angle-to_angle))))\r\n\r\n # append still the last wanted point to get accurate\r\n x = int(round(radius*math.cos(to_angle) + center[0]))\r\n y = int(round(radius*math.sin(to_angle) + center[1]))\r\n points.append( (x,y) )\r\n\r\n # debug.brf(\"points %s\" % points)\r\n return points", "def circle():\n xmin=0\n xmax=6.5\n ymin=0.\n ymax=6.5\n\n x = arange(xmin, xmax, 0.005)\n y = x*1.\n [xx, yy] = meshgrid(x, y)\n\n zz=sqrt((xx-3.2475)**2.+(yy-3.2475)**2.)\n zz2=zz*1.\n zz2[(zz <= 3.25)]=1.\n zz2[(zz <= 3.25*0.2)]=0.\n zz2[(zz > 3.25)]=0.\n zz3=zeros(numpy.array(numpy.shape(zz2))/10)\n for i in arange(len(xx)/10):\n for j in arange(len(yy)/10):\n zz3[i,j]=numpy.sum(zz2[(i*10):(i*10+10),(j*10):(j*10+10)])/100.\n\n return zz3", "def test_odd(self):\n actual = cm.circle_mask((5, 5), 2)\n expected = np.array([[False, False, True, False, False],\n [False, True, True, True, False],\n [True, True, True, True, True],\n [False, True, True, True, False],\n [False, False, True, False, False]])\n self.assertIsNone(np.testing.assert_array_equal(actual, expected))", "def _is_trivial_angle(rad: float, atol: float) -> bool:\n return abs(rad) < atol or abs(abs(rad) - np.pi / 4) < atol", "def test_offcenter(self):\n actual = cm.circle_mask((5, 5), 2, center=(2, 3))\n expected = np.array([[False, False, False, True, False],\n [False, False, True, True, True],\n [False, True, True, True, True],\n [False, False, True, True, True],\n [False, False, False, True, False]])\n self.assertIsNone(np.testing.assert_array_equal(actual, expected))", "def ispolar(self, pole=None):\n\n if not isinstance(self.crs, GeographicalCRS):\n raise CRSError(\"ispolar defined only for geographical CRS\")\n\n if pole is None:\n pole = Point((0, 90), crs=SphericalEarth)\n\n lon0 = geodesy.reduce_deg(self[-1].vertex[0])\n sum_angle = 0.0\n for vertex in self.vertices:\n lon1 = geodesy.reduce_deg(vertex[0])\n if _cdateline.crosses_dateline(lon0, lon1):\n sum_angle += 360.0 + lon1 - lon0\n else:\n sum_angle += lon1 - lon0\n lon0 = lon1\n\n return True if abs(sum_angle) > 1e-4 else False", "def in_cylinder(x, y, z, min_z, max_z, max_r):\n r = np.sqrt(x ** 2 + y ** 2)\n m = r < max_r\n m = m & (z < max_z)\n m = m & (z >= min_z)\n return m", "def filter_sinc_channel(img, mask_circle_diameter=40.0):\n dft_image = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)\n dft_shift = np.fft.fftshift(dft_image)\n mask = np.zeros((img.shape[0], img.shape[1], 2), dtype=np.uint8)\n circle_center = (int(img.shape[0] / 2), int(img.shape[1] / 2))\n points_x, points_y = np.ogrid[:img.shape[0], :img.shape[1]]\n mask_area = (points_x - circle_center[0]) ** 2 + (points_y - circle_center[1]) ** 2 <= \\\n (mask_circle_diameter / 2) ** 2\n mask[mask_area] = 1\n filtered_dft = dft_shift * mask\n idft_image = np.fft.ifftshift(filtered_dft)\n img_filtered = cv2.idft(idft_image)\n img_filtered = cv2.magnitude(img_filtered[:, :, 0], img_filtered[:, :, 1])\n return img_filtered", "def segmentarc(c,u1,u2):\n\n pol1=samplearc(c,u1,polar=True)\n pol2=samplearc(c,u2,polar=True)\n sr= (c[1][3] == -2)\n if sr:\n return arc(pol1[0],pol1[1],pol2[2],pol1[2],samplereverse=True)\n else:\n return 
arc(pol1[0],pol1[1],pol1[2],pol2[2])", "def _is_circle_contractive(self,r,tol):\n B=np.diag(self.b)\n M=np.dot(B,self.A)+np.dot(self.A.T,B)-np.outer(self.b,self.b)\n X=M+B/r\n v,d=np.linalg.eig(X)\n if v.min()>-tol:\n return 1\n else:\n return 0", "def arc(radius = 10, width = 0.5, theta = 45, start_angle = 0,\n angle_resolution = 2.5, layer = 0):\n inner_radius = radius - width/2\n outer_radius = radius + width/2\n angle1 = (start_angle) * pi/180\n angle2 = (start_angle + theta) * pi/180\n t = np.linspace(angle1, angle2, int(np.ceil(abs(theta)/angle_resolution)))\n inner_points_x = (inner_radius*cos(t)).tolist()\n inner_points_y = (inner_radius*sin(t)).tolist()\n outer_points_x = (outer_radius*cos(t)).tolist()\n outer_points_y = (outer_radius*sin(t)).tolist()\n xpts = inner_points_x + outer_points_x[::-1]\n ypts = inner_points_y + outer_points_y[::-1]\n\n D = Device('arc')\n D.add_polygon(points = (xpts,ypts), layer = layer)\n D.add_port(name = 1,\n midpoint = (radius*cos(angle1), radius*sin(angle1)),\n width = width,\n orientation = start_angle - 90 + 180*(theta<0))\n D.add_port(name = 2,\n midpoint = (radius*cos(angle2), radius*sin(angle2)),\n width = width,\n orientation = start_angle + theta + 90 - 180*(theta<0))\n D.info['length'] = (abs(theta) * pi/180) * radius\n return D", "def object_circularity(labelmask, label):\n # Find z slice with most pixels from object.\n z, i, j = np.where(labelmask == label)\n zmax = mode(z)[0][0]\n # Select 2D image representing object's max Z-slice.\n im = np.where(labelmask[zmax] == label, 1, 0)\n # Calculate circularity from object perimeter and area.\n regions = regionprops(im)\n perimeter = regions[0].perimeter\n area = regions[0].area\n if (perimeter == 0):\n perimeter = 0.5\n circularity = 4 * np.pi * area / (perimeter ** 2) \n return circularity", "def __contains__(self, angle):\n angle = normalize(angle, min(self.start, self.finish), max(self.start, self.finish))\n return (self.start <= angle < self.finish) or (self.finish <= angle < self.start)", "def isCrossingCircle(self, other):\n vector = Vector.createFromTwoPoints(self.center, other.center)\n return vector.norm < self.radius + other.radius", "def regionstomask(in_regions, genome_len):\n out_mask = np.zeros((2,genome_len)).astype(bool)\n for region in in_regions:\n out_mask[region[0],region[1]:region[2]+1] = True\n return out_mask", "def houghCircle(img: np.ndarray, min_radius: float, max_radius: float) -> list:\r\n\r\n canny_cv, canny_my = edgeDetectionCanny(img, 200, 100)\r\n edges = []\r\n\r\n for x in range(canny_cv.shape[0]):\r\n for y in range(canny_cv.shape[1]):\r\n if canny_cv[x, y] == 255:\r\n edges.append((x, y))\r\n\r\n thresh = 0.47 # at least 47% of the pixels of a circle must be detected\r\n steps = 100 # number of samples from each circle\r\n\r\n points = []\r\n for r in range(min_radius, max_radius + 1):\r\n for t in range(steps):\r\n alpha = 2 * pi * t / steps\r\n x = int(r * cos(alpha))\r\n y = int(r * sin(alpha))\r\n points.append((x, y, r))\r\n\r\n temp_circles = {} # dict{circle center, radius: counter}\r\n for x, y in edges: # iterate the pixels of the edges:\r\n for dx, dy, r in points:\r\n b = x - dx\r\n a = y - dy\r\n count = temp_circles.get((a, b, r))\r\n if count is None:\r\n count = 0\r\n temp_circles[(a, b, r)] = count + 1\r\n\r\n # now add the appropriate circles to the ans list:\r\n circles = []\r\n sorted_temp = sorted(temp_circles.items(), key=lambda i: -i[1])\r\n for circle, counter in sorted_temp:\r\n x, y, r = circle\r\n # once a circle has been 
selected, we reject all the circles whose center is inside that circle\r\n if counter / steps >= thresh and all((x - xc) ** 2 + (y - yc) ** 2 > rc ** 2 for xc, yc, rc in circles):\r\n circles.append((x, y, r))\r\n\r\n return circles", "def _get_radial(self):\n return self.startRadius is not None and self.endRadius is not None", "def arc(radius = 10, angle = 90, num_pts = 720):\n t = np.linspace(0, angle*np.pi/180, abs(int(num_pts*angle/360))-2)\n x = radius*np.cos(t)\n y = radius*np.sin(t)\n points = np.array((x,y)).T\n start_angle = 90*np.sign(angle)\n end_angle = start_angle + angle\n return points, start_angle, end_angle", "def CircularArc(pointa, pointb, center, resolution=100, negative=False):\n check_valid_vector(pointa, 'pointa')\n check_valid_vector(pointb, 'pointb')\n check_valid_vector(center, 'center')\n if not np.isclose(\n np.linalg.norm(np.array(pointa) - np.array(center)),\n np.linalg.norm(np.array(pointb) - np.array(center)),\n ):\n raise ValueError(\"pointa and pointb are not equidistant from center\")\n\n # fix half-arc bug: if a half arc travels directly through the\n # center point, it becomes a line\n pointb = list(pointb)\n pointb[0] -= 1e-10\n pointb[1] -= 1e-10\n\n arc = _vtk.vtkArcSource()\n arc.SetPoint1(*pointa)\n arc.SetPoint2(*pointb)\n arc.SetCenter(*center)\n arc.SetResolution(resolution)\n arc.SetNegative(negative)\n\n arc.Update()\n angle = np.deg2rad(arc.GetAngle())\n arc = wrap(arc.GetOutput())\n # Compute distance of every point along circular arc\n center = np.array(center).ravel()\n radius = np.sqrt(np.sum((arc.points[0] - center) ** 2, axis=0))\n angles = np.arange(0.0, 1.0 + 1.0 / resolution, 1.0 / resolution) * angle\n arc['Distance'] = radius * angles\n return arc", "def get_mask(total, begin, end):\n mask = np.zeros([total]).astype(np.float32)\n mask[begin:end] = 1\n return np.array(mask, dtype=np.bool)", "def remote_concentric_circles(circle_turtle,dis_range,radius):\r\n for i in range(dis_range):\r\n color = random.choice(dark_colors)\r\n circle_turtle.color(color)\r\n circle_turtle.circle(radius*i)\r\n circle_turtle.up()\r\n circle_turtle.sety((radius*i)*(-1))\r\n circle_turtle.down()\r\n\r\n circle_turtle.up()\r\n circle_turtle.goto(0,0)\r\n circle_turtle.down()", "def generate_circle_by_angles(t, C, r, theta, phi):\n n = np.array([np.cos(phi)*np.sin(theta), np.sin(phi)*np.sin(theta), np.cos(theta)])\n u = np.array([-np.sin(phi), np.cos(phi), 0])\n\n P_circle = r*np.cos(t)[:, np.newaxis]*u + r*np.sin(t)[:, np.newaxis]*np.cross(n, u) + C\n\n return P_circle", "def fill_random_circles(self, n=4, min_rad=0, max_rad=40):\n # TODO: is it worth to change this to take the radius in deg?\n mask = np.ones(self.data.shape, dtype=int)\n nx, ny = mask.shape\n xx = np.random.choice(np.arange(nx), n)\n yy = np.random.choice(np.arange(ny), n)\n rr = min_rad + np.random.rand(n) * (max_rad - min_rad)\n\n for x, y, r in zip(xx, yy, rr):\n xd, yd = np.ogrid[-x:nx - x, -y:ny - y]\n val = xd * xd + yd * yd <= r * r\n mask[val] = 0\n self.data = mask", "def ArcCylinder(self, center=(0.,0.,0.), radius=1., start_angle=0, end_angle=np.pi/2.,\n length=10., nrad=16, ncirc=40, nlong=50, element_type=\"hex\"):\n\n if element_type != \"hex\":\n raise NotImplementedError('Generating {} mesh of cylinder is not supported yet'.format(element_type))\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center for the base of the cylinder should be given in a tuple with three elements (x,y,z)\")\n\n self.__reset__()\n\n nlong = int(nlong)\n if nlong==0:\n nlong = 1\n\n 
mesh = Mesh()\n mesh.Arc(center=(center[0],center[1]), radius=radius, start_angle=start_angle,\n end_angle=end_angle, nrad=nrad, ncirc=ncirc, element_type=\"quad\")\n\n self.Extrude(base_mesh=mesh, length=length, nlong=nlong)\n self.points += center[2]", "def arc(t, r, angle):\n circumference = math.pi * 2 * r\n n = 60\n length = circumference / n\n arc_in = int(((angle / 360) * circumference) / 2)\n for i in range(arc_in):\n t.fd(length)\n t.lt(360/n)", "def samplearc(c,u,polar=False):\n p=c[0]\n r=c[1][0]\n start=c[1][1]\n end=c[1][2]\n w=c[1][3]\n if w == -2: # if arc is flagged as sample-reversed, flip u\n u=1.0-u\n if start != 0 and end != 360:\n start = start % 360.0\n end = end % 360.0\n if end < start:\n end += 360.0\n if len(c) == 3:\n norm = c[2]\n if dist(norm,vect(0,0,1)) > epsilon:\n raise NotImplementedError('non x-y plane arc sampling not yet supported')\n angle = ((end-start)*u+start)%360.0\n radians = angle*pi2/360.0\n if polar: # return polar coordinates with cartesian center\n return [ p,r,angle]\n \n q = scale3(vect(cos(radians),sin(radians)),r)\n return add(p,q)", "def get_roi_circle(self):\n return self.circle_list", "def _lineArcIntersectXY(l,c,inside=True,params=False):\n x=c[0]\n r=c[1][0]\n mpr=mpm.mpf(r)\n \n # is the arc a full circle?\n circle = False\n if c[1][1] == 0 and c[1][2] == 360:\n circle = True\n \n start=c[1][1] % 360.0\n end=c[1][2] %360.0\n\n ## what is the shortest distance between the line and the center\n ## of the arc? If that is greater than r, then there is no\n ## intersection\n dst = linePointXYDist(l,x,inside and not params)\n if dst > r+epsilon:\n return False\n\n ## start by treating the arc as a circle. At this point we know\n ## we have one or two intersections within the line segment,\n ## though perhaps none within the arc segment, which we will test\n ## for later\n \n ## transform points so arc is located at the origin\n p0=sub(l[0],x)\n p1=sub(l[1],x)\n \n ## solve for b in: | b*p0 + (1-b)*p1 | = r\n ## let V= p0-p1, P=p1\n ## | b*V + P |^2 = r^2\n ## b^2(Vx^2 + Vy^2) + 2b(VxPx+VyPy) + Px^2 + Py^2 - r^2 = 0\n ## let a = Vx^2 + Vy^2,\n ## b = 2*(VxPx + VyPy)\n ## cc = Px^2 + Py^2 - r^2\n ## b0 = ( -b + sqrt(b^2 - 4ac) )/ 2a\n ## b1 = ( -b - sqrt(b^2 - 4ac) )/ 2a\n \n V = sub(p0,p1)\n P = p1\n #a = V[0]*V[0]+V[1]*V[1]\n mpV0 = mpm.mpf(V[0])\n mpV1 = mpm.mpf(V[1])\n mpP0 = mpm.mpf(P[0])\n mpP1 = mpm.mpf(P[1])\n a = mpV0*mpV0+mpV1*mpV1\n mpepsilon = mpm.mpf(epsilon)\n if mpm.fabs(a) < mpepsilon*mpepsilon:\n print('degenerate line in lineArcIntersectXY')\n raise ValueError('bad!')\n return False\n # b = 2*(V[0]*P[0]+V[1]*P[1])\n b = 2*(mpV0*mpP0+mpV1*mpP1)\n #cc = P[0]*P[0]+P[1]*P[1]-r*r\n cc = mpP0*mpP0+mpP1*mpP1-mpr*mpr\n d = b*b-4*a*cc\n ## Check to see if we are within epsilon, scaled by the length of the line\n if mpm.fabs(d) < mpm.sqrt(a)*2*mpepsilon: # one point of intersection\n b0 = -b/(2*a)\n b1 = False\n elif d < 0:\n print(\"value of d: \",d,\" value of sqrt(a)*epsilon\",sqrt(a)*epsilon)\n raise ValueError(\"imaginary solution to circle line intersection -- shouldn't happen here\")\n else: # two points of intersection\n b0 = (-b + mpm.sqrt(d))/(2*a)\n b1 = (-b - mpm.sqrt(d))/(2*a)\n\n # use computed parameters to calculate solutions, still in\n # circle-at-origin coordinates\n s = [ add(scale3(V,float(b0)),p1) ]\n if b1:\n s = s + [ add(scale3(V,float(b1)),p1) ]\n\n if not inside or circle or params: # transform back into world\n # coordinates\n pp = list(map(lambda q: add(q,x),s))\n if params:\n uu1 = []\n uu2 = []\n for i 
in range(len(pp)):\n uu1 = uu1 + [ unsampleline(l,pp[i]) ]\n uu2 = uu2 + [ unsamplearc(c,pp[i]) ]\n return [uu1, uu2]\n else:\n return pp\n\n ## see if any of the intersections we've found lie between\n ## start and end of the arc\n \n ss = []\n for i in s:\n ang = (atan2(i[1],i[0]) % pi2)*360.0/pi2\n\n if end > start and ang >= start and ang <= end:\n ss = ss + [ add(x,i) ]\n elif end < start and (ang >= start or ang<= end):\n ss = ss + [ add(x,i) ]\n\n if len(ss) == 0:\n return False\n return ss", "def cone(self, ra, dec, r, remove_duplicates=False):\n if r <= 0 or r > 90:\n raise ValueError(\"Radius has to be inside (0, 90] interval. Got: \"+str(r))\n if ra < 0 or ra >= 360:\n raise ValueError(\"Ra has to be inside [0, 360) interval. Got: \"+str(ra))\n if dec < -90 or dec > 90:\n raise ValueError(\"Dec has to be inside [-90, 90] interval. Got: \"+str(dec))\n zone1 = dec_to_zone(dec - r)\n zone2 = dec_to_zone(dec + r)\n if zone1 < 0:\n zone1 = 0\n if zone2 > max_zone():\n zone2 = max_zone()\n dec1 = dec - r\n dec2 = dec + r\n if dec1 < -90:\n dec1 = -90\n if dec2 > 90:\n dec2 = 90\n\n ra1extra = -1\n ra2extra = -1\n ra1 = ra - r\n ra2 = ra + r\n\n if ra1 < 0:\n ra1extra = 360 + ra1\n ra2extra = 360\n ra1 = 0\n if ra2 > 360:\n ra1extra = 0\n ra2extra = ra2 - 360\n ra2 = 360\n\n if ra1extra >= 0:\n res = wrap(self._df.where(self._df.zone.between(zone1, zone2) &\n (self._df.ra.between(ra1, ra2) | self._df.ra.between(ra1extra, ra2extra)) &\n self._df.dec.between(dec1, dec2) &\n (dist_euclid(F.lit(ra), F.lit(dec), self._df.ra, self._df.dec) <= r)), self._table_info)\n else:\n res = wrap(self._df.where(self._df.zone.between(zone1, zone2) &\n self._df.ra.between(ra1, ra2) &\n self._df.dec.between(dec1, dec2) &\n (dist_euclid(F.lit(ra), F.lit(dec), self._df.ra, self._df.dec) <= r)), self._table_info)\n if remove_duplicates:\n res = res.exclude_duplicates()\n return res", "def trackCircle( center, rad, imShape ):\n \n \"\"\"\n center = ccnt\n rad = rd\n inShape = segImg.shape\n debug = False\n \"\"\"\n \n # check if whole circle is inside image\n if (center[0] - rad) < 0 or (center[0] + rad) >= imShape[1] or (center[1] - rad) < 0 or (center[1] + rad) >= imShape[0]:\n raise NameError( 'Circle partialy outside the image' )\n \n center = np.array( center )\n \n # start tracking at right side of circle, always pick neigbouring pixel which is closest to tabs radius and stop when came around\n startPoint1 = np.round( center + np.array( [ rad, 0] ) )\n \n currentPoint = startPoint1.copy()\n contour = [ currentPoint ]\n iterNum = 0\n maxIterNum = 1000\n \n def getNextPoint():\n \"\"\"\n gets next point \n \"\"\"\n surroundingPts_local = np.array( [ [1,0], [1,-1], [0,-1], [-1,-1], [-1,0], [-1,1], [0,1], [1,1] ])\n surroundingPts_global = np.tile( currentPoint, [8,1] ) + surroundingPts_local\n \n if len( contour ) > 1:\n # dont use last\n includeInd = np.sum( surroundingPts_global == contour[-2], 1 ) != 2\n # aditionaly exlude neighbout pts\n excludeInd = np.where( includeInd == False)[0][0]\n if excludeInd == 0:\n includeInd[ [1, 7] ] = False\n elif excludeInd == 7:\n includeInd[ [0, 6] ] = False\n else:\n includeInd[ [ excludeInd-1, excludeInd+1 ] ] = False\n \n surroundingPts_global = surroundingPts_global * np.tile( includeInd, [2,1] ).T\n \n # find closest to demamnded radius\n dists = np.abs( np.sqrt( np.sum( ( surroundingPts_global - np.tile( center, [8,1] ) )**2, 1 ) ) - rad )\n ind = np.argmin( dists )\n return surroundingPts_global[ ind, : ]\n \n while 1:\n # check if max num of iterations 
passed\n if iterNum == maxIterNum:\n print Warning( 'Reached max num of iterations. Tracking unsuccessful!' )\n #return np.array( contour ).astype(np.int), -1\n break\n \n # get next point\n nextPoint = getNextPoint()\n\n # in first iteraton also remember sesond tracked point.\n if iterNum is 0: \n startPoint2 = nextPoint.copy()\n \n # check if came around\n if iterNum > 2 and ( np.sum(nextPoint == startPoint1) ==2 or np.sum(nextPoint == startPoint2) == 2 ):\n # finished successfuly\n break \n # print iterNum, nextPoint - startPoint1, nextPoint\n \n # add to storage\n contour.append( nextPoint ) \n # increment \n iterNum += 1\n # reassign\n currentPoint = nextPoint.copy()\n\n # return result and successful flag\n return np.array( contour ).astype(np.int)", "def test_point_is_in_arc_range(p):\n arc = ConstructionArc((0, 0), 1, -90, 90)\n assert arc._is_point_in_arc_range(Vec2(p)) is True", "def is_occluding(p1: np.ndarray, r1: float, p2: np.ndarray, r2: float):\n d1, d2 = np.linalg.norm(p1), np.linalg.norm(p2) # compute distances\n u1, u2 = p1 / d1, p2 / d2 # project to unit circle\n rs1, rs2 = r1 / d1, r2 / d2 # scale radii by distance\n d = np.linalg.norm(u1 - u2) # compute distance between projected points\n return d < rs1 + rs2 and (d1 - r1) <= (d2 - r2)", "def is_point_inside_mask(border, target):\n degree = 0\n for i in range(len(border) - 1):\n a = border[i]\n b = border[i + 1]\n\n # calculate distance of vector\n A = get_cartersian_distance(a[0], a[1], b[0], b[1])\n B = get_cartersian_distance(target[0], target[1], a[0], a[1])\n C = get_cartersian_distance(target[0], target[1], b[0], b[1])\n\n # calculate direction of vector\n ta_x = a[0] - target[0]\n ta_y = a[1] - target[1]\n tb_x = b[0] - target[0]\n tb_y = b[1] - target[1]\n\n cross = tb_y * ta_x - tb_x * ta_y\n clockwise = cross < 0\n\n # calculate sum of angles\n if clockwise:\n degree = degree + np.rad2deg(\n np.arccos((B * B + C * C - A * A) / (2.0 * B * C))\n )\n else:\n degree = degree - np.rad2deg(\n np.arccos((B * B + C * C - A * A) / (2.0 * B * C))\n )\n\n if abs(round(degree) - 360) <= 3:\n return True\n return False", "def stokes_right_circular():\n return np.array([1, 0, 0, 1])", "def can_left_arc(c, correct_arcs):\n try:\n return Arc(c.buffer[0], c.sentence[c.stack[-1]].deprel, c.stack[-1]) in correct_arcs\n except IndexError:\n return False", "def is_circle(points, scale, verbose=False):\n\n # make sure input is a numpy array\n points = np.asanyarray(points)\n scale = float(scale)\n\n # can only be a circle if the first and last point are the\n # same (AKA is a closed path)\n if np.linalg.norm(points[0] - points[-1]) > tol.merge:\n return None\n\n box = points.ptp(axis=0)\n # the bounding box size of the points\n # check aspect ratio as an early exit if the path is not a circle\n aspect = np.divide(*box)\n if np.abs(aspect - 1.0) > tol.aspect_frac:\n return None\n\n # fit a circle with tolerance checks\n CR = fit_circle_check(points, scale=scale)\n if CR is None:\n return None\n\n # return the circle as three control points\n control = arc.to_threepoint(**CR)\n return control", "def find_channel_neighbors(geom, radius):\n return (squareform(pdist(geom)) <= radius)", "def find_channel_neighbors(geom, radius):\n return (squareform(pdist(geom)) <= radius)", "def circle_contractivity_radius(self,acc=1.e-13,rmax=1000):\n from nodepy.utils import bisect\n\n tol=1.e-14\n r=bisect(0,rmax,acc,tol,self.__num__()._is_circle_contractive)\n return r", "def circles_and_rectangles(\n target,\n pore_diameter='pore.diameter',\n 
throat_diameter='throat.diameter'\n):\n from openpnm.models.geometry import conduit_lengths\n out = conduit_lengths.circles_and_rectangles(\n target, pore_diameter=pore_diameter, throat_diameter=throat_diameter\n )\n return out[:, 1]", "def _arc_radius(height_in_units):\n return height_in_units * _ARC_HEIGHT_UNIT / (1 - math.cos(_ANGLE))", "def circle(geod, lon, lat, radius, n_samples=360):\n lons, lats, back_azim = geod.fwd(np.repeat(lon, n_samples), #lons\n np.repeat(lat, n_samples), #lats\n np.linspace(360, 0, n_samples), #azimuth\n np.repeat(radius, n_samples), #distance\n radians=False,\n )\n return lons, lats, back_azim", "def test(shape=(1000,2000)):\n mask = Mask()\n mask.addCircle(400,300,250)\n mask.subtractCircle(400,300,150)\n mask.addRectangle(350,250,1500,700)\n plt.imshow( mask.getMask(shape) )\n return mask" ]
[ "0.7897219", "0.7897219", "0.7897219", "0.76970416", "0.6498443", "0.6337913", "0.6234694", "0.6167133", "0.6006037", "0.59997934", "0.58989346", "0.5845237", "0.5817369", "0.5690966", "0.5672399", "0.55962497", "0.552139", "0.5515878", "0.5482021", "0.54450697", "0.54447085", "0.5442055", "0.54207516", "0.5413375", "0.54011077", "0.52636874", "0.5257807", "0.5250789", "0.5232772", "0.5225514", "0.5200453", "0.5194668", "0.51904947", "0.5163497", "0.51546943", "0.5149196", "0.51394975", "0.51382107", "0.5125169", "0.5118429", "0.5111434", "0.50898933", "0.50894606", "0.5069018", "0.5064184", "0.5063516", "0.5059152", "0.5046566", "0.5027932", "0.5023788", "0.50196505", "0.5016999", "0.5013966", "0.50061786", "0.5003166", "0.5002568", "0.49869666", "0.49820912", "0.49799368", "0.49792215", "0.49755383", "0.49647132", "0.49587372", "0.49575573", "0.49405444", "0.49284482", "0.49250868", "0.49217832", "0.49165672", "0.49076", "0.48984137", "0.48939908", "0.48927942", "0.4886329", "0.4871705", "0.486601", "0.4863631", "0.486302", "0.48590133", "0.48452407", "0.483469", "0.48333663", "0.48109838", "0.4796151", "0.47943163", "0.47940323", "0.47862118", "0.4783311", "0.47756428", "0.4765471", "0.47607827", "0.47465032", "0.4736447", "0.4736196", "0.4736196", "0.4721806", "0.4720765", "0.47065502", "0.46967235", "0.46965605" ]
0.7855404
3
Parses a region description to a dict
def parse_region(region, wcs):
    logger = logging.getLogger(__name__)

    shape = region.split(',')[0]
    coord = region.split(',')[1]
    frame = region.split(',')[2]
    try:
        params = region.split(',')[3:]
    except IndexError:
        logger.error('No coordinates given.')
        logger.error('Will exit now.')
        sys.exit(1)

    if 'sky' in coord.lower() and frame == '':
        logger.error('No frame specified.')
        logger.error('Will exit now.')
        sys.exit(1)

    if shape == 'point':
        # Convert sky coordinates to pixels if required
        if 'sky' in coord.lower():
            coo_sky = SkyCoord(params[0], params[1], frame=frame)
            params[0:2] = coo_sky.to_pixel(wcs)
        params = [int(round(float(x))) for x in params]
        rgn = {'shape':'point', 'params':{'cx':params[0], 'cy':params[1]}}

    elif shape == 'box':
        # Convert sky coordinates to pixels if required
        if 'sky' in coord.lower():
            blc_sky = SkyCoord(params[0], params[1], frame=frame)
            trc_sky = SkyCoord(params[2], params[3], frame=frame)
            params[0:2] = blc_sky.to_pixel(wcs)
            params[2:] = trc_sky.to_pixel(wcs)
        params = [int(round(float(x))) for x in params]
        rgn = {'shape':'box', 'params':{'blcx':params[0], 'blcy':params[1], 'trcx':params[2], 'trcy':params[3]}}

    elif shape == 'circle':
        # Convert sky coordinates to pixels if required
        if 'sky' in coord.lower():
            coo_sky = SkyCoord(params[0], params[1], frame=frame)
            params[0:2] = coo_sky.to_pixel(wcs)
        lscale = abs(wcs.pixel_scale_matrix[0,0])*u.deg
        val, uni = split_str(params[2])
        # Add units to the radius
        r = add_radius_units(val, uni)
        logger.debug('lscale: {0}'.format(lscale))
        logger.debug('radius: {0}'.format(r))
        params[2] = (r/lscale).cgs.value
        params = [float(x) for x in params]
        rgn = {'shape':'circle', 'params':{'cx':params[0], 'cy':params[1], 'r':params[2]}}

    elif shape == 'ellipse':
        # Convert sky coordinates to pixels if required
        if 'sky' in coord.lower():
            coo_sky = SkyCoord(params[0], params[1], frame=frame)
            params[0:2] = coo_sky.to_pixel(wcs)
        lscale = abs(wcs.pixel_scale_matrix[0,0])*u.deg
        logger.debug('lscale: {0}'.format(lscale))
        # Major axis.
        val, uni = split_str(params[2])
        # Add units to the major axis.
        r = add_radius_units(val, uni)
        logger.debug('major axis: {0}'.format(r))
        params[2] = (r/lscale).cgs.value
        # Minor axis.
        val, uni = split_str(params[3])
        # Add units to the minor axis.
        r = add_radius_units(val, uni)
        logger.debug('minor axis: {0}'.format(r))
        params[3] = (r/lscale).cgs.value
        params = [float(x) for x in params]
        rgn = {'shape':'ellipse', 'params':{'cx':params[0], 'cy':params[1], 'bmaj':params[2], 'bmin':params[3], 'theta':params[4]}}

    elif shape == 'crtf':
        # CASA region files are always in sky coordinates
        polys = ci.read_casa_polys(params[0], wcs=wcs)
        rgn = {'shape':'polygon', 'params':{'Polygons':polys}}

    elif shape == 'all':
        rgn = {'shape':'all', 'params':'all'}

    else:
        print('region description not supported.')
        print('Will exit now.')
        logger.error('Region description not supported.')
        logger.error('Will exit now.')
        sys.exit(1)

    return rgn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _decode_region_details(self, region):\r\n\r\n # This whole thing is a bit of a mess but it preserves backwards compat but also allows for\r\n # ad-hoc addition of new regions without code changes\r\n # TODO: Move EC2 builtin details to a config file\r\n region_details = None\r\n\r\n # If it is JSON, decode it\r\n try:\r\n region_details = json.loads(region)\r\n except:\r\n pass\r\n\r\n # If decoded JSON has a name field, use it for the follow on lookups\r\n if region_details and 'name' in region_details:\r\n region = region_details['name']\r\n\r\n if region in self.ec2_region_details:\r\n bi_region_details = self.ec2_region_details[region]\r\n elif (\"ec2-\" + region) in self.ec2_region_details:\r\n bi_region_details = self.ec2_region_details[\"ec2-\" + region]\r\n else:\r\n bi_region_details = None\r\n\r\n if (not region_details) and (not bi_region_details):\r\n # We couldn't decode any json and the string we were passed is not a builtin region - give up\r\n raise ImageFactoryException(\"Passed unknown EC2 region (%s)\" % region)\r\n\r\n if not region_details:\r\n region_details = { }\r\n\r\n # Allow the builtin details to fill in anything that is missing\r\n if bi_region_details:\r\n for bi_key in bi_region_details:\r\n if not (bi_key in region_details):\r\n region_details[bi_key] = bi_region_details[bi_key]\r\n\r\n return region_details", "def get_dict_from_region_string(region_string):\n\n region_list = region_string.strip().replace(\",\", \"\").replace(\"-\", \":\").split(\":\")\n region_dict = {\n \"chrom\": str(region_list[0]),\n \"start\": int(region_list[1]),\n \"end\": int(region_list[2]),\n }\n\n return region_dict", "def load_region_bounds_dict():\n region_bounds_dict = {'EAs': [(94, 156), (20, 65)], # longitude tuple, latitude tuple\n 'SEAs': [(94, 161), (-10, 20)],\n 'ANZ': [(109, 179), (-50, -10)],\n 'SAs': [(61, 94), (0, 35)],\n 'AfME': [(-21, 61), (-40, 35)],\n 'Eur': [(-26, 31), (35, 75)],\n 'CAs': [(31, 94), (35, 75)],\n 'NAm': [(-169, -51), (15, 75)],\n 'SAm': [(266, 329), (-60, 15)],\n 'Zon': [None, (-75.5, -65.5)],\n 'Mer': [(175.5, 185.5), None],\n 'Glb': [None, None]}\n return region_bounds_dict", "def regions_dict(self):\n regions_dict = dict()\n for i, r in enumerate(self.regions):\n regions_dict[getattr(r, 'ix', i)] = r\n return regions_dict", "def _parse_region(self, region: OMNode) \\\n -> Tuple[Dict[str, str],\n Dict[str, Dict[str, List[OMRegField]]]]:\n regmap = region['registerMap']\n groups = {}\n try:\n group = None\n for group in regmap['groups']:\n name = group['name'].lower()\n desc = group.get('description', '').strip()\n if desc.lower().endswith(' register'):\n desc = desc[:-len(' register')]\n if name not in groups:\n groups[name] = desc\n elif not groups[name]:\n groups[name] = desc\n except Exception:\n if self._debug:\n pprint(group, stream=stderr)\n raise\n registers = {}\n try:\n field = None\n # some register fields have no groups...\n # try to associate them with the previous group, if any\n lastgroup = (None, 0)\n for field in regmap['registerFields']:\n bitbase = field['bitRange']['base']\n bitsize = field['bitRange']['size']\n regfield = field['description']\n name = regfield['name'].lower()\n if name == 'reserved':\n continue\n desc = regfield['description']\n try:\n group = regfield['group'].lower()\n lastgroup = (group, bitbase+bitsize)\n except KeyError:\n group = None\n if lastgroup[0]:\n groupend = ((lastgroup[1] + self._regwidth - 1) &\n ~(self._regwidth - 1))\n remwidth = groupend-lastgroup[1]\n if bitbase < groupend and 
bitsize <= remwidth:\n group = lastgroup[0]\n self._log.info('No group in %s for field %s, use '\n 'pre-existing group %s',\n region['name'].upper(), name, group)\n if group is None:\n group = name\n lastgroup = (group, bitbase+bitsize)\n self._log.warning('No group in %s for field %s, create '\n 'fake group %s (OM is incomplete)',\n region['name'].upper(), name, group)\n reset = regfield.get('resetValue', None)\n access_ = list(filter(lambda x: x in OMAccess.__members__,\n regfield['access'].get('_types', '')))\n access = OMAccess[access_[0]] if access_ else None\n regfield = OMRegField(HexInt(bitbase), bitsize, desc, reset,\n access)\n if group not in registers:\n registers[group] = {}\n registers[group][name] = regfield\n except Exception:\n if self._debug:\n pprint(field, stream=stderr)\n raise\n foffsets = {}\n # sort the field by offset for each register group\n for grpname in registers:\n group = registers[grpname]\n fnames = sorted(group, key=lambda n: group[n].offset)\n fodict = {name: group[name] for name in fnames}\n foffsets[grpname] = group[fnames[0]].offset\n registers[grpname] = fodict\n godict = {name: registers[name]\n for name in sorted(registers, key=lambda n: foffsets[n])}\n return groups, godict", "def find_regions(directives):\n regions = {}\n for directive in directives:\n if directive.startswith(\"sequence-region\"):\n try:\n _, accession, start, end = directive.split(\" \")\n regions[accession] = (int(start), int(end))\n except ValueError:\n # likely sequence-region without coordinates\n pass\n return regions", "def get_region_dict(self):\n if self.initiated is False:\n raise RuntimeError(\"Initiate first\")\n\n return self._region_dict", "def parse_shortform_block_annotation(description):\n if not description:\n return {}\n # TODO: Test encoded string formatting using regexp\n\n ref_version, chrom, scaffold, ori, gpos, _, coords = description.split('_')\n coords = [slice(int(start) - 1, int(stop))\n for start, stop in [c.split(':') for c in coords.split(';')]]\n\n return {\n 'ref_version': ref_version,\n 'chromosome': chrom,\n 'chromosome_scaffold': int(scaffold),\n 'genome_pos': int(gpos) - 1,\n 'orientation': ori,\n 'blocks': coords,\n }", "def get_region(state: str) -> str:\n state: str = state.title()\n region: str = \"\"\n states: dict = {\n \"NORTE\": [\n \"Tocantins\",\n \"Pará\",\n \"Amapá\",\n \"Roraima\",\n \"Amazonas\",\n \"Acre\",\n \"Rondônia\",\n ],\n \"NORDESTE\": [\n \"Alagoas\",\n \"Bahia\",\n \"Ceará\",\n \"Maranhão\",\n \"Paraíba\",\n \"Pernambuco\",\n \"Piauí\",\n \"Rio Grande Do Norte\",\n \"Sergipe\",\n ],\n \"CENTRO-OESTE\": [\n \"Goiás\",\n \"Mato Grosso\",\n \"Mato Grosso Do Sul\",\n \"Distrito Federal\",\n ],\n \"SUDESTE\": [\n \"São Paulo\",\n \"Rio de Janeiro\",\n \"Espírito Santo\",\n \"Minas Gerais\",\n ],\n \"SUL\": [\"Rio Grande Do Sul\", \"Paraná\", \"Santa Catarina\"],\n }\n\n for (key, values) in states.items():\n if state in values:\n region = key\n\n if not region:\n return \"DESCONHECIDO\"\n\n return region", "def get_region_string(region: discord.VoiceRegion) -> str:\n regions = {\"us-west\": EMOJI[\":flag_us:\"]+\"US West\",\n \"us-east\": EMOJI[\":flag_us:\"]+\"US East\",\n \"us-central\": EMOJI[\":flag_us:\"]+\"US Central\",\n \"us-south\": EMOJI[\":flag_us:\"]+\"US South\",\n \"eu-west\": EMOJI[\":flag_eu:\"]+\"West Europe\",\n \"eu-central\": EMOJI[\":flag_eu:\"]+\"Central Europe\",\n \"singapore\": EMOJI[\":flag_sg:\"]+\"Singapore\",\n \"london\": EMOJI[\":flag_gb:\"]+\"London\",\n \"sydney\": 
EMOJI[\":flag_au:\"]+\"Sydney\",\n \"amsterdam\": EMOJI[\":flag_nl:\"]+\"Amsterdam\",\n \"frankfurt\": EMOJI[\":flag_de:\"]+\"Frankfurt\",\n \"brazil\": EMOJI[\":flag_br:\"]+\"Brazil\",\n \"japan\": EMOJI[\":flag_jp:\"]+\"Japan\",\n \"hongkong\": EMOJI[\":flag_hk:\"]+\"Hong Kong\",\n \"russia\": EMOJI[\":flag_ru:\"]+\"Russia\",\n \"vip-us-east\": EMOJI[\":flag_us:\"]+\"US East (VIP)\",\n \"vip-us-west\": EMOJI[\":flag_us:\"]+\"US West (VIP)\",\n \"vip-amsterdam\": EMOJI[\":flag_nl:\"]+\"Amsterdam (VIP)\",\n }\n return regions.get(str(region), str(region))", "def parse(spec: str):\n parts = spec.split(\":\", maxsplit=1)\n chromosome = parts[0]\n if len(parts) == 1 or not parts[1]:\n start, end = 0, None\n else:\n try:\n sep = \":\" if \":\" in parts[1] else \"-\"\n start_end = parts[1].split(sep, maxsplit=1)\n start = int(start_end[0]) - 1\n if len(start_end) == 1 or not start_end[1]:\n end = None\n else:\n end = int(start_end[1])\n if end <= start:\n raise InvalidRegion(\"end is before start in specified region\")\n except ValueError:\n raise InvalidRegion(\"Region must be specified as chrom[:start[-end]])\") from None\n return Region(chromosome, start, end)", "def get_new_region_dict(db_name=DB_NAME, lang='en'):\n\n region_dict = {}\n db_dest = 'database/' + db_name\n conn = sqlite3.connect(db_dest)\n cur = conn.cursor()\n\n if lang == 'zh':\n select_col = 'NewNameZh'\n elif lang == 'en':\n select_col = 'NewNameEn'\n else:\n return None\n\n statement = '''\n SELECT DISTINCT NewRegionId, {}\n FROM RegionsOld\n ;'''.format(select_col)\n result = cur.execute(statement)\n result_lst = result.fetchall()\n for (region_id, region_name) in result_lst:\n region_dict[region_id] = region_name\n\n conn.close()\n return region_dict", "def parse_region_data(self, region):\r\n \r\n self.download_data()\r\n region_filename = self.__get_region_filename(region)\r\n list_str = [\"region\", \"p1\", \"p36\", \"p37\", \"p2a\", \"weekday(p2a)\", \"p2b\", \"p6\", \"p7\", \"p8\", \"p9\", \"p10\", \r\n \"p11\", \"p12\", \"p13a\", \"p13b\", \"p13c\", \"p14\", \"p15\", \"p16\", \"p17\", \"p18\",\"p19\", \"p20\", \"p21\", \r\n \"p22\", \"p23\", \"p24\", \"p27\", \"p28\", \"p34\", \"p35\", \"p39\", \"p44\", \"p45a\", \"p47\", \"p48a\", \"p49\", \r\n \"p50a\", \"p50b\", \"p51\", \"p52\", \"p53\", \"p55a\", \"p57\", \"p58\", \"a\", \"b\", \"d\", \"e\", \"f\", \"g\", \"h\", \r\n \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"p5a\"]\r\n \r\n row_count = self.__get_files_row_count(region)\r\n list_arrays = [np.zeros(row_count, dtype=dt) for dt in self.__types]\r\n row_index = 0\r\n for file in self.__files:\r\n file_to_parse = os.path.join(self.__folder, os.path.basename(file))\r\n with zipfile.ZipFile(file_to_parse, \"r\") as zf:\r\n with zf.open(region_filename, 'r') as csv_file:\r\n reader = csv.reader(TextIOWrapper(csv_file, 'windows-1250'), delimiter=';', quotechar='\"')\r\n for row in reader:\r\n list_arrays[0][row_index] = region\r\n for col_index, col in enumerate(row):\r\n try:\r\n list_arrays[col_index + 1][row_index] = col\r\n except ValueError:\r\n if list_arrays[col_index + 1][row_index].dtype == np.int64:\r\n list_arrays[col_index + 1][row_index] = -1\r\n elif list_arrays[col_index + 1][row_index].dtype == 'datetime64[D]':\r\n print('Error2:', col)\r\n elif list_arrays[col_index + 1][row_index].dtype == np.float64:\r\n if type(col) != str:\r\n list_arrays[col_index + 1][row_index] = col.replace(',', '.')\r\n else:\r\n list_arrays[col_index + 1][row_index] = 
float(\"nan\")\r\n row_index += 1\r\n \r\n return (list_str, list_arrays)", "def find_regions(src: str) -> Dict[int, Tuple[int, int]]:\n # Filter out the first line if in a Jupyter notebook and it starts with a magic (% or %%).\n src = ScaleneAnalysis.strip_magic_line(src)\n srclines = src.split(\"\\n\")\n tree = ast.parse(src)\n regions = {}\n loops = {}\n functions = {}\n classes = {}\n for node in ast.walk(tree):\n if isinstance(node, ast.ClassDef):\n for line in range(node.lineno, node.end_lineno+1):\n classes[line] = (node.lineno, node.end_lineno)\n if isinstance(node, (ast.For, ast.While)):\n for line in range(node.lineno, node.end_lineno+1):\n loops[line] = (node.lineno, node.end_lineno)\n if isinstance(node, ast.FunctionDef):\n for line in range(node.lineno, node.end_lineno+1):\n functions[line] = (node.lineno, node.end_lineno)\n for lineno, line in enumerate(srclines, 1):\n if lineno in loops:\n regions[lineno] = loops[lineno]\n elif lineno in functions:\n regions[lineno] = functions[lineno]\n elif lineno in classes:\n regions[lineno] = classes[lineno]\n else:\n regions[lineno] = (lineno, lineno)\n return regions", "def machRegionsParser (regions):\n\tmslines=regions.split('\\n')\n\tretarray=[]\n\tfor s in mslines:\n\t\tif ( (s.find(\"0x\") > -1) and (s.find(\"---/\") == -1) ):\n\t\t\taddresses=s.split(' ')\n\t\t\taddressparts=addresses[0].split('-')\n\t\t\tstartaddress=int(addressparts[0], 16)\n\t\t\tendaddress=int(addressparts[1],16)\n\t\t\tsize=endaddress-startaddress\n\t\t\tretarray.append([startaddress, endaddress, size])\n\treturn retarray", "def mapLoad(filename,close=False):\n tree = ET.parse(filename)\n regions = {}\n starts = []\n for node in tree.getroot().getchildren():\n assert node.tag == 'region'\n name = str(node.get('name'))\n assert not regions.has_key(name),'Duplicate region name: %s' % (name)\n regions[name] = {'value': int(node.get('value','5')),\n 'owner': int(node.get('owner','0')),\n 'occupants': int(node.get('occupants','5')),\n 'neighbors': set()}\n for subnode in node.getchildren():\n assert subnode.tag == 'neighbor'\n regions[name]['neighbors'].add(str(subnode.get('name')))\n if regions[name]['owner'] > 0:\n starts.append((regions[name]['owner'],name))\n starts = [entry[1] for entry in sorted(starts)]\n if close:\n closeRegions(regions)\n return regions,starts", "def parse_info(s:str) -> dict:\n d = {}\n d[\"SVTYPE\"] = re.search(r'(?<=SVTYPE=)\\w+',s).group(0)\n d[\"SUPPORT\"] = re.search(r'(?<=SUPPORT=)\\d+',s).group(0)\n if d[\"SVTYPE\"] in [\"BND\"]:\n return d\n d[\"END\"] = re.search(r'(?<=END=)\\d+',s).group(0)\n if d[\"SVTYPE\"] in [\"INV\"]:\n return d\n d[\"SVLEN\"] = re.search(r'(?<=SVLEN=)(.*?)(?=;)',s).group(0)\n d[\"READS\"] = re.search(r'(?<=READS=)(.*?)(?=$)',s).group(0).split(\",\")\n if d[\"SVTYPE\"] == \"INS\":\n d[\"SEQS\"] = re.search(r'(?<=SEQS=)(.*?)(?=;)',s).group(0).split(\",\")\n return d", "def _process_region(self, region, writer):", "def _parse_location(self, row):\n return {\n 'name': '',\n 'address': \"{} {}\".format(row[Row.ADDRESS], row[Row.ZIP]),\n 'neighborhood': row[Row.COMMUNITY_AREA]\n }", "def __init__(self, region):\r\n self.region = region", "def to_region(self):\n\n coords = self.convert_coords()\n log.debug(coords)\n viz_keywords = ['color', 'dash', 'dashlist', 'width', 'font', 'symsize',\n 'symbol', 'symsize', 'fontsize', 'fontstyle', 'usetex',\n 'labelpos', 'labeloff', 'linewidth', 'linestyle',\n 'point', 'textangle', 'fontweight']\n\n if isinstance(coords[0], SkyCoord):\n reg = 
self.shape_to_sky_region[self.region_type](*coords)\n elif isinstance(coords[0], PixCoord):\n reg = self.shape_to_pixel_region[self.region_type](*coords)\n else:\n self._raise_error(\"No central coordinate\")\n\n reg.visual = RegionVisual()\n reg.meta = RegionMeta()\n\n # both 'text' and 'label' should be set to the same value, where we\n # default to the 'text' value since that is the one used by ds9 regions\n label = self.meta.get('text',\n self.meta.get('label', \"\"))\n if label != '':\n reg.meta['label'] = label\n for key in self.meta:\n if key in viz_keywords:\n reg.visual[key] = self.meta[key]\n else:\n reg.meta[key] = self.meta[key]\n reg.meta['include'] = self.include\n return reg", "def parse_regions(text):\n _regions = []\n region_pairs = text.strip().split(\",\")\n for region_pair in region_pairs:\n split_pair = region_pair.split(\"..\")\n start = split_pair[0]\n end = split_pair[1]\n _regions.append([start, end])\n return _regions", "def parse_text(self, text: str) -> SectionDict:", "def read_region(exp_name='cc', level=95):\n # shape file path\n file_name = shapefile_exptype[exp_name]\n file_path_name = os.path.join('data', 'regions', file_name)\n shape_file = resource_filename(__name__, file_path_name)\n\n # Get the Shape files, combine them and add them to a dictionary.\n regions = dict()\n with fiona.open(shape_file, 'r', 'ESRI Shapefile', schema) as shp:\n\n for s in shp:\n\n corr = s['properties']['corr']\n\n if s['properties']['levels'] != level:\n continue\n\n region = shape(s['geometry'])\n\n if corr in regions.keys():\n regions[str(corr)] = regions[corr].union(region)\n else:\n regions[str(corr)] = region\n\n return regions", "def region(self):\n return self._get(\"region\")", "def build_region(self, \n dataset_metadata_dict,\n min_lod_pixels=100, \n max_lod_pixels=-1, \n min_fade_extent=200, \n max_fade_extent=800\n ):\n\n region = simplekml.Region(latlonaltbox=\"<north>\" + str(dataset_metadata_dict['latitude_max']) + \"</north>\" +\n \"<south>\" + str(dataset_metadata_dict['latitude_min']) + \"</south>\" +\n \"<east>\" + str(dataset_metadata_dict['longitude_max']) + \"</east>\" +\n \"<west>\" + str(dataset_metadata_dict['longitude_min']) + \"</west>\",\n lod=\"<minLodPixels>\" + str(min_lod_pixels) + \"</minLodPixels>\" +\n \"<maxLodPixels>\" + str(max_lod_pixels) + \"</maxLodPixels>\" +\n \"<minFadeExtent>\" + str(min_fade_extent) + \"</minFadeExtent>\" +\n \"<maxFadeExtent>\" + str(max_fade_extent) + \"</maxFadeExtent>\")\n return region", "def filter_region_graph(data, region):\r\n MetaDct = data[1]\r\n f_MetaDct = {}\r\n for idx in MetaDct:\r\n if idx != ',':\r\n if MetaDct[idx].region == region:\r\n f_MetaDct[idx] = MetaDct[idx].country\r\n return f_MetaDct", "def read_input(self):\n\n with open(self.input_file, 'r') as file:\n\n data = defaultdict(list)\n for line in file:\n # Skip empty lines, meta-data and comments\n if len(line) < 2 or line[0] == '#':\n continue\n\n # Make Region object of current line\n f = Region(line)\n data[f.rid].append(f)\n\n return data", "def to_dict(self):\n return {'tag': self.tag,\n 'title': self.title,\n 'regionTitle': self.region_title,\n 'shortTitle': self.short_title,\n 'color': self.color,\n 'oppositeColor': self.opposite_color,\n 'latMin': self.lat_min,\n 'latMax': self.lat_max,\n 'lonMin': self.lon_min,\n 'lonMax': self.lon_max,\n 'directions': [d.to_dict() for d in self.directions],\n 'stops': [s.to_dict() for s in self.stops],\n 'paths': [p.to_list() for p in self.paths]}", "def region(self, args):\n m = 
MessageClass()\n print('123124')\n data = {'list': []}\n data['list'].append({\"Region_Name\": \"us-east-1\"})\n data['list'].append({\"Region_Name\": \"us-east-2\"})\n data['list'].append({\"Region_Name\": \"us-west-1\"})\n data['list'].append({\"Region_Name\": \"us-west-2\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-2\"})\n data['list'].append({\"Region_Name\": \"ap-south-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ca-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-2\"})\n data['list'].append({\"Region_Name\": \"eu-west-3\"})\n data['list'].append({\"Region_Name\": \"sa-east-1\"})\n m.data = data\n return m.to_json()", "def _parse_location(self, response):\n location_block = response.css(\".row.mt-4 > .col-12\")[0]\n location_items = location_block.css(\"p *::text\").extract()\n addr_items = [\n i.strip() for i in location_items if \"Building\" not in i and i.strip()\n ]\n name_items = [\n i.strip() for i in location_items if \"Building\" in i and i.strip()\n ]\n return {\n \"address\": \" \".join(addr_items),\n \"name\": \" \".join(name_items),\n }", "def get_region(self, region):\n\n return self.adapter.get_region(region) \n\n\n\n\n #file_compression = \"\"\n # magic_dict = {\n # b\"\\x1f\\x8b\\x08\": \"gz\",\n # b\"\\x42\\x5a\\x68\": \"bz2\",\n # b\"\\x50\\x4b\\x03\\x04\": \"zip\"\n # }\n # \n\n # max_len = max(len(x) for x in magic_dict)\n # with open(file_path, \"rb\") as f:\n # file_start = f.read(max_len)\n # for magic, filetype in magic_dict.items():\n # if file_start.startswith(magic):\n # file_compression = filetype\n # split_ext = file_path.split(\".\")\n # extension = split_ext[len(split_ext) -1]\n # if(file_compression == \"zip\"):\n # if extension != \"zip\":\n # subprocess.call(\"mv {} {}.zip\".format(file_path, file_path).split())\n # subprocess.call(\"unzip {} -d .\".format(file_path).split())\n # if(file_compression == \"bz2\"):\n # if extension != \"bz2\":\n # subprocess.call(\"mv {} {}.bz2\".format(file_path,file_path).split())\n # subprocess.call(\"bzip2 -df {}\".format(file_path).split())\n # if(file_compression == \"gz\"):\n # if extension != \"gz\":\n # subprocess.call(\"mv {} {}.gz\".format(file_path,file_path).split())\n # subprocess.call(\"gzip -df {}\".format(file_path).split())", "def decode_sect0(sec0):\r\n if str(np.packbits(sec0[:32]),'utf-8') != \"BUFR\":\r\n return {}\r\n \r\n size = bf.bits_to_n(sec0[32:56])\r\n edition = bf.bits_to_n(sec0[-8:])\r\n return {'size':size, 'edition':edition}", "def get_old_region_dict(db_name=DB_NAME, lang='en'):\n\n region_dict = {}\n db_dest = 'database/' + db_name\n conn = sqlite3.connect(db_dest)\n cur = conn.cursor()\n\n if lang == 'zh':\n select_col = 'NameZh'\n elif lang == 'en':\n select_col = 'NameEn'\n else:\n return None\n\n statement = '''\n SELECT Id, {}\n FROM RegionsOld\n ;'''.format(select_col)\n result = cur.execute(statement)\n result_lst = result.fetchall()\n for (region_id, region_name) in result_lst:\n region_dict[region_id] = region_name\n\n conn.close()\n return region_dict", "def parse(self, blob: str) -> Dict[str, Any]:\n parsed_output = {}\n for key, regex in self.reg_expressions.items():\n if key == BaseKeys.REGISTRANT_ADDRESS:\n match = self.find_match(regex, blob)\n # 
need to break up from address field\n address, city, state, country = match.split(', ')\n parsed_output[BaseKeys.REGISTRANT_ADDRESS] = address\n parsed_output[BaseKeys.REGISTRANT_CITY] = city\n parsed_output[BaseKeys.REGISTRANT_STATE] = state\n parsed_output[BaseKeys.REGISTRANT_COUNTRY] = country\n elif not parsed_output.get(key):\n parsed_output[key] = self.find_match(regex, blob, many=key in self.multiple_match_keys)\n\n # convert dates\n if key in self.date_keys and parsed_output.get(key, None):\n parsed_output[key] = self._parse_date(parsed_output.get(key))\n\n return parsed_output", "def procInfoParser (regions):\n\tmslines=regions.split('\\n')\n\tretarray=[]\n\tfor s in mslines:\n\t\tif (s.find(\"0x\") > -1):\n\t\t\taddresses=s.split()\n\t\t\tstartaddress=int(addresses[0], 16)\n\t\t\tendaddress=int(addresses[1],16)\n\t\t\tsize=endaddress-startaddress\n\t\t\tretarray.append([startaddress, endaddress, size])\n\treturn retarray", "def regions_json(self, filename):\n with open(filename) as f:\n return json.load(f)", "def extract_region(region_url):\n region = region_url[region_url.rfind('/')+1:]\n assert is_valid_region(region), region\n return region", "def test_regions(self):\n for i, item in enumerate(self._letters_proto.item):\n for code in item.region:\n # Region codes should be alpha-2 (where possible) or alpha-3 codes as\n # defined by ISO 3166 standard.\n self.assertLess(1, len(code))\n self.assertGreater(4, len(code))\n self.assertTrue(code.isupper(), f'Letter {i}: Region code `{code}` '\n 'should be upper-case')\n if len(code) == 3:\n country = pycountry.countries.get(alpha_3=code)\n self.assertTrue(country, f'Failed to find country for code `{code}`')\n if hasattr(country, 'alpha_2'):\n self.fail(f'Letter {i}: Please use two-letter code '\n f'`{country.alpha_2}` instead of `{country.alpha_3}` '\n f'for {country.name}')\n else:\n country = pycountry.countries.get(alpha_2=code)\n self.assertTrue(country, f'Failed to find country for code {code}')", "def region(self):\n return regions.lookup(self.state)", "def parse_mapping_page(self, id, body):\n info = {}\n info['original'] = self.__re_search(body, *self.regx['original'])\n info['save'] = self.__re_search(body, *self.regx['save'])\n info['price'] = self.__re_search(body, *self.regx['price'])\n info['rebate'] = self.__re_search(body, *self.regx['rebate'])\n return info", "def load_descriptions(doc):\n mapping = {}\n # Process lines.\n for line in doc.split('\\n'):\n # Split line by white space.\n tokens = line.split()\n if len(line) < 2:\n continue\n # Take the first token as the image id, the rest as the description.\n image_id, image_desc = tokens[0], tokens[1:]\n # Extract filename from image id.\n image_id = image_id.split('.')[0]\n # Convert description tokens back to string.\n image_desc = ' '.join(image_desc)\n # Create the mapping list if needed.\n if image_id not in mapping:\n mapping[image_id] = []\n # Store description.\n mapping[image_id].append(image_desc)\n return mapping", "def parse(self, fn, board):\n with open(fn) as f:\n return [(board.get_region(i['name']), i['base']) for i in json.loads(f.read())]", "def region(self):\n if self._region is None:\n cache_key = self.expand_name(\"region\")\n cached = unitdata.kv().get(cache_key)\n if cached:\n self._region = cached\n else:\n req = self._imdv2_request(self._az_url)\n with urlopen(req) as fd:\n az = fd.read(READ_BLOCK_SIZE).decode(\"utf8\")\n self._region = az.rstrip(string.ascii_lowercase)\n unitdata.kv().set(cache_key, self._region)\n return self._region", "def 
region(self):\n # type: () -> string_types\n return self._region", "def parse(self, src, line):\n r = line.split('\\t')\n p = {}\n if src == 'sf':\n p['businessID'] = r[0]\n p['name'] = r[1]\n p['address'] = r[2]\n p['city'] = r[3]\n p['state'] = r[4]\n p['zip'] = r[5]\n p['latitude'] = r[6]\n p['longitude'] = r[7]\n p['phone'] = r[8]\n elif src == 'nyc':\n p['businessID'] = r[0]\n p['name'] = r[1]\n # nyc separates the building number from the street name\n p['address'] = ' '.join([r[3].strip(), r[4].strip()])\n p['city'] = 'NYC'\n p['state'] = 'NY'\n p['zip'] = r[5]\n p['latitude'] = None\n p['longitude'] = None\n p['phone'] = r[6]\n return p", "def raw_data() -> Dict:\n return {\"neighbourhood\":\"Buttes-Montmartre\",\"room_type\":\"Entire home/apt\",\"minimum_nights\":1.555,\"mois\":2,\"voyageurs\":2.5,\"chambres\":1,\"lits\":1,\"salle_de_bains\":1}", "def get_trainscript_region_info(transcript_info, region_parent, region):\n if region_parent in transcript_info.keys():\n parent_info = transcript_info[region_parent]\n regions = []\n for r in parent_info:\n if r['object_type'] == region:\n regions.append(r)\n else:\n raise ValueError(region_parent + ' or ' + region + 'element could not be identified')\n return regions", "def parse_description(description):\n nodes = dict()\n outputs = dict()\n edges = dict()\n for line in description:\n components = line.strip().split()\n if not len(components): continue\n sentinel = components[0]\n if sentinel == 'value':\n _, value, _, _, _, node_name = components\n node = nodes.get(node_name, tuple())\n nodes[node_name] = node + (value, )\n else:\n (_, node_name, _, _, _, low_dictionary_name, low_node_name,\n _, _, _, high_dictionary_name, high_node_name) = components\n low_dictionary = \\\n nodes if low_dictionary_name == 'bot' else outputs\n high_dictionary = \\\n nodes if high_dictionary_name == 'bot' else outputs\n edges[node_name] = ((low_dictionary, low_node_name),\n (high_dictionary, high_node_name))\n return nodes, outputs, edges", "def _parse_location(self, item):\n if len(item.css('.eo-event-venue-map')) == 0:\n return {\n 'url': None,\n 'name': None,\n 'coordinates': {\n 'latitude': None,\n 'longitude': None,\n },\n }\n\n event_script = item.css('script:not([src])')[-1].extract()\n event_search = re.search('var eventorganiser = (.*);', event_script)\n event_details = json.loads(event_search.group(1))\n location = event_details['map'][0]['locations'][0]\n split_tooltip = location['tooltipContent'].split('<br />')\n if '<strong>' in split_tooltip[0]:\n location_name = split_tooltip[0][8:-9]\n else:\n location_name = split_tooltip[0]\n\n return {\n 'url': None,\n 'name': location_name,\n 'address': split_tooltip[1],\n 'coordinates': {\n 'latitude': location['lat'],\n 'longitude': location['lng'],\n },\n }", "def test_parse_AS_STRUCTURE_dict(self):\n \n true_tool = pybedtools.BedTool(\"chr10\\t127496045\\t127555714\\tENSMUSG00000040054\\t0\\t+\\n\", from_string=True)\n \n result, result_bed = parse_AS_STRUCTURE_dict(\"test\", clipper.test_dir())\n print str(result_bed)\n self.assertEqual(len(true_tool.intersect(result_bed)), 1)\n test_result = result[\"ENSMUSG00000040054\"]\n \n true_exons = {0:'127496045-127496082', \n 1:'127528690-127528832',\n 2:'127533494-127533579', \n 3:'127545949-127546087', \n 4:'127547810-127548404', \n 5:'127549637-127549823', \n 6:'127550518-127550737', \n 7:'127551389-127551839', \n 8:'127552080-127552141', \n 9:'127553116-127553225', \n 10:'127553361-127553463', \n 11:'127553602-127553813',\n 
12:'127555610-127555714'}\n \n \n self.assertDictEqual(test_result[\"exons\"], true_exons)\n \n self.assertDictEqual(test_result[\"introns\"], {0 :'127496083-127528689', \n 1 :'127528833-127533493', \n 2 :'127533580-127545948', \n 3 :'127546088-127547809', \n 4 : '127548405-127549636',\n 5 :'127549824-127550517', \n 6 :'127550738-127551388', \n 7 :'127551840-127552079', \n 8 : '127552142-127553115', \n 9 : '127553226-127553360', \n 10 : '127553464-127553601', \n 11 :'127553814-127555609'}, \n \"introns not equal\")\n\n self.assertDictEqual(test_result[\"types\"], {0 : \"CE:\", \n 1 : \"CE:\", \n 2 : \"CE:\", \n 3 : \"CE:\",\n 4 : \"CE:\", \n 5 : \"CE:\", \n 6 : \"CE:\", \n 7 : \"CE:\", \n 8 : \"CE:\", \n 9 : \"CE:\", \n 10 : \"CE:\", \n 11 : \"CE:\", \n 12 : \"CE:\" }, \n \"types not equal\")\n\n self.assertEqual(test_result[\"tx_stop\"], 127555714)\n self.assertEqual(test_result[\"tx_start\"], 127496045)\n self.assertEqual(test_result[\"premRNA_length\"], 59670)\n self.assertEqual(test_result[\"mRNA_length\"], 2451)", "def extractInfo(article):\n headline = article['headline']['main']\n url = article['web_url']\n keywords = article['keywords']\n locations = []\n for k in keywords:\n if k['name'] == \"glocations\":\n locations += [k['value']]\n return {'headline':headline, 'url':url, 'locations':locations}", "def getData(self, polygon: Polygon, epsg: int) -> dict:\n\n regions = self.getRegions(polygon, epsg)\n region_dict = {}\n for region in regions:\n year = int(self.metadata[self.metadata.filename == region].year.values[0])\n if year == 0:\n year = 'unknown'\n region_df = self.get_region_data(polygon, epsg, region)\n empty = region_df.empty\n if not empty:\n region_dict[year] = region_df\n\n return region_dict", "def get_region_dropdown(mode):\n print(mode)\n if mode == SelectionMode.Continents.value:\n return {'display': 'none'}, {'display': 'block'}, {'display': 'none'}\n elif mode == SelectionMode.Countries.value:\n return {'display': 'none'}, {'display': 'none'}, {'display': 'block'}\n \n return {'height': '35px'}, {'display': 'none'}, {'display': 'none'}", "def parse_snippet(self, ultisnip_file: Path) -> dict:\n snippets_dictionary = {}\n with open(ultisnip_file, \"r\") as f:\n for line in f:\n if line.startswith(\"snippet\"):\n snippet = {}\n prefix = line.split()[1].strip()\n snippet[\"prefix\"] = prefix\n if '\"' in line:\n snippet_name = line.split('\"')[1].strip()\n snippet[\"description\"] = snippet_name\n body = []\n line = next(f)\n while not line.startswith(\"endsnippet\"):\n body.append(self._replace_variables(line.strip(\"\\n\")))\n line = next(f)\n snippet[\"body\"] = body\n snippets_dictionary[prefix] = snippet\n return snippets_dictionary", "def get_description():\n d = dict()\n d['data'] = True\n d['report'] = True\n d['description'] = \"\"\" \"\"\"\n d['arguments'] = [\n dict(type='station', name='station', default='IA2203',\n label='Select Station'),\n ]\n return d", "def get_valid_regions(self):\n pass", "def test_valid_region_type():\n reg_str = 'hyperbola[[18h12m24s, -23d11m00s], 2.3arcsec]'\n\n with pytest.raises(CRTFRegionParserError) as excinfo:\n Regions.parse(reg_str, format='crtf')\n\n assert 'Not a valid CRTF Region type: \"hyperbola\"' in str(excinfo.value)", "def get_cloud_info(location):\n params = dict()\n # Read in the file\n with open(location, 'r') as myfile: data=myfile.read()\n obj = json.loads(data)\n for o in obj:\n params[o] = obj[o]['value']\n return params", "def process(raw):\n entry = { }\n cooked = [ ]\n\n for line in raw:\n line = 
line.strip()\n if len(line) == 0 or line[0]==\"#\" :\n continue\n parts = line.split(';')\n if len(parts) == 3:\n entry[\"description\"] = parts[0].strip() #adding key and values to the dict\n entry[\"long\"] = parts[1].strip()\n entry[\"lat\"] = parts[2].strip()\n cooked.append(entry) #add this dict entry into the array\n entry = { }\n continue\n else:\n raise ValueError(\"Trouble wiht line: '{}'\\n\".format(line))\n \n return cooked #returning an array of dicts", "def deserilize_edpidemic_province(record: tuple) -> dict:\n return {\n \"id\": int(record[0]),\n \"curday\": record[1].strftime(DATE_FORMAT),\n \"province_name\": str(record[2]),\n \"province_code\": str(record[3]),\n \"confirmed_count\": int(record[4]),\n \"cured_count\": int(record[5]),\n \"dead_count\": int(record[6]),\n \"suspected_count\": int(record[7]),\n }", "def description(self):\n return \"create a <b>region</b> with edge at cursor\"", "def _read_info(self):\n my_filelines = self.file_lines\n info = dict()\n\n for i, line in enumerate(my_filelines):\n if line.startswith(\"VEHICLE\"):\n vehicle_pro_start = i + 2\n elif line.startswith(\"CUSTOMER\"):\n customer_pro_start = i + 3\n\n elif line.startswith(\"NUMBER\"):\n splited = line.split(' ')\n info[splited[0]] = 0\n info[splited[-1]] = 0\n return info, (vehicle_pro_start, customer_pro_start)", "def read_regions(namefile):\n db = shelve.open(namefile)\n key_firms = db['nif']\n regions = db['regions']\n methodvalues = db['methodvalues']\n db.close()\n return key_firms, regions, methodvalues", "def user_defined_descriptions(path):\n try:\n lines = [line.rstrip() for line in open(path).readlines()]\n return dict([x.split(maxsplit=1) for x in lines])\n except FileNotFoundError:\n return dict()", "def test_valid_region_syntax():\n reg_str1 = 'circle[[18h12m24s, -23d11m00s], [2.3arcsec,4.5arcsec]'\n with pytest.raises(CRTFRegionParserError) as excinfo:\n Regions.parse(reg_str1, format='crtf')\n\n estr = (\"Not in proper format: ('2.3arcsec', '4.5arcsec') should be \"\n \"a single length\")\n assert estr in str(excinfo.value)\n\n reg_str2 = ('symbol[[32.1423deg, 12.1412deg], 12deg], linewidth=2, '\n 'coord=J2000, symsize=2')\n with pytest.raises(CRTFRegionParserError) as excinfo:\n Regions.parse(reg_str2, format='crtf')\n estr = 'Not in proper format: \"12deg\" should be a symbol'\n assert estr in str(excinfo.value)\n\n reg_str3 = 'circle[[18h12m24s, -23d11m00s]'\n with pytest.raises(CRTFRegionParserError) as excinfo:\n Regions.parse(reg_str3, format='crtf')\n estr = ('Does not contain expected number of parameters for the region '\n '\"circle\"')\n assert estr in str(excinfo.value)\n\n reg_str4 = 'poly[[1, 2], [4, 5]]'\n with pytest.raises(CRTFRegionParserError) as excinfo:\n Regions.parse(reg_str4, format='crtf')\n assert 'polygon should have >= 3 coordinates' in str(excinfo.value)\n\n reg_str6 = 'rotbox[[12h01m34.1s, 12d23m33s], [3arcmin,], 12deg]'\n with pytest.raises(CRTFRegionParserError) as excinfo:\n Regions.parse(reg_str6, format='crtf')\n assert \"('3arcmin', '') should be a pair of lengths\" in str(excinfo.value)", "def parse(self) -> Dictionary:\n self.parsed_dictionary = dictionary = Dictionary()\n state = State.pre_signature\n for lineno, line in self.line_iter:\n lineno += 1\n line = decomment_and_normalize(line)\n if line == \"\": continue\n parsed = False\n expected_lines = State.expected_lines(state)\n for t in expected_lines:\n parsed, state = t.parse_line(state, dictionary, line, lineno)\n if parsed: break\n if not parsed:\n raise 
DictionaryParseError(lineno, expected_lines, self.source)\n if State.is_not_final(state):\n raise DictionaryParseError(lineno + 1, expected_lines, self.source)\n try:\n del dictionary._last_article\n del dictionary._last_definition\n del dictionary._last_example\n del dictionary._last_idiom\n except AttributeError:\n pass\n return dictionary", "def test_avalanche_warning_by_region_detail(self):\n pass", "def region_of_province(province_in: str) -> str:\n region = None\n for r in ITALY_MAP:\n for p in ITALY_MAP[r]:\n if province_in == p:\n region = r\n return region", "def get_line_desc(self, line):\n return dict(zip(self.header, line))", "def parse_locations(locations: str) -> dict:\n root = etree.XML(locations)\n\n return {\n element.tag.lower(): [\n ResolvedLocation(\n *clean(sub_element.itertext())\n )\n for sub_element in element\n ]\n for element in root\n }", "def convert_to_dict(df):\n df = df[['ISO3166A2',\n 'ISOen_ro_name',\n 'minlongitude',\n 'maxlongitude',\n 'minlatitude',\n 'maxlatitude',\n 'land_total',\n ]]\n df.ISOen_ro_name = df.ISOen_ro_name.str.strip()\n df.set_index('ISOen_ro_name', inplace=True)\n df.columns = ['country_tag',\n 'min_lon',\n 'max_lon',\n 'min_lat',\n 'max_lat',\n 'area',\n ]\n df.loc[:, \"country_tag\"] = df.loc[:, \"country_tag\"].str.lower()\n return {i[0]: i[1].to_dict() for i in df.iterrows()}", "def load():\r\n\r\n data = dict()\r\n global IMAGES_FILE_PATH\r\n chdir(IMAGES_FILE_PATH)\r\n try:\r\n with open('Descriptions_File.txt', 'r') as f:\r\n reader = csv.DictReader(f, delimiter=',')\r\n for row in reader:\r\n url = row['url']\r\n data[url] = row['description']\r\n f.close()\r\n except Exception: # If no Descriptions found\r\n return data\r\n return data", "def get_regional_breakdown():\n doc = reg_bdown_coll.find_one({}, {\"_id\": False})\n if doc:\n breakdown = {\n key: sorted(doc[key], key=lambda x: x['count'], reverse=True)\n for key in doc\n }\n for key in doc:\n areas_breakdown = doc[key]\n for ab in areas_breakdown:\n ab['count'] = format_number(ab['count'])\n else:\n breakdown = {\"err\": \"No data\"}\n return breakdown", "def ffgs_regions():\n return [\n ('Hispaniola', 'hispaniola'),\n ('Central America', 'centralamerica')\n ]", "def handle_region_change(data: bytes) -> Tuple[bytes, str]:\n region_name_length = struct.unpack('H', data[:2])[0]\n region_name = data[2:2+region_name_length]\n return (data[2+region_name_length:],\n f'Changing to region: {region_name.decode().upper()}')", "def _read_header_line_2(lines: list) -> dict:\n fields = (\n \"detection_status\",\n \"warning\",\n \"cloud_base_data\",\n \"warning_flags\",\n )\n values = [[line[0], line[1], line[3:20], line[21:].strip()] for line in lines]\n return values_to_dict(fields, values)", "def read_prism_hdr(hdr_path): \n with open(hdr_path, 'r') as input_f:\n header_list = input_f.readlines()\n \n return dict(item.strip().split() for item in header_list)", "async def parse(self, raw: str) -> dict:", "def _parse_spec(self):\n\n key, value = self._lines.current.split(':', 1)\n key, value = key.strip(), value.strip()\n value = int(value) if key in self._INT_PROPERTIES else value\n\n try:\n next(self._lines)\n except StopIteration:\n pass\n\n return {key: value}", "def _parse_spec(self):\n\n key, value = self._lines.current.split(':', 1)\n key, value = key.strip(), value.strip()\n value = int(value) if key in self._INT_PROPERTIES else value\n\n try:\n next(self._lines)\n except StopIteration:\n pass\n\n return {key: value}", "def get_region(rid):\n region = 
Region.query.get_or_404(rid)\n return jsonify(region.to_long_json())", "def _parse(self):\n\n self.specification = {}\n\n while True:\n try:\n line = self._lines.current\n if ':' in line:\n self.specification.update(self._parse_spec())\n elif line.startswith('NODE_COORD_SECTION'):\n next(self._lines)\n self.coords = self._parse_coords()\n elif line.startswith('EDGE_WEIGHT_SECTION'):\n next(self._lines)\n self.weights = self._parse_weights()\n elif line.startswith('DISPLAY_DATA_SECTION'):\n next(self._lines)\n self.display = self._parse_coords()\n else:\n break\n except StopIteration:\n break\n\n del self._lines", "def region(self) -> typing.Optional[str]:\n return self._values.get(\"region\")", "def populateDict(string, region, data):\n found = False\n dictionary = {}\n string_norm = norm.normalize_alphabet(string)\n fName = data[\"properties\"][\"cornuData\"][\"toponym_arabic\"]\n fName_norm = norm.normalize_alphabet(fName)\n sName = re.split('،|,',data[\"properties\"][\"cornuData\"][\"toponym_arabic_other\"])\n cornu_reg = data[\"properties\"][\"cornuData\"][\"region_code\"]\n key = ','.join([string] + region.strip().split(\",\"))\n key_norm = ','.join([string_norm] + region.strip().split(\",\"))\n\n if not any(x in dictionary for x in [key, key_norm]): \n if (fuzz.ratio(string_norm , fName) >= 90 or fuzz.ratio(string , fName) >= 90\n or any(x in [fName, fName_norm] for x in [string, string_norm])) \\\n and cornu_reg in region.strip().split(\",\"):\n #print(\"key fName: \", string, \"-\", key)\n dictionary[key] = {}\n dictionary[key]['lat']= data[\"properties\"][\"cornuData\"][\"coord_lat\"]\n dictionary[key]['lon'] = data[\"properties\"][\"cornuData\"][\"coord_lon\"]\n dictionary[key]['region'] = data[\"properties\"][\"cornuData\"][\"region_code\"]\n dictionary[key]['cornuUri'] = data[\"properties\"][\"cornuData\"][\"cornu_URI\"]\n found = True\n\n else:\n for n in sName:\n if (fuzz.ratio(string_norm , n.strip()) >= 90 or fuzz.ratio(string , n.strip()) >= 90 or any(x in [n.strip(), norm.normalize_alphabet(n.strip())] for x in [string, string_norm])) and cornu_reg in region.strip().split(\",\"):\n #print(\"key sName: \", string, \"-\", key)\n dictionary[key] = {}\n dictionary[key]['lat']= data[\"properties\"][\"cornuData\"][\"coord_lat\"]\n dictionary[key]['lon'] = data[\"properties\"][\"cornuData\"][\"coord_lon\"]\n dictionary[key]['region'] = data[\"properties\"][\"cornuData\"][\"region_code\"]\n dictionary[key]['cornuUri'] = data[\"properties\"][\"cornuData\"][\"cornu_URI\"]\n found = True\n break\n\n '''if key not in dictionary and found == False: \n #print(\"not in dic2: \", key)\n dictionary[key] = {}\n dictionary[key]['lat']= \"null\"\n dictionary[key]['lon'] = \"null\"\n dictionary[key]['region'] = region\n dictionary[key]['cornuUri'] = \"null\"'''\n return dictionary", "def ParseLonghurstProvinceFile():\n from xml.dom.minidom import parse, parseString\n provinces = {}\n tree = parse('longhurst.xml')\n for node in tree.getElementsByTagName('MarineRegions:longhurst'):\n # 1. Get province code, name and bounding box from file\n provCode = node.getElementsByTagName('MarineRegions:provcode')[\n 0].firstChild.data\n provName = node.getElementsByTagName('MarineRegions:provdescr')[\n 0].firstChild.data\n fid = node.getAttribute(\"fid\")\n b = node.getElementsByTagName('gml:coordinates')[0].firstChild.data\n # 2. 
Parse bounding box coordinates\n b = b.split(' ')\n x1, y1 = b[0].split(',')\n x2, y2 = b[1].split(',')\n x1 = float(x1)\n y1 = float(y1)\n x2 = float(x2)\n y2 = float(y2)\n # Add province to dictionary\n provinces[fid] = {'provName': provName, 'provCode': provCode,\n 'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2}\n return provinces, tree", "def to_dict(self, section):\n\t\t\n\t\tdct = {}\n\t\t\n\t\tfor name, value in self.items(section):\n\t\t\tdct[name] = self.parse_value(value)\n\t\t\n\t\treturn dct", "def region(self, region_name):\n return Region(region_name, self)", "def extract_location(cleaned_news):\r\n _ret = {}\r\n lines_in_news = cleaned_news.split('।')[0:2] # using only the first 3 lines of the news\r\n for line in lines_in_news:\r\n tokens = line.split()\r\n entities_in_line = NER_MODEL.extract_entities(tokens)\r\n for entity in entities_in_line:\r\n range = entity[0]\r\n tag = entity[1]\r\n entity_text = ' '.join(tokens[word] for word in range)\r\n _ret.setdefault(tag, []).append(entity_text)\r\n\r\n return _ret", "def dict() -> Dict[str, Pin]:", "def olive_parser(text: str) -> dict:\n soup = BeautifulSoup(text, \"lxml\")\n root = soup.find(\"xmd-entity\")\n page_no = root['page_no']\n identifier = root['id']\n language = root['language']\n title = soup.meta['name']\n entity_type = root['entity_type']\n issue_date = soup.meta['issue_date']\n\n out = {\n \"meta\": {\n \"language\": None,\n \"type\": {}\n },\n \"r\": [],\n \"stats\": {},\n \"legacy\": {\"continuation_from\": None, \"continuation_to\": None},\n }\n out[\"meta\"][\"title\"] = title\n out[\"meta\"][\"page_no\"] = [int(page_no)]\n out[\"meta\"][\"language\"] = normalize_language(language)\n out[\"meta\"][\"type\"][\"raw\"] = entity_type\n out[\"meta\"][\"issue_date\"] = issue_date\n\n new_region = {\n \"c\": [],\n \"p\": []\n }\n\n new_paragraph = {\n \"l\": []\n }\n\n new_line = {\n \"c\": [],\n \"t\": []\n }\n\n new_token = {\n \"c\": [],\n \"tx\": \"\"\n }\n\n for primitive in soup.find_all(\"primitive\"):\n\n # store coordinate of text areas (boxes) by page\n # 1) page number, 2) coordinate list\n region = copy.deepcopy(new_region)\n region[\"c\"] = [int(i) for i in primitive.get('box').split(\" \")]\n\n para = None\n line = None\n line_counter = 0\n\n for tag in primitive.find_all(recursive=False):\n\n if tag.name == \"l\":\n\n if para is None and line is None:\n para = copy.deepcopy(new_paragraph)\n line = copy.deepcopy(new_line)\n\n if line_counter > 0 and line is not None:\n line = normalize_line(line, out[\"meta\"][\"language\"])\n para[\"l\"].append(line)\n\n if tag.get(\"p\") in [\"S\", \"SA\"] and line_counter > 0:\n region[\"p\"].append(para)\n para = copy.deepcopy(new_paragraph)\n\n line = copy.deepcopy(new_line)\n line[\"c\"] = [\n int(i)\n for i in tag.get('box').split(\" \")\n ]\n line_counter += 1\n\n if tag.name in [\"w\", \"q\"]:\n\n # store coordinates of each token\n # 1) token, 2) page number, 3) coordinate list\n t = copy.deepcopy(new_token)\n t[\"c\"] = [int(i) for i in tag.get('box').split(\" \")]\n t[\"tx\"] = tag.string\n t[\"s\"] = int(tag.get('style_ref'))\n\n if tag.name == \"q\" and tag.get('qid') is not None:\n qid = tag.get('qid')\n normalized_form = soup.find('qw', qid=qid).text\n t[\"nf\"] = normalized_form\n t[\"qid\"] = qid\n\n # append the token to the line\n line[\"t\"].append(t)\n\n # append orphan lines\n if line is not None:\n line = normalize_line(line, out[\"meta\"][\"language\"])\n para[\"l\"].append(line)\n\n region[\"p\"].append(para)\n\n if para is not None:\n 
out[\"r\"].append(region)\n\n out[\"legacy\"][\"id\"] = identifier\n out[\"legacy\"][\"source\"] = soup.link['source']\n \"\"\"\n # I suspect this could be deleted\n out[\"legacy\"][\"word_count\"] = int(soup.meta['wordcnt'])\n out[\"legacy\"][\"chars_count\"] = int(soup.meta['total_chars_count'])\n suspicious_chars_count = int(soup.meta['suspicious_chars_count'])\n out[\"legacy\"][\"suspicious_chars_count\"] = int(suspicious_chars_count)\n \"\"\"\n out[\"legacy\"][\"first_id\"] = soup.link['first_id']\n out[\"legacy\"][\"last_id\"] = soup.link['last_id']\n out[\"legacy\"][\"next_id\"] = soup.link['next_id']\n out[\"legacy\"][\"prev_id\"] = soup.link['prev_id']\n\n if root.has_attr('continuation_from'):\n out[\"legacy\"][\"continuation_from\"] = root['continuation_from']\n\n if root.has_attr('continuation_to'):\n out[\"legacy\"][\"continuation_to\"] = root['continuation_to']\n\n return out", "def get_region(region):\n region = region.lower()\n if region in ['cena', 'ena', 'ceus', 'eus']:\n return 'cena'\n elif region in ['wna', 'wus']:\n return 'wna'\n else:\n raise NotImplementedError('No recognized region for: %s', region)", "def validate_region(region):\n # only allow supported domains\n if region['domain'] not in DOMAINS:\n raise ConfigError('domain')\n\n # search term state is inserted as province if province does not already\n # exist\n if 'state' in region:\n if (region['state'] is not None) and (region['province'] is None):\n region['province'] = region['state']\n\n # north american jobs should have a province/state provided\n if region['domain'] in ['com', 'ca'] and region['province'] is None:\n raise ConfigError('province')", "def region(self, region: str) -> Region:\n return Region(self, region)", "def form_dict(path):\n data={}\n try:\n f=codecs.open(path, \"r\", \"utf-8\")\n text=f.read()\n f.close()\n except Exception:text=None\n if text!=None:\n #print text\n lines=text.split(\"\\n\")\n for sline in lines:\n if sline!=\"\" or sline==None:line_data=sline.partition(\":\")\n if len(line_data)==3:\n try:\n kin=line_data[0].strip().decode(\"utf-8\")\n data[kin.lower()]=line_data[2].strip()\n except:pass\n return data", "def section(data):\n if len(data['index']) == 2 and data['index'][1][0].isdigit():\n element = {}\n element['is_section'] = True\n element['section_id'] = '-'.join(data['index'])\n if u\"§§ \" == data['title'][:3]:\n element['is_section_span'] = True\n else:\n element['is_section_span'] = False\n match = SECTION_TITLE_REGEX.match(data['title'])\n element['label'] = match.group(1)\n element['sub_label'] = match.group(2)\n return element", "def get_region(self, region_id):\n raise exception.NotImplemented() # pragma: no cover", "def region_str(self):\n return \"%s:%s-%s\" % (self.seqid, self.start, self.end)", "def get_regions(locale):\n\n def json_file(name):\n return os.path.join(json_dir, 'regions', '%s.json' % name)\n\n filepath = json_file(locale)\n\n if not os.path.exists(filepath):\n filepath = json_file('en-US')\n if not os.path.exists(filepath):\n raise Exception('Unable to load region data')\n\n with codecs.open(filepath, encoding='utf8') as fd:\n return json.load(fd)", "def get_new_region_data(db_name=DB_NAME, lang='en', data='density'):\n\n return_dict = {}\n db_dest = 'database/' + db_name\n output_message = ''\n\n conn = sqlite3.connect(db_dest)\n cur = conn.cursor()\n\n if lang == 'zh':\n output_message += '区域:'\n if data == 'density':\n data_column = 'Density10KSqKm'\n output_message += '人口密度(万人/平方公里)'\n elif data == 'gdp':\n data_column = 
'GDPPerCapita10KCNY'\n output_message += '人均GDP(万元)'\n else:\n return None\n elif lang == 'en':\n output_message += 'Region: '\n if data == 'density':\n data_column = 'DensityKSqMi'\n output_message += 'Density (K/Sq Mi)'\n elif data == 'gdp':\n data_column = 'GDPPerCapitaKUSD'\n output_message += 'GDP Per Capita (K USD)'\n else:\n return None\n else:\n return None\n\n statement = '''\n SELECT Id, {} \n FROM RegionsNew\n ORDER BY {} DESC\n ;'''.format(data_column, data_column)\n result = cur.execute(statement)\n result_lst = result.fetchall()\n for (region_id, data_column) in result_lst:\n return_dict[region_id] = data_column\n\n conn.close()\n\n # print('\\n{' + output_message+'}')\n return return_dict" ]
[ "0.70874363", "0.70591384", "0.60175997", "0.5904459", "0.585444", "0.5795512", "0.5608974", "0.5576179", "0.5553111", "0.54601663", "0.5458088", "0.54207695", "0.54005855", "0.5378389", "0.5374614", "0.5348017", "0.53426313", "0.5309667", "0.5308136", "0.53045267", "0.5299663", "0.5294206", "0.52616996", "0.52306086", "0.52287316", "0.52234644", "0.5222808", "0.5208948", "0.5196767", "0.519235", "0.5190802", "0.5184868", "0.5169078", "0.5158093", "0.515416", "0.5149883", "0.5120476", "0.5118765", "0.5101182", "0.5100985", "0.51007867", "0.50922763", "0.5087266", "0.50809896", "0.5080977", "0.50660396", "0.504651", "0.50442827", "0.5042743", "0.50414604", "0.50371367", "0.50231147", "0.50112975", "0.5000849", "0.49983057", "0.49960113", "0.49906322", "0.49815893", "0.49760658", "0.49746644", "0.4974383", "0.49728304", "0.4968473", "0.49628794", "0.49627134", "0.49505186", "0.49349895", "0.49290022", "0.49231517", "0.49230263", "0.4920777", "0.49200335", "0.49193403", "0.4916679", "0.49108666", "0.49033692", "0.48984686", "0.48849726", "0.48840195", "0.48815", "0.48815", "0.4875084", "0.48735487", "0.48642537", "0.48583898", "0.48567957", "0.4854151", "0.48529464", "0.48520538", "0.48511252", "0.48470068", "0.48455757", "0.48452494", "0.48421577", "0.48420534", "0.4838672", "0.48378667", "0.4836078", "0.48356256", "0.4833444" ]
0.5485982
9
Constructs a cube axis. header: fits cube header (pyfits header). axis: axis to reconstruct (int). Returns: cube axis (numpy array).
def get_axis(header, axis): logger = logging.getLogger(__name__) logger.debug("Will extract axis: {}.".format(axis)) wcs = WCS(header) wcs_arr_shape = wcs.array_shape logger.debug("WCS array shape: {}".format(wcs_arr_shape)) n_axis = wcs.array_shape[-axis] logger.debug("Axis should have {} elements.".format(n_axis)) if len(wcs_arr_shape) > 3: axis_vals = wcs.pixel_to_world_values(np.c_[np.zeros(n_axis), np.zeros(n_axis), np.arange(0,n_axis), np.zeros(n_axis)]) else: axis_vals = wcs.pixel_to_world_values(np.c_[np.zeros(n_axis), np.zeros(n_axis), np.arange(0,n_axis)]) axis_vals = np.asarray(axis_vals) axis_vals = axis_vals[:,axis-1] return axis_vals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_phi_to_fits_header(fits_header, phi_array):\n if len(phi_array) < 2:\n raise ShapeError('RM cube should have two or more frames to be a cube')\n fhdr = fits_header.copy()\n fhdr.set('NAXIS3', len(phi_array))\n fhdr.set('CRPIX3', 1.0)\n fhdr.set('CRVAL3', phi_array[0])\n fhdr.set('CDELT3', phi_array[1]-phi_array[0])\n fhdr.set('CTYPE3', 'FARDEPTH')\n fhdr.set('CUNIT3', 'RAD/M^2')\n return fhdr", "def to_header(axes):\n axes = tuple(axes)\n mims_all = []\n matrix = cifti2.Cifti2Matrix()\n for dim, ax in enumerate(axes):\n if ax in axes[:dim]:\n dim_prev = axes.index(ax)\n mims_all[dim_prev].applies_to_matrix_dimension.append(dim)\n mims_all.append(mims_all[dim_prev])\n else:\n mim = ax.to_mapping(dim)\n mims_all.append(mim)\n matrix.append(mim)\n return cifti2.Cifti2Header(matrix)", "def _make_axes(self, hdr, quiet=False, novec=False, vonly=False, simple=False):\n\n # PULL THE IMAGE/CUBE SIZES FROM THE HEADER\n naxis = int(hdr['NAXIS'])\n naxis1 = int(hdr['NAXIS1'])\n naxis2 = int(hdr['NAXIS2'])\n if naxis > 2:\n naxis3 = hdr['NAXIS3']\n\n ## EXTRACT FITS ASTROMETRY STRUCTURE\n ww = astropy.wcs.WCS(hdr)\n\n #IF DATASET IS A CUBE THEN WE MAKE THE THIRD AXIS IN THE SIMPLEST WAY POSSIBLE (NO COMPLICATED ASTROMETRY WORRIES FOR FREQUENCY INFORMATION)\n if naxis > 3:\n #GRAB THE RELEVANT INFORMATION FROM THE ASTROMETRY HEADER\n cd = ww.wcs.cd\n crpix = ww.wcs.crpix\n cdelt = ww.wcs.crelt\n crval = ww.wcs.crval\n\n if naxis > 2:\n # MAKE THE VELOCITY AXIS (WILL BE M/S)\n v = np.arange(naxis3) * 1.0\n vdif = v - (hdr['CRPIX3']-1)\n vaxis = (vdif * hdr['CDELT3'] + hdr['CRVAL3'])\n\n # CUT OUT HERE IF WE ONLY WANT VELOCITY INFO\n if vonly:\n return vaxis\n\n #IF 'SIMPLE' IS CALLED THEN DO THE REALLY TRIVIAL THING:\n if simple:\n print('Using simple aproach to make axes.')\n print('BE SURE THIS IS WHAT YOU WANT! It probably is not.')\n raxis = np.arange(naxis1) * 1.0\n rdif = raxis - (hdr['CRPIX1'] - 1)\n raxis = (rdif * hdr['CDELT1'] + hdr['CRVAL1'])\n\n daxis = np.arange(naxis2) * 1.0\n ddif = daxis - (hdr['CRPIX1'] - 1)\n daxis = (ddif * hdr['CDELT1'] + hdr['CRVAL1'])\n\n rimg = raxis # (fltarr(naxis2) + 1.)\n dimg = (np.asarray(naxis1) + 1.) # daxis\n return rimg, dimg\n\n # OBNOXIOUS SFL/GLS THING\n glspos = ww.wcs.ctype[0].find('GLS')\n if glspos != -1:\n ctstr = ww.wcs.ctype[0]\n newtype = 'SFL'\n ctstr.replace('GLS', 'SFL')\n ww.wcs.ctype[0] = ctstr\n print('Replaced GLS with SFL; CTYPE1 now =' + ww.wcs.ctype[0])\n\n glspos = ww.wcs.ctype[1].find('GLS')\n if glspos != -1:\n ctstr = ww.wcs.ctype[1]\n newtype = 'SFL'\n ctstr.replace('GLS', 'SFL')\n ww.wcs.ctype[1] = ctstr\n print('Replaced GLS with SFL; CTYPE2 now = ' + ww.wcs.ctype[1])\n\n # CALL 'xy2ad' TO FIND THE RA AND DEC FOR EVERY POINT IN THE IMAGE\n if novec:\n rimg = np.zeros((naxis1, naxis2))\n dimg = np.zeros((naxis1, naxis2))\n for i in range(naxis1):\n j = np.asarray([0 for i in xrange(naxis2)])\n\n pixcrd = np.array([[zip(float(i), float(j))]], numpy.float_)\n ra, dec = ww.all_pix2world(pixcrd, 1)\n\n rimg[i, :] = ra\n dimg[i, :] = dec\n else:\n ximg = np.arange(naxis1) * 1.0\n yimg = np.arange(naxis1) * 1.0\n X, Y = np.meshgrid(ximg, yimg, indexing='xy')\n ss = X.shape\n xx, yy = X.flatten(), Y.flatten()\n\n pixcrd = np.array(zip(xx, yy), np.float_)\n img_new = ww.all_pix2world(pixcrd, 0)\n rimg_new, dimg_new = img_new[:,0], img_new[:,1]\n\n rimg = rimg_new.reshape(ss)\n dimg = dimg_new.reshape(ss)\n\n # GET AXES FROM THE IMAGES. 
USE THE CENTRAL COLUMN AND CENTRAL ROW\n raxis = np.squeeze(rimg[:, naxis2/2])\n daxis = np.squeeze(dimg[naxis1/2, :])\n\n return rimg, dimg", "def add_naxis_to_fitsio_header(hdr,extra_hdr):\n if 'ZNAXIS1' in extra_hdr or 'ZNAXIS2' in extra_hdr:\n hdr.add_record({'name':'ZNAXIS1','value':extra_hdr['ZNAXIS1']})\n hdr.add_record({'name':'ZNAXIS2','value':extra_hdr['ZNAXIS2']})\n\n if 'NAXIS1' in extra_hdr or 'NAXIS2' in extra_hdr:\n hdr.add_record({'name':'NAXIS1','value':extra_hdr['NAXIS1']})\n hdr.add_record({'name':'NAXIS2','value':extra_hdr['NAXIS2']})\n\n return hdr", "def _create_hdr_obj(self, pix_len, pix_scale):\n hdr = astropy.io.fits.Header()\n hdr['NAXIS'] = 2\n hdr['NAXIS1'] = pix_len\n hdr['NAXIS2'] = pix_len\n hdr['CTYPE1'] = 'RA---TAN'\n hdr['CRVAL1'] = float(self.ra_ctr)\n hdr['CRPIX1'] = (pix_len / 2.) * 1.\n hdr['CDELT1'] = -1.0 * pix_scale\n hdr['CTYPE2'] = 'DEC--TAN'\n hdr['CRVAL2'] = float(self.dec_ctr)\n hdr['CRPIX2'] = (pix_len / 2.) * 1.\n hdr['CDELT2'] = pix_scale\n hdr['EQUINOX'] = 2000\n return hdr", "def _clean(header):\n # TODO: find a way to identify cubes containing time\n header['ctype1'] = 'HPLN-TAN' # Helioprojective longitude, TAN projection\n header['ctype2'] = 'HPLT-TAN' # Helioprojective latitude, TAN projection\n header['ctype3'] = 'WAVE ' # Wavelength axis, default (TAB) projection\n header['naxis'] = 3\n return header", "def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header", "def set_dims_in_hdr(hdr, startx, starty, cols, rows):\n hdr['startX'] = (startx, 'Starting CCD pixel column')\n hdr['endX'] = (startx + cols, 'Ending CCD pixel column+1')\n hdr['startY'] = (starty, 'Starting CCD pixel row')\n hdr['endY'] = (starty + rows, 'Ending CCD pixel row+1')", "def to_header(wcs, relax=True):\n header = wcs.to_header(relax=relax)\n if hasattr(wcs, '_naxis1'):\n header['NAXIS'] = wcs.naxis\n header['NAXIS1'] = wcs._naxis1\n header['NAXIS2'] = wcs._naxis2\n \n for k in header:\n if k.startswith('PC'):\n cd = k.replace('PC','CD')\n header.rename_keyword(k, cd)\n \n return header", "def wfirst_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, naxis=(4096,4096)):\n #naxis = 2048, 2048\n crpix = naxis[0]/2., naxis[0]/2.\n \n cd = np.array([[ -0.11, 0], [0, 0.11]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n h['BACKGR'] = 0.17+0.49, 'Total, e/s SDT Report A-1'\n h['FILTER'] = 'GRS', 'WFIRST grism'\n h['INSTRUME'] = 'WFIRST'\n h['READN'] = 17, 'SDT report Table 3-3' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def _addStatsHeadersToMatrix(self, m):\n\n atoz = \"JKLMNOPQRSTUVWXYZABCDEFGHI\"\n\n counter = 0\n\n for col in m.TopAxis.DataMembers:\n if counter < 26:\n logicalletter = str(atoz[counter])\n col.MemberSigTestHeading = logicalletter\n counter += 1\n else:\n counter = 0", "def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 
'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out", "def nircam_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n filter='F444W', grism='DFSR'): \n naxis = 2048, 2048\n crpix = 1024, 1024\n \n cd = np.array([[ -0.0648, 0], [0, 0.0648]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n ### Backgrounds\n # http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf\n bg = {'F277W':0.30, 'F356W':0.90, 'F444W': 3.00, 'F322W2':1.25, 'F430M':0.65, 'F460M':0.86, 'F410M':0.5} # F410M is a hack, no number\n \n h['BACKGR'] = bg[filter], 'Total, e/s'\n h['FILTER'] = filter\n h['INSTRUME'] = 'NIRCam'\n h['READN'] = 9, 'Rough, per pixel per 1 ks exposure' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n if grism == 'DFSR':\n h['GRISM'] = 'DFSR', 'Spectral trace along X'\n else:\n h['GRISM'] = 'DFSC', 'Spectral trace along Y'\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def fitsfile_clumpy(filename,ext=None,header=True,**kwargs):\n\n if 'hypercubenames' in kwargs:\n ext = kwargs['hypercubenames'][0]\n \n assert (isinstance(ext,(int,str))),\\\n \"'ext' must be either integer or a string, specifying the FITS extension by number or by name, respectively.\"\n \n dataset, header = pyfits.getdata(filename,ext,header=header) # dataset.shape is (Nwave,Nypix,Nxpix) for 3D, and (Nypix,Nxpix) for 2D.\n\n x = N.arange(float(header['NAXIS1']))\n y = N.arange(float(header['NAXIS2']))\n\n# x = range(header['NAXIS1'])\n# y = range(header['NAXIS2'])\n\n if dataset.ndim == 2:\n axes = None\n axnames = ['x','y']\n axvals = [x,y]\n\n elif dataset.ndim == 3:\n axes = (0,2,1)\n wave = N.array([v for k,v in header.items() if k.startswith('LAMB')])\n axnames = ['wave','x','y']\n axvals = [wave,x,y]\n\n dataset = N.transpose(dataset,axes=axes) # now it's (Nwave,Nxpix,Nypix) for 3D, and (Nxpix,Nypix) for 2D.\n\n datasets = [dataset] # has to be a list for function 'convert'\n hypercubenames = kwargs['hypercubenames']\n \n return datasets, axnames, axvals, hypercubenames", "def _horizontal_header(self):\n return self.header()", "def _horizontal_header(self):\n return self.header()", "def get_sip_keywords(header):\n cd = np.matrix([[header.get('CD1_1', 0.0), header.get('CD1_2', 0.0)],\n [header.get('CD2_1', 0.0), header.get('CD2_2', 0.0)]], dtype=np.float64)\n a_order = int(header.get('A_ORDER', 0))\n b_order = int(header.get('B_ORDER', 0))\n ac = np.matrix(np.zeros((a_order+1, a_order+1), dtype=np.float64))\n bc = np.matrix(np.zeros((b_order+1, b_order+1), 
dtype=np.float64))\n for m in range(a_order+1):\n for n in range(0, a_order+1-m):\n ac[m, n] = header.get('A_%d_%d' % (m, n), 0.0)\n for m in range(b_order+1):\n for n in range(0, b_order+1-m):\n bc[m, n] = header.get('B_%d_%d' % (m, n), 0.0)\n return cd, ac, bc", "def _header(self, path, files):\n headers = [fits.getheader(os.path.join(path, f))\n for f in sorted(files)]\n N = len(headers)\n\n def mean_key(headers, key, comment, type):\n return (np.mean([type(h[key]) for h in headers]), comment)\n\n h = fits.Header()\n h['BUNIT'] = 'e-/s'\n h['ORIGIN'] = 'Zwicky Transient Facility', 'Data origin'\n h['OBSERVER'] = 'ZTF Robotic Software', 'Observer'\n h['INSTRUME'] = 'ZTF/MOSAIC', 'Instrument name'\n h['OBSERVAT'] = 'Palomar Observatory', 'Observatory'\n h['TELESCOP'] = 'Palomar 48-inch', 'Observatory telescope'\n h['OBSLON'] = -116.8597, 'Observatory longitude (deg)'\n h['OBSLAT'] = 33.3483, 'Observatory latitude (deg E)'\n h['OBSALT'] = 1706., 'Observatory altitude (m)'\n h['IMGTYPE'] = 'object', 'Image type'\n h['NIMAGES'] = N, 'Number of images in stack'\n h['EXPOSURE'] = (sum([_['EXPOSURE'] for _ in headers]),\n 'Total stack exposure time (s)')\n if len(headers) == 0:\n return h\n\n h['MAGZP'] = 25.0, 'Magnitude zero point, solar color'\n h['MAGZPRMS'] = (\n np.sqrt(np.sum([h.get('MAGZPRMS', 0)**2 for h in headers])) / N,\n 'Mean MAGZP RMS')\n h['PCOLOR'] = headers[0]['PCOLOR']\n h['CLRCOEFF'] = mean_key(headers, 'CLRCOEFF',\n 'Mean color coefficient', float)\n\n h['OBSJD1'] = float(headers[0]['OBSJD']), 'First shutter start time'\n h['OBSJDN'] = float(headers[-1]['OBSJD']), 'Last shutter start time'\n h['OBSJDM'] = mean_key(\n headers, 'OBSJD', 'Mean shutter start time', float)\n\n wcsfn = sorted(files)[0]\n wcs = WCS(fits.getheader(os.path.join(path, wcsfn),\n extname='SANGLE'))\n h.update(wcs.to_header())\n h['WCSORIGN'] = wcsfn\n\n h['DBPID'] = (','.join([str(_['DBPID']) for _ in headers]),\n 'Database processed-image IDs')\n h['DESG'] = headers[0]['DESG'], 'Target designation'\n for k, comment in {\n 'RH': 'Mean heliocentric distance (au)',\n 'DELTA': 'Mean observer-target distance (au)',\n 'PHASE': 'Mean Sun-target-observer angle (deg)',\n 'RDOT': 'Mean heliocentric radial velocity, km/s',\n 'SELONG': 'Mean solar elongation, deg',\n 'SANGLE': 'Mean projected target->Sun position angle, deg',\n 'VANGLE': 'Mean projected velocity position angle, deg',\n 'TRUEANOM': 'Mean true anomaly (osculating), deg',\n 'TMTP': 'Mean T-Tp (osculating), days',\n 'TGTRA': 'Mean target RA, deg',\n 'TGTDEC': 'Mean target Dec, deg',\n 'TGTDRA': 'Mean target RA*cos(dec) rate of change,arcsec/s',\n 'TGTDDEC': 'Mean target Dec rate of change, arcsec/s',\n 'TGTRASIG': 'Mean target RA 3-sigma uncertainty, arcsec',\n 'TGTDESIG': 'Mean target Dec 3-sigma uncertainty, arcsec',\n }.items():\n try:\n h[k] = mean_key(headers, k, comment, float)\n except ValueError:\n # target rates might be empty strings\n h[k] = ''\n\n return h", "def __make_game_header(self, state_index: int = -1):\n\n if state_index < 0:\n state_index = len(self.history) + state_index\n\n # get board_state as FEN string\n fen = self.history[state_index]\n splitfen = fen.split(\" \")\n\n color = np.full((8, 8), int(self.__get_whose_turn_in_history(state_index)), dtype=np.float32)\n\n # (8,8) array of ones if white is current player otherwise zeros\n fifty_move = np.full((8, 8), int(splitfen[4]), dtype=np.float32)\n\n # (8,8) array full of number of moves since the first black's move\n fullmove_cnt = np.full((8, 8), int(splitfen[5]), 
dtype=np.float32)\n\n # stack the 3 sub headers\n return np.array([color, fifty_move, fullmove_cnt])", "def channel_names(self):\n header_names = [s.strip() for s in\n self.header['Bias Spectroscopy>Channels'].split(';')]\n\n # 'Bias calc (V)' is in file but not in the header.\n return ['Bias calc (V)', ] + header_names", "def make_wcsheader(ra=40.07293, dec=-1.6137748, size=2, pixscale=0.1, get_hdu=False, theta=0):\n \n if np.isscalar(pixscale):\n cdelt = [pixscale/3600.]*2\n else:\n cdelt = [pixscale[0]/3600., pixscale[1]/3600.]\n \n if np.isscalar(size):\n npix = np.cast[int]([size/pixscale, size/pixscale])\n else:\n npix = np.cast[int]([size[0]/pixscale, size[1]/pixscale])\n \n hout = pyfits.Header()\n hout['CRPIX1'] = npix[0]/2\n hout['CRPIX2'] = npix[1]/2\n hout['CRVAL1'] = ra\n hout['CRVAL2'] = dec\n hout['CD1_1'] = -cdelt[0]\n hout['CD1_2'] = hout['CD2_1'] = 0.\n hout['CD2_2'] = cdelt[1]\n hout['NAXIS1'] = npix[0]\n hout['NAXIS2'] = npix[1]\n hout['CTYPE1'] = 'RA---TAN'\n hout['CTYPE2'] = 'DEC--TAN'\n \n wcs_out = pywcs.WCS(hout)\n \n theta_rad = np.deg2rad(theta)\n mat = np.array([[np.cos(theta_rad), -np.sin(theta_rad)], \n [np.sin(theta_rad), np.cos(theta_rad)]])\n\n rot_cd = np.dot(mat, wcs_out.wcs.cd)\n \n for i in [0,1]:\n for j in [0,1]:\n hout['CD{0:d}_{1:d}'.format(i+1, j+1)] = rot_cd[i,j]\n wcs_out.wcs.cd[i,j] = rot_cd[i,j]\n \n cd = wcs_out.wcs.cd\n wcs_out.pscale = get_wcs_pscale(wcs_out) #np.sqrt((cd[0,:]**2).sum())*3600.\n \n if get_hdu:\n hdu = pyfits.ImageHDU(header=hout, data=np.zeros((npix[1], npix[0]), dtype=np.float32))\n return hdu\n else:\n return hout, wcs_out", "def _horizontal_header(self):\n return self.horizontalHeader()", "def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n flt='ibhj34h6q_flt.fits', filter='G141'):\n import numpy as np\n \n import astropy.io.fits as pyfits\n import astropy.wcs as pywcs\n \n im = pyfits.open(flt)\n wcs = pywcs.WCS(im[1].header, relax=True)\n \n thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180\n\n wcs.wcs.crval = np.array([ra, dec])\n \n ### Rotate the CD matrix\n theta = im[1].header['PA_APER'] - pa_aper \n cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)\n wcs.wcs.cd = cd_rot\n \n h = wcs.to_header(relax=True)\n \n for i in [1,2]:\n for j in [1,2]:\n h['CD%d_%d' %(i,j)] = h['PC%d_%d' %(i,j)]\n h.remove('PC%d_%d' %(i,j))\n \n h['BACKGR'] = 1.\n h['FILTER'] = filter\n h['INSTRUME'] = 'WFC3'\n h['READN'] = im[0].header['READNSEA']\n h['NAXIS1'] = h['NAXIS2'] = 1014\n h['DETECTOR'] = 'IR'\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n return h, wcs", "def get_3d_H(H1):\n H_fin = [H1[0,0], 0, H1[0,1], H1[0,2], 0, 1, 0, 0, H1[1,0], 0, H1[1,1], H1[1,2]]\n H_fin = np.array(H_fin).reshape(3,4)\n return H_fin", "def getMeasHeaders(self):\n headers = []\n for ii in range(self.rows):\n inst = self.instruments[self.stringInsts.index(self.selInsts[ii])]\n param = inst.getParam(self.selParams[ii])\n if type(param.comps) is not list:\n if param.type == 'cont':\n headers.append(sc.formatHeader(inst, param, param.units))\n else:\n headers.append(sc.formatHeader(inst, param))\n else:\n for ii,comp in enumerate(param.comps):\n if param.type == 'cont':\n headers.append(sc.formatHeader(inst, comp, param.units[ii]))\n else:\n headers.append(sc.formatHeader(inst, comp))\n return headers", "def make_roi_header(**param):\n hdr_list = ['== Integration ROI ==']\n method = [i for i in list(param.keys()) if \"pos\" in i][0].split('_pos')[0]\n hdr_list.append('Integration method: 
{}'.format(method))\n\n for k, v in list(param.items()):\n hdr_list.append('{}: {}'.format(k, v))\n\n header = \"\\n\".join(['# ' + i for i in hdr_list])\n return header", "def get_calib_from_header(header):\n\n prefix = 'HIERARCH GAMSE WLCALIB '\n\n xorder = header[prefix+'XORDER']\n yorder = header[prefix+'YORDER']\n\n coeff = np.zeros((yorder+1, xorder+1))\n for j, i in itertools.product(range(yorder+1), range(xorder+1)):\n coeff[j,i] = header[prefix+'COEFF {:d} {:d}'.format(j, i)]\n\n calib = {\n 'coeff': coeff,\n 'npixel': header[prefix+'NPIXEL'],\n 'k': header[prefix+'K'],\n 'offset': header[prefix+'OFFSET'],\n 'std': header[prefix+'STDDEV'],\n 'nuse': header[prefix+'NUSE'],\n 'ntot': header[prefix+'NTOT'],\n# 'identlist': calibwindow.identlist,\n 'window_size': header[prefix+'WINDOW_SIZE'],\n 'xorder': xorder,\n 'yorder': yorder,\n 'maxiter': header[prefix+'MAXITER'],\n 'clipping': header[prefix+'CLIPPING'],\n 'q_threshold': header[prefix+'Q_THRESHOLD'],\n 'direction': header[prefix+'DIRECTION'],\n }\n return calib", "def header(self):\n\n data = {}\n data['latitude'] = self.latitude()\n data['latitude_unc'] = self.latitude_unc()\n data['longitude'] = self.longitude()\n data['longitude_unc'] = self.longitude_unc()\n data['uid'] = self.uid()\n data['n_levels'] = self.n_levels()\n data['year'] = self.year()\n data['month'] = self.month()\n data['day'] = self.day()\n data['time'] = self.time()\n data['cruise'] = self.cruise()\n data['probe_type'] = self.probe_type()\n \n header = pd.Series(data)\n\n return header", "def construct_header(self): \n \n # create the individual labels\n hdr_bits = [hb.format(hdr) for hb, hdr in zip(self.row_base, self.headers)]\n \n # stick it all together and return with hdr_sep underneath\n hdr_str = f\"|{'|'.join(hdr_bits)}|\\n\"\n return hdr_str + self.hdr_sep * (len(hdr_str)-1) + \"\\n\"", "def get_array_headers(array_name, length):\n\twidth = len(str(length))\n\treturn [join_items([array_name, str(i).zfill(width)]) for i in range(length)]", "def header(self, format=None):\n return [\" ID \",\n \"East\",\n \"North\",\n \"TARGET ELEV\",\n \" LENGTH\",\n \" AZ\",\n \" DIP\",\n \"PLAN ELEV\"]", "def create_xif_header(dataset_name: str) -> str:\n xif_rule = \"\"\n xif_rule += f\"[MODEL: dataset={dataset_name}]\\n\"\n xif_rule += \"alter\\n\"\n return xif_rule", "def niriss_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n filter='F150W', grism='GR150R'): \n naxis = 2048, 2048\n crpix = 1024, 1024\n \n cd = np.array([[ -0.0658, 0], [0, 0.0654]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n ### Backgrounds\n # http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf\n bg = {'F090W':0.50, 'F115W':0.47, 'F140M':0.23, 'F150W':0.48, 'F158M':0.25, 'F200W':0.44}\n \n h['BACKGR'] = bg[filter], 'Total, e/s'\n h['FILTER'] = filter\n h['INSTRUME'] = 'NIRISS'\n h['READN'] = 6 , 'Rough, per pixel per 1 ks exposure' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n if grism == 'GR150R':\n h['GRISM'] = 'GR150R', 'Spectral trace along X'\n else:\n h['GRISM'] = 'GR150C', 'Spectral trace along Y'\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def 
make_spectrum_wcsheader(center_wave=1.4e4, dlam=40, NX=100, spatial_scale=1, NY=10):\n \n h = pyfits.ImageHDU(data=np.zeros((2*NY, 2*NX), dtype=np.float32))\n \n refh = h.header\n refh['CRPIX1'] = NX+1\n refh['CRPIX2'] = NY+1\n refh['CRVAL1'] = center_wave\n refh['CD1_1'] = dlam\n refh['CD1_2'] = 0.\n refh['CRVAL2'] = 0.\n refh['CD2_2'] = spatial_scale\n refh['CD2_1'] = 0.\n refh['RADESYS'] = ''\n \n refh['CTYPE1'] = 'WAVE'\n refh['CTYPE2'] = 'LINEAR'\n \n ref_wcs = pywcs.WCS(h.header)\n ref_wcs.pscale = np.sqrt(ref_wcs.wcs.cd[0,0]**2 + ref_wcs.wcs.cd[1,0]**2)*3600.\n \n return refh, ref_wcs", "def test_convert_to_cal_header(caltype, obj, change_working_dir):\n # A random NIRI image\n ad = astrodata.open(astrodata.testing.download_from_archive('N20200127S0023.fits'))\n ad_out = gt.convert_to_cal_header(ad, caltype=caltype, keyword_comments=keyword_comments)\n\n # FITS WCS keywords only get changed at write-time, so we need to\n # write the file to disk and read it back in to confirm.\n with change_working_dir():\n ad_out.write(\"temp.fits\", overwrite=True)\n ad = astrodata.open(\"temp.fits\")\n\n assert ad.observation_type() == caltype.upper()\n # Let's not worry about upper/lowercase\n assert ad.object().upper() == obj.upper()\n\n assert ad.phu.get('RA', 0.) == ad.phu.get('DEC', 0.0) == 0.0\n\n assert ad.ra() == ad.dec() == 0.0", "def get_nifti1hdr_from_h5attrs(h5attrs):\n hdr = nib.Nifti1Header()\n for k in list(h5attrs.keys()):\n hdr[str(k)] = np.array(h5attrs[k])\n\n return hdr", "def Header(nmax):\r\n n = np.arange(1,nmax+1)\r\n return (2*n+1)/(n*(n+1))", "def axis_data(axis):\n x = mask.sum(axis)\n trimmed_front = N.trim_zeros(x,\"f\")\n offset = len(x)-len(trimmed_front)\n size = len(N.trim_zeros(trimmed_front,\"b\"))\n return offset,size", "def header(self, text, level, raw=None):\n return [[MdStyleInstructionCell('h{}'.format(level))] + text]", "def generate_header(value_type, num_elements, element_multiplier, imag, name_length, name):\n result = []\n\n result += Ensemble.int32_to_bytes(value_type) # Value Type\n result += Ensemble.int32_to_bytes(num_elements) # Number of elements\n result += Ensemble.int32_to_bytes(element_multiplier) # Element Multiplier\n result += Ensemble.int32_to_bytes(imag) # Image\n result += Ensemble.int32_to_bytes(name_length) # Name Length\n result += name.encode() # Name\n\n return result", "def build_ws_header(work_sheet, max_hits):\n first_header_info = ['Query #', 'Query Sequence',\n 'Top Hit Accession in L.h.', 'E-Value', 'Filename']\n r = 1\n c = 1\n for val in first_header_info:\n c = set_cell(ws, r, c, val)", "def coordinates_from_header(header):\n f = None\n if 'NAXIS' in header and header['NAXIS'] == 2:\n f = WCSCoordinates\n elif 'NAXIS' in header and header['NAXIS'] == 3:\n f = WCSCubeCoordinates\n if f:\n try:\n return f(header)\n except AttributeError as e:\n print e\n pass\n return Coordinates()", "def _get_metadata(self): \n metadata = {'DATA_TYPE':'Fourier Climatology'} \n \n area_bounds = self._area_inst.get_cube_area_bounds(self.cube, \n self.xy_coords)\n x_bounds = [area_bounds[self._area_inst.x_min], \n area_bounds[self._area_inst.x_max]]\n y_bounds = [area_bounds[self._area_inst.y_min], \n area_bounds[self._area_inst.y_max]]\n \n metadata['VARIABLE'] = self.cube.name()\n metadata['UNITS'] = str(self.cube.units)\n metadata['INITIALISATION_DATES'] = self.cube_init_dates\n metadata['DATES'] = self.cube_dates\n metadata[self.xy_coords[0].upper()+'_BOUNDS'] = x_bounds\n metadata[self.xy_coords[-1].upper()+'_BOUNDS'] = y_bounds\n \n # 
Find additional coordinates in cube and add them to metadata.\n for coord in self.cube.coords():\n if coord.name() not in self.unwanted_coords and \\\n coord.name() not in self._required_coords and \\\n coord.name() not in self.xy_coords:\n metadata[coord.name().upper()] = coord.points\n \n bound_names = [self.xy_coords[0].upper()+'_BOUNDS',\n self.xy_coords[-1].upper()+'_BOUNDS']\n \n return self.MetaData(metadata, bound_names)", "def read_header(infile):\n h = dict()\n fid = open(infile, 'r+b')\n h['filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['parent_filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['comments1'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['comments2'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['energy_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['config_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['file_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['trans_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['date_modified'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 16))\n h['frequency'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['mat_velocity'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_polarization_channels'] =np.fromfile(fid, dtype = np.int16,count = 1)\n h['spare00'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['adc_min_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_max_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['band_width'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare01'] = np.fromfile(fid, dtype = np.int16, count = 5)\n h['polarization_type'] = np.fromfile(fid, dtype = np.int16, count = 4)\n h['record_header_size'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_precision'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['min_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['max_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['avg_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_scale_factor'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_units'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['surf_removal'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['edge_weighting'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['x_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['y_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['z_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['t_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['spare02'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['scan_orientation'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_direction'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_storage_order'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n 
h['y_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_x_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_y_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_z_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_t_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['x_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['date_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['time_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['depth_recon'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['elevation_offset_angle'] = np.fromfile(fid,dtype = np.float32, count = 1)\n h['roll_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['azimuth_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['spare06'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_radius'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_delay'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_start'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_end'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['ahis_software_version'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare_end'] = np.fromfile(fid, dtype = np.float32, count = 10)\n return h", "def header(self) -> list:\n cols = self.data.columns.tolist()\n header = [\"index\"]\n for col_int in cols:\n header.append(col_int)\n return header", "def calculateSipWcsHeader(wcs, order, bbox, spacing, header=None):\n transform = getPixelToIntermediateWorldCoords(wcs)\n crpix = wcs.getPixelOrigin()\n cdMatrix = wcs.getCdMatrix()\n crval = wcs.getSkyOrigin()\n gridNum = Extent2I(int(bbox.getWidth()/spacing + 0.5), int(bbox.getHeight()/spacing + 0.5))\n\n sip = SipApproximation(transform, crpix, cdMatrix, Box2D(bbox), gridNum, order)\n\n md = makeTanSipMetadata(sip.getPixelOrigin(), crval, sip.getCdMatrix(), sip.getA(), sip.getB(),\n sip.getAP(), sip.getBP())\n\n if header is not None:\n header.combine(md)\n else:\n header = md\n\n return header", "def header(self):\n print 'dimensions',self.data.shape\n print 'llcorner', self.xllcorner, self.yllcorner\n 
print 'cell size', self.cellsize", "def generate_day_header():\n cf = config.Config()\n outstr = \"total_rotation,total_acceleration,total_distance,number_missing,\"\n outstr += \"oc1_time,oc2_time,oc3_time,oc4_time,oc5_time,oc6_time,oc7_time,\"\n outstr += \"oc8_time,oc9_time,oc10_time,oc11_time,oc12_time,oc13_time,\"\n outstr += \"oc14_time,oc15_time,oc16_time,oc17_time,oc18_time,oc19_time,\"\n outstr += \"oc20_time,oc21_time,oc22_time,oc23_time,oc24_time,oc25_time,\"\n outstr += \"oc26_time,oc27_time,oc28_time,oc29_time,oc30_time,oc31_time,\"\n outstr += \"oc32_time,oc33_time,\"\n anames = cf.activity_list\n for i in range(len(anames)):\n outstr += anames[i] + \"_time,\"\n outstr += \"oc1_first,oc2_first,\"\n outstr += \"oc3_first,oc4_first,oc5_first,oc6_first,oc7_first,oc8_first,\"\n outstr += \"oc9_first,oc10_first,oc11_first,oc12_first,oc13_first,\"\n outstr += \"oc14_first,oc15_first,oc16_first,oc17_first,oc18_first,\"\n outstr += \"oc19_first,oc20_first,oc21_first,oc22_first,oc23_first,\"\n outstr += \"oc24_first,oc25_first,oc26_first,oc27_first,oc28_first,\"\n outstr += \"oc29_first,oc30_first,oc31_first,oc32_first,oc33_first,\"\n for i in range(len(anames)):\n outstr += anames[i] + \"_first,\"\n outstr += \"attraction_time,house_time,restaurant_time,\"\n outstr += \"road_time,service_time,store_time,work_time,other_time,\"\n outstr += \"attraction_first,house_first,restaurant_first,road_first,\"\n outstr += \"service_first,store_first,work_first,other_first\"\n return outstr", "def addheader(datasets):\n header = get_header()\n for i in range(0, len(datasets)):\n datasets[i].columns = header\n return datasets", "def _writeAuxVariablesHeaderSection(self):\n self.header.write(wrapLine(\"NAUXV\", self.annotation, self.delimiter, \"%d\\n\" % self.NAUXV))\n if self.NAUXV > 0:\n line = ((\"%s\" + self.delimiter) * (self.NAUXV - 1) + \"%s\\n\") % tuple(self.ASCAL)\n self.header.write(wrapLine(\"ASCAL\", self.annotation, self.delimiter, line))\n line = ((\"%s\" + self.delimiter) * (self.NAUXV - 1) + \"%s\\n\") % tuple(self.AMISS)\n self.header.write(wrapLine(\"AMISS\", self.annotation, self.delimiter, line))\n line = \"%s\\n\" * self.NAUXV % tuple(self.ANAME)\n self.header.write(wrapLines(\"ANAME\", self.annotation, self.delimiter, line))", "def build_roi_box_head(cfg):\n return ROIBoxHead3D(cfg)", "def full_spectrum_wcsheader(center_wave=1.4e4, dlam=40, NX=100, spatial_scale=1, NY=10):\n \n h = pyfits.ImageHDU(data=np.zeros((2*NY, 2*NX), dtype=np.float32))\n \n refh = h.header\n refh['CRPIX1'] = NX+1\n refh['CRPIX2'] = NY+1\n refh['CRVAL1'] = center_wave/1.e4\n refh['CD1_1'] = dlam/1.e4\n refh['CD1_2'] = 0.\n refh['CRVAL2'] = 0.\n refh['CD2_2'] = spatial_scale\n refh['CD2_1'] = 0.\n refh['RADESYS'] = ''\n \n refh['CTYPE1'] = 'RA---TAN-SIP'\n refh['CUNIT1'] = 'mas'\n refh['CTYPE2'] = 'DEC--TAN-SIP'\n refh['CUNIT2'] = 'mas'\n \n ref_wcs = pywcs.WCS(refh) \n ref_wcs.pscale = get_wcs_pscale(ref_wcs)\n \n return refh, ref_wcs", "def read_additional_info_from_header(wcsprm, hdr, RA_input=None, DEC_input=None, projection_ra=None, projection_dec=None, ignore_header_rot=False, radius = -1., silent=False):\n fov_radius = 4 #arcmin radius to include field of view\n if(radius > 0):\n fov_radius = radius\n INCREASE_FOV_FLAG = False # increase the field to view by 50% to search in catalog if position on sky is inaccurate\n PIXSCALE_UNCLEAR = False\n\n keywords_check = [\"PIXSCALE\", \"NAXIS1\", \"NAXIS2\", \"RA\", \"DEC\"] #list of possible keywords the scs parser might miss\n keywords_present = [] # 
list of keywords that are actually present\n for i in keywords_check:\n if(i in hdr.keys()):\n keywords_present.append(i)\n\n if(\"NAXIS1\" not in keywords_present or \"NAXIS2\" not in keywords_present ):\n print(\"ERROR: NAXIS1 or NAXIS2 missing in file. Please add!\")\n else:\n axis1 = hdr[\"NAXIS1\"]\n axis2 = hdr[\"NAXIS2\"]\n\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n if((np.abs(wcs_pixscale[0])) < 1e-7 or (np.abs(wcs_pixscale[1])) < 1e-7 or\n (np.abs(wcs_pixscale[0])) > 5e-3 or (np.abs(wcs_pixscale[1])) > 5e-3):\n if(not silent):\n print(\"pixelscale is completely unrealistic. Will guess\")\n print(wcs_pixscale)\n guess = 8.43785734e-05\n #guess = 6.94444461259988e-05\n wcsprm.pc = [[1,0],[0,1]]\n wcsprm.cdelt = [guess, guess]\n if(not silent):\n print(\"Changed pixelscale to {:.3g} deg/arcsec\".format(guess))\n PIXSCALE_UNCLEAR = True\n if(ignore_header_rot):\n wcsprm.pc = [[1,0],[0,1]]\n #wcsprm.cdelt = [8.0006871225376e-05, 8.0006871225376e-05]\n if(\"PIXSCALE\" in keywords_present):\n #normal around 0.450000 / arcsec/pixel, for now i assume arcsec per pixel\n pixscale = hdr[\"PIXSCALE\"]\n if(\"deg\" in hdr.comments['PIXSCALE']): #correction if in deg/pixel\n pixscale = pixscale *60*60\n x_size = axis1 * pixscale /60# arcmin\n y_size = axis2 * pixscale /60# arcmin\n\n if 20 > x_size > 0.5 and 20 > y_size> 0.5 :\n #pixscale is sensical\n #Now: is the pixscale of the current wcs realistic?\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n pixscale = pixscale /60 /60 #pixelscale now in deg / pixel\n if( wcs_pixscale[0]/pixscale < 0.1 or wcs_pixscale[0]/pixscale > 10 or wcs_pixscale[1]/pixscale < 0.1 or wcs_pixscale[1]/pixscale > 10):\n #check if there is a huge difference in the scales\n #if yes then replace the wcs scale with the pixelscale info\n wcsprm.pc = [[1,0],[0,1]]\n\n wcsprm.cdelt = [pixscale, pixscale]\n if(not silent):\n print(\"changed pixelscale to {:.3g} deg/arcsec\".format(pixscale))\n fov_radius = (x_size/2+y_size/2)/np.sqrt(2) #try to get corners\n PIXSCALE_UNCLEAR=True\n\n\n if(np.array_equal(wcsprm.crpix, [0,0])):\n #centrl pixel seems to not be in header, better set in middle\n wcsprm.crpix = [axis1/2, axis2/2]\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n ###sky position not found. Maybe there is some RA and DEC info in the header:\n INCREASE_FOV_FLAG = True\n if (\"RA\" in keywords_present and \"DEC\" in keywords_present): ##carefull degree and hourangle!!!\n wcsprm.crval = [hdr[\"RA\"], hdr[\"DEC\"]]\n if(not silent):\n print(\"Found ra and dec information in the header\")\n print(wcsprm.crval)\n if(not silent):\n print(\"Is this position within the field of view in degrees? otherwise it will not work. In that case give a more accurate position as an argument: -ra XX -dec XX both in degrees\")\n\n if (RA_input is not None): #use user input if provided\n wcsprm.crval = [RA_input, wcsprm.crval[1]]\n wcsprm.crpix = [axis1/2, wcsprm.crpix[1]]\n\n if (DEC_input is not None):\n wcsprm.crval = [wcsprm.crval[0], DEC_input]\n wcsprm.crpix = [wcsprm.crpix[0], axis2/2, ]\n\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n print(\">>>>>>>>>WARNING\")\n print(\"No rough sky position was found for this object. Please add as -ra XX -dex XX both in degress. Adding the position as keywords in the fits file header will also work. The keywords are RA and DEC. The program expects the values in degrees. 
\")\n\n if(np.array_equal(wcsprm.ctype, [\"\",\"\"])):\n INCREASE_FOV_FLAG = True\n if(projection_ra is not None and projection_dec is not None):\n wcsprm.ctype = [ projection_ra, projection_dec]\n else:\n wcsprm.ctype = [ 'RA---TAN', 'DEC--TAN'] #this is a guess\n print(\">>>>>>>>>WARNING\")\n print(\"The wcs in the header has no projection specified. Will guess 'RA---TAN', 'DEC--TAN' (gnomonic projection) if this is incorrect the fit will fail. You can specify the projection via -projection_ra XX -projection_dec XX\")\n print(\"make sure you do not use quotations, example: -proj1 RA---TAN -proj2 DEC--TAN\")\n if(INCREASE_FOV_FLAG):\n fov_radius = fov_radius*2.5\n return wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR", "def _write_header(self, header):\n # write out telescope and source information\n header[\"latitude\"] = self.telescope_location_lat_lon_alt_degrees[0]\n header[\"longitude\"] = self.telescope_location_lat_lon_alt_degrees[1]\n header[\"altitude\"] = self.telescope_location_lat_lon_alt_degrees[2]\n header[\"telescope_name\"] = np.string_(self.telescope_name)\n header[\"instrument\"] = np.string_(self.instrument)\n header[\"object_name\"] = np.string_(self.object_name)\n\n # write out required UVParameters\n header[\"Nants_data\"] = self.Nants_data\n header[\"Nants_telescope\"] = self.Nants_telescope\n header[\"Nbls\"] = self.Nbls\n header[\"Nblts\"] = self.Nblts\n header[\"Nfreqs\"] = self.Nfreqs\n header[\"Npols\"] = self.Npols\n header[\"Nspws\"] = self.Nspws\n header[\"Ntimes\"] = self.Ntimes\n header[\"antenna_numbers\"] = self.antenna_numbers\n header[\"uvw_array\"] = self.uvw_array\n header[\"vis_units\"] = np.string_(self.vis_units)\n header[\"channel_width\"] = self.channel_width\n header[\"time_array\"] = self.time_array\n header[\"freq_array\"] = self.freq_array\n header[\"integration_time\"] = self.integration_time\n header[\"lst_array\"] = self.lst_array\n header[\"polarization_array\"] = self.polarization_array\n header[\"spw_array\"] = self.spw_array\n header[\"ant_1_array\"] = self.ant_1_array\n header[\"ant_2_array\"] = self.ant_2_array\n header[\"antenna_positions\"] = self.antenna_positions\n\n # handle antenna_names; works for lists or arrays\n header[\"antenna_names\"] = np.asarray(self.antenna_names, dtype=\"bytes\")\n\n # write out phasing information\n header[\"phase_type\"] = np.string_(self.phase_type)\n if self.phase_center_ra is not None:\n header[\"phase_center_ra\"] = self.phase_center_ra\n if self.phase_center_dec is not None:\n header[\"phase_center_dec\"] = self.phase_center_dec\n if self.phase_center_epoch is not None:\n header[\"phase_center_epoch\"] = self.phase_center_epoch\n if self.phase_center_frame is not None:\n header[\"phase_center_frame\"] = np.string_(self.phase_center_frame)\n\n # write out optional parameters\n if self.dut1 is not None:\n header[\"dut1\"] = self.dut1\n if self.earth_omega is not None:\n header[\"earth_omega\"] = self.earth_omega\n if self.gst0 is not None:\n header[\"gst0\"] = self.gst0\n if self.rdate is not None:\n header[\"rdate\"] = np.string_(self.rdate)\n if self.timesys is not None:\n header[\"timesys\"] = np.string_(self.timesys)\n if self.x_orientation is not None:\n header[\"x_orientation\"] = np.string_(self.x_orientation)\n if self.blt_order is not None:\n header[\"blt_order\"] = np.string_(\", \".join(self.blt_order))\n if self.antenna_diameters is not None:\n header[\"antenna_diameters\"] = self.antenna_diameters\n if self.uvplane_reference_time is not None:\n 
header[\"uvplane_reference_time\"] = self.uvplane_reference_time\n if self.eq_coeffs is not None:\n header[\"eq_coeffs\"] = self.eq_coeffs\n if self.eq_coeffs_convention is not None:\n header[\"eq_coeffs_convention\"] = np.string_(self.eq_coeffs_convention)\n\n # write out extra keywords if it exists and has elements\n if self.extra_keywords:\n extra_keywords = header.create_group(\"extra_keywords\")\n for k in self.extra_keywords.keys():\n if isinstance(self.extra_keywords[k], str):\n extra_keywords[k] = np.string_(self.extra_keywords[k])\n else:\n extra_keywords[k] = self.extra_keywords[k]\n\n # write out history\n header[\"history\"] = np.string_(self.history)\n\n return", "def header(self):\n ...", "def __init__(self, caption, tag, top_header, left_header):\n super(LatexChart, self).__init__(caption, tag)\n self._top_header = top_header\n self._num_cols = len(self._top_header)\n self._left_header = left_header\n self._num_rows = len(self._left_header)\n self._cells = {}\n for top_elt in self._top_header:\n self._cells[top_elt] = {}\n for left_elt in self._left_header:\n self._cells[top_elt][left_elt] = \" \"", "def getCube(unique_name):", "def _html_table_headers(self, row_axes, col_axes):\n dsh = self.get_dshape()\n nb_blank_cols = len(row_axes) * 2 # nb of blank cols preprended to\n # each line of the column header\n nb_rows = int(np.prod([dsh[a] for a in row_axes]))\n nb_cols = int(np.prod([dsh[a] for a in col_axes]))\n # col header\n if nb_blank_cols > 0:\n blank_cells = ['']\n blank_cells_attrs = [{'colspan': str(nb_blank_cols)}]\n else:\n blank_cells = []\n blank_cells_attrs = []\n col_header = []\n nb_repets = 1\n span = nb_cols\n for a in col_axes:\n dom = [str(v)\n for v in self.get_domain(a)] # TODO: better dv format\n span /= len(dom)\n # row showing the axis label\n col_header.append(html_list_to_row(blank_cells + [a], 'h',\n blank_cells_attrs +\n [{'colspan': nb_cols}]))\n # row showing domain values\n col_header.append(html_list_to_row(blank_cells + dom * nb_repets, 'h',\n blank_cells_attrs +\n [{'colspan': str(span)}] *\n len(dom) * nb_repets))\n nb_repets *= len(dom)\n\n # row header\n # initialization of all rows because row filling wont be sequential:\n row_header = [[] for i in range(nb_rows)]\n nb_repets = 1\n span = nb_rows\n for a in row_axes:\n # 1st row contains all axis labels:\n row_header[0].append(html_cell(html_div(a, {'class': 'rotate'}), 'h',\n {'rowspan': nb_rows}))\n\n # dispatch domain values across corresponding rows:\n dom = [str(v)\n for v in self.get_domain(a)] # TODO: better dv format\n span /= len(dom)\n for idv, dv in enumerate(dom * nb_repets):\n row_header[\n idv * span].append(html_cell(dv, 'h', {'rowspan': span}))\n\n nb_repets *= len(dom)\n\n return [''.join(r) for r in row_header], col_header", "def generateDataHDU(input_file, \n header_file='lib/header_dataHDU.txt',\n coldef_file='lib/coldefs_dataHDU.txt'):\n \n sd_in = pf.open(input_file)\n sd_data = sd_in[1].data\n num_rows = sd_data.shape[0]\n \n cols = []\n \n # The column definitions are loaded from an external file, which is\n # parsed line-by-line, using regular experssions.\n \n unit_pat = \"unit\\s*\\=\\s*'([\\w/%]+)'\"\n name_pat = \"name\\s*\\=\\s*'([\\w-]+)'\"\n dim_pat = \"dim\\s*\\=\\s*'(\\([\\d,]+\\))'\"\n format_pat = \"format\\s*\\=\\s*'(\\w+)'\" \n \n # Loop through, matching on each line\n cfile = open(coldef_file)\n for line in cfile.readlines():\n unit = name = dim = format = None\n name_match = re.search(name_pat, line)\n if name_match:\n name = name_match.group(1)\n 
\n format_match = re.search(format_pat, line)\n dim_match = re.search(dim_pat, line)\n unit_match = re.search(unit_pat, line)\n \n if unit_match: \n unit = unit_match.group(1)\n \n \n if dim_match: \n dim = dim_match.group(1)\n \n arr_shape = sd_data[name].shape\n \n if format_match: \n fits_fmt = format_match.group(1)\n zarr=None\n\n try:\n if name == 'DATA' or name == 'FLAGGED':\n np_dtype, data_len, data_fmt = formatLookup(fits_fmt)\n print name, \" no data\"\n else:\n # Data array must be flattened (e.g. (2,2) -> 4)\n np_dtype, data_len, data_fmt = formatLookup(fits_fmt)\n if data_len > 1 and data_fmt != 'str_':\n z_shape = (sd_data[name].shape[0], data_len)\n else:\n z_shape = sd_data[name].shape\n #print name, z_shape, sd_data[name].shape\n zarr = sd_data[name].reshape(z_shape)\n \n except:\n print \"Error with %s\"%name\n \n # Append the column to the column list\n cols.append(pf.Column(name=name, format=fits_fmt, unit=unit, dim=dim, array=zarr))\n \n # Now we have made a list of columns, we can make a new table\n #print cols\n coldefs = pf.ColDefs(cols)\n #print coldefs\n tbhdu = pf.new_table(coldefs)\n \n # If that all worked, we can populate with the final header values\n cards = generateCards(header_file)\n \n for card in cards:\n if card.keyword == 'COMMENT':\n pass\n tbhdu.header.add_comment(card.value)\n elif card.keyword == 'HISTORY':\n pass\n tbhdu.header.add_history(card.value)\n else:\n tbhdu.header.set(card.keyword, card.value, card.comment)\n \n return tbhdu", "def test_header_update8(self, capsys):\n\n # Prepare input files.\n self.get_data(\"input\", \"ocu252cmq_raw.fits\")\n self.get_data(\"input\", \"ocu252cmq_spt.fits\")\n\n capsys.readouterr()\n\n tastis('ocu252cmq_raw.fits', update=True)\n\n captured = capsys.readouterr()\n assert captured.out == \"===============================================================================\\n\" \\\n \"ocu252cmq HST/STIS MIRVIS F28X50OII ACQ/POINT\\n\" \\\n \"prop: 14143 visit: 52 line: 1 target: BD+41-3306\\n\" \\\n \"obs date, time: 2016-06-06 08:30:05 exposure time: 2.10\\n\" \\\n \"dom GS/FGS: N2JU001340F2 sub-dom GS/FGS: N2K1001229F1\\n\" \\\n \"ACQ params: bias sub: 1510 checkbox: 3 method: FLUX CENTROID\\n\" \\\n \"subarray (axis1,axis2): size=(100,100) corner=(487,466)\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Coarse locate phase: Target flux in max checkbox (DN): 1442\\n\" \\\n \"\\n\" \\\n \" global local\\n\" \\\n \" axis1 axis2 axis1 axis2\\n\" \\\n \"Target location: 527.8 513.1 41.8 48.1\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: -7.9 -2.9 -0.400 -0.147 -0.387 -0.179\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Fine locate phase: Target flux in max checkbox (DN): 611\\n\" \\\n \"\\n\" \\\n \" global local\\n\" \\\n \" axis1 axis2 axis1 axis2\\n\" \\\n \"Target location: 534.1 516.1 48.1 51.1\\n\" \\\n \"Ref ap location: 537.5 516.5 19.5 16.5\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: -2.1 -0.4 -0.106 -0.020 -0.089 -0.061\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Total est. 
slew: -10.0 -3.3 -0.506 -0.168 -0.477 -0.239\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"The fluxes in the maximum checkbox in the fine and coarse stages differ\\n\" \\\n \"by more than 25%. This may indicate a problem with your acquisition.\\n\" \\\n \"\\n\" \\\n \"===============================================================================\\n\"\n\n # Compare results\n outputs = [(\"ocu252cmq_raw.fits\", \"ocu252cmq_raw_ref.fits\")]\n self.compare_outputs(outputs)", "def read_horizontal_corrections(filename,no_of_tubes=128):\n axisX = zeros(no_of_tubes)\n f = open(filename,'r')\n for line in f:\n if type(line) is str:\n line = line.strip()\n if (len(line) > 0) and not line.startswith('#'):\n line_vals = [float(l) for l in line.split()]\n axisX[int(line_vals[0])] += float(line_vals[1])\n return axisX", "def setup_normalyzer_header(design_matrix: DF, annot_cols: List[str], normalyzer_vals:DF) -> DF:\n\n # Get numbers set up as list of stringified numbers ('-1', '0', '0', '1', '1')\n nbr_annot_cols = len(annot_cols)\n sample_head = [-1] + [0] * (nbr_annot_cols - 1) + list(design_matrix['biorepgroup'])\n sample_head_str = [str(e) for e in sample_head]\n\n # Get text-information about each column\n label_row = list(normalyzer_vals.columns)[:nbr_annot_cols] + list(design_matrix['name'])\n\n headers = pd.DataFrame([sample_head_str, label_row])\n headers.columns = normalyzer_vals.columns\n\n return headers", "def test_header_update6(self, capsys):\n\n # Prepare input files.\n self.get_data(\"input\", \"ocmv0lw6q_raw.fits\")\n self.get_data(\"input\", \"ocmv0lw6q_spt.fits\")\n\n capsys.readouterr()\n\n tastis('ocmv0lw6q_raw.fits', update=True)\n\n captured = capsys.readouterr()\n assert captured.out == \"===============================================================================\\n\" \\\n \"ocmv0lw6q HST/STIS MIRVIS F25ND3 ACQ/POINT\\n\" \\\n \"prop: 13760 visit: 0L line: 1 target: CD-59D3300\\n\" \\\n \"obs date, time: 2016-09-29 23:43:50 exposure time: 1.10\\n\" \\\n \"dom GS/FGS: S4B0000993F2 sub-dom GS/FGS: S4B0000953F1\\n\" \\\n \"ACQ params: bias sub: 1510 checkbox: 3 method: FLUX CENTROID\\n\" \\\n \"subarray (axis1,axis2): size=(100,100) corner=(487,466)\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Coarse locate phase: Target flux in max checkbox (DN): 1560\\n\" \\\n \"\\n\" \\\n \" global local\\n\" \\\n \" axis1 axis2 axis1 axis2\\n\" \\\n \"Target location: 534.2 507.0 48.2 42.0\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: -1.5 -9.0 -0.079 -0.457 -0.379 0.268\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Fine locate phase: Target flux in max checkbox (DN): 1559\\n\" \\\n \"\\n\" \\\n \" global local\\n\" \\\n \" axis1 axis2 axis1 axis2\\n\" \\\n \"Target location: 534.2 516.8 48.2 51.8\\n\" \\\n \"Ref ap location: 537.5 517.0 19.5 17.0\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: -2.1 -0.2 -0.104 -0.010 -0.081 -0.067\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Total est. 
slew: -3.6 -9.2 -0.183 -0.467 -0.460 0.201\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Your ACQ appears to have succeeded, as the fluxes in the coarse\\n\" \\\n \"and fine stages agree within 25% and the fine slews were less than\\n\" \\\n \"4 pixels as expected\\n\" \\\n \"\\n\" \\\n \"===============================================================================\\n\"\n\n # Compare results\n outputs = [(\"ocmv0lw6q_raw.fits\", \"ocmv0lw6q_raw_ref.fits\")]\n self.compare_outputs(outputs)", "def headerData(self, section:int, orientation:QtCore.Qt.Orientation, role:typing.Optional[int]=QtCore.Qt.DisplayRole) -> typing.Any:", "def analyze_on_axis(phase_space, id_begin, id_end, ds_slice, zplot):\n\n ps = phase_space[:, (id_begin-1):id_end, :]\n # print(np.shape(ps))\n # ps = ps[numpy.logical_not(numpy.isnan(ps))]\n\n x = ps[0, :, :]\n px = ps[1, :, :]\n y = ps[2, :, :]\n py = ps[3, :, :]\n\n id_on_axis = np.zeros((4, int(id_end-id_begin+1)))\n\n for n in range(int(id_end-id_begin+1)):\n x_this = x[n, :]\n px_this = px[n, :]\n y_this = y[n, :]\n py_this = py[n, :]\n\n # Remove all NAN elements in the phase space array\n x_this = x_this[np.logical_not(np.isnan(x_this))]\n px_this = px_this[np.logical_not(np.isnan(px_this))]\n y_this = y_this[np.logical_not(np.isnan(y_this))]\n py_this = py_this[np.logical_not(np.isnan(py_this))]\n\n ## Plot X\n plt.subplot(2, 2, 1)\n plt.plot(zplot[0:len(x_this)]*1e+6, x_this*1e+6)\n plt.ylabel('Position in X/ $\\mu$m', fontsize=10)\n\n ## Plot Y\n plt.subplot(2, 2, 2)\n plt.plot(zplot[0:len(y_this)]*1e+6, y_this*1e+6)\n plt.ylabel('Position in Y/ $\\mu$m', fontsize=10)\n\n ## Plot px\n plt.subplot(2, 2, 3)\n plt.plot(zplot[0:len(px_this)]*1e+6, px_this)\n plt.ylabel('Angle in X', fontsize=10)\n\n ## Plot py\n plt.subplot(2, 2, 4)\n plt.plot(zplot[0:len(py_this)]*1e+6, py_this)\n plt.ylabel('Angle in Y', fontsize=10)\n\n\n # plt.xlabel('Longitudianl Direction of the Bunch $s$/ $\\mu$m')\n # plt.title('First Undulator Section')\n # plt.title('Second Undulator Section')\n # plt.title('Third Undulator Section')\n\n id_on_axis[0, n] = np.argmin(np.abs(x_this))\n id_on_axis[1, n] = np.argmin(np.abs(px_this))\n id_on_axis[2, n] = np.argmin(np.abs(y_this))\n id_on_axis[3, n] = np.argmin(np.abs(py_this))\n\n fig = plt.gcf()\n fig.set_size_inches(13.5, 9)\n ax = plt.gca()\n ax.yaxis.get_major_formatter().set_powerlimits((0,1))\n fig.savefig('phase_space_U3_new.png', dpi=100)\n plt.show()\n\n\n s_on_axis = np.average(id_on_axis[2:4,:])*ds_slice\n\n return id_on_axis, s_on_axis", "def coadd(hdr_list, data_list, var_list, exp_list,\n method='mean', weighted=True, robust=True, sigma=8.0,\n maxiters=5, spectral=False, cube=False, wcskey=' ',\n rotate=True, fit_order=2, window=7.0, smoothing=2.0,\n adaptive_algorithm=None, edge_threshold=0.7,\n reference='first'):\n\n # cube is only supported for spectral data\n if cube:\n spectral = True\n\n # reference all data to the first file\n out_header = hdr_list[0].copy()\n\n # set reference angle to zero if it isn't already\n key = wcskey.strip().upper()\n if rotate:\n for wkey in [f'CROTA2{key}',\n f'PC1_1{key}', f'PC1_2{key}',\n f'PC2_1{key}',\n f'PC2_2{key}', f'PC2_3{key}',\n f'PC3_2{key}', f'PC3_3{key}']:\n if wkey in out_header:\n if wkey == f'CROTA2{key}':\n out_header[wkey] = 0.0\n else:\n del out_header[wkey]\n\n # swap RA to east-left if needed\n ra = f'CDELT1{key}'\n if not cube and ra in out_header and out_header[ra] > 0:\n out_header[ra] *= -1\n\n # turn down 
logging to avoid FITS warning for 3D coord sys\n olevel = log.level\n log.setLevel('ERROR')\n if not spectral:\n outwcs = WCS(out_header, key=wcskey, naxis=2)\n else:\n outwcs = WCS(out_header, key=wcskey)\n log.setLevel(olevel)\n\n wcs_dim = outwcs.wcs.naxis\n if cube and wcs_dim < 3:\n msg = 'WCS is not 3D. Cannot make cube.'\n log.error(msg)\n raise ValueError(msg)\n\n if cube:\n # expectation is that 3D coord was in a secondary WCS --\n # we don't handle it if not\n if key == '':\n log.error('Unexpected input WCS condition. '\n 'Cannot fix output header.')\n raise ValueError\n\n method = 'resample'\n if 'SLTW_PIX' not in out_header:\n log.warning('Slit width not in header; output flux '\n 'may not be conserved.')\n float_slitw = out_header.get('SLTW_PIX', 1.0)\n slit_width = int(np.round(float_slitw))\n else:\n float_slitw = 1.0\n slit_width = 1\n\n # if referencing to a target RA/Dec (e.g. for nonsidereal targets),\n # get the target position in reference x, y coordinates\n tgt_x, tgt_y = None, None\n if reference == 'target':\n tgt_x, tgt_y = _target_xy(out_header, outwcs)\n if None in (tgt_x, tgt_y):\n msg = 'Missing TGTRA or TGTDEC; cannot reference to target.'\n log.warning(msg)\n\n out_coord_x = []\n out_coord_y = []\n out_coord_w = []\n flxvals = []\n errvals = []\n expvals = []\n corners = []\n for (hdr, flux, var, exp) in zip(hdr_list, data_list, var_list, exp_list):\n # input wcs\n if not spectral:\n inwcs = WCS(hdr, key=wcskey, naxis=2)\n else:\n inwcs = WCS(hdr, key=wcskey)\n\n # assemble flux, error, and exposure map values\n ny, nx = flux.shape\n err = np.sqrt(var)\n good = ~np.isnan(flux) & ~np.isnan(err)\n if not np.any(good):\n log.warning(f\"No good data in \"\n f\"{hdr.get('FILENAME', 'UNKNOWN')}; skipping.\")\n continue\n if method == 'resample':\n flxvals.append(flux[good])\n errvals.append(err[good])\n else:\n flxvals.append(flux)\n errvals.append(err)\n if cube:\n # exposure value is at one wavelength only, with\n # slit width size, plus two zero columns for padding\n expval = exp[:, 0:slit_width + 2]\n expval[:, 0] = 0\n expval[:, -1] = 0\n expvals.append(expval)\n else:\n expvals.append(exp)\n\n # index values for resampling\n yin, xin = np.meshgrid(np.arange(ny), np.arange(nx), indexing='ij')\n yin = yin[good]\n xin = xin[good]\n xamin, xamax = np.argmin(xin), np.argmax(xin)\n yamin, yamax = np.argmin(yin), np.argmax(yin)\n\n # corner values for interpolation\n if cube:\n in_corner = [[xin[xamin], xin[xamin],\n xin[xamax], xin[xamax]],\n [yin[yamin], yin[yamax],\n yin[yamin], yin[yamax]],\n [-slit_width / 2 + 0.5, -slit_width / 2 + 0.5,\n slit_width / 2 - 0.5, slit_width / 2 - 0.5]]\n else:\n in_corner = [[xin[xamin], xin[xamin],\n xin[xamax], xin[xamax]],\n [yin[yamin], yin[yamax],\n yin[yamin], yin[yamax]]]\n\n # transform all coords to reference WCS\n if wcs_dim == 2:\n wxy = inwcs.wcs_pix2world(xin, yin, 0)\n oxy = outwcs.wcs_world2pix(*wxy, 0)\n cxy = inwcs.wcs_pix2world(*in_corner, 0)\n out_corner = outwcs.wcs_world2pix(*cxy, 0)\n else:\n wxy = inwcs.wcs_pix2world(xin, yin, 0, 0)\n oxy = outwcs.wcs_world2pix(*wxy, 0)\n if cube:\n cxy = inwcs.wcs_pix2world(*in_corner, 0)\n out_corner = outwcs.wcs_world2pix(*cxy, 0)\n # ra, dec corners\n in_corner = [in_corner[2], in_corner[1]]\n # correct for slit width offset in not-yet\n # existant 3rd dimension\n out_corner = np.array([out_corner[2] - slit_width / 2,\n out_corner[1]])\n else:\n cxy = inwcs.wcs_pix2world(*in_corner, 0, 0)\n out_corner = outwcs.wcs_world2pix(*cxy, 0)[0:2]\n\n # correct all coordinates 
for target movement\n x_off, y_off = 0., 0.\n if None not in [tgt_x, tgt_y]:\n upd_x, upd_y = _target_xy(hdr, outwcs)\n if None in [upd_x, upd_y]:\n log.warning(f\"Missing target RA/Dec in file \"\n f\"{hdr.get('FILENAME', 'UNKNOWN')}.\")\n else:\n x_off = tgt_x - upd_x\n y_off = tgt_y - upd_y\n\n if cube and wcs_dim == 3:\n # assuming crval1=wavelength, crval2=dec, crval3=ra\n out_coord_w.append(oxy[0])\n out_coord_y.append(oxy[1] + y_off)\n out_coord_x.append(oxy[2] + x_off)\n else:\n out_coord_x.append(oxy[0] + x_off)\n out_coord_y.append(oxy[1] + y_off)\n\n out_corner[0] += x_off\n out_corner[1] += y_off\n corners.append((in_corner, out_corner))\n\n # output grid shape\n stk_coord_x = np.hstack(out_coord_x)\n minx, maxx = np.min(stk_coord_x), np.max(stk_coord_x)\n stk_coord_y = np.hstack(out_coord_y)\n miny, maxy = np.min(stk_coord_y), np.max(stk_coord_y)\n\n # shift coordinates to new grid\n stk_coord_x -= minx\n stk_coord_y -= miny\n\n # stack coordinates for output grid\n if cube:\n stk_coord_w = np.hstack(out_coord_w)\n minw, maxw = np.min(stk_coord_w), np.max(stk_coord_w)\n out_shape = (int(np.ceil(maxw) - np.floor(minw) + 1),\n int(np.ceil(maxy) - np.floor(miny) + 1),\n int(np.ceil(maxx) - np.floor(minx)) + 1)\n stk_coord_w -= minw\n coordinates = stack(stk_coord_x, stk_coord_y, stk_coord_w)\n\n xout = np.arange(out_shape[2], dtype=np.float64)\n yout = np.arange(out_shape[1], dtype=np.float64)\n wout = np.arange(out_shape[0], dtype=np.float64)\n grid = xout, yout, wout\n\n # fix header reference pixel for new min value in w and x\n out_header['CRPIX1' + key] -= minw\n out_header['CRPIX2' + key] -= miny\n out_header['CRPIX3' + key] -= minx\n else:\n out_shape = (int(np.ceil(maxy) - np.floor(miny) + 1),\n int(np.ceil(maxx) - np.floor(minx)) + 1)\n\n coordinates = stack(stk_coord_x, stk_coord_y)\n\n xout = np.arange(out_shape[1], dtype=np.float64)\n yout = np.arange(out_shape[0], dtype=np.float64)\n grid = xout, yout\n\n # fix header reference pixel\n out_header['CRPIX1' + key] -= minx\n out_header['CRPIX2' + key] -= miny\n\n # also fix primary coordinates for 2D spectrum\n if key != '' and wcs_dim > 2:\n out_header['CRPIX1'] -= minx\n out_header['CRPIX2'] -= miny\n\n log.info('Output shape: {}'.format(out_shape))\n\n # use local polynomial fits to resample and coadd data\n if method == 'resample':\n flxvals = np.hstack(flxvals)\n errvals = np.hstack(errvals)\n\n if cube:\n edge_threshold = (edge_threshold, edge_threshold, 0)\n window = (window, window, 2.0)\n smoothing = (smoothing, smoothing, 1.0)\n if adaptive_algorithm in ['scaled', 'shaped']:\n adaptive_threshold = (1.0, 1.0, 0.0)\n else:\n adaptive_threshold = None\n adaptive_algorithm = None\n else:\n if adaptive_algorithm in ['scaled', 'shaped']:\n adaptive_threshold = 1.0\n else:\n adaptive_threshold = None\n adaptive_algorithm = None\n\n max_cores = psutil.cpu_count() - 1\n if max_cores < 2: # pragma: no cover\n max_cores = None\n\n log.info('Setting up output grid.')\n resampler = Resample(\n coordinates, flxvals, error=errvals,\n window=window, order=fit_order, fix_order=True)\n\n log.info('Resampling flux data.')\n flux, std = resampler(\n *grid, smoothing=smoothing, edge_threshold=edge_threshold,\n adaptive_threshold=adaptive_threshold,\n adaptive_algorithm=adaptive_algorithm,\n edge_algorithm='distribution', get_error=True,\n error_weighting=weighted, jobs=max_cores)\n var = std**2\n\n log.info('Interpolating and summing exposure maps.')\n if cube:\n expmap = np.zeros(out_shape[1:], dtype=float)\n else:\n expmap = 
np.zeros(out_shape, dtype=float)\n for i, expval in enumerate(expvals):\n inx, iny = corners[i][0]\n outx, outy = corners[i][1]\n outx -= minx\n outy -= miny\n exp_out = warp_image(\n expval, inx, iny, outx, outy,\n output_shape=expmap.shape, cval=0,\n order=1, interpolation_order=1)\n expmap += exp_out\n else:\n # interpolate corners for approximate warp solution\n log.info('Interpolating all images.')\n\n flx = []\n vr = []\n expmap = np.zeros(out_shape)\n for i, (flxval, errval, expval) in \\\n enumerate(zip(flxvals, errvals, expvals)):\n inx, iny = corners[i][0]\n outx, outy = corners[i][1]\n outx -= minx\n outy -= miny\n\n # flux image\n flx.append(\n warp_image(flxval, inx, iny, outx, outy,\n output_shape=out_shape, cval=np.nan,\n order=1, interpolation_order=1))\n\n # var image\n vr.append(\n warp_image(errval**2, inx, iny, outx, outy,\n output_shape=out_shape, cval=np.nan,\n order=1, interpolation_order=0))\n\n # exposure map image\n exp_out = warp_image(\n expval, inx, iny, outx, outy,\n output_shape=out_shape, cval=0,\n order=1, interpolation_order=0)\n expmap += exp_out\n\n if len(flx) > 1:\n log.info('{}-combining images.'.format(method.title()))\n flux, var = combine_images(\n flx, variance=vr, method=method, weighted=weighted,\n robust=robust, sigma=sigma, maxiters=maxiters)\n else:\n flux, var = flx[0], vr[0]\n\n if cube:\n # reconstruct as primary wcs\n key = wcskey.strip().upper()\n wcs_key_set = ['CTYPE1', 'CTYPE2', 'CUNIT1', 'CUNIT2',\n 'CRPIX1', 'CRPIX2', 'CRVAL1', 'CRVAL2',\n 'CDELT1', 'CDELT2', 'CROTA2', 'SPECSYS',\n f'CTYPE1{key}', f'CTYPE2{key}', f'CTYPE3{key}',\n f'CUNIT1{key}', f'CUNIT2{key}', f'CUNIT3{key}',\n f'CRPIX1{key}', f'CRPIX2{key}', f'CRPIX3{key}',\n f'CRVAL1{key}', f'CRVAL2{key}', f'CRVAL3{key}',\n f'CDELT1{key}', f'CDELT2{key}', f'CDELT3{key}',\n f'RADESYS{key}', f'EQUINOX{key}', f'SPECSYS{key}']\n tmp = out_header.copy()\n for wkey in wcs_key_set:\n if wkey in out_header:\n del out_header[wkey]\n if wkey.endswith(key) and wkey in tmp:\n # swap coords 1 and 3 (to make it wave, RA, Dec)\n new_key = wkey[:-1].replace('3', '9999')\n new_key = new_key.replace('1', '3').replace('9999', '1')\n hdinsert(out_header, new_key, tmp[wkey], tmp.comments[wkey])\n\n # fix source position estimate too\n if 'SRCPOSX' in out_header and 'SRCPOSY' in out_header:\n coord = ([out_header['SRCPOSX']],\n [out_header['SRCPOSY']])\n first_wcs = WCS(hdr_list[0], naxis=2)\n out_wcs = WCS(out_header, naxis=2)\n sxy = first_wcs.wcs_pix2world(*coord, 0)\n new_xy = out_wcs.wcs_world2pix(*sxy, 0)\n out_header['SRCPOSX'] = new_xy[0][0]\n out_header['SRCPOSY'] = new_xy[1][0]\n\n if cube:\n # correct flux for pixel size change\n # before: pixel x slit width in pixels\n # after: pixel x pixel\n flux /= float_slitw\n var /= float_slitw**2\n\n return out_header, flux, var, expmap", "def _nanosims_header(self, hdr):\n # Called MaskNano in OpenMIMS; BFieldTab separated out; create extra sub-dict PeakCenter\n d = {}\n d['PeakCenter'] = {}\n d['nanosimsheader version'], d['regulation mode'], d['mode'], \\\n d['grain mode'], d['semigraphic mode'], d['stage delta x'], \\\n d['stage delta y'], d['working frame width'], \\\n d['working frame height'], d['scanning frame x'], \\\n d['scanning frame width'], d['scanning frame y'], \\\n d['scanning frame height'], d['counting frame x start'], \\\n d['counting frame x end'], d['counting frame y start'], \\\n d['counting frame y end'], d['detector type'], d['electron scan'], \\\n d['scanning mode'], d['beam blanking'], \\\n d['PeakCenter']['peakcenter 
enabled'], d['PeakCenter']['start'], \\\n d['PeakCenter']['frequency'], d['b fields'] = \\\n unpack(self._bo + '25i', hdr.read(100))\n\n d['PeakCenter']['peakcenter enabled'] = bool(d['PeakCenter']['peakcenter enabled'])\n d['regulation mode'] = bool(d['regulation mode'])\n d['grain mode'] = bool(d['grain mode'])\n d['semigraphic mode'] = bool(d['semigraphic mode'])\n d['scanning mode'] = bool(d['scanning mode'])\n\n # Set a few extra variables.\n d['counting frame width'] = d['counting frame x end'] - d['counting frame x start'] + 1\n d['counting frame height'] = d['counting frame y end'] - d['counting frame y start'] + 1\n\n # Found in at least one version (file v11, nsHeader v8) a repeat of\n # Poly_list and this first part of nanoSIMSHeader. Total of repeat\n # adds up to 288. After last Poly_list, 288 byte padding zone, not all\n # null-bytes.\n hdr.seek(288, 1)\n\n # Is this the nPrintRed from OpenMIMS?\n d['print results'] = bool(unpack(self._bo + 'i', hdr.read(4))[0])\n\n d['SibCenterHor'] = self._sib_center(hdr)\n d['SibCenterVert'] = self._sib_center(hdr)\n\n # Duplicate and store these two in sub dicts\n b_field_index, has_sib_center = \\\n unpack(self._bo + '2i', hdr.read(8))\n if b_field_index < 0:\n b_field_index = None\n has_sib_center = bool(has_sib_center)\n\n d['SibCenterHor']['b field index'] = b_field_index\n d['SibCenterVert']['b field index'] = b_field_index\n d['SibCenterHor']['sib center enabled'] = has_sib_center\n d['SibCenterVert']['sib center enabled'] = has_sib_center\n\n d['EnergyCenter'] = self._energy_center(hdr)\n d['E0SCenter'] = self._e0s_center(hdr)\n\n d['EnergyCenter']['wait time'], d['presputtering raster'], \\\n d['PeakCenter']['E0P offset'], d['E0SCenter']['steps'], \\\n d['baseline measurement'], d['baseline offset'], \\\n d['baseline frequency'] = \\\n unpack(self._bo + '5i d i', hdr.read(32))\n return d", "def assign_header(self, btn):\n with open(self.filename, 'rU+') as f:\n df = pd.read_csv(f, sep=self.delim, index_col=False)\n btn.color = [.3, .9, .5, 1]\n non_numeric_label = self.non_numeric_axis\n buttons = self.headerButtons.children[:]\n for x in buttons:\n if x != btn:\n x.color = [0, 0, 0, 1]\n if self.cur_axis == 'x':\n self.x_axis = btn.text.encode('ascii')\n# print df[self.x_axis].dtype\n if df[self.x_axis].dtype == 'object':\n non_numeric_label.text = 'Note: This is a non-numeric data column.'\n self.ids.scatter_button.disabled = True\n self.ids.disabled_explanation.text = self.scatter_disabled_explanation\n self.non_numeric_x_axis = True\n else:\n non_numeric_label.text = ''\n self.ids.scatter_button.disabled = False\n self.ids.disabled_explanation.text = ''\n self.non_numeric_x_axis = False\n self.ids.sm.current = 'screenX'\n elif self.cur_axis == 'y':\n self.y_axis = btn.text.encode('ascii')\n #print self.y_axis\n self.ids.sm.current = 'screenY'", "def construct_header_dataset_name(\n self, board: int, channel: int, config_name=None, adc=\"SIS 3301\", **kwargs\n ) -> str:\n # ensure return_info kwarg is always False\n kwargs[\"return_info\"] = False\n\n # get dataset name\n dset_name = self.construct_dataset_name(\n board, channel, config_name=config_name, adc=adc, **kwargs\n )\n\n # build and return header name\n dheader_name = f\"{dset_name} headers\"\n return dheader_name", "def design_header(self):\n pass", "def create_header(numValues):\n\n header = []\n for value in range(numValues):\n header.append(\"att{}\".format(value))\n return header", "def _generateColumnHeader(self, obj, **args):\n result = []\n header = 
self._script.utilities.columnHeaderForCell(obj)\n if not header:\n return result\n\n text = self._script.utilities.displayedText(header)\n if not text:\n return result\n\n roleString = self.getLocalizedRoleName(obj, pyatspi.ROLE_COLUMN_HEADER)\n if args.get('mode') == 'speech':\n if settings.speechVerbosityLevel == settings.VERBOSITY_LEVEL_VERBOSE \\\n and not args.get('formatType') in ['basicWhereAmI', 'detailedWhereAmI']:\n text = \"%s %s\" % (text, roleString)\n elif args.get('mode') == 'braille':\n text = \"%s %s\" % (text, roleString)\n\n result.append(text)\n return result", "def _appendAxisDefinition(self, axis):\n length = len(axis)\n\n self.na_dict[\"NX\"].append(length)\n self.na_dict[\"XNAME\"].append(xarray_utils.getBestName(axis))\n\n # If only one item in axis values\n if length < 2:\n self.na_dict[\"DX\"].append(0)\n self.na_dict[\"NXDEF\"].append(length)\n self.na_dict[\"X\"].append(axis.data.tolist()) \n return\n\n incr = xarray_utils.get_interval(axis, 0, 1)\n\n for i in range(1, length):\n if (axis[i] - axis[i - 1]) != incr:\n self.na_dict[\"DX\"].append(0)\n self.na_dict[\"NXDEF\"].append(length)\n self.na_dict[\"X\"].append(axis.data.tolist())\n break\n\n else: # If did not break out of the loop\n max_length = length\n if length > 3: \n max_length = 3\n\n self.na_dict[\"DX\"].append(incr)\n self.na_dict[\"NXDEF\"].append(max_length)\n self.na_dict[\"X\"].append(axis[:max_length])", "def axisinfo(unit, axis):\n if isinstance(unit, tuple):\n unit = unit[0]\n unit_obj = unit if isinstance(unit, Unit) else Unit(unit)\n name = unyt_arrayConverter._axisnames.get(axis, \"\")\n if unit_obj.is_dimensionless:\n label = name\n else:\n name += \" \"\n unit_str = unit_obj.latex_representation()\n if unyt_arrayConverter._labelstyle == \"[]\":\n label = name + \"$\\\\left[\" + unit_str + \"\\\\right]$\"\n elif unyt_arrayConverter._labelstyle == \"/\":\n axsym = \"$q_{\\\\rm\" + axis.axis_name + \"}$\"\n name = axsym if name == \" \" else name\n if \"/\" in unit_str:\n label = name + \"$\\\\;/\\\\;\\\\left(\" + unit_str + \"\\\\right)$\"\n else:\n label = name + \"$\\\\;/\\\\;\" + unit_str + \"$\"\n else:\n label = name + \"$\\\\left(\" + unit_str + \"\\\\right)$\"\n return AxisInfo(label=label.strip())", "def second_header():\n return \"\"\"\n<th>\n<th>start\n<th>start\n<th>end\n<th>(secs)\n<th>time\n<th>frames\n<th>\n<th>bin\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>width\n<th>angle\n<th>\n<th>\n<th>\n<th>\n\"\"\"", "def get_header(col_current, col_shift):\n header = col_current\n for i in range(col_shift):\n header = header.right\n return header", "def get_header():\n str_list = ['specimennumber','speciesid','group','family','genus','species','scientificname', \\\n 'commonname','country','state','county','locality','latitude','longitude', \\\n 'source','accuracy','drainagename','centroidtype','huc8name','huc8', \\\n 'huc10name','huc10','huc12name','huc12','date','year','month','day','status','comments', \\\n 'recordtype','disposal','museumcatnumber','freshmarineintro','references']\n return str_list", "def processHeader(self, header=None, pdata=None):\n\t\tif self.invariantPData.writer and not self.invariantPData.headerOutputted:\n\t\t\tnewHeader = [\"outputID\", 'noOfOutliers', 'noOfNonMissing', 'outlierFraction', 'chiSqStat', 'chiSqMinusLogPvalue',\\\n\t\t\t\t\t\t'xMedianValue', 'yMedianValue', 'corr']\n\t\t\tself.invariantPData.writer.writerow(newHeader)\n\t\t\tself.invariantPData.headerOutputted = True", "def headerData(self, section, orientation, role):\n headers = 
[\"Constituancy\", \"Lab\", \"Con\", \"LD\"]\n\n if role == qc.Qt.DisplayRole and orientation == qc.Qt.Horizontal:\n return qc.QVariant(headers[section])\n\n return qc.QVariant()", "def test_addheader(self):\n datasets = [pd.DataFrame(index=range(100),columns=range(54)) for b in range(10)]\n datasetsnew = tutorial_pamap2.addheader(datasets)\n test = datasetsnew[0].shape == datasets[0].shape\n assert test", "def get_nircam_subarray(header):\n\n #\n # ROWSTART and COLSTART are zero-indexed, ROWCORNR and COLCORNR\n # are 1-indexed\n # Try to get ROWCORNR from header. If that doesn't work, try ROWSTART\n detector_row_start = None\n try:\n detector_row_start = int(header['ROWCORNR'])\n except KeyError:\n try:\n detector_row_start = int(float(header['ROWSTART'])) + 1\n except KeyError:\n pass\n if detector_row_start is None:\n print('Unable to get subarray ROWSTART, using 1')\n detector_row_start = 1\n\n #\n # Now try to get COLCORNR from header. If that doesn't work, try COLSTART\n detector_column_start = None\n try:\n detector_column_start = int(header['COLCORNR'])\n except KeyError:\n try:\n detector_column_start = int(float(header['COLSTART'])) + 1\n except KeyError:\n pass\n if detector_column_start is None:\n print('Unable to get subarray COLSTART, using 1')\n detector_column_start = 1\n\n return detector_row_start, detector_column_start", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.conv_axis_names:\n output_axes += ng.make_axis(name=ax.name,\n length=utils.conv_output_dim(\n ax.length,\n self.filter_spatial_shape[name],\n pad_int[name],\n self.strides[name],\n False,\n self.dilation[name]))\n elif name == \"C\":\n output_axes += ng.make_axis(name=name, length=self.nout)\n else:\n output_axes += ax\n return output_axes", "def IIR_sos_header(fname_out,SOS_mat):\r\n Ns,Mcol = SOS_mat.shape\r\n f = open(fname_out,'wt')\r\n f.write('//define a IIR SOS CMSIS-DSP coefficient array\\n\\n')\r\n f.write('#include <stdint.h>\\n\\n')\r\n f.write('#ifndef STAGES\\n')\r\n f.write('#define STAGES %d\\n' % Ns)\r\n f.write('#endif\\n')\r\n f.write('/*********************************************************/\\n');\r\n f.write('/* IIR SOS Filter Coefficients */\\n');\r\n f.write('float32_t ba_coeff[%d] = { //b0,b1,b2,a1,a2,... 
by stage\\n' % (5*Ns))\r\n for k in range(Ns):\r\n if (k < Ns-1):\r\n f.write(' %+-13e, %+-13e, %+-13e,\\n' % \\\r\n (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n f.write(' %+-13e, %+-13e,\\n' % \\\r\n (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n else:\r\n f.write(' %+-13e, %+-13e, %+-13e,\\n' % \\\r\n (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n f.write(' %+-13e, %+-13e\\n' % \\\r\n (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n # for k in range(Ns):\r\n # if (k < Ns-1):\r\n # f.write(' %15.12f, %15.12f, %15.12f,\\n' % \\\r\n # (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n # f.write(' %15.12f, %15.12f,\\n' % \\\r\n # (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n # else:\r\n # f.write(' %15.12f, %15.12f, %15.12f,\\n' % \\\r\n # (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n # f.write(' %15.12f, %15.12f\\n' % \\\r\n # (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n f.write('};\\n')\r\n f.write('/*********************************************************/\\n')\r\n f.close()", "def write_header(self):\n lines = [\"\"]\n\n for key in self._header_keys:\n value = self.get_attr_from_name(key)\n if isinstance(value, list):\n value = \",\".join([f\"{v:.1f}\" for v in value])\n elif isinstance(value, (float)):\n value = f\"{value:.7f}\"\n elif isinstance(value, (int)):\n value = f\"{value:.0f}\"\n\n key = (\n key.replace(\"_\", \" \")\n .title()\n .replace(\" \", \"\")\n .replace(\"MTEdit.\", \"MTEdit:\")\n )\n\n lines.append(f\"${key}={value.capitalize()}\")\n\n return lines", "def test_merge_dim_header():\n hdr_in_1 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n hdr_in_2 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3],\n 'p2': [0.1, 0.2, 0.3]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 5, 4, 3)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4, 1, 2, 3],\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}}\n\n hdr_in_2 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 5, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 
'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 6, 4, 4)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}}\n\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_2, hdr_in_1, 6, 4, 4)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': [5, 6, 7, 8, 1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}}\n\n hdr_in_2 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 5, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 7, 4, 4)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4]}}\n\n with pytest.raises(NIfTI_MRSIncompatible) as exc_info:\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 5, 4, 4)\n assert exc_info.type is NIfTI_MRSIncompatible\n assert exc_info.value.args[0] == \"Both files must have matching dimension headers apart from the one being merged.\"\\\n \" dim_7_header does not match.\"", "def _writeVariablesHeaderSection(self):\n self.header.write(wrapLine(\"NV\", self.annotation, self.delimiter, \"%d\\n\" % self.NV))\n self.header.write(wrapLine(\"VSCAL\", self.annotation, self.delimiter, ((\"%s\" + self.delimiter) * (self.NV - 1) + \"%s\\n\") % tuple(self.VSCAL)))\n self.header.write(wrapLine(\"VMISS\", self.annotation, self.delimiter, ((\"%s\" + self.delimiter) * (self.NV - 1) + \"%s\\n\") % tuple(self.VMISS)))\n self.header.write(wrapLines(\"VNAME\", self.annotation, self.delimiter, \"%s\\n\" * self.NV % tuple(self.VNAME)))", "def load_hdr(filename):\n\n img = nib.load(filename)\n np_arr = img.get_data()\n \n return np_arr", "def load_hdr(filename):\n\n img = 
nib.load(filename)\n np_arr = img.get_data()\n \n return np_arr", "def axisinfo(unit, axis):\n return PintAxisInfo(unit)", "def writeHeading(fil, nodes, elems, text=''): #currently only for hexahedral mesh\n fil.write(\" CONTROL INFO 2.2.30\\n\")\n fil.write(\"** GAMBIT NEUTRAL FILE\\n\")\n fil.write('%s\\n' %text)\n fil.write('PROGRAM: Gambit VERSION: 2.2.30\\n')\n fil.write(strftime('%d %b %Y %H:%M:%S\\n', gmtime()))\n fil.write(' NUMNP NELEM NGRPS NBSETS NDFCD NDFVL\\n')\n fil.write('%10i%10i%10i%10i%10i%10i\\n' % (shape(nodes)[0],shape(elems)[0],1,0,3,3))\n fil.write('ENDOFSECTION\\n')", "def __init__(self, axis=-1):\n self.axis = axis", "def test_header_update7(self, capsys):\n\n # Prepare input files.\n self.get_data(\"input\", \"octr11h4q_raw.fits\")\n self.get_data(\"input\", \"octr11h4q_spt.fits\")\n\n capsys.readouterr()\n\n tastis('octr11h4q_raw.fits', update=True)\n\n captured = capsys.readouterr()\n assert captured.out == \"===============================================================================\\n\" \\\n \"octr11h4q HST/STIS MIRVIS F25ND5 ACQ/POINT\\n\" \\\n \"prop: 14341 visit: 11 line: 1 target: HD128620\\n\" \\\n \"obs date, time: 2016-08-28 19:57:49 exposure time: 0.30\\n\" \\\n \"dom GS/FGS: S7QX000303F1 sub-dom GS/FGS: S7QX000751F2\\n\" \\\n \"ACQ params: bias sub: 1510 checkbox: 3 method: FLUX CENTROID\\n\" \\\n \"subarray (axis1,axis2): size=(100,100) corner=(487,466)\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Coarse locate phase: Target flux in max checkbox (DN): 278\\n\" \\\n \"\\n\" \\\n \" global local\\n\" \\\n \" axis1 axis2 axis1 axis2\\n\" \\\n \"Target location: 557.0 473.0 71.0 8.0\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: 21.3 -43.0 1.080 -2.184 -0.781 2.308\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Fine locate phase: Target flux in max checkbox (DN): 280\\n\" \\\n \"\\n\" \\\n \" global local\\n\" \\\n \" axis1 axis2 axis1 axis2\\n\" \\\n \"Target location: 547.0 564.0 61.0 99.0\\n\" \\\n \"Ref ap location: 537.6 517.3 19.6 17.3\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: 10.6 46.7 0.541 2.372 2.060 -1.295\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Total est. slew: 31.9 3.7 1.621 0.188 1.279 1.013\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"The fine slew (to center the target in the reference aperture) is larger\\n\" \\\n \"than 4 pixels. 
This may indicate a problem with your acquisition.\\n\" \\\n \"\\n\" \\\n \"===============================================================================\\n\"\n\n # Compare results\n outputs = [(\"octr11h4q_raw.fits\", \"octr11h4q_raw_ref.fits\")]\n self.compare_outputs(outputs)", "def create_svg_number(self):\n self.header = [str(i) for i in range(len(self.data[0]))]\n self.create_svg_name()", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.conv_axis_names:\n output_axes += ng.make_axis(name=ax.name,\n length=utils.deconv_output_dim(ax.length,\n self.filter_shape[name],\n pad_int[name],\n self.strides[name],\n self.dilation[name]))\n elif name == \"C\":\n output_axes += ng.make_axis(name=name, length=self.nout)\n else:\n output_axes += ax\n\n return output_axes", "def test_header_update3(self, capsys):\n\n # Prepare input files.\n self.get_data(\"input\", \"octr11hrq_raw.fits\")\n self.get_data(\"input\", \"octr11hrq_spt.fits\")\n\n capsys.readouterr()\n\n tastis('octr11hrq_raw.fits', update=True)\n\n captured = capsys.readouterr()\n assert captured.out == \"===============================================================================\\n\" \\\n \"octr11hrq HST/STIS G430M 31X0.05NDA ACQ/PEAK-UP\\n\" \\\n \"prop: 14341 visit: 11 line: 9 target: HD128621-2\\n\" \\\n \"obs date, time: 2016-08-28 22:33:14 exposure time: 0.10\\n\" \\\n \"dom GS/FGS: S7QX000303F1 sub-dom GS/FGS: S7QX000751F2\\n\" \\\n \"ACQ params: bias sub: 1510 method: MAX-FLUX-CENTROID\\n\" \\\n \"subarray (axis1,axis2): size=(1022,32) corner=(25,500)\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Scan type: LINEARAXIS1 Step size (mas): 39\\n\" \\\n \"\\n\" \\\n \" [5478 0 798 3264 4796 1923 4876]\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: 0.2 0.0 0.010 0.000 0.007 0.007\\n\" \\\n \"Flux in post-slew confirmation image (882661) - Pedestal (871184) = 11477 DN\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"The flux in the confirmation image is 110% greater than the maximum flux\\n\" \\\n \"in the ACQ/PEAK scan. An excess greater than 100% indicates\\n\" \\\n \"problems in the ACQ/PEAK.\\n\" \\\n \"\\n\" \\\n \"The flux in the confirmation image is 57% of the recommended minimum\\n\" \\\n \"of 20000 DN for a dispersed-light ACQ/PEAK. 
The signal-to-noise in\\n\" \\\n \"the ACQ/PEAK may be inadequate for an accurate centering.\\n\" \\\n \"\\n\" \\\n \"The maximum flux in the sequence occurred at one end.\\n\" \\\n \"This may indicate that the target was beyond that end\\n\" \\\n \"or that a neighboring object affected the acquisition.\\n\" \\\n \"===============================================================================\\n\"\n\n # Compare results\n outputs = [(\"octr11hrq_raw.fits\", \"octr11hrq_raw_ref.fits\")]\n self.compare_outputs(outputs)", "def generate_ens_header(ens_num, payload_size):\n\n header = []\n\n # Get the Header ID\n for cnt in range(0, 16):\n header.append(0x80)\n\n # Ensemble Number and inverse\n header += Ensemble.int32_to_bytes(ens_num)\n header += struct.pack(\"i\", ~ens_num)\n\n # Payload size and inverse\n header += Ensemble.int32_to_bytes(payload_size)\n header += struct.pack(\"i\", ~payload_size)\n\n return header", "def build_roi_box_head(cfg, in_channels):\n return ROIBoxHead(cfg, in_channels)", "def build_roi_box_head(cfg, in_channels):\n return ROIBoxHead(cfg, in_channels)", "def build_roi_box_head(cfg, in_channels):\n return ROIBoxHead(cfg, in_channels)", "def get_dispersion_from_header(header,order=1): \n import numpy as np\n hist = header['history']\n n = \"%1s\"%(order) \n C = [get_keyword_from_history(hist,'DISP'+n+'_0')]\n if C == [None]: \n raise RuntimeError(\"header history does not contain the DISP keyword\")\n try:\n coef = get_keyword_from_history(hist,'DISP'+n+'_1')\n if coef != None: C.append(coef)\n try:\n coef = get_keyword_from_history(hist,'DISP'+n+'_2')\n if coef != None: C.append(coef)\n try:\n coef = get_keyword_from_history(hist,'DISP'+n+'_3')\n if coef != None: C.append(coef)\n try:\n coef=get_keyword_from_history(hist,'DISP'+n+'_4')\n if coef != None: C.append(coef)\n except:\n pass\n except:\n pass \n except:\n pass \n except:\n pass \n return np.array(C,dtype=float)" ]
[ "0.66289526", "0.652385", "0.63435376", "0.5962099", "0.59471786", "0.5827295", "0.5715055", "0.5618013", "0.55621886", "0.55530864", "0.5483086", "0.54277635", "0.5331385", "0.5288723", "0.5281522", "0.5281522", "0.5277689", "0.524827", "0.52445453", "0.5223636", "0.52164155", "0.51774174", "0.51694834", "0.51271045", "0.51087534", "0.510141", "0.5100756", "0.5062894", "0.50613445", "0.5056456", "0.50222605", "0.50127643", "0.49933454", "0.4986515", "0.49858066", "0.49822226", "0.49689275", "0.4954976", "0.49523827", "0.49479148", "0.4945582", "0.49452958", "0.49379054", "0.4930436", "0.4926816", "0.49266282", "0.49080062", "0.4904214", "0.4896082", "0.48947707", "0.48941845", "0.4890328", "0.48813948", "0.48798352", "0.48790127", "0.48679698", "0.48545927", "0.48442703", "0.4809514", "0.4805609", "0.48039654", "0.47976324", "0.47938272", "0.4793574", "0.47927248", "0.4788987", "0.47874075", "0.47860363", "0.47848657", "0.47785577", "0.477541", "0.4773601", "0.47703952", "0.4766212", "0.4762945", "0.47597322", "0.47584134", "0.47572756", "0.4755664", "0.47492945", "0.47471806", "0.473896", "0.4736547", "0.47295484", "0.47277403", "0.4721172", "0.47195727", "0.47195727", "0.47121596", "0.47075778", "0.4707451", "0.47059196", "0.47056597", "0.47013387", "0.4700665", "0.46985236", "0.46907482", "0.46907482", "0.46907482", "0.46872213" ]
0.6036392
3
Sums the pixels inside a region preserving the spectral axis.
def extract_spec(data, region, naxis, mode):
    logger = logging.getLogger(__name__)
    logger.debug('Data shape: {0}'.format(data.shape))
    if region['shape'] == 'point':
        if naxis > 3:
            spec = data[:,:,region['params']['cy'],region['params']['cx']]
            if mode == 'sum':
                spec = spec.sum(axis=0)
            elif mode == 'avg':
                spec = spec.mean(axis=0)
            elif 'flux' in mode.lower():
                spec = spec.sum(axis=0)/region['barea']
            else:
                logger.error('Mode not supported.')
                logger.error('Will exit now.')
                sys.exit(1)
        elif naxis == 3:
            spec = data[:,region['params']['cy'],region['params']['cx']]
        else:
            spec = data[region['params']['cy'],region['params']['cx']]
    elif region['shape'] == 'box':
        area = (region['params']['trcy'] - region['params']['blcy']) * \
               (region['params']['trcx'] - region['params']['blcx'])
        if naxis > 3:
            spec = data[0,:,region['params']['blcy']:region['params']['trcy'],
                        region['params']['blcx']:region['params']['trcx']]
            if mode == 'sum':
                spec = spec.sum(axis=2).sum(axis=1)
            elif mode == 'avg':
                spec = spec.mean(axis=2).mean(axis=1)
            elif 'flux' in mode.lower():
                spec = spec.sum(axis=2).sum(axis=1)/region['barea']
            else:
                logger.error('Mode not supported.')
                logger.error('Will exit now.')
                sys.exit(1)
        elif naxis == 3:
            spec = data[:,region['params']['blcy']:region['params']['trcy'],
                        region['params']['blcx']:region['params']['trcx']]
            if mode == 'sum':
                spec = spec.sum(axis=2).sum(axis=1)#/area
            elif mode == 'avg':
                spec = spec.mean(axis=2).mean(axis=1)#/area
            elif 'flux' in mode.lower():
                summ = spec.sum(axis=2).sum(axis=1)
                logger.info('Sum of pixels: {0}'.format(summ))
                spec = summ/region['barea']
            else:
                logger.error('Mode not supported.')
                logger.error('Will exit now.')
                sys.exit(1)
        else:
            spec = data[region['params']['blcy']:region['params']['trcy'],
                        region['params']['blcx']:region['params']['trcx']]
            if mode == 'sum':
                spec = spec.sum()
            elif mode == 'avg':
                spec = spec.mean()
            elif 'flux' in mode.lower():
                spec = spec.sum()/region['barea']
            else:
                logger.error('Mode not supported.')
                logger.error('Will exit now.')
                sys.exit(1)
    elif region['shape'] == 'circle':
        logger.info("Circular region has a center " \
                    "at pixel ({0},{1}) with radius " \
                    "{2}".format(region['params']['cx'],
                                 region['params']['cy'],
                                 region['params']['r']))
        if naxis > 3:
            logger.debug("The image has more than 3 axes.")
            mask = sector_mask(data[0,0].shape,
                               (region['params']['cy'], region['params']['cx']),
                               region['params']['r'], (0, 360))
            mdata = data[0][:,mask]
            logger.debug("Masked data shape: {0}".format(mdata.shape))
            if 'sum' in mode.lower():
                spec = mdata.sum(axis=1)
            elif 'avg' in mode.lower():
                spec = mdata.mean(axis=1)
            elif 'flux' in mode.lower():
                spec = mdata.sum(axis=1)/region['barea']
        elif naxis == 3:
            mask = sector_mask(data[0].shape,
                               (region['params']['cy'], region['params']['cx']),
                               region['params']['r'], (0, 360))
            mdata = data[:,mask]
            logger.debug("Masked data shape: {0}".format(mdata.shape))
            if 'sum' in mode.lower():
                spec = mdata.sum(axis=1)#/len(np.where(mask.flatten() == 1)[0])
            elif 'avg' in mode.lower():
                spec = mdata.mean(axis=1)
            elif 'flux' in mode.lower():
                spec = mdata.sum(axis=1)/region['barea']
            else:
                logger.error('Mode not supported.')
                logger.error('Will exit now.')
                sys.exit(1)
        else:
            mask = sector_mask(data.shape,
                               (region['params']['cy'], region['params']['cx']),
                               region['params']['r'], (0, 360))
            mdata = np.ma.masked_invalid(data[mask])
            logger.debug("Masked data shape: {0}".format(mdata.shape))
            logger.debug("Masked data sum: {0}".format(mdata))
            if 'sum' in mode.lower():
                spec = mdata.sum()#/len(np.where(mask.flatten() == 1)[0])
            elif 'avg' in mode.lower():
                spec = mdata.mean()
            elif 'flux' in mode.lower():
                spec = mdata.sum()/region['barea']
            else:
                logger.error('Mode not supported.')
                logger.error('Will exit now.')
                sys.exit(1)
    elif region['shape'] == 'ellipse':
        logger.info("Elliptical region has a center " \
                    "at pixel ({0},{1}) with major and minor axes " \
                    "{2} and {3} at an angle {4}".format(region['params']['cx'],
                                                         region['params']['cy'],
                                                         region['params']['bmaj'],
                                                         region['params']['bmin'],
                                                         region['params']['theta']))
        logger.debug("Mask shape: {}".format(data.shape[-2:]))
        mask = ellipse_mask(data.shape[-2:],
                            region['params']['cy'], region['params']['cx'],
                            region['params']['bmaj']/2., region['params']['bmin']/2.,
                            region['params']['theta'])
        logger.debug('Elements in mask: {}'.format(mask.sum()))
        if naxis > 3:
            mdata = data[0][:,mask]
            axis = 1
        elif naxis == 3:
            mdata = data[:,mask]
            axis = 1
        else:
            mdata = data[mask]
            axis = 0
        logger.debug("Masked data shape: {0}".format(mdata.shape))
        if 'sum' in mode.lower():
            spec = mdata.sum(axis=axis)
        elif 'avg' in mode.lower():
            spec = mdata.mean(axis=axis)
        elif 'flux' in mode.lower():
            spec = mdata.sum(axis=axis)/region['barea']
        else:
            logger.error('Mode not supported.')
            logger.error('Will exit now.')
            sys.exit(1)
    elif 'poly' in region['shape']:
        npolys = len(region['params']['Polygons'])
        if naxis > 3:
            shape = data[0][0].shape
            npix3 = data[0].shape[0]
        elif naxis == 3:
            shape = data[0].shape
            npix3 = data.shape[0]
        else:
            shape = data.shape
            npix3 = 0
        mask = np.zeros(shape)
        for poly in region['params']['Polygons']:
            # Add all the polygons together
            logger.info("Adding polygons to the mask.")
            mask += poly.make_mask(shape)
        logger.info("Normalizing the mask to unity.")
        mask = np.ceil(mask/npolys)
        if naxis > 3:
            mdata = data[0]*np.tile(mask, (npix3,1,1))
        else:
            mdata = data*np.tile(mask, (npix3,1,1))
        if mode == 'sum':
            spec = mdata.sum(axis=1).sum(axis=1)
        elif 'avg' in mode.lower():
            spec = mdata.mean(axis=1).mean(axis=1)
        elif 'flux' in mode.lower():
            spec = mdata.sum(axis=1).sum(axis=1)/region['barea']
        else:
            logger.error('Mode not supported.')
            logger.error('Will exit now.')
            sys.exit(1)
    elif 'all' in region['shape']:
        if naxis > 3:
            data = data[0]
            spec = proc_data(data, mode, region)
        elif naxis == 3:
            data = data
            spec = proc_data(data, mode, region)
        else:
            spec = proc_data(data, mode, region)

    return spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sumRegion(self, row1, col1, row2, col2):\n\n if not self.sums:\n return 0\n\n r1, c1 = row1+1, col1+1\n r2, c2 = row2+1, col2+1\n return self.sums[r2][c2] + self.sums[r1-1][c1-1] - self.sums[r2][c1-1] - self.sums[r1-1][c2]", "def sumRegion(self, row1, col1, row2, col2):\n return self.total[row2+1][col2+1] - self.total[row1][col2+1] - self.total[row2+1][col1] + self.total[row1][col1]", "def sumRegion(self, row1, col1, row2, col2):\n \n if row1 == col1 == 0:\n return self.matrix_sum[row2][col2]\n elif row1 == 0:\n return self.matrix_sum[row2][col2] - self.matrix_sum[row2][col1-1]\n elif col1 == 0:\n return self.matrix_sum[row2][col2] - self.matrix_sum[row1-1][col2]\n else:\n return (self.matrix_sum[max(row1, row2)][max(col1, col2)] - \n self.matrix_sum[max(0, min(row1, row2)-1)][max(col1, col2)] - \n self.matrix_sum[max(row1, row2)][max(0, min(col1, col2)-1)] + \n self.matrix_sum[max(0, min(row1, row2)-1)][max(0, min(col1, col2)-1)])", "def sumRegion(self, row1, col1, row2, col2):\n if not self.dp:\n return 0\n \n row1 -= 1\n col1 -= 1\n \n dp = self.dp\n \n return dp[row2 + 1][col2 + 1] - dp[row1 +1][col2 + 1] - dp[row2 + 1][col1 + 1] + dp[row1 + 1][col1 + 1]", "def sumRegion(self, row1, col1, row2, col2):\r\n\r\n # Using dp matrix, the sum of the region can be calculated as\r\n # dp[row2][col2] - dp[row2][col1-1] - dp[row1-1][col2] + dp[row1-1][col1-1]\r\n sum_upper_left = self.dp[row1-1][col1-1] if row1-1 >= 0 and col1-1 >=0 else 0\r\n sum_left = self.dp[row2][col1-1] if col1-1 >= 0 else 0\r\n sum_up = self.dp[row1-1][col2] if row1-1 >=0 else 0\r\n\r\n #print sum_upper_left, sum_left, sum_up, dp[row2][col2]\r\n return self.dp[row2][col2] - (sum_left + sum_up) + sum_upper_left", "def sphere_r_intensity(img):\n pixels = []\n for j in range(0, img.shape[0]):\n for i in range(1, 40):\n pixels.append(img[j, img.shape[1] - i])\n\n return np.mean(pixels)", "def Rj_strip_sum(ang_width, im, center, Rj_ap_height, imtype, header, row):\n Rj = ang_width/2/plate_scale # arcsec / (arcsec/pix)\n #D.say('Rj = ', Rj, ' pixels')\n ap_height = int(Rj_ap_height * Rj)\n if ((abs(ap_height)/2 + center[0]) >= im.shape[0]\n or (center[0] - abs(ap_height)/2 < 0 )):\n #log.warning('Rj_ap_height ' + str(Rj_ap_height) + ' Rj too large, setting aperture sum to zero')\n asum = 0\n else:\n tim = im + 0\n #D.say('Total of pixels in image: ', np.sum(tim))\n #D.say(Rj_ap_height, ' Rj ', ap_height, ' pix')\n if ap_height > 0:\n ny, nx = tim.shape\n #D.say('ycenter, ny: ', center[0], ny)\n # Blank out pixels above and below aperture strip\n tim[0:int(center[0]-ap_height/2), :] = 0\n tim[int(center[0]+ap_height/2):ny, :] = 0\n #impl = plt.imshow(tim, origin='lower',\n # cmap=plt.cm.gray, filternorm=0, interpolation='none')\n #plt.show()\n\n elif ap_height < 0:\n # Blank out the strip in the center\n tim[int(center[0]+ap_height/2):int(center[0]-ap_height/2), :] = 0\n #impl = plt.imshow(tim, origin='lower',\n # cmap=plt.cm.gray, filternorm=0, interpolation='none')\n #plt.show()\n asum = np.sum(tim)\n #D.say(asum)\n good_idx = np.where(tim != 0)\n asum /= len(good_idx[0])\n #D.say(asum)\n sap_height = str(abs(Rj_ap_height))\n if ap_height > 0:\n keypm = 'p'\n comstr = 'strip ' + sap_height + ' Rj in Y'\n elif ap_height < 0:\n keypm = 'm'\n comstr = 'excluding strip ' + sap_height + ' Rj in Y'\n else:\n keypm = '_'\n comstr = 'entire image'\n key = imtype + 'Rj' + keypm + sap_height\n header[key] = (asum, 'average of ' + comstr)\n row[key] = asum\n return key", "def sumPixel(img):\n ret = cv2.sumElems(img)\n if 
len(img.shape) == 2:\n return ret[0]\n else:\n return (ret[0], ret[1], ret[2])", "def integrate_spectrum(self):\n flux = sum(self.spectrum)\n return flux", "def aperture_sum(im, center, y, x, r, imtype, header, row):\n r2 = int(r/2)\n center = center.astype(int)\n asum = np.sum(im[center[0]+y-r2:center[0]+y+r2,\n center[1]+x-r2:center[1]+x+r2])\n asum /= r**2\n key = imtype + 'AP' + str(x) + '_' + str(y)\n header[key] = (asum, 'square aperture average at x, y, r = ' + str(r))\n row[key] = asum\n return key", "def sum_over_energy(self):\n # Note that the array is using the opposite convention from WCS\n # so we sum over axis 0 in the array, but drop axis 2 in the WCS object\n return Map(np.sum(self.counts, axis=0), self.wcs.dropaxis(2))", "def strip_sum(im, center, ap_height, imtype, header, row):\n tim = im + 0\n if ap_height > 0:\n ny, nx = tim.shape\n # Blank out pixels above and below aperture strip\n tim[0:int(center[0]-ap_height/2), :] = 0\n tim[int(center[0]+ap_height/2):ny, :] = 0\n elif ap_height < 0:\n # Blank out the strip in the center\n tim[int(center[0]+ap_height/2):int(center[0]-ap_height/2), :] = 0\n asum = np.sum(tim)\n good_idx = np.where(tim != 0)\n asum /= len(good_idx[0])\n sap_height = str(abs(ap_height))\n if ap_height > 0:\n keypm = 'p'\n comstr = 'strip ' + sap_height + ' pix in Y'\n elif ap_height < 0:\n keypm = 'm'\n comstr = 'excluding strip ' + sap_height + ' pix in Y'\n else:\n keypm = '_'\n comstr = 'entire image'\n key = imtype + keypm + sap_height\n header[key] = (asum, 'average of ' + comstr)\n row[key] = asum\n return key", "def sum_over_energy(self):\n # We sum over axis 0 in the array, and drop the energy binning in the\n # hpx object\n return HpxMap(np.sum(self.counts, axis=0), self.hpx.copy_and_drop_energy())", "def calc_region_sum(cube, coord_names, aux_coord_names, grid_type, area_cube, region):\n\n if grid_type == 'curvilinear':\n assert area_cube, \"Must provide an area cube of curvilinear data\"\n\n cube = cube.copy() \n coord_names = coord_names.copy()\n lat_bounds = region_bounds[region]\n\n # Extract region\n if lat_bounds:\n if grid_type == 'curvilinear':\n cube = extract_region_curvilinear(cube, lat_bounds)\n else:\n cube = extract_region_latlon(cube, lat_bounds)\n\n if 'm-2' in str(cube.units):\n # Get area weights \n if area_cube:\n if grid_type == 'latlon' and lat_bounds:\n area_cube = extract_region_latlon(area_cube, lat_bounds)\n area_data = uconv.broadcast_array(area_cube.data, [1, 2], cube.shape)\n else:\n area_data = spatial_weights.area_array(cube)\n\n # Multiply by area\n cube.data = cube.data * area_data\n units = str(cube.units)\n cube.units = units.replace('m-2', '')\n\n assert cube.units == 'J'\n\n coord_names.remove('time')\n spatial_agg = cube.collapsed(coord_names, iris.analysis.SUM)\n \n spatial_agg.remove_coord('latitude')\n spatial_agg.remove_coord('longitude')\n if grid_type == 'curvilinear':\n spatial_agg.remove_coord(coord_names[0])\n spatial_agg.remove_coord(coord_names[1])\n\n return spatial_agg", "def summation(colors):\n return np.sum(colors, -1)", "def _gu_sum(a, **kwds):\n return np.sum(np.ascontiguousarray(a), axis=-1, **kwds)", "def _sum_n_rasters(\n raster_path_list, target_raster_path):\n LOGGER.info('Summing %s rasters to %s', len(raster_path_list),\n target_raster_path)\n LOGGER.debug('Attempting to open %s', raster_path_list[0])\n pygeoprocessing.new_raster_from_base(\n raster_path_list[0], target_raster_path, gdal.GDT_Float32,\n [NODATA_FLOAT32_MIN])\n\n target_raster = gdal.OpenEx(\n target_raster_path, 
gdal.GA_Update | gdal.OF_RASTER)\n target_band = target_raster.GetRasterBand(1)\n\n n_pixels_to_process = (\n (target_raster.RasterXSize * target_raster.RasterYSize) *\n len(raster_path_list))\n n_pixels_processed = 0\n last_log_time = time.time()\n\n raster_tuple_list = []\n for raster_path in raster_path_list:\n raster = gdal.OpenEx(raster_path, gdal.OF_RASTER)\n band = raster.GetRasterBand(1)\n nodata = band.GetNoDataValue()\n raster_tuple_list.append((raster, band, nodata))\n\n for block_info in pygeoprocessing.iterblocks(\n (raster_path_list[0], 1), offset_only=True):\n\n sum_array = numpy.empty(\n (block_info['win_ysize'], block_info['win_xsize']),\n dtype=numpy.float32)\n sum_array[:] = 0.0\n\n # Assume everything is valid until proven otherwise\n pixels_touched = numpy.zeros(sum_array.shape, dtype=bool)\n for (_, band, nodata) in raster_tuple_list:\n if time.time() - last_log_time >= 5.0:\n percent_complete = round(\n n_pixels_processed / n_pixels_to_process, 4)*100\n LOGGER.info(f'Summation {percent_complete:.2f}% complete')\n last_log_time = time.time()\n\n array = band.ReadAsArray(**block_info)\n valid_pixels = slice(None)\n if nodata is not None:\n valid_pixels = ~utils.array_equals_nodata(array, nodata)\n\n sum_array[valid_pixels] += array[valid_pixels]\n pixels_touched[valid_pixels] = 1\n n_pixels_processed += sum_array.size # for logging\n\n sum_array[~pixels_touched] = NODATA_FLOAT32_MIN\n\n target_band.WriteArray(\n sum_array, block_info['xoff'], block_info['yoff'])\n\n LOGGER.info('Summation 100.00% complete')\n raster_tuple_list = None\n\n target_band.ComputeStatistics(0)\n target_band = None\n target_raster = None", "def sum(self, elim=None, out=None):\n if (elim is None):\n elim = self.v\n return self.__opReduce2(self.v & elim,np.sum, out=out)", "def sphere_l_intensity(img):\n pixels = []\n for j in range(0, img.shape[0]):\n for i in range(1, 40):\n pixels.append(img[j, i])\n\n return np.mean(pixels)", "def Rj_box_sum(ang_width, im, center, Rj_ap_side, imtype, header, row):\n Rjpix = ang_width/2/plate_scale # arcsec / (arcsec/pix)\n #D.say('Rj = ', Rj, ' pixels')\n ap_side = int(Rj_ap_side * Rjpix)\n if ((abs(ap_side)/2 + center[0]) >= im.shape[0]\n or (center[0] - abs(ap_side)/2 < 0 )\n or (abs(ap_side)/2 + center[1]) >= im.shape[1]\n or (center[1] - abs(ap_side)/2 < 0 )):\n #log.warning('Rj_ap_side ' + str(Rj_ap_side) + ' Rj too large, setting aperture sum to zero')\n asum = 0\n else:\n tim = im + 0\n #D.say('Total of pixels in image: ', np.sum(tim))\n #D.say(Rj_ap_side, ' Rj ', ap_side, ' pix')\n if ap_side > 0:\n ny, nx = tim.shape\n #D.say('ycenter, ny: ', center[0], ny)\n # Blank out pixels outside of the box\n tim[0:int(center[0]-ap_side/2), :] = 0\n tim[int(center[0]+ap_side/2):ny, :] = 0\n tim[:, 0:int(center[1]-ap_side/2)] = 0\n tim[:, int(center[1]+ap_side/2):nx] = 0\n #impl = plt.imshow(tim, origin='lower',\n # cmap=plt.cm.gray, filternorm=0, interpolation='none')\n #plt.show()\n\n elif ap_side < 0:\n # Blank out the box\n tim[int(center[0]+ap_side/2):int(center[0]-ap_side/2),\n int(center[1]+ap_side/2):int(center[1]-ap_side/2)] = 0\n #impl = plt.imshow(tim, origin='lower',\n # cmap=plt.cm.gray, filternorm=0, interpolation='none')\n #plt.show()\n asum = np.sum(tim)\n #D.say(asum)\n good_idx = np.where(tim != 0)\n asum /= len(good_idx[0])\n #D.say(asum)\n sap_side = str(abs(Rj_ap_side))\n if ap_side > 0:\n keypm = 'p'\n comstr = 'box ' + sap_side + ' Rj in size'\n elif ap_side < 0:\n keypm = 'm'\n comstr = 'excluding box ' + sap_side + ' Rj in size'\n 
else:\n keypm = '_'\n comstr = 'entire image'\n key = imtype + 'Rj' + keypm + sap_side\n header['HIERARCH ' + key] = (asum, 'average of ' + comstr)\n row[key] = asum\n return key", "def assert_array_sum(base_raster_path, desired_sum):\r\n base_raster = gdal.OpenEx(base_raster_path, gdal.OF_RASTER)\r\n base_band = base_raster.GetRasterBand(1)\r\n base_array = base_band.ReadAsArray()\r\n raster_sum = numpy.sum(base_array)\r\n numpy.testing.assert_almost_equal(raster_sum, desired_sum)", "def abs_sum(gray_im, windowsize, searchvalue=None):\n colsum = [0] * gray_im.size[0] * gray_im.size[1]\n totsum = [0] * gray_im.size[0] * gray_im.size[1]\n c1 = clock()\n if searchvalue != None:\n data = list(gray_im.point(lambda x: 1 if x==searchvalue else 0).getdata()) # init as 0/1 thresholded image data (vs 0/255)\n else:\n data = list(gray_im.getdata())\n pos = 0\n for j in range(gray_im.size[1]):\n for i in range(gray_im.size[0]):\n colsum[pos] = data[pos]\n if j-1 >= 0: colsum[pos] += colsum[pos-gray_im.size[0]] # add the subtotal from point above\n if j-windowsize[1] >= 0: colsum[pos] -= data[pos-(gray_im.size[0]*windowsize[1])] # keep within vertical window\n\n totsum[pos] = colsum[pos]\n if i-1 >= 0: totsum[pos] += totsum[pos-1] # add point total to the left\n if i-windowsize[0] >= 0: totsum[pos] -= colsum[pos-windowsize[0]] # keep within horizontal window\n\n pos += 1\n sys.stderr.write(\"local sum: %3.3fs\\n\" % (clock()-c1))\n return totsum", "def rowsums (self):\n return self.values.sum (axis=0)", "def sum(self):\n return self.aggregate(np.sum)", "def Rj_aperture_sum(ang_width, im, center, yRj, xRj, rpix, imtype, header, row):\n r2 = int(rpix/2)\n center = center.astype(int)\n Rjpix = ang_width/2/plate_scale # arcsec / (arcsec/pix)\n y = int(yRj * Rjpix)\n x = int(xRj * Rjpix)\n center = center.astype(int)\n asum = np.sum(im[center[0]+y-r2:center[0]+y+r2,\n center[1]+x-r2:center[1]+x+r2])\n asum /= rpix**2\n key = imtype + 'AP' + str(xRj) + '_' + str(yRj)\n header[key] = (asum, str(rpix) + ' pix square aperture [-]NNN_[-]MMM Rj from Jupiter' )\n row[key] = asum\n return key", "def averageColorInRegion(self,x1,y1,x2,y2,skip_factor):\n \n\n rgb = [0, 0, 0, 0]\n temp = [0, 0, 0, 0]\n pixels = abs(((x2-x1) / skip_factor) * ((y2-y1) / skip_factor))\n\n #switching endpoints so iteration is positive\n if (x1 > x2):\n temp = x2\n x2 = x1\n x1 = temp\n\n if (y1 > y2):\n temp = y2\n y2 = y1\n y1 = temp\n\n for i in range(x1, x2, skip_factor):\n for j in range(y1, y2, skip_factor):\n temp = self.pixel(i, j)\n \n #rgb[0] += temp[0] * temp[3]/255 #Sum plus alpha correction\n #rgb[1] += temp[1] * temp[3]/255\n #rgb[2] += temp[2] * temp[3]/255\n #rgb[3] += temp[3]\n\n rgb[0] += temp[0] \n rgb[1] += temp[1]\n rgb[2] += temp[2] \n\n for i in range(4):\n rgb[i] = int(rgb[i] / pixels * brightness)\n #rgb[i] = int( (rgb[i] / pixels * brightness) * alpha)\n if (rgb[i] > 255):\n #cutting off at 255 - need to find the problem later\n rgb[i] = 255\n\n #if (rgb[i] < 20):\n # rgb[i] = 0\n \n\n return rgb", "def sum_img(img_array):\n sum_img = 0\n counter= 0\n for n in range(len(img_array)):\n sum_img += img_array[n]\n counter += 1\n sum_img = np.asarray(sum_img)\n avg_img = sum_img / (counter)\n return sum_img, avg_img", "def sum_bands(self, idx_target_bands, stack=None, update=False):\n # Retrieve bands\n current_stack = self.current_stack\n if stack is not None: # use instead a given stack\n current_stack = stack\n to_replace_bands = current_stack[:, idx_target_bands, ...]\n \n # Update shape\n old_shape = 
list(to_replace_bands.shape)\n old_shape[1] = 1 # sum of bands equals to one\n new_shape = tuple(old_shape)\n\n # Sum and reshape bands\n summed_bands = np.sum(to_replace_bands, axis=1).reshape(new_shape)\n\n # Concatenate new summed band and delete old ones\n scene_stack = np.concatenate((current_stack, summed_bands), axis=1)\n new_scene_stack = np.delete(scene_stack, idx_target_bands, axis=1)\n\n if update:\n self.set_current_stack(new_scene_stack)\n\n return new_scene_stack", "def sum(self, axis=None):\n if axis is None:\n return numpy.ma.sum(self.data)\n\n new_data = numpy.ma.sum(self.data, axis=axis)\n remaining_axes = numpy.setdiff1d(range(self.ndim), axis)\n remaining_edges = [self.bset.edges[ax] for ax in remaining_axes]\n\n # This is kind of a hack that breaks good OO design, but is there\n # a better solution?\n if len(remaining_edges) == 2:\n return IntensityMap2D(new_data, (remaining_edges,))\n else:\n return IntensityMap(new_data, (remaining_edges,))", "def get_bandpass(self):\n return self.sum(axis=1)", "def process_sum_square( fids, ndim=2 ):\n\t\n\timg = process( fids, ndim)\n\timg *= np.conj(img) \n\t\n\treturn np.squeeze(np.sqrt( np.sum( np.abs(img), axis=0) ))", "def sum(self) -> FrameLike:\n return super().sum()", "def sum(self) -> FrameLike:\n return super().sum()", "def sum(self) -> FrameLike:\n return super().sum()", "def sum(self) -> FrameLike:\n return super().sum()", "def sumModelIntoImage(x, y, nx, ny, modelData, imageData):\r\n imageData[y:y+ny,x:x+nx] += modelData", "def sum(self, axis=None, keepdims=False):\n return F.Sum.apply(self, axis, keepdims)", "def calc_rsi(image):\n\n # roll axes to conventional row,col,depth\n img = np.rollaxis(image, 0, 3)\n\n # bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral\n COAST = img[:, :, 0]\n B = img[:, :, 1]\n G = img[:, :, 2]\n Y = img[:, :, 3]\n R = img[:, :, 4]\n RE = img[:, :, 5]\n NIR1 = img[:, :, 6]\n NIR2 = img[:, :, 7]\n\n arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))\n dd = (2 * NIR1 - R) - (G - B)\n gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5\n gndvi = old_div((NIR1 - G), (NIR1 + G))\n ndre = old_div((NIR1 - RE), (NIR1 + RE))\n ndvi = old_div((NIR1 - R), (NIR1 + R))\n ndvi35 = old_div((G - R), (G + R))\n ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))\n nirry = old_div((NIR1), (R + Y))\n normnir = old_div(NIR1, (NIR1 + R + G))\n psri = old_div((R - B), RE)\n rey = old_div((RE - Y), (RE + Y))\n rvi = old_div(NIR1, R)\n sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69\n vi1 = old_div((10000 * NIR1), (RE) ** 2)\n vire = old_div(NIR1, RE)\n br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))\n gr = old_div(G, R)\n rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))\n\n ###Built-Up indices\n wvbi = old_div((COAST - RE), (COAST + RE))\n wvnhfd = old_div((RE - COAST), (RE + COAST))\n\n ###SIs\n evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))\n L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES\n savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))\n msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)\n bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))\n rgi = old_div(R, G)\n bri = old_div(B, R)\n\n rsi = np.stack(\n [arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,\n wvbi, wvnhfd, evi, savi, msavi, 
bai, rgi, bri],\n axis=2)\n\n return rsi", "def sumSegments(array):\n sum_segs = []\n for seg in array:\n sum_segs.append(sum(seg))\n return np.array(sum_segs)", "def accumulate(self,tod,weights,pixels):\n binFuncs.binValues(self.sigwei, pixels, weights=tod*weights)\n binFuncs.binValues(self.wei , pixels, weights=weights )\n if self.storehits:\n binFuncs.binValues(self.hits, pixels,mask=weights)", "def GetSumOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUS2_GetSumOutput(self, *args)", "def collapse(self):\n try:\n wavelengths = pylab.linspace(self.start, self.end,\n self.image.shape[not self.waveaxis])\n except TypeError:\n print 'The starting and ending wavelengths must be specified.'\n background = pylab.zeros(len(wavelengths))\n backgroundlines = 0\n data = pylab.zeros(len(wavelengths))\n datalines = 0\n for region in self.regions:\n if region['group'] is 0:\n backgroundlines += region['max'] - region['min']\n background += self.image[region['min']:region['max'] + 1, :]\\\n .sum(axis=self.waveaxis)\n else:\n datalines += region['max'] - region['min']\n data += self.image[region['min']:region['max'] + 1, :]\\\n .sum(axis=self.waveaxis)\n background = [sum/backgroundlines for sum in background]\n data = [sum/datalines for sum in data]\n corrected = pylab.array(data) - pylab.array(background)\n output = Spectrum(list(wavelengths), list(corrected))\n return output", "def GetSum(self, label: 'unsigned short') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUS2_GetSum(self, label)", "def sumImages(frames,weights):\n\tsumImage=np.zeros(frames[0].shape)\n\tfor i in range(0,len(weights)):\n\t\tsumImage += frames[i]*weights[i]\n\treturn sumImage", "def sum_mat_bins(mat):\n # Equivalaent to row or col sum on a full matrix\n # Note: mat.sum returns a 'matrix' object. A1 extracts the 1D flat array\n # from the matrix\n return mat.sum(axis=0).A1 + mat.sum(axis=1).A1 - mat.diagonal(0)", "def get_pixels_hu(slices):\n image = np.stack([s.pixel_array for s in slices])\n image = image.astype(np.int16)\n\n outside_image = image.min()\n image[image == outside_image] = 0\n\n for slice_number in range(len(slices)):\n intercept = slices[slice_number].RescaleIntercept\n slope = slices[slice_number].RescaleSlope\n\n if slope != 1:\n image[slice_number] = slope * image[slice_number].astype(np.float64)\n image[slice_number] = image[slice_number].astype(np.int16)\n\n image[slice_number] += np.int16(intercept)\n\n return np.array(image, dtype=np.int16)", "def GetSum(self, label: 'unsigned short') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2IUS2_GetSum(self, label)", "def sum_spectra(self, wave_range=None, units=u.Angstrom):\n if wave_range is None:\n # Sum over entire wavelength axis and return an NDCube\n sum_data = np.sum(self.data, axis=2)\n new_wcs = self.wcs.dropaxis(0)\n new_meta = copy.deepcopy(self.meta)\n new_meta['notes'].append('Summed over entire wavelength axis.')\n return NDCube(sum_data, new_wcs, meta=new_meta)\n\n # Validate input wavelength range\n if isinstance(wave_range, (list, tuple)):\n use_range = [0, 0]\n range_units = ['unknown', 'unknown']\n print('Summing EISCube spectra over a select wavelength range.')\n if len(wave_range) != 2:\n print('Error: invalid number of wave_range values. Please input'\n +' a list or tuple with exactly two elements.',\n file=sys.stderr)\n return None\n else:\n print('Error: invalid wave_range type. 
Please input either None or'\n +' a list (or tuple) with two elements.', file=sys.stderr)\n return None\n\n for w in range(2):\n if isinstance(wave_range[w], u.Quantity):\n # Parse an astropy.units.Quantity and convert as needed\n # Note: this will overwrite any inputs to the \"units\" kwarg\n if wave_range[w].unit == u.pix:\n use_range[w] = wave_range[w].value\n range_units[w] = u.pix\n elif wave_range[w].unit.physical_type == 'length':\n use_range[w] = wave_range[w].to('Angstrom').value\n range_units[w] = u.Angstrom\n else:\n print('Error: invalid wavelength unit. Please input a pixel'\n +' or length unit.', file=sys.stderr)\n return None\n else:\n # Assume default or user inputted units (still convert if needed)\n input_units = u.Unit(units)\n if input_units == u.pix:\n use_range[w] = float(wave_range[w])\n range_units[w] = u.pix\n elif input_units.physical_type == 'length':\n u_scale = input_units.to('Angstrom')\n use_range[w] = float(wave_range[w])*u_scale\n range_units[w] = u.Angstrom\n else:\n print('Error: invalid wavelength unit. Please input a pixel'\n +' or length unit.', file=sys.stderr)\n return None\n\n # Check for consistent units\n if range_units[0] != range_units[1]:\n print('Error: mismatched units. Please input the same units for'\n +' both wave_range elements or use the \"units\" keyword',\n file=sys.stderr)\n return None\n\n # If given values of [center, half width], compute the actual range\n if use_range[1] < use_range[0]:\n temp_center = use_range[0]\n temp_half_wid = use_range[1]\n use_range[0] = temp_center - temp_half_wid\n use_range[1] = temp_center + temp_half_wid\n\n # Get indices to be summed over\n w_indices = [0, -1]\n if range_units[0] == u.pix:\n # Round pixels values to nearest whole indice\n w_indices[w] = int(round(use_range[w]))\n elif range_units[0] == u.Angstrom:\n # Find the closest pixel location on the average wavelength axis\n try:\n # Note: the corrected wavelength has units of [Angstrom]\n w_coords = np.mean(self.wavelength, axis=(0,1))\n except KeyError:\n print('Error: missing or invalid corrected wavelength array.')\n return None\n for w in range(2):\n abs_w_diff = np.abs(w_coords - use_range[w])\n w_indices[w] = np.argmin(abs_w_diff)\n\n sum_data = np.sum(self.data[:,:,w_indices[0]:w_indices[1]+1], axis=2)\n new_wcs = self.wcs.dropaxis(0)\n new_meta = copy.deepcopy(self.meta)\n new_meta['notes'].append('Summed wavelength axis over the range of '\n +str(use_range)+' '+str(range_units[0]))\n return NDCube(sum_data, new_wcs, meta=new_meta)", "def region_of_interest(self, img):\n # get region vertices\n r1, r2, r3, r4 = self.region_filter_params[\"ratios\"]\n img_height, img_width = img.shape\n vertices = define_region_vertices(img_height, img_width, r1, r2, r3, r4)\n\n # defining a blank mask to start with\n mask = np.zeros_like(img)\n\n # defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n\n # filling pixels inside the polygon defined by \"vertices\" with the fill color\n cv2.fillPoly(mask, [vertices], ignore_mask_color)\n\n # returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def compute_accumulation(self, i):\n\n #These are just pointer reassignments, not a deep-copy.\n dx = self.dx_arr\n phi = self.porosity\n area = self.res_area\n cf = self.compressibility\n Bw = self.form_volume_factor\n\n return area * dx[i] * phi[i] * cf / Bw", "def energy_map(img):\n img_new = img.astype(float) #converting image to float\n total_energy = 0.0 # To store the sum of energy for all channels\n r,c,d = img.shape \n for i in range(d):\n dy = np.zeros([r, c], dtype=float) \n dx = np.zeros([r, c], dtype=float)\n if r > 1:\n dy = np.gradient(img_new[:,:,i], axis=0) #gradient along rows\n if c > 1:\n dx = np.gradient(img_new[:,:,i], axis=1) #gradient along columns\n total_energy += np.absolute(dy) + np.absolute(dx) \n return total_energy #Total energy map for entire image", "def summation(self):\n return sum(self.read_ints())", "def compute_statistics(self, region):\n x = 0.0\n y = 0.0\n n = 1\n for pixel in region:\n n = n + 1\n x = x + pixel[0]\n y = y + pixel[1]\n\n x = x / n\n y = y / n\n k = 1\n print(\"Region: \" + str(k) + \", Centroid: (\" + str(x) + \",\" + str(y) + \"), Area: \" + str(n))\n\n # Please print your region statistics to stdout\n # <region number>: <location or center>, <area>\n # print(stats)\n\n return n", "def accumulate(self,tod,weights,chunk):\n binFuncs.binValues(self.sigwei, self.offsetpixels[chunk[0]:chunk[1]], weights=tod*weights )\n binFuncs.binValues(self.wei , self.offsetpixels[chunk[0]:chunk[1]], weights=weights )", "def normalize_sumwt(im: Image, sumwt) -> Image:\n nchan, npol, _, _ = im.data.shape\n assert isinstance(im, Image), im\n assert sumwt is not None\n assert nchan == sumwt.shape[0]\n assert npol == sumwt.shape[1]\n for chan in range(nchan):\n for pol in range(npol):\n if sumwt[chan, pol] > 0.0:\n im.data[chan, pol, :, :] = im.data[chan, pol, :, :] / sumwt[chan, pol]\n else:\n im.data[chan, pol, :, :] = 0.0\n return im", "def GetSum(self, label: 'short') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2ISS2_GetSum(self, label)", "def sumpxp(data, num_pixels):\n dat_shape = np.array(np.shape(data))\n dat_shape[-2] = int(dat_shape[-2] / num_pixels) # Reduce size by num_pixels in the row and column directions\n dat_shape[-1] = int(dat_shape[-1] / num_pixels)\n ndata = np.zeros(dat_shape)\n n = num_pixels\n for row in np.arange(dat_shape[-2]):\n for col in np.arange(dat_shape[-1]):\n # Get each 2x2 subarray over all of the first 2 axes\n if len(dat_shape) == 5:\n temp = data[:, :, :, n * row:n * row + n, n * col:n * col + n]\n ndata[:, :, :, row, col] = np.sum(temp, axis=(-2, -1)) # Sum over only the rows and columns\n elif len(dat_shape) == 4:\n temp = data[:, :, n * row:n * row + n, n * col:n * col + n]\n ndata[:, :, row, col] = np.sum(temp, axis=(-2, -1)) # Sum over only the rows and columns\n else:\n ndata = 0\n return ndata", "def sum(self):\n if self.isscalar():\n s = self.defval\n else:\n if self.defval:\n msg = \"Sum of a tensor wish defval != 0 not implemented.\"\n raise NotImplementedError(msg)\n s = 0\n for v in self.sects.values():\n s += np.sum(v)\n return s", "def GetSum(self, label: 'unsigned char') -> 
\"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUC2_GetSum(self, label)", "def imadd(img):\n\n img = img / 2 + 0.5 # unnormalize\n npimg = img\n plt.imshow(np.transpose(npimg, (1, 2, 0)))", "def testMethodSumLine(self):\n toolBar = self.plot.getProfileToolbar()\n\n toolBar.lineAction.trigger()\n plot2D = self.plot.getPlotWidget().getWidgetHandle()\n pos1 = plot2D.width() * 0.5, plot2D.height() * 0.2\n pos2 = plot2D.width() * 0.5, plot2D.height() * 0.8\n\n self.mouseMove(plot2D, pos=pos1)\n self.mousePress(plot2D, qt.Qt.LeftButton, pos=pos1)\n self.mouseMove(plot2D, pos=pos2)\n self.mouseRelease(plot2D, qt.Qt.LeftButton, pos=pos2)\n\n manager = toolBar.getProfileManager()\n roi = manager.getCurrentRoi()\n roi.setProfileMethod(\"sum\")\n roi.setProfileType(\"2D\")\n roi.setProfileLineWidth(3)\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n # check 2D 'sum' profile\n profilePlot = roi.getProfileWindow().getCurrentPlotWidget()\n data = profilePlot.getAllImages()[0].getData()\n expected = numpy.array([[3, 12], [21, 30], [39, 48]])\n numpy.testing.assert_almost_equal(data, expected)", "def GetSum(self, label: 'unsigned short') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUS3_GetSum(self, label)", "def GetSum(self, label: 'unsigned short') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS3IUS3_GetSum(self, label)", "def GetSum(self, label: 'unsigned short') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUS2_GetSum(self, label)", "def GetSum(self, label: 'short') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3ISS3_GetSum(self, label)", "def GetSum(self, label: 'unsigned short') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2IUS2_GetSum(self, label)", "def plot_sum(self):\n fig, ax = plt.subplots()\n ax.set_title(\"Unpolarized intensity: I_up + I_down\")\n ax.set_xlabel(\"Time (microseconds)\")\n ax.set_ylabel('Intensity')\n\n if (self.is_attribute(\"time\") & self.is_attribute(\"intensity_up\") & \n self.is_attribute(\"intensity_up_sigma\") &\n self.is_attribute(\"intensity_down\") & \n self.is_attribute(\"intensity_down_sigma\") &\n self.is_attribute(\"intensity_up_total\") &\n self.is_attribute(\"intensity_down_total\")):\n np_excl = numpy.array(self.excluded, dtype=bool)\n np_notexcl = numpy.logical_not(np_excl)\n np_time = numpy.array(self.time, dtype=float)\n np_up = numpy.array(self.intensity_up, dtype=float)\n np_sup = numpy.array(self.intensity_up_sigma, dtype=float)\n np_up_mod = numpy.array(self.intensity_up_total, dtype=float)\n np_down = numpy.array(self.intensity_down, dtype=float)\n np_sdown = numpy.array(self.intensity_down_sigma, dtype=float)\n np_down_mod = numpy.array(self.intensity_down_total, dtype=float)\n np_sum = np_up + np_down\n np_sum_mod = np_up_mod + np_down_mod\n np_ssum = numpy.sqrt(numpy.square(np_sup)+numpy.square(np_sdown))\n ax.plot(np_time, np_sum_mod, \"k-\", label=\"model\")\n ax.errorbar(np_time[np_notexcl], np_sum[np_notexcl], yerr=np_ssum[np_notexcl], fmt=\"ko\", alpha=0.2, label=\"experiment\")\n ax.errorbar(np_time[np_excl], np_sum[np_excl], yerr=np_ssum[np_excl], fmt=\"rs\", alpha=0.2, label=\"excluded\")\n\n y_min_d, y_max_d = ax.get_ylim()\n param = y_min_d-(np_sum - np_sum_mod).max()\n coeff = np_notexcl.astype(int)\n\n 
ax.plot([np_time.min(), np_time.max()], [param, param], \"k:\")\n ax.plot(np_time, coeff*(np_sum - np_sum_mod)+param, \"r-\", alpha=0.7,\n label=\"difference\")\n elif (self.is_attribute(\"time\") & self.is_attribute(\"intensity\") & \n self.is_attribute(\"intensity_total\") &\n self.is_attribute(\"intensity_sigma\")):\n np_excl = numpy.array(self.excluded, dtype=bool)\n np_notexcl = numpy.logical_not(np_excl)\n np_time = numpy.array(self.time, dtype=float)\n np_sum = numpy.array(self.intensity, dtype=float)\n np_sum_mod = numpy.array(self.intensity_total, dtype=float)\n np_ssum = numpy.array(self.intensity_sigma, dtype=float)\n ax.plot(np_time, np_sum_mod, \"k-\", label=\"model\")\n ax.errorbar(np_time[np_notexcl], np_sum[np_notexcl], yerr=np_ssum[np_notexcl], fmt=\"ko\", alpha=0.2, label=\"experiment\")\n ax.errorbar(np_time[np_excl], np_sum[np_excl], yerr=np_ssum[np_excl], fmt=\"rs\", alpha=0.2, label=\"excluded\")\n\n y_min_d, y_max_d = ax.get_ylim()\n param = y_min_d-(np_sum - np_sum_mod).max()\n coeff = np_notexcl.astype(int)\n\n ax.plot([np_time.min(), np_time.max()], [param, param], \"k:\")\n ax.plot(np_time, coeff*(np_sum - np_sum_mod)+param, \"r-\", alpha=0.7,\n label=\"difference\")\n ax.legend(loc='upper right')\n fig.tight_layout()\n return (fig, ax)", "def GetSumOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUS3_GetSumOutput(self, *args)", "def wsum(self):\n return reduce(operator.add, self.wvalues, 0.0)", "def sum_elements(arr):\n return sum(arr)", "def sum_over_energy(self):\n raise NotImplementedError(\"MapBase.sum_over_energy()\")", "def _compute_raw_image_norm(self):\n return np.sum(self._data, dtype=float)", "def sum(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.sum, reduce_instance_dims, name)", "def total_histogram_diff(pixel_diff):\n return sum(i * n for i, n in enumerate(pixel_diff.histogram()))", "def GetSum(self, label: 'unsigned char') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUC3_GetSum(self, label)", "def Sum2d(a):\n return(np.sum(np.sum(a,-1),-1))", "def _output_add(block_x, orig_x):\n stride = orig_x.shape[-2] // block_x.shape[-2]\n strides = (stride, stride)\n if block_x.shape[-1] != orig_x.shape[-1]:\n orig_x = nn.avg_pool(orig_x, strides, strides)\n channels_to_add = block_x.shape[-1] - orig_x.shape[-1]\n orig_x = jnp.pad(orig_x, [(0, 0), (0, 0), (0, 0), (0, channels_to_add)])\n return block_x + orig_x", "def GetSum(self, label: 'unsigned short') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3IUS3_GetSum(self, label)", "def countPixels(input_img, input_mode, x, y, r):\n\n if input_mode == 'fp':\n np_img = loadImage(input_img)\n elif input_mode == 'np':\n np_img = input_img\n else:\n return (input_mode, \" is not a supported mode. 
Supported modes are 'np' or 'fp'.\")\n\n base_img = circleMask(np_img, x, y, r, 'exterior')\n\n core_img = circleMask(np_img, x, y, r*0.8, 'exterior')\n core_count = np.count_nonzero(base_img*core_img)\n\n inner_img = circleMask(np_img, x, y, r*0.8, 'exterior')\n inner_ring = base_img - inner_img\n inner_count = np.count_nonzero(inner_ring)\n\n outer_img = circleMask(np_img, x, y, r*1.2, 'exterior')\n outer_ring = outer_img - base_img\n outer_count = np.count_nonzero(outer_ring)\n\n\n\n return (core_count, inner_count, outer_count)", "def sum(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.add.reduce(self, out=out, axis=axis, keepdims=keepdims, dtype=dtype)", "def calculate_pixels(self, lower, upper):\n\n lower_range = np.array(lower)\n upper_range = np.array(upper)\n\n mask = cv2.inRange(self.hsv, lower_range, upper_range)\n return (mask == 255).sum()", "def getArea(self):\r\n return np.sum(self.array[:])", "def GetSum(self, label: 'unsigned short') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC3IUS3_GetSum(self, label)", "def sum(self, axis: int = 0):\r\n self.values = self.values.sum(axis=axis)\r\n self.layers = [None]\r\n return self.copy()", "def sum(self) -> \"Stream[float]\":\n return self.agg(np.sum).astype(\"float\")", "def sum(matrix):\n\n return float(sum([sum(row) for row in matrix]))", "def light_measure_Z0_weit(data, weit_data, pix_size, cx, cy, R_bins):\n\tNx = data.shape[1]\n\tNy = data.shape[0]\n\tx0 = np.linspace(0, Nx-1, Nx)\n\ty0 = np.linspace(0, Ny-1, Ny)\n\tpix_id = np.array(np.meshgrid(x0,y0))\n\n\t#..center pixel point\n\tdev_05_x = cx - np.int( cx )\n\tdev_05_y = cy - np.int( cy )\n\n\tif dev_05_x > 0.5:\n\t\txn = np.int( cx ) + 1\n\telse:\n\t\txn = np.int( cx )\n\n\tif dev_05_y > 0.5:\n\t\tyn = np.int( cy ) + 1\n\telse:\n\t\tyn = np.int( cy )\n\n\ttheta = np.arctan2((pix_id[1,:] - yn), (pix_id[0,:] - xn))\n\tchi = theta * 180 / np.pi\n\t# radius in unit of pixel number\n\trbin = R_bins + 0.\n\n\tN_bins = len( rbin )\n\n\tintens = np.zeros(N_bins, dtype = np.float)\n\tintens_err = np.zeros(N_bins, dtype = np.float)\n\tAngl_r = np.zeros(N_bins, dtype = np.float)\n\tN_pix = np.zeros(N_bins, dtype = np.float)\n\tnsum_ratio = np.zeros(N_bins, dtype = np.float)\n\n\tdr = np.sqrt(((2*pix_id[0] + 1) / 2 - (2*xn + 1) / 2)**2 + ((2*pix_id[1] + 1) / 2 - (2*yn + 1) / 2)**2)\n\n\tfor k in range(N_bins - 1):\n\t\tcdr = rbin[k + 1] - rbin[k]\n\t\td_phi = (cdr / ( 0.5 * (rbin[k] + rbin[k + 1]) ) ) * 180 / np.pi\n\t\tN_phi = np.int(360 / d_phi) + 1\n\t\tphi = np.linspace(0, 360, N_phi)\n\t\tphi = phi - 180\n\n\t\tir = (dr >= rbin[k]) * (dr < rbin[k + 1])\n\n\t\tbool_sum = np.sum(ir)\n\n\t\tr_iner = rbin[k]\n\t\tr_out = rbin[k + 1]\n\n\t\tif bool_sum == 0:\n\t\t\tAngl_r[k] = 0.5 * (r_iner + r_out) * pix_size\n\t\telse:\n\t\t\tweit_arr = weit_data[ir]\n\t\t\tsamp_flux = data[ir]\n\t\t\tsamp_chi = chi[ir]\n\n\t\t\ttot_flux = np.nansum(samp_flux * weit_arr) / np.nansum(weit_arr)\n\t\t\tidnn = np.isnan( samp_flux )\n\t\t\tN_pix[k] = np.sum( idnn == False )\n\t\t\tnsum_ratio[k] = np.nansum(weit_arr) / np.sum( idnn == False )\n\n\t\t\tintens[k] = tot_flux\n\t\t\t#Angl_r[k] = 0.5 * (r_iner + r_out) * pix_size\n\t\t\tAngl_r[k] = np.nansum( dr[ir] * weit_arr ) / np.nansum( weit_arr ) * pix_size\n\n\t\t\ttmpf = []\n\t\t\tfor tt in range(len(phi) - 1):\n\t\t\t\tiv = (samp_chi >= phi[tt]) & (samp_chi <= phi[tt+1])\n\n\t\t\t\tset_samp = samp_flux[iv]\n\t\t\t\tset_weit = weit_arr[iv]\n\n\t\t\t\tttf = np.nansum(set_samp * 
set_weit) / np.nansum(set_weit)\n\t\t\t\ttmpf.append(ttf)\n\n\t\t\t# rms of flux\n\t\t\ttmpf = np.array(tmpf)\n\t\t\tid_inf = np.isnan(tmpf)\n\t\t\ttmpf[id_inf] = np.nan\n\t\t\tid_zero = tmpf == 0\n\t\t\ttmpf[id_zero] = np.nan\n\t\t\tid_nan = np.isnan(tmpf)\n\t\t\tid_fals = id_nan == False\n\t\t\tTmpf = tmpf[id_fals]\n\n\t\t\t#RMS = np.sqrt( np.sum(Tmpf**2) / len(Tmpf) )\n\t\t\tRMS = np.std(Tmpf)\n\t\t\tif len(Tmpf) > 1:\n\t\t\t\tintens_err[k] = RMS / np.sqrt(len(Tmpf) - 1)\n\t\t\telse:\n\t\t\t\tintens_err[k] = RMS\n\n\tidzo = N_pix < 1\n\n\tIntns = intens.copy()\n\tIntns[idzo] = 0.\n\tIntns_err = intens_err.copy()\n\tIntns_err[idzo] = 0.\n\tnsum_ratio[idzo] = 0.\n\n\tIntns, Intns_err = Intns / pix_size**2, Intns_err / pix_size**2\n\n\treturn Intns, Angl_r, Intns_err, N_pix, nsum_ratio", "def GetSumOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIF2_GetSumOutput(self, *args)", "def find_pixels(self):\n ref_image=Image.open('sample0000.png')\n imarray=np.array(ref_image)\n ref_image.close()\n self.number_of_pix=imarray.shape\n print self.number_of_pix\n ref_image=None\n imarray=None", "def roi_subtraction(image, list_of_regions: List[Region]):\n # We're going to need to count all intensity in all the background, as well\n # as the number of pixels used in our measurement of the background.\n sum_of_bkg_areas = 0\n total_num_pixels = 0\n\n # Make sure we've been given multiple regions. If not, np: make a list.\n if isinstance(list_of_regions, Region):\n list_of_regions = [list_of_regions]\n\n # Add up all the intensity in all the pixels.\n for region in list_of_regions:\n # Now add the total intensity in this particular background region to\n # the intensity measured in all the background regions so far.\n sum_of_bkg_areas += np.sum(\n image.array_original[\n int(region.x_start):int(region.x_end),\n int(region.y_start):int(region.y_end)\n ]\n )\n # Add the number of pixels in this background ROI to the total number of\n # pixels used to compute the background measurement overall.\n total_num_pixels += region.num_pixels\n\n # Now Poisson stats can be abused to only calculate a single sqrt.\n err_of_bkg_areas = np.sqrt(sum_of_bkg_areas)\n if err_of_bkg_areas == 0:\n err_of_bkg_areas = 1\n\n # Get the per pixel background mean and stddev.\n bkg_per_pixel = sum_of_bkg_areas / total_num_pixels\n bkg_error_per_pixel = err_of_bkg_areas / total_num_pixels\n\n # Expose the calculated background and background_error per pixel.\n return BkgSubInfo(bkg_per_pixel, bkg_error_per_pixel, roi_subtraction)", "def sumRange(self, i, j):\r\n return self.dp[i][j]", "def media(img):\n\tsize = img.shape\n\tsume = 0\n\tdim = size[0]*size[1]\n\tfor i in range(size[0]):\n\t\tfor j in range(size[1]):\n\t\t\tsume += img[i,j]\n\treturn sume/dim", "def GetSumOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUC2_GetSumOutput(self, *args)", "def GetSumOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUL2_GetSumOutput(self, *args)", "def GetSumOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterID2_GetSumOutput(self, *args)", "def get_sumkw(self):\n sumkw_l = self.read_register(4107, 0, 3) \n sumkw_h = self.read_register(4108, 0, 3) \n return (sumkw_h * 255) + sumkw_l", "def integral (self):\n dx = self.xbins[1] - self.xbins[0]\n dy = self.ybins[1] - self.ybins[0]\n return self.sum * (dx * dy)", "def test_array_sum_equals_one(self):\n plugin = NonLinearWeights(0.85)\n result = 
plugin.process(self.cube, self.coord_name)\n self.assertAlmostEqual(result.data.sum(), 1.0)", "def sumRange(self, i, j):\n if not self.d:\n return 0\n return self.d[j + 1] - self.d[i]\n\n\n\n # Your NumArray object will be instantiated and called as such:", "def with_sum_sum_reduction(self):\n return self.with_reduction(lambda x: x.sum())", "def sum_grid(self, grid):\n new_grid = []\n for i in range(self.grid_size):\n new_grid.append(self.sum_row(grid[i]))\n return new_grid" ]
[ "0.6154293", "0.603963", "0.600956", "0.59598166", "0.5920878", "0.59154606", "0.59146667", "0.58369637", "0.5797925", "0.57950807", "0.5758008", "0.5724144", "0.56694096", "0.55569243", "0.5546531", "0.54882556", "0.54564047", "0.5439962", "0.5425233", "0.541275", "0.54115766", "0.5408521", "0.537066", "0.53670967", "0.53474265", "0.53362364", "0.53253925", "0.53244007", "0.53038186", "0.5302875", "0.52985877", "0.52758044", "0.52758044", "0.52758044", "0.52758044", "0.52544546", "0.5241351", "0.51958805", "0.5163707", "0.5156206", "0.51561797", "0.51273507", "0.51159215", "0.5104029", "0.5100693", "0.5092999", "0.5089864", "0.508751", "0.50829387", "0.5075398", "0.5068984", "0.5052348", "0.5042489", "0.50398064", "0.5034698", "0.50283295", "0.50235075", "0.5021869", "0.5016627", "0.50162286", "0.5015034", "0.5008322", "0.50056595", "0.50042206", "0.4998838", "0.49971437", "0.49950045", "0.49941805", "0.49915075", "0.4987611", "0.49850252", "0.49763831", "0.49728745", "0.49662662", "0.49576193", "0.49508187", "0.4943138", "0.49365088", "0.49337408", "0.49334943", "0.4928026", "0.49223876", "0.49197277", "0.49131715", "0.49102834", "0.490988", "0.48998803", "0.48994127", "0.48803052", "0.48782435", "0.48779786", "0.4873901", "0.48692793", "0.48600242", "0.48597214", "0.48588037", "0.4857883", "0.4857679", "0.4852021", "0.48501062", "0.4848743" ]
0.0
-1
Build a WCS object given the spatial header parameters.
def set_wcs(head):
    logger = logging.getLogger(__name__)

    # Create a new WCS object.
    wcs = WCS(head)

    if wcs.naxis > 3:
        wcs = wcs.dropaxis(2)

    logger.debug('WCS contains {0} axes.'.format(wcs.naxis))

    return wcs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readWCS(header):\r\n # Creating the WCS instance\r\n w = wcs.WCS(header = header)\r\n # Filling the WCS\r\n w.wcs.crval = N.array([float(header[\"CRVAL1\"]), float(header[\"CRVAL2\"])])\r\n w.wcs.crpix = N.array([float(header[\"CRPIX1\"]), float(header[\"CRPIX2\"])])\r\n w.wcs.cdelt = N.array([float(header[\"CD1_1\"]) , float(header[\"CD2_2\"])])\r\n # Returning the filled instance\r\n return w", "def parse_coordinates(self):\n header = self.header\n wcs = WCS()\n try:\n wcs.crval = header['crval1'], header['crval2']\n wcs.crpix = header['crpix1'] - 1, header['crpix2'] - 1\n wcs.cdelt = header['cdelt1'], header['cdelt2']\n except KeyError:\n msg = \"Coordinate system not specified in FITS\"\n logger.error(msg)\n raise TypeError(msg)\n try:\n wcs.ctype = header['ctype1'], header['ctype2']\n except KeyError:\n wcs.ctype = 'unknown', 'unknown'\n try:\n wcs.crota = float(header['crota1']), float(header['crota2'])\n except KeyError:\n wcs.crota = 0., 0.\n try:\n wcs.cunit = header['cunit1'], header['cunit2']\n except KeyError:\n # The \"Definition of the Flexible Image Transport System\", version\n # 3.0, tells us that \"units for celestial coordinate systems defined\n # in this Standard must be degrees\", so we assume that if nothing else\n # is specifiedj\n msg = \"WCS units unknown; using degrees\"\n logger.warning(msg)\n wcs.cunit = 'deg', 'deg'\n return wcs", "def make_wcs(shape, galactic=False):\n wcs = WCS(naxis=2)\n rho = np.pi / 3.0\n scale = 0.1 / 3600.0 # 0.1 arcsec/pixel in deg/pix\n\n wcs.pixel_shape = shape\n wcs.wcs.crpix = [shape[1] / 2, shape[0] / 2] # 1-indexed (x, y)\n wcs.wcs.crval = [197.8925, -1.36555556]\n wcs.wcs.cunit = ['deg', 'deg']\n wcs.wcs.cd = [[-scale * np.cos(rho), scale * np.sin(rho)],\n [scale * np.sin(rho), scale * np.cos(rho)]]\n if not galactic:\n wcs.wcs.radesys = 'ICRS'\n wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n else:\n wcs.wcs.ctype = ['GLON-CAR', 'GLAT-CAR']\n\n return wcs", "def makeWcs(projName, destCtrInd, skyOffset, rotAng, scaleFac, srcWcs, srcCtrInd):\n ps = dafBase.PropertySet()\n srcCtrPix = lsst.geom.Point2D(*[float(val) for val in srcCtrInd])\n destCtrFitsPix = lsst.geom.Point2D(*[ind + 1.0 for ind in destCtrInd])\n srcCtrFitsPix = lsst.geom.Point2D(*[ind + 1.0 for ind in srcCtrInd])\n # offset 1 pixel in x to compute orientation\n srcOffFitsPix = srcCtrFitsPix + lsst.geom.Extent2D(1.0, 0.0)\n srcCtrSkyPos = srcWcs.pixelToSky(srcCtrFitsPix)\n srcOffSkyPos = srcWcs.pixelToSky(srcOffFitsPix)\n srcAngleRad = srcCtrSkyPos.bearingTo(srcOffSkyPos).asRadians()\n destAngleRad = srcAngleRad + (rotAng / DegPerRad)\n srcScale = srcWcs.getPixelScale(srcCtrPix).asDegrees()\n destScale = srcScale / scaleFac\n for i in range(2):\n ip1 = i + 1\n ctypeStr = (\"%-5s%3s\" % ((\"RA\", \"DEC\")[i], projName)).replace(\" \", \"-\")\n ps.add(\"CTYPE%1d\" % (ip1,), ctypeStr)\n ps.add(\"CRPIX%1d\" % (ip1,), destCtrFitsPix[i])\n ps.add(\"CRVAL%1d\" % (ip1,), srcCtrSkyPos[i].asDegrees() + skyOffset[i])\n ps.add(\"RADESYS\", \"ICRS\")\n ps.add(\"EQUINOX\", 2000)\n ps.add(\"CD1_1\", -destScale * math.cos(destAngleRad))\n ps.add(\"CD2_1\", destScale * math.sin(destAngleRad))\n ps.add(\"CD1_2\", destScale * math.sin(destAngleRad))\n ps.add(\"CD2_2\", destScale * math.cos(destAngleRad))\n return lsst.afw.geom.makeSkyWcs(ps)", "def calculateSipWcsHeader(wcs, order, bbox, spacing, header=None):\n transform = getPixelToIntermediateWorldCoords(wcs)\n crpix = wcs.getPixelOrigin()\n cdMatrix = wcs.getCdMatrix()\n crval = wcs.getSkyOrigin()\n gridNum = 
Extent2I(int(bbox.getWidth()/spacing + 0.5), int(bbox.getHeight()/spacing + 0.5))\n\n sip = SipApproximation(transform, crpix, cdMatrix, Box2D(bbox), gridNum, order)\n\n md = makeTanSipMetadata(sip.getPixelOrigin(), crval, sip.getCdMatrix(), sip.getA(), sip.getB(),\n sip.getAP(), sip.getBP())\n\n if header is not None:\n header.combine(md)\n else:\n header = md\n\n return header", "def get_WCS (ff_hdus_list, which_hdu=0):\n if (which_hdu >= len(ff_hdus_list)): # sanity check\n return None\n return wcs.WCS(ff_hdus_list[which_hdu].header)", "def to_header(wcs, relax=True):\n header = wcs.to_header(relax=relax)\n if hasattr(wcs, '_naxis1'):\n header['NAXIS'] = wcs.naxis\n header['NAXIS1'] = wcs._naxis1\n header['NAXIS2'] = wcs._naxis2\n \n for k in header:\n if k.startswith('PC'):\n cd = k.replace('PC','CD')\n header.rename_keyword(k, cd)\n \n return header", "def make_gwcs(shape, galactic=False):\n from gwcs import coordinate_frames as cf\n from gwcs import wcs as gwcs_wcs\n\n rho = np.pi / 3.0\n scale = 0.1 / 3600.0 # 0.1 arcsec/pixel in deg/pix\n\n shift_by_crpix = (models.Shift((-shape[1] / 2) + 1)\n & models.Shift((-shape[0] / 2) + 1))\n\n cd_matrix = np.array([[-scale * np.cos(rho), scale * np.sin(rho)],\n [scale * np.sin(rho), scale * np.cos(rho)]])\n\n rotation = models.AffineTransformation2D(cd_matrix, translation=[0, 0])\n rotation.inverse = models.AffineTransformation2D(\n np.linalg.inv(cd_matrix), translation=[0, 0])\n\n tan = models.Pix2Sky_TAN()\n celestial_rotation = models.RotateNative2Celestial(197.8925, -1.36555556,\n 180.0)\n\n det2sky = shift_by_crpix | rotation | tan | celestial_rotation\n det2sky.name = 'linear_transform'\n\n detector_frame = cf.Frame2D(name='detector', axes_names=('x', 'y'),\n unit=(u.pix, u.pix))\n\n if galactic:\n sky_frame = cf.CelestialFrame(reference_frame=coord.Galactic(),\n name='galactic', unit=(u.deg, u.deg))\n else:\n sky_frame = cf.CelestialFrame(reference_frame=coord.ICRS(),\n name='icrs', unit=(u.deg, u.deg))\n\n pipeline = [(detector_frame, det2sky), (sky_frame, None)]\n\n return gwcs_wcs.WCS(pipeline)", "def define_wcs(skypos,skyrange,width=False,height=False,verbose=0,\n\t\t\t pixsz=0.000416666666666667):\n\tif verbose:\n\t\tprint_inline('Defining World Coordinate System (WCS).')\n\twcs = pywcs.WCS(naxis=2) # NAXIS = 2\n\timsz = gxt.deg2pix(skypos,skyrange)\n\twcs.wcs.cdelt = np.array([-pixsz,pixsz])\n\twcs.wcs.ctype = ['RA---TAN','DEC--TAN']\n\twcs.wcs.crpix = [(imsz[1]/2.)+0.5,(imsz[0]/2.)+0.5]\n\twcs.wcs.crval = skypos\n\treturn wcs", "def combined_wcs(self) -> BaseHighLevelWCS:", "def make_wcsheader(ra=40.07293, dec=-1.6137748, size=2, pixscale=0.1, get_hdu=False, theta=0):\n \n if np.isscalar(pixscale):\n cdelt = [pixscale/3600.]*2\n else:\n cdelt = [pixscale[0]/3600., pixscale[1]/3600.]\n \n if np.isscalar(size):\n npix = np.cast[int]([size/pixscale, size/pixscale])\n else:\n npix = np.cast[int]([size[0]/pixscale, size[1]/pixscale])\n \n hout = pyfits.Header()\n hout['CRPIX1'] = npix[0]/2\n hout['CRPIX2'] = npix[1]/2\n hout['CRVAL1'] = ra\n hout['CRVAL2'] = dec\n hout['CD1_1'] = -cdelt[0]\n hout['CD1_2'] = hout['CD2_1'] = 0.\n hout['CD2_2'] = cdelt[1]\n hout['NAXIS1'] = npix[0]\n hout['NAXIS2'] = npix[1]\n hout['CTYPE1'] = 'RA---TAN'\n hout['CTYPE2'] = 'DEC--TAN'\n \n wcs_out = pywcs.WCS(hout)\n \n theta_rad = np.deg2rad(theta)\n mat = np.array([[np.cos(theta_rad), -np.sin(theta_rad)], \n [np.sin(theta_rad), np.cos(theta_rad)]])\n\n rot_cd = np.dot(mat, wcs_out.wcs.cd)\n \n for i in [0,1]:\n for j in [0,1]:\n 
hout['CD{0:d}_{1:d}'.format(i+1, j+1)] = rot_cd[i,j]\n wcs_out.wcs.cd[i,j] = rot_cd[i,j]\n \n cd = wcs_out.wcs.cd\n wcs_out.pscale = get_wcs_pscale(wcs_out) #np.sqrt((cd[0,:]**2).sum())*3600.\n \n if get_hdu:\n hdu = pyfits.ImageHDU(header=hout, data=np.zeros((npix[1], npix[0]), dtype=np.float32))\n return hdu\n else:\n return hout, wcs_out", "def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None):\n msgs.info(\"Calculating the WCS\")\n # Get the x and y binning factors, and the typical slit length\n binspec, binspat = parse.parse_binning(self.get_meta_value([hdr], 'binning'))\n\n # Get the pixel and slice scales\n pxscl = platescale * binspat / 3600.0 # Need to convert arcsec to degrees\n slscl = self.get_meta_value([hdr], 'slitwid')\n if spatial_scale is not None:\n if pxscl > spatial_scale / 3600.0:\n msgs.warn(\"Spatial scale requested ({0:f}'') is less than the pixel scale ({1:f}'')\".format(spatial_scale, pxscl*3600.0))\n # Update the pixel scale\n pxscl = spatial_scale / 3600.0 # 3600 is to convert arcsec to degrees\n\n # Get the typical slit length (this changes by ~0.3% over all slits, so a constant is fine for now)\n slitlength = int(np.round(np.median(slits.get_slitlengths(initial=True, median=True))))\n\n # Get RA/DEC\n raval = self.get_meta_value([hdr], 'ra')\n decval = self.get_meta_value([hdr], 'dec')\n\n # Create a coordinate\n coord = SkyCoord(raval, decval, unit=(units.deg, units.deg))\n\n # Get rotator position\n msgs.warn(\"HACK FOR MAAT SIMS --- NEED TO FIGURE OUT RPOS and RREF FOR MAAT FROM HEADER INFO\")\n if 'ROTPOSN' in hdr:\n rpos = hdr['ROTPOSN']\n else:\n rpos = 0.\n if 'ROTREFAN' in hdr:\n rref = hdr['ROTREFAN']\n else:\n rref = 0.\n # Get the offset and PA\n rotoff = 0.0 # IFU-SKYPA offset (degrees)\n skypa = rpos + rref # IFU position angle (degrees)\n crota = np.radians(-(skypa + rotoff))\n\n # Calculate the fits coordinates\n cdelt1 = -slscl\n cdelt2 = pxscl\n if coord is None:\n ra = 0.\n dec = 0.\n crota = 1\n else:\n ra = coord.ra.degree\n dec = coord.dec.degree\n # Calculate the CD Matrix\n cd11 = cdelt1 * np.cos(crota) # RA degrees per column\n cd12 = abs(cdelt2) * np.sign(cdelt1) * np.sin(crota) # RA degrees per row\n cd21 = -abs(cdelt1) * np.sign(cdelt2) * np.sin(crota) # DEC degress per column\n cd22 = cdelt2 * np.cos(crota) # DEC degrees per row\n # Get reference pixels (set these to the middle of the FOV)\n crpix1 = 11 # i.e. 
see get_datacube_bins (11 is used as the reference point - somewhere in the middle of the FOV)\n crpix2 = slitlength / 2.\n crpix3 = 1.\n # Get the offset\n msgs.warn(\"HACK FOR MAAT SIMS --- Need to obtain offset from header?\")\n off1 = 0.\n off2 = 0.\n off1 /= binspec\n off2 /= binspat\n crpix1 += off1\n crpix2 += off2\n\n # Create a new WCS object.\n msgs.info(\"Generating MAAT WCS\")\n w = wcs.WCS(naxis=3)\n w.wcs.equinox = hdr['EQUINOX']\n w.wcs.name = 'MAAT'\n w.wcs.radesys = 'FK5'\n # Insert the coordinate frame\n w.wcs.cname = ['MAAT RA', 'MAAT DEC', 'MAAT Wavelength']\n w.wcs.cunit = [units.degree, units.degree, units.Angstrom]\n w.wcs.ctype = [\"RA---TAN\", \"DEC--TAN\", \"WAVE\"]\n w.wcs.crval = [ra, dec, wave0] # RA, DEC, and wavelength zeropoints\n w.wcs.crpix = [crpix1, crpix2, crpix3] # RA, DEC, and wavelength reference pixels\n w.wcs.cd = np.array([[cd11, cd12, 0.0], [cd21, cd22, 0.0], [0.0, 0.0, dwv]])\n w.wcs.lonpole = 180.0 # Native longitude of the Celestial pole\n w.wcs.latpole = 0.0 # Native latitude of the Celestial pole\n\n return w", "def wcs(self):\n model = self.model\n return gwcs.WCS(forward_transform=model,\n input_frame=_generate_generic_frame(model.n_inputs, u.pix),\n output_frame=self.frame)", "def make_spectrum_wcsheader(center_wave=1.4e4, dlam=40, NX=100, spatial_scale=1, NY=10):\n \n h = pyfits.ImageHDU(data=np.zeros((2*NY, 2*NX), dtype=np.float32))\n \n refh = h.header\n refh['CRPIX1'] = NX+1\n refh['CRPIX2'] = NY+1\n refh['CRVAL1'] = center_wave\n refh['CD1_1'] = dlam\n refh['CD1_2'] = 0.\n refh['CRVAL2'] = 0.\n refh['CD2_2'] = spatial_scale\n refh['CD2_1'] = 0.\n refh['RADESYS'] = ''\n \n refh['CTYPE1'] = 'WAVE'\n refh['CTYPE2'] = 'LINEAR'\n \n ref_wcs = pywcs.WCS(h.header)\n ref_wcs.pscale = np.sqrt(ref_wcs.wcs.cd[0,0]**2 + ref_wcs.wcs.cd[1,0]**2)*3600.\n \n return refh, ref_wcs", "def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out", "def make_wcs_from_hpx(self, sum_ebins=False, proj='CAR', oversample=2,\n normalize=True):\n self._wcs_proj = proj\n self._wcs_oversample = oversample\n self._wcs_2d = self.hpx.make_wcs(2, proj=proj, oversample=oversample)\n self._hpx2wcs = HpxToWcsMapping(self.hpx, self._wcs_2d)\n wcs, wcs_data = self.convert_to_cached_wcs(self.counts, sum_ebins,\n normalize)\n return wcs, wcs_data", "def convertToWCS(x, y, wcs_hdr):\n w = WCS(wcs_hdr)\n xy_coords = np.column_stack([x, y])\n \n # FITS convention, so use Fortran-like 1-based origin\n world = w.all_pix2world(xy_coords, 1)\n ra, dec = world[:, 0], world[:, 1]\n \n return ra, dec", "def 
build_cs(self, gl):\n\n cs = gl.compute_shader(GLUtil.shader(\"./gl/cs/cs.glsl\"))\n\n u_time = None\n u_width = None\n u_height = None\n if \"u_time\" in cs:\n u_time = cs[\"u_time\"]\n\n if \"u_width\" in cs:\n u_width = cs[\"u_width\"]\n\n if \"u_height\" in cs:\n u_height = cs[\"u_height\"]\n\n buf_in = gl.buffer(reserve=width * height * 4 * 4)\n buf_in.bind_to_storage_buffer(0)\n\n buf_out = gl.buffer(reserve=width * height * 4 * 4)\n buf_out.bind_to_storage_buffer(1)\n\n return cs, [u_time, u_width, u_height], [buf_in, buf_out]", "def addWCSKeywords(wcs, hdr, blot=False, single=False, after=None):\n wname = wcs.wcs.name\n wtype = updatehdr.interpret_wcsname_type(wname)\n\n # Update WCS Keywords based on PyDrizzle product's value\n # since 'drizzle' itself doesn't update that keyword.\n # If output wcs does not have a name (wcs.name), then\n # the value from the input hdr will remain as the name\n # for this WCS solution.\n if wname != '':\n # Replace input WCSNAME value with name from user-specified WCS\n # since it was defined.\n hdr['WCSNAME'] = wname\n hdr['WCSTYPE'] = wtype\n hdr.set('VAFACTOR', value=1.0, after=after)\n hdr.set('ORIENTAT', value=wcs.orientat, after=after)\n\n # Use of 'after' not needed if these keywords already exist in the header\n if after in WCS_KEYWORDS:\n after = None\n\n # if 'CTYPE1' not in hdr:\n hdr.set('CTYPE2', value=wcs.wcs.ctype[1], after=after)\n hdr.set('CTYPE1', value=wcs.wcs.ctype[0], after=after)\n hdr.set('CRPIX2', value=wcs.wcs.crpix[1], after=after)\n hdr.set('CRPIX1', value=wcs.wcs.crpix[0], after=after)\n hdr.set('CRVAL2', value=wcs.wcs.crval[1], after=after)\n hdr.set('CRVAL1', value=wcs.wcs.crval[0], after=after)\n hdr.set('CD2_2', value=wcs.wcs.cd[1][1], after=after)\n hdr.set('CD2_1', value=wcs.wcs.cd[1][0], after=after)\n hdr.set('CD1_2', value=wcs.wcs.cd[0][1], after=after)\n hdr.set('CD1_1', value=wcs.wcs.cd[0][0], after=after)\n\n # delete distortion model related keywords\n deleteDistortionKeywords(hdr)\n\n if not blot:\n blendheaders.remove_distortion_keywords(hdr)", "def to_swc(self, contributors=\"\"):\n from . 
import __version__\n sx, sy, sz = np.diag(self.transform)[:3]\n\n swc_header = f\"\"\"# ORIGINAL_SOURCE CloudVolume {__version__}\n# CREATURE \n# REGION\n# FIELD/LAYER\n# TYPE\n# CONTRIBUTOR {contributors}\n# REFERENCE\n# RAW \n# EXTRAS \n# SOMA_AREA\n# SHINKAGE_CORRECTION \n# VERSION_NUMBER {__version__}\n# VERSION_DATE {datetime.datetime.utcnow().isoformat()}\n# SCALE {sx:.6f} {sy:.6f} {sz:.6f}\n\"\"\"\n\n def generate_swc(skel, offset):\n if skel.edges.size == 0:\n return \"\"\n\n index = defaultdict(set)\n visited = defaultdict(bool)\n for e1, e2 in skel.edges:\n index[e1].add(e2)\n index[e2].add(e1)\n\n stack = [ skel.edges[0,0] ]\n parents = [ -1 ]\n\n swc = \"\"\n\n while stack:\n node = stack.pop()\n parent = parents.pop()\n\n if visited[node]:\n continue\n\n swc += \"{n} {T} {x:0.6f} {y:0.6f} {z:0.6f} {R:0.6f} {P}\\n\".format(\n n=(node + 1 + offset),\n T=skel.vertex_types[node],\n x=skel.vertices[node][0],\n y=skel.vertices[node][1],\n z=skel.vertices[node][2],\n R=skel.radii[node],\n P=parent if parent == -1 else (parent + 1 + offset),\n )\n\n visited[node] = True\n \n for child in index[node]:\n stack.append(child)\n parents.append(node)\n\n return swc\n\n skels = self.components()\n\n swc = swc_header + \"\\n\"\n offset = 0\n for skel in skels:\n swc += generate_swc(skel, offset) + \"\\n\"\n offset += skel.vertices.shape[0]\n\n return swc", "def full_spectrum_wcsheader(center_wave=1.4e4, dlam=40, NX=100, spatial_scale=1, NY=10):\n \n h = pyfits.ImageHDU(data=np.zeros((2*NY, 2*NX), dtype=np.float32))\n \n refh = h.header\n refh['CRPIX1'] = NX+1\n refh['CRPIX2'] = NY+1\n refh['CRVAL1'] = center_wave/1.e4\n refh['CD1_1'] = dlam/1.e4\n refh['CD1_2'] = 0.\n refh['CRVAL2'] = 0.\n refh['CD2_2'] = spatial_scale\n refh['CD2_1'] = 0.\n refh['RADESYS'] = ''\n \n refh['CTYPE1'] = 'RA---TAN-SIP'\n refh['CUNIT1'] = 'mas'\n refh['CTYPE2'] = 'DEC--TAN-SIP'\n refh['CUNIT2'] = 'mas'\n \n ref_wcs = pywcs.WCS(refh) \n ref_wcs.pscale = get_wcs_pscale(ref_wcs)\n \n return refh, ref_wcs", "def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header", "def replace_wcs(hdu, wcs):\n\n # Checks for old WCS keys in the form PC001002\n pc_old_pattern = re.compile('PC0*[0-9]{1}0*[0-9]{1}')\n header_keys = hdu.header.keys()\n pc_old_in_header = list(filter(pc_old_pattern.match, header_keys))\n\n wcs_keys = list(wcs.to_header().keys())\n\n for key in wcs_keys + pc_old_in_header:\n if key in hdu.header:\n del hdu.header[key]\n\n # Adds the new WCS header to the hdu\n hdu.header.extend(wcs.to_header().cards)\n\n # Manually make sure all PCX_Y keywords are present\n for ii in [0, 1]:\n for jj in [0, 1]:\n hdu.header['PC{}_{}'.format(ii + 1, jj + 1)] = wcs.wcs.pc[ii, jj]\n\n ra, dec = wcs.wcs_pix2world([[hdu.header['NAXIS1'] // 2,\n hdu.header['NAXIS2'] // 2]], 1)[0]\n hdu.header['CRVAL1'] = ra\n hdu.header['CRVAL2'] = dec\n hdu.header['CRPIX1'] = hdu.header['NAXIS1'] // 2\n hdu.header['CRPIX2'] = hdu.header['NAXIS2'] // 2\n\n return hdu", "def wfirst_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, naxis=(4096,4096)):\n #naxis = 2048, 2048\n crpix = naxis[0]/2., naxis[0]/2.\n \n cd = np.array([[ -0.11, 0], [0, 0.11]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = 
cd_rot[i,j]\n \n h['BACKGR'] = 0.17+0.49, 'Total, e/s SDT Report A-1'\n h['FILTER'] = 'GRS', 'WFIRST grism'\n h['INSTRUME'] = 'WFIRST'\n h['READN'] = 17, 'SDT report Table 3-3' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def _build_headers(self):\n headers = {}\n headers.update(self.data_sources)\n headers.update(self.seasons)\n headers.update(self.region)\n headers.update(self.subregions)\n return headers", "def test_wcs_extras():\n data = np.ones([6, 6], dtype=np.float64)\n header = {'CRVAL1': 0,\n 'CRVAL2': 0,\n 'CRPIX1': 5,\n 'CRPIX2': 5,\n 'CDELT1': 10,\n 'CDELT2': 10,\n 'CUNIT1': 'arcsec',\n 'CUNIT2': 'arcsec',\n 'PC1_1': 0,\n 'PC1_2': -1,\n 'PC2_1': 1,\n 'PC2_2': 0,\n 'NAXIS1': 6,\n 'NAXIS2': 6,\n 'CTYPE1': 'HPLN-TAN',\n 'CTYPE2': 'HPLT-TAN',\n 'date-obs': '1970-01-01T00:00:00',\n 'obsrvtry': 'Foo',\n 'detector': 'bar',\n 'wavelnth': 10,\n 'waveunit': 'm',\n 'hglt_obs': 0,\n 'hgln_obs': 0,\n 'dsun_obs': 10,\n 'rsun_ref': 690000000}\n generic_map = sunpy.map.Map((data, header))\n\n wcs = generic_map.wcs\n\n assert wcs.heliographic_observer.lat.value == 0\n assert wcs.heliographic_observer.lon.value == 0\n assert wcs.heliographic_observer.radius.value == 10\n assert wcs.rsun.value == header['rsun_ref']\n\n result = solar_wcs_frame_mapping(wcs)\n\n assert isinstance(result, Helioprojective)\n assert result.observer.lat.value == 0\n assert result.observer.lon.value == 0\n assert result.observer.radius.value == 10\n assert result.rsun.value == header['rsun_ref']", "def compute_output_wcs(wcs_list, pixel_scale=0.1, max_size=10000):\n from shapely.geometry import Polygon\n \n footprint = Polygon(wcs_list[0].calc_footprint())\n for i in range(1, len(wcs_list)):\n fp_i = Polygon(wcs_list[i].calc_footprint())\n footprint = footprint.union(fp_i)\n \n x, y = footprint.convex_hull.boundary.xy\n x, y = np.array(x), np.array(y)\n\n # center\n crval = np.array(footprint.centroid.xy).flatten()\n \n # dimensions in arcsec\n xsize = (x.max()-x.min())*np.cos(crval[1]/180*np.pi)*3600\n ysize = (y.max()-y.min())*3600\n\n xsize = np.minimum(xsize, max_size*pixel_scale)\n ysize = np.minimum(ysize, max_size*pixel_scale)\n \n header, outputwcs = make_wcsheader(ra=crval[0], dec=crval[1], size=(xsize, ysize), pixscale=pixel_scale, get_hdu=False, theta=0)\n \n return header, outputwcs", "def _write_header(self, header):\n # write out telescope and source information\n header[\"latitude\"] = self.telescope_location_lat_lon_alt_degrees[0]\n header[\"longitude\"] = self.telescope_location_lat_lon_alt_degrees[1]\n header[\"altitude\"] = self.telescope_location_lat_lon_alt_degrees[2]\n header[\"telescope_name\"] = np.string_(self.telescope_name)\n header[\"instrument\"] = np.string_(self.instrument)\n header[\"object_name\"] = np.string_(self.object_name)\n\n # write out required UVParameters\n header[\"Nants_data\"] = self.Nants_data\n header[\"Nants_telescope\"] = self.Nants_telescope\n header[\"Nbls\"] = self.Nbls\n header[\"Nblts\"] = self.Nblts\n header[\"Nfreqs\"] = self.Nfreqs\n header[\"Npols\"] = self.Npols\n header[\"Nspws\"] = self.Nspws\n header[\"Ntimes\"] = self.Ntimes\n header[\"antenna_numbers\"] = self.antenna_numbers\n header[\"uvw_array\"] = self.uvw_array\n header[\"vis_units\"] = np.string_(self.vis_units)\n header[\"channel_width\"] = self.channel_width\n header[\"time_array\"] = self.time_array\n header[\"freq_array\"] = self.freq_array\n header[\"integration_time\"] = self.integration_time\n header[\"lst_array\"] = 
self.lst_array\n header[\"polarization_array\"] = self.polarization_array\n header[\"spw_array\"] = self.spw_array\n header[\"ant_1_array\"] = self.ant_1_array\n header[\"ant_2_array\"] = self.ant_2_array\n header[\"antenna_positions\"] = self.antenna_positions\n\n # handle antenna_names; works for lists or arrays\n header[\"antenna_names\"] = np.asarray(self.antenna_names, dtype=\"bytes\")\n\n # write out phasing information\n header[\"phase_type\"] = np.string_(self.phase_type)\n if self.phase_center_ra is not None:\n header[\"phase_center_ra\"] = self.phase_center_ra\n if self.phase_center_dec is not None:\n header[\"phase_center_dec\"] = self.phase_center_dec\n if self.phase_center_epoch is not None:\n header[\"phase_center_epoch\"] = self.phase_center_epoch\n if self.phase_center_frame is not None:\n header[\"phase_center_frame\"] = np.string_(self.phase_center_frame)\n\n # write out optional parameters\n if self.dut1 is not None:\n header[\"dut1\"] = self.dut1\n if self.earth_omega is not None:\n header[\"earth_omega\"] = self.earth_omega\n if self.gst0 is not None:\n header[\"gst0\"] = self.gst0\n if self.rdate is not None:\n header[\"rdate\"] = np.string_(self.rdate)\n if self.timesys is not None:\n header[\"timesys\"] = np.string_(self.timesys)\n if self.x_orientation is not None:\n header[\"x_orientation\"] = np.string_(self.x_orientation)\n if self.blt_order is not None:\n header[\"blt_order\"] = np.string_(\", \".join(self.blt_order))\n if self.antenna_diameters is not None:\n header[\"antenna_diameters\"] = self.antenna_diameters\n if self.uvplane_reference_time is not None:\n header[\"uvplane_reference_time\"] = self.uvplane_reference_time\n if self.eq_coeffs is not None:\n header[\"eq_coeffs\"] = self.eq_coeffs\n if self.eq_coeffs_convention is not None:\n header[\"eq_coeffs_convention\"] = np.string_(self.eq_coeffs_convention)\n\n # write out extra keywords if it exists and has elements\n if self.extra_keywords:\n extra_keywords = header.create_group(\"extra_keywords\")\n for k in self.extra_keywords.keys():\n if isinstance(self.extra_keywords[k], str):\n extra_keywords[k] = np.string_(self.extra_keywords[k])\n else:\n extra_keywords[k] = self.extra_keywords[k]\n\n # write out history\n header[\"history\"] = np.string_(self.history)\n\n return", "def get_calib_from_header(header):\n\n prefix = 'HIERARCH GAMSE WLCALIB '\n\n xorder = header[prefix+'XORDER']\n yorder = header[prefix+'YORDER']\n\n coeff = np.zeros((yorder+1, xorder+1))\n for j, i in itertools.product(range(yorder+1), range(xorder+1)):\n coeff[j,i] = header[prefix+'COEFF {:d} {:d}'.format(j, i)]\n\n calib = {\n 'coeff': coeff,\n 'npixel': header[prefix+'NPIXEL'],\n 'k': header[prefix+'K'],\n 'offset': header[prefix+'OFFSET'],\n 'std': header[prefix+'STDDEV'],\n 'nuse': header[prefix+'NUSE'],\n 'ntot': header[prefix+'NTOT'],\n# 'identlist': calibwindow.identlist,\n 'window_size': header[prefix+'WINDOW_SIZE'],\n 'xorder': xorder,\n 'yorder': yorder,\n 'maxiter': header[prefix+'MAXITER'],\n 'clipping': header[prefix+'CLIPPING'],\n 'q_threshold': header[prefix+'Q_THRESHOLD'],\n 'direction': header[prefix+'DIRECTION'],\n }\n return calib", "def has_wcs(self):\n if self.header is None:\n return False\n\n required = 'CRPIX,CRVAL,CTYPE'.split(',')\n keywords = np.concatenate(\n [(lambda i: [r+str(i+1) for r in required])(i) \n for i in range(self.header['NAXIS'])])\n\n return all([k in self.header for k in keywords])", "def make_output_wcs(input_models, pscale_ratio=1.0):\n wcslist = [i.meta.wcs for i in input_models]\n 
for w, i in zip(wcslist, input_models):\n if w.bounding_box is None:\n w.bounding_box = wcs_bbox_from_shape(i.data.shape)\n naxes = wcslist[0].output_frame.naxes\n\n if naxes == 2:\n output_wcs = wcs_from_footprints(input_models, pscale_ratio=pscale_ratio)\n output_wcs.data_size = shape_from_bounding_box(output_wcs.bounding_box)\n else:\n raise RuntimeError(\"Output WCS needs 2 spatial axes. \"\n f\"{wcslist[0]} has {naxes}.\")\n\n # Check that the output data shape has no zero length dimensions\n if not np.product(output_wcs.data_size):\n raise ValueError(\"Invalid output frame shape: \"\n \"{}\".format(output_wcs.data_size))\n\n return output_wcs", "def build_ws_header(work_sheet, max_hits):\n first_header_info = ['Query #', 'Query Sequence',\n 'Top Hit Accession in L.h.', 'E-Value', 'Filename']\n r = 1\n c = 1\n for val in first_header_info:\n c = set_cell(ws, r, c, val)", "def __init__(\n self,\n system,\n class_name,\n header_path_prefix,\n header_extension,\n period_variant=False,\n ):\n self.system = system\n self.class_name = class_name\n self.header_path_prefix = header_path_prefix\n self.header_extension = header_extension\n template = (\n \"<\"\n + str(system.sysd.A.shape[0])\n + \", \"\n + str(system.sysd.B.shape[1])\n + \", \"\n + str(system.sysd.C.shape[0])\n + \">\"\n )\n\n self.period_variant = period_variant\n if period_variant:\n self.class_type = \"PeriodVariant\"\n self.plant_coeffs_header = \"PeriodVariantPlantCoeffs\"\n self.obsv_coeffs_header = \"PeriodVariantKalmanFilterCoeffs\"\n self.loop_header = \"PeriodVariantLoop\"\n else:\n self.class_type = \"StateSpace\"\n self.plant_coeffs_header = \"StateSpacePlantCoeffs\"\n self.obsv_coeffs_header = \"StateSpaceObserverCoeffs\"\n self.loop_header = \"StateSpaceLoop\"\n\n self.ctrl_coeffs_header = \"StateSpaceControllerCoeffs\"\n self.ctrl_coeffs_type = \"frc::\" + self.ctrl_coeffs_header + template\n self.plant_coeffs_type = \"frc::\" + self.plant_coeffs_header + template\n self.obsv_coeffs_type = \"frc::\" + self.obsv_coeffs_header + template\n self.loop_type = \"frc::\" + self.loop_header + template", "def fill_header_section():\n section = _SectionData(\"Header\")\n section.props.append((\"FormatVersion\", 1))\n section.props.append((\"Source\", get_combined_ver_str()))\n section.props.append((\"Type\", \"Configuration\"))\n section.props.append((\"Note\", \"User settings of SCS Blender Tools\"))\n author = bpy.context.user_preferences.system.author\n if author:\n section.props.append((\"Author\", str(author)))\n section.props.append((\"ConfigStoragePlace\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.config_storage_place)))\n section.props.append((\"DumpLevel\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.dump_level)))\n return section", "def to_wcs(self):\n wcs_list = self._indv_mem_wcslist()\n\n n = len(wcs_list)\n if n > 1:\n wcs = output_wcs(wcs_list)\n elif n == 1:\n wcs = wcs_list[0]\n else:\n wcs = None\n\n return wcs", "def coordinates_from_header(header):\n f = None\n if 'NAXIS' in header and header['NAXIS'] == 2:\n f = WCSCoordinates\n elif 'NAXIS' in header and header['NAXIS'] == 3:\n f = WCSCubeCoordinates\n if f:\n try:\n return f(header)\n except AttributeError as e:\n print e\n pass\n return Coordinates()", "def make_sunpy(evtdata, hdr):\n\n\t# Parse Header keywords\n\tfor field in hdr.keys():\n\t\tif field.find('TYPE') != -1:\n\t\t\tif hdr[field] == 'X':\n\t\t\t\tprint(hdr[field][5:8])\n\t\t\t\txval = field[5:8]\n\t\t\tif hdr[field] == 
'Y':\n\t\t\t\tprint(hdr[field][5:8])\n\t\t\t\tyval = field[5:8]\n\t\t\n\tmin_x= hdr['TLMIN'+xval]\n\tmin_y= hdr['TLMIN'+yval]\n\tmax_x= hdr['TLMAX'+xval]\n\tmax_y= hdr['TLMAX'+yval]\n\n\tdelx = hdr['TCDLT'+xval]\n\tdely = hdr['TCDLT'+yval]\n\n\tx = evtdata['X'][:]\n\ty = evtdata['Y'][:]\n\tmet = evtdata['TIME'][:]*u.s\n\tmjdref=hdr['MJDREFI']\n\tmid_obs_time = astropy.time.Time(mjdref*u.d+met.mean(), format = 'mjd')\n\n\t# Use the native binning for now\n\n\t# Assume X and Y are the same size\n\tresample = 1.0\n\tscale = delx * resample\n\tbins = (max_x - min_x) / (resample)\n\n\tH, yedges, xedges = np.histogram2d(y, x, bins=bins, range = [[min_y,max_y], [min_x, max_x]])\n\n\n\tdict_header = {\n\t\"DATE-OBS\": mid_obs_time.iso,\n\t\"CDELT1\": scale,\n\t\"NAXIS1\": bins,\n\t\"CRVAL1\": 0.,\n\t\"CRPIX1\": bins*0.5,\n\t\"CUNIT1\": \"arcsec\",\n\t\"CTYPE1\": \"HPLN-TAN\",\n\t\"CDELT2\": scale,\n\t\"NAXIS2\": bins,\n\t\"CRVAL2\": 0.,\n\t\"CRPIX2\": bins*0.5 + 0.5,\n\t\"CUNIT2\": \"arcsec\",\n\t\"CTYPE2\": \"HPLT-TAN\",\n\t\"HGLT_OBS\": 0,\n\t\"HGLN_OBS\": 0,\n\t\"RSUN_OBS\": sun.solar_semidiameter_angular_size(mid_obs_time).value,\n\t\"RSUN_REF\": sun.constants.radius.value,\n\t\"DSUN_OBS\": sun.sunearth_distance(mid_obs_time).value\n\t}\n\t# For some reason the DSUN_OBS crashed the save...\n\n\theader = sunpy.map.MapMeta(dict_header)\n\n\tnustar_map = sunpy.map.Map(H, header)\n\t\n\treturn nustar_map", "def read_additional_info_from_header(wcsprm, hdr, RA_input=None, DEC_input=None, projection_ra=None, projection_dec=None, ignore_header_rot=False, radius = -1., silent=False):\n fov_radius = 4 #arcmin radius to include field of view\n if(radius > 0):\n fov_radius = radius\n INCREASE_FOV_FLAG = False # increase the field to view by 50% to search in catalog if position on sky is inaccurate\n PIXSCALE_UNCLEAR = False\n\n keywords_check = [\"PIXSCALE\", \"NAXIS1\", \"NAXIS2\", \"RA\", \"DEC\"] #list of possible keywords the scs parser might miss\n keywords_present = [] # list of keywords that are actually present\n for i in keywords_check:\n if(i in hdr.keys()):\n keywords_present.append(i)\n\n if(\"NAXIS1\" not in keywords_present or \"NAXIS2\" not in keywords_present ):\n print(\"ERROR: NAXIS1 or NAXIS2 missing in file. Please add!\")\n else:\n axis1 = hdr[\"NAXIS1\"]\n axis2 = hdr[\"NAXIS2\"]\n\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n if((np.abs(wcs_pixscale[0])) < 1e-7 or (np.abs(wcs_pixscale[1])) < 1e-7 or\n (np.abs(wcs_pixscale[0])) > 5e-3 or (np.abs(wcs_pixscale[1])) > 5e-3):\n if(not silent):\n print(\"pixelscale is completely unrealistic. 
Will guess\")\n print(wcs_pixscale)\n guess = 8.43785734e-05\n #guess = 6.94444461259988e-05\n wcsprm.pc = [[1,0],[0,1]]\n wcsprm.cdelt = [guess, guess]\n if(not silent):\n print(\"Changed pixelscale to {:.3g} deg/arcsec\".format(guess))\n PIXSCALE_UNCLEAR = True\n if(ignore_header_rot):\n wcsprm.pc = [[1,0],[0,1]]\n #wcsprm.cdelt = [8.0006871225376e-05, 8.0006871225376e-05]\n if(\"PIXSCALE\" in keywords_present):\n #normal around 0.450000 / arcsec/pixel, for now i assume arcsec per pixel\n pixscale = hdr[\"PIXSCALE\"]\n if(\"deg\" in hdr.comments['PIXSCALE']): #correction if in deg/pixel\n pixscale = pixscale *60*60\n x_size = axis1 * pixscale /60# arcmin\n y_size = axis2 * pixscale /60# arcmin\n\n if 20 > x_size > 0.5 and 20 > y_size> 0.5 :\n #pixscale is sensical\n #Now: is the pixscale of the current wcs realistic?\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n pixscale = pixscale /60 /60 #pixelscale now in deg / pixel\n if( wcs_pixscale[0]/pixscale < 0.1 or wcs_pixscale[0]/pixscale > 10 or wcs_pixscale[1]/pixscale < 0.1 or wcs_pixscale[1]/pixscale > 10):\n #check if there is a huge difference in the scales\n #if yes then replace the wcs scale with the pixelscale info\n wcsprm.pc = [[1,0],[0,1]]\n\n wcsprm.cdelt = [pixscale, pixscale]\n if(not silent):\n print(\"changed pixelscale to {:.3g} deg/arcsec\".format(pixscale))\n fov_radius = (x_size/2+y_size/2)/np.sqrt(2) #try to get corners\n PIXSCALE_UNCLEAR=True\n\n\n if(np.array_equal(wcsprm.crpix, [0,0])):\n #centrl pixel seems to not be in header, better set in middle\n wcsprm.crpix = [axis1/2, axis2/2]\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n ###sky position not found. Maybe there is some RA and DEC info in the header:\n INCREASE_FOV_FLAG = True\n if (\"RA\" in keywords_present and \"DEC\" in keywords_present): ##carefull degree and hourangle!!!\n wcsprm.crval = [hdr[\"RA\"], hdr[\"DEC\"]]\n if(not silent):\n print(\"Found ra and dec information in the header\")\n print(wcsprm.crval)\n if(not silent):\n print(\"Is this position within the field of view in degrees? otherwise it will not work. In that case give a more accurate position as an argument: -ra XX -dec XX both in degrees\")\n\n if (RA_input is not None): #use user input if provided\n wcsprm.crval = [RA_input, wcsprm.crval[1]]\n wcsprm.crpix = [axis1/2, wcsprm.crpix[1]]\n\n if (DEC_input is not None):\n wcsprm.crval = [wcsprm.crval[0], DEC_input]\n wcsprm.crpix = [wcsprm.crpix[0], axis2/2, ]\n\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n print(\">>>>>>>>>WARNING\")\n print(\"No rough sky position was found for this object. Please add as -ra XX -dex XX both in degress. Adding the position as keywords in the fits file header will also work. The keywords are RA and DEC. The program expects the values in degrees. \")\n\n if(np.array_equal(wcsprm.ctype, [\"\",\"\"])):\n INCREASE_FOV_FLAG = True\n if(projection_ra is not None and projection_dec is not None):\n wcsprm.ctype = [ projection_ra, projection_dec]\n else:\n wcsprm.ctype = [ 'RA---TAN', 'DEC--TAN'] #this is a guess\n print(\">>>>>>>>>WARNING\")\n print(\"The wcs in the header has no projection specified. Will guess 'RA---TAN', 'DEC--TAN' (gnomonic projection) if this is incorrect the fit will fail. 
You can specify the projection via -projection_ra XX -projection_dec XX\")\n print(\"make sure you do not use quotations, example: -proj1 RA---TAN -proj2 DEC--TAN\")\n if(INCREASE_FOV_FLAG):\n fov_radius = fov_radius*2.5\n return wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR", "def make_header(args):\n header = os.path.join(args.output_dir,'header.sam')\n args.header = header\n header_handle = open(header,'w')\n header_handle.write('@HD\\tVN:1.4\\n')\n joined_sam = open(os.path.join(args.output_dir, 'watson_joinedAligned.out.sam'))\n merged_sam = open(os.path.join(args.output_dir, 'watson_mergedAligned.out.sam'))\n for line in joined_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n else:\n break\n for line in merged_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n elif not line.startswith('@HD'):\n header_handle.write(line)\n else:\n break\n header_handle.close()\n in_files = {'header':os.path.join(args.output_dir,'header.sam')}\n addRG(in_files, args)\n return args", "def setupCentralFeature():\n\n inputFC = ARCPY.GetParameterAsText(0)\n outputFC = ARCPY.GetParameterAsText(1)\n distanceMethod = ARCPY.GetParameterAsText(2).upper().replace(\" \", \"_\")\n weightField = UTILS.getTextParameter(3, fieldName = True) \n potentialField = UTILS.getTextParameter(4, fieldName = True)\n caseField = UTILS.getTextParameter(5, fieldName = True)\n\n distanceMethod = distanceMethod.split(\"_\")[0]\n fieldList = []\n if weightField:\n fieldList.append(weightField)\n\n if potentialField:\n fieldList.append(potentialField)\n\n if caseField:\n fieldList.append(caseField)\n\n #### Create a Spatial Stats Data Object (SSDO) ####\n ssdo = SSDO.SSDataObject(inputFC, templateFC = outputFC,\n useChordal = False)\n\n #### Populate SSDO with Data ####\n ssdo.obtainData(ssdo.oidName, fieldList, minNumObs = 1, dateStr = True) \n\n #### Run Analysis ####\n cf = CentralFeature(ssdo, distanceMethod = distanceMethod,\n weightField = weightField, \n potentialField = potentialField,\n caseField = caseField)\n\n #### Create Output ####\n cf.createOutput(outputFC)", "def create_hdul(wcskeys={\n 'wcsaxes': 2,\n 'ra_ref': 22.02351763251896,\n 'dec_ref': 11.99875540218638,\n 'v2_ref': 86.039011,\n 'v3_ref': -493.385704,\n 'roll_ref': 0.005076934167039675},\n data_shape=(2048, 2048)):\n hdul = fits.HDUList()\n phdu = fits.PrimaryHDU()\n phdu.header['DATAMODL'] = 'ImageModel'\n phdu.header['TELESCOP'] = \"JWST\"\n phdu.header['FILENAME'] = \"test+F444W\"\n phdu.header['INSTRUME'] = 'NIRCAM'\n phdu.header['CHANNEL'] = 'LONG'\n phdu.header['DETECTOR'] = 'NRCALONG'\n phdu.header['FILTER'] = 'F444W'\n phdu.header['PUPIL'] = 'CLEAR'\n phdu.header['MODULE'] = 'A'\n phdu.header['TIME-OBS'] = '16:58:27.258'\n phdu.header['DATE-OBS'] = '2021-10-25'\n phdu.header['EXP_TYPE'] = 'NRC_IMAGE'\n scihdu = fits.ImageHDU()\n scihdu.header['EXTNAME'] = \"SCI\"\n scihdu.header['SUBARRAY'] = 'FULL'\n\n scihdu.header.update(wcskeys)\n\n scihdu.data = zeros(data_shape)\n\n hdul.append(phdu)\n hdul.append(scihdu)\n\n return hdul", "def test_wcs_fit():\n import astropy.units as u\n rng = np.random.default_rng(57721)\n camera = imsim.get_camera()\n\n for _ in range(30):\n # Random spherepoint for boresight\n z = rng.uniform(-1, 1)\n th = rng.uniform(0, 2*np.pi)\n x = np.sqrt(1-z**2) * np.cos(th)\n y = np.sqrt(1-z**2) * np.sin(th)\n boresight = galsim.CelestialCoord(\n np.arctan2(y, x) * galsim.radians,\n np.arcsin(z) * galsim.radians\n )\n\n # Random obstime. 
No attempt to make sky dark.\n obstime = Time(\"J2020\") + rng.uniform(0, 1)*u.year\n\n # Rotator\n rotTelPos = rng.uniform(-np.pi/2, np.pi/2)\n\n # Ambient conditions\n temperature = rng.uniform(270, 300)\n pressure = rng.uniform(66, 72)\n H2O_pressure = rng.uniform(0.1, 10)\n\n wavelength = 620. # nm\n telescope = imsim.load_telescope(\n \"LSST_r.yaml\", rotTelPos=rotTelPos*galsim.radians\n )\n\n factory = imsim.BatoidWCSFactory(\n boresight, obstime, telescope, wavelength,\n camera, temperature, pressure, H2O_pressure\n )\n\n aob, zob, hob, dob, rob, eo = factory._ICRF_to_observed(\n boresight.ra.rad, boresight.dec.rad, all=True\n )\n\n # If zenith angle > 70 degrees, try again\n if np.rad2deg(zob) > 70:\n continue\n\n # Pick a few detectors randomly\n for idet in rng.choice(len(camera), 3):\n det = camera[idet]\n wcs = factory.getWCS(det, order=3)\n\n # center of detector:\n xc, yc = det.getCenter(cameraGeom.PIXELS)\n x = xc + rng.uniform(-2000, 2000, 100)\n y = yc + rng.uniform(-2000, 2000, 100)\n rc, dc = wcs.xyToradec(x, y, units='radians')\n rc1, dc1 = factory.pixel_to_ICRF(x, y, det)\n\n dist = sphere_dist(rc, dc, rc1, dc1)\n np.testing.assert_allclose( # sphere dist < 1e-5 arcsec\n 0,\n np.rad2deg(np.max(np.abs(dist)))*3600,\n rtol=0,\n atol=1e-5\n )\n print(\n \"ICRF dist (arcsec) \",\n np.rad2deg(np.mean(dist))*3600,\n np.rad2deg(np.max(np.abs(dist)))*3600,\n np.rad2deg(np.std(dist))*3600\n )\n x, y = wcs.radecToxy(rc, dc, units='radians')\n x1, y1 = factory.ICRF_to_pixel(rc, dc, det)\n np.testing.assert_allclose( # pix dist < 2e-3\n 0,\n np.max(np.abs(x-x1)),\n rtol=0,\n atol=2e-3\n )\n np.testing.assert_allclose(\n 0,\n np.max(np.abs(y-y1)),\n rtol=0,\n atol=2e-3\n )\n print(\n \"x-x1 (pixel) \",\n np.mean(x-x1),\n np.max(np.abs(x-x1)),\n np.std(x-x1)\n )\n print(\n \"y-y1 (pixel) \",\n np.mean(y-y1),\n np.max(np.abs(y-y1)),\n np.std(y-y1)\n )\n print(\"\\n\")\n\n if __name__ != '__main__':\n # In regular unit testing, just do one of these.\n break", "def getCoverage(\n self,\n identifier=None,\n bbox=None,\n time=None,\n format=None,\n crs=None,\n width=None,\n height=None,\n resx=None,\n resy=None,\n resz=None,\n parameter=None,\n method=\"Get\",\n timeout=30,\n **kwargs\n ):\n from owslib.util import makeString\n from urllib.parse import urlencode\n from owslib.util import openURL\n\n if logger.isEnabledFor(logging.DEBUG):\n msg = \"WCS 1.0.0 DEBUG: Parameters passed to GetCoverage: identifier={}, bbox={}, time={}, format={}, crs={}, width={}, height={}, resx={}, resy={}, resz={}, parameter={}, method={}, other_arguments={}\" # noqa\n logger.debug(\n msg.format(\n identifier, bbox, time, format, crs, width, height, resx, resy, resz, parameter, method, str(kwargs)\n )\n )\n\n base_url = self.source\n\n logger.debug(\"WCS 1.0.0 DEBUG: base url of server: %s\" % base_url)\n\n # process kwargs\n request = {\"version\": self.version, \"request\": \"GetCoverage\", \"service\": \"WCS\"}\n assert len(identifier) > 0\n request[\"Coverage\"] = identifier\n # request['identifier'] = ','.join(identifier)\n if bbox:\n request[\"BBox\"] = \",\".join([makeString(x) for x in bbox])\n else:\n request[\"BBox\"] = None\n if time:\n request[\"time\"] = \",\".join(time)\n if crs:\n request[\"crs\"] = crs\n request[\"format\"] = format\n if width:\n request[\"width\"] = width\n if height:\n request[\"height\"] = height\n if resx:\n request[\"resx\"] = resx\n if resy:\n request[\"resy\"] = resy\n if resz:\n request[\"resz\"] = resz\n\n # anything else e.g. 
vendor specific parameters must go through kwargs\n if kwargs:\n for kw in kwargs:\n request[kw] = kwargs[kw]\n\n # encode and request\n data = urlencode(request)\n logger.debug(\"WCS 1.0.0 DEBUG: Second part of URL: %s\" % data)\n\n u = openURL(base_url, data, method, self.cookies, auth=self.auth, timeout=timeout, headers=self.headers)\n return u", "def __init__(self, por): \n logger.debug(\"Entering in ocentricWKT constructor\") \n super(OcentricWKT, self).__init__(\n por.getElement(OcentricMetadata.GEO_GCS_NAME),\n por.getElement(OcentricMetadata.DATUM_NAME),\n por.getElement(OcentricMetadata.ELLIPSOIDE_NAME),\n por.getElement(OcentricMetadata.RADIUS),\n por.getElement(OcentricMetadata.INVERSE_FLATTENING),\n por.getElement(OcentricMetadata.AUTHORITY_NAME),\n por.getElement(OcentricMetadata.AUTHORITY_CODE)\n ) \n logger.debug(\"Exiting from ocentricWKT constructor\")", "def _clean(header):\n # TODO: find a way to identify cubes containing time\n header['ctype1'] = 'HPLN-TAN' # Helioprojective longitude, TAN projection\n header['ctype2'] = 'HPLT-TAN' # Helioprojective latitude, TAN projection\n header['ctype3'] = 'WAVE ' # Wavelength axis, default (TAB) projection\n header['naxis'] = 3\n return header", "def wlsoln_coeff_from_header (pyfits_header, apply_WCS_rv=False, preferred=None, output='sel'):\n # coefficient choices\n cc = {}\n #========================================================================#\n # linear dispersion\n cc['linear'] = coeff_basic_linear(pyfits_header)\n\n #========================================================================#\n # using keywords ctype, crval, crpix, cdelt\n cc['ctype1'] = coeff_from_ctype1(pyfits_header)\n\n #========================================================================#\n # linear dispersion using keywords linintrp, crvl1_?, cdlt1_?\n # from IRAF, order by order !! do I need to look up what the 1_ means?\n # some of these are doubled by WAT0_001 stuff\n cc['crvl'] = coeff_from_crvl(pyfits_header)\n # if preferred_disp == 'any' or preferred_disp == 'linear' or preferred_disp == 'crvl' or preferred_disp == 'makee linear':\n \n #========================================================================#\n # IRAF WCS keywords WAT?_001 \n #if preferred_disp == 'any' or preferred_disp == 'IRAF_WCS':\n cc['wcs'] = coeff_from_wcs(pyfits_header,apply_WCS_rv)\n\n #========================================================================#\n # linear dispersion for keywords w0 and wpc\n cc['w0'] = coeff_from_w0(pyfits_header)\n #if preferred_disp == 'any' or preferred_disp == 'linear' or preferred_disp == 'w0':\n\n #========================================================================#\n # MAKEE type dispersion using keywords co_0_? and co_4_?\n # I'm not sure what type of coefficients these are !!\n #cc['co_0'] = coeff_from_makee_c0(pyfits_header)\n# if preferred_disp == 'any' or preferred_disp == 'makee' or preferred_disp == 'co_0':\n\n #========================================================================#\n # MAKEE coeffificients using keywords wv_0_? 
and wv_4_?\n cc['wv_0'] = coeff_from_makee_wv(pyfits_header)\n #if preferred_disp == 'any' or preferred_disp == 'makee' or preferred_disp == 'wv_0':\n\n #========================================================================#\n # spectre type dispersion\n cc['spectre'] = coeff_from_SPECTRE(pyfits_header)\n #if preferred_disp == 'any' or preferred_disp == 'spectre':\n\n #========================================================================#\n #========================================================================#\n \n if output == 'all': return cc\n return resolve_wlsoln_coeffs(cc,preferred)", "def header(self):\n header = _HEADER_DEFAULT.copy()\n bounds = self.bounds\n header.update({'point_return_count': [len(self), 0, 0, 0, 0],\n 'x_offset': round(bounds.minx),\n 'y_offset': round(bounds.miny),\n 'z_offset': round(bounds.minz),\n 'x_min': bounds.minx,\n 'y_min': bounds.miny,\n 'z_min': bounds.minz,\n 'x_max': bounds.maxx,\n 'y_max': bounds.maxy,\n 'z_max': bounds.maxz})\n\n return laspy.header.Header(**header)", "def getWCSHeader(wcsHDU=None):\n if wcsHDU is None:\n return None\n\n wcsHeader = wcsHDU[0].header\n return wcsHeader", "def sanitize(self):\n # Early versions of CASU catalogues chave multiple columns 'Blank'\n # Numpy will throw an exception if multiple columns have the same\n # name, so we need to rename these columns.\n n_columns = len(self.fits[self.ccd].columns)\n for col in range(26, n_columns, 1):\n name = self.fits[self.ccd].columns[col].name\n if name == 'Blank':\n self.fits[self.ccd].columns[col].name = 'Blank%d' % col\n\n # The headers contain a combination of old- and modern-\n # style WCS parameters for the ZPN projection coefficients, which\n # confuses libwcs. Moreover, in a few cases the keyword values\n # are plainly wrong. 
Hence we remove the keywords.\n for kw in ['PV1_0', 'PV1_1', 'PV1_2', 'PV1_3',\n 'PV2_0', 'PV2_1', 'PV2_2', 'PV2_3',\n 'PV3_0', 'PV3_1', 'PV3_3', 'PV3_3',\n 'PROJP1', 'PROJP3', 'WAT1_001', 'WAT2_001',\n 'RADECSYS']:\n del self.fits[self.ccd].header[kw]\n\n # ..and enforce the parameters wich have been used by the pipeline\n self.fits[self.ccd].header['EQUINOX'] = 2000.0\n self.fits[self.ccd].header['PV2_1'] = 1.0\n self.fits[self.ccd].header['PV2_3'] = 220.0\n self.fits[self.ccd].header['CUNIT1'] = 'deg'\n self.fits[self.ccd].header['CUNIT2'] = 'deg'\n self.fits[self.ccd].header['RADESYSa'] = 'ICRS'", "def write_wcs_to_hdr(original_filename, wcsprm, report, hdul_idx=0):\n with fits.open(original_filename) as hdul:\n\n hdu = hdul[hdul_idx]\n hdr_file = hdu.header\n\n #new_header_info = wcs.to_header()\n\n wcs =WCS(wcsprm.to_header())\n\n #I will through out CD which contains the scaling and separate into pc and Cdelt\n for old_parameter in ['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2', \"PC1_1\", \"PC1_2\", \"PC2_1\", \"PC2_2\"]:\n if (old_parameter in hdr_file):\n del hdr_file[old_parameter]\n\n hdr_file.update(wcs.to_header())\n repr(hdr_file)\n\n #adding report\n hdr_file['AST_SCR'] = (\"Astrometry\", 'Astrometry by Lukas Wenzl')\n hdr_file['AST_CAT'] = (report[\"catalog\"], 'Catalog used')\n hdr_file['AST_MAT'] = (report[\"matches\"], 'Number of catalog matches')\n hdr_file['AST_RAD'] = (report[\"match_radius\"], 'match radius in pixel')\n hdr_file['AST_CONV'] = (report[\"converged\"], \"T or F for astroemtry converged or not\")\n\n\n\n hdu.header = hdr_file\n hdul[hdul_idx] = hdu\n #removing fits ending\n name_parts = original_filename.rsplit('.', 1)\n hdul.writeto(name_parts[0]+'_astro.fits', overwrite=True)\n print(\"file written.\")", "def insert_wcs_service(self,\n base=\"/thredds/wcs/\"):\n sv = self.new_element(\"service\",\n name=\"wcs\",\n serviceType=\"WCS\",\n base=base)\n self.root.insert(0, sv)", "def create_cloud(header, fields, points):\n\t\n\tcloud_struct = struct.Struct(_get_struct_fmt(False, fields))\n\t\n\tbuff = ctypes.create_string_buffer(cloud_struct.size * len(points))\n\t\n\tpoint_step, pack_into = cloud_struct.size, cloud_struct.pack_into\n\toffset = 0\n\t\n\tfor p in points:\n\t\tpack_into(buff, offset, *p)\n\t\toffset += point_step\n\treturn PointCloud2(header=header,\n\t\t\t\t\t\theight=1,\n\t\t\t\t\t\twidth=len(points),\n\t\t\t\t\t\tis_dense=False,\n\t\t\t\t\t\tis_bigendian=False,\n\t\t\t\t\t\tfields=fields,\n\t\t\t\t\t\tpoint_step=cloud_struct.size,\n\t\t\t\t\t\trow_step=cloud_struct.size * len(points),\n\t\t\t\t\t\tdata=buff.raw)", "def build_parameters(pobj):\n ViscosityWilke.build_parameters(pobj)", "def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n flt='ibhj34h6q_flt.fits', filter='G141'):\n import numpy as np\n \n import astropy.io.fits as pyfits\n import astropy.wcs as pywcs\n \n im = pyfits.open(flt)\n wcs = pywcs.WCS(im[1].header, relax=True)\n \n thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180\n\n wcs.wcs.crval = np.array([ra, dec])\n \n ### Rotate the CD matrix\n theta = im[1].header['PA_APER'] - pa_aper \n cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)\n wcs.wcs.cd = cd_rot\n \n h = wcs.to_header(relax=True)\n \n for i in [1,2]:\n for j in [1,2]:\n h['CD%d_%d' %(i,j)] = h['PC%d_%d' %(i,j)]\n h.remove('PC%d_%d' %(i,j))\n \n h['BACKGR'] = 1.\n h['FILTER'] = filter\n h['INSTRUME'] = 'WFC3'\n h['READN'] = im[0].header['READNSEA']\n h['NAXIS1'] = h['NAXIS2'] = 1014\n h['DETECTOR'] = 'IR'\n h['PHOTFLAM'] = 
1.\n h['PHOTPLAM'] = 1.\n \n return h, wcs", "def _create_hdr_obj(self, pix_len, pix_scale):\n hdr = astropy.io.fits.Header()\n hdr['NAXIS'] = 2\n hdr['NAXIS1'] = pix_len\n hdr['NAXIS2'] = pix_len\n hdr['CTYPE1'] = 'RA---TAN'\n hdr['CRVAL1'] = float(self.ra_ctr)\n hdr['CRPIX1'] = (pix_len / 2.) * 1.\n hdr['CDELT1'] = -1.0 * pix_scale\n hdr['CTYPE2'] = 'DEC--TAN'\n hdr['CRVAL2'] = float(self.dec_ctr)\n hdr['CRPIX2'] = (pix_len / 2.) * 1.\n hdr['CDELT2'] = pix_scale\n hdr['EQUINOX'] = 2000\n return hdr", "def __init__(\n self,\n w,\n x,\n y,\n z):\n self.w = w\n self.x = x\n self.y = y\n self.z = z", "def _constructor(self):\r\n return SpatialDataFrame", "def niriss_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n filter='F150W', grism='GR150R'): \n naxis = 2048, 2048\n crpix = 1024, 1024\n \n cd = np.array([[ -0.0658, 0], [0, 0.0654]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n ### Backgrounds\n # http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf\n bg = {'F090W':0.50, 'F115W':0.47, 'F140M':0.23, 'F150W':0.48, 'F158M':0.25, 'F200W':0.44}\n \n h['BACKGR'] = bg[filter], 'Total, e/s'\n h['FILTER'] = filter\n h['INSTRUME'] = 'NIRISS'\n h['READN'] = 6 , 'Rough, per pixel per 1 ks exposure' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n if grism == 'GR150R':\n h['GRISM'] = 'GR150R', 'Spectral trace along X'\n else:\n h['GRISM'] = 'GR150C', 'Spectral trace along Y'\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def _readheader(lines):\n hdrdict = {}\n #input list of 26 lines of header\n #station and channel\n line = lines[5]\n parts = line.strip().split()\n fname = parts[1]\n fparts = fname.split('_')\n hdrdict['station'] = fparts[-2]+'_'+fparts[-1]\n\n #the \"Component\" lines look like either: Component S00W, Component S90E, Component Up\n compstr = lines[12].strip().split()[1]\n hdrdict['channel'] = get_comp_name(compstr)\n\n #instrument\n hdrdict['instrument'] = lines[3].split()[1].strip()\n \n #location string\n line = lines[6]\n hdrdict['location'] = line.strip()\n #event origin, buffer start year/month\n line = lines[16]\n parts = line.strip().split()\n bufyear = int(parts[8])\n bufmonth = int(parts[9])\n #epicentral location, buffer start day/hour\n line = lines[17]\n parts = line.strip().split()\n bufday = int(parts[8])\n bufhour = int(parts[9])\n #numpoints, buffer start min/sec\n line = lines[19]\n parts = line.strip().split()\n hdrdict['npts'] = int(parts[0])\n bufmin = int(parts[8])\n millisec = int(parts[9])\n bufsec = int(millisec/1000)\n bufmicrosec = int(np.round(millisec/1000.0 - bufsec))\n hdrdict['starttime'] = UTCDateTime(datetime(bufyear,bufmonth,bufday,bufhour,bufmin,bufsec,bufmicrosec))\n #part C\n #frequency, calibration value and some other stuff we don't care about\n line = lines[20]\n parts = line.strip().split()\n hdrdict['sampling_rate'] = float(parts[0])\n hdrdict['delta'] = 1.0/hdrdict['sampling_rate']\n hdrdict['calib'] = float(parts[7])\n #site location info, this time in dd\n line = lines[21]\n parts = line.strip().split()\n hdrdict['lat'] = float(parts[0]) * -1\n hdrdict['lon'] = float(parts[1])\n hdrdict['height'] = 0.0\n #duration\n 
line = lines[22]\n parts = line.strip().split()\n hdrdict['duration'] = float(parts[0])\n hdrdict['endtime'] = hdrdict['starttime'] + hdrdict['duration']\n #max acceleration - good for sanity check\n line = lines[23]\n parts = line.strip().split()\n hdrdict['maxacc'] = float(parts[0])\n hdrdict['network'] = 'NZ'\n hdrdict['units'] = 'acc'\n return hdrdict", "def get_wcs(self, sample_times=None, derotate=True, wcs_timestep=None, cube_type=None, bins=None):\n\n if not derotate and sample_times is None:\n sample_times = np.array([self.start_time])\n elif sample_times is None:\n sample_times = np.arange(self.start_time, self.stop_time, wcs_timestep)\n\n start_time = sample_times[0]\n ref_pixels = []\n try:\n for t in sample_times:\n md = self.metadata(t)\n # we want to inherit defaults for ref values\n head = mkidcore.metadata.build_header(md, unknown_keys='ignore')\n ref_pixels.append(compute_wcs_ref_pixel((md['E_CONEXX'], md['E_CONEXY']),\n reference_pixel=(head['E_PREFX'], head['E_PREFY']),\n reference=(head['E_CXREFX'], head['E_CXREFY']),\n conex_deltas=(md['E_DPDCX'], md['E_DPDCY'])))\n except KeyError:\n getLogger(__name__).warning('Insufficient data to build a WCS solution, conex info missing')\n return None\n\n cubeaxis = {}\n if cube_type in ('wave', 'time'):\n cubeaxis = {'CTYPE3': 'TIME' if cube_type == 'time' else 'WAVE',\n 'CUNIT3': 's' if cube_type == 'time' else 'nm',\n 'CDELT3': bins[1] - bins[0], 'CRPIX3': 1, 'CRVAL3': bins[0], 'NAXIS3': bins.size}\n\n # we want to inherit defaults for ref values\n header = mkidcore.metadata.build_header(self.metadata(start_time), unknown_keys='warn')\n wcs_solns = mkidcore.metadata.build_wcs(header, astropy.time.Time(val=sample_times, format='unix'), ref_pixels,\n self.beamImage.shape, subtract_parallactic=derotate, cubeaxis=cubeaxis)\n return wcs_solns", "def _build_header_dictionary(self):\n start = 0\n #print self.raw_data\n for a in range(20):\n redatapuller = re.compile(\"\\r\\n\\r\\n\\r\\n(?P<word>.*?)\\t.*?\\n\", re.DOTALL)\n m = redatapuller.search(self.raw_data[start:])\n if not(m):\n break\n self.header_dictionary[m.group(\"word\")] = start + m.end()\n if a==0:\n self.header_dictionary[\"main\"] = start + m.end()\n start += m.end()", "def getWKT(self):\n logger.debug(\"Entering in ocentricWKT.getWkt\")\n\n # building WKT string\n wkt = OcentricWKT.GEODCRS % (\n self.getGeoGcsName(), self.getDatumName(), self.getSpheroidName(), self.getRadius(), self.getInverseFlattening(),\n self.getRadius(), self.getAuthorityName(), self.getAuthorityCode()\n )\n\n logger.debug(\"Exiting from ocentricWKT.getWkt\")\n return wkt", "def _make_header(self, scan_data):\n \n # copy header data\n header_data = scan_data.copy()\n \n # set precursor target\n if header_data['precursor_target_mz'] is None:\n header_data['precursor_target_mz'] = header_data['precursor_mz']\n \n # set precursor offsets\n if header_data['precursor_width'] is not None:\n header_data['precursor_lower_offset'] = 0.5 * header_data['precursor_width']\n header_data['precursor_upper_offset'] = 0.5 * header_data['precursor_width']\n \n # set precursor window\n if header_data['precursor_target_mz'] is not None:\n header_data['precursor_low_mz'] = header_data['precursor_target_mz'] - header_data['precursor_lower_offset']\n header_data['precursor_high_mz'] = header_data['precursor_target_mz'] + header_data['precursor_upper_offset']\n \n # remove some items from raw data\n del header_data['mz_data']\n del header_data['mz_precision']\n del header_data['mz_compression']\n del 
header_data['int_data']\n del header_data['int_precision']\n del header_data['int_compression']\n del header_data['precursor_target_mz']\n del header_data['precursor_lower_offset']\n del header_data['precursor_upper_offset']\n del header_data['precursor_width']\n \n # create header\n return ScanHeader(header_data)", "def coadd(hdr_list, data_list, var_list, exp_list,\n method='mean', weighted=True, robust=True, sigma=8.0,\n maxiters=5, spectral=False, cube=False, wcskey=' ',\n rotate=True, fit_order=2, window=7.0, smoothing=2.0,\n adaptive_algorithm=None, edge_threshold=0.7,\n reference='first'):\n\n # cube is only supported for spectral data\n if cube:\n spectral = True\n\n # reference all data to the first file\n out_header = hdr_list[0].copy()\n\n # set reference angle to zero if it isn't already\n key = wcskey.strip().upper()\n if rotate:\n for wkey in [f'CROTA2{key}',\n f'PC1_1{key}', f'PC1_2{key}',\n f'PC2_1{key}',\n f'PC2_2{key}', f'PC2_3{key}',\n f'PC3_2{key}', f'PC3_3{key}']:\n if wkey in out_header:\n if wkey == f'CROTA2{key}':\n out_header[wkey] = 0.0\n else:\n del out_header[wkey]\n\n # swap RA to east-left if needed\n ra = f'CDELT1{key}'\n if not cube and ra in out_header and out_header[ra] > 0:\n out_header[ra] *= -1\n\n # turn down logging to avoid FITS warning for 3D coord sys\n olevel = log.level\n log.setLevel('ERROR')\n if not spectral:\n outwcs = WCS(out_header, key=wcskey, naxis=2)\n else:\n outwcs = WCS(out_header, key=wcskey)\n log.setLevel(olevel)\n\n wcs_dim = outwcs.wcs.naxis\n if cube and wcs_dim < 3:\n msg = 'WCS is not 3D. Cannot make cube.'\n log.error(msg)\n raise ValueError(msg)\n\n if cube:\n # expectation is that 3D coord was in a secondary WCS --\n # we don't handle it if not\n if key == '':\n log.error('Unexpected input WCS condition. '\n 'Cannot fix output header.')\n raise ValueError\n\n method = 'resample'\n if 'SLTW_PIX' not in out_header:\n log.warning('Slit width not in header; output flux '\n 'may not be conserved.')\n float_slitw = out_header.get('SLTW_PIX', 1.0)\n slit_width = int(np.round(float_slitw))\n else:\n float_slitw = 1.0\n slit_width = 1\n\n # if referencing to a target RA/Dec (e.g. 
for nonsidereal targets),\n # get the target position in reference x, y coordinates\n tgt_x, tgt_y = None, None\n if reference == 'target':\n tgt_x, tgt_y = _target_xy(out_header, outwcs)\n if None in (tgt_x, tgt_y):\n msg = 'Missing TGTRA or TGTDEC; cannot reference to target.'\n log.warning(msg)\n\n out_coord_x = []\n out_coord_y = []\n out_coord_w = []\n flxvals = []\n errvals = []\n expvals = []\n corners = []\n for (hdr, flux, var, exp) in zip(hdr_list, data_list, var_list, exp_list):\n # input wcs\n if not spectral:\n inwcs = WCS(hdr, key=wcskey, naxis=2)\n else:\n inwcs = WCS(hdr, key=wcskey)\n\n # assemble flux, error, and exposure map values\n ny, nx = flux.shape\n err = np.sqrt(var)\n good = ~np.isnan(flux) & ~np.isnan(err)\n if not np.any(good):\n log.warning(f\"No good data in \"\n f\"{hdr.get('FILENAME', 'UNKNOWN')}; skipping.\")\n continue\n if method == 'resample':\n flxvals.append(flux[good])\n errvals.append(err[good])\n else:\n flxvals.append(flux)\n errvals.append(err)\n if cube:\n # exposure value is at one wavelength only, with\n # slit width size, plus two zero columns for padding\n expval = exp[:, 0:slit_width + 2]\n expval[:, 0] = 0\n expval[:, -1] = 0\n expvals.append(expval)\n else:\n expvals.append(exp)\n\n # index values for resampling\n yin, xin = np.meshgrid(np.arange(ny), np.arange(nx), indexing='ij')\n yin = yin[good]\n xin = xin[good]\n xamin, xamax = np.argmin(xin), np.argmax(xin)\n yamin, yamax = np.argmin(yin), np.argmax(yin)\n\n # corner values for interpolation\n if cube:\n in_corner = [[xin[xamin], xin[xamin],\n xin[xamax], xin[xamax]],\n [yin[yamin], yin[yamax],\n yin[yamin], yin[yamax]],\n [-slit_width / 2 + 0.5, -slit_width / 2 + 0.5,\n slit_width / 2 - 0.5, slit_width / 2 - 0.5]]\n else:\n in_corner = [[xin[xamin], xin[xamin],\n xin[xamax], xin[xamax]],\n [yin[yamin], yin[yamax],\n yin[yamin], yin[yamax]]]\n\n # transform all coords to reference WCS\n if wcs_dim == 2:\n wxy = inwcs.wcs_pix2world(xin, yin, 0)\n oxy = outwcs.wcs_world2pix(*wxy, 0)\n cxy = inwcs.wcs_pix2world(*in_corner, 0)\n out_corner = outwcs.wcs_world2pix(*cxy, 0)\n else:\n wxy = inwcs.wcs_pix2world(xin, yin, 0, 0)\n oxy = outwcs.wcs_world2pix(*wxy, 0)\n if cube:\n cxy = inwcs.wcs_pix2world(*in_corner, 0)\n out_corner = outwcs.wcs_world2pix(*cxy, 0)\n # ra, dec corners\n in_corner = [in_corner[2], in_corner[1]]\n # correct for slit width offset in not-yet\n # existant 3rd dimension\n out_corner = np.array([out_corner[2] - slit_width / 2,\n out_corner[1]])\n else:\n cxy = inwcs.wcs_pix2world(*in_corner, 0, 0)\n out_corner = outwcs.wcs_world2pix(*cxy, 0)[0:2]\n\n # correct all coordinates for target movement\n x_off, y_off = 0., 0.\n if None not in [tgt_x, tgt_y]:\n upd_x, upd_y = _target_xy(hdr, outwcs)\n if None in [upd_x, upd_y]:\n log.warning(f\"Missing target RA/Dec in file \"\n f\"{hdr.get('FILENAME', 'UNKNOWN')}.\")\n else:\n x_off = tgt_x - upd_x\n y_off = tgt_y - upd_y\n\n if cube and wcs_dim == 3:\n # assuming crval1=wavelength, crval2=dec, crval3=ra\n out_coord_w.append(oxy[0])\n out_coord_y.append(oxy[1] + y_off)\n out_coord_x.append(oxy[2] + x_off)\n else:\n out_coord_x.append(oxy[0] + x_off)\n out_coord_y.append(oxy[1] + y_off)\n\n out_corner[0] += x_off\n out_corner[1] += y_off\n corners.append((in_corner, out_corner))\n\n # output grid shape\n stk_coord_x = np.hstack(out_coord_x)\n minx, maxx = np.min(stk_coord_x), np.max(stk_coord_x)\n stk_coord_y = np.hstack(out_coord_y)\n miny, maxy = np.min(stk_coord_y), np.max(stk_coord_y)\n\n # shift coordinates to new grid\n 
stk_coord_x -= minx\n stk_coord_y -= miny\n\n # stack coordinates for output grid\n if cube:\n stk_coord_w = np.hstack(out_coord_w)\n minw, maxw = np.min(stk_coord_w), np.max(stk_coord_w)\n out_shape = (int(np.ceil(maxw) - np.floor(minw) + 1),\n int(np.ceil(maxy) - np.floor(miny) + 1),\n int(np.ceil(maxx) - np.floor(minx)) + 1)\n stk_coord_w -= minw\n coordinates = stack(stk_coord_x, stk_coord_y, stk_coord_w)\n\n xout = np.arange(out_shape[2], dtype=np.float64)\n yout = np.arange(out_shape[1], dtype=np.float64)\n wout = np.arange(out_shape[0], dtype=np.float64)\n grid = xout, yout, wout\n\n # fix header reference pixel for new min value in w and x\n out_header['CRPIX1' + key] -= minw\n out_header['CRPIX2' + key] -= miny\n out_header['CRPIX3' + key] -= minx\n else:\n out_shape = (int(np.ceil(maxy) - np.floor(miny) + 1),\n int(np.ceil(maxx) - np.floor(minx)) + 1)\n\n coordinates = stack(stk_coord_x, stk_coord_y)\n\n xout = np.arange(out_shape[1], dtype=np.float64)\n yout = np.arange(out_shape[0], dtype=np.float64)\n grid = xout, yout\n\n # fix header reference pixel\n out_header['CRPIX1' + key] -= minx\n out_header['CRPIX2' + key] -= miny\n\n # also fix primary coordinates for 2D spectrum\n if key != '' and wcs_dim > 2:\n out_header['CRPIX1'] -= minx\n out_header['CRPIX2'] -= miny\n\n log.info('Output shape: {}'.format(out_shape))\n\n # use local polynomial fits to resample and coadd data\n if method == 'resample':\n flxvals = np.hstack(flxvals)\n errvals = np.hstack(errvals)\n\n if cube:\n edge_threshold = (edge_threshold, edge_threshold, 0)\n window = (window, window, 2.0)\n smoothing = (smoothing, smoothing, 1.0)\n if adaptive_algorithm in ['scaled', 'shaped']:\n adaptive_threshold = (1.0, 1.0, 0.0)\n else:\n adaptive_threshold = None\n adaptive_algorithm = None\n else:\n if adaptive_algorithm in ['scaled', 'shaped']:\n adaptive_threshold = 1.0\n else:\n adaptive_threshold = None\n adaptive_algorithm = None\n\n max_cores = psutil.cpu_count() - 1\n if max_cores < 2: # pragma: no cover\n max_cores = None\n\n log.info('Setting up output grid.')\n resampler = Resample(\n coordinates, flxvals, error=errvals,\n window=window, order=fit_order, fix_order=True)\n\n log.info('Resampling flux data.')\n flux, std = resampler(\n *grid, smoothing=smoothing, edge_threshold=edge_threshold,\n adaptive_threshold=adaptive_threshold,\n adaptive_algorithm=adaptive_algorithm,\n edge_algorithm='distribution', get_error=True,\n error_weighting=weighted, jobs=max_cores)\n var = std**2\n\n log.info('Interpolating and summing exposure maps.')\n if cube:\n expmap = np.zeros(out_shape[1:], dtype=float)\n else:\n expmap = np.zeros(out_shape, dtype=float)\n for i, expval in enumerate(expvals):\n inx, iny = corners[i][0]\n outx, outy = corners[i][1]\n outx -= minx\n outy -= miny\n exp_out = warp_image(\n expval, inx, iny, outx, outy,\n output_shape=expmap.shape, cval=0,\n order=1, interpolation_order=1)\n expmap += exp_out\n else:\n # interpolate corners for approximate warp solution\n log.info('Interpolating all images.')\n\n flx = []\n vr = []\n expmap = np.zeros(out_shape)\n for i, (flxval, errval, expval) in \\\n enumerate(zip(flxvals, errvals, expvals)):\n inx, iny = corners[i][0]\n outx, outy = corners[i][1]\n outx -= minx\n outy -= miny\n\n # flux image\n flx.append(\n warp_image(flxval, inx, iny, outx, outy,\n output_shape=out_shape, cval=np.nan,\n order=1, interpolation_order=1))\n\n # var image\n vr.append(\n warp_image(errval**2, inx, iny, outx, outy,\n output_shape=out_shape, cval=np.nan,\n order=1, 
interpolation_order=0))\n\n # exposure map image\n exp_out = warp_image(\n expval, inx, iny, outx, outy,\n output_shape=out_shape, cval=0,\n order=1, interpolation_order=0)\n expmap += exp_out\n\n if len(flx) > 1:\n log.info('{}-combining images.'.format(method.title()))\n flux, var = combine_images(\n flx, variance=vr, method=method, weighted=weighted,\n robust=robust, sigma=sigma, maxiters=maxiters)\n else:\n flux, var = flx[0], vr[0]\n\n if cube:\n # reconstruct as primary wcs\n key = wcskey.strip().upper()\n wcs_key_set = ['CTYPE1', 'CTYPE2', 'CUNIT1', 'CUNIT2',\n 'CRPIX1', 'CRPIX2', 'CRVAL1', 'CRVAL2',\n 'CDELT1', 'CDELT2', 'CROTA2', 'SPECSYS',\n f'CTYPE1{key}', f'CTYPE2{key}', f'CTYPE3{key}',\n f'CUNIT1{key}', f'CUNIT2{key}', f'CUNIT3{key}',\n f'CRPIX1{key}', f'CRPIX2{key}', f'CRPIX3{key}',\n f'CRVAL1{key}', f'CRVAL2{key}', f'CRVAL3{key}',\n f'CDELT1{key}', f'CDELT2{key}', f'CDELT3{key}',\n f'RADESYS{key}', f'EQUINOX{key}', f'SPECSYS{key}']\n tmp = out_header.copy()\n for wkey in wcs_key_set:\n if wkey in out_header:\n del out_header[wkey]\n if wkey.endswith(key) and wkey in tmp:\n # swap coords 1 and 3 (to make it wave, RA, Dec)\n new_key = wkey[:-1].replace('3', '9999')\n new_key = new_key.replace('1', '3').replace('9999', '1')\n hdinsert(out_header, new_key, tmp[wkey], tmp.comments[wkey])\n\n # fix source position estimate too\n if 'SRCPOSX' in out_header and 'SRCPOSY' in out_header:\n coord = ([out_header['SRCPOSX']],\n [out_header['SRCPOSY']])\n first_wcs = WCS(hdr_list[0], naxis=2)\n out_wcs = WCS(out_header, naxis=2)\n sxy = first_wcs.wcs_pix2world(*coord, 0)\n new_xy = out_wcs.wcs_world2pix(*sxy, 0)\n out_header['SRCPOSX'] = new_xy[0][0]\n out_header['SRCPOSY'] = new_xy[1][0]\n\n if cube:\n # correct flux for pixel size change\n # before: pixel x slit width in pixels\n # after: pixel x pixel\n flux /= float_slitw\n var /= float_slitw**2\n\n return out_header, flux, var, expmap", "def make_roi_header(**param):\n hdr_list = ['== Integration ROI ==']\n method = [i for i in list(param.keys()) if \"pos\" in i][0].split('_pos')[0]\n hdr_list.append('Integration method: {}'.format(method))\n\n for k, v in list(param.items()):\n hdr_list.append('{}: {}'.format(k, v))\n\n header = \"\\n\".join(['# ' + i for i in hdr_list])\n return header", "def reproject(self, header, order='bilinear'):\n\n self._raise_wcs_no_celestial()\n\n try:\n from reproject.version import version\n except ImportError:\n raise ImportError(\"Requires the reproject package to be\"\n \" installed.\")\n\n # Need version > 0.2 to work with cubes\n from distutils.version import LooseVersion\n if LooseVersion(version) < \"0.3\":\n raise Warning(\"Requires version >=0.3 of reproject. 
The current \"\n \"version is: {}\".format(version))\n\n from reproject import reproject_interp\n\n # TODO: Find the minimal footprint that contains the header and only reproject that\n # (see FITS_tools.regrid_cube for a guide on how to do this)\n\n newwcs = wcs.WCS(header)\n shape_out = [header['NAXIS{0}'.format(i + 1)] for i in range(header['NAXIS'])][::-1]\n\n newproj, newproj_valid = reproject_interp((self.value,\n self.header),\n newwcs,\n shape_out=shape_out,\n order=order)\n\n self = Projection(newproj, unit=self.unit, wcs=newwcs,\n meta=self.meta, header=header,\n read_beam=True)\n\n return self", "def __init__(self):\n OWSReport.__init__(self)\n self.stats['type'] = 'OGC:WCS'\n self.stats['operations']['GetCoverage'] = {}\n self.stats['operations']['GetCoverage']['hits'] = 0\n self.stats['operations']['GetCoverage']['resource'] = {}\n self.stats['operations']['GetCoverage']['resource']['param'] = 'coverage'\n self.stats['operations']['GetCoverage']['resource']['list'] = {}\n self.stats['operations']['DescribeCoverage'] = {}\n self.stats['operations']['DescribeCoverage']['hits'] = 0", "def read_header(self):\n # Read entire header into memory in one read to minimize Disk I/O.\n self.fh.seek(0)\n hdr = self.fh.read(self.header['header size'])\n\n # Find several markers in the byte-string\n # Each of these may occur more than once, find last.\n polylist_pos = hdr.rfind(b'Poly_list\\x00')\n champslist_pos = hdr.rfind(b'Champs_list\\x00')\n offsetlist_pos = hdr.rfind(b'Offset_list\\x00')\n\n # Find first occurance for these.\n # analparam_pos = hdr.find(b'Anal_param\\x00')\n analparamnano_pos = hdr.find(b'Anal_param_nano\\x00')\n analparamnanobis_pos = hdr.find(b'Anal_param_nano_bis\\x00')\n\n # Turn byte-string into BytesIO file-like object; reading and\n # keeping track of where we are is easier that way than trying to\n # slice byte-string as an array and keeping track of indices.\n hdr = io.BytesIO(hdr)\n\n # Main header\n hdr.seek(12)\n self.header.update(self._main_header(hdr))\n\n # NanoSIMS header, starts with PolyList/ChampsList/OffsetList\n # The following configurations have been found in the wild, so far:\n # 1. NS header\n # 2. PL, NS header\n # 3. PL, CL, OL, NS header\n # 4. PL, CL, OL, partial NS header, PL, NS header, PL, CL, OL,\n # partial NS header, PL, NS header\n # Note: I have not seen any *lists with contents (only length 0).\n # From OpenMIMS documentation I know that PolyList is as list of\n # Species dicts, but don't know how to read ChampsList or OffsetList.\n if polylist_pos < 0:\n # Case 1: No PL marker, so far only found for Real Time Images,\n # beam stability, or secondary ion beam centering files.\n if (self.header['analysis type'].endswith('rti') or\n self.header['file type'] == 35):\n hdr.seek(216, 1)\n elif self.header['file type'] == 31:\n if (self.header['analysis type'].endswith('hmr') or\n self.header['analysis type'].endswith('trolley step scan')):\n hdr.seek(120, 1)\n else:\n # secondary ion beam\n hdr.seek(600, 1)\n else:\n raise NotImplementedError('No PolyList marker found in header '\n 'and not and RTI image. 
Don\\'t know '\n 'how to continue.')\n elif (champslist_pos < 0 and offsetlist_pos < 0):\n # Case 2: PL, NS header\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n elif (polylist_pos < champslist_pos < offsetlist_pos):\n # Case 3: PL, CL, OL, NS header\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n self.header['ChampsList'] = self._pco_list(hdr, 'champs', champslist_pos)\n self.header['OffsetList'] = self._pco_list(hdr, 'offset', offsetlist_pos)\n elif (champslist_pos < offsetlist_pos < polylist_pos):\n # Case 4: PL, CL, OL, partial NS header, PL, NS header\n # with possible repeat\n self.header['ChampsList'] = self._pco_list(hdr, 'champs', champslist_pos)\n self.header['OffsetList'] = self._pco_list(hdr, 'offset', offsetlist_pos)\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n else:\n raise NotImplementedError(\n 'An unknown order of the Poly/Champs/Offset Lists occured.\\n'\n 'Positions: PL = {}, CL = {}, OL = {}'\n ''.format(polylist_pos, champslist_pos, offsetlist_pos))\n\n self.header['NanoSIMSHeader'] = self._nanosims_header(hdr)\n\n # How much to skip? Chomping does not work; what if first value is 0?\n # This is correct so far, for nsheader v8 and 9\n hdr.seek(948, 1)\n self.header['BFields'] = []\n for b in range(self.header['NanoSIMSHeader']['b fields']):\n bf = self._bfield(hdr)\n bf['counting frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['counting frame height'] * \\\n self.header['NanoSIMSHeader']['counting frame width']\n bf['scanning frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['scanning frame height'] * \\\n self.header['NanoSIMSHeader']['scanning frame width']\n bf['working frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['working frame height'] * \\\n self.header['NanoSIMSHeader']['working frame width']\n self.header['BFields'].append(bf)\n # End nanosims_header/bfield based on Poly_list position\n\n # Analytical parameters\n\n # anal_param is not in OpenMIMS at all, represents file\n # Cameca NanoSIMS Data/raw_spec/cur_anal_par\n # However, only few useful things in this section, all of\n # which are also in other sections. 
Skip.\n # if analparam_pos < 0:\n # msg = 'Anal_param not found in header, skipping.'\n # warnings.warn(msg)\n # else:\n # hdr.seek(analparam_pos + 24)\n # print(analparam_pos)\n # d = {}\n # d['primary ion'], d['primary current begin'], \\\n # d['primary current end'], d['raster'], \\\n # d['X 00 always 1.0'], \\\n # d['X 01 always 1'], d['X 02 always 0'], \\\n # d['X 03 always 1'], d['X 04 always 0'], \\\n # d['X 05 always 0'], d['X 06 (not0 always 0'], \\\n # d['X 07 (not) always 0'], d['X 08 always 0'], \\\n # d['pressure 1'], d['e0w'], d['X 09 always 35 or #'], \\\n # d['X 10 junk'], \\\n # d['X 11 always 1'], d['X 12 always 0'], \\\n # d['X 13 always 1'], d['X 14 always 0'], \\\n # d['X 15 always 0'], d['X 16 always 0'], \\\n # d['X 17 always 0'], d['X 18 always 0'], \\\n # d['X 19 always 0'], d['X 20 always 300'], \\\n # d['X 21'], d['X 22'], d['X 23'], d['X 24'], \\\n # d['pressure 2'], d['X 25 junk'] = \\\n # unpack(self._bo + '24s 4d 8i 48s d i 28s 14i 8s 176s', hdr.read(416))\n #\n # d['pressure 1'] = self._cleanup_string(d['pressure 1'])\n # d['pressure 2'] = self._cleanup_string(d['pressure 2'])\n # d['primary ion'] = self._cleanup_string(d['primary ion'])\n #\n # self.header['AnalParam'] = d\n\n # Called AnalyticalParamNano AND AnalysisParamNano in OpenMIMS.\n # Here, split out Primary and Secondary beam.\n # Represents the file Cameca NanoSIMS Data/raw_spec/cur_anal_par_nano\n if analparamnano_pos < 0:\n msg = 'Anal_param_nano not found in header, '\n msg += 'don\\'t know where PrimaryBeam section starts.'\n warnings.warn(msg)\n else:\n hdr.seek(analparamnano_pos + 16)\n self.header['analysis version'], self.header['n50large'], \\\n self.header['comment'] = \\\n unpack(self._bo + '2i 8x 256s', hdr.read(272))\n\n self.header['n50large'] = bool(self.header['n50large'])\n self.header['comment'] = self._cleanup_string(self.header['comment'])\n\n self.header['PrimaryBeam'] = self._primary_beam(hdr)\n self.header['SecondaryBeam'] = self._secondary_beam(hdr)\n self.header['Detectors'] = self._detectors1(hdr)\n\n self.header['SecondaryBeam']['E0S'] = self.header['Detectors'].pop('E0S')\n self.header['SecondaryBeam']['pressure multicollection chamber'] = \\\n self.header['Detectors'].pop('pressure multicollection chamber')\n\n # Add overall mode of machine, based on E0W\n if self.header['SecondaryBeam']['E0W'] < 0:\n self.header['polarity'] = '+'\n else:\n self.header['polarity'] = '-'\n\n # Combine pixel size from NanoSIMSHeader and raster from PrimaryBeam\n # Prevent ZeroDivisionError if undefined\n wfw = self.header['NanoSIMSHeader']['working frame width']\n if not wfw:\n wfw = 1\n self.header['NanoSIMSHeader']['working frame raster'] = \\\n self.header['PrimaryBeam']['raster']\n self.header['NanoSIMSHeader']['scanning frame raster'] = \\\n self.header['NanoSIMSHeader']['working frame raster'] * \\\n self.header['NanoSIMSHeader']['scanning frame width'] / wfw\n self.header['NanoSIMSHeader']['counting frame raster'] = \\\n self.header['NanoSIMSHeader']['working frame raster'] * \\\n self.header['NanoSIMSHeader']['counting frame width'] / wfw\n\n # Header for non-nano SIMS\n magic = unpack(self._bo + 'i', hdr.read(4))[0]\n if magic != 2306:\n msg = 'SIMSHeader magic number not found here at byte {}.'\n msg = msg.format(hdr.tell()-4)\n raise ValueError(msg)\n self.header['SIMSHeader'] = self._sims_header(hdr)\n\n if self.header['analysis version'] >= 5:\n if analparamnanobis_pos < 0:\n msg = 'Anal_param_nano_bis not found in header, '\n msg += 'don\\'t know where second Detectors 
section starts.'\n warnings.warn(msg)\n else:\n hdr.seek(analparamnanobis_pos + 24)\n self.header['Detectors'].update(self._detectors2(hdr))\n xl = self.header['Detectors'].pop('exit slit xl')\n for n in range(7):\n det = self.header['Detectors']['Detector {}'.format(n+1)]\n w = list(det['exit slit widths'])\n w[2] = xl[5*n:5*(n+1)]\n det['exit slit widths'] = tuple(w)\n h = list(det['exit slit heights'])\n h[2] = xl[5*(n+1):5*(n+2)]\n det['exit slit heights'] = tuple(h)\n\n # Presets\n self.header['Presets'] = self._presets(hdr)\n\n # End Detectors pt 2 based on anal_param_nano_bis position\n\n # Last part of detectors\n if self.header['analysis version'] >= 6:\n d3 = self._detectors3(hdr)\n self.header['Detectors']['TIC'] = d3.pop('TIC')\n for k, v in d3.items():\n self.header['Detectors'][k].update(v)\n # End PrimaryBeam/SecondaryBeam/Presets/Detectors based on anal_param_nano position\n\n # Image header, at end of overall header\n if self.header['file type'] == 26:\n hdr.seek(-176, 2)\n self.header['Isotopes'] = self._isotopes_hdr(hdr)\n elif self.header['file type'] in (21, 22, 31, 35):\n # no image header for line scan or beam stability\n pass\n else:\n hdr.seek(-84, 2)\n self.header['Image'] = self._image_hdr(hdr)\n\n # Done reading header. Check for and read external files for extra info.\n if os.path.exists(os.path.splitext(self.filename)[0] + '.chk_is'):\n self._read_chk_is()", "def build_W(points):\n return None", "def buildCwells(self):\n #Constructor begins here\n self.__dfclst=pd.read_csv(self.__workspace+'WELL\\\\'+'fileclist.csv',sep=',')\n print('*--- Preparing transport simulation, reading csv concentration wells ---*')\n self.__dfcwells=pd.read_csv(self.__workspace+'WELL\\\\'+'arraycdata.csv',sep=',')\n self.__ncwells=self.__dflst.shape[0]", "def get_header(header_row):\n header = {}\n header['station'], c1, c2, c3, date, time, tz = header_row.split()\n header['short_model'] = c1\n header['model'] = f'{c1} {c2} {c3}' \n header['runtime'] = dateutil.parser.parse(f'{date} {time} {tz}')\n return header", "def modelCS(self,ht=None,hf=None):\n if ht is not None:\n hf = time2freq(ht)\n cs,a,b,c = make_model_cs(hf, self.s0, self.bw, self.ref_freq)\n \n return cs", "def _header(self, path, files):\n headers = [fits.getheader(os.path.join(path, f))\n for f in sorted(files)]\n N = len(headers)\n\n def mean_key(headers, key, comment, type):\n return (np.mean([type(h[key]) for h in headers]), comment)\n\n h = fits.Header()\n h['BUNIT'] = 'e-/s'\n h['ORIGIN'] = 'Zwicky Transient Facility', 'Data origin'\n h['OBSERVER'] = 'ZTF Robotic Software', 'Observer'\n h['INSTRUME'] = 'ZTF/MOSAIC', 'Instrument name'\n h['OBSERVAT'] = 'Palomar Observatory', 'Observatory'\n h['TELESCOP'] = 'Palomar 48-inch', 'Observatory telescope'\n h['OBSLON'] = -116.8597, 'Observatory longitude (deg)'\n h['OBSLAT'] = 33.3483, 'Observatory latitude (deg E)'\n h['OBSALT'] = 1706., 'Observatory altitude (m)'\n h['IMGTYPE'] = 'object', 'Image type'\n h['NIMAGES'] = N, 'Number of images in stack'\n h['EXPOSURE'] = (sum([_['EXPOSURE'] for _ in headers]),\n 'Total stack exposure time (s)')\n if len(headers) == 0:\n return h\n\n h['MAGZP'] = 25.0, 'Magnitude zero point, solar color'\n h['MAGZPRMS'] = (\n np.sqrt(np.sum([h.get('MAGZPRMS', 0)**2 for h in headers])) / N,\n 'Mean MAGZP RMS')\n h['PCOLOR'] = headers[0]['PCOLOR']\n h['CLRCOEFF'] = mean_key(headers, 'CLRCOEFF',\n 'Mean color coefficient', float)\n\n h['OBSJD1'] = float(headers[0]['OBSJD']), 'First shutter start time'\n h['OBSJDN'] = 
float(headers[-1]['OBSJD']), 'Last shutter start time'\n h['OBSJDM'] = mean_key(\n headers, 'OBSJD', 'Mean shutter start time', float)\n\n wcsfn = sorted(files)[0]\n wcs = WCS(fits.getheader(os.path.join(path, wcsfn),\n extname='SANGLE'))\n h.update(wcs.to_header())\n h['WCSORIGN'] = wcsfn\n\n h['DBPID'] = (','.join([str(_['DBPID']) for _ in headers]),\n 'Database processed-image IDs')\n h['DESG'] = headers[0]['DESG'], 'Target designation'\n for k, comment in {\n 'RH': 'Mean heliocentric distance (au)',\n 'DELTA': 'Mean observer-target distance (au)',\n 'PHASE': 'Mean Sun-target-observer angle (deg)',\n 'RDOT': 'Mean heliocentric radial velocity, km/s',\n 'SELONG': 'Mean solar elongation, deg',\n 'SANGLE': 'Mean projected target->Sun position angle, deg',\n 'VANGLE': 'Mean projected velocity position angle, deg',\n 'TRUEANOM': 'Mean true anomaly (osculating), deg',\n 'TMTP': 'Mean T-Tp (osculating), days',\n 'TGTRA': 'Mean target RA, deg',\n 'TGTDEC': 'Mean target Dec, deg',\n 'TGTDRA': 'Mean target RA*cos(dec) rate of change,arcsec/s',\n 'TGTDDEC': 'Mean target Dec rate of change, arcsec/s',\n 'TGTRASIG': 'Mean target RA 3-sigma uncertainty, arcsec',\n 'TGTDESIG': 'Mean target Dec 3-sigma uncertainty, arcsec',\n }.items():\n try:\n h[k] = mean_key(headers, k, comment, float)\n except ValueError:\n # target rates might be empty strings\n h[k] = ''\n\n return h", "def build_csp(puzzle):\n # Enter your code here and remove the pass statement below\n variables = [(a,b) for a in range(0,9) for b in range(0,9)]\n domain = {}\n for x in variables:\n if x in puzzle:\n domain[x] = {puzzle[x]}\n else:\n domain[x] = {1, 2, 3, 4, 5, 6, 7, 8, 9}\n neighbors = get_neighbors(variables)\n constraint = get_constrains(neighbors)\n\n mySudoku = csp.CSP(domain, neighbors, constraint)\n return mySudoku", "def get_hucs(source, huc, level, out_crs=None, digits=None):\n # get the hu from source\n huc = workflow.sources.utils.huc_str(huc)\n if level is None:\n level = len(huc)\n\n logging.info(\"\")\n logging.info(f\"Loading level {level} HUCs in {huc}\")\n logging.info(\"-\"*30)\n \n profile, hus = source.get_hucs(huc, level)\n logging.info(\"... 
found {} HUCs\".format(len(hus)))\n for hu in hus:\n logging.info(' -- {}'.format(workflow.sources.utils.get_code(hu,level)))\n \n # convert to destination crs\n native_crs = workflow.crs.from_fiona(profile['crs'])\n if out_crs and not workflow.crs.equal(out_crs, native_crs):\n logging.info(\"Converting to out_crs\")\n for hu in hus:\n workflow.warp.shape(hu, native_crs, out_crs)\n else:\n out_crs = native_crs\n\n # round\n if digits != None:\n logging.info(\"Rounding coordinates\")\n workflow.utils.round_shapes(hus, digits)\n\n # convert to shapely\n logging.info(\"Converting to shapely\")\n hu_shapes = [workflow.utils.shply(hu) for hu in hus]\n return out_crs, hu_shapes", "def test_create_fitswcs(tmpdir, create_model_3d):\n im = create_model_3d\n w3d = pointing.create_fitswcs(im)\n gra, gdec, glam = w3d(1, 1, 1)\n\n path = str(tmpdir.join(\"fitswcs.fits\"))\n im.save(path)\n with fits.open(path) as hdulist:\n hdu = hdulist[\"SCI\"]\n w = wcs.WCS(hdu.header)\n wcel = w.sub(['celestial'])\n ra, dec = wcel.all_pix2world(1, 1, 0)\n\n # Check that astropy.wcs.WCS and gwcs.WCS give same result\n assert_allclose((ra, dec), (gra, gdec))", "def makeSourceCat(self, distortedWcs):\n loadRes = self.refObjLoader.loadPixelBox(bbox=self.bbox, wcs=distortedWcs, filterName=\"r\")\n refCat = loadRes.refCat\n refCentroidKey = afwTable.Point2DKey(refCat.schema[\"centroid\"])\n refFluxRKey = refCat.schema[\"r_flux\"].asKey()\n\n sourceSchema = afwTable.SourceTable.makeMinimalSchema()\n measBase.SingleFrameMeasurementTask(schema=sourceSchema) # expand the schema\n sourceCat = afwTable.SourceCatalog(sourceSchema)\n sourceCentroidKey = afwTable.Point2DKey(sourceSchema[\"slot_Centroid\"])\n sourceInstFluxKey = sourceSchema[\"slot_ApFlux_instFlux\"].asKey()\n sourceInstFluxErrKey = sourceSchema[\"slot_ApFlux_instFluxErr\"].asKey()\n\n sourceCat.reserve(len(refCat))\n for refObj in refCat:\n src = sourceCat.addNew()\n src.set(sourceCentroidKey, refObj.get(refCentroidKey))\n src.set(sourceInstFluxKey, refObj.get(refFluxRKey))\n src.set(sourceInstFluxErrKey, refObj.get(refFluxRKey)/100)\n return sourceCat", "def _new_instance(cls, data, wcs, errors=None, **kwargs):\n return cls(data, wcs, errors=errors, **kwargs)", "def _build_http_header(self) -> Dict[str, str]:\n return {}", "def build_geometry(self):\n\n Rbo = self.get_Rbo()\n\n point_dict = self._comp_point_coordinate()\n Z1 = point_dict[\"Z1\"]\n Z2 = point_dict[\"Z2\"]\n Z3 = point_dict[\"Z3\"]\n Z4 = point_dict[\"Z4\"]\n Z5 = point_dict[\"Z5\"]\n Z6 = point_dict[\"Z6\"]\n Z7 = point_dict[\"Z7\"]\n Z8 = point_dict[\"Z8\"]\n Z9 = point_dict[\"Z9\"]\n Z10 = point_dict[\"Z10\"]\n\n # Creation of curve\n curve_list = list()\n curve_list.append(Segment(Z1, Z2))\n curve_list.append(Arc1(Z2, Z3, -Rbo + self.H0, is_trigo_direction=False))\n curve_list.append(Arc1(Z3, Z4, -self.R1, is_trigo_direction=False))\n curve_list.append(Segment(Z4, Z5))\n curve_list.append(Arc1(Z5, Z6, Rbo - self.H0 - self.H2, is_trigo_direction=True))\n curve_list.append(Segment(Z6, Z7))\n curve_list.append(Arc1(Z7, Z8, -self.R1, is_trigo_direction=False))\n curve_list.append(Arc1(Z8, Z9, -Rbo + self.H0, is_trigo_direction=False))\n curve_list.append(Segment(Z9, Z10))\n\n return curve_list", "def build_var_header():\n # var_id will only be populated by... ew.. 
this one is difficult...\n # \"var_id\" # - need to generate these...\n meta_header = OrderedDict()\n meta_header['sample'] = str\n meta_header['chrom'] = str\n meta_header['start'] = np.int16\n meta_header['end'] = np.int16\n meta_header['var_type'] = str\n meta_header['state'] = str\n\n # Target - made from parsing truvari/rtg information\n # \"state\" #\n\n info_header = OrderedDict()\n info_header[\"POP\"] = bool\n info_header[\"VARLEN\"] = np.int32\n info_header[\"NUMASM\"] = np.int16\n \n fmt_header = OrderedDict() \n # Categorical\n # need to categorize these... ./. 0/0 0/1 1/1 # only possibilities\n fmt_header[\"GT\"] = np.int8 \n # fmt_header[\"PG\"] = np.int32 # don't use..\n fmt_header[\"GQ\"] = np.int8\n # fmt_header[\"PI\"] = don't use\n fmt_header[\"OV\"] = np.int8\n fmt_header[\"DP\"] = np.int16\n #split where _r is ref-allele and _a is alt-allele\n fmt_header[\"AD_r\"] = np.int16\n fmt_header[\"AD_a\"] = np.int16\n fmt_header[\"PDP\"] = np.int16\n fmt_header[\"PAD_r\"] = np.int16\n fmt_header[\"PAD_a\"] = np.int16\n fmt_header[\"US_r\"] = np.int16\n fmt_header[\"US_a\"] = np.int16\n fmt_header[\"DS_r\"] = np.int16\n fmt_header[\"DS_a\"] = np.int16\n fmt_header[\"UC_r\"] = np.int16\n fmt_header[\"UC_a\"] = np.int16\n fmt_header[\"DC_r\"] = np.int16\n fmt_header[\"DC_a\"] = np.int16\n fmt_header[\"UDC_r\"] = np.int16\n fmt_header[\"UDC_a\"] = np.int16\n fmt_header[\"UCC_r\"] = np.int16\n fmt_header[\"UCC_a\"] = np.int16\n fmt_header[\"DDC_r\"] = np.int16\n fmt_header[\"DDC_a\"] = np.int16\n fmt_header[\"DCC_r\"] = np.int16\n fmt_header[\"DCC_a\"] = np.int16\n fmt_header[\"UMO_r\"] = np.int16\n fmt_header[\"UMO_a\"] = np.int16\n fmt_header[\"DMO_r\"] = np.int16\n fmt_header[\"DMO_a\"] = np.int16\n fmt_header[\"UXO_r\"] = np.int16\n fmt_header[\"UXO_a\"] = np.int16\n fmt_header[\"DXO_r\"] = np.int16\n fmt_header[\"DXO_a\"] = np.int16\n fmt_header[\"NR_r\"] = np.int16\n fmt_header[\"NR_a\"] = np.int16\n fmt_header[\"MO_r\"] = np.int16\n fmt_header[\"MO_a\"] = np.int16\n fmt_header[\"XO_r\"] = np.int16\n fmt_header[\"XO_a\"] = np.int16\n fmt_header[\"XC_r\"] = np.int16\n fmt_header[\"XC_a\"] = np.int16\n fmt_header[\"AC_r\"] = np.int16\n fmt_header[\"AC_a\"] = np.int16\n fmt_header[\"MC_r\"] = np.int16\n fmt_header[\"MC_a\"] = np.int16\n fmt_header[\"EC_r\"] = np.int16\n fmt_header[\"EC_a\"] = np.int16\n fmt_header[\"PL_ref\"] = np.int8\n fmt_header[\"PL_het\"] = np.int8\n fmt_header[\"PL_hom\"] = np.int8\n\n ret_header = OrderedDict()\n ret_header.update(meta_header)\n ret_header.update(info_header)\n ret_header.update(fmt_header)\n return ret_header", "def makeMetadata(self):\n # arbitrary values\n orientation = 0 * degrees\n flipX = False\n metadata = makeTanWcsMetadata(\n crpix = self.crpix,\n crval = self.crval,\n cdMatrix = makeCdMatrix(scale=self.scale, orientation=orientation, flipX=flipX),\n )\n self.assertEqual(metadata.nameCount(), 14)\n metadata.add(\"SIMPLE\", True)\n metadata.add(\"BITPIX\", 16)\n metadata.add(\"NAXIS\", 2)\n metadata.add(\"NAXIS1\", 500)\n metadata.add(\"NAXIS2\", 200)\n metadata.add(\"BZERO\", 32768)\n metadata.add(\"BSCALE\", 1)\n metadata.add(\"TIMESYS\", \"UTC\")\n metadata.add(\"UTC-OBS\", \"12:04:45.73\")\n metadata.add(\"DATE-OBS\", \"2006-05-20\")\n metadata.add(\"EXPTIME\", 5.0)\n metadata.add(\"COMMENT\", \"a comment\")\n metadata.add(\"COMMENT\", \"another comment\")\n metadata.add(\"EXTEND\", True)\n metadata.add(\"INHERIT\", False)\n metadata.add(\"LTV1\", 5)\n metadata.add(\"LTV2\", -10)\n metadata.add(\"ZOTHER\", \"non-standard\")\n 
return metadata", "def _new_wos_dict():\n wos_dict = {\n 'DI': None,\n 'TI': None,\n 'PY': None,\n 'SO': None,\n 'UT': None,\n 'DE': None,\n }\n\n return wos_dict", "def __init__(self,\n name: str = \"Untitled Wing\",\n xyz_le: np.ndarray = np.array([0, 0, 0]),\n xsecs: List['WingXSec'] = [],\n symmetric: bool = False,\n ):\n self.name = name\n self.xyz_le = np.array(xyz_le)\n self.xsecs = xsecs\n self.symmetric = symmetric", "def WCS(imname, outname, astronet=False, timeout=None):\n\n print_cmd_line(\"STAP_WCS.py\", imname, outname,\n astronet=astronet, timeout=timeout)\n\n if astronet:\n cmd = \"solve-mosaic_single.py {0} {1}\".format(imname,outname)\n else:\n cmd = \"{0}/bin/SM-WCS-perchip.py {1} --outname {2}\".format(\n os.environ['BRIANWCS'],imname,outname)\n\n status, stdoutstr = STAP_callexternal(cmd, combinestderr=True,\n getstdout=True, timeout=timeout)\n print stdoutstr\n if status != 0:\n os.system('rm -fr {0}'.format(outname))\n raise ExternalFailure(cmd=cmd, exit_code=status)\n\n # Check to ensure the solution makes sense\n print \"Initiating sanity checks...\"\n with pyfits.open(outname, mode='update') as hdulist:\n hdr = hdulist[0].header\n cd11, cd12 = hdr['CD1_1'], hdr['CD1_2']\n cd21, cd22 = hdr['CD2_1'], hdr['CD2_2']\n # Is the plate scale right?\n psx = 3600*math.sqrt(cd11**2 + cd12**2)\n psy = 3600*math.sqrt(cd21**2 + cd22**2)\n print \" psx, psy =\", psx, psy, \"arcsec/pix\"\n if (abs(1.0-psx/Imager.pixel_scale) > 0.05 or\n abs(1.0-psx/Imager.pixel_scale) > 0.05):\n os.system('rm -fr {0}'.format(outname))\n raise TrackableException(\"WCS solution doesn't make sense\")\n # Are the axes orthogonal?\n ctheta = (cd11*cd21 + cd12*cd22)/(psx*psy)\n theta = math.acos(ctheta)*180/math.pi\n print \" ctheta =\", ctheta, \"theta =\", theta, \"deg\"\n if abs(ctheta) > 0.01:\n os.system('rm -fr {0}'.format(outname))\n raise TrackableException(\"WCS solution doesn't make sense\")\n # What's the position angle?\n if not astronet:\n pa = math.atan2(cd12, cd11)\n print \" pa =\", pa*180/math.pi, \"deg\"\n if abs(math.sin(pa)) > 0.02:\n os.system('rm -fr {0}'.format(outname))\n raise TrackableException(\"WCS solution doesn't make sense\")\n print \"All checks done, WCS makes sense.\"", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(ins, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.Time is None:\n self.Time = 0.\n if self.Week is None:\n self.Week = 0\n if self.Status is None:\n self.Status = 0\n if self.RPY is None:\n self.RPY = geometry_msgs.msg.Vector3()\n if self.LLA is None:\n self.LLA = geometry_msgs.msg.Vector3()\n if self.NedVel is None:\n self.NedVel = geometry_msgs.msg.Vector3()\n if self.YawUncertainty is None:\n self.YawUncertainty = 0.\n if self.PitchUncertainty is None:\n self.PitchUncertainty = 0.\n if self.RollUncertainty is None:\n self.RollUncertainty = 0.\n if self.PosUncertainty is None:\n self.PosUncertainty = 0.\n if self.VelUncertainty is None:\n self.VelUncertainty = 0.\n if self.SyncInTime is None:\n self.SyncInTime = 0.\n if self.SyncInCount is None:\n self.SyncInCount = 0\n else:\n self.header = std_msgs.msg.Header()\n self.Time = 0.\n self.Week = 0\n self.Status = 0\n self.RPY = geometry_msgs.msg.Vector3()\n self.LLA = geometry_msgs.msg.Vector3()\n self.NedVel = geometry_msgs.msg.Vector3()\n self.YawUncertainty = 0.\n self.PitchUncertainty = 0.\n self.RollUncertainty = 0.\n self.PosUncertainty = 0.\n 
self.VelUncertainty = 0.\n self.SyncInTime = 0.\n self.SyncInCount = 0", "def create_dat_from_shapefile(windgrid_file, DAT_header, output_file, wind_field='Vg_mph'):\n t0 = time()\n output_file = output_file + '.dat'\n print('reading windgrid')\n t1 = time()\n gdf = gpd.read_file(windgrid_file)\n create_dat_from_geodataframe(\n gdf, DAT_header, output_file, wind_field=wind_field)", "def init_pos_parms(self):\n\n ## init_pos_parms()\n parms = {}\n\n # length axis\n parms['length_attribute_road'] = ('top', 'cl', 'bottom')[self.segment]\n parms['length_attribute_artifact'] = ('top', 'cl', 'bottom')[self.pos_length]\n\n # width axis\n if type(self.pos_width) is int:\n parms['width_road_rect'] = self.road.lanes[self.pos_width]\n parms['width_attribute_road'] = 'cw'\n parms['width_attribute_artifact'] = 'cw'\n else:\n parms['width_road_rect'] = self.road\n pos_parms = {'l': ('left', 'right'),\n 'c': ('cw', 'cw'),\n 'r': ('right', 'left')}[self.pos_width]\n parms['width_attribute_road'] = pos_parms[0]\n parms['width_attribute_artifact'] = pos_parms[1]\n return parms", "def header(self):\n\n data = {}\n data['latitude'] = self.latitude()\n data['latitude_unc'] = self.latitude_unc()\n data['longitude'] = self.longitude()\n data['longitude_unc'] = self.longitude_unc()\n data['uid'] = self.uid()\n data['n_levels'] = self.n_levels()\n data['year'] = self.year()\n data['month'] = self.month()\n data['day'] = self.day()\n data['time'] = self.time()\n data['cruise'] = self.cruise()\n data['probe_type'] = self.probe_type()\n \n header = pd.Series(data)\n\n return header", "def update_header(self) -> None:\n self.header.partial_reset()\n self.header.point_format_id = self.points.point_format.id\n self.header.point_data_record_length = self.points.point_size\n\n if len(self.points) > 0:\n self.header.update(self.points)\n\n if self.header.version.minor >= 4:\n if self.evlrs is not None:\n self.header.number_of_evlrs = len(self.evlrs)\n self.header.start_of_waveform_data_packet_record = 0\n # TODO\n # if len(self.vlrs.get(\"WktCoordinateSystemVlr\")) == 1:\n # self.header.global_encoding.wkt = 1\n else:\n self.header.number_of_evlrs = 0", "def __init__(self, geom_input, srs=None):\n str_instance = isinstance(geom_input, str)\n\n # If HEX, unpack input to a binary buffer.\n if str_instance and hex_regex.match(geom_input):\n geom_input = memoryview(bytes.fromhex(geom_input))\n str_instance = False\n\n # Constructing the geometry,\n if str_instance:\n wkt_m = wkt_regex.match(geom_input)\n json_m = json_regex.match(geom_input)\n if wkt_m:\n if wkt_m[\"srid\"]:\n # If there's EWKT, set the SRS w/value of the SRID.\n srs = int(wkt_m[\"srid\"])\n if wkt_m[\"type\"].upper() == \"LINEARRING\":\n # OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.\n # See https://trac.osgeo.org/gdal/ticket/1992.\n g = capi.create_geom(OGRGeomType(wkt_m[\"type\"]).num)\n capi.import_wkt(g, byref(c_char_p(wkt_m[\"wkt\"].encode())))\n else:\n g = capi.from_wkt(\n byref(c_char_p(wkt_m[\"wkt\"].encode())), None, byref(c_void_p())\n )\n elif json_m:\n g = self._from_json(geom_input.encode())\n else:\n # Seeing if the input is a valid short-hand string\n # (e.g., 'Point', 'POLYGON').\n OGRGeomType(geom_input)\n g = capi.create_geom(OGRGeomType(geom_input).num)\n elif isinstance(geom_input, memoryview):\n # WKB was passed in\n g = self._from_wkb(geom_input)\n elif isinstance(geom_input, OGRGeomType):\n # OGRGeomType was passed in, an empty geometry will be created.\n g = capi.create_geom(geom_input.num)\n elif isinstance(geom_input, 
self.ptr_type):\n # OGR pointer (c_void_p) was the input.\n g = geom_input\n else:\n raise GDALException(\n \"Invalid input type for OGR Geometry construction: %s\"\n % type(geom_input)\n )\n\n # Now checking the Geometry pointer before finishing initialization\n # by setting the pointer for the object.\n if not g:\n raise GDALException(\n \"Cannot create OGR Geometry from input: %s\" % geom_input\n )\n self.ptr = g\n\n # Assigning the SpatialReference object to the geometry, if valid.\n if srs:\n self.srs = srs\n\n # Setting the class depending upon the OGR Geometry Type\n self.__class__ = GEO_CLASSES[self.geom_type.num]", "def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret", "def _nanosims_header(self, hdr):\n # Called MaskNano in OpenMIMS; BFieldTab separated out; create extra sub-dict PeakCenter\n d = {}\n d['PeakCenter'] = {}\n d['nanosimsheader version'], d['regulation mode'], d['mode'], \\\n d['grain mode'], d['semigraphic mode'], d['stage delta x'], \\\n d['stage delta y'], d['working frame width'], \\\n d['working frame height'], d['scanning frame x'], \\\n d['scanning frame width'], d['scanning frame y'], \\\n d['scanning frame height'], d['counting frame x start'], \\\n d['counting frame x end'], d['counting frame y start'], \\\n 
d['counting frame y end'], d['detector type'], d['electron scan'], \\\n d['scanning mode'], d['beam blanking'], \\\n d['PeakCenter']['peakcenter enabled'], d['PeakCenter']['start'], \\\n d['PeakCenter']['frequency'], d['b fields'] = \\\n unpack(self._bo + '25i', hdr.read(100))\n\n d['PeakCenter']['peakcenter enabled'] = bool(d['PeakCenter']['peakcenter enabled'])\n d['regulation mode'] = bool(d['regulation mode'])\n d['grain mode'] = bool(d['grain mode'])\n d['semigraphic mode'] = bool(d['semigraphic mode'])\n d['scanning mode'] = bool(d['scanning mode'])\n\n # Set a few extra variables.\n d['counting frame width'] = d['counting frame x end'] - d['counting frame x start'] + 1\n d['counting frame height'] = d['counting frame y end'] - d['counting frame y start'] + 1\n\n # Found in at least one version (file v11, nsHeader v8) a repeat of\n # Poly_list and this first part of nanoSIMSHeader. Total of repeat\n # adds up to 288. After last Poly_list, 288 byte padding zone, not all\n # null-bytes.\n hdr.seek(288, 1)\n\n # Is this the nPrintRed from OpenMIMS?\n d['print results'] = bool(unpack(self._bo + 'i', hdr.read(4))[0])\n\n d['SibCenterHor'] = self._sib_center(hdr)\n d['SibCenterVert'] = self._sib_center(hdr)\n\n # Duplicate and store these two in sub dicts\n b_field_index, has_sib_center = \\\n unpack(self._bo + '2i', hdr.read(8))\n if b_field_index < 0:\n b_field_index = None\n has_sib_center = bool(has_sib_center)\n\n d['SibCenterHor']['b field index'] = b_field_index\n d['SibCenterVert']['b field index'] = b_field_index\n d['SibCenterHor']['sib center enabled'] = has_sib_center\n d['SibCenterVert']['sib center enabled'] = has_sib_center\n\n d['EnergyCenter'] = self._energy_center(hdr)\n d['E0SCenter'] = self._e0s_center(hdr)\n\n d['EnergyCenter']['wait time'], d['presputtering raster'], \\\n d['PeakCenter']['E0P offset'], d['E0SCenter']['steps'], \\\n d['baseline measurement'], d['baseline offset'], \\\n d['baseline frequency'] = \\\n unpack(self._bo + '5i d i', hdr.read(32))\n return d", "def create_header(analysis_outdir, metadata, rg_dict, specimen_dict, logger=default_logger):\n\n rgid = rg_dict[\"ID\"].replace(\".\", \"_\")\n header = \"%s/header-%s.sam\" %(analysis_outdir, rg_dict[\"ID\"])\n header_file = open(header, \"w\")\n header_file.write(\"@HD\\tVN:1.4\\n\")\n PI_STR = \"\"\n if len(rg_dict[\"PI\"]):\n PI_STR=\"PI:%s\\t\" % (rg_dict[\"PI\"])\n header_file.write(\"@RG\\tID:%s:%s\\tCN:%s\\tPL:%s\\tPM:%s\\tLB:%s:%s:%s\\t%sSM:%s\\tPU:%s:%s\\tDT:%s\\n\"\n %(metadata[\"center_name\"], rgid,metadata[\"center_name\"], metadata[\"platform\"],metadata[\"platform_model\"], metadata[\"seqtype\"],\n metadata[\"center_name\"], rg_dict[\"LB\"], PI_STR, metadata[\"aliquot_id\"], rg_dict[\"CN\"], rg_dict[\"PU\"], getUTCDate(rg_dict[\"DT\"])))\n header_file.write(\"@CO\\tdcc_project_code:%s-US\\n\" %metadata[\"disease\"])\n header_file.write(\"@CO\\tsubmitter_donor_id:%s\\n\" %metadata[\"participant_id\"])\n header_file.write(\"@CO\\tsubmitter_specimen_id:%s\\n\" %metadata[\"sample_id\"])\n header_file.write(\"@CO\\tsubmitter_sample_id:%s\\n\" %metadata[\"aliquot_id\"])\n\n if metadata[\"sample_type\"] not in specimen_dict:\n msg = \"sample_type %s not found in specimen mapping\" % metadata[\"sample_type\"]\n logger.error(msg)\n if not FORCE_RUN:\n raise HeaderException(msg)\n\n if \"sample_type\" in metadata and metadata[\"sample_type\"] in specimen_dict:\n (icgc_type, sample_class) = specimen_dict[metadata[\"sample_type\"]]\n else:\n icgc_type = \"unknown\"\n sample_class = \"unknown\"\n\n 
#Sanity check about use_cntl\n if \"use_cntl\" in metadata:\n if metadata[\"use_cntl\"] == \"N/A\" and sample_class == \"tumour\":\n msg = \"Tumour sample requires use_cntl, set to %s. Are your IDs in the wrong order?\" % metadata[\"use_cntl\"]\n logger.error(msg)\n raise HeaderException(msg)\n if sample_class == \"normal\" and metadata[\"use_cntl\"] != \"N/A\":\n msg = \"Normal sample requires N/A use_cntl, set to %s. Are your IDs in the wrong order?\" % metadata[\"use_cntl\"]\n logger.error(msg)\n raise HeaderException(msg)\n\n header_file.write(\"@CO\\tdcc_specimen_type:%s\\n\" % icgc_type)\n header_file.write(\"@CO\\tuse_cntl:%s\\n\" %(metadata.get(\"use_cntl\", \"NA\")))\n header_file.close()\n return header", "def __init__(self):\n OWSReport.__init__(self)\n self.stats['type'] = 'OGC:WMS'\n self.stats['operations']['GetMap'] = {}\n self.stats['operations']['GetMap']['hits'] = 0\n self.stats['operations']['GetMap']['resource'] = {}\n self.stats['operations']['GetMap']['resource']['param'] = 'layers'\n self.stats['operations']['GetMap']['resource']['list'] = {}\n self.stats['operations']['GetFeatureInfo'] = {}\n self.stats['operations']['GetFeatureInfo']['hits'] = 0\n self.stats['operations']['GetLegendGraphic'] = {}\n self.stats['operations']['GetLegendGraphic']['hits'] = 0\n self.stats['operations']['GetStyles'] = {}\n self.stats['operations']['GetStyles']['hits'] = 0\n self.stats['operations']['DescribeLayer'] = {}\n self.stats['operations']['DescribeLayer']['hits'] = 0", "def build(self):\n self.kwargs.pop('clobber', None)\n\n # Read in mock catalog with assigned photometric redshifts\n # and calculate the line-of-sight displacement between the \n # upweighted galaxy and the photometric redshift of the \n # collided galaxy \n photoz_cat_corr = {\n 'catalog': self.cat_corr['catalog'].copy(), \n 'correction': {'name': 'photoz'}\n }\n dataclass = Data('data', photoz_cat_corr) \n dataclass.read() \n\n cosmo = dataclass.cosmo()\n\n coll = np.where(dataclass.wfc == 0) \n \n dlos_actual = (cosmos.distance.comoving_distance(dataclass.z[coll], **cosmo) - \\\n cosmos.distance.comoving_distance(dataclass.zupw[coll], **cosmo)) * cosmo['h']\n dlos_photoz = (cosmos.distance.comoving_distance(dataclass.photoz[coll], **cosmo) - \\\n cosmos.distance.comoving_distance(dataclass.zupw[coll], **cosmo)) * cosmo['h']\n\n # each value of d_NN corresponds to a dLOS value \n # in dLOS file \n print self.file_name\n np.savetxt(self.file_name, \n np.c_[dlos_actual, dlos_photoz], \n fmt=['%10.5f', '%10.5f'],\n header='Columns : dLOS, dLOS_photoz'\n ) \n\n return None", "def _build_sparse(self, name, wrt, consize, param_vals, sub_param_conns,\n full_param_conns, rels):\n\n jac = None\n\n # Additional sparsity for index connections\n for param in wrt:\n\n sub_conns = sub_param_conns.get(param)\n if not sub_conns:\n continue\n\n # If we have a simultaneous full connection, then we move on\n full_conns = full_param_conns.get(param)\n if full_conns.intersection(rels):\n continue\n\n rel_idx = set()\n for target, idx in iteritems(sub_conns):\n\n # If a target of the indexed desvar connection is\n # in the relevant path for this constraint, then\n # those indices are relevant.\n if target in rels:\n rel_idx.update(idx)\n\n nrel = len(rel_idx)\n if nrel > 0:\n\n if jac is None:\n jac = {}\n\n if param not in jac:\n # A coo matrix for the Jacobian\n # mat = {'coo':[row, col, data],\n # 'shape':[nrow, ncols]}\n coo = {}\n coo['shape'] = [consize, len(param_vals[param])]\n jac[param] = coo\n\n row = []\n col = []\n for i 
in range(consize):\n row.extend([i]*nrel)\n col.extend(rel_idx)\n data = np.ones((len(row), ))\n\n jac[param]['coo'] = [np.array(row), np.array(col), data]\n\n if name not in self.sub_sparsity:\n self.sub_sparsity[name] = {}\n self.sub_sparsity[name][param] = np.array(list(rel_idx))\n\n return jac", "def create(self):\n # TODO: Properly validate data\n self._proj()\n if self.cfg.align_heading:\n self._align()\n self._griddata()\n if self.cfg.gap_filter[\"algorithm\"] != \"none\":\n self._gap_filter()", "def load_sps(zcontinuous=1, **extras):\n from prospect.sources import CSPSpecBasis\n sps = CSPSpecBasis(zcontinuous=zcontinuous)\n return sps", "def getDictCWells(self,itype):\n #Method begins here\n #nx=self.__grid['nx'] #From the geometry in grid\n ny=self.__grid['ny']\n nz=self.__grid['nz']\n minx=self.__grid['ox']\n miny=self.__grid['oy']\n minz=self.__grid['oz']\n rx=self.__grid['dx']\n ry=self.__grid['dy']\n rz=self.__grid['dz']\n \n # well package\n # Remember to use zero-based layer, row, column indices!\n lcoordw=np.zeros((self.__ncwells,3),dtype=np.int32)\n for i in range (self.__ncwells):\n lcoordw[i,0]=floor((self.__dfclst.iloc[i,3]-minx)/rx)\n #In MODFLOW y ans z coordinates are inverted\n lcoordw[i,1]=floor((miny+ry*ny-self.__dfclst.iloc[i,4])/ry)\n lcoordw[i,2]=floor((minz+rz*nz-self.__dfclst.iloc[i,5])/rz)\n \n nper=self.__df.getForcPer()\n ssm_data = {}\n print('Number of conc periods='+str(nper)) \n for i in range(nper):\n lst=[]\n for j in range(self.__ncwells):\n conc_rate=self.__dfcwells.iloc[i+1,j+1]\n lst.append( [ lcoordw[j,2], lcoordw[j,1], lcoordw[j,0], conc_rate, itype['WEL'] ] )\n ssm_data[i]=lst\n print(ssm_data)\n \n print('*--- Succesfull reading of concentration wells ---*')\n \n return ssm_data" ]
[ "0.76218075", "0.6315264", "0.62880224", "0.61376697", "0.6050542", "0.59938204", "0.599007", "0.59843117", "0.5939777", "0.59356636", "0.5899153", "0.5899032", "0.58228743", "0.58159834", "0.5745454", "0.5663349", "0.565065", "0.5642652", "0.55831504", "0.55391955", "0.55139893", "0.5461432", "0.54195976", "0.53471434", "0.5313975", "0.53135854", "0.53046393", "0.52834886", "0.5222583", "0.5222049", "0.51564956", "0.51447904", "0.5114657", "0.5096941", "0.5089444", "0.50868773", "0.5072009", "0.50401944", "0.5008303", "0.4985135", "0.49402925", "0.4937887", "0.49230358", "0.49184886", "0.49169883", "0.49027207", "0.4874015", "0.48679906", "0.48558873", "0.4854182", "0.48383036", "0.4836527", "0.48334387", "0.48211396", "0.48150876", "0.4803064", "0.4788963", "0.47889003", "0.47728437", "0.47618243", "0.4753982", "0.4747516", "0.4746445", "0.47346714", "0.4730279", "0.47275695", "0.47179142", "0.4702931", "0.47020584", "0.4700285", "0.46962342", "0.46950513", "0.46948823", "0.46915424", "0.46890903", "0.46850455", "0.46841398", "0.46700364", "0.46626797", "0.46607718", "0.46451032", "0.46247083", "0.46237752", "0.46214336", "0.46091124", "0.46074313", "0.4603648", "0.45911142", "0.45851362", "0.45773676", "0.45692766", "0.4559678", "0.4557473", "0.4548023", "0.4546263", "0.45454183", "0.45397487", "0.45369476", "0.45364177", "0.45351237" ]
0.60295844
5
Plots the extraction region.
def show_rgn(ax, rgn, **kwargs):
    alpha = 0.1
    #lw = 0.1
    if rgn['shape'] == 'box':
        # outline the four edges of the box
        ax.plot([rgn['params']['blcx']]*2, [rgn['params']['blcy'],rgn['params']['trcy']], 'r-', **kwargs)
        ax.plot([rgn['params']['blcx'],rgn['params']['trcx']], [rgn['params']['blcy']]*2, 'r-', **kwargs)
        ax.plot([rgn['params']['blcx'],rgn['params']['trcx']], [rgn['params']['trcy']]*2, 'r-', **kwargs)
        ax.plot([rgn['params']['trcx']]*2, [rgn['params']['blcy'],rgn['params']['trcy']], 'r-', **kwargs)
    elif rgn['shape'] == 'circle':
        # translucent filled circle patch in data coordinates
        patch = mpatches.Circle((rgn['params']['cx'], rgn['params']['cy']), rgn['params']['r'], alpha=alpha, transform=ax.transData)
        #plt.figure().artists.append(patch)
        ax.add_patch(patch)
    elif rgn['shape'] == 'polygon':
        # one translucent patch per polygon in the region
        for poly in rgn['params']['Polygons']:
            patch = mpatches.Polygon(poly.get_vertices(), closed=True, alpha=alpha, transform=ax.transData)
            ax.add_patch(patch)
    elif rgn['shape'] == 'pixel':
        # mark a single pixel (fixed: 'region' was undefined, the parameter is 'rgn')
        ax.plot(rgn['params']['cy'], rgn['params']['cx'], 'rs', ms=5)
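A minimal usage sketch, not part of the original source: it assumes matplotlib.pyplot (plt) and matplotlib.patches (mpatches) are imported alongside show_rgn, and uses a hypothetical box region dict with the keys the function reads (blcx/blcy/trcx/trcy); the coordinate values are illustrative only.

import matplotlib.pyplot as plt
import matplotlib.patches as mpatches

fig, ax = plt.subplots()
# hypothetical extraction region: bottom-left corner (10, 10), top-right corner (40, 30)
rgn = {'shape': 'box',
       'params': {'blcx': 10, 'blcy': 10, 'trcx': 40, 'trcy': 30}}
show_rgn(ax, rgn, lw=1.5)  # extra kwargs are forwarded to ax.plot
plt.show()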
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot(self):\n pass", "def plot(self):\n\t\tself.plotOfXray().plot()", "def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def plot(self):\n\t\tself.plotOfIP().plot()", "def show(self):\n plt.show()", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def plot(self):\n\t\tself.plotOfSpect()", "def visualize(self):\r\n self.aggregator.plot_loss()\r\n self.save_figure()", "def plot(self):\n self.fig = plt.figure('black hole')\n self.fig.clf() #clear the graph to avoir superposing data from the same set (can be deactivated if need to superpose)\n self.ax = plt.subplot()\n\n if self.img2 is not None:\n self.ax.imshow(self.img2)\n else:\n print(\"No black hole deformation in the memory, displayed the original image instead.\")\n self.ax.imshow(self.img_debut)\n\n self.fig.canvas.set_window_title('Black hole')\n self.ax.set_title(\"scrool to zoom in or out \\nright click to add an offset in the background \\nleft click to refresh image \\n close the option windows to stop the program\")\n self.fig.canvas.mpl_connect('scroll_event', self.onscroll)\n self.fig.canvas.mpl_connect('button_press_event', self.onclick)\n self.fig.canvas.mpl_connect('axes_leave_event', self.disconnect)\n self.fig.canvas.mpl_connect('axes_enter_event', self.connect)\n\n self.draw()", "def visualize_in_scan(self, verbose=True):\n images = self.scan.load_all_dicom_images(verbose)\n \n # Preload contours and sort them by z pos.\n contours = sorted(self.contours, key=lambda c: c.image_z_position)\n fnames = self.scan.sorted_dicom_file_names.split(',')\n index_of_contour = [fnames.index(c.dicom_file_name) for c in contours]\n\n fig = plt.figure(figsize=(16,8))\n\n min_slice = min(index_of_contour)\n max_slice = max(index_of_contour)\n current_slice = min_slice\n\n ax_image = fig.add_axes([0.5,0.0,0.5,1.0])\n img = ax_image.imshow(images[current_slice].pixel_array,\n cmap=plt.cm.gray)\n\n contour_lines = []\n # We draw all the contours initally and set the visibility\n # to False. 
This works better than trying create and destroy\n # plots every time we update the image.\n for i,c in enumerate(contours):\n arr = c.to_matrix()\n cc, = ax_image.plot(arr[:,0], arr[:,1], '-r')\n cc.set_visible(i==0) # Set the first contour visible.\n contour_lines.append( cc )\n ax_image.set_xlim(-0.5,511.5); ax_image.set_ylim(511.5,-0.5)\n ax_image.axis('off')\n \n # Add the scan info table\n ax_scan_info = fig.add_axes([0.1, 0.8, 0.3, 0.1])\n ax_scan_info.set_axis_bgcolor('w')\n scan_info_table = ax_scan_info.table(\n cellText=[\n ['Patient ID:', self.scan.patient_id],\n ['Slice thickness:', '%.3f mm' % self.scan.slice_thickness],\n ['Pixel spacing:', '%.3f mm'%self.scan.pixel_spacing]\n ],\n loc='center', cellLoc='left'\n )\n # Remove the cell borders.\n # It Seems like there should be an easier way to do this...\n for cell in scan_info_table.properties()['child_artists']:\n cell.set_color('w')\n\n ax_scan_info.set_title('Scan Info')\n ax_scan_info.set_xticks([])\n ax_scan_info.set_yticks([])\n\n # Add annotations / characteristics table.\n ax_annotation_info = fig.add_axes([0.1, 0.45, 0.3, 0.25])\n ax_annotation_info.set_axis_bgcolor('w')\n\n # Create the rows to be displayed in the annotations table.\n cell_text = []\n for c in _all_characteristics_:\n row = []\n cname = c.capitalize()\n if cname.startswith('Int'):\n cname = 'InternalStructure'\n\n row.append(cname)\n row.append(getattr(self,cname)())\n row.append(getattr(self,c))\n\n cell_text.append(row)\n\n annotation_info_table = ax_annotation_info.table(\n cellText=cell_text,\n loc='center', cellLoc='left', colWidths=[0.45,0.45,0.1]\n )\n\n # Again, remove cell borders.\n for cell in annotation_info_table.properties()['child_artists']:\n cell.set_color('w')\n\n ax_annotation_info.set_title('Annotation Info')\n ax_annotation_info.set_xticks([])\n ax_annotation_info.set_yticks([])\n\n # Add the checkbox for turning contours on / off.\n ax_contour_checkbox = fig.add_axes([0.1, 0.25, 0.1, 0.15])\n ax_contour_checkbox.set_axis_bgcolor('w')\n contour_checkbox = CheckButtons(ax_contour_checkbox,\n ('Show Contours',), (True,))\n contour_checkbox.is_checked = True\n\n # Add the widgets.\n ax_slice = fig.add_axes([0.1, 0.1, 0.3, 0.05])\n ax_slice.set_axis_bgcolor('w')\n txt = 'Z: %.3f'%float(images[current_slice].ImagePositionPatient[-1]) \n sslice = Slider(ax_slice,\n txt,\n 0,\n len(images)-1,\n valinit=current_slice,\n valfmt=u'Slice: %d')\n\n def update(_):\n # Update image itself.\n current_slice = int(sslice.val)\n img.set_data(images[current_slice].pixel_array)\n txt='Z: %.3f'%float(images[current_slice].ImagePositionPatient[-1])\n sslice.label.set_text(txt)\n if contour_checkbox.is_checked:\n for i,c in enumerate(contour_lines):\n flag = (index_of_contour[i] == current_slice)\n flag = flag and (current_slice >= min_slice)\n flag = flag and (current_slice <= max_slice)\n # Set contour visible if flag is True.\n c.set_visible(flag)\n else:\n for c in contour_lines: c.set_visible(False)\n fig.canvas.draw_idle()\n\n def update_contours(_):\n contour_checkbox.is_checked = not contour_checkbox.is_checked\n update(None) # update requires an argument.\n\n sslice.on_changed(update)\n contour_checkbox.on_clicked(update_contours)\n\n plt.show()", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def assemblePlot(self):\n self.clearPlot()\n self.axes = self.figure.add_subplot(111)\n\n # Reset handles\n self._fluxOverlayHandles = []\n self._magneticAxisHandle = None\n self._orbitHandles = []\n self._separatrixOverlayHandle 
= None\n self._wallCrossSectionOverlayHandle = None\n\n # Plot image\n self.plotEq()\n\n # Plot overlays\n self.plotOverlays()\n\n self.adjustAxes()", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()", "def plot_trace(self):\n az.plot_trace(self.ifd_)", "def plot_data_assemble(self,kwargs_seg, add_mask ,img_name='data.pdf',cutout_text='lensed image',font_size=28):\n mask = self.data_mask\n image = self.raw_image\n picked_data = self.data\n selem = np.ones((add_mask, add_mask))\n img_mask = ndimage.binary_dilation(mask.astype(np.bool), selem)\n fig, (ax1, ax2, ax3,ax4) = plt.subplots(1, 4, figsize=(19, 10))\n ax1.imshow(image, origin='lower', cmap=\"gist_heat\")\n ax1.set_title('Cutout Image',fontsize =font_size)\n ax1.text(image.shape[0] * 0.2, image.shape[0] * 0.05, cutout_text,size=20, color='white',weight=\"bold\")\n ax1.axis('off')\n segments_deblend_list, xcenter, ycenter, c_index=kwargs_seg\n ax2.imshow(segments_deblend_list, origin='lower')\n for i in range(len(xcenter)):\n ax2.text(xcenter[i] * 1.1, ycenter[i], 'Seg' + repr(i), size=20,color='w',weight=\"bold\")\n ax2.text(image.shape[0] * 0.2, image.shape[0] * 0.9, 'Seg' + repr(c_index) + ' ' + 'in center',\n size=20, color='white',weight=\"bold\")\n ax2.set_title('Segmentations',fontsize =font_size)\n ax2.axis('off')\n ax3.imshow(img_mask+mask, origin='lower',cmap=\"gist_heat\")\n ax3.set_title('Selected pixels',fontsize =font_size)\n ax3.text(image.shape[0] * 0.1, image.shape[0] * 0.05, 'pixels (S/N >' + repr(self.snr) + ')',size=20, color='white',weight=\"bold\")\n ax3.text(image.shape[0] * 0.1, image.shape[0] * 0.9, 'additional pixels', size=20, color='r',weight=\"bold\")\n ax3.axis('off')\n ax4.imshow(picked_data, origin='lower',cmap=\"gist_heat\")\n ax4.set_title('Processed Image',fontsize =font_size)\n ax4.axis('off')\n plt.show()\n fig.savefig(img_name)\n return 0", "def show():\n\tplt.show()", "def plotmap(self):\n if self.plotfigure is None: return\n\n self.plotfigure.clf()\n collist = [\"#%.2x%.2x%.2x\" % (i, i, i) for i in self.currentshades]\n cmap = colors.ListedColormap(collist)\n if self.gs.isfixed:\n crange = [self.minvalue] + self.currentvalues\n elif self.gs.isperc:\n crange = np.percentile(self.imagearray, [0.0] + self.currentpercents)\n else:\n crange = np.array([self.minstdd] + self.currentnsigs) * self.stdvalue + self.meanvalue\n norm = colors.BoundaryNorm(crange, cmap.N)\n img = plt.imshow(self.imagearray, cmap=cmap, norm=norm, origin='lower')\n plt.colorbar(img, norm=norm, cmap=cmap, boundaries=crange, ticks=crange)\n if self.imagetitle is not None:\n plt.title(self.imagetitle)", "def finalize_plot(self, artifact_name, attacker_x=None, attacker_y=None):\n # Plot the axis ticks.\n plt.ylim((self.min_y - 10.0, self.max_y + 10.0))\n plt.xlim((self.min_x - 10.0, self.max_x + 10.0))\n plt.xticks([self.min_x + 1000, 0.0, self.max_x], size=15)\n plt.yticks([self.min_y + 1000, 0.0, self.max_y], size=15)\n # Add and place the labels.\n ax = plt.gca()\n plt.ylabel(\"Crossrange (ft)\", size=15)\n plt.xlabel(\"Downrange (ft)\", size=15)\n plt.subplots_adjust(bottom=0.25, left=0.25)\n ax.yaxis.set_label_coords(-0.1, 0.5)\n # Place the plane.\n plane = plt.imread(\"plane.png\").transpose((1, 0, 2))\n width = (self.max_x - self.min_x) / 10\n height = (496.0 / 499.0) * width\n x_start = -(width / 2.0)\n y_start = -(height / 2.0)\n plt.imshow(plane, extent=[x_start, x_start + width,\n y_start, y_start + height], zorder=100)\n plane = 
np.flip(plane, 1)\n if attacker_x is None:\n attacker_x = self.max_x - (2 * width)\n if attacker_y is None:\n attacker_y = self.max_y - (2 * height)\n red_plane = self.color_plane_png(plane, [1.0, 0, 0], True)\n plt.imshow(red_plane, zorder=100,\n extent=[attacker_x, attacker_x + width,\n attacker_y, attacker_y + height])\n self.record_artifact(plt, artifact_name, \"matplotlib\")\n plt.clf()", "def plot(self, show_contours=False):\n plt.imshow(self.img, cmap='gray')\n if show_contours:\n for X in self.contours:\n plt.plot(X[:, 0], X[:, 1])\n plt.gca().invert_yaxis()", "def show():\n plt.show()", "def show():\n plt.show()", "def show():\n plt.show()", "def _plot_robot(self):\n try:\n x = 200\n y = 200\n self.ax1.plot(x, y, marker='o', markersize=10, linestyle='None')\n except Exception as err:\n rospy.loginfo(err)", "def visualize(self):\n self.dataFrame.hist()\n plt.show()", "def plot(arrivals_file, region): # pragma: no cover\n region = [float(s) for s in region.split()]\n reg = Region(*region)\n\n arrivals = pd.read_csv(arrivals_file, header=None, names=column_names,\n sep=' ')\n arr_file_base = os.path.splitext(arrivals_file.name)[0]\n # import IPython; IPython.embed(); import sys; sys.exit()\n source = _source_or_stations_in_region(\n arrivals, reg, SOURCE_LATITUDE, SOURCE_LONGITUDE,\n 'sources_in_region_{}.png'.format(arr_file_base))\n\n station = _source_or_stations_in_region(\n arrivals, reg, STATION_LATITUDE, STATION_LONGITUDE,\n 'stations_in_region_{}.png'.format(arr_file_base))\n\n # sources and stations both in region\n sources_and_stations = arrivals[source & station]\n\n fig = plt.figure()\n\n _plot_on_map(sources_and_stations,\n SOURCE_LONGITUDE, SOURCE_LATITUDE,\n marker='*', color='r')\n _plot_on_map(sources_and_stations,\n STATION_LONGITUDE, STATION_LATITUDE,\n marker='^', color='b')\n\n plt.title('Sources and stations in \\n region {}'.format(region))\n # plt.xlabel('Longitude')\n # plt.ylabel('Latitude')\n fig.savefig('sources_and_stations_in_region_{}.png'.format(arr_file_base))\n\n # rays originating and terminating in region\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n for i, arr in enumerate(sources_and_stations.iterrows()):\n dat = arr[1]\n ax.add_line(Line2D([dat[SOURCE_LONGITUDE], dat[STATION_LONGITUDE]],\n [dat[SOURCE_LATITUDE], dat[STATION_LATITUDE]],\n color='b', zorder=i))\n ANZ.drawcoastlines(linewidth=2.0, color='k',\n zorder=sources_and_stations.shape[0]+1)\n\n # ax.set_xlim(reg.leftlon - 5, reg.rightlon + 5)\n # ax.set_ylim(reg.bottomlat - 5, reg.upperlat + 5)\n _draw_paras_merids(ANZ)\n plt.title('Ray paths in \\n region {}'.format(region))\n # plt.xlabel('Longitude')\n # plt.ylabel('Latitude')\n fig.savefig('rays_in_region_{}.png'.format(arr_file_base))", "def _plot_images(self):\n # Plot sagittal (0), coronal (1) or axial (2) view\n self._images = dict(base=list(), cursor_v=list(), cursor_h=list(),\n bounds=list())\n img_min = np.nanmin(self._base_data)\n img_max = np.nanmax(self._base_data)\n text_kwargs = dict(fontsize='medium', weight='bold', color='#66CCEE',\n family='monospace', ha='center', va='center',\n path_effects=[patheffects.withStroke(\n linewidth=4, foreground=\"k\", alpha=0.75)])\n xyz = apply_trans(self._ras_vox_t, self._ras)\n for axis in range(3):\n plot_x_idx, plot_y_idx = self._xy_idx[axis]\n fig = self._figs[axis]\n ax = fig.axes[0]\n img_data = np.take(self._base_data, self._current_slice[axis],\n axis=axis).T\n self._images['base'].append(ax.imshow(\n img_data, cmap='gray', aspect='auto', zorder=1,\n vmin=img_min, 
vmax=img_max))\n img_extent = self._img_extents[axis] # x0, x1, y0, y1\n w, h = np.diff(np.array(img_extent).reshape(2, 2), axis=1)[:, 0]\n self._images['bounds'].append(Rectangle(\n img_extent[::2], w, h, edgecolor='w', facecolor='none',\n alpha=0.25, lw=0.5, zorder=1.5))\n ax.add_patch(self._images['bounds'][-1])\n v_x = (xyz[plot_x_idx],) * 2\n v_y = img_extent[2:4]\n self._images['cursor_v'].append(ax.plot(\n v_x, v_y, color='lime', linewidth=0.5, alpha=0.5, zorder=8)[0])\n h_y = (xyz[plot_y_idx],) * 2\n h_x = img_extent[0:2]\n self._images['cursor_h'].append(ax.plot(\n h_x, h_y, color='lime', linewidth=0.5, alpha=0.5, zorder=8)[0])\n # label axes\n self._figs[axis].text(0.5, 0.05, _IMG_LABELS[axis][0],\n **text_kwargs)\n self._figs[axis].text(0.05, 0.5, _IMG_LABELS[axis][1],\n **text_kwargs)\n self._figs[axis].axes[0].axis(img_extent)\n self._figs[axis].canvas.mpl_connect(\n 'scroll_event', self._on_scroll)\n self._figs[axis].canvas.mpl_connect(\n 'button_release_event', partial(self._on_click, axis=axis))\n # add head and brain in mm (convert from m)\n if self._head is None:\n logger.info('Using marching cubes on CT for the '\n '3D visualization panel')\n rr, tris = _marching_cubes(np.where(\n self._base_data < np.quantile(self._base_data, 0.95), 0, 1),\n [1])[0]\n rr = apply_trans(self._vox_ras_t, rr)\n self._renderer.mesh(\n *rr.T, triangles=tris, color='gray', opacity=0.2,\n reset_camera=False, render=False)\n else:\n self._renderer.mesh(\n *self._head['rr'].T * 1000, triangles=self._head['tris'],\n color='gray', opacity=0.2, reset_camera=False, render=False)\n if self._lh is not None and self._rh is not None:\n self._renderer.mesh(\n *self._lh['rr'].T * 1000, triangles=self._lh['tris'],\n color='white', opacity=0.2, reset_camera=False, render=False)\n self._renderer.mesh(\n *self._rh['rr'].T * 1000, triangles=self._rh['tris'],\n color='white', opacity=0.2, reset_camera=False, render=False)\n self._renderer.set_camera(azimuth=90, elevation=90, distance=300,\n focalpoint=tuple(self._ras))\n # update plots\n self._draw()\n self._renderer._update()", "def plot_reconstruction(self, figsize=(20, 10)):\n\n extent_wf = tools.get_extent(self.fy, self.fx)\n\n # for reconstructed image, must check if has been resampled\n fx = tools.get_fft_frqs(self.img_sr.shape[1], 0.5 * self.dx)\n fy = tools.get_fft_frqs(self.img_sr.shape[0], 0.5 * self.dx)\n\n extent_rec = tools.get_extent(fy, fx)\n\n gamma = 0.1\n min_percentile = 0.1\n max_percentile = 99.9\n\n # create plot\n figh = plt.figure(figsize=figsize)\n grid = plt.GridSpec(2, 3)\n # todo: print more reconstruction information here\n plt.suptitle(\"SIM reconstruction, w=%0.2f, wicker=%d\" %\n (self.wiener_parameter, self.use_wicker))\n\n # widefield, real space\n ax = plt.subplot(grid[0, 0])\n\n vmin = np.percentile(self.widefield.ravel(), min_percentile)\n vmax = np.percentile(self.widefield.ravel(), max_percentile)\n plt.imshow(self.widefield, vmin=vmin, vmax=vmax)\n plt.title('widefield')\n\n # deconvolved, real space\n ax = plt.subplot(grid[0, 1])\n\n vmin = np.percentile(self.widefield_deconvolution.ravel(), min_percentile)\n vmax = np.percentile(self.widefield_deconvolution.ravel(), max_percentile)\n plt.imshow(self.widefield_deconvolution, vmin=vmin, vmax=vmax)\n plt.title('widefield deconvolved')\n\n # SIM, realspace\n ax = plt.subplot(grid[0, 2])\n\n vmin = np.percentile(self.img_sr.ravel(), min_percentile)\n vmax = np.percentile(self.img_sr.ravel(), max_percentile)\n plt.imshow(self.img_sr, vmin=vmin, vmax=vmax)\n plt.title('SIM 
reconstruction')\n\n # widefield Fourier space\n ax = plt.subplot(grid[1, 0])\n plt.imshow(np.abs(self.widefield_ft) ** 2, norm=PowerNorm(gamma=gamma), extent=extent_wf)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2 * self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # deconvolution Fourier space\n ax = plt.subplot(grid[1, 1])\n plt.imshow(np.abs(self.widefield_deconvolution_ft) ** 2, norm=PowerNorm(gamma=gamma), extent=extent_rec)\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n circ2 = matplotlib.patches.Circle((0, 0), radius=2 * self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # SIM fourier space\n ax = plt.subplot(grid[1 ,2])\n plt.imshow(np.abs(self.img_sr_ft) ** 2, norm=PowerNorm(gamma=gamma), extent=extent_rec)\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2 * self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n return figh", "def _plot(self):\r\n fig = plt.figure()\r\n\r\n # Take out second component of intensity if needed\r\n # if self._vna.isTwoComponents():\r\n # intensitySimplified = []\r\n # for i in range(len(self._intensity)):\r\n # tempSet = []\r\n # for j in range(len(self._intensity[i])):\r\n # if (j%2) == 0:\r\n # tempSet.append(self._intensity[i][j])\r\n # intensitySimplified.append(tempSet)\r\n # for i in range(len(self._frequency)):\r\n # plt.plot(self._frequency[i],intensitySimplified[i],label=('%sv' % self._voltages[i][0]))\r\n # else:\r\n for i in range(len(self._frequency)):\r\n plt.plot(self._frequency[i],self._intensity[i],label=('%sv' % self._voltages[i][0]))\r\n plt.legend(loc='upper left')\r\n fig.suptitle('Intensity-Frequency with non-Constant Voltage', fontsize=18)\r\n plt.xlabel('Frequency (Hz)', fontsize=18)\r\n plt.ylabel('Intensity (dBm)', fontsize=16)\r\n\r\n # Save plot\r\n self._saveFig()", "def plot_extracted(self, ax):\n if not self.extracted:\n pass\n else:\n for arrow in self.extracted:\n panel = arrow.panel\n rect_bbox = Rectangle((panel.left, panel.top), panel.right - panel.left, panel.bottom - panel.top,\n facecolor='y', edgecolor=None, alpha=0.4)\n ax.add_patch(rect_bbox)", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='blue')", "def plot_array(self):\n locations = np.array([getattr(pulsar, 'location') for pulsar in self.pulsars])\n fig = plt.figure()\n ax = plt.subplot(111, projection=\"hammer\")\n for location in locations:\n plt.plot(location.ra, location.dec, '.', color='b')\n return fig", "def show():\n setup()\n plt.show()", "def show(self):\n plt.close() # Remove any existing plot\n plt.imshow(\n self.data,\n extent=[\n self.c - self.radius,\n self.c + self.radius,\n self.r + self.radius,\n self.r - self.radius,\n ],\n )\n plt.colorbar()\n plt.title(self.time.strftime(\"%Y%m%d %H:%M:%S.%f %Z\"))\n plt.show()", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='red')", "def __init__(self):\n 
super(vanderpol_output,self).__init__()\n\n # add figure object for further use\n fig = plt.figure()\n self.ax = fig.add_subplot(111)\n self.ax.set_xlim([-2.5,2.5])\n self.ax.set_ylim([-10.5,10.5])\n plt.ion()\n self.sframe = None", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')", "def _show(self, a):\n fig = plt.figure()\n fig.set_size_inches((2, 2))\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n plt.set_cmap('hot')\n ax.imshow(a, aspect='equal')\n plt.show()", "def plotFilled(self):\n minc = 70\n maxc = 120\n num = 25\n levels = np.linspace(minc,maxc,num+1)\n title = \"Orography difference between LGM and Modern ICE-5G data\"\n plt.figure()\n plt.contourf(self.difference_in_ice_5g_orography,levels=levels)\n plt.title(title)\n pts.set_ticks_to_zero()\n cbar = plt.colorbar()\n cbar.ax.set_ylabel('Orography difference in meters')\n #if self.save:\n #plt.savefig('something')\n print(\"Filled contour plot created\")", "def plot(self):\n\t\tself.plotOfTF().plot()", "def plotCombinedIncludingOceanFloors(self):\n self.CombinedPlotHelper(minc=70,maxc=170,num=50)", "def plot(self):\n\n self.map.plot(ax=self.ax,\n color='white',\n edgecolor='gray')\n\n plt.axis('off')", "def plot_IE(self, cmap='Greys', size=(10,10)):\n fig, (ax0, ax1) = plt.subplots(nrows=1,\n ncols=2,\n sharex=True,\n sharey=True)\n\n ax0.imshow(self.I, cmap=cmap)\n ax0.set_title(f'Original {self.I.shape}',\n fontsize=15)\n ax1.imshow(self.E, cmap=cmap)\n ax1.set_title(f'W * H with n={self._n_components} {self.E.shape}',\n fontsize=15)\n\n fig.set_figheight(size[0])\n fig.set_figwidth(size[1])\n fig.tight_layout()\n plt.show()", "def show_figure(self):\n pylab.show()", "def vis_segmentation(image, seg_map):\n plt.figure(figsize=(20, 20))\n \n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n plt.imshow(seg_image)\n plt.axis('off')\n plt.savefig(str(image_id)+'_seg.jpg',bbox_inches='tight')\n plt.close()", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()", "def plot(self):\n cs = plt.contour(self.X, self.Y, self.fitness_function)\n plt.clabel(cs, inline=1, fontsize=6)\n plt.imshow(self.fitness_function, extent=self.limits, origin=\"lower\", alpha=0.3)", "def ion():\n plt.ion()", "def _update_plot(self) -> None:\n\n # Check if plotting is active\n if self._fig is None:\n return None\n LOG.debug(\"Updating plot.\")\n\n # Extract glaciated area\n hs_back = np.ma.masked_where(\n self.h <= 1,\n hillshade(\n self.ele, self.PLOT_HILLSHADE_AZIMUTH, self.PLOT_HILLSHADE_ALTITUDE\n ),\n )\n\n # Clear plot and draw axes\n self._fig.clear()\n ax = plt.subplot(121, facecolor=\"black\")\n ax.tick_params(axis=\"x\", colors=\"w\")\n ax.tick_params(axis=\"y\", colors=\"w\")\n ax.set(xlabel=\"X-coordinate [m]\", ylabel=\"Y-coordinate [m]\")\n ax.xaxis.label.set_color(\"w\")\n ax.yaxis.label.set_color(\"w\")\n title_text = f\"Year: {str(self.i)} ELA: {str(int(self.ela))} m.a.s.l.\"\n ax.set_title(title_text, color=\"white\", size=18)\n\n # Draw new image layers\n 
plt.imshow(self.hs, vmin=90, vmax=345, cmap=\"copper\", extent=self.extent)\n plt.imshow(255 - hs_back, vmin=1, vmax=150, cmap=\"Greys\", extent=self.extent)\n\n # Mass balance\n ax1 = plt.subplot(222, facecolor=\"black\")\n ax1.plot(self.mass_balance, color=\"w\")\n ax1.plot(self.mass_balance_trend, color=\"r\")\n ax1.set(ylabel=\"Mass balance [m]\")\n ax1.yaxis.label.set_color(\"w\")\n plt.setp(ax1.get_xticklabels(), visible=False)\n ax1.tick_params(axis=\"y\", colors=\"w\")\n ax1.set_title(f\"Gradient: {str(self.m)} m/m\", color=\"white\", size=18)\n\n # Plot mean thickness\n ax2 = plt.subplot(224, sharex=ax1, facecolor=\"black\")\n ax2.plot(self.mass, color=\"w\")\n ax2.set(xlabel=\"Year [a]\", ylabel=\"Mean thickness [m]\")\n ax2.xaxis.label.set_color(\"w\")\n ax2.yaxis.label.set_color(\"w\")\n ax2.tick_params(axis=\"x\", colors=\"w\")\n ax2.tick_params(axis=\"y\", colors=\"w\")\n\n # Draw new plot\n self._fig.canvas.draw()\n plt.pause(0.05)", "def show(image):\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show_tileselection(image, tile_selection, tile_dim=[200, 200]):\n fig, ax = plt.subplots()\n ax.imshow(image, cmap='gray')\n for r in np.arange(image.shape[0]+1, step=200):\n ax.plot([0, image.shape[1]], [r, r], 'r')\n for c in np.arange(image.shape[1]+1, step=200):\n ax.plot([c, c], [0, image.shape[0]], 'r') \n for tiler, tilec in zip(tile_selection[0], tile_selection[1]):\n ax.plot([tilec*tile_dim[0], tilec*tile_dim[0]], [tiler*tile_dim[0], (tiler+1)*tile_dim[0]], color=[0, 1, 0])\n ax.plot([(tilec+1)*tile_dim[0], (tilec+1)*tile_dim[0]], [tiler*tile_dim[0], (tiler+1)*tile_dim[0]], color=[0, 1, 0])\n ax.plot([tilec*tile_dim[0], (tilec+1)*tile_dim[0]], [tiler*tile_dim[0], tiler*tile_dim[0]], color=[0, 1, 0])\n ax.plot([tilec*tile_dim[0], (tilec+1)*tile_dim[0]], [(tiler+1)*tile_dim[0], (tiler+1)*tile_dim[0]], color=[0, 1, 0])\n ax.set_xlim(-5, image.shape[1]+5)\n ax.set_ylim(image.shape[0]+5, -5)\n ax.axis('off')\n return fig, ax", "def plot_obs(self):\n if self.obs_im is None and self.obs_ax is None:\n fig, self.obs_ax = plt.subplots()\n self.obs_ax.set_title('Observation')\n self.obs_ax.set_xticks(())\n self.obs_ax.set_yticks(())\n self.obs_im = self.obs_ax.imshow(self.obs, cmap='gray')\n else:\n self.obs_im.set_data(self.obs)", "def _plot_rawdata(self):\n fig, ax = plt.subplots(1, 1)\n ax.imshow(self.data, origin='top', extent=(0., 360., -90., 90.))\n ax.set_title('Driscoll Healy Grid')\n ax.set_xlabel('longitude')\n ax.set_ylabel('latitude')\n fig.tight_layout(pad=0.5)\n return fig,ax", "def plot():\n pass", "def imshow(img):\n imadd(img)\n plt.ion()\n plt.show()", "def plotLine(self):\n minc = 0\n maxc = 500\n num = 500\n levels = np.linspace(minc,maxc,num+1)\n title = textwrap.dedent(\"\"\"\\\n Orography difference between LGM and Modern ICE-5G data\n using {0} meter contour interval\"\"\").format((maxc-minc)/num)\n plt.figure()\n plt.contour(self.difference_in_ice_5g_orography,levels=levels)\n plt.title(title)\n pts.set_ticks_to_zero()\n #if self.save:\n #plt.savefig('something')\n print(\"Line contour plot created\")", "def visualisation(self):\n plt.plot(self.x, self.y, 'o', label = 'Example data')\n plt.plot(self.x, np.dot(self.w, self.X), label = 'Model')\n plt.xlim([-1,1])\n plt.ylim([-1,1])", "def plot_extraction_image(results, remove_frame=False, title=None,\n framesize=None, 
path_output=None, ext=\"png\",\n show=True):\n # check parameters\n stack.check_parameter(results=list,\n remove_frame=bool,\n title=(str, type(None)),\n framesize=(tuple, type(None)),\n path_output=(str, type(None)),\n ext=(str, list))\n\n # we plot 3 images by row maximum\n nrow = int(np.ceil(len(results)/3))\n ncol = min(len(results), 3)\n if framesize is None:\n framesize = (5 * ncol, 5 * nrow)\n\n # plot one image\n marge = stack.get_offset_value()\n if len(results) == 1:\n cyt, nuc, rna, foci, _ = results[0]\n if remove_frame:\n fig = plt.figure(figsize=(8, 8), frameon=False)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis('off')\n else:\n plt.figure(figsize=(8, 8))\n plt.xlim(-marge, max(cyt[:, 1]) + marge)\n plt.ylim(max(cyt[:, 0]) + marge, -marge)\n plt.scatter(cyt[:, 1], cyt[:, 0], c=\"black\", s=5, marker=\".\")\n plt.scatter(nuc[:, 1], nuc[:, 0], c=\"steelblue\", s=5, marker=\".\")\n plt.scatter(rna[:, 1], rna[:, 0], c=\"firebrick\", s=50, marker=\"x\")\n if len(foci) > 0:\n plt.scatter(foci[:, 2], foci[:, 1], c=\"chartreuse\", s=60,\n marker=\"D\")\n if title is not None and not remove_frame:\n title_plot = title + \"_cell_0\"\n plt.title(title_plot, fontweight=\"bold\", fontsize=25)\n if not remove_frame:\n plt.tight_layout()\n if path_output is not None:\n save_plot(path_output, ext)\n if show:\n plt.show()\n else:\n plt.close()\n\n return\n\n # plot multiple images\n fig, ax = plt.subplots(nrow, ncol, figsize=framesize)\n\n # one row\n if len(results) in [2, 3]:\n for i, (cyt, nuc, rna, foci, _) in enumerate(results):\n if remove_frame:\n ax[i].axis(\"off\")\n ax[i].set_xlim(-marge, max(cyt[:, 1]) + marge)\n ax[i].set_ylim(max(cyt[:, 0]) + marge, -marge)\n ax[i].scatter(cyt[:, 1], cyt[:, 0], c=\"black\", s=5, marker=\".\")\n ax[i].scatter(nuc[:, 1], nuc[:, 0], c=\"steelblue\", s=5, marker=\".\")\n ax[i].scatter(rna[:, 1], rna[:, 0], c=\"firebrick\", s=50,\n marker=\"x\")\n if len(foci) > 0:\n ax[i].scatter(foci[:, 2], foci[:, 1], c=\"chartreuse\", s=60,\n marker=\"D\")\n if title is not None:\n title_plot = title + \"_cell_{0}\".format(i)\n ax[i].set_title(title_plot, fontweight=\"bold\", fontsize=10)\n\n # several rows\n else:\n # we complete the row with empty frames\n r = nrow * 3 - len(results)\n results_completed = [(cyt, nuc, rna, foci, _)\n for (cyt, nuc, rna, foci, _) in results]\n results_completed += [None] * r\n for i, result in enumerate(results_completed):\n row = i // 3\n col = i % 3\n if result is None:\n ax[row, col].set_visible(False)\n continue\n else:\n cyt, nuc, rna, foci, cell = result\n if remove_frame:\n ax[row, col].axis(\"off\")\n ax[row, col].set_xlim(-marge, max(cyt[:, 1]) + marge)\n ax[row, col].set_ylim(max(cyt[:, 0]) + marge, -marge)\n ax[row, col].scatter(cyt[:, 1], cyt[:, 0], c=\"black\", s=5,\n marker=\".\")\n ax[row, col].scatter(nuc[:, 1], nuc[:, 0], c=\"steelblue\", s=5,\n marker=\".\")\n ax[row, col].scatter(rna[:, 1], rna[:, 0], c=\"firebrick\", s=50,\n marker=\"x\")\n if len(foci) > 0:\n ax[row, col].scatter(foci[:, 2], foci[:, 1], c=\"chartreuse\",\n s=60, marker=\"D\")\n if title is not None:\n title_plot = title + \"_cell_{0}\".format(i)\n ax[row, col].set_title(title_plot,\n fontweight=\"bold\", fontsize=10)\n\n plt.tight_layout()\n if path_output is not None:\n save_plot(path_output, ext)\n if show:\n plt.show()\n else:\n plt.close()\n\n return", "def inner_PlotDistrifun():\r\n \r\n font = {'family': 'serif',\r\n 'color': 'darkred',\r\n 'weight': 'normal',\r\n 'size': 16}\r\n\r\n Nmax = 100\r\n bins = np.linspace(0, Nmax, Nmax+1)\r\n 
nList = np.linspace(0, Nmax, Nmax+1, dtype = int)\r\n\r\n y_location = self.spinBox_PixelY.value()\r\n x_location = self.spinBox_PixelX.value()\r\n\r\n # get pixel intensity data\r\n Array1 = self.APP_dataprocess.PixelData(y_location, x_location)\r\n Array2 = Array1\r\n g2 = G2(Array1, Array2)\r\n print(\"g2 is:\", g2)\r\n\r\n arr = []\r\n rv = poisson(self.firstOrdImaging[y_location, x_location])\r\n for num in range(0,40):\r\n arr.append(rv.pmf(num))\r\n\r\n ax = fig.add_subplot(111)\r\n\r\n try:\r\n ax.cla()\r\n #print(\"clear self.cbar !\")\r\n except:\r\n pass\r\n #print(\"fail to clear self.cbar !\")\r\n \r\n ax.hist(Array1 , bins, normed=True, label = \"Data distribution\") \r\n ax.plot(nList, BoseEinstein(self.firstOrdImaging[y_location, x_location], Nmax), label =\"BoseEinstein distribution\")\r\n ax.plot(arr, linewidth=2.0, label =\"Possion distribution\")\r\n ax.set_title(\"Pixel Position({},{}); <$I$>:{}\".format(x_location , y_location, self.firstOrdImaging[y_location, x_location]), fontdict=font)\r\n \r\n ax.text(22, .08, r\"g2:{}\".format(g2), fontdict=font)\r\n ax.legend() \r\n \r\n fig.savefig('PixelPosition({},{})PhotDist.eps'.format(x_location , y_location), format='eps', dpi=300)\r\n plt.close()", "def plot(self):\n\n import matplotlib.pyplot as plt\n plt.matshow(self.event_roll.T, cmap=plt.cm.gray, interpolation='nearest', aspect='auto')\n plt.show()", "def plot(self, num_levels=10):\n if num_levels == -1:\n num_levels = len(self.energies())\n print(self.energies(num_levels))\n figure(figsize=(20, 5))\n subplot(1, num_levels + 1, 1)\n self.plot_potential()\n #xlabel('$\\phi$')\n for ii, psi2D in enumerate(self.get_2Dpsis(num_levels)):\n subplot(1, num_levels + 1, ii + 2)\n #imshow(psi2D.real,extent=(self.x[0],self.x[-1],self.y[0],self.y[-1]),interpolation=\"None\",aspect='auto')\n imshow(psi2D.real, interpolation=\"None\", aspect='auto')\n xlabel(ii)", "def _init_plot(self) -> None:\n\n # create a grayscale plot\n out = sys.stdout\n sys.stdout = open(\"/dev/null\", \"w\")\n hdu = self.image_generator.image(self.ra, self.dec)\n self.plot = aplpy.FITSFigure(hdu)\n self.plot.show_grayscale()\n self.plot.set_theme(\"publication\")\n sys.stdout = out\n\n # label for the position angle\n pa_string = \"PA = %.1f\" % self.mode_details.position_angle().to_value(u.deg)\n if self.mode_details.automated_position_angle():\n pa_string += \" (auto)\"\n self.draw_label(0.95, -0.05, pa_string, style=\"italic\", weight=\"bold\")\n\n # label for the title\n if self.title:\n self.draw_label(\n 0.5, 1.03, self.title, style=\"italic\", weight=\"bold\", size=\"large\"\n )\n\n # label for the image source\n self.draw_label(\n -0.05,\n -0.05,\n \"%s\" % self.image_generator.source(),\n style=\"italic\",\n weight=\"bold\",\n )\n\n # grid overlay\n self.plot.add_grid()\n self.plot.grid.set_alpha(0.2)\n self.plot.grid.set_color(\"b\")\n\n # indicate the RSS field of view\n self.draw_circle(self.ra, self.dec, 4.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.79,\n 0.79,\n \"RSS\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # indicate the Salticam field of view\n self.draw_circle(self.ra, self.dec, 5.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.86,\n 0.86,\n \"SCAM\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # labels for north and east direction\n self.draw_label(\n self.ra,\n self.dec + 4.8 * u.arcmin,\n \"N\",\n style=\"italic\",\n weight=\"bold\",\n 
size=\"large\",\n color=(0, 0.5, 1),\n )\n self.draw_label(\n self.ra + 4.8 * u.arcmin / np.abs(np.cos(self.dec)),\n self.dec,\n \"E\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"right\",\n color=(0, 0.5, 1),\n )\n\n # add cross hairs\n self.draw_centered_line(\n 0 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n self.draw_centered_line(\n 90 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n\n # label for the magnitude range and bandpass\n if self.magnitude_range:\n self._show_magnitudes()\n\n # add mode specific content\n if not self.basic_annotations:\n self.mode_details.annotate_finder_chart(self)", "def plot(self, *args, **kwargs):\n pass", "def show_plots():\n plt.show()", "def show_geometry(self):\n import matplotlib.pyplot as plt\n fig_geom = plt.figure()\n ax_geom = fig_geom.add_subplot(111)\n rectangle = []\n for i in range(len(self.coordinates)):\n rectangle.append(plt.Rectangle((self.coordinates[i][0],\n self.coordinates[i][1]),\n self.size[i][0], self.size[i][1], alpha=0.5))\n ax_geom.add_patch(rectangle[i])\n plt.axis('auto')\n plt.show()", "def vis_segmentation(image, seg_map):\n plt.figure(figsize=(15, 5))\n grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])\n\n plt.subplot(grid_spec[0])\n plt.imshow(image)\n plt.axis('off')\n plt.title('input image')\n\n plt.subplot(grid_spec[1])\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n plt.imshow(seg_image)\n plt.axis('off')\n plt.title('segmentation map')\n\n plt.subplot(grid_spec[2])\n plt.imshow(image)\n plt.imshow(seg_image, alpha=0.7)\n plt.axis('off')\n plt.title('segmentation overlay')\n\n unique_labels = np.unique(seg_map)\n ax = plt.subplot(grid_spec[3])\n plt.imshow(\n FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')\n ax.yaxis.tick_right()\n plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])\n plt.xticks([], [])\n ax.tick_params(width=0.0)\n plt.grid('off')\n plt.show()", "def rho_plot2(self, pred=None):\n axRect = [0.1446, 0.2150, 0.7604, 0.7100]\n # plt.figure(22, figsize = (8.5, 11), dpi=300)\n fig, ax = plt.subplots()\n if pred is not None:\n self.rho_sub_plot(ax, axRect, pred=pred)\n else:\n self.rho_sub_plot(ax, axRect)", "def plot_overscan(overscan, img, TITLE, OUT_DIR):\n fig = plt.figure(figsize=(20, 20))\n gs0 = gridspec.GridSpec(3, 3)\n\n for i, f in enumerate(img):\n x = f.dev_index % 3\n\n gs = gridspec.GridSpecFromSubplotSpec(\n 1, 2, wspace=0, subplot_spec=gs0[f.dev_index])\n ax2 = plt.subplot(gs[0, 0])\n for j in range(9, 17):\n plt.plot(overscan[i, j - 1] + 500 *\n (j - 8), label='seg' + str(j + 1))\n plt.legend(fontsize=6, loc='upper center', ncol=4)\n if x != 0:\n ax2.set_yticklabels([])\n\n plt.grid()\n plt.xlim(0, 2100)\n plt.ylim(0, 4500)\n ax2.set_title(f.dev_name + ' (seg 10-17)')\n\n ax1 = plt.subplot(gs[0, 1])\n for j in range(1, 9):\n plt.plot(overscan[i, j - 1] + 500 * j, label='seg' + str(j - 1))\n plt.legend(fontsize=6, loc='upper center', ncol=4)\n if x != 2:\n ax1.set_yticklabels([])\n if x == 2:\n ax1.yaxis.tick_right()\n plt.grid()\n plt.xlim(0, 2100)\n plt.ylim(0, 4500)\n ax1.set_title(f.dev_name + ' (seg 0-7)')\n\n fig.suptitle('Overscan ' + TITLE, y=0.94, size=20)\n plt.subplots_adjust(wspace=0.05)\n plt.savefig(OUT_DIR + TITLE + '_spatial.png')\n plt.close(fig)", "def relative_src_bg(self):\n fig, ax = plt.subplots()\n \n for oneF in ['extracted_flux','extracted_bg_only']:\n wave, f = 
self.result['1d'][oneF]\n ax.plot(wave,f,label=oneF)\n ax.set_xlabel('Wavelength ($\\mu$m)')\n ax.set_ylabel('Extracted Flux')\n ax.legend()\n \n fig.show()", "def vis_segmentation(image, seg_map):\n plt.figure(figsize=(15, 5))\n grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])\n\n plt.subplot(grid_spec[0])\n plt.imshow(image)\n plt.axis('off')\n plt.title('input image')\n\n plt.subplot(grid_spec[1])\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n plt.imshow(seg_image)\n plt.axis('off')\n plt.title('segmentation map')\n\n plt.subplot(grid_spec[2])\n plt.imshow(image)\n plt.imshow(seg_image, alpha=0.7)\n plt.axis('off')\n plt.title('segmentation overlay')\n\n unique_labels = np.unique(seg_map)\n ax = plt.subplot(grid_spec[3])\n plt.imshow(\n FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')\n ax.yaxis.tick_right()\n plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])\n plt.xticks([], [])\n ax.tick_params(width=0.0)\n plt.grid('off')\n plt.show()", "def plot(self,id=1,dpi=150):\n fig = plt.figure(id)\n ax1 = fig.add_subplot(111)\n ax1.imshow(self.image,interpolation='nearest',extent=[self.xmin,self.xmax,\n self.ymin,self.ymax], origin='lower')\n #plt.savefig('.png',dpi=dpi)\n plt.draw()", "def reconstruction_plot(yyy, color = 'r'):\n length = len(yyy)\n plt.plot(np.linspace(0, 1, length)[:length // to_show + 1]\n , yyy[:length // to_show + 1], color)\n # plt.plot(np.linspace(0, 1, len(yyy)), yyy, color)", "def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()", "def show(image):\n fig = pyplot.figure()\n axis = fig.add_subplot(1, 1, 1)\n imgplot = axis.imshow(image)\n imgplot.set_interpolation('nearest')\n axis.xaxis.set_ticks_position('top')\n axis.yaxis.set_ticks_position('left')\n pyplot.show()", "def plot_image_extent(data_obj, test_num):\n\n fig = new_pdf_page(data_obj.pdf_obj) # Create a new page in the pdf\n\n # Display information about the image extent if it exists\n if data_obj.image_extent is not None:\n plt.suptitle('Test ' + str(test_num) + ': Image Extent (' + \"{0:.1f}\".format(data_obj.image_extent) + ' mm)')\n else:\n plt.suptitle('Test ' + str(test_num) + ': Image Extent (None)')\n\n\n #----------------------------------------------------------------------------------------------#\n # Display the extent image along with the ROIs of the dynamic range for the image, with arrows #\n # pointing to them #\n #----------------------------------------------------------------------------------------------#\n plt.subplot(212)\n plt.imshow(data_obj.extent_img, cmap=plt.get_cmap('gray'))\n data_obj.extent_dark_ROI.add_rect_to_plot()\n data_obj.extent_bright_ROI.add_rect_to_plot()\n\n # Create the arrows\n arrowprops = dict(facecolor='green', shrink=0.05)\n plt.annotate('bright spot', xy=(data_obj.extent_bright_ROI.center[::-1]), xytext=(0.89, 0.95),\n arrowprops=arrowprops, color='green', textcoords='axes fraction',\n horizontalalignment='right', verticalalignment='top')\n\n plt.annotate('dark spot', xy=(data_obj.extent_dark_ROI.center[::-1]), xytext=(0.98, 0.8),\n arrowprops=arrowprops, color='green', textcoords='axes fraction',\n horizontalalignment='right', verticalalignment='top')\n\n plt.subplot(211)\n plt.axis('off')\n\n #--------------------------------------------------------------------------------------------------#\n # For each distance given (in 
mm), go through and display the dynamic range for the given distance #\n #--------------------------------------------------------------------------------------------------#\n fontsize2 = 8\n step = 0.05\n dist = (1, 5, 10)\n\n for i in range(len(dist)):\n plt.text(0.3,\n 0.5 - i * step,\n 'distance '\n + str(dist[i])\n + ' mm '\n + ' dynamic range '\n + \"{0:.1f}\".format(data_obj.extent_dyn_range[i]),\n ha='left',\n va='center',\n fontsize=fontsize2)", "def plot(self):\n # Find only unmasked data :\n # xyz, sData, sColor, _ = self._select_unmasked()\n xyz, sData, sColor = self.xyz, self.sData, self.sColor\n\n # Render as cloud points :\n self.mesh = visu.Markers(name='Sources')\n self.mesh.set_data(xyz, edge_color=self.edgecolor, face_color=sColor,\n size=sData, scaling=self.scaling,\n edge_width=self.edgewidth, symbol=self.symbol)\n self.mesh.set_gl_state('translucent')", "def show_es():\n img = rotated_e()\n for i in range(4):\n plt.subplot(2, 2, i + 1)\n plt.imshow(img[i], cmap=plt.cm.gray, interpolation='nearest')\n plt.show()", "def plot_reconstruction_diagnostics(self, figsize=(20, 10)):\n figs = []\n fig_names = []\n\n # upsampled frequency\n fx_us = tools.get_fft_frqs(2 * self.nx, 0.5 * self.dx)\n fy_us = tools.get_fft_frqs(2 * self.ny, 0.5 * self.dx)\n\n # plot different stages of inversion\n extent = tools.get_extent(self.fy, self.fx)\n extent_upsampled = tools.get_extent(fy_us, fx_us)\n\n for ii in range(self.nangles):\n fig = plt.figure(figsize=figsize)\n grid = plt.GridSpec(3, 4)\n\n for jj in range(self.nphases):\n\n # ####################\n # separated components\n # ####################\n ax = plt.subplot(grid[jj, 0])\n\n to_plot = np.abs(self.separated_components_ft[ii, jj])\n to_plot[to_plot <= 0] = np.nan\n plt.imshow(to_plot, norm=LogNorm(), extent=extent)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('O(f)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.title('m*O(f-fo)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.title('m*O(f+fo)otf(f)')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # deconvolved component\n # ####################\n ax = plt.subplot(grid[jj, 1])\n\n plt.imshow(np.abs(self.components_deconvolved_ft[ii, jj]), norm=LogNorm(), extent=extent)\n\n if jj == 0:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('deconvolved component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # 
shifted component\n # ####################\n ax = plt.subplot(grid[jj, 2])\n\n # avoid any zeros for LogNorm()\n cs_ft_toplot = np.abs(self.components_shifted_ft[ii, jj])\n cs_ft_toplot[cs_ft_toplot <= 0] = np.nan\n plt.imshow(cs_ft_toplot, norm=LogNorm(), extent=extent_upsampled)\n plt.scatter(0, 0, edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('shifted component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # normalized weights\n # ####################\n ax = plt.subplot(grid[jj, 3])\n\n to_plot = self.weights[ii, jj] / self.weight_norm\n to_plot[to_plot <= 0] = np.nan\n im2 = plt.imshow(to_plot, norm=LogNorm(), extent=extent_upsampled)\n im2.set_clim([1e-5, 1])\n fig.colorbar(im2)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('normalized weight')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n plt.suptitle('period=%0.3fnm at %0.3fdeg=%0.3frad, f=(%0.3f,%0.3f) 1/um\\n'\n 'mod=%0.3f, min mcnr=%0.3f, wiener param=%0.2f\\n'\n 'phases (deg) =%0.2f, %0.2f, %0.2f, phase diffs (deg) =%0.2f, %0.2f, %0.2f' %\n (self.periods[ii] * 1e3, self.angles[ii] * 180 / np.pi, self.angles[ii],\n self.frqs[ii, 0], self.frqs[ii, 1], self.mod_depths[ii, 1], np.min(self.mcnr[ii]), self.wiener_parameter,\n self.phases[ii, 0] * 180/np.pi, self.phases[ii, 1] * 180/np.pi, self.phases[ii, 2] * 180/np.pi,\n 0, np.mod(self.phases[ii, 1] - self.phases[ii, 0], 2*np.pi) * 180/np.pi,\n np.mod(self.phases[ii, 2] - self.phases[ii, 0], 2*np.pi) * 180/np.pi))\n\n figs.append(fig)\n fig_names.append('sim_combining_angle=%d' % (ii + 1))\n\n # #######################\n # net weight\n # #######################\n figh = plt.figure(figsize=figsize)\n grid = plt.GridSpec(1, 2)\n plt.suptitle('Net weight, Wiener param = %0.2f' % self.wiener_parameter)\n\n ax = plt.subplot(grid[0, 0])\n net_weight = np.sum(self.weights, axis=(0, 1)) / self.weight_norm\n im = ax.imshow(net_weight, extent=extent_upsampled, norm=PowerNorm(gamma=0.1))\n\n figh.colorbar(im, ticks=[1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 1e-2, 1e-3, 1e-4, 1e-5])\n\n ax.set_title(\"non-linear scale\")\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2*self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], 
radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n ax = plt.subplot(grid[0, 1])\n ax.set_title(\"linear scale\")\n im = ax.imshow(net_weight, extent=extent_upsampled)\n\n figh.colorbar(im)\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2 * self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n figs.append(figh)\n fig_names.append('net_weight')\n\n return figs, fig_names", "def ShowOneContour(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt,figname):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-15\n YMAX=15\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-20:y0+20,x0:spec_index_max]/all_expo[index] \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n 
grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, Y, reduc_image , 20, colors='black', linewidth=.5,origin='lower')\n \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX-3,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)", "def visualize(self):\n colors = {'outline': (220, 220, 220),\n 'inlier': (0, 255, 0),\n 'outlier': (0, 0, 255),\n 'lines': (128, 220, 128)}\n # Create output image for visualization\n gap = 5\n h1, w1 = self.target.image.shape[:2]\n h2, w2 = self.image.shape[:2]\n vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)\n vis[:h1, :w1, :] = self.target.image\n w1 += gap\n vis[:h2, w1:w1+w2, :] = self.image\n \n # Draw the located object \n quad = np.float32(self.quad) + np.float32([w1, 0])\n self.draw(vis, colors['outline'], 2, quad)\n \n # draw point details\n inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.inliers]\n outliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]\n if colors['outlier'] is not None: # draw x on each point\n r = 2 # radius\n thickness = 2\n for x0, y0, x1, y1 in outliers:\n cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x0 + r, y0 - r), (x0 - r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)\n if colors['lines'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)\n if colors['inlier'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)\n cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)\n return vis", "def vis_segmentation(image, seg_map):\r\n plt.figure(figsize=(15, 5))\r\n grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])\r\n\r\n plt.subplot(grid_spec[0])\r\n plt.imshow(image)\r\n plt.axis('off')\r\n plt.title('input image')\r\n\r\n plt.subplot(grid_spec[1])\r\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\r\n plt.imshow(seg_image)\r\n plt.axis('off')\r\n plt.title('segmentation map')\r\n\r\n plt.subplot(grid_spec[2])\r\n plt.imshow(image)\r\n plt.imshow(seg_image, alpha=0.7)\r\n plt.axis('off')\r\n plt.title('segmentation overlay')\r\n\r\n unique_labels = np.unique(seg_map)\r\n ax = plt.subplot(grid_spec[3])\r\n plt.imshow(\r\n 
FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')\r\n ax.yaxis.tick_right()\r\n plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])\r\n plt.xticks([], [])\r\n ax.tick_params(width=0.0)\r\n plt.grid('off')\r\n plt.show()", "def __draw(self):\n plt.rcParams.update(self.settings.rcParams)\n\n self.fig = plt.figure()\n self.ax = self.fig.add_axes(self.axes_rect)\n\n xs = np.arange(1, self.xmax+1)\n ys = [np.arange(0, self.ymax) for i in range(self.xmax)]\n\n self.ax.plot(xs, ys)\n\n self.__draw_xaxis()\n self.__draw_yaxis()\n\n self.__draw_annotations()\n self.__draw_eras()\n self.__draw_era_spans()\n self.__draw_watermark()\n self.__draw_title()\n self.__draw_image()\n self.__draw_max_age()\n\n self.ax.set_aspect('equal', share=True)", "def plot(self):\n\t\tself.plotOfCos1().plot()", "def plot(self, x, y, ax=None, size=None):\n if ax is None:\n ax = plt.gca()\n if size is None:\n size = 1\n if self.final_image is not None:\n imagebox = OffsetImage(self.final_image, zoom=size)\n ab = AnnotationBbox(\n imagebox, (x, y), frameon=False,\n pad=0)\n ax.add_artist(ab)\n zorder = ab.zorder\n else:\n zorder = 0\n if self.marker:\n if self.final_image is not None:\n markersize = max(self.final_image.size)\n else:\n markersize = 50\n markersize = markersize * size\n if self.marker_front:\n plt.plot(x, y, marker=self.marker, markeredgecolor=self.col,\n markerfacecolor=(0, 0, 0, 0), markersize=markersize,\n zorder=zorder + 0.1,\n markeredgewidth=self.markeredgewidth)\n else:\n plt.plot(x, y, marker=self.marker, markeredgecolor=self.col,\n markerfacecolor=self.col, markersize=markersize,\n zorder=zorder - 0.1,\n markeredgewidth=self.markeredgewidth)\n if self.string is not None:\n ax.annotate(self.string, (x, y),\n horizontalalignment='center',\n verticalalignment='center',\n zorder=zorder + 0.2,\n fontsize=self.fontsize, fontname=self.fontname,\n color=self.fontcolor)", "def boxPlot(self):\n clf()\n boxplot(self.y,positions=self.x,widths=0.5)\n xlabel('X Label (units)')\n ylabel('Y Label (units)')\n savefig('boxplot.png')", "def plot(self):\n plot_spectrum(self.data, self.fig, self.ax_e, self.ax_s, title = \"Solar spectrum\")", "def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()", "def plottrace_paper(moviedict, figw, figh, figdpi, fontsz, border, xlabel, ylabel, yaxisticks, \n xaxisticks, labels, lw, fs):\n \n for movie, val in moviedict.iteritems():\n os.chdir(movie)\n condition, xlim, color, inum = val\n \n fontv = matplotlib.font_manager.FontProperties(fname='/usr/share/matplotlib/mpl-data/fonts/ttf/arial.ttf')\n fontv.set_size(fontsz)\n \n print(movie)\n td = dil.load_params()\n x, roi_cols = dil.load_results(RESULTS_FILE)\n start = int(td['startshort'])\n end = int(td['endshort'])\n \n \n fig1 = plt.figure(figsize=(figw*xlim/0.6, figh), dpi=figdpi, facecolor='w', edgecolor='k')\n \n xlen = len(x[roi_cols['Mean1']][start:end])\n #print(xlen)\n xvals = np.arange(0, float(xlen)/fs, 1/float(fs))\n #print(xvals)\n \n \n ycib = x[roi_cols['Mean1']][start:end]\n ycib = [v - np.mean(ycib) for v in ycib]\n #print(ycib)\n \n 
ylab = x[roi_cols['Mean2']][start:end]\n ylab = [v - np.mean(ylab) for v in ylab]\n ylab = [v + 70 for v in ylab]\n \n # Plots the traces\n \n plt.plot(xvals, ylab, label='proboscis tip', linewidth=lw, color='k')\n plt.plot(xvals, ycib, label='cibarium', linewidth=lw, color='b')\n \n \n \n \n \n \n \n if labels == 'yes':\n plt.title(td['condition'], fontproperties=fontv, horizontalalignment='left')\n \n #Plots legend and removes the border around it.\n legend=plt.legend()\n #legend = plt.legend(bbox_to_anchor = (1.5, 1.6))\n legend.draw_frame(False)\n ltext = legend.get_texts() \n plt.setp(ltext, fontproperties=fontv) \n \n ax = plt.gca()\n \n #Uncomment lines below to display without top and right borders.\n \n if border == 'no':\n for loc, spine in ax.spines.iteritems():\n if loc in ['left','bottom']:\n pass\n elif loc in ['right','top']:\n spine.set_color('none') # don't draw spine\n else:\n raise ValueError('unknown spine location: %s'%loc)\n \n \n #Uncomment lines below to display ticks only where there are borders.\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n \n # Specifies the number of tickmarks/labels on the yaxis.\n #ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(yaxisticks)) \n ## Removes tick labels and ticks from xaxis.\n ax.axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())\n \n if labels == 'yes':\n plt.ylabel(ylabel, fontsize=fontsz, labelpad=12)\n fig1.figsize = (6, 3)\n \n # Adjusts the space between the plot and the edges of the figure; (0,0) is the lower \n #lefthand corner of the figure.\n fig1.subplots_adjust(bottom=0.3)\n fig1.subplots_adjust(left=0.05)\n fig1.subplots_adjust(right=0.95)\n fig1.subplots_adjust(top=0.95)\n \n #ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(XAXISTICKS)) \n \n #Specifies axis labels and axis tick label sizes.\n plt.xlabel(xlabel, fontproperties=fontv)\n plt.ylabel(ylabel, fontproperties=fontv)\n plt.xticks([0, 0.2, 0.4, 0.6], fontproperties=fontv)\n plt.xlim( (0, xlim+0.05) )\n #plt.yticks(fontproperties=fontv)\n \n \n \n # Saves the figures in plots/plots.\n if labels == 'no':\n plotfolder = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath('.'))),\n 'plots')\n makenewdir(plotfolder)\n figname = os.path.join(plotfolder, movie + '_trace_nolab')\n plt.savefig(figname+'.svg', dpi=FIGDPI, format='svg')\n plt.savefig(figname+'.png', dpi=FIGDPI, format='png')\n os.chdir('../')\n\n if labels == 'yes':\n plotfolder = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath('.'))),\n 'plots')\n makenewdir(plotfolder)\n figname = os.path.join(plotfolder, movie + '_trace')\n plt.savefig(figname+'.svg', dpi=FIGDPI, format='svg')\n plt.savefig(figname+'.png', dpi=FIGDPI, format='png')\n os.chdir('../')", "def visualizeObs():\n fcontourf(fObs, [-2, 2], [-1, 1], [0, 10])", "def plotResults(self):\n\n clusters = self.data[[i for i in range(len(self.data)) if self.vorLabels[i] != 0], :]\n vorLabels = [self.vorLabels[i] for i in range(len(self.data)) if self.vorLabels[i] != 0]\n\n self.plot = voronoiPlot(clusters, self.skel, self.skelLabels, self.isCorrect, vorLabels)\n self.plot.snapPlot()", "def plot_lines(self):\n self.plot(3)", "def EventDisplay( tubes, quantities, PMTFlatMapPositive, title=\"Charge\", cutrange=[-1,-1] ):\n \n fig, ax= plt.subplots(figsize=[30,30])\n preimage = np.zeros( preimage_dimensions )\n \n imgmin = quantities.min()\n imgmax = quantities.max()\n\n for idx, tube in enumerate( tubes ):\n if cutrange[0] != cutrange[1]:\n if quantities[idx] < 
cutrange[0] or quantities[idx] > cutrange[1]:\n continue\n for dx in range(-3,4):\n for dy in range(-3,4):\n if abs(dx)==3 and abs(dy)==3:\n continue\n \n #print( \"idx=\", idx, \" len(quantities)=\",len(quantities), \" tube=\", tube, \" len(PMTFlatMap)=\", len(PMTFlatMapPositive))\n preimage[ PMTFlatMapPositive[tube][1]+dx, PMTFlatMapPositive[tube][0]+dy ] = quantities[idx]\n\n if cutrange[0] != cutrange[1]:\n imgmin = cutrange[0]\n imgmax = cutrange[1]\n \n im = ax.imshow( preimage, extent = [-positive_x_offset,positive_x_offset,-lower_endcap_offset,lower_endcap_offset], vmin=imgmin, vmax=imgmax )\n\n fig.suptitle(title, fontsize=80)\n\n plt.rc('xtick', labelsize=24) \n plt.rc('ytick', labelsize=24) \n plt.xlabel('Distance CCW on perimeter from x-axis (cm)', fontsize=48)\n plt.ylabel('Y (cm)', fontsize=48)\n\n plt.set_cmap('gist_heat_r')\n\n # Create colourbar\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cbar = plt.colorbar(im, cax=cax)\n cbar.ax.tick_params(labelsize=24)\n\n # Fix title height\n plt.subplots_adjust(top=0.5)\n plt.tight_layout()", "def visualize(self):\n\n check_is_fitted(self, \"sm_\")\n\n fig = plt.figure(figsize=(6, 4))\n inner = gridspec.GridSpec(2, 1, hspace=0.1, height_ratios=[6, 1])\n ax1_main = plt.Subplot(fig, inner[0]) \n xgrid = np.linspace(self.xmin, self.xmax, 100).reshape([-1, 1])\n ygrid = self.decision_function(xgrid)\n ax1_main.plot(xgrid, ygrid)\n ax1_main.set_xticklabels([])\n ax1_main.set_title(\"Shape Function\", fontsize=12)\n fig.add_subplot(ax1_main)\n \n ax1_density = plt.Subplot(fig, inner[1]) \n xint = ((np.array(self.bins_[1:]) + np.array(self.bins_[:-1])) / 2).reshape([-1, 1]).reshape([-1])\n ax1_density.bar(xint, self.density_, width=xint[1] - xint[0])\n ax1_main.get_shared_x_axes().join(ax1_main, ax1_density)\n ax1_density.set_yticklabels([])\n ax1_density.autoscale()\n fig.add_subplot(ax1_density)\n plt.show()", "def plot(self):\n return self.graph(edge_labels='words_in_out').plot()", "def phase_plot(self, pred=None):\n axRect = [0.1446, 0.2150, 0.7604, 0.7100]\n # plt.figure(22, figsize = (8.5, 11), dpi=300)\n fig, ax = plt.subplots()\n if pred is not None:\n self.phase_sub_plot(ax, axRect, pred=pred)\n else:\n self.phase_sub_plot(ax, axRect)", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def plot_well_depth(self):\n fig, ax = plt.subplots()\n imMap = ax.imshow(self.get_well_depth_image())\n ax.set_title('Well Fraction Map Map')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n \n ## Adjust colorbar height to match image\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n 
fig.colorbar(imMap,label='Well Fraction',cax=cax)\n fig.show()", "def show_graphs ():\n plt.ylim = (0, 300)\n plt.xlim = (0, 300)\n #Set up lidar plot to figure 1\n lidar_plot = plt.figure (1)\n #Assign title\n plt.title ('Lidar data')\n #Assign data\n plt.imshow (lidar_clean)\n #Set up radar plot to figure 2\n radar_plot = plt.figure (2)\n #Assign title\n plt.title ('Radar data')\n #Assign data\n plt.imshow (radar_clean)\n #Show plots\n plt.show ()" ]
[ "0.64836633", "0.63798314", "0.63485694", "0.63415164", "0.62981063", "0.6228685", "0.6214679", "0.61689097", "0.6165796", "0.61613774", "0.611915", "0.60805905", "0.60388714", "0.6032662", "0.6006002", "0.5967216", "0.5963205", "0.59328747", "0.5918682", "0.59050053", "0.58971596", "0.58971596", "0.58971596", "0.58917505", "0.5870122", "0.5850759", "0.58383065", "0.5837782", "0.5832891", "0.58321315", "0.58019716", "0.5799492", "0.5791418", "0.5785222", "0.57821226", "0.5781753", "0.57466394", "0.5740755", "0.5738051", "0.5731026", "0.5711088", "0.571049", "0.5708579", "0.5704825", "0.5694754", "0.5694012", "0.56885797", "0.56737864", "0.5673688", "0.5661136", "0.5653911", "0.565231", "0.56246406", "0.56237465", "0.56225216", "0.5616251", "0.5606584", "0.56026334", "0.5599348", "0.55976933", "0.5594443", "0.55795765", "0.55756235", "0.5566534", "0.55649585", "0.55646855", "0.55634457", "0.55511266", "0.55501705", "0.5534543", "0.55320024", "0.55315936", "0.5516617", "0.55151266", "0.5509418", "0.5508476", "0.55055314", "0.55043226", "0.5500863", "0.5490862", "0.5486253", "0.5481102", "0.54784316", "0.5475996", "0.54751295", "0.54746073", "0.5472705", "0.5470276", "0.5468901", "0.5466252", "0.54585916", "0.5454122", "0.5453412", "0.5453372", "0.54532087", "0.5446091", "0.5433436", "0.5433436", "0.5433436", "0.5429299", "0.542361" ]
0.0
-1
Splits text from digits in a string.
def split_str(str):
    logger = logging.getLogger(__name__)
    logger.debug('{0}'.format(str))
    match = re.match(r"([0-9]+.?\d{0,32}?)(d|m|s)", str)
    if match:
        items = match.groups()
        return items[0], items[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_str_digit(s):\n res = []\n for m in re.finditer('(\\d*)(\\D*)', s):\n for g in m.groups():\n if g != '':\n try:\n res.append(int(g))\n except ValueError:\n res.append(g)\n return tuple(res)", "def split_num(s):\n i = 0\n while i < len(s):\n if s[i] < '0' or s[i] > '9':\n break\n i += 1\n if s[i:]:\n return (int(s[:i]), s[i:], )\n return (int(s[:i]), )", "def find_numbers(text):\n result = []\n for word in text.split():\n if word.isdigit():\n result.append(int(word))\n return result", "def split_string(text, chars_per_string):\n return [text[i:i + chars_per_string] for i in range(0, len(text), chars_per_string)]", "def split_string(text, chars_per_string):\n return [text[i:i + chars_per_string] for i in range(0, len(text), chars_per_string)]", "def split_alnum(s):\n def convert(x):\n try:\n return int(x)\n except ValueError:\n return x\n r = []\n digit = None\n for c in s:\n d = c.isdigit()\n if d != digit:\n digit = d\n r += [c]\n else:\n r[-1] += c\n return [convert(x) for x in r]", "def _split(string):\n out = [\"\", \"\"]\n for i in string:\n if i.isalpha():\n out[0] += i\n elif i.isnumeric() or i == \".\":\n out[1] += i\n return out", "def tokenize(s):\n return split_words(replace_numbers(r' \\1 ', s))", "def split_num(a_str):\n idx = None\n for i in iter(a_str):\n if i.isdigit():\n idx = a_str.index(i)\n break\n if idx == None:\n return (a_str[:idx], int('1'))\n else:\n return (a_str[:idx], int(a_str[idx:]))", "def ExtractNumbers(s):\n\n t = s.strip('[]\\n')\n comma_space = r', '\n re_comma_space = re.compile(comma_space)\n z = re_comma_space.split(t)\n #print z\n return z", "def _split(string: str, n: int):\n return [string[start : start + n] for start in range(0, len(string), n)]", "def test_split_string(self):\n mytext = '2011 Senior PGA Championship presented by'\n string1, string2 = split_string(mytext, 25, 25)\n self.assertEqual(string1, '2011 Senior PGA')\n self.assertEqual(string2, 'Championship presented')", "def find_numbers(text):\n # -------------------------------------------------------------------------\n # Notice how expressive the list comprehension syntax is, in that it sounds\n # almost exactly the same as you would describe the problem in English.\n # I.e.\n # Convert each word to an integer, for every word in text split over\n # spaces, if the word is comprised only of digits.\n # \n # int(word) Convert each word to an integer,\n # for word for every word\n # in text.split() in text split over spaces\n # if text.isdigit() if the word is comprised only of digits.\n # -------------------------------------------------------------------------\n return [int(word) for word in text.split() if word.isdigit()]", "def split_string_at_numbers(string):\n split_list = re.compile(r'(\\d+)').split(string)\n filtered_list = []\n skip_next_loops = 0\n for i in range(len(split_list)):\n if skip_next_loops > 0:\n skip_next_loops -= 1\n continue\n part = split_list[i]\n if part.isdigit() or (part == '.' and i < len(split_list) - 1 and split_list[i + 1].isdigit()):\n # Some kind of number\n if part == '.':\n # number of format '.###' (start of string)\n part += split_list[i + 1]\n skip_next_loops = 1\n elif i < len(split_list) - 2 and split_list[i + 1] == '.' 
and split_list[i + 2].isdigit():\n # number of format '###.###'\n part += split_list[i + 1] + split_list[i + 2]\n skip_next_loops = 2\n elif (i > 0 and len(filtered_list) and len(filtered_list[-1]) and\n filtered_list[-1][-1] == '.'):\n # number of format '.###' (within string)\n filtered_list[-1] = filtered_list[-1][:-1]\n part = '.' + part\n # otherwise just number of format '###'\n factor = 1\n if i < len(split_list) - 1:\n # check next part for unit information\n msg = split_list[i + 1].strip()\n msg = msg.lstrip('_([{')\n msg = re.split('[^a-zA-Zµ]', msg)[0]\n if msg:\n for unit in tools.science.UNIT_SYMBOLS:\n if msg.endswith(unit):\n msg = msg[:-len(unit)]\n break\n if len(msg) == 1:\n factor = 10**tools.science.SI_PREFIXES.get(msg[0], 0)\n filtered_list.append(float(part)*factor)\n else:\n # Actual string\n filtered_list.append(part)\n return filtered_list", "def split_string(line, nth):\n return [int(line[i:i+nth]) for i in range(0, len(line), nth)]", "def _split_string(self, string_):\n strings = []\n temp = []\n\n for token in string_.split(\" \"):\n temp.append(token)\n temp_string = \" \".join(temp)\n if len(temp_string) >= self._max_seq_length:\n strings.append(temp_string)\n temp = []\n # remaining text\n if temp != []:\n temp_string = \" \".join(temp)\n strings.append(temp_string)\n\n return strings", "def split_text(text, n=100, character=\" \"):\n text = text.split(character)\n return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]", "def splitTag(my_tag):\n my_split = re.findall(r'(\\d+)(\\D+)', my_tag)\n return ((int(x[0]), x[1]) for x in my_split)", "def get_digits(string):\n digit_str= ''.join(filter(lambda x: x.isdigit(), string))\n return digit_str", "def split(n):\n rest_of_num, last_num = n // 10, n % 10\n return rest_of_num, last_num", "def split_numeric(self, text, parse=True):\n\n block = ''\n block_numeric = self.isnum(text[0])\n output = []\n for t in text:\n if self.isnum(t) == block_numeric:\n block += t\n else:\n if block_numeric:\n block = float(block)\n output.append(block)\n block = t\n block_numeric = self.isnum(t)\n if block_numeric:\n block = float(block)\n output.append(block)\n return output", "def splits(text, L=20):\n return [(text[:i+1], text[i+1:]) \n for i in range(min(len(text), L))]", "def split(string, sep='\\t'):\n return text_type.split(string, sep)", "def split_into_words(s):\n s = re.sub(r\"\\W+\", \" \", s)\n s = re.sub(r\"[_0-9]+\", \" \", s)\n return s.split()", "def split_into_words(s):\n s = re.sub(r\"\\W+\", \" \", s)\n s = re.sub(r\"[_0-9]+\", \" \", s)\n return s.split()", "def test_split_string(self):\n self.assertEqual(('1-4', 14), split_string('1-4/14'))", "def num_split(num):\r\n num = list(str(num))\r\n return [int(i) for i in num]", "def extract_numbers_nl(text, short_scale=True, ordinals=False):\n results = _extract_numbers_with_text_nl(tokenize(text),\n short_scale, ordinals)\n return [float(result.value) for result in results]", "def integers_only(text) -> str:\n return ''.join(x for x in text if x.isdigit())", "def split(self, string):\n if self.chars:\n return list(string)\n else:\n return string.split(' ')", "def splitInPhrase(self,text):\n return self._support.splitInPhrase(text)", "def tokenize(expr):\n expr = expr.replace(' ', '')\n tokens = []\n for split in re.split('(\\d+)', expr):\n if not split:\n continue\n if split.isdigit():\n tokens.append(split)\n else:\n # Further split each non-digit char into a separate token.\n tokens.extend(split)\n return tokens", "def splitText(self, 
textStr):\n return [text.strip().replace('\\0', self.editSep) for text in\n textStr.replace(self.editSep * 2, '\\0').\n split(self.editSep)]", "def split_sentence(s, tok=False):\n if tok:\n s = s.lower()\n s = s.replace('\\u2019', \"'\")\n s = digit_pattern.sub('0', s)\n words = []\n for word in s.split():\n if tok:\n words.extend(split_pattern.split(word))\n else:\n words.append(word)\n words = [w for w in words if w]\n return words", "def split_string(self, string, n):\n if len(string) == 0:\n return ['']\n blocks = []\n while len(string) > 0:\n blocks.append(string[:n])\n string = string[n:]\n return blocks", "def text_to_parts(text: str) -> list:\n parts = []\n first_block_start, first_block_end, typee = find_first_block(text)\n parts.append(text[first_block_start : first_block_end + 1])\n if len(text) == first_block_end + 1:\n return [text]\n parts.append(text[first_block_end + 1])\n parts += text_to_parts(text[first_block_end + 2 : ])\n return parts", "def splitInSentence(self,text):\n return self._support.splitInPhrase(text)", "def tokenize(text):\n return text.split(' ')", "def split_number(string):\n\ttry:\n\t\tparts = string.split('-')\n\texcept AttributeError:\n\t\ttry:\n\t\t\tstring * string\n\t\t\treturn ('', string)\n\t\texcept TypeError:\n\t\t\treturn None\n\t\n\t\t\n\tend = parts[-1]\n\tif '.' in end:\n\t\ttry:\n\t\t\tnum = float(end)\n\t\texcept:\n\t\t\tnum = None\n\telse:\n\t\ttry:\n\t\t\tnum = int(end)\n\t\texcept:\n\t\t\tnum = None\n\tif num is not None:\n\t\tparts.pop(-1)\n\treturn ('-'.join(parts), num)", "def _regex_split(pattern, string):\n splits = list((m.start(), m.end()) for m in re.finditer(pattern, string))\n starts = [0] + [i[1] for i in splits]\n ends = [i[0] for i in splits] + [len(string)]\n return [string[start:end] for start, end in zip(starts, ends)]", "def split_large_text(text, length=4096):\n\n text = str(text)\n\n yield text[0: length]\n\n for i in range(length, len(text), length):\n yield text[i: i + length]", "def clean_and_split(text: str, compiled_pattern=TOKENIZER):\n\n text = text.lower().strip()\n if not hasattr(compiled_pattern, 'findall'):\n return text.split()\n return compiled_pattern.findall(text)", "def _split_course_string(course_string):\n course_num = ''\n course_postfix = ''\n\n count = 0\n for indx, char in enumerate(course_string):\n if not char.isdigit():\n break\n\n course_num += char\n count += 1\n try:\n course_num = int(course_num)\n except ValueError:\n logger.exception('Got an invalid course string: %s', course_string)\n raise InvalidCourseStringError(course_string)\n\n course_postfix = course_string[count:]\n\n return course_num, course_postfix", "def clean_numbers(text):\n return regex.sub(\"\\d+\", ' NUM', text)", "def split(self, string, maxsplit=MAX_INT, include_separators=False):\n return self._split(\n string, maxsplit=maxsplit, include_separators=include_separators\n )", "def split_space(string):\n return string.strip().split()", "def get_numbers(string:str, type_=\"int\") -> list:\n \n num_list = []\n for word in string.split():\n if type_ == \"int\":\n try:\n num_list.append(int(word))\n except:\n pass\n elif type_ == \"float\":\n if isfloat(word):\n num_list.append(float(word))\n return num_list", "def _remove_digit_blocks(self, text: str) -> str:\n return re.sub(r\"\\b\\d+\\b\", \" \", str(text))", "def extract_number_nl(text, short_scale=True, ordinals=False):\n return _extract_number_with_text_nl(tokenize(text.lower()),\n short_scale, ordinals).value", "def tokenize(str):\n return str.split()", "def 
text_to_int(text):\n # type (str) -> int\n try:\n return int(\"\".join(x for x in text if x.isdigit()))\n except ValueError:\n return 0", "def split_text(text, max_length, recursive_until=None, step=10):\n if len(text) <= max_length:\n return [text]\n breaks = [i for i in re.finditer(' |\\n|\\:|\\:|\\,|\\,|\\﹐|\\。|\\ㄧ|\\?|\\?|\\!|\\!|\\;|\\;|\\、|\\.', text)]\n segments = []\n start_offset = 0\n for k, p in enumerate(breaks):\n if p.end() - start_offset > max_length:\n start = start_offset\n end = breaks[k-1].end()\n segment = text[start:end]\n start_offset = breaks[k-1].end()\n segments.append(segment)\n\n if segments == []:\n if len(breaks) == 0:\n if len(text) < max_length:\n return [text]\n else:\n return [text[:recursive_until]]\n else:\n mid = len(breaks)//2\n segments = [text[:breaks[mid-1].end()], text[breaks[mid-1].end():]]\n\n if segments == []:\n raise Exception(f'something is wrong \\n{max_length}\\n{text}')\n\n for segment in segments:\n if len(segment) > max_length:\n if recursive_until:\n if max_length+step < recursive_until:\n return split_text(text, max_length+step, recursive_until=recursive_until)\n else:\n return [text[:recursive_until]]\n # raise Exception(f'splitted segment is larger than recursive limit {recursive_until}\\n{segment}\\n{text}')\n else:\n raise Exception(f'splitted segment is larger than {max_length}\\n{segment}\\n{text}')\n return segments", "def mysplit(s,delims):\r\n for c in delims:\r\n s = s.replace(c,' ')\r\n return s.split()", "def splits(text, start=0, end=20) -> Tuple[str, str]:\n return [(text[:i], text[i:]) \n for i in range(start, min(len(text), end)+1)]", "def split_paragraph_text(text):\n marker_positions = []\n for marker in _first_markers:\n # text.index('(') to skip over the periods, spaces, etc.\n marker_positions.extend(text.index('(', m.start())\n for m in marker.finditer(text))\n # Remove any citations\n citations = internal_citations(text, require_marker=True)\n marker_positions = [pos for pos in marker_positions\n if not any(cit.start <= pos and cit.end >= pos\n for cit in citations)]\n texts = []\n # Drop Zeros, add the end\n break_points = [p for p in marker_positions if p] + [len(text)]\n last_pos = 0\n for pos in break_points:\n texts.append(text[last_pos:pos])\n last_pos = pos\n return texts", "def extract_words(input_string):\n for c in punctuation + digits:\n input_string = input_string.replace(c, ' ' + c + ' ')\n\n return input_string.lower().split()", "def extractDigits(key):\n text = \"\"\n digits = \"\"\n for c in key:\n if c in \"0123456789\":\n digits += c\n else:\n text += c\n return (text, 0 if not digits else int(digits))", "def replace_digits(text):\n text = re.sub(r\"\\d+\", \"number\", text)\n \n return text", "def splitTextIRCWise(s, width):\n lines = s.splitlines()\n ret = []\n for line in lines:\n ret.extend(textwrap.wrap(line, width))\n return ret", "def tokenize_pt(text):\n #primeiros padrões, separação de palavra de [. , ? ! 
( ) [ ] : ; ' ' \" \" ]\n return split_level_two(split_level_one(text))", "def split(self, string):\n return (re.split('; |, |: |\"(\"|\"(\"|;|,|:| |', string))", "def _extract_numbers_with_text_nl(tokens, short_scale=True,\n ordinals=False, fractional_numbers=True):\n placeholder = \"<placeholder>\" # inserted to maintain correct indices\n results = []\n while True:\n to_replace = \\\n _extract_number_with_text_nl(tokens, short_scale,\n ordinals, fractional_numbers)\n\n if not to_replace:\n break\n\n results.append(to_replace)\n\n tokens = [\n t if not\n to_replace.start_index <= t.index <= to_replace.end_index\n else\n Token(placeholder, t.index) for t in tokens\n ]\n results.sort(key=lambda n: n.start_index)\n return results", "def func(str):\n\treturn str.split()", "def get_nummeric_only(text):\n\n nummeric_string =\"\"\n \n for character in text:\n if character.isnumeric():\n \n nummeric_string+=character\n \n return nummeric_string", "def inner_split(s):\n\n return s.split(split_string)", "def tokenize(text):\n \n div_pattern = r\"\"\"\n (?:\\d+\\.\\d+\\.\\d+)|(?:\\d+\\.\\d+) # Date\n |\\d{1,2}:\\d{2} # Time\n |\\d+(?:\\.\\d+)*(?:,\\d+)? # Number\n |\\w+(?:[-]\\w+)* # Word (possibly compound)\n |[\\(\\)\"] # Parentheses\n |[\\.,?!] # Punctuation\n |[^\\s]+ # Other\n \"\"\"\n\n \n token_pattern = re.compile(div_pattern, re.VERBOSE|re.UNICODE)\n \n return token_pattern.findall(text)", "def multi_split(text, seps):\n if not seps: # split by whitespaces\n return text.split()\n else: # split by separators in `seps`\n\n ##### Topics on Stack Overflow\n # http://stackoverflow.com/questions/1059559/python-strings-split-with-multiple-separators\n\n ## Method 1: use `re.split()` (from gimel)\n return re.split(r'[%s]' % seps, text)\n\n ## Method 2: DIY (from pprzemek)\n '''\n res = [text]\n for sep in seps:\n text, res = res, []\n for s in text:\n res += s.split(sep)\n return res\n '''", "def natural_keys(text):\n return [atoi(c) for c in re.split(\"(\\d+)\", text)]", "def split(text):\n doc = nlp(text)\n sentences = [x.text_with_ws for x in doc.sents]\n return sentences", "def alphanum_key(s):\n return [int(c) if c.isdigit() else c for c in re.split('([0-9]+)', s)]", "def alphanum_key(s):\n return [int(c) if c.isdigit() else c for c in re.split('([0-9]+)', s)]", "def split_to_obtain_token(str):\r\n str = re.split('[^a-z0-9]', str)\r\n str = [x for x in str if x != '']\r\n return str", "def extract_digits(cls, phone_number):\n extracted_num = \"\"\n for ch in phone_number:\n if ch in cls.INTEGER_STRING:\n extracted_num += ch\n return extracted_num", "def _splitCount(s: str, count: int)->list:\n return [''.join(x) for x in zip(*[list(s[z::count]) for z in range(count)])]", "def separate_words(text, min_word_return_size=2):\n splitter = re.compile('[^a-zA-Z0-9_\\\\+\\\\-/]')\n words = []\n for single_word in splitter.split(text):\n current_word = single_word.strip().lower()\n # leave numbers in phrase, but don't count as words, since they tend to invalidate scores of their phrases\n if len(current_word) > min_word_return_size and \\\n current_word != '' and \\\n not is_number(current_word):\n words.append(current_word)\n return words", "def alphanum_key(s):\n return [tryint(c) for c in re.split('([0-9]+)', s)]", "def _split_sentence(x: str) ->Sequence[str]:\n if not _NLTK_AVAILABLE:\n raise ModuleNotFoundError('ROUGE-Lsum calculation requires that `nltk` is installed. 
Use `pip install nltk`.')\n nltk.download('punkt', quiet=True, force=False)\n re.sub('<n>', '', x)\n return nltk.sent_tokenize(x)", "def split_tagged_text_into_chunks(text, *a, **kw):\n return split_tagged_text_into_chunks(text, *a, **kw)", "def intparse(text):\n return int(text, 0)", "def splitstring(string, splitcharacter=' ', part=None):\n\n # If the part is empty\n if part in [None, '']:\n # Return an array of the splitted text\n return str(string).split(splitcharacter)\n\n # Return an array of the splitted text with a specific part\n return str(string).split(splitcharacter)[part]", "def _extract_number_with_text_nl_helper(tokens,\n short_scale=True, ordinals=False,\n fractional_numbers=True):\n if fractional_numbers:\n fraction, fraction_text = \\\n _extract_fraction_with_text_nl(tokens, short_scale, ordinals)\n if fraction:\n return fraction, fraction_text\n\n decimal, decimal_text = \\\n _extract_decimal_with_text_nl(tokens, short_scale, ordinals)\n if decimal:\n return decimal, decimal_text\n\n return _extract_whole_number_with_text_nl(tokens, short_scale, ordinals)", "def collapse_numbers(text: str):\n groups = re.findall(r\"[\\d|\\s]{1,}\", text)\n\n results = list()\n for numbers in groups:\n squashed = squash(numbers)\n if squashed != \"\":\n results.append(squashed)\n\n return results", "def safe_split(self, text):\n try:\n words = self.shlex_split(text)\n return words\n except:\n return text", "def tokenize_by_space(text: str) -> List[str]:\n return text.split(\" \")", "def alphanum_key(s):\n return [ tryint(c) for c in re.split('([0-9]+)', s) ]", "def alphanum_key(s):\n return [ tryint(c) for c in re.split('([0-9]+)', s) ]", "def alphanum_key(s):\n return [ tryint(c) for c in re.split('([0-9]+)', s) ]", "def alphanum_key(s):\n return [ tryint(c) for c in re.split('([0-9]+)', s) ]", "def word_split(self, sentence):\n return re.split(self.word_split_pattern, sentence)", "def tokenize(s):\n return s.split()", "def split_words(self, position=None):\r\n if position is None:\r\n position = self.offset\r\n text = self.source_code[:position]\r\n return re.findall(self.id_regex, text)", "def chunks(text):\n lines = []\n for line in text.splitlines():\n lines.append(re.sub(' {2,}', ' ', line.strip()))\n return '\\n'.join(lines).split('\\n\\n')", "def splitTrackingNums(_pack):\n multi = [ i.strip() for i in _pack[1].split(';') ]\n splits_ = [ [_pack[0], m] for m in multi ]\n return splits_", "def tokenize(text,split_str='\\s',chars=False):\n if not chars:\n text=re.split(split_str,text)\n return [token for token in text if token not in [\"\"]]", "def split_strings_to_two_char(*, text: str) -> list:\n lenght_text_is_not_even = len(text) % 2 == 1\n\n if lenght_text_is_not_even:\n text += '_'\n\n result = []\n for iterable in range(len(text)//2):\n result.append(text[iterable * 2: iterable * 2 + 2])\n return result", "def text_to_int(self, text):\n int_sequence = []\n for c in text:\n if c == ' ':\n ch = self.char_map['']\n else:\n ch = self.char_map[c]\n int_sequence.append(ch)\n return int_sequence", "def _basic_tokenizer(line,normalize_digits=False): \r\n _DIGIT_RE = re.compile(r\"\\d+\") ## find digits \r\n \r\n words = []\r\n tokens = list(jieba.cut(line.strip().lower()))\r\n if normalize_digits:\r\n for token in tokens:\r\n m = _DIGIT_RE.search(token)\r\n if m is None:\r\n words.append(token)\r\n else:\r\n words.append('_数字_')\r\n else:\r\n words = tokens \r\n \r\n return words", "def sentence_split(self, text):\n return split_into_sentences(text)", "def split(self, s):\n 
punctuations = _SPLIT_RE.findall(s)\n texts = _SPLIT_RE.split(s)\n assert len(punctuations) + 1 == len(texts)\n new_texts = [self._split(x) for x in texts]\n for i, punctuation in enumerate(punctuations):\n new_texts.insert(2*i+1, punctuation)\n return [item for sublist in new_texts for item in sublist]", "def _string_to_chunks(text, **kwargs):\n text_limit = kwargs.get('text_limit', 1024)\n lines = \"\"\n for line in text:\n if len(lines) + len(line) < text_limit:\n lines += line\n else:\n yield lines\n lines = line[0:text_limit]\n else:\n yield lines" ]
[ "0.69932103", "0.69409627", "0.6516405", "0.6497158", "0.6497158", "0.6333978", "0.62500066", "0.62345093", "0.6181007", "0.61332566", "0.6114605", "0.60910285", "0.6013863", "0.59716225", "0.59670913", "0.59426206", "0.59077483", "0.5873073", "0.58616483", "0.58398443", "0.58017176", "0.57818675", "0.57747257", "0.5751674", "0.5751674", "0.5708564", "0.5682253", "0.5668389", "0.56672615", "0.56417775", "0.5608135", "0.5598263", "0.5570409", "0.5566214", "0.55651367", "0.55437183", "0.55259633", "0.5522699", "0.5518799", "0.55166566", "0.5514511", "0.55031", "0.5497465", "0.548128", "0.54733205", "0.5465348", "0.54541266", "0.5452415", "0.5447036", "0.5436695", "0.5433663", "0.5423205", "0.53983235", "0.5397567", "0.53966624", "0.5382257", "0.53703934", "0.5364366", "0.536263", "0.5358884", "0.53559595", "0.5345177", "0.5343731", "0.5343576", "0.53392816", "0.5333192", "0.53281474", "0.5325933", "0.53258306", "0.5322316", "0.5322316", "0.53070587", "0.5301108", "0.52973366", "0.52954966", "0.52840495", "0.52667755", "0.52590305", "0.5255625", "0.5252169", "0.5237571", "0.52332735", "0.5231637", "0.5225507", "0.52227414", "0.52227414", "0.52227414", "0.52227414", "0.52208966", "0.52103055", "0.5210158", "0.5209985", "0.52074534", "0.52018756", "0.52004015", "0.51964533", "0.5194631", "0.5189427", "0.5186204", "0.51849455" ]
0.6069437
12
Get the current status of the pool.
def get_pool_status(self, mission): # initialize node status states = dict( idle=0, rebooting=0, reimaging=0, running=0, unusable=0, creating=0, starting=0, waiting_for_start_task=0, start_task_failed=0, unknown=0, leaving_pool=0, offline=0, preempted=0) # if the pool does not exist if not self.batch_client.pool.exists(pool_id=mission.pool_name): return "N/A", "N/A", states # get pool info the_pool = self.batch_client.pool.get(pool_id=mission.pool_name) state = the_pool.state.name allocation_state = the_pool.allocation_state.name # get the list of node at current time point # we check the existance of the pool again to avoid coincidence if self.batch_client.pool.exists(pool_id=mission.pool_name): node_list = self.batch_client.compute_node.list( pool_id=mission.pool_name) # calculate the number of nodes in each status for node in node_list: states[node.state.name] += 1 node_list.reset() return state, allocation_state, states
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pool_status():\n pools_status = split_status_pools(fork_and_get_output(\"zpool status\".split()))\n pools = []\n for p in pools_status:\n pools.append(status.PoolStatus(p))\n return pools", "def status(self):\n self._refresh_state()\n return self._data.get('status')", "def getstatus(self):\n return self.__status", "def get_status(self):\n return self._conn_state", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self._status", "def status(self):\n\t\treturn self._status", "def status(self):\n return self._get(path='status')", "def status(self):\n return self.get(self._names[\"status\"])", "def getStatus(self):\n return self.__status", "def GetStatus(self):\r\n return self.status", "def getStatus(self):\n return self._status", "def status(self):\r\n return self._status", "def status(self):\r\n return self._status", "def status(self):\n return self.state", "def _get_status(self):\n return self.__status", "def status(self):\n if Daemon.status(self) != 0:\n return 1\n \n # Load decoy logger\n self.load_outputs(decoy=True)\n\n # Load node pool & print status\n try:\n self.pool = PLNodePool(self)\n sys.stdout.write(self.status_str())\n except PLNodePoolException:\n sys.stdout.write(\"No node found.\\n\")\n\n return 0", "def get_raw_status(self):\n self.__param_lock.acquire()\n status = self.__status\n self.__param_lock.release()\n return status", "def status(self):\n return self.__status", "def status(self):\n return self.__status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self.status", "def get_status(self):\n # TODO retrieve from db if not set\n return self.status", "def status(self):\n\n return self._status", "def status(self):\n\n return self._status", "def status(self):\n\n return self._status", "def status(self):\n if hasattr(self, \"_status\"):\n return self._status\n else:\n return None", "def status(self):\n with self.__lock:\n assert(self.__complete)\n return self.__status", "def status(self):\n return self._query_status()['status']", "def status(self):\n return self._dbattr('status')", "def get_status(self):\n\n return self._system", "def state(self):\n return self.status", "def status(self):\n return self._data['status']", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, 
\"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output['outputs.JobStatus']:\n return pulumi.get(self, \"status\")", "def status(self) -> 'outputs.ConnectionStatusResponse':\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")" ]
[ "0.75640756", "0.7310082", "0.73063296", "0.7286715", "0.7214205", "0.7214205", "0.7214205", "0.72092366", "0.71747327", "0.71360385", "0.7126846", "0.71214384", "0.7102766", "0.7091428", "0.70774436", "0.70774436", "0.7071764", "0.70652586", "0.70541", "0.70418775", "0.702872", "0.702872", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.70195943", "0.7009838", "0.69537354", "0.68971765", "0.68971765", "0.68971765", "0.68824387", "0.68791336", "0.6878214", "0.68637687", "0.68606764", "0.6847685", "0.6845295", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.6842841", "0.68412733", "0.6831925", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553", "0.68205553" ]
0.71968305
8
Get a string for the status overview of the pool and nodes.
def get_pool_overview_string(self, mission): # get statuses pool_status, allocation_status, node_status = self.get_pool_status(mission) s = "Pool status: {}\n".format(pool_status) s += "Allocation status: {}".format(allocation_status) if pool_status != "N/A": other = sum(node_status.values()) - node_status["idle"] - \ node_status["running"] - node_status["unusable"] s += "\n" s += "Node status: " s += "{} idle; ".format(node_status["idle"]) s += "{} running; ".format(node_status["running"]) s += "{} unusable; ".format(node_status["unusable"]) s += "{} other;".format(other) return s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def status_str(self, spaced=False):\n if self.args.vverbose:\n ## Print profile of all nodes\n status = self.pool.status(string=True)\n\n elif self.args.verbose:\n ## Print profile of usable nodes\n status = self.pool.status(min_state=PLNodeState.usable, string=True)\n\n else:\n ## Print list of usable nodes\n attribute = \"name\" if self.args.names else \"addr\"\n nodes = self.pool._get(attribute, min_state=PLNodeState.usable)\n if len(nodes) > 0:\n status = \"\\n\".join(nodes)+\"\\n\"\n else:\n status = \"No usable node found.\\n\"\n\n return status", "def status(self):\n \n tmpl1 = \"\"\"%-20s%-52s[%s]\"\"\"\n tmpl2 = \"\"\"%-20s%-52s\\n\"\"\"\n # print tmpl1 % (\"Machine Name\", \"IP Addresses\", \"Status\")\n # print 80 * \"-\"\n # print self.get_image()\n if self.cloudserver:\n # let's build the IPs first\n status = self.cloudserver.status\n \n else:\n status = \"OFF\"\n\n res2=\"\"\n ip1 = \"%s:%s\" % (self.networks[0], self.ip_addresses[self.networks[0]])\n if len(self.networks) > 1:\n res2 += \"\\n\"\n for network in self.networks[1:]:\n ipstr = \"%s:%s\" % (network, self.ip_addresses[network])\n res2+=tmpl2 % (\"-\", ipstr)\n # print res2\n # if len(self.ip_addresses.keys()) > 1:\n # ip1 = self.ip_addresses.values()[0]\n res1 = tmpl1 % (self.machine_name, ip1, status)\n return res1 + res2", "def status(ctx):\n return show_network_status()", "def get_pool_status():\n pools_status = split_status_pools(fork_and_get_output(\"zpool status\".split()))\n pools = []\n for p in pools_status:\n pools.append(status.PoolStatus(p))\n return pools", "def printStatus(self):\n output = StringIO.StringIO()\n # use a csv writer to write out each row\n writer = csv.writer(output, lineterminator = '\\n')\n \n # write the header\n writer.writerow(['Server','Ping Interval','Status'])\n \n # write out the online servers\n for server, interval in self.online_servers.iteritems():\n writer.writerow([server, interval[1], 'Online'])\n \n # write out the offline servers\n for server, interval in self.offline_servers.iteritems():\n writer.writerow([server, interval[1], 'Offline'])\n \n return output.getvalue()", "def get_pool_status(self, mission):\n\n # initialize node status\n states = dict(\n idle=0, rebooting=0, reimaging=0, running=0, unusable=0, creating=0,\n starting=0, waiting_for_start_task=0, start_task_failed=0, unknown=0,\n leaving_pool=0, offline=0, preempted=0)\n\n # if the pool does not exist\n if not self.batch_client.pool.exists(pool_id=mission.pool_name):\n return \"N/A\", \"N/A\", states\n\n # get pool info\n the_pool = self.batch_client.pool.get(pool_id=mission.pool_name)\n state = the_pool.state.name\n allocation_state = the_pool.allocation_state.name\n\n # get the list of node at current time point\n # we check the existance of the pool again to avoid coincidence\n if self.batch_client.pool.exists(pool_id=mission.pool_name):\n node_list = self.batch_client.compute_node.list(\n pool_id=mission.pool_name)\n\n # calculate the number of nodes in each status\n for node in node_list:\n states[node.state.name] += 1\n node_list.reset()\n\n return state, allocation_state, states", "def status(self):\n logging.debug(\"%s entered status\" % self)\n # print_config(self.infra)\n # print self.images\n # headers = [\"Machine Name\", \"Flavor\", \"IP Addresses\", \"Image Name\", \"Status\"]\n # pt = prettytable.PrettyTable(headers)\n # pt.align[\"Machine Name\"]=\"l\"\n # pt.align[\"IP Addresses\"] = \"l\"\n # pt.align[\"Image Name\"] = \"l\"\n # pt.align[\"Status\"] = \"r\"\n \n print \"Checking 
status of %s\" % self.footprint_name\n # tmpl = \"%(machine_name)-20s%(flavor)5s%(status)-30s\"\n tmpl1 = \"\"\"%-20s%-52s[%s]\"\"\"\n tmpl2 = \"\"\"%-20s%-60s\\n\"\"\"\n print tmpl1 % (\"Machine Name\", \"IP Addresses\", \"Status\")\n print 80 * \"-\"\n \n for machine in self.machines.keys():\n m = self.machines[machine]\n # machine_name = m.machine_name\n # ips = str(m.ip_addresses)\n # flavor = str(m.flavor)\n # img = str(m.image_id)\n # status = str(m.status)\n # pt.add_row([m, ips, status, img, status])\n # print \"FFF\", m, ips, flavor, img, status\n # print tmpl % locals()\n print m.status\n \n return \"%s is currently: %s\" % (self.footprint_name, self.footprint_status)", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def detailed_status(self) -> str:\n return pulumi.get(self, \"detailed_status\")", "def status(self):\n if Daemon.status(self) != 0:\n return 1\n \n # Load decoy logger\n self.load_outputs(decoy=True)\n\n # Load node pool & print status\n try:\n self.pool = PLNodePool(self)\n sys.stdout.write(self.status_str())\n except PLNodePoolException:\n sys.stdout.write(\"No node found.\\n\")\n\n return 0", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status_summary(self):\n base_query_set = super(PeeringSessionManager, self).get_queryset()\n summary = base_query_set.annotate(\n label=models.Case(\n models.When(provisioning_state=2, then=models.Case(\n models.When(admin_state=2, then=models.Case(\n models.When(operational_state=6,\n then=models.Value('Up')),\n default=models.Value('Down')\n )),\n default=models.Value('Admin Down')\n )),\n models.When(provisioning_state=1,\n then=models.Value('Provisioning')),\n default=models.Value('None'),\n output_field=models.CharField()\n )).values('label').annotate(value=models.Count('label'))\n return summary", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return 
pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def get_all_status():\n return \"\"", "def __repr__(self):\n result = \"%s (%s):\\n\" % (self.host, self.status)\n for port in self.ports:\n result += \"\\t%s %s %s %s %s\\n\" % (port[0], port[1], port[2], port[3], port[4])\n result += \"\\n\"\n return result", "def _get_status(self):\n return u'%s' % (self.get_status_display())", "def zpool_status(p):\n # Input file\n f = '/'.join([p, 'zfs/zpool-status-dv.out'])\n check_path(f)\n\n status = {}\n\n # Match empty lines\n empty = re.compile('^\\s*$')\n\n # Match multiple underscores\n underscore = re.compile('^__')\n\n # Match dashes\n dash = re.compile('^--')\n\n # Open file with universal newline support\n with open(f, 'rU') as fh:\n current = None\n\n # Read the lines into an array\n lines = fh.readlines()\n\n # Certain scenarios can lead to no pools available\n # The Ready Deploy image will not return any pools for example\n if len(lines) == 0 or 'no pools available' in lines[0]:\n return None\n\n for line in lines:\n #Ignore empty lines and lines that start with dashes or underscores\n if empty.search(line) or \\\n underscore.search(line) or \\\n dash.search(line):\n continue\n\n # Lines containing ':' define a new section\n elif ':' in line:\n \"\"\"\n Possible sections\n + pool - pool name\n + state - pool state\n + status - pool status\n + action - recovery action\n + scan - scan status\n + config - pool configuration\n + errors - pool errors\n + dedup - deduplication table\n \"\"\"\n # Parse pool name\n if 'pool:' in line:\n current = 'pool'\n pool = line.split(':')[1].strip()\n status[pool] = {}\n\n # Parse state\n elif 'state:' in line:\n current = 'state'\n state = line.split(':')[1].strip()\n status[pool]['state'] = state\n\n # Parse status\n elif 'status:' in line:\n current = 'status'\n if current not in status[pool]:\n status[pool][current] = []\n status[pool][current].append(line.split(':')[1].strip())\n\n # Parse action\n elif 'action:' in line:\n current = 'action'\n\n # Parse scan\n elif 'scan:' in 
line:\n current = 'scan'\n if current not in status[pool]:\n status[pool][current] = []\n status[pool][current].append(line.split(':')[1].strip())\n\n # Parse config\n elif 'config:' in line:\n current = 'config'\n status[pool]['config'] = []\n\n # Parse errors\n elif 'errors:' in line:\n current = 'errors'\n\n # Parse dedup\n elif 'dedup:' in line:\n current = 'dedup'\n if 'no DDT entries' in line:\n status[pool]['dedup'] = None\n else:\n status[pool]['dedup'] = []\n status[pool]['dedup'].append(line.split(':')[1])\n\n else:\n # Ignore these fields\n #if current in ['status', 'action', 'scan', 'errors']:\n # continue\n if current in ['action', 'errors']:\n continue\n\n if current == 'status' or current == 'scan':\n status[pool][current].append(line.strip())\n continue\n\n status[pool][current].append(line)\n\n for pool in status:\n # Parse config\n status[pool]['config'] = _parse_zpool_config(status[pool]['config'])\n\n # Parse dedup table if dedup is enabled\n if 'dedup' in status[pool] and status[pool]['dedup']:\n status[pool]['dedup'] = _parse_zpool_dedup(status[pool]['dedup'])\n\n # Ignoring errors for now\n # Parse errors if they exist\n #if status[pool]['errors']:\n # status[pool]['errors'] = parse_errors(status[pool]['errors'])\n\n # Ignoring scan information for now\n # Parse scan information is a scan is in progress\n #if status[pool]['scan']:\n # status[pool]['scan'] = parse_scan(status[pool]['scan'])\n\n return status", "def summary(self):\n res = \", \".join(\n elem[\"summary\"] for elem in self.status[\"health\"][\"summary\"]\n )\n if res:\n return res\n elif self.detail:\n return self.detail[0]\n return \"\"", "def get_status_as_string(self):\n if self.downloaded == 0:\n return \"[Starting... ]\"\n return \"[%s, %s, %s]\" % self.get_status()", "def display_status(self) -> str:\n return pulumi.get(self, \"display_status\")", "def status(self):\n r = requests.get('/'.join([self.base_url, self.ENDPOINT_STATUS]))\n return r.json()", "def get_overview_string(self, mission):\n\n s = self.get_pool_overview_string(mission) + \"\\n\\n\"\n s += self.get_job_overview_string(mission) + \"\\n\\n\"\n s += self.get_storage_container_overview_string(mission)\n\n return s", "def status(self):\n return {\n 'id': 'status',\n 'protocol_version': 'PV62',\n 'network': self.origin_node.network.name,\n 'td': self.origin_node.chain.head.header.difficulty,\n 'best_hash': self.origin_node.chain.head.header.hash,\n 'genesis_hash': self.origin_node.chain.genesis.header.hash,\n 'size': kB_to_MB(self._message_size['status'])\n }", "def status(self):\n res = \"\"\n for tlight in self.trafficLights:\n res += \"Traffic light {} status: {}\\n\".format(self.trafficLights[tlight].id,self.trafficLights[tlight].getState())\n return res", "def get_summary(self) -> str:\n connected = self.is_connected()\n info = '[{} - {}]'.format(self._index,\n 'Connected' if connected else 'Disconnected')\n if connected:\n info += ' {} ({})'.format(self.get_model_name(), self.get_serial())\n return info", "def get_status(self):\n r = requests.get(self.base_url + '/status')\n return r.json()", "def status(self):\n return self._get(path='status')", "def get_status_string(self, instance):\n return instance.get_status_string()", "def status():\n statuses = get_all_statuses()\n return json.dumps(statuses, indent=4)", "def get_pool_stats(self, pool):\n svc = self.pool_path % pool\n ret = self.rclient.get(svc)\n if ret.status != restclient.Status.OK:\n exception_msg = (_('Error getting pool stats: '\n 'pool: %(pool)s '\n 'return code: 
%(ret.status)d '\n 'message: %(ret.data)s.')\n % {'pool': pool,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n raise exception.InvalidInput(reason=exception_msg)\n val = jsonutils.loads(ret.data)\n if not self._is_pool_owned(val):\n exception_msg = (_('Error pool ownership: '\n 'pool %(pool)s is not owned '\n 'by %(host)s.')\n % {'pool': pool,\n 'host': self.host})\n raise exception.InvalidInput(reason=pool)\n avail = val['pool']['usage']['available']\n used = val['pool']['usage']['used']\n return avail, used", "def getClusterStatus(self):\n data = self.connect('get','cluster/status', None)\n return data", "def summary_statistics(self):\n display_str = f'--- {self.server_ip} ping statistics ---\\n'\n\n transmitted = str(self.request_count)\n received = str(self.reply_count)\n loss = str(round((1 - self.reply_count / self.request_count) * 100))\n total_time = str(round(self.total_end - self.total_start))\n\n display_str += f'{transmitted} transmitted, {received} received, ' \\\n f'{loss}% loss, time {total_time} ms\\n'\n if self.reply_count:\n rtt_min = str(min(self.rtt_list))\n rtt_avg = str(round(sum(self.rtt_list) / len(self.rtt_list)))\n rtt_max = str(max(self.rtt_list))\n display_str += f'rtt min/avg/max = {rtt_min}/{rtt_avg}/{rtt_max} '\\\n f'ms'\n else:\n display_str += 'rtt min/avg/max = 0/0/0 ms'\n\n return display_str", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def display_pipeline_status(ctx,\n pipeline_name,\n username,\n password,\n ip_address,\n interactive):\n slab_logger.info('Displaying status of %s' % pipeline_name)\n if not username:\n username = ctx.get_username()\n if not password:\n password = ctx.get_password(interactive)\n if not password or not username:\n slab_logger.error(\"Username is %s and password is %s. 
\"\n \"Please, set the correct value for both and retry.\" %\n (username, password))\n sys.exit(1)\n server_url = \"http://{0}/go/api/pipelines/{1}/status\"\n res = requests.get(server_url.format(ip_address, pipeline_name),\n auth=HTTPBasicAuth(username, password))\n soup = BeautifulSoup(res.content, \"html.parser\")\n print str(soup)", "def info(self):\n import string\n results = self.info_list()\n labels = \"%-8s %-9s %-4s %-8s %-8s %-4s\" % \\\n ('MACHINE','CPU','GHZ','MB TOTAL',\n 'MB FREE','LOAD')\n print labels\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f\" % \\\n (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \\\n res['cpu_speed'],res['mem_total'],res['mem_free'],\\\n res['load_1'])\n print s", "def detailed_status_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status_message\")", "def detailed_status_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status_message\")", "def visualize_pool ( self, pool_id=None ):\n try:\n pool = self._poolstack [ -1 if pool_id is None else pool_id ]\n except IndexError:\n return \"\"\n else:\n return '\\n'.join ( pool.export_rules() )", "def getStatus():", "def status(self):\n if self.error_code:\n msg = self.error_code\n else:\n msg = 'C{cycle},P{seen},{progress:.0f}%'.format(\n cycle=self.cycle,\n seen=self.seen_per_cycle,\n progress=(self.step / float(self.count_points) * 100)\n )\n return '[W{worker_no}: {msg}]'.format(\n worker_no=self.worker_no,\n msg=msg\n )", "def node_statuses(self) -> pulumi.Output[Sequence['outputs.NodeBalancerConfigNodeStatus']]:\n return pulumi.get(self, \"node_statuses\")", "def getNodeStatus(self,node):\n data = self.connect('get','nodes/%s/status' % (node),None)\n return data", "def _getCurrentComponentStatus(self):\n resOverall = self.sysAdminClient.getOverallStatus()\n if not resOverall['OK']:\n return resOverall\n currentStatus = {'Down': set(), 'Run': set(), 'All': set()}\n informationDict = resOverall['Value']\n for systemsDict in informationDict.values():\n for system, instancesDict in systemsDict.items():\n for instanceName, instanceInfoDict in instancesDict.items():\n identifier = '%s__%s' % (system, instanceName)\n runitStatus = instanceInfoDict.get('RunitStatus')\n if runitStatus in ('Run', 'Down'):\n currentStatus[runitStatus].add(identifier)\n\n currentStatus['All'] = currentStatus['Run'] | currentStatus['Down']\n return S_OK(currentStatus)", "def __str__(self):\n status = \"height = {}\\n\".format(self.height)\n status += \"width = {}\\n\".format(self.width)\n status += \"channels = {}\\n\".format(self.channels)\n status += \"architecture = {}\\n\".format(self.architecture)\n status += \"activations = {}\\n\".format(self.activations)\n status += \"conv_activations = {}\\n\".format(self.conv_activations)\n status += \"conv_architecture = {}\\n\".format(self.conv_architecture)\n status += \"kernel_sizes = {}\\n\".format(self.kernel_sizes)\n status += \"pool_kernel = {}\\n\".format(self.pool_kernel)\n status += \"batch_size = {}\\n\".format(self.batch_size)\n status += \"epochs = {}\\n\".format(self.epochs)\n status += \"save_step = {}\\n\".format(self.save_step)\n status += \"learning_rate = {}\\n\".format(self.learning_rate)\n status += \"momentum = {}\\n\".format(self.momentum)\n return status", "def status(self):\n if self.to_phone:\n phone = '(%s) %s - %s' % (self.to_phone[:3], self.to_phone[3:6], self.to_phone[6:])\n else:\n phone = ''\n\n 
name = self.to_name if self.to_name else ''\n\n return ' poll in %ds | %s | %s ' % (self.step, name, phone)", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def printstatus(self):\n data = self.statuslist()\n if not data:\n print(\n \"Unable to communicate to the OpenSprinkler \"\n \"at %s\" % self.hostname\n )\n return None\n print('Station\\t%-15.15s\\tStatus' % 'Name')\n for item in data:\n print('%d\\t%-15.15s\\t%s' % (item[0], item[1], item[2]))\n return", "async def _status():\n # TODO(Deepankar): should we add versions of executors?\n return {\n 'status_code': status.HTTP_200_OK,\n 'jina_version': jina_version\n }", "def state(self) -> str:\n return str(self.coordinator.server_status)", "def fetch_status():\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((GEARMAND_HOST, GEARMAND_PORT))\n log_verbose('Connected to Gearmand at %s:%s' % (GEARMAND_HOST, GEARMAND_PORT))\n except socket.error, e:\n collectd.error('gearmand_info plugin: Error connecting to %s:%d - %r'\n % (GEARMAND_HOST, GEARMAND_PORT, e))\n return None\n fp = s.makefile('r')\n log_verbose('Sending info command')\n s.sendall('status\\r\\n')\n\n status = {}\n while True:\n data = fp.readline().strip()\n log_verbose('Received data: %r' % data)\n if not data or data == '.':\n break\n function, total, running, available_workers = data.split('\\t')\n status[function] = {\n 'total': total,\n 'running': running,\n 'available_workers': available_workers}\n\n s.close()\n return status" ]
[ "0.78566015", "0.72541404", "0.690066", "0.68025947", "0.67186147", "0.6582966", "0.65206933", "0.65195024", "0.65195024", "0.6434992", "0.64222366", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6333841", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.6326419", "0.63026965", "0.6287767", "0.62804025", "0.6263736", "0.6241694", "0.6237216", "0.6226961", "0.6195314", "0.61846477", "0.6175487", "0.61490506", "0.61199194", "0.6093558", "0.60847443", "0.6079126", "0.6078211", "0.60649735", "0.6054429", "0.60511917", "0.6031487", "0.6031487", "0.6031487", "0.602098", "0.6018005", "0.6015281", "0.6015281", "0.59941095", "0.59688926", "0.5968612", "0.59400636", "0.5936846", "0.5916769", "0.5912286", "0.5909197", "0.59084266", "0.59084266", "0.59084266", "0.59084266", "0.59084266", "0.59084266", "0.5905792", "0.5886632", "0.58842593", "0.588199" ]
0.7833251
1
Get the current status of the job (task scheduler) and tasks.
def get_job_status(self, mission): # initialize task status status = dict(active=0, running=0, succeeded=0, failed=0) # get job status if it exists. Otherwise, return N/A try: the_job = self.batch_client.job.get(job_id=mission.job_name) # get counts of tasks in different statuses status_counts = self.batch_client.job.get_task_counts(mission.job_name) except azure.batch.models.BatchErrorException as err: if err.message.value.startswith("The specified job does not exist"): return "N/A", status # raise an exception for other kinds of errors raise # update the dictionary status["active"] = status_counts.active status["running"] = status_counts.running status["succeeded"] = status_counts.succeeded status["failed"] = status_counts.failed return the_job.state.name, status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def status(self):\n return self.job_proto.status", "def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)", "def celery_task_status(self):\n return self._get_celery_queue_data()", "def get_job_status(self):\n if self.worker_thread is None:\n return None\n else:\n return self.worker_thread.get_status()", "def get_status(self):\n url = \"data_request?id=jobstatus&job=%d&plugin=zwave\" % self.id\n return self.vera.get(url)", "def status(self) -> str:\n return self._check_job_status()", "def status(self) -> pulumi.Output['outputs.JobStatus']:\n return pulumi.get(self, \"status\")", "def getstatus(self):\n with self.lock:\n return (self.status, self.time_start)", "def task_status(self) -> str:\n return self._task_status", "def updater_job_status(self,request):\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/status invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\t# First check if a job is running. This will update the\n\t\t# internal field self._current_job, or if the job is finished,\n\t\t# it would return an empty string.\n\t\tinst = self.__which_job_is_running()\n\n\t\tjob = request.options.get('job','')\n\t\tresult = {}\n\t\tif job in INSTALLERS:\n\t\t\t# make a copy, not a reference!\n#\t\t\tresult = {}\n#\t\t\tfor arg in INSTALLERS[job]:\n#\t\t\t\tresult[arg] = INSTALLERS[job][arg]\n\t\t\tresult = deepcopy(INSTALLERS[job])\n\n\t\t\tif 'statusfile' in INSTALLERS[job]:\n\t\t\t\ttry:\n\t\t\t\t\tfor line in open(INSTALLERS[job]['statusfile']):\n\t\t\t\t\t\tfields = line.strip().split('=')\n\t\t\t\t\t\tif len(fields) == 2:\n\t\t\t\t\t\t\tresult['_%s_' % fields[0]] = fields[1]\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t# if we encounter that the frontend asks about the last job we\n\t\t\t# have executed -> include its properties too.\n\t\t\tif self._current_job:\n\t\t\t\tif self._current_job['job'] == job:\n\t\t\t\t\tfor f in self._current_job:\n\t\t\t\t\t\tresult[f] = self._current_job[f]\n\t\t\t\t\t\tif isinstance(result[f],str) and result[f].isdigit():\n\t\t\t\t\t\t\tresult[f] = int(result[f])\n\t\t\t\tif inst == '':\n\t\t\t\t\tresult['running'] = False\n\t\t\telse:\n\t\t\t\t# no job running but status for release was asked? \n\t\t\t\t# maybe the server restarted after job finished\n\t\t\t\t# and the frontend did not get that information\n\t\t\t\t# Bug #26318\n\t\t\t\tif job == 'release':\n\t\t\t\t\tresult['detail'] = '%s-%s' % (self.ucr.get('version/version'), self.ucr.get('version/patchlevel'))\n\t\t\t\telse:\n\t\t\t\t\tresult['detail'] = _('Unknown')\n\n\t\t\t# -------------- additional fields -----------------\n\n\t\t\t# elapsed time, ready to be displayed. 
(not seconds, but rather\n\t\t\t# the formatted string)\n\t\t\tif 'time' in result and 'started' in result:\n\t\t\t\telapsed = result['time'] - result['started']\n\t\t\t\tif elapsed < 60:\n\t\t\t\t\tresult['elapsed'] = '%ds' % elapsed\n\t\t\t\telse:\n\t\t\t\t\tmins = int(elapsed/60)\n\t\t\t\t\tsecs = elapsed - (60 * mins)\n\t\t\t\t\tif mins < 60:\n\t\t\t\t\t\tresult['elapsed'] = '%d:%02dm' % (mins,secs)\n\t\t\t\t\telse:\n\t\t\t\t\t\thrs = int(mins/60)\n\t\t\t\t\t\tmins = mins - (60*hrs)\n\t\t\t\t\t\tresult['elapsed'] = '%d:%02d:%02dh' % (hrs,mins,secs)\n\t\t\t# Purpose is now formatted in the language of the client (now that\n\t\t\t# this LANG is properly propagated to us)\n\t\t\tif 'purpose' in result:\n\t\t\t\tif result['purpose'].find('%') != -1:\n\t\t\t\t\t# make sure to not explode (Bug #26318), better show nothing\n\t\t\t\t\tif 'detail' in result:\n\t\t\t\t\t\tresult['label'] = result['purpose'] % result['detail']\n\t\t\t\telse:\n\t\t\t\t\tresult['label'] = result['purpose']\n\t\t\t# Affordance to reboot... hopefully this gets set before\n\t\t\t# we stop polling on this job status\n\t\t\tself.ucr.load()\t# make it as current as possible\n\t\t\tresult['reboot'] = self.ucr.is_true('update/reboot/required',False)\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/status returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# -----------------------------------\n\n\t\tself.finished(request.id,result)", "def get_working_status(self):\n #TODO: fix some issue on restarting and so on about current status\n return self.working_map[self.get_status()]", "def task_status():\n pass", "def _get_current_job_status(acq_tframes):\n cur_job = dict()\n if acq_tframes:\n cur_job['employer'] = f'{str_sep}'.join(\n {tframe.employer if tframe.category == 'O' else tframe.category for tframe in\n acq_tframes}).replace(',', '')\n cur_job['start'] = [tf.start for tf in acq_tframes][0]\n cur_job['end'] = sorted([tf.end for tf in acq_tframes])[-1]\n return cur_job", "def mesos_status(self, submissionId):\n get_tasks = self.driver.getTasks()['get_tasks']\n task_state = None\n\n tasks = get_tasks['tasks'] + get_tasks.get('completed_tasks')\n tasks_list = list(filter(lambda x: x['task_id']['value'] == submissionId, tasks))\n if len(tasks_list) > 0:\n task = tasks_list[0]\n task_state = task['state']\n self._log.debug(\"Task state = \" + task_state)\n else:\n self._log.debug(\"Task not found\")\n\n return task_state", "def refresh_queue_status(self):\n \n # Get the jobid and state for all jobs pending/running/completed for the current user\n qacct_stdout=self.run_grid_command_resubmit([\"qacct\",\"-o\",getpass.getuser(),\"-j\",\"*\"])\n \n # info list should include jobid, state, cpus, time, and maxrss\n info=[]\n job_status=[]\n for line in qacct_stdout.split(\"\\n\"):\n if line.startswith(\"jobnumber\") or line.startswith(\"job_number\"):\n if job_status:\n info.append(job_status)\n job_status=[line.rstrip().split()[-1],\"NA\",\"NA\",\"NA\",\"NA\"]\n # get the states for completed jobs\n elif line.startswith(\"failed\"):\n failed_code = line.rstrip().split()[1]\n if failed_code != \"0\":\n if failed_code in [\"37\",\"100\"]:\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n elif line.startswith(\"deleted_by\"):\n if line.rstrip().split()[-1] != \"NONE\" and job_status[1] == self.job_code_terminated:\n job_status[1]=self.job_code_deleted\n elif 
line.startswith(\"exit_status\"):\n # only record if status has not yet been set\n if job_status[1] == \"NA\":\n exit_status = line.rstrip().split()[-1]\n if exit_status == \"0\":\n job_status[1]=self.job_code_completed\n elif exit_status == \"137\":\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n # get the current state for running jobs\n elif line.startswith(\"job_state\"):\n job_status[1]=line.rstrip().split()[-1]\n elif line.startswith(\"slots\"):\n job_status[2]=line.rstrip().split()[-1]\n elif line.startswith(\"ru_wallclock\"):\n try:\n # get the elapsed time in minutes\n job_status[3]=str(float(line.rstrip().split()[-1])/60.0)\n except ValueError:\n job_status[3]=\"NA\"\n elif line.startswith(\"ru_maxrss\"):\n job_status[4]=line.rstrip().split()[-1]+\"K\"\n \n if job_status:\n info.append(job_status)\n\n return info", "def get_job_status(self, job, context=None):\n return self._client.call_method(\n 'UserAndJobState.get_job_status',\n [job], self._service_ver, context)", "async def get_task_status(task_id: TaskId):", "def _get_job_status(self):\n total_hits = session.query(BoxHit).filter_by(training_job_id=self.id).count()\n num_hits_left = session.query(BoxHit).filter_by(training_job_id=self.id, outstanding=True).count()\n total_urls = self.num_urls\n num_urls_left = session.query(VideoTrainingURL).filter_by(job=self, processed=False).count()\n faces_obtained = MTurkBox.query.filter_by(label=self.evaluator.target_label, result=True).count()\n return '\\n'.join([\n '------------- Stats for Job ID: %s -------------' % str(self.id) ,\n 'Job for Label : %s' % self.label.name,\n 'Total URLs : %d' % total_urls,\n 'Total HITs : %d' % total_hits,\n 'unprocessed URLS : %d' % num_urls_left,\n 'outstanding Hits : %d' % num_hits_left,\n 'Job Finish Status : %s' % self.finished,\n 'Faces Obtained : %d' % faces_obtained,\n ]) + '\\n'", "def __get_job_status_from_queue__(self):\n\n return (lambda job: (int(job[-1]['JobStatus']),\n job[-1]))(self.schedd.query(\"ClusterId =?= {0}\".format(self.id)))", "async def status(self) -> JobStatus:\n async with self._redis.pipeline(transaction=True) as tr:\n tr.exists(result_key_prefix + self.job_id) # type: ignore[unused-coroutine]\n tr.exists(in_progress_key_prefix + self.job_id) # type: ignore[unused-coroutine]\n tr.zscore(self._queue_name, self.job_id) # type: ignore[unused-coroutine]\n is_complete, is_in_progress, score = await tr.execute()\n\n if is_complete:\n return JobStatus.complete\n elif is_in_progress:\n return JobStatus.in_progress\n elif score:\n return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued\n else:\n return JobStatus.not_found", "def job_status(self, job_id):\n\n response = self.batch_client.describe_jobs(jobs=[job_id])\n return response[\"jobs\"][0][\"status\"]", "def get_status(self):\n # find status\n # search in summary file first\n self.status = \"running\"\n status = self.search_summary(\"status\")\n if status:\n self.status = status.split()[1]\n # define running time\n # search in summary file first\n self.running_time = \"00:00:00\"\n running_time = self.search_summary(\"running-time\")\n if running_time:\n self.running_time = running_time.split()[1]\n # calculate running time\n else:\n now = datetime.datetime.now()\n elapsed_time = (now - self.ctime).seconds\n hours, remainder = divmod(elapsed_time, 3600)\n minutes, seconds = divmod(remainder, 60)\n self.running_time = (\n f\"{int(hours):02}:{int(minutes):02}:{int(seconds):02}\"\n )", "def do_status(self, args):\n 
status = self._leet.job_status\n\n for job in self.finished_jobs:\n status.append({\"id\" : job.id,\n \"hostname\" : job.machine.hostname,\n \"plugin\": job.plugin_instance.LEET_PG_NAME,\n \"status\" : job.status})\n if status:\n pretty_jobs_status(status)\n else:\n print(\"***No jobs pending\")", "def job_status(self) -> JobStatus:\n statuses = set()\n with self._jobs.lock:\n\n # No jobs present\n if not self._jobs:\n return JobStatus.DONE\n\n statuses = set()\n for job in self._jobs.values():\n if job:\n statuses.add(job.status())\n\n # If any jobs are in non-DONE state return that state\n for stat in [\n JobStatus.ERROR,\n JobStatus.CANCELLED,\n JobStatus.RUNNING,\n JobStatus.QUEUED,\n JobStatus.VALIDATING,\n JobStatus.INITIALIZING,\n ]:\n if stat in statuses:\n return stat\n\n return JobStatus.DONE", "def get_status(job_id):\n job = fetch_data.AsyncResult(job_id, app=app)\n return jsonify({'job_id': job_id, 'status': job.status})", "def status(self):\n return self._get(path='status')", "def get_oozie_status(self, job_id):\n self.echo('Checking status...')\n status = self.call_return(\"oozie job -oozie \" + self.pylot_cfg.hdfs_oozie_interface + \" -info \" + job_id + \" | grep 'Status' | grep ':' | awk '{print $NF}'\")\n status = status.strip('\\n')\n return status", "def getstatus(self):\n return self.__status", "def status(self):\n return self.get(self._names[\"status\"])", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def info(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s\" % (self.sessionid,\n self.name))\n return self.server.json_body(resp)", "def status(self):\n self._refresh_state()\n return self._data.get('status')", "def get_status(self):\n if self._is_running():\n return \"RUNNING\"\n elif self._has_error():\n # The run started but failed\n return \"FAILED\"\n elif self._is_finished():\n # The run was finished\n return \"FINISHED\"\n elif self.current_step() >= 0:\n # The run started at some point but was not completed\n return \"INCOMPLETE\"\n else:\n # The run did not start\n return \"NOT STARTED\"", "def get_status(self):\n return self._status", "def get_status(self):\n\n return self._system", "def GetStatus(self):\r\n return self.status", "def get_project_job_status(id):\n user = current_user\n\n if user.get_id() is not None:\n _tasks = user.get_project_tasks_in_progress(id)\n running_task_dicts = get_running_task_dicts(_tasks)\n\n _tasks = user.get_finished_project_tasks(id)\n finished_task_dicts = get_finished_task_dicts(_tasks)\n\n response_object = {\n 'running_tasks': running_task_dicts,\n 'finished_tasks': finished_task_dicts\n }\n else:\n response_object = {'status': 'error'}\n # print(jsonify(response_object))\n return jsonify(response_object)", "def container_status(self):\n if self.status == 'complete':\n return 'complete'\n try:\n task_status = self._ecs.describe_tasks(tasks=[self.name])['tasks'][0]['lastStatus']\n return task_status\n except (IndexError, ClientError):\n return 'STOPPED'", "def get_status(self, job_id):\n\n result = self.redis.get('job_status:' + str(job_id))\n return pickle.loads(result) if result else None", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def 
status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def get_job_status(jobid, wait=30):\n cmd = \"scontrol show job {0}\".format(jobid)\n try:\n output = subprocess.check_output(cmd, shell=True)\n m = re.search(\"JobState=(\\w+)\", output)\n except subprocess.CalledProcessError:\n m = False\n\n status = None\n if m:\n status = m.group(1)\n else:\n repeat = 0\n while not m and repeat < wait:\n cmd = \"sacct -b -j {0}\".format(jobid)\n output = subprocess.check_output(cmd, shell=True)\n m = re.search(\"{0}\\s+([A-Z]+)\".format(jobid), output)\n time.sleep(1)\n repeat += 1\n if m:\n status = m.group(1)\n\n if status is None:\n raise ValueError(\"Job not found: {0}\".format(jobid))\n else:\n return status", "def _get_status(self):\n return self.__status", "def _check_job_status(self) -> str:\n self._assert_job_created()\n\n r = requests.post(\n f'https://{cc.ROUTE_PREFIX}.stratodem.com/jobs/status',\n headers=dict(\n Authorization=f'Bearer {get_api_token()}',\n ),\n json=dict(job_id=self._job_id)\n )\n\n if not r.status_code == 200:\n raise APIQueryFailedException('Failed to determine job status')\n\n r = r.json()\n\n if not r['success']:\n raise APIQueryFailedException(r)\n else:\n return r['message']", "def status(self):\n\t\treturn self._status", "def status(self):\n return self._query_status()['status']", "def job_status(self, job_id):\n url = self.base_url + \"/ml-service/phoenix-ml/job/status?id={0}\".format(job_id)\n # url = \"http://10.1.2.110:8199/phoenix-ml/job/status?id=12\"\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response.text", "def getCompileStatus():\n logger.debug(\"[FLASKWEB] Retrieving current active compilation status\")\n\n jobs = compileService.getActiveState()\n title = \"Active Compiling Tasks\" if jobs else \"NO Active Compiling Jobs\"\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(jobs), 200\n else:\n return render_template(\"keyvalue.html\", title=title, store=jobs)", "def fuota_task_status(self) -> Optional[str]:\n return pulumi.get(self, \"fuota_task_status\")", "def status(self):\n return self._data['status']", "def workflow_status(self):\n return self._workflow_status", "def find_job_and_job_status(self):\n\n def find_job_and_job_status_log_history(f):\n rcelog('critical', \"find_job_and_status(): Found job {0} in history. 
Terminated in error.\".\n format(self.id))\n return f\n\n try:\n return self.__get_job_status_from_queue__()\n except:\n pass\n\n try:\n return find_job_and_job_status_log_history(self.__get_job_status_from_history__())\n except:\n return (None, None)", "def get_status(job_key):\n job = Job.fetch(job_key, connection=conn)\n\n logs_url = \"{}{}/runner/logs/{}\".format(request.url_root, API_VERSION, job_key)\n status_dict = {\"status\": \"\", \"logs_url\": logs_url}\n return_code = 200\n if job.is_finished:\n status_dict['status'] = \"success\"\n return_code = 200\n elif job.is_failed:\n status_dict['status'] = \"terminal\"\n return_code = 400\n else:\n status_dict['status'] = \"running\"\n status_dict['logs_url'] = \"\"\n return_code = 202\n\n return jsonify(status_dict), return_code", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def getStatus(self):\n return self.__status", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")" ]
[ "0.7439859", "0.74113566", "0.73972505", "0.73632085", "0.7306718", "0.7229825", "0.7227331", "0.719492", "0.71917385", "0.7006022", "0.69687855", "0.69370997", "0.6901852", "0.6846558", "0.68459725", "0.68419343", "0.6834033", "0.6797795", "0.67972744", "0.6779665", "0.67757803", "0.6775244", "0.6749801", "0.67442286", "0.672544", "0.6723445", "0.671497", "0.6714617", "0.6687363", "0.6632541", "0.6632541", "0.6632541", "0.6592881", "0.65856355", "0.65797096", "0.65731573", "0.6563606", "0.65541655", "0.65530854", "0.6537728", "0.6513283", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.6508077", "0.65037715", "0.65035754", "0.6497703", "0.64791757", "0.64751786", "0.6458712", "0.6448715", "0.64474285", "0.64468443", "0.6433897", "0.6426137", "0.6421537", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.64133906", "0.6411864", "0.6406713", "0.6406713" ]
document_score: 0.6771893
document_rank: 22
query: Get a string for the status overview of the job and tasks.
def get_job_overview_string(self, mission): # get statuses job_status, task_status = self.get_job_status(mission) s = "Job status: {}".format(job_status) if job_status != "N/A": s += "\n" s += "Tasks status: " s += "{} active; ".format(task_status["active"]) s += "{} running; ".format(task_status["running"]) s += "{} succeeded; ".format(task_status["succeeded"]) s += "{} failed;".format(task_status["failed"]) return s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_job_status(self):\n total_hits = session.query(BoxHit).filter_by(training_job_id=self.id).count()\n num_hits_left = session.query(BoxHit).filter_by(training_job_id=self.id, outstanding=True).count()\n total_urls = self.num_urls\n num_urls_left = session.query(VideoTrainingURL).filter_by(job=self, processed=False).count()\n faces_obtained = MTurkBox.query.filter_by(label=self.evaluator.target_label, result=True).count()\n return '\\n'.join([\n '------------- Stats for Job ID: %s -------------' % str(self.id) ,\n 'Job for Label : %s' % self.label.name,\n 'Total URLs : %d' % total_urls,\n 'Total HITs : %d' % total_hits,\n 'unprocessed URLS : %d' % num_urls_left,\n 'outstanding Hits : %d' % num_hits_left,\n 'Job Finish Status : %s' % self.finished,\n 'Faces Obtained : %d' % faces_obtained,\n ]) + '\\n'", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def detailed_status(self) -> str:\n return pulumi.get(self, \"detailed_status\")", "def get_status_as_string(self):\n if self.downloaded == 0:\n return \"[Starting... ]\"\n return \"[%s, %s, %s]\" % self.get_status()", "def task_status(self) -> str:\n return self._task_status", "def status(self) -> str:\n return self._check_job_status()", "def _get_status(self):\n return u'%s' % (self.get_status_display())", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def get_status(self, render_via_ajax):\r\n ugettext = self.system.service(self, \"i18n\").ugettext\r\n status_list = []\r\n current_task_human_name = \"\"\r\n for i in xrange(0, len(self.task_xml)):\r\n human_task_name = self.extract_human_name_from_task(self.task_xml[i])\r\n human_task_name = ugettext(human_task_name)\r\n # Extract the name of the current task for screen readers.\r\n if self.current_task_number == i:\r\n current_task_human_name = human_task_name\r\n task_data = {\r\n 'task_number': i + 1,\r\n 'human_task': human_task_name,\r\n 'current': 
self.current_task_number == i\r\n }\r\n status_list.append(task_data)\r\n\r\n context = {\r\n 'status_list': status_list,\r\n 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,\r\n 'legend_list': LEGEND_LIST,\r\n 'render_via_ajax': render_via_ajax,\r\n 'current_task_human_name': current_task_human_name,\r\n }\r\n status_html = self.system.render_template(\r\n \"{0}/combined_open_ended_status.html\".format(self.TEMPLATE_DIR), context\r\n )\r\n\r\n return status_html", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def _status_summary(jobs):\n assert type(jobs) == list\n successful = 0\n pending = 0\n running = 0\n coalesced = 0\n\n for job in jobs:\n status = buildapi.query_job_status(job)\n if status == buildapi.PENDING:\n pending += 1\n if status == buildapi.RUNNING:\n running += 1\n if status == buildapi.SUCCESS:\n successful += 1\n if status == buildapi.COALESCED:\n coalesced += 1\n\n return (successful, pending, running, coalesced)", "def display_status(self) -> str:\n return pulumi.get(self, \"display_status\")", "def pretty_jobs_status(jobs):\n print(tabulate.tabulate(jobs, headers=\"keys\"))", "def detailed_status_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status_message\")", "def detailed_status_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status_message\")", "def status(self):\n logging.debug(\"%s entered status\" % self)\n # print_config(self.infra)\n # print self.images\n # headers = [\"Machine Name\", \"Flavor\", \"IP Addresses\", \"Image Name\", \"Status\"]\n # pt = prettytable.PrettyTable(headers)\n # pt.align[\"Machine Name\"]=\"l\"\n # pt.align[\"IP Addresses\"] = \"l\"\n # pt.align[\"Image Name\"] = \"l\"\n # pt.align[\"Status\"] = \"r\"\n \n print \"Checking status of %s\" % self.footprint_name\n # tmpl = \"%(machine_name)-20s%(flavor)5s%(status)-30s\"\n tmpl1 = \"\"\"%-20s%-52s[%s]\"\"\"\n tmpl2 = \"\"\"%-20s%-60s\\n\"\"\"\n print tmpl1 % 
(\"Machine Name\", \"IP Addresses\", \"Status\")\n print 80 * \"-\"\n \n for machine in self.machines.keys():\n m = self.machines[machine]\n # machine_name = m.machine_name\n # ips = str(m.ip_addresses)\n # flavor = str(m.flavor)\n # img = str(m.image_id)\n # status = str(m.status)\n # pt.add_row([m, ips, status, img, status])\n # print \"FFF\", m, ips, flavor, img, status\n # print tmpl % locals()\n print m.status\n \n return \"%s is currently: %s\" % (self.footprint_name, self.footprint_status)", "def status(self):\n if self.to_phone:\n phone = '(%s) %s - %s' % (self.to_phone[:3], self.to_phone[3:6], self.to_phone[6:])\n else:\n phone = ''\n\n name = self.to_name if self.to_name else ''\n\n return ' poll in %ds | %s | %s ' % (self.step, name, phone)", "def get_status_string(self, instance):\n return instance.get_status_string()", "def show_tasks_status(user, tasks):\n employee_name = user[0]['username']\n all_tasks = tasks\n completed = 0\n title_completed_tasks = ''\n for task in all_tasks:\n if task['completed'] is True:\n completed += 1\n title_completed_tasks += '\\t ' + task['title'] + '\\n'\n print('Employee {} is done with tasks({}/{}):'\n .format(employee_name, completed, len(all_tasks)))\n print(title_completed_tasks, end='')", "def status(self):\n \n tmpl1 = \"\"\"%-20s%-52s[%s]\"\"\"\n tmpl2 = \"\"\"%-20s%-52s\\n\"\"\"\n # print tmpl1 % (\"Machine Name\", \"IP Addresses\", \"Status\")\n # print 80 * \"-\"\n # print self.get_image()\n if self.cloudserver:\n # let's build the IPs first\n status = self.cloudserver.status\n \n else:\n status = \"OFF\"\n\n res2=\"\"\n ip1 = \"%s:%s\" % (self.networks[0], self.ip_addresses[self.networks[0]])\n if len(self.networks) > 1:\n res2 += \"\\n\"\n for network in self.networks[1:]:\n ipstr = \"%s:%s\" % (network, self.ip_addresses[network])\n res2+=tmpl2 % (\"-\", ipstr)\n # print res2\n # if len(self.ip_addresses.keys()) > 1:\n # ip1 = self.ip_addresses.values()[0]\n res1 = tmpl1 % (self.machine_name, ip1, status)\n return res1 + res2", "def status(self):\n return self._get(path='status')", "def summarize(self, jobId):\n jobInfo = self.jobs[jobId]\n state = jobInfo['state']\n return 'State=%s Elapsed=%s' % (\n jobInfo['state'], jobInfo['elapsed'])", "def detailed_status_message(self) -> str:\n return pulumi.get(self, \"detailed_status_message\")", "def printStatus(self):\n output = StringIO.StringIO()\n # use a csv writer to write out each row\n writer = csv.writer(output, lineterminator = '\\n')\n \n # write the header\n writer.writerow(['Server','Ping Interval','Status'])\n \n # write out the online servers\n for server, interval in self.online_servers.iteritems():\n writer.writerow([server, interval[1], 'Online'])\n \n # write out the offline servers\n for server, interval in self.offline_servers.iteritems():\n writer.writerow([server, interval[1], 'Offline'])\n \n return output.getvalue()", "def do_status(self, args):\n status = self._leet.job_status\n\n for job in self.finished_jobs:\n status.append({\"id\" : job.id,\n \"hostname\" : job.machine.hostname,\n \"plugin\": job.plugin_instance.LEET_PG_NAME,\n \"status\" : job.status})\n if status:\n pretty_jobs_status(status)\n else:\n print(\"***No jobs pending\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def get_status(self):\n url = 
\"data_request?id=jobstatus&job=%d&plugin=zwave\" % self.id\n return self.vera.get(url)", "def getJobStatusStr(status):\n if not isinstance(status, int):\n return ''\n\n return JobUtils.JOB_STATUS.get(status, '')", "async def get_task_status(task_id: TaskId):", "def status():\n statuses = get_all_statuses()\n return json.dumps(statuses, indent=4)", "def get_all_status():\n return \"\"", "def status(self) -> pulumi.Output['outputs.JobStatus']:\n return pulumi.get(self, \"status\")", "def fuota_task_status(self) -> Optional[str]:\n return pulumi.get(self, \"fuota_task_status\")", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status_message\")", "def status():\n _request('worklog/status/')", "def get_pool_overview_string(self, mission):\n\n # get statuses\n pool_status, allocation_status, node_status = self.get_pool_status(mission)\n\n s = \"Pool status: {}\\n\".format(pool_status)\n s += \"Allocation status: {}\".format(allocation_status)\n\n if pool_status != \"N/A\":\n\n other = sum(node_status.values()) - node_status[\"idle\"] - \\\n node_status[\"running\"] - node_status[\"unusable\"]\n\n s += \"\\n\"\n s += \"Node status: \"\n s += \"{} idle; \".format(node_status[\"idle\"])\n s += \"{} running; \".format(node_status[\"running\"])\n s += \"{} unusable; \".format(node_status[\"unusable\"])\n s += \"{} other;\".format(other)\n\n return s", "def status(self):\n return STATUS[self.fields['status']]", "def display_status(self):\n time = float2str(self.scheduler.time, '10.2f')\n tx = float2str(self.tx_total, '10g')\n rx = float2str(self.rx_total, '10g')\n dup = float2str(self.dup_total, '10g')\n uniq_total = float2str(self.uniq_total, '10g')\n delivered_total = float2str(self.delivered_total, '10g')\n uniq_delivered_total = float2str(self.uniq_delivered_total, '10g')\n print(\n 'define status_l text Time:{},____TX:{},____RX:{},____DUP:{},____Delivered:{}__/__{},____Arrived:{} 14 white 0.5 0.05'\n .format(time, tx, rx, dup, uniq_delivered_total, uniq_total,\n delivered_total))", "def getFormattedJobStatistics(self):\n\t\tformatted_job_stats = [self.name]\n\t\tformatted_job_stats.append(str(self.retry_count))\n\t\tif self.site is None:\n\t\t\tformatted_job_stats.append('-')\n\t\telse:\n\t\t\tformatted_job_stats.append(self.site)\n\t\tformatted_job_stats.append(round_to_str(self.kickstart))\n\t\tformatted_job_stats.append(round_to_str(self.post))\n\t\tformatted_job_stats.append(round_to_str(self.condor_delay))\n\t\tformatted_job_stats.append(round_to_str(self.resource))\n\t\tformatted_job_stats.append(round_to_str(self.runtime))\n\t\tformatted_job_stats.append(round_to_str(self.seqexec))\n\t\tformatted_job_stats.append(round_to_str(self.seqexec_delay))\n\t\treturn formatted_job_stats", "def status(self):\n return status_dict[self._get_property_(self.STATUS).upper()]", "def jobStatus(self, jobId):\n params = {'id': jobId}\n try:\n resp = self.gc.get(JobUtils.JOB_ID_PATH, parameters=params)\n except HttpError as e:\n if e.status == 400:\n print('Error. 
invalid job id:', jobId)\n return {}\n raise\n\n if not resp:\n return ''\n\n status = resp.get('status')\n\n statusStr = JobUtils.getJobStatusStr(status)\n return statusStr", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")" ]
[ "0.74661416", "0.7176071", "0.7176071", "0.71116614", "0.7109996", "0.7087766", "0.7026905", "0.69023955", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.68682194", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.6858241", "0.68405855", "0.68306243", "0.6817298", "0.680635", "0.680635", "0.67411906", "0.6729011", "0.66870797", "0.66833764", "0.66704893", "0.6653465", "0.6646299", "0.6631301", "0.6629168", "0.6625916", "0.6621827", "0.6621827", "0.6621827", "0.6621213", "0.66094756", "0.6609168", "0.65970993", "0.6593992", "0.6559062", "0.6551455", "0.6534675", "0.6534675", "0.6534675", "0.6534675", "0.6534675", "0.6534675", "0.65221125", "0.6518491", "0.6501436", "0.6494356", "0.64933676", "0.64858866", "0.64780277", "0.6470889", "0.64636517", "0.64636517", "0.64636517", "0.64636517", "0.64636517", "0.64636517", "0.64636517", "0.64636517" ]
document_score: 0.7895506
document_rank: 0
query: Get the status of a mission's storage container.
def get_storage_container_status(self, mission): if self.storage_client.exists(container_name=mission.container_name): return "available" # TODO: calculate space used in the container return "N/A"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_storage_container_overview_string(self, mission):\n\n status = self.get_storage_container_status(mission)\n s = \"Storage container status: {}\".format(status)\n return s", "def container_status(self) -> str:\n return pulumi.get(self, \"container_status\")", "def container_status(self):\n if self.status == 'complete':\n return 'complete'\n try:\n task_status = self._ecs.describe_tasks(tasks=[self.name])['tasks'][0]['lastStatus']\n return task_status\n except (IndexError, ClientError):\n return 'STOPPED'", "def getContainerStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/status/current' % (node,vmid),None)\n return data", "def status(self) -> Optional[pulumi.Input['StorageSpacesPropertiesStatusArgs']]:\n return pulumi.get(self, \"status\")", "def get_cont_stat(self, path, request_from_updater = False):\n try:\n self.logger.debug('Called get container stat interface of library')\n container_stat_obj = ContainerStatWithStatus()\n self.__get_container_stat(path, container_stat_obj, request_from_updater)\n status = container_stat_obj.get_return_status()\n self.logger.info(('Status from container library comes '\n 'out to be: %(status)s'),\n {'status' : status})\n if status == OsdExceptionCode.OSD_INTERNAL_ERROR:\n self.logger.debug('Internal error raised from library')\n return HTTPInternalServerError\n elif status == OsdExceptionCode.OSD_FILE_OPERATION_ERROR:\n self.logger.debug('File operatiopn error raised from library')\n return HTTPInternalServerError\n elif status == OsdExceptionCode.OSD_NOT_FOUND:\n self.logger.debug('File not found error raised from library')\n return HTTPNotFound\n else:\n pass\n cont_stat = container_stat_obj.container_stat\n return {'account' : cont_stat.account, \\\n 'container' : cont_stat.container, \\\n 'created_at' : cont_stat.created_at, \\\n 'put_timestamp' : cont_stat.put_timestamp , \\\n 'delete_timestamp' : cont_stat.delete_timestamp, \\\n 'object_count' : cont_stat.object_count, \\\n 'bytes_used' : cont_stat.bytes_used, \\\n 'hash' : cont_stat.hash, 'id' : cont_stat.id, \\\n 'status' : cont_stat.status, \\\n 'status_changed_at' : cont_stat.status_changed_at, \\\n 'metadata' : cont_stat.metadata}\n except Exception as err:\n self.logger.exception(err)\n raise err", "def get_storage(isamAppliance, statistics_duration, check_mode=False, force=False):\n return isamAppliance.invoke_get(\n \"Retrieving the Storage Usage Statistics\",\n \"/statistics/systems/storage.json{0}\".format(\n tools.create_query_string(\n timespan=statistics_duration)),requires_model=requires_model)", "def storage_bytes_status(self) -> str:\n return pulumi.get(self, \"storage_bytes_status\")", "def get_job_status(self, mission):\n\n # initialize task status\n status = dict(active=0, running=0, succeeded=0, failed=0)\n\n # get job status if it exists. 
Otherwise, return N/A\n try:\n the_job = self.batch_client.job.get(job_id=mission.job_name)\n\n # get counts of tasks in different statuses\n status_counts = self.batch_client.job.get_task_counts(mission.job_name)\n except azure.batch.models.BatchErrorException as err:\n if err.message.value.startswith(\"The specified job does not exist\"):\n return \"N/A\", status\n # raise an exception for other kinds of errors\n raise\n\n # update the dictionary\n status[\"active\"] = status_counts.active\n status[\"running\"] = status_counts.running\n status[\"succeeded\"] = status_counts.succeeded\n status[\"failed\"] = status_counts.failed\n\n return the_job.state.name, status", "def moc_storage_container(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"moc_storage_container\")", "def __get_container_stat(self, path, container_stat_obj, request_from_updater = False):\n try:\n self.logger.debug('Get container interface called')\n self.asyn_helper.call(\"get_container_stat\", path, container_stat_obj, request_from_updater)\n except Exception as err:\n self.logger.error(('get_container_stat for %(con_dir)s failed '\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path,\n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def get_details(self):\n status = []\n for key, container in self.containers.items():\n container.details = container.daemon.connection.inspect_container(self.config['release_name'])\n status.append(container.details)\n return status", "def get_pool_status(self, mission):\n\n # initialize node status\n states = dict(\n idle=0, rebooting=0, reimaging=0, running=0, unusable=0, creating=0,\n starting=0, waiting_for_start_task=0, start_task_failed=0, unknown=0,\n leaving_pool=0, offline=0, preempted=0)\n\n # if the pool does not exist\n if not self.batch_client.pool.exists(pool_id=mission.pool_name):\n return \"N/A\", \"N/A\", states\n\n # get pool info\n the_pool = self.batch_client.pool.get(pool_id=mission.pool_name)\n state = the_pool.state.name\n allocation_state = the_pool.allocation_state.name\n\n # get the list of node at current time point\n # we check the existance of the pool again to avoid coincidence\n if self.batch_client.pool.exists(pool_id=mission.pool_name):\n node_list = self.batch_client.compute_node.list(\n pool_id=mission.pool_name)\n\n # calculate the number of nodes in each status\n for node in node_list:\n states[node.state.name] += 1\n node_list.reset()\n\n return state, allocation_state, states", "def status(self, name=None):\n volume_info = self.cm.find_name(name)\n if volume_info:\n status = volume_info[0]['State']\n else:\n Console.error(\"volume is not existed\")\n return volume_info", "def get_status(self):\n try:\n c = self._oc_command([\"status\"])\n o = run_cmd(c, return_output=True)\n for line in o.split('\\n'):\n logger.debug(line)\n return o\n except subprocess.CalledProcessError as ex:\n raise ConuException(\"Cannot obtain OpenShift cluster status: %s\" % ex)", "def status(self) -> pulumi.Output['outputs.VirtualHardDiskStatusResponse']:\n return pulumi.get(self, \"status\")", "def get_status(self):\n if self.vm.get_cloud_status() != \"ACTIVE\":\n return \"stopped\"\n #wait for the vm to be ready and SSH-able\n self.vm.wait_ready()\n status = self.vm.run_command(\"ctool status\", indent=0, prefix='')\n return status.strip()", "def getStorageElementStatus( self, elementName, statusType = None, default = None ):\n\n if self.__getMode():\n # We do not apply defaults. 
If is not on the cache, S_ERROR is returned.\n return self.__getRSSStorageElementStatus( elementName, statusType )\n else:\n return self.__getCSStorageElementStatus( elementName, statusType, default )", "def _get_status(self):\n if self._state in [\"processed\", \"error\"]:\n return self._state\n \n get_resp = requests.get(self.location, cookies={\"session\": self.session})\n\n self._state = get_resp.json()[\"status\"]\n self.slice_time = get_resp.json()[\"slice_time\"]\n \n return self._state", "def status(self):\n return self._get(path='status')", "def get_storage(id):\n url = f\"{BCD_URL}/contract/{NETWORK}/{id}/storage?size=10\"\n js = load_json(url)\n storage = get_storage_internal(js['children'])\n print(storage)\n return storage", "def status(self):\n self.scion_sh('status')", "def mesos_status(self, submissionId):\n get_tasks = self.driver.getTasks()['get_tasks']\n task_state = None\n\n tasks = get_tasks['tasks'] + get_tasks.get('completed_tasks')\n tasks_list = list(filter(lambda x: x['task_id']['value'] == submissionId, tasks))\n if len(tasks_list) > 0:\n task = tasks_list[0]\n task_state = task['state']\n self._log.debug(\"Task state = \" + task_state)\n else:\n self._log.debug(\"Task not found\")\n\n return task_state", "def get_status(self, job_id):\n\n result = self.redis.get('job_status:' + str(job_id))\n return pickle.loads(result) if result else None", "def put_container(self, filesystem, acc_dir, cont_dir, \\\n account, container, metadata, req):\n try:\n # create path\n path = self.create_path(filesystem, acc_dir, cont_dir, account, container)\n # Remove this after container library update\n self.logger.debug(('PUT container called for path: %(path)s'),\n {'path' : path})\n if not os.path.exists(path):\n os.makedirs(path)\n timestamp = normalize_timestamp(req.headers['x-timestamp'])\n created_at = normalize_timestamp(time.time())\n # create container stat object\n cont_stat = ContainerStat(account, container, created_at, \\\n timestamp, '0', 0, 0, '', str(uuid4()), 'ADDED', '', metadata)\n\t #get component number\n\t component_name = req.headers['x-component-number']\n # call container library to create container\n status_obj = self.__create_cont(path, filesystem, cont_stat, component_name)\n status = status_obj.get_return_status()\n self.logger.info(('Status from container library comes '\n 'out to be: %(status)s'),\n {'status' : status})\n return status, cont_stat\n except Exception as err:\n self.logger.error(('PUT request failed for account/container:'\n ' %(account)s/%(container)s '\n 'close failure: %(exc)s : %(stack)s'),\n {'account' : account, 'container' : container,\n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def get_job_status(parent_pid, heart_pid):\n status_container = {}\n if parent_pid != -1:\n status_container[\"memory\"] = get_memory_usage(parent_pid, heart_pid)\n status_container[\"cpu_load\"] = get_cpu_load(parent_pid, heart_pid)\n return status_container", "def disk_encryption_status(self) -> 'outputs.DiskEncryptionStatusResponse':\n return pulumi.get(self, \"disk_encryption_status\")", "def storage_get(context, storage_id):\n return _storage_get(context, storage_id)", "def storage_detail(self, storage_id):\n response = self.session.get(self.get_url('newStorageAPI.do'), params={\n 'op': 'getStorageInfo_sacolar',\n 'storageId': storage_id\n })\n\n data = json.loads(response.content.decode('utf-8'))\n return data", "def lift_container(self) -> TaskStatus:\n\n status, object_id = 
self._go_to_and_lift(object_ids=self.container_ids, object_type=\"container\",\n stopping_distance=0.3)\n return status", "def get_status(directory):\n\n os.system(\"squeue -o '%.18i %.9P %.16j %.8u %.8T %.10M %.9l %.6D %R %Z'\"\n \">> all_jobs.txt\")\n lines = open('all_jobs.txt').readlines()\n job_state = None\n for i in range(len(lines)):\n if directory in lines[i]:\n job_state = lines[i][4]\n break\n\n os.system('rm all_jobs.txt')\n\n return job_state", "def GetStatus(self):\r\n return self.status", "def get_status():\n # TODO tie this in with requests that can fetch the status of the pod from the cluster\n\n if request.method == \"GET\":\n \"\"\"\n request looks like:\n {\n \"workflow_name\": \"test-workflow\"\n }\n \"\"\"\n\n req = request.get_json(force=True)\n if workflow_exists(req['workflow_name']):\n # TODO fit into database\n # Get the pod by workflow and read the status\n # status = RUNNING_JOBS[req['workflow_name']].get_pod_status()\n response = {\n \"status\": 'Still running'\n }\n else:\n app.logger.error(\n f\"Received request asking the pod status in {req['workflow_name']} \"\n f\"but this workflow is not present in running jobs\"\n f\"record. Nothing to do.\")\n response = {\n \"status\": \"Not running\"\n }\n\n return jsonify(response)", "def get_status(self):\n url = \"data_request?id=jobstatus&job=%d&plugin=zwave\" % self.id\n return self.vera.get(url)", "def _get_status(self):\n return self.__status", "def status(self) -> pulumi.Output['outputs.JobStatus']:\n return pulumi.get(self, \"status\")", "def get_operation_status(self, lifecycle_operation_occurrence_id):\n return self.em_adapter.get_operation_status(lifecycle_operation_occurrence_id)", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self):\n return self.get(self._names[\"status\"])", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return 
pulumi.get(self, \"status\")", "def get(self, container_id):\n resp = self.client.api.inspect_container(container_id)\n return self.prepare_model(resp)", "def _compute_status(self, instance, zone):\n if self.compute_service is None:\n logging.warning('Service unavailable: unable to start GCE VM: %s (%s)',\n instance, zone)\n return\n\n info = self.compute_service.instances().get(\n project=app_identity.get_application_id(),\n instance=instance,\n zone=zone).execute()\n return info[COMPUTE_STATUS]", "def getstatus(self):\n return self.__status", "def get_status(self):\n return self.read_register(259, 0, 3)", "def get_service_status(self):\n return self.service.status()", "def get_full_juju_status():\n\n status = model.get_status(lifecycle_utils.get_juju_model())\n return status", "def vacuum_status(vac_id):\n out = subprocess.check_output(cmd_preamble + [\"admin\", \"vacuum-status\", \"--id\", vac_id])\n out = out.decode()\n return out.split(' ')[0].strip()", "def _read_status(self):\n results = self.status_table.query_items({'api_version': self.api_version})\n if not results:\n return None\n else:\n return results[0]", "def get_one(self, container_id):\n container = _get_container(container_id)\n check_policy_on_container(container.as_dict(), \"container:get\")\n context = pecan.request.context\n compute_api = pecan.request.compute_api\n container = compute_api.container_show(context, container)\n return view.format_container(pecan.request.host_url, container)", "def get_container(self, container_name):\n response = self.client.get_container(container_name)\n return response", "def status(self):\n if hasattr(self, \"_status\"):\n return self._status\n else:\n return None", "def subcmd_getstorage_main(args, parameter_info):\n \n from get_storage_inventory import get_storage_inventory\n result = get_storage_inventory(parameter_info['ip'], parameter_info['user'], parameter_info['passwd'], parameter_info['sysid'])\n \n if result['ret'] is True:\n del result['ret']\n sys.stdout.write(json.dumps(result['entries'], sort_keys=True, indent=2))\n else:\n sys.stderr.write(result['msg'])", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self._status", "def service_mesh_job_check(container_name):\n complete = False\n log.info(\"Checking if %s is complete\", container_name)\n try:\n response = coreV1Api.list_namespaced_pod(namespace=namespace, watch=False)\n for item in response.items:\n # container_statuses can be None, which is non-iterable.\n if item.status.container_statuses is None:\n continue\n for container in item.status.container_statuses:\n if container.name == container_name and item.status.phase == \"Running\":\n name = read_name(item)\n log.info(\"Container Details %s \", container)\n log.info(\"Container Status %s \", container.state.terminated)\n\n if container.state.terminated:\n log.info(\"Container Terminated with reason %s \", container.state.terminated.reason)\n complete = True\n\n except ApiException as exc:\n log.error(\"Exception when calling read_namespaced_job_status: %s\\n\",\n exc)\n return complete", "def disk_encryption_status(self) -> pulumi.Output['outputs.DiskEncryptionStatusResponse']:\n return pulumi.get(self, \"disk_encryption_status\")", "def get_status(self):\n\n return self._system", "def volume_state(self):\r\n return self.status", "def status(self) -> VacuumStatus:\n return VacuumStatus(self.send(\"get_status\")[0])", "def 
get_container_statuses(self):\n return {c['id']: c['state'] for c in self.get_containers()}", "def status(self):\n self._refresh_state()\n return self._data.get('status')", "def get_status(self):\n r = requests.get(self.base_url + '/status')\n return r.json()", "def getClusterStatus(self):\n data = self.connect('get','cluster/status', None)\n return data", "def get_operation_status(self, lifecycle_operation_occurrence_id):\n LOG.debug('\"Lifecycle Operation Occurrence Id\" is not implemented in OpenStack Tacker client!')\n LOG.debug('Will return the state of the resource with given Id')\n\n return constants.OPERATION_SUCCESS", "def status(self):\n if not self.volume:\n # no volume active\n status = volume_status.NONE\n elif self._status and self._last_status_check >= time.time() - MIN_TIME_BETWEEN_STATUS_CHECKS:\n status = self._status\n else:\n try:\n self.volume.update()\n # Take only the first word of the status as openstack adds some extra info\n # after a space\n status = volume_status_map.get(self.volume.status.split(' ')[0], None)\n if status == volume_status.IN_USE and self.volume.attachment_state() == 'attached':\n status = volume_status.ATTACHED\n if not status:\n log.error(\"Unknown volume status: {0}. Setting status to volume_status.NONE\"\n .format(self.volume.status))\n status = volume_status.NONE\n self._status = status\n self._last_status_check = time.time()\n except EC2ResponseError as e:\n log.error(\n 'Cannot retrieve status of current volume. {0}'.format(e))\n status = volume_status.NONE\n return status", "def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)", "async def get_status():", "def read_status(cls, storage):\n # Handle case in which storage is a string.\n reporter = cls._reporter_from_storage(storage, check_exist=True)\n\n # Read iteration and online analysis info.\n try:\n reporter.open(mode='r')\n options = reporter.read_dict('options')\n iteration = reporter.read_last_iteration(last_checkpoint=False)\n # Search for last cached free energies only if online analysis is activated.\n target_error = None\n last_err_free_energy = None\n # Check if online analysis is set AND that the target error is a stopping condition (> 0)\n if (options['online_analysis_interval'] is not None and\n options['online_analysis_target_error'] != 0.0):\n target_error = options['online_analysis_target_error']\n try:\n last_err_free_energy = cls._read_last_free_energy(reporter, iteration)[1][1]\n except TypeError:\n # Trap for undefined free energy (has not been run yet)\n last_err_free_energy = np.inf\n finally:\n reporter.close()\n\n # Check if the calculation is done.\n number_of_iterations = options['number_of_iterations']\n online_analysis_target_error = options['online_analysis_target_error']\n is_completed = cls._is_completed_static(number_of_iterations, iteration,\n last_err_free_energy,\n online_analysis_target_error)\n\n return cls.Status(iteration=iteration, target_error=target_error,\n is_completed=is_completed)", "def get_container(self, account, container):\n \n pass", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) 
-> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")" ]
[ "0.7662295", "0.68757343", "0.65628034", "0.61149687", "0.61096156", "0.60097855", "0.59428936", "0.5903649", "0.58938575", "0.5616815", "0.560521", "0.5599396", "0.55950266", "0.5577565", "0.55629945", "0.556032", "0.5544695", "0.5521543", "0.5521207", "0.551155", "0.55082923", "0.54887813", "0.5452498", "0.5414804", "0.53947043", "0.5363504", "0.5355417", "0.5353225", "0.53408164", "0.5328964", "0.532681", "0.53223616", "0.53197503", "0.5317166", "0.52960074", "0.52855146", "0.5284014", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5274084", "0.5265608", "0.5265413", "0.5265413", "0.5265413", "0.52650666", "0.5253094", "0.5250347", "0.5247204", "0.5246413", "0.5243993", "0.5232832", "0.52270544", "0.5213755", "0.52048993", "0.5202925", "0.5198903", "0.5196859", "0.5196859", "0.5196859", "0.5193027", "0.5186076", "0.518205", "0.51805717", "0.51768947", "0.51743025", "0.516844", "0.5167358", "0.51658607", "0.51618624", "0.51579523", "0.515551", "0.5155506", "0.51528883", "0.5143763", "0.51419306", "0.514158", "0.514158", "0.514158", "0.514158", "0.514158", "0.514158", "0.514158", "0.514158", "0.514158" ]
document_score: 0.85038316
document_rank: 0
query: Get a string for the status of the storage container.
def get_storage_container_overview_string(self, mission): status = self.get_storage_container_status(mission) s = "Storage container status: {}".format(status) return s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def container_status(self) -> str:\n return pulumi.get(self, \"container_status\")", "def storage_bytes_status(self) -> str:\n return pulumi.get(self, \"storage_bytes_status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def container_status(self):\n if self.status == 'complete':\n return 'complete'\n try:\n task_status = self._ecs.describe_tasks(tasks=[self.name])['tasks'][0]['lastStatus']\n return 
task_status\n except (IndexError, ClientError):\n return 'STOPPED'", "def get_storage_container_status(self, mission):\n\n if self.storage_client.exists(container_name=mission.container_name):\n return \"available\"\n\n # TODO: calculate space used in the container\n\n return \"N/A\"", "def _get_status(self):\n return u'%s' % (self.get_status_display())", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def get_status_as_string(self):\n if self.downloaded == 0:\n return \"[Starting... ]\"\n return \"[%s, %s, %s]\" % self.get_status()", "def get_status_string(self, instance):\n return instance.get_status_string()", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self):\n return self._get(path='status')", "async def get_status(self) -> str:\n return await self.hw_device.status()", "def status(self):\n return self.get(self._names[\"status\"])", "def display_status(self) -> str:\n return pulumi.get(self, \"display_status\")", "def _get_status(self):\n held_msg=\"\"\n return u'%s%s' % (self.get_status_display(), held_msg)", "def get_status(self):\n if self.vm.get_cloud_status() != \"ACTIVE\":\n return \"stopped\"\n #wait for the vm to be ready and SSH-able\n self.vm.wait_ready()\n status = self.vm.run_command(\"ctool status\", indent=0, prefix='')\n return status.strip()", "def status(self) -> Optional[pulumi.Input['StorageSpacesPropertiesStatusArgs']]:\n return pulumi.get(self, \"status\")", "def getStatusString(self, statClass=None):\n return self.convertStatus(self.getStatus(statClass=statClass))", "def getContainerStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/status/current' % (node,vmid),None)\n return data", "def status_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status_message\")", "def health_status(self) -> str:\n return pulumi.get(self, \"health_status\")", "def health_status(self) -> str:\n return pulumi.get(self, \"health_status\")", "def status(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"status\")", "def status_message(self) -> str:\n return pulumi.get(self, \"status_message\")", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def status(self) -> str:\n return self._check_job_status()", "def 
GetStatus(self):\r\n return self.status", "def detailed_status(self) -> str:\n return pulumi.get(self, \"detailed_status\")", "def status(self):\n return self._data['status']", "def get_status(self):\n return self._status", "def getstatus(self):\n return self.__status", "def status(self):\n return STATUS[self.fields['status']]", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def status(self) -> pulumi.Output['outputs.JobStatus']:\n return pulumi.get(self, \"status\")", "def status(self):\n self._refresh_state()\n return self._data.get('status')", "def status(self):\n return self._query_status()['status']", "def get_status(self):\n return self.msg" ]
[ "0.8081375", "0.7568696", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.714917", "0.71359295", "0.70908743", "0.7088581", "0.69717145", "0.69717145", "0.69717145", "0.6926432", "0.6916393", "0.69131243", "0.69131243", "0.69131243", "0.69131243", "0.69131243", "0.69131243", "0.690456", "0.690456", "0.690456", "0.690456", "0.690456", "0.690456", "0.690456", "0.690456", "0.690456", "0.6852125", "0.6833919", "0.68264747", "0.6706075", "0.66507626", "0.66430557", "0.6632735", "0.66095424", "0.65945864", "0.65877116", "0.6582409", "0.6582409", "0.65695786", "0.65695786", "0.65382195", "0.6534538", "0.6534538", "0.6534538", "0.6525312", "0.6519406", "0.6497462", "0.6493496", "0.6471084", "0.64700896", "0.64634454", "0.64587396", "0.64587396", "0.64524776", "0.64332944", "0.6422254", "0.64202034" ]
0.7668451
1
Get the string of an overview to all resources.
def get_overview_string(self, mission): s = self.get_pool_overview_string(mission) + "\n\n" s += self.get_job_overview_string(mission) + "\n\n" s += self.get_storage_container_overview_string(mission) return s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def overview():\n return render_template('api/api.html', title='API Overview')", "def __str__(self):\n return gettext('List of %s') % self.resource.__name__", "def get_overview():\n from app.core.api_views import Api\n from app.modules.overview import inc\n sar = inc.main()\n api = Api()\n return render_template(\"index.html\",\n sar=sar,\n )", "def get_resources():\n return Response(f\"{Resource.get_all_resources()}\", 200, mimetype='text/plain')", "def show_apis():\n return (\n f\"<h4>Available Routes:</h4>\"\n f'<a href=\"/api/v1.0/ids\">/api/v1.0/ids</a><br/>' \n f'<a href=\"/api/v1.0/info/1286\">/api/v1.0/info/subject_id</a><br/>' \n f'<a href=\"/api/v1.0/subjects\">/api/v1.0/subjects</a><br/>' \n f'<a href=\"/api/v1.0/subjects/1286\">/api/v1.0/subjects/subject_id</a><br/>' \n f'<a href=\"/\"><h4>Back</h4></a><br/>' \n )", "def do_overview(self):\n summaries = []\n for name, cmd in self.base.commands.iteritems():\n summaries.append(' %-14s %s\\n' % (name, cmd.get_summary()))\n summaries.sort()\n sys.stdout.write('Usage: %s COMMAND ARGUMENTS...\\n\\n' \\\n 'Available commands:\\n' % (self.base.scriptname, ))\n for line in summaries:\n sys.stdout.write(line)", "def home():\n return(\n f\"Available Routes:<br/>\"\n f\"Precipitation: /api/v1.0/precipitation<br/>\"\n f\"List of Stations: /api/v1.0/stations<br/>\"\n f\"Temperature for one year: /api/v1.0/tobs<br/>\"\n f\"Temperature stat from the start date(yyyy-mm-dd): /api/v1.0/min_max_avg/<start><br/>\"\n f\"Temperature stat from start to end dates(yyyy-mm-dd): /api/v1.0/min_max_avg/<start><br/>\"\n )", "def get_resource_details (self):\n return (f\"[Title:\\\"{self.get_title()}\\\"] [Author:{self.get_author()}] [Publisher:{self.get_publisher()}] [Year:{self.get_year()}]\")", "def describe(self) -> str:", "def overview():\n pages_list = g.db.pages.find().sort('name')\n return render_template('{}/index.html'.format(MODULE_DIR), **locals() )", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/2015-01-01<br/>\"\n f\"/api/v1.0/2015-01-01/2015-12-31\"\n )", "def overview():\n # TODO: fix ajax https://groups.google.com/d/msg/web2py/YyVilc2ywdg/ZLtN3Gg3Ft0J\n # TODO: fix ?plain link in results\n from plugin_introspect import get_task_code\n lesson = request.args[0] # controller with lesson contents\n # lesson = request.vars.lesson_controller # controller with lesson contents\n fun_names = exposed_functions_names( controller=lesson )\n exposed_functions = generate_exposed_functions_info( controller=lesson )\n examples_codes = [ get_task_code(code=exposed_functions[f]['code'], task_key=lesson+'/'+f, decorate=True) for f in fun_names ]\n results_urls = [ URL(lesson, f, vars=dict(plain=1)) for f in fun_names ]\n return response.render('tutor.html', dict(lesson=lesson, fun_names=fun_names, examples_codes=examples_codes, results_urls=results_urls) )", "def toString(self):\n\t\ts = \"A %s titled '%s':\\n\\n\" % (self.getSpecString(), self.getName())\n\t\ts += \"It's summary reads: %s\\n\\n\" % (self.getDescription())\n\t\ts += \"~~\\n%s\\n~~\" % (self.getAllItemsStr())\n\t\treturn s", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs\"\n )", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n 
f\"/api/v1.0/<start>/<end><br/>\"\n )", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start_date(yyyy-mm-dd)<br/>\"\n f\"/api/v1.0/start_date(yyyy-mm-dd)/end_date(yyyy-mm-dd)<br/>\")", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/preciptation<br/>\"\n f\"/api/v1.0/Stations\"\n )", "def resources(self):\r\n return self.page.object_list", "def getOverview(movieInfo):\n \n if \"overview\" in movieInfo:\n overview = \"\" if movieInfo[\"overview\"] is None else movieInfo[\"overview\"]\n return _format(\"\".join(c for c in overview if c not in punctuation))\n else: \n raise AttributeError(\"The parameter has no attribute 'overview'\")", "def summary(self):\r\n return '%s%s: %s%s %s%s' % (BLUE, self.title,\r\n GREEN, self.description,\r\n NORMAL, self.link)", "def summary_string(self) -> str:", "def getSummary(self):\n return self.base.get(\"summary\", [])", "def displayName(self):\n\t\treturn self.tr(\"Get Drainage Basins\")", "def home():\n return (\n f\"These are the available routes:</br>\"\n f\"/api/v1.0/precipitation</br>\"\n f\"/api/v1.0/stations</br>\"\n f\"/api/v1.0/tobs</br>\"\n f\"/api/v1.0/< start ></br>\"\n f\"/api/v1.0/< start >/< end ></br>\"\n )", "def summary(self) -> str:\n return pulumi.get(self, \"summary\")", "def print_resources(self) -> None:\n for resource in self._request_get(self.url_base + 'documentacao'):\n print(\n \"Nome: {},\\nUrl: {},\\n\".format(\n resource['name'],\n self._format_url_to_resource(resource['url']),\n )\n )", "def overview():\r\n # Update the list of languages allowed on the site, \r\n # except for the language used by your users at that time.\r\n if request.method == 'POST':\r\n lan_object = Languages()\r\n data = lan_object.update()\r\n message = lan_object.message\r\n status = lan_object.status\r\n \r\n # Gets documents from the collections of all languages \r\n languages_list = g.languages_object.get_languages(1)\r\n language_chosen = g.languages_object.get_languages(2)\r\n return render_template( '{}/index.html'.format(MODULE_DIR), **locals())", "def summary(self):\n res = \", \".join(\n elem[\"summary\"] for elem in self.status[\"health\"][\"summary\"]\n )\n if res:\n return res\n elif self.detail:\n return self.detail[0]\n return \"\"", "def description(self) -> str:\n return f\"List of {self.key}\"", "def resources(self):\n return list(self.get_resources_for_type(gdef.ResType_All))", "def home_page():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start/end<br/>\"\n )", "def _show_help(self):\r\n info = {\"/contexts/<context>/[orgs/[<org_name>]]/[spaces/[<space_name>]]\": \"reports\",\r\n \"/contexts/<context>/orgs_metadata/[<org_name>]\": \"metadata\",\r\n \"/contexts/<context>/orgs/<org_name>/director\": \"org/director mapping\",\r\n \"/reader_status\": \"status of Bitbucket reader cache\"}\r\n if self._cache_refresh:\r\n info['/refresh'] = \"force cache refresh from BitBucket\"\r\n return info", "def home():\n return(\n f\"Available Routes: <br/>\"\n\n f\"For Precipitation: /api/v1.0/precipitation<br/>\"\n f\"Returns Jsonify dictionary of dates and Precepitation<br/><br/>\"\n\n f\"For list of Stations: /api/v1.0/stations<br/>\"\n f\"Returns Jasonify list of stations <br/><br/>\"\n\n f\"For last year temperatures: /api/v1.0/tobs<br/>\"\n f\"Returns Jsonify dictionary of 
Temperature Observations for last year<br/><br/>\"\n\n f\"Temperature result from the date in format (yyyy-mm-dd): /api/v1.0/yyyy-mm-dd<br/>\"\n f\"Returns an Average, Max, and Min temperatures from given start date of dataset<br/><br/>\"\n\n f\"Temperature result from start date to end date in format (yyyy-mm-dd): /api/v1.0/yyyy-mm-dd/yyyy-mm-dd<br/>\"\n f\"Returns an Average, Max, and Min temperatures for a given date range\"\n\n )", "def _build_resources_repr(self, resources):\n if resources:\n result = \", \".join(\"{} (r{})\".format(r.name, r.revision) for r in resources)\n else:\n result = \"-\"\n return result", "def overview():\n subjects = get_latest(10)\n return render_template('subject.html', subjects=subjects)", "def get_description(self):\n return IntrospectorHelper.get_summary(self.callback)", "def summary(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json", "def summary(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json", "def summary(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json", "def show_overview(self) -> None:\n print(f\"\\n\\nCluster overview:\")\n all_clusters = self.get_all_clusters()\n print(f\" - Total of {len(all_clusters)} clusters\")\n if all_clusters:\n cluster_lengths = [len(v) for v in all_clusters.values()]\n print(f\" - Average number of cluster-labels: {round(sum(cluster_lengths) / len(cluster_lengths), 2)}\")", "def resource_details(self) -> pulumi.Output[Any]:\n return pulumi.get(self, \"resource_details\")", "def resource_details(self) -> pulumi.Output[Any]:\n return pulumi.get(self, \"resource_details\")", "def GetAllResourcesSample():\n client = CreateClient()\n # Unlike client.GetResources, this returns a list of resources\n for resource in client.GetAllResources():\n PrintResource(resource)", "def display_collection_by_title_table(resource_list):\n \n # Obtain sorted resource_list\n resource_list = sort_collection_by_title(resource_list)\n \n # Display type\n print(\"\\nBOOKS:\")\n print(\"======\")\n \n # Display column names\n print(\"{:7s} {:30s} {:20s} {:11s} {:9s} {:5s} {:8s} {:14s}\"\\\n \" {:9s} {:18s} {:20s}\"\n .format(\"UID\", \"Title\", \"Creator\", \"Genre\", \"Language\", \"Year\", \n \"Country\", \"Publisher\", \"City\", \"Category\", \n \"Keywords\"))\n \n # Display book resources\n for resource in resource_list:\n \n if resource.resource_type == \"book\":\n\n print(\"{:<7d} {:30s} {:20s} {:11s} {:9s} {:<5d} {:8s} {:14s} \"\\\n \"{:9s} {:18s} {:20s}\"\n .format(resource.get_uid(), resource.title[:29], \n resource.creator.get_full_name(), resource.genre[:10], \n resource.language[:8], resource.year, \n resource.country, resource.publisher[:13], \n resource.city, resource.category,\n resource.get_keyword_string()))\n\n # Display type\n print(\"\\nMOVIES:\")\n print(\"=======\")\n \n # Display column names\n print(\"{:7s} {:30s} {:20s} {:11s} {:9s} {:5s} {:8s} {:7s} {:35s} {:20s}\"\n .format(\"UID\", \"Title\", \"Creator\", \"Genre\", \"Language\", \"Year\", \n \"Country\", \"Rating\", \"Writers\", \"Keywords\"))\n \n # Display movie resources\n for resource in resource_list:\n \n if resource.resource_type == \"movie\":\n \n print(\"{:<7d} {:30s} {:20s} {:11s} {:9s} {:<5d} {:8s} {:7s} \"\\\n \"{:35s} {:20s}\"\n .format(resource.get_uid(), resource.title, \n resource.creator.get_full_name(), \n resource.genre, resource.language[:8], resource.year, \n resource.country, resource.rating, \n 
resource.get_names_string(resource.writers)[:35], \n resource.get_keyword_string()))", "def summary():\r\n\r\n average_age, counted = _find_average_age()\r\n male, female = _find_male_female_percentage()\r\n headings = [\"Total Number of Patients\", \"Average Age\",\r\n \"Patients Involved In Average Age\", \"Percentage of Male\",\r\n \"Percentage of Female\"]\r\n data = [len(resources), average_age, counted, male, female]\r\n return render_template(\"summary.html\", headings=headings, data=data)", "def Home():\n return(\n f\"Hawaii Climate Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"and<br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def resources(self):", "def __repr__(self):\n return str(self.list_all())", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"<a href='/api/v1.0/precipitation'>Precipitation</a><br/>\"\n f\"<a href='/api/v1.0/stations'>Stations</a><br/>\"\n f\"<a href='/api/v1.0/tobs'>Temperature</a><br/>\"\n f\"<a href='/api/v1.0/start'>Start Date</a><br/>\"\n f\"<a href='/api/v1.0/start/end'>End Date</a><br/>\"\n )", "def summary(app):\n click.echo(get_summary(app))", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"<a href='/api/v1.0/precipitation'>precipitation</a><br/>\"\n f\"<a href='/api/v1.0/stations'>stations</a><br/>\"\n f\"<a href='/api/v1.0/tobs'>tobs</a><br/>\"\n f\"<a href='/api/v1.0/tobs/start_date'>tobs/start_date</a><br/>\"\n f\"<a href='/api/v1.0/tobs/start_date/end_date'>tobs/start_date/end_date</a><br/>\"\n )", "def __str__(self):\n return self.resource.__name__", "def Help():\n names=api_method_dict.keys()\n names.sort()\n return ''.join(['**** ' + api_method_dict[name].__name__ + '\\n' + api_method_dict[name].__doc__ + '\\n'\n for name in names])", "def show_db_overview(self):\n\n models_list = sorted_models_list()\n apps = [p.app_label for p in settings.SITE.installed_plugins]\n s = \"%d apps: %s.\" % (len(apps), \", \".join(apps))\n s += \"\\n%d models:\\n\" % len(models_list)\n i = 0\n headers = [\n #~ \"No.\",\n \"Name\",\n \"Default table\",\n #~ \"M\",\n \"#fields\",\n \"#rows\",\n #~ ,\"first\",\"last\"\n ]\n rows = []\n for model in models_list:\n if True: # model._meta.managed:\n i += 1\n cells = []\n #~ cells.append(str(i))\n cells.append(fmn(model))\n cells.append(model.get_default_table())\n #~ cells.append(str(model))\n #~ if model._meta.managed:\n #~ cells.append('X')\n #~ else:\n #~ cells.append('')\n cells.append(str(len(model._meta.concrete_fields)))\n qs = model.objects.all()\n n = qs.count()\n cells.append(str(n))\n #~ if n:\n #~ cells.append(obj2str(qs[0]))\n #~ cells.append(obj2str(qs[n-1]))\n #~ else:\n #~ cells.append('')\n #~ cells.append('')\n\n rows.append(cells)\n s += rstgen.table(headers, rows)\n return s", "def summary(self) -> str:\n pass", "def GetResourceNames(self):\r\n return [x.name for x in self.resources]", "def cal_desc(self):\n desc = \"\"\n desc += \"Requested by \"\n orgs = self.org.all()\n if len(orgs) > 0:\n for org in orgs:\n desc += org.name + \", \"\n desc = desc[:-2] + \".\\n\" # removes trailing comma\n ccs = self.ccinstances.all()\n if len(ccs) > 0:\n desc += \"Crew Chiefs: \"\n for cc in ccs:\n desc += cc.crew_chief.get_full_name() + \" [\" + (cc.service.shortname if cc.service else cc.category.name) + \"], \"\n desc = desc[:-2] + \".\\n\" # removes trailing comma\n if self.description:\n desc += self.description + \"\\n\"\n return desc", "def __str__(self):\n return self.summary()", 
"def welcome():\n return (\n f\"All Available Routes:<br/>\" \n f\"/api/v1.0/precipitation<br/>\" \n f\"/api/v1.0/stations<br/>\" \n f\"/api/v1.0/tobs<br/>\" \n f\"/api/v1.0/yyyy-mm-dd<br/>\"\n f\"/api/v1.0/yyyy-mm-dd/yyyy-mm-dd\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start_date<br/>\"\n f\"/api/v1.0/start_date/end_date<br/>\"\n )", "def get_description(self):", "def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")", "def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")", "def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")", "def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start_date<br/>\"\n f\"/api/v1.0/start_date/end_date\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/consumption<br/>\"\n f\"/api/v1.0/gasprices<br/>\"\n f\"/api/v1.0/states<br/>\"\n f\"/api/v1.0/vmt<br/>\"\n )", "def get_monitor_string(self):\n\n return self.reporter.get_overview_string(self.info)", "def test_get_brief_summary_output(self):\n \n # Create a Resource object\n resource = Resource(1, \"White Noise\", Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\",\n [\"culture\", \"survival\", \"life\", \"society\"])\n \n # Assert expected results \n self.assertEqual(resource.get_brief_summary(), \"Delillo's White \"\\\n \"Noise follows narrator Jack Gladney, a professor \"\\\n \"at a \\nsmall Liberal Arts college and describes an \"\\\n \"academic year. 
Jack teaches \\nat ...\")", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start-date<br/>\"\n f\"/api/v1.0/start-date/end-date<br/>\"\n )", "def to_short_string(self):\n return f'{self.name} - {self.resource_type}'", "def index():\n return (\n f\"Welcome to my Hawaii trip info!<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def app_help():\n import urllib\n output = []\n\n func_list = {}\n for rule in app.url_map.iter_rules():\n if rule.endpoint != 'static':\n help = app.view_functions[rule.endpoint].__doc__\n if help:\n help = re.sub(\".*return.*\\n\",\"\",help).replace(\"\\n\",'<br/>')\n func_list[rule.rule] = help\n\n ordered = OrderedDict(func_list)\n\n return ordered", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/START_DATE<br/>\"\n f\"/api/v1.0/START_DATE/END_DATE\"\n )", "def home():\n\n # Provide the date range (from the most distant to the recent date) for\n # filtering in the last two API routes\n session = Session(engine)\n start_limit = session.query(Measurement.date).filter(Measurement.date).\\\n order_by(Measurement.date).first()\n end_limit = session.query(Measurement.date).filter(Measurement.date).\\\n order_by(Measurement.date.desc()).first()\n\n return (\n f'Available Routes:<br/>'\n f'<br/>'\n f'/api/v1.0/precipitation<br/>'\n f'/api/v1.0/stations<br/>'\n f'/api/v1.0/tobs<br/>'\n f'<br/>'\n f'/api/v1.0/start<br/>'\n f'/api/v1.0/start/end<br/>'\n f'<br/>'\n f'*Please use \"yyyy-mm-dd\" as the date format to replace the \"start\" and/or \"end\" parameter(s) in the last two API routes in order to filter summarized temperature results based on desired date range:<br/>'\n f'The earliest date available in this dataset is {start_limit[0]}<br/>'\n f'The most recent date available in this dataset is {end_limit[0]}<br/>'\n )", "def welcome():\n return (\n f\"Avalable Routes:<br/>\"\n f\"/api/v1.0/precipitation - List of Precipitation Observations from the previous year<br/>\"\n\n f\"/api/v1.0/stations\"\n f\"- List of observation stations<br/>\"\n\n f\"/api/v1.0/tobs\"\n f\"- List of Temperature Observations (tobs) for the previous year<br/>\"\n\n f\"/api/v1.0/temps/&ltstart&gt/&ltend&gt\"\n f\"- Min, avg, max temp for start or start-end date range (format yyyy-mm-dd)<br/>\"\n\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/'start day'<br/>\"\n f\"/api/v1.0/'start day'/'end day'<br/>\"\n )", "def __str__(self):\n return self.__resource;", "def __str__(self):\n\n return str(self.__resource);", "def home():\n return\"\"\"<!DOCTYPE><html><h1>List of all available Honolulu, HI API routes</h1><ul>\n <li>List of precipitation scores from the last year:<a href=\"/api/v1.0/precipitation\">/api/v1.0/precipitation</a></li>\n <li>List of stations:<a href=\"/api/v1.0/stations\">/api/v1.0/stations</a></li>\n <li>List of temp observations from the last year:<a href=\"/api/v1.0/tobs\">/api/v1.0/tobs</a></li>\n <li>List of minimum, maximum, and average temperatures for the date provided (replace &ltstart&gt with a date in 'yyyy-mm-dd' format: <a 
href=\"/api/v1.0/<start>\">/api/v1.0/<start></a></li>\n <li>List of minimum, maximum, and average temperatures for the dates in range provided (replace &ltstart&gt and &ltend&gt with dates in 'yyyy-mm-dd' format): <a href=\"/api/v1.0/<start>/<end>\">/api/v1.0/<start>/<end></a></li>\n </ul></html>\"\"\"", "def __str__(self):\n\n descr = \"You are in the \" + self.name + \"\\n\"\n for key in self.exits:\n descr += \"You can go \" + key + \" to the \" + self.exits[key].name + \"\\n\"\n for item in self.inventory:\n descr += \"There is a \" + item.name + \" here.\\n\"\n for item in self.objects:\n descr += item.name + \" is here.\"\n return descr", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/[start]<br/>\"\n f\"/api/v1.0/[start]/[end]\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start_end\" )", "def displayStatistics(self):\n return \"\"", "def shortDescription(self):\n desc = super(WebAPITestCaseMixin, self).shortDescription()\n\n if self.sample_api_url:\n test_method = getattr(self, self._testMethodName)\n\n if getattr(test_method, 'is_test_template', False):\n desc = desc.replace('<URL>', self.sample_api_url)\n\n return desc", "def get_overview(entities=None):\n \n url = \"{ep}/views/overview\".format(ep=endpoint)\n \n if entities is not None:\n qs = {}\n for e in entities:\n qs.update({'entityId': e})\n \n r = requests.get(url, headers=headers, params=qs)\n else:\n r = requests.get(url, headers=headers)\n \n return r.json()", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/2017<br/>\"\n f\"/api/v1.0/start2<br/>\"\n f\"/api/v1.0/range<br/>\"\n )", "def describe(self):\n return ''", "def help(self):\n res = \"\"", "def toString(self):\n st = \" \\n\"\n st += \"Title: \" +self.getTitle()+ \"\\n\"\n st += \"Path: \" +self.getPath()+ \"\\n\"\n st += \"Description: \" +self.getDescription()+ \"\\n\"\n return st", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/station<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs</br>\"\n f\"/api/v1.0/<start>/<end></br>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start/end<br/>\"\n \n )", "def description(self):", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def summary(self):\n response = self._get(self.uri_for(\"summary\"))\n 
return json_to_py(response)", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start/end\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start/end<br/>\"\n )" ]
[ "0.6887148", "0.6648697", "0.65732783", "0.63620585", "0.6329359", "0.6313626", "0.6280509", "0.6225066", "0.6117485", "0.6111171", "0.61043644", "0.60946447", "0.6082813", "0.60682595", "0.6067177", "0.6052863", "0.6049613", "0.59981334", "0.59859276", "0.59684676", "0.5964471", "0.5889672", "0.5887159", "0.5877157", "0.5853452", "0.58123237", "0.5769632", "0.5756064", "0.57417315", "0.57114685", "0.571128", "0.57096577", "0.5708175", "0.57029855", "0.56963027", "0.5684172", "0.56807303", "0.56807303", "0.56807303", "0.5672813", "0.5671905", "0.5671905", "0.56696767", "0.5668153", "0.56475407", "0.5646781", "0.56444794", "0.5640563", "0.5633624", "0.5624946", "0.5623865", "0.5615525", "0.559326", "0.5582307", "0.55783343", "0.5577212", "0.5567236", "0.55660737", "0.5552609", "0.5551342", "0.5549522", "0.5548807", "0.5548807", "0.5548807", "0.5548807", "0.5541449", "0.55387497", "0.553727", "0.5537075", "0.55339056", "0.55281126", "0.5518637", "0.55176836", "0.55166864", "0.55154276", "0.5510604", "0.5505796", "0.55039763", "0.5486811", "0.5484248", "0.5482281", "0.54784846", "0.5469671", "0.54614234", "0.5461294", "0.5459481", "0.54578936", "0.5457747", "0.5455331", "0.5450688", "0.5450499", "0.5449846", "0.54453874", "0.5443046", "0.54402864", "0.54402864", "0.54402864", "0.5439016", "0.543865", "0.54355365" ]
0.64852643
3
A generator that can be used in a loop.
def status_generator(self, mission): while True: status = {} status["timestamp"] = datetime.datetime.utcnow().replace( microsecond=0, tzinfo=datetime.timezone.utc).strftime( "%a %b %d %H:%M:%S %Z %Y") status["pool_status"], status["allocation_status"], \ status["node_status"] = self.get_pool_status(mission) status["job_status"], status["task_status"] = \ self.get_job_status(mission) status["storage_status"] = \ self.get_storage_container_status(mission) yield status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n yield from self.gen", "def _mc_gen():\r\n n = 1\r\n while True:\r\n yield n\r\n n += 1", "def __iter__(self):\n return self.new_generator()", "def generator():\n mygenerator = (x for x in range(3))\n for element in mygenerator:\n print 'poprve = ', element\n\n for element in mygenerator:\n print 'podruhe = ', element", "def generator(self):\n return [None, 1]", "def __iter__(self) -> Generator:\r\n yield from self.sequence", "def _generator(self):\n while not self._stopFlag:\n yield self._oneStep()\n self._cooperator = None", "def __iter__(self):\n for x in self.seq: yield x", "def simple():\n yield 1\n yield 2\n yield 3", "def next(self):\n return next(self.gen)", "def iterator(self):\n yield", "def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen", "def vytvorit_generator():\n mylist = range(3)\n print 'mylist = ', mylist\n for element in mylist:\n yield element", "def __next__(self):\n\t\treturn next()", "def loop(self):\n yield self\n e = self.next\n while e is not self:\n yield e\n e = e.next", "def id_generator():\n start_value = 0\n while True:\n yield start_value\n start_value += 1", "def _next(self):\n i = 0\n while i < self.size:\n yield self.data[i]\n i += 1", "def very_simple():\n yield 1", "def simple_generator():\n yield 'horse'\n # just going to do it...\n yield 'cow'\n yield 'mouse'", "def get_generator(self):\n while self._is_running():\n yield self._queue.get()", "def get_generator(self):\n while self._is_running():\n yield self._queue.get()", "def sequences(self):\n # i am one\n yield self\n # nothing further\n return", "def __iter__(self):\n while True:\n if self.stop:\n return\n for item in self.get_next_batch():\n yield item", "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def abc():\r\n yield \"a\"\r\n yield \"b\"\r\n yield \"c\"", "def __iter__(self):\n # we should really never have 1e6, let's prevent some user pain\n for ii in range(self._stop):\n yield self.next()\n else:\n raise RuntimeError('Generated over %s images' % (self._stop,))", "def __iter__(self):\n return self._product_generator()", "def multiple_gen(modulus):\n count = 1\n while True:\n yield modulus * count\n count += 1", "def random_values():\n while True:\n yield random()", "def next():", "def next():", "def __iter__(self):\n cursor = self.first()\n while cursor is not None:\n yield cursor.element()\n cursor = self.after(cursor)", "def Next():\n return CheckForError(lib.Generators_Get_Next())", "def __iter__(self):\n yield self", "def id_generator():\n\t\tcount = 0\n\t\twhile True:\n\t\t\tyield count\n\t\t\tcount += 1", "def cycle(obj):\r\n while True:\r\n for item in obj:\r\n yield item", "def __iter__(self):\n for run in self.runs:\n yield run", "def __iter__(self):\n cursor = 0\n while cursor < len(self):\n yield self._items[cursor]\n cursor += 1", "def test_generator_method(self):\n for i in range(0, 4):\n yield self.try_odd, i", "def gen_sequence(a, b, c):\n i = 1\n while True:\n yield a * i**2 + b * i + c\n i += 1", "def counter_wrapper_2(generator):\n yield from generator", "def __iter__(self):\n for batch in self.iterator:\n yield Batch.from_iterator_batch(batch, self.pad_index, self.sos_index, self.eos_index)", "def counter_wrapper(generator):\n for value in generator:\n yield value", "def generator(self) -> Iterator[Tuple[int, int, complex]]:\n for inda in range(self._core.lena()):\n alpha_str = self._core.string_alpha(inda)\n for indb in range(self._core.lenb()):\n beta_str = 
self._core.string_beta(indb)\n yield alpha_str, beta_str, self.coeff[inda, indb]", "def __next__(self):\n return self.next()", "def __next__(self):\n return self.next()", "def __next__(self):\n return self.next()", "def stream():\n while True:\n yield random_point()", "def __iter__(self):\r\n return self._iterate()", "def customer_generator(env, inventory_stock):\n for i in itertools.count():\n yield env.timeout(random.randint(*T_INTER))\n env.process(customer(env, inventory_stock, 'Customer_'+str(i+1)))", "def __iter__(self):\n counter = 0\n while True:\n if counter < len(self.all_records):\n yield self.all_records[counter]\n else:\n yield self.next()\n counter += 1", "def __iter__(self):\n for b in self.x:\n yield b", "def random_keys(self):\n while True:\n yield self.generator.str()", "def iterate(self, start, end):\n if not self.db:\n self.db = self.get_db()\n\n p = start[:-4]\n s = struct.unpack(\"!L\", start[-4:])[0]\n e = struct.unpack(\"!L\", end[-4:])[0]\n time.sleep(self.READ_DELAY)\n while s <= e:\n v = random.random() * 100\n yield p + struct.pack(\"!L\", s), struct.pack(\"!d\", v)\n s += self.STEP", "def __iter__(self):\n for sample in self.samples:\n yield sample", "def enumerate(self):\n\n done = False\n while not done:\n mcs = self.compute()\n\n if mcs != None:\n yield mcs\n else:\n done = True", "def integers():\n i = 1\n while True:\n yield i\n i = i + 1", "def integers():\n i = 1\n while True:\n yield i\n i = i + 1", "def my_generator(n):\n print(\"Hello world! I'm a generator.\")\n i = 1\n try:\n while True:\n yield \"I generated %d\" % (i * n)\n i += 1\n except GeneratorExit:\n print(\"Generator closed at i=%d\" % i)", "def emptyGenerator():\n return\n yield", "def __iter__(self):\n for o in self._iter:\n yield o", "def one_shot_iterator(dataloader):\n while True:\n for data in dataloader:\n yield data", "def __iter__(self):\n for i in range(len(self)):\n yield self[i]", "def __iter__(self):\n for i in range(len(self)):\n yield self[i]", "def __call__(self):\n yield from self", "def __iter__(self):\n for index in range(len(self)):\n yield self[index]", "def __iter__(self):\n for i in range(self.n):\n yield self.get(i, i + 1)", "def __iter__(self):\n cursor=0\n while cursor<len(self):\n yield self._item[cursor].key\n cursor+=1", "def __iter__(self):\n yield from self.url.generator", "def __iter__(self):\n for i in range(len(self.data)):\n yield self.data[i]", "def __call__(self):\r\n return self.next()", "def __iter__(self):\n for val in self.value:\n yield val", "def __iter__(self):\n cursor = self._front\n while not cursor is None:\n yield cursor.data\n cursor = cursor.next", "def __iter__(self):\n return iter(range(1, self.size() + 1))", "def __iter__(self):\n return iter(())", "def new_generator(self):\n return self.generator_function(*self.args, **self.kwargs)", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__(self):\n for sample in self.data:\n yield sample", "def __iter__(self):\n return self.next()", "def __iter__(self):\n while True:\n for item in (self[i] for i in range(len(self))):\n yield item", "def iterate(f, x):\n while True:\n yield x\n x = f(x)", "def iterwhite():\n while True:\n for n in rng.randn(100):\n yield n", "def frame_generator(self):\n frame = 0\n while not self.process.termination:\n yield frame\n frame += 1", "def task5(count):\n number_1, number_2 = 1, 1\n for _ in range(count):\n yield number_1\n number_1, number_2 = number_2, number_1 + number_2", "def cycle(iterator: Iterable[Any]) -> 
Iterable[Any]:\n while True:\n yield from iterator", "def rowgen(searchcursor_rows):\n rows = searchcursor_rows\n row = rows.next() \n while row:\n yield row\n row = rows.next()", "def _generator(self):\n\t\twhile(1):\n\t\t\ttry:\n\t\t\t\tm = self.messages.pop(0) # pop the first Flash2Message in the list\n\t\t\t\tyield m\n\t\t\texcept IndexError:\n\t\t\t\traise StopIteration", "def __iter__(self):\n return self._next()", "def __iter__(self):\n return self._next()", "def __iter__(self):\n return self._next()", "async def async_generator() -> Generator[float, None, None]:\n for i in range(10):\n yield (random.uniform(0, 10))\n await asyncio.sleep(1)", "def generator(self):\n return self._generator", "def __iter__(self):\n for b in self.dl: \n yield to_device(b, self.device) # yield pauses the execution, not store values in memory, forgets about them once iterated\n # no need to remove batch of data from device, done automatically", "def simple_seq(seq):\n for i in seq:\n yield i", "def __iter__(self):\n items = self._fetch()\n for item in items:\n yield item", "def test_generator_scope():\n def inner(val):\n print(\"inner running\")\n return [0, val]\n gen = (a for a in inner(10))\n print(\"generator created\")\n return gen", "def infinite_increment():\n i = 0\n while 1:\n yield i\n i += 1", "def __iter__(self):\n for x in self.innings:\n yield x" ]
[ "0.77754736", "0.76618725", "0.7656073", "0.75702596", "0.74272484", "0.74266475", "0.73606414", "0.7329172", "0.7289084", "0.7237256", "0.72144675", "0.7204889", "0.71852434", "0.7164605", "0.7125203", "0.7109289", "0.7065946", "0.70342624", "0.7027617", "0.70258605", "0.70258605", "0.6998238", "0.69878113", "0.6983592", "0.69728726", "0.6943328", "0.6940175", "0.69308794", "0.6922135", "0.6889133", "0.6889133", "0.6881998", "0.68813556", "0.6856801", "0.6854184", "0.6843894", "0.68197674", "0.6817881", "0.679327", "0.6788238", "0.6756197", "0.6756189", "0.6748089", "0.67203087", "0.6707582", "0.6707582", "0.6707582", "0.6693941", "0.66930795", "0.66858834", "0.66758794", "0.6662933", "0.6659334", "0.665933", "0.6652621", "0.66513544", "0.6641548", "0.6641548", "0.6635567", "0.66318315", "0.662845", "0.6628252", "0.6625814", "0.6625814", "0.66190803", "0.6612619", "0.66034263", "0.6600367", "0.6592", "0.6591028", "0.6585529", "0.6583591", "0.65784496", "0.6574567", "0.65663445", "0.65651846", "0.65631276", "0.65631276", "0.65631276", "0.65631276", "0.6558769", "0.6558567", "0.6557522", "0.6549446", "0.65340644", "0.65212417", "0.6520297", "0.65145594", "0.65069145", "0.6504353", "0.6500082", "0.6500082", "0.6500082", "0.64888984", "0.6485088", "0.6483023", "0.64746207", "0.64732456", "0.6471374", "0.6467573", "0.6460108" ]
0.0
-1
Prints summary statistics for every column of X, split into 2 classes X is the data, y is the class labels, assumed to be 0 or 1. This will also plot the data.
def summarize_source(X, y, decimals=4): # Assumes y is either 1 or 0 pos_idxs = np.where(y == 1)[0] neg_idxs = np.where(y == 0)[0] # Divide dataset into positive and negatives Xs = (X[neg_idxs, :], X[pos_idxs, :]) Ys = (y[neg_idxs], y[pos_idxs]) # Make format string numstr = ", ".join(["{" + str(i) + ":10." + str(decimals) + "f}" for i in range(X.shape[1])]) # Output results print("Total number of samples: " + str(len(y))) print() print(str(len(Ys[1])) + " Positive Samples:") print("\tMin : " + numstr.format( *np.min(Xs[1], axis=0))) print("\tMean : " + numstr.format(*np.mean(Xs[1], axis=0))) print("\tMax : " + numstr.format( *np.max(Xs[1], axis=0))) print() print("\tStdev : " + numstr.format(*np.sqrt(np.var(Xs[1], axis=0)))) print("\tVar : " + numstr.format( *np.var(Xs[1], axis=0))) print() print(str(len(Ys[0])) + " Negative Samples:") print("\tMin : " + numstr.format( *np.min(Xs[0], axis=0))) print("\tMean : " + numstr.format(*np.mean(Xs[0], axis=0))) print("\tMax : " + numstr.format( *np.max(Xs[0], axis=0))) print() print("\tStdev : " + numstr.format(*np.sqrt(np.var(Xs[0], axis=0)))) print("\tVar : " + numstr.format( *np.var(Xs[0], axis=0)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def basic_summary(data):\n headers = [\"Split\", \"Samples\", \"Height\", \"Width\", \"Channels\", \"Classes\"]\n print(table_format(headers, header = True))\n for split in [\"train\", \"valid\", \"test\"]:\n X, y = data[split]\n n, h, w, c = X.shape\n n_classes = np.unique(y).shape[0]\n row = [split, n, h, w, c, n_classes]\n print(table_format(row))", "def analyze_data(data):\n mean_data = np.mean(data[:,:-1], axis=0)\n std_data = np.std(data[:,:-1], axis=0)\n \n print(f'Mean of data features: {mean_data}')\n print(f'Std of data features: {std_data}')\n\n # Look at and analyze data\n \n print(f'Shape: {data.shape}')\n print(f'Head: {data.head()}')\n print(f'Tail: {data.tail()}')\n print(f'Describe data: {data.describe()}')\n \n # class distribution\n print(data.groupby('class').size())\n \n # box plots\n data.plot(kind='box' , sharex = False , sharey = False, figsize=(15,10))\n plt.show()\n \n # histograms\n data.hist(edgecolor = 'black', linewidth=1, figsize=(15,5))\n plt.show()\n \n # scatter plot matrix\n scatter_matrix(data)\n plt.show()\n \n # seaborn pairplot: relationship between pairs of features\n sns.pairplot(data, hue=\"class\")\n plt.show()", "def summary_plot(self, X, y, plot_type='dot'):\n\n assert(plot_type in _SHAP_SUMMARY_PLOT_CHOICE)\n\n y = self._slice_target_index(y=y)\n\n for index in range(_n_targets(y)):\n if sklearn.utils.multiclass.type_of_target(y) == 'continuous-multioutput':\n self.fit(X, y.iloc[:, index].values.ravel(order='K'))\n else:\n self.fit(X, y)\n _, shap_values = self.explainer(X=X)\n\n shap.summary_plot(shap_values=shap_values, features=X,\n plot_type=plot_type, feature_names=list(X.columns),\n show=self.show)", "def dataset_statistics(dataset):\n print (dataset.describe())", "def dataset_statistics(dataset):\n print(dataset.describe())", "def dataset_statistics(dataset):\n print(dataset.describe())", "def visualize(X: pd.DataFrame, y: pd.DataFrame) -> None:\r\n y[\"Action\"].value_counts().plot.pie(explode=(0.02, 0.04, 0.05, 0.09), title=\"Proportion of classes in dataset\")\r\n plt.savefig(\"Figures/proportions\")\r\n\r\n for i, column in enumerate(X.columns):\r\n fig, ax = plt.subplots(1, 2)\r\n\r\n ax[0].hist(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[0].set_xlabel(column)\r\n ax[0].set_ylabel(\"Frequency\")\r\n\r\n ax[1].boxplot(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[1].set_xlabel(\"Action\")\r\n ax[1].set_ylabel(column)\r\n\r\n X[column].hist(by=y[\"Action\"])\r\n\r\n ax[0].legend([\"allow\", \"deny\", \"drop\", \"reset-both\"])\r\n ax[1].set_xticklabels([\"allow\", \"deny\", \"drop\", \"reset-both\"])\r\n fig.suptitle(\"Distribution of classes among attributes\")\r\n plt.savefig(\"Figures/boxplots\")", "def print_stats(ds):\n print(\"Dataset Name: \" + ds.name)\n print(\"Dataset Mode: \" + ds.mode)\n print(\"Band Count: \" + str(ds.count))\n print(\"Dataset Width: \" + str(ds.width))\n print(\"Dataset Height: \" + str(ds.height))\n print(\"Dataset Bounds: \", ds.bounds)\n print(\"Dataset Transform: \", ds.transform)\n ul = ds.transform * (0, 0)\n print(\"Upper Left Corner: \", ul)\n lr = ds.transform * (ds.width, ds.height)\n print(\"Lower Right Corner: \", lr)\n {i: dtype for i, dtype in zip(ds.indexes, ds.dtypes)}", "def 
display_summary_statistics(tx, column_names=None):\n \n N, D = tx.shape\n \n mean = tx.mean(axis=0)\n median = np.median(tx, axis=0)\n std = tx.std(axis=0)\n max_ = tx.max(axis=0)\n min_ = tx.min(axis=0)\n n_undef = (tx <= -999.0).sum(axis=0)\n pct_undef = (tx <= -999.0).mean(axis=0) * 100\n\n column_names = column_names if column_names is not None else range(D)\n \n print(\" Column | Mean | Median | Std dev | Max | Min | # Undefined | % Undef \")\n for i, (col, m, med, s, mx, mn, nu, pu) in enumerate(zip(column_names, mean, median, std, max_, min_, n_undef, pct_undef)):\n print(f\"{i:2}-{col:27} | {m:8.3f} {med:8.3f} {s:8.3f} {mx:8.3f} \" + \n f\"{mn:8.3f} {nu:10.3f} {pu:7.3f}\")", "def print_summary(column, data):\n print(data[column].describe())\n print()\n print('Количество уникальных значений:', data[column].nunique())\n print('Количество пустых значений:', data[column].isnull().sum())", "def _print_summary(data, metric):\n\n print(u'Cortical thickness {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data[:, 0].mean(), data[:, 0].std(ddof=1),\n data[:, 0].min(), data[:, 0].max()))\n print('Other modalities {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data[:, 1:].mean(), data[:, 1:].std(ddof=1),\n data[:, 1:].min(), data[:, 1:].max()))\n print('Overall {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data.mean(), data.std(ddof=1),\n data.min(), data.max()))", "def basic_stats(data):\n if isinstance(data, pd.DataFrame):\n return data.describe(percentiles=[0.5]).T.drop(['50%'], axis=1)\n else:\n return data.to_frame().describe(percentiles=[0.5]).T.drop(['50%'], axis=1)", "def compute_statistics(self):\n for i in range(len(self.wine_matrix[0, :])):\n feature = self.wine_matrix[:, i]\n self.wine_stats['feature ' + str(i)] = {}\n if i == 11: # results column\n self.wine_stats['feature ' + str(i)]['positive_class_ratio'] = (feature == 1).sum() / len(feature)\n null, self.wine_stats['feature ' + str(i)]['pvalue'] = stats.normaltest(feature)\n\n # plot\n # pyplot.hist(feature, bins=50)\n # pyplot.show()\n\n for i in range(len(self.cancer_matrix[0, :])):\n feature = self.cancer_matrix[:, i]\n self.cancer_stats['feature ' + str(i)] = {}\n if i == 10: # results column\n self.cancer_stats['feature ' + str(i)]['positive_class_ratio'] = (feature == 1).sum() / len(feature)\n null, self.cancer_stats['feature ' + str(i)]['pvalue'] = stats.normaltest(feature)\n\n # plot\n # pyplot.hist(feature, bins=50)\n # pyplot.show()", "def get_statistics(column_data):\r\n print(\"The statistics for this column are:\")\r\n print(\"Count = \", column_data.count())\r\n print(\"Mean = \", column_data.mean())\r\n print(\"Standard Deviation = \", column_data.std())\r\n print(\"Min = \", column_data.min())\r\n print(\"Max = \", column_data.max())\r\n column_data.hist()\r\n print(\"The Histogram of this column can be downloaded now.\")", "def show_stats(x):\n print(\"min =\", x.min())\n print(\"max =\", x.max())\n print(\"median =\", np.median(x))\n print(\"average =\", x.mean())\n print(\"std =\", x.std())", "def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do", "def inst_class_stats(df, col='num_pkts'):\n classes = df.groupby('class_label')\n stat = classes[col].describe()\n return stat", "def visualization(data):\n\t# preview top 5 row of data\n\tprint(\"\\n--------Data preview--------\\n{0}\"\n\t\t .format(data.head()))\n\tprint(\"\\nNull value status as follow:\\n{0}\".format(data.isnull().sum()))\n\tcols = [col for col in 
data.columns]\n\tprint(\"\\nNumber of original features: {0}\".format(len(cols)))\n\tprint(\"\\nFeatures types:\\n{0}\".format(data[cols].dtypes.value_counts()))\n\n\tcounts = [[], [], []]\n\tfor col in cols:\n\t\t# the data type of each feature\n\t\ttyp = data[col].dtype\n\t\t# the number of differents value in each feature\n\t\tuniq = len(np.unique(data[col]))\n\t\t# constant value feature\n\t\tif uniq == 1:\n\t\t\tcounts[0].append(col)\n\t\t# binary value feature\n\t\telif uniq == 2 and typ == np.int64:\n\t\t\tcounts[1].append(col)\n\t\t# multiple value feature\n\t\telse:\n\t\t\tcounts[2].append(col)\n\n\tprint('\\nConstant features: {}\\nBinary features: {} \\nCategorical features: {}\\n'.format(*[len(c) for c in counts]))\n\tprint('Constant features:', counts[0])\n\tprint('Binary features:', counts[1])\n\tprint('Categorical features:', counts[2])\n\n\tfig, axes = plt.subplots(2,2)\n\tfig.set_size_inches(12, 10)\n\tsn.boxplot(data=data,y=\"count\",orient=\"v\",ax=axes[0][0])\n\tsn.boxplot(data=data,y=\"count\",x=\"season\",orient=\"v\",ax=axes[0][1])\n\tsn.boxplot(data=data,y=\"count\",x=\"hour\",orient=\"v\",ax=axes[1][0])\n\tsn.boxplot(data=data,y=\"count\",x=\"workingday\",orient=\"v\",ax=axes[1][1])\n\n\taxes[0][0].set(ylabel='Count',title=\"Box Plot On Count\")\n\taxes[0][1].set(xlabel='Season', ylabel='Count',title=\"Box Plot On Count Across Season\")\n\taxes[1][0].set(xlabel='Hour Of The Day', ylabel='Count',title=\"Box Plot On Count Across Hour Of The Day\")\n\taxes[1][1].set(xlabel='Working Day', ylabel='Count',title=\"Box Plot On Count Across Working Day\")\n\tplt.show()\n\n\tfig,(ax1,ax2,ax3,ax4)= plt.subplots(nrows=4)\n\tfig.set_size_inches(12,20)\n\tsortOrder = [1,2,3,4,5,6,7,8,9,10,11,12]\n\thueOrder = [\"Sunday\",\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\"]\n\n\tmonthAggregated = pd.DataFrame(data.groupby(\"month\")[\"count\"].mean()).reset_index()\n\tmonthSorted = monthAggregated.sort_values(by=\"count\",ascending=False)\n\tsn.barplot(data=monthSorted,x=\"month\",y=\"count\",ax=ax1,order=sortOrder)\n\tax1.set(xlabel='Month', ylabel='Avearage Count',title=\"Average Count By Month\")\n\n\thourAggregated = pd.DataFrame(data.groupby([\"hour\",\"season\"],sort=True)[\"count\"].mean()).reset_index()\n\tsn.pointplot(x=hourAggregated[\"hour\"], y=hourAggregated[\"count\"],hue=hourAggregated[\"season\"],\n\t data=hourAggregated, join=True,ax=ax2)\n\tax2.set(xlabel='Hour Of The Day', ylabel='Users Count',\n\t title=\"Average Users Count By Hour Of The Day Across Season\",label='big')\n\n\thourAggregated = pd.DataFrame(data.groupby([\"hour\",\"weekday\"],sort=True)[\"count\"].mean()).reset_index()\n\tsn.pointplot(x=hourAggregated[\"hour\"], y=hourAggregated[\"count\"],hue=hourAggregated[\"weekday\"],hue_order=hueOrder,\n\t data=hourAggregated, join=True,ax=ax3)\n\tax3.set(xlabel='Hour Of The Day', ylabel='Users Count',\n\t title=\"Average Users Count By Hour Of The Day Across Weekdays\",label='big')\n\n\thourTransformed = pd.melt(data[[\"hour\",\"casual\",\"registered\"]], id_vars=['hour'], value_vars=['casual', 'registered'])\n\thourAggregated = pd.DataFrame(hourTransformed.groupby([\"hour\",\"variable\"],sort=True)[\"value\"].mean()).reset_index()\n\tsn.pointplot(x=hourAggregated[\"hour\"], y=hourAggregated[\"value\"],hue=hourAggregated[\"variable\"],\n\t hue_order=[\"casual\",\"registered\"], data=hourAggregated, join=True,ax=ax4)\n\tax4.set(xlabel='Hour Of The Day', ylabel='Users Count',\n\t title=\"Average Users Count By Hour Of The Day Across 
User Type\",label='big')\n\tplt.show()", "def generate_statistics(data):\n print(data)\n for key in data:\n print('****\\nSummary data for %s:\\n----' % key)\n for category in data[key]:\n mean = generate_mean(data[key][category])\n print('\\taverage %s: %d' % (category, mean))\n print('\\n\\n')\n return", "def report(self, X, y):\n pred = self.model.predict(X)\n skplt.estimators.plot_feature_importances(\n self.model, x_tick_rotation=90)\n plt.show()\n skplt.metrics.plot_confusion_matrix(y, pred, normalize=True)\n plt.show()\n print(\"Classification report\")\n print(classification_report(y, pred))\n skplt.metrics.plot_precision_recall(y, self.model.predict_proba(X))\n plt.show()\n pd.DataFrame({\n \"predict_probability\": self.model.predict_proba(X)[:, 1],\n \"observed\": y\n }).hist(\n \"predict_probability\", by=\"observed\")\n plt.suptitle(\"Probability distribution by class\")\n plt.show()", "def boxplots_of_classes(self, title:str, y_axis: str=\"mean activity over all neurons\", second_path: str=r'D:\\Dataframes\\double_skip_mean', show:bool=True, dest_path:str=None, show_outliers: bool=False):\n data = []\n counter = 0\n for pop in self.populations:\n df = pd.read_csv(self.path + '\\\\{}.csv'.format(pop))\n trials = df['label'].tolist()\n values = df['Component 1'].tolist()\n response = df['response'].tolist()\n \n for i in range(len(response)):\n # Removing day 4 trials\n if eval(trials[i])[0] != 4:\n data.append([response[i], values[i], \"Transition over 1 day\"])\n\n df = pd.DataFrame(data, columns = ['Labels', y_axis, \"Transition\"])\n\n self.__box_plot(df, \"Labels\", y_axis, \"Transition\", title, show=show, dest_path=dest_path, showfliers=show_outliers, order = [\"0->0\", \"0->1\", \"1->0\", \"1->1\"])", "def report(self, X, y):\n predict = self.model.predict(X)\n\n skplt.estimators.plot_feature_importances(\n self.model, x_tick_rotation=90)\n plt.show()\n\n fig, ax = plt.subplots(figsize=(7, 7))\n sns.scatterplot(x=y, y=predict)\n lims = [\n np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes\n np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes\n ]\n ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)\n ax.set_aspect('equal')\n ax.set_xlim(lims)\n ax.set_ylim(lims)\n ax.set_xlabel(\"Observed\")\n ax.set_ylabel(\"Predict\")\n ax.set_title(\"Predict vs. Observed\")\n plt.show()\n\n residuals = y - predict\n\n fig, ax = plt.subplots(figsize=(7, 7))\n sns.scatterplot(x=y, y=residuals)\n plt.title(\"Residuals vs. Observed\")\n plt.xlabel(\"Obserbed\")\n plt.ylabel(\"Residuals\")\n plt.show()\n\n plt.hist(residuals)\n plt.title(\"Residuals distribution\")\n plt.xlabel(\"Residuals value\")\n plt.ylabel(\"Count\")\n plt.show()\n\n display(\n pd.DataFrame({\n \"explained_variance_score\":\n metrics.explained_variance_score(y, predict),\n \"mean_absolute_error\":\n metrics.mean_absolute_error(y, predict),\n \"mean_squared_log_error\":\n metrics.mean_squared_log_error(y, predict),\n \"median_absolute_error\":\n metrics.median_absolute_error(y, predict),\n \"r2_score\":\n metrics.r2_score(y, predict)\n },\n index=[0]))", "def statistics(self, **kwargs) -> None:\n print(\n tabulate.tabulate(\n list(self._iter_statistics(**kwargs)),\n headers=[\"path\", \"type\", \"occurences\", \"%\"],\n floatfmt=\".3f\",\n )\n )", "def print_results_summary(self,remove_zeros=False,trim_end_zeros=False):\n if remove_zeros:\n if trim_end_zeros:\n raise Warning('remove_zeros = False overrides trim_end_zeros=True. 
Removing all values with mean=zero')\n nz_ind = np.nonzero(self.xhat)\n xhats = self.xhat[nz_ind]\n sigmas = self.mean_stddev[nz_ind]\n elif trim_end_zeros:\n xhats = np.trim_zeros(self.xhat,trim='b')\n sigmas = self.mean_stddev[np.arange(xhats.size)]\n else:\n xhats = self.xhat\n sigmas = self.mean_stddev\n\n self._print_results_header()\n print('{: >5} {: >8} {: >10} {: >4}'.format('n','mean','error','pct_error'))\n for i in range(xhats.size):\n print('{0: >5} {1: >8.4g} +/- {2: >10.4g} ({3: >4.1%})'.format(i,xhats[i],sigmas[i],sigmas[i] / xhats[i]))", "def show_stats(x, **kws):\n mean = np.mean(x)\n median = np.median(x)\n std = np.std(x,ddof=1)\n ax = plt.gca()\n ax.annotate(\"Mean: {:.2f}\\nMedian: {:.2f}\\n$\\sigma$: {:.3e}\".format(mean,median,std), xy=(.6,.3),xycoords=ax.transAxes, fontsize=9)", "def summarise_features(dataset):\n summary = [(mean(attribute), standard_deviation(attribute)) for attribute in zip(*dataset)]\n return summary", "def learn(self, Xtrain, ytrain):\n\n ### YOUR CODE HERE\n \n self.numfeatures = Xtrain.shape[1]\n numsamples = Xtrain.shape[0]\n #print (self.numfeatures)\n count = 0\n for i in ytrain:\n if (i>count):\n count+=1\n self.numclasses = count + 1\n \n if(self.params['usecolumnones']==False):\n b = np.ones((numsamples, self.numfeatures-1))\n b = Xtrain[:,:-1]\n Xtrain = b\n self.numfeatures -= 1\n # print(Xtrain.shape[1])\n\n ### END YOUR CODE\n\n origin_shape = (self.numclasses, self.numfeatures)\n self.means = np.zeros(origin_shape)\n self.stds = np.zeros(origin_shape)\n\n ### YOUR CODE HERE\n countclass = np.zeros(self.numclasses)\n for i in range (0, numsamples):\n k = int(ytrain[i])\n countclass[k] += 1\n for j in range (0, self.numfeatures):\n self.means[k][j]+=Xtrain[i][j]\n \n for i in range (0, self.numclasses):\n #np.true_divide(self.means[i], countclass[i])\n for j in range (0, self.numfeatures):\n self.means[i][j] = self.means[i][j]/(countclass[i]+1e-8)\n \n self.yprob = np.true_divide(countclass, numsamples)\n \n for i in range (0, numsamples):\n k = int(ytrain[i])\n for j in range (0, self.numfeatures):\n self.stds[k][j]+= (Xtrain[i][j] - self.means[k][j])**2\n # print (self.stds)\n \n for i in range (0, self.numclasses):\n #np.true_divide(self.stds[i], countclass[i])\n for j in range (0, self.numfeatures):\n self.stds[i][j] = self.stds[i][j]/(countclass[i]+1e-8)\n \n # print (self.means)\n # print (self.stds)\n ### END YOUR CODE\n\n assert self.means.shape == origin_shape\n assert self.stds.shape == origin_shape", "def descriptive_statistics(relfreqs): \n means = np.mean(relfreqs, axis=\"columns\")\n stdevs = np.std(relfreqs, axis=\"columns\")\n return means, stdevs", "def chi2_stats_vs_feature():\n print(\"\\nPlotting chi square statistics vs Features\\n\")\n \n features, chi2_critical, chi2_stats = chi2_feature_select.get_chi2_stats(verbose = True)\n width = 0.8\n\n chi2_critical = list(map(math.log, chi2_critical))\n chi2_stats = list(map(math.log, chi2_stats))\n\n x = list(range(1, 3*len(chi2_critical) + 1, 3))\n plt.bar(x, chi2_stats, width, color = 'g', label = 'Log of chi2_stats')\n plt.bar([p + width for p in x], chi2_critical, width, color = 'r', label = 'Log of chi2_critical')\n plt.ylabel('Log of chi-square stats')\n plt.xlabel('features')\n plt.tight_layout()\n\n plt.xticks([p + width for p in x], features)\n plt.legend(loc='best')\n plt.axis([0 ,50 ,0 ,10])\n plt.show()", "def descriptive_stats(array, verbose=True, label='', mean=False, plot=False):\n if mean:\n mean_ = np.mean(array)\n median = np.median(array)\n mini = 
np.min(array)\n maxi = np.max(array)\n first_qu = np.percentile(array, 25)\n third_qu = np.percentile(array, 75)\n\n if verbose:\n if mean:\n label += 'min={:.1f} / 1st QU={:.1f} / ave={:.1f} / med={:.1f} / '\n label += '3rd QU={:.1f} / max={:.1f}'\n print(label.format(mini, first_qu, mean_, median, third_qu, maxi))\n else:\n label += 'min={:.1f} / 1st QU={:.1f} / med={:.1f} / 3rd QU={:.1f} '\n label += '/ max={:.1f}'\n print(label.format(mini, first_qu, median, third_qu, maxi))\n\n if plot:\n boxplot(array, vert=False, meanline=mean, showfliers=True, sym='.')\n\n if mean:\n return mini, first_qu, mean_, median, third_qu, maxi\n else:\n return mini, first_qu, median, third_qu, maxi", "def scatter_2_features(df, x, y, ylim_i = 0, set_y_limit = False, xlim_i = 0, set_x_limit = False, order_boxplot = False, print_value = False, num_label = 1):\n \n value_counts_temp = df[x].value_counts()\n f, ax = plt.subplots(figsize=(18, 7));\n plot =plt.scatter(df[x], df[y])\n plt.xticks(rotation=90);\n ax.set_title('Scatter plot of {} group by {}'.format(y, x));\n plt.xlabel(str(x))\n plt.ylabel(str(y))\n if set_y_limit:\n ax.set_ylim(top = ylim_i);\n if set_x_limit:\n ax.set_xlim(right = xlim_i);\n if print_value:\n print(value_counts_temp)", "def print_summary_stats(self) -> None:\n print(\"Number of Users: {}\".format(len(self.all_users)))\n print(\"Number of Utterances: {}\".format(len(self.utterances)))\n print(\"Number of Conversations: {}\".format(len(self.conversations)))", "def print_statistics(samples, values=None, sample_labels=None, value_labels=None):\n num_vars, nsamples = samples.shape\n if values is None:\n values = np.empty((nsamples, 0))\n if values.ndim == 1:\n values = values[:, np.newaxis]\n num_qoi = values.shape[1]\n assert nsamples == values.shape[0]\n if sample_labels is None:\n sample_labels = ['z%d' % ii for ii in range(num_vars)]\n if value_labels is None:\n value_labels = ['y%d' % ii for ii in range(num_qoi)]\n data = [(label, s) for s, label in zip(samples, sample_labels)]\n data += [(label, s) for s, label in zip(values.T, value_labels)]\n\n # data = [(label, s) for s, label in zip(samples, sample_labels)]\n # data += [(label, s) for s, label in zip(values.T, value_labels)]\n # data = dict(data)\n # df = DataFrame(index=np.arange(nsamples), data=data)\n # print(df.describe())\n\n str_format = ' '.join([\"{:<6}\"]+[\"{:^10}\"]*(len(data)))\n print(str_format.format(*([\" \"]+[dat[0] for dat in data])))\n stat_funs = [lambda x: x.shape[0], lambda x: x.mean(), lambda x: x.std(),\n lambda x: x.min(), lambda x: x.max()]\n stat_labels = [\"count\", \"mean\", \"std\", \"min\", \"max\"]\n str_format = ' '.join([\"{:<6}\"]+[\"{:10.6f}\"]*(len(data)))\n for stat_fun, stat_label in zip(stat_funs, stat_labels):\n print(str_format.format(\n *([stat_label]+[stat_fun(dat[1]) for dat in data])))", "def collect_statistics(self, stat_col, data_streams):\n # Calculate all four statistics.\n _, precisions, recalls, f1scores, supports = self.calculate_statistics(data_streams)\n\n # Calculate weighted averages.\n precision_sum = sum([pi*si for (pi,si) in zip(precisions,supports)])\n recall_sum = sum([ri*si for (ri,si) in zip(recalls,supports)])\n f1score_sum = sum([fi*si for (fi,si) in zip(f1scores,supports)])\n support_sum = sum(supports)\n\n if support_sum > 0:\n precision_avg = precision_sum / support_sum \n recall_avg = recall_sum / support_sum\n f1score_avg = f1score_sum / support_sum\n else:\n precision_avg = 0\n recall_avg = 0\n f1score_avg = 0\n\n # Export averages to statistics.\n 
stat_col[self.key_precision] = precision_avg\n stat_col[self.key_recall] = recall_avg\n stat_col[self.key_f1score] = f1score_avg\n\n # Export support to statistics.\n stat_col[self.key_f1score+'_support'] = support_sum", "def summarize_diagnostics(self):\n # plot loss\n pyplot.subplot(211)\n pyplot.title('Cross Entropy Loss')\n pyplot.plot(self.history.history['loss'], color='blue', label='train')\n pyplot.plot(self.history.history['val_loss'], color='orange', label='test')\n # plot accuracy\n pyplot.subplot(212)\n pyplot.title('Classification Accuracy')\n pyplot.plot(self.history.history['accuracy'], color='blue', label='train')\n pyplot.plot(self.history.history['val_accuracy'], color='orange', label='test')\n # save plot to file\n pyplot.savefig(f'{self.project_home / \"o\"}/{self.model.name}_plot.png')\n pyplot.close()", "def output_metrics(self):\n print('')\n for key in sorted(self.metrics):\n print('{}:'.format(key), end='')\n for k, v in self.metrics[key].items():\n if type(v[-1]) is list:\n print('\\t' + k + ': ' + ''.join('{:5.3f} '.format(vs) for vs in v[-1]), end='')\n else:\n print('\\t{}: {:5.3f}'.format(k, v[-1]), end='')\n print('\\n', end='')", "def print_summary_metrics(lst):\n print('*' * 50)\n print(' ' * 16 + 'Summary statistics')\n print('*' * 50)\n print('mean: {} | median: {} | mode: {}'.format(get_mean(lst),\n get_median(lst),\n get_mode(lst)))\n print('range: {} | IQR: {}'.format(get_range(list_nums),\n get_IQR(list_nums)))\n print('\\n')\n print('original list: \\n {}'.format(lst))\n print('sorted list: \\n {}'.format(sorted(lst)))\n print('List without outliers: \\n {}'.format(\n remove_outliers(list_nums)))", "def summary_statistics(y_true, y_pred):\r\n\r\n assert y_true.ndim == 1 and y_pred.ndim <= 2\r\n\r\n # This function cannot be used for multiclass classification.\r\n # if (y_pred.ndim == 2 and y_pred.shape[-1] > 2):\r\n # raise NotImplementedError(\"summary_statistics does not support multiclass \" \\\r\n # + \"classification.\")\r\n\r\n # Greedy classification, handles one-dimensional and two-dimensional y_preds\r\n y_greedy = y_pred.argmax(-1) if y_pred.ndim > 1 else y_pred.round()\r\n\r\n # Calculate the simple accuracy.\r\n acc = torch.sum(y_greedy == y_true) / len(y_true)\r\n\r\n # Calculate the MCC.\r\n with warnings.catch_warnings(record=True) as w:\r\n mcc = matthews_corrcoef(y_true, y_greedy)\r\n if w: print('Warning raised with MCC calculation. 
This can likely be ignored.')\r\n\r\n # Calculate the AUC with the predicted probabilities.\r\n auc = roc_auc_score(y_true, y_pred if y_pred.ndim > 1 else y_pred.max(1)[0], multi_class='ovr')\r\n\r\n return acc, mcc, auc", "def summarizeData(data, dataLabel=None, decimals=4):\n if dataLabel is not None:\n print ('%s: Data Set Summary (median, IQR)' % dataLabel)\n n = max([len(l) for l in data.keys()])\n for i, k in enumerate(data.keys()):\n g1 = data[k]\n iqr1 = np.subtract(*np.percentile(g1, [75, 25]))\n print(u' {:s}: {:8.{pc}f}, {:.{pc}f} (median, IQR)'.\n format(k.rjust(n), np.median(g1), iqr1, pc=decimals))", "def feature_stats(df, verbose=False):\n df[df != 0] = 1\n feat_df = df.sum(axis=0)\n df_mean, df_med, df_min, df_max = \\\n feat_df.mean(), feat_df.median(), feat_df.min(), feat_df.max()\n if verbose:\n print \"##########Feature Statistics##########\"\n print \"Mean: \", df_mean\n print \"Median: \", df_med\n print \"Min: \", df_min\n print \"Max: \", df_max\n print \"######################################\"\n return df_mean, df_med, df_min, df_max", "def print_classification_info(clf, x, y):\n x_tr, x_ts, y_tr, y_ts = train_test_split(x, y, train_size=0.8, test_size=0.2)\n clf.fit(x_tr, y_tr)\n p = clf.predict(x_ts)\n print(classification_report(y_ts, p))\n print(confusion_matrix(y_ts, p))", "def get_evaluation_statistics(ind2label, label=0):\n print(\"evaluation statistics for label\", label, ind2label[label])\n tp, fp, fn = get_TP_FP_FN(cm, label)\n print(\"True positives\", tp, \" , False positives\", fp, \" , False negatives\", fn)\n precision = get_precision(cm, label)\n print(\"Precision\", precision)\n recall = get_recall(cm, label)\n print(\"Recall\", recall)\n f1 = get_F1_score(cm, label)\n print(\"F1 score\", f1, end='\\n\\n')", "def create_plots_for_outliers(feature_1, feature_2):\n data = featureFormat(data_dict, [feature_1, feature_2, 'poi'])\n for point in data:\n x = point[0]\n y = point[1]\n poi = point[2]\n if poi:\n color = 'red'\n else:\n color = 'blue'\n plt.scatter(x, y, color=color)\n plt.xlabel(feature_1)\n plt.ylabel(feature_2)\n plt.show()", "def print_stats(dataset, top=5):\n sum = numpy.sum(list(dataset.values()))\n i = 0\n if sum:\n sorted_keys = sorted(dataset, key=dataset.get, reverse=True)\n max_len_key = max([len(x) for x in sorted_keys][:top]) # use to adjust column width\n for k in sorted_keys:\n try:\n cprint((\"- \\033[1m{:<%d}\\033[0m {:>6} {:<4}\" % max_len_key)\n .format(k, dataset[k], \"(%d%%)\" % ((float(dataset[k]) / sum) * 100)))\n except:\n import ipdb\n ipdb.set_trace()\n i += 1\n if i >= top:\n break\n else:\n cprint(\"No data\")\n cprint(\"\")", "def boxplot_2_features(df, x, y, ylim_i = 0, set_y_limit = False, order_boxplot = False, print_value = False, num_label = 1, save_plot = False, path_dir = None):\n \n value_counts_temp = df[x].value_counts()\n sns.set(font_scale=2)\n f, ax = plt.subplots(figsize=(18, 7));\n if order_boxplot :\n plot =sns.boxplot(x=x, y=y, data=df, order = value_counts_temp.index)\n else:\n plot =sns.boxplot(x=x, y=y, data=df) \n ax.set_title('Boxplot of {} group by {}'.format(y, x));\n plt.xticks(rotation=90);\n if set_y_limit:\n ax.set_ylim(0, ylim_i);\n for ind, label in enumerate(plot.get_xticklabels()):\n if ind % num_label == 0: # every 15th label is kept\n label.set_visible(True)\n else:\n label.set_visible(False)\n if print_value :\n print(value_counts_temp)\n if save_plot == True:\n plt.savefig((plot_dir + \"boxplot\"+str(y)+\"per _\"+str(x)+\".png\"))\n plt.clf()", "def summarize(dataset):\n 
summaries = [(np.mean(attribute), np.std(attribute)) for attribute in zip(*dataset)]\n\n return summaries", "def print_score(y_actual, y_pred, measure):\n print(\"\\t\\tWeighted Average Scores Over Each Output Class\\n\")\n print(\"\\t\\tPrecision\\tRecall\\t\\tF1_Score\")\n for column_name, column in y_actual.iteritems():\n report = classification_report(y_actual[column_name], y_pred[column_name], output_dict=True )\n prec = report[measure]['precision']\n recall = report[measure]['recall']\n f1 = report[measure]['f1-score']\n print(\"%20.2f %15.2f % 15.2f\" % (prec, recall, f1) + \"\\t\\t\" + column_name )", "def show_metrics(y_true, y_pred, target_names):\n print(\"Hamming Loss: {}\".format(hamming_loss(y_true, y_pred)))\n print(\"Zero One Loss: {}\".format(zero_one_loss(y_true, y_pred)))\n print(\"Hamming Loss Non Zero: {}\\n\".format(hamming_loss_non_zero(y_true, np.array(y_pred))))\n print(classification_report(y_true, y_pred, target_names=target_names))", "def print_stats(category, a_column, limit_hi, limit_lo, num_outliers):\n print(\"\"\"\\nThe '{}' category:\n Count: {}\n Distinct: {}\n Min_value: {}\n Max_value: {}\n Median: {}\n Mean: {:.3f}\n St. dev.: {:.3f}\n Limit_Low: {:.3f}\n Limit_High: {:.3f}\n # outliers: {:.3f}\n \"\"\"\n .format(category,\n a_column.count(),\n len(a_column.unique()),\n np.min(a_column),\n np.max(a_column),\n np.median(a_column),\n np.mean(a_column),\n np.std(a_column),\n limit_lo,\n limit_hi,\n num_outliers\n )\n )", "def summarizeFitData(X, y, w=None, categories=None, showavevarminmax=True):\n \n print(\"X.shape=\", X.shape, \"y.shape=\", y.shape,end=\"\")\n if w is None:\n w=pd.Series(np.ones(y.shape))\n else:\n print(\"w.shape=\", w.shape,end=\"\")\n\n print()\n print(\"columns=\", X.columns)\n \n if categories is None:\n categories=y\n\n uniquecategories=sorted(categories.unique())\n print(\"categories=\",uniquecategories)\n print()\n \n print(\"sum of weights per category\")\n length=max([len(str(x)) for x in uniquecategories]+[10])\n print(('{:>'+str(length)+'}').format(\"all\"),('{:>'+str(length)+'}').format(w.sum()))\n for cat in uniquecategories:\n print(('{:>'+str(length)+'}').format(cat), ('{:>'+str(length)+'}').format(w[categories==cat].sum()))\n print(\"\\n\")\n\n if showavevarminmax:\n print(\"average\")\n variablelength=max([len(x) for x in X.columns]+[len(\"variable/class\")])\n print(('{:>'+str(variablelength)+'}').format(\"variable/class\"),end=\"\")\n print(('{:>'+str(length)+'}').format(\"all\"),end=\"\")\n for cat in uniquecategories:\n print(('{:>'+str(length)+'}').format(cat),end=\"\")\n print(\"\")\n \n for i,variable in enumerate(X.columns):\n print(('{:>'+str(variablelength)+'}').format(variable),end=\"\")\n print(('{:>'+str(length)+'.3}').format(np.average(X[variable], weights=w)),end=\"\")\n for cat in uniquecategories:\n print(('{:>'+str(length)+'.3}').format(np.average(X[variable][categories==cat], weights=w[categories==cat])),end=\"\")\n print()\n print(\"\\n\")\n \n print(\"variance\")\n print(('{:>'+str(variablelength)+'}').format(\"variable/class\"),end=\"\")\n print(('{:>'+str(length)+'}').format(\"all\"),end=\"\")\n for cat in uniquecategories:\n print(('{:>'+str(length)+'}').format(cat),end=\"\")\n print()\n \n for i,variable in enumerate(X.columns):\n print(('{:>'+str(variablelength)+'}').format(variable),end=\"\")\n print(('{:>'+str(length)+'.3}').format(variance(X[variable], weights=w)),end=\"\")\n for cat in uniquecategories:\n print(('{:>'+str(length)+'.3}').format(variance(X[variable][categories==cat], 
weights=w[categories==cat])),end=\"\")\n print()\n print(\"\\n\")\n\n print(\"min/max\")\n print(('{:>'+str(variablelength)+'}').format(\"variable/class\"),end=\"\")\n print(('{:>'+str(length)+'}').format(\"all/min\"),end=\"\")\n print(('{:>'+str(length)+'}').format(\"all/max\"),end=\"\")\n for cat in uniquecategories:\n print(('{:>'+str(length)+'}').format(str(cat)+\"/min\"),end=\"\")\n print(('{:>'+str(length)+'}').format(str(cat)+\"/max\"),end=\"\")\n print()\n \n for i,variable in enumerate(X.columns):\n print(('{:>'+str(variablelength)+'}').format(variable),end=\"\")\n print(('{:>'+str(length)+'.3}').format(float(np.min(X[variable]))),end=\"\")\n print(('{:>'+str(length)+'.3}').format(float(np.max(X[variable]))),end=\"\")\n for cat in uniquecategories:\n print(('{:>'+str(length)+'.3}').format(float(np.min(X[variable][categories==cat]))),end=\"\")\n print(('{:>'+str(length)+'.3}').format(float(np.max(X[variable][categories==cat]))),end=\"\")\n print()\n print(\"\\n\")", "def summarize_classes(classes):\n u, indices = np.unique(classes,return_inverse=True)\n num_u=len(u)\n print(\"****************************\")\n print(\"Number of samples: {0}\".format(len(classes)))\n print(\"Number of Classes:{0}\".format(num_u))\n for c in u:\n num_c=np.sum(classes==c)\n print(\"Class {0}: {1} Samples\".format(c,num_c))\n print(\"****************************\")", "def summaryone(x):\n print 'mean and std are ',np.mean(x), np.std(x)\n print 'max and min are ',np.max(x), np.min(x)\n print 'the range is ',np.max(x)-np.min(x)", "def show_learning_curve(self):\n\n # Loop output classes\n for c in range(1,self.n_output_classes):\n # Get data\n x_values = np.array(self.n_class_samples_list[c])\n accuracy = np.array(self.accuracy_list[c])\n precision = np.array(self.precision_list[c])\n recall = np.array(self.recall_list[c])\n F1 = np.array(self.F1_list[c])\n\n # Make plot\n with sns.axes_style(\"ticks\"):\n fig,ax = plt.subplots()\n plt.plot([np.min(x_values),np.max(x_values)],[0.5,0.5],\n color='#777777',linestyle='--')\n plt.plot([np.min(x_values),np.max(x_values)],[0.66,0.66],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.8,0.8],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.9,0.9],\n color='#777777',linestyle=':')\n\n plt.plot( x_values, accuracy, color='#000000',\n linewidth=1, label='Accuracy' )\n plt.plot( x_values, precision, color='#0000aa',\n linewidth=1, label='Precision' )\n plt.plot( x_values, recall, color='#00aa00',\n linewidth=1, label='Recall' )\n plt.plot( x_values, F1, color='#aa0000',\n linewidth=2, label='F1' )\n\n plt.yticks( [0, 0.5, 0.66, 0.8, 0.9, 1.0],\n ['0','0.5','0.66','0.8','0.9','1.0'], ha='right' )\n plt.xlim(np.max(x_values)*-0.02,np.max(x_values)*1.02)\n plt.ylim(-0.02,1.02)\n plt.xlabel('Number of training samples')\n plt.ylabel('Performance')\n plt.title('Learning curve, class {}'.format(c))\n sns.despine(ax=ax, offset=0, trim=True)\n lgnd = plt.legend(loc=4, ncol=1, frameon=True, fontsize=9)\n lgnd.get_frame().set_facecolor('#ffffff')\n ax.spines['left'].set_bounds(0,1)\n ax.spines['bottom'].set_bounds(np.min(x_values),np.max(x_values))", "def descriptive_stats(data_lastDV):\n col_names = data_lastDV.columns.values.tolist() # get the columns' names\n outcome = col_names.pop() # remove the last item in the list\n\n # Summary of Number of Helpers Selected\n print(FORMAT_LINE)\n print(\"Descriptive statistics for: \\'\" + outcome+\"\\'\")\n print(data_lastDV[outcome].describe())\n 
print(FORMAT_LINE)\n\n # Descriptive Statistics of conditions\n print(FORMAT_LINE)\n print(\"Descriptive statistics for: all conditions\")\n df_conditions = data_lastDV[col_names]\n print(df_conditions.describe())\n df_conditions = data_lastDV[col_names+[outcome]] # add numerical column back in for descriptive stats\n\n # Count/Descriptive Stats of individual conditions & mean num helps of each (2^5) conditions\n for cond in col_names:\n print(FORMAT_LINE)\n print(\"Counts & Mean \" + outcome + \" for: \\'\" + cond)\n print(pd.concat([df_conditions.groupby(cond)[cond].count(), df_conditions.groupby(cond)[outcome].mean()], axis=1))", "def get_report(dataset):\n\n dataset = dataset.round(2)\n print('Overall results (mean): ')\n display(dataset[['classifier', 'preprocessor', 'f1', 'precision', 'recall']].groupby(['preprocessor', 'classifier'])\n .mean().round(2))\n print('Overall results (max): ')\n display(dataset[['classifier', 'preprocessor', 'f1', 'precision', 'recall']].groupby(['preprocessor', 'classifier'])\n .max().round(2))\n print('Grouped by Preprocessor (mean):')\n display(dataset[['preprocessor', 'f1', 'precision', 'recall']].groupby('preprocessor').mean().round(2))\n print('Grouped by Classifier (mean):')\n display(dataset[['classifier', 'f1', 'precision', 'recall']].groupby('classifier').mean().round(2))\n\n preprocessors = dataset['preprocessor'].unique()\n metrics = ['f1', 'precision', 'recall']\n\n # For each metric, display top 10 rounds.\n for m in metrics:\n print(f'Top 10 by {m}:')\n display(dataset.sort_values(m, ascending=False).head(10).round(2))\n\n for p in preprocessors:\n for m in metrics:\n d = dataset[dataset['preprocessor'] == p]\n for c in dataset['classifier'].unique():\n plt.plot(d[d['classifier'] == c]['prior'].unique(), d[d['classifier'] == c].groupby('prior').mean()[m],\n label=str(c))\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)\n plt.title(m + ' - ' + str(p))\n plt.show()", "def show_feature_summary(df, colname, display_uniques=False):\n\tprint('Details of feature:',colname)\n\tprint(' - datatype:',df[colname].dtypes)\n\tprint(' - col.size:',df[colname].shape)\n\tprint(' - NaN.vals:',df[colname].isnull().sum())\n\tif (display_uniques): print(' - uniqvals:',get_unique_values(df, colname))\n\tif (display_uniques): print(' - cnt.vals:',get_unique_counts(df, colname))\n\tprint(\"\\n\")", "def plot_data():\n \n [X_train, X_dev, X_test, Y_train, Y_dev, Y_test, numOutputNodes] = load_data('regression') \n \n traindev = np.concatenate((Y_train, Y_dev), 1)\n traindevtest = np.concatenate((traindev, Y_test), 1)\n tdt = traindevtest.reshape(traindevtest.shape[1],)\n\n Y_train = Y_train.reshape(Y_train.shape[1],)\n Y_dev = Y_dev.reshape(Y_dev.shape[1],)\n Y_test = Y_test.reshape(Y_test.shape[1],)\n\n sigma = np.round(np.std(tdt), 3)\n mu = np.round(np.mean(tdt), 3)\n\n # plots histogram of all data together, indicating values of mean and standard deviation\n plt.figure(1)\n plt.hist(tdt)\n plt.title(\"{} data points, mu = {}, sigma = {}\".format(tdt.size, mu, sigma))\n plt.xlabel(\"average Fe coordination number\")\n plt.ylabel(\"frequency\")\n plt.show()\n\n # plots histogram where the training, cross-validation, and test sets have separate bars\n plt.figure(2)\n plt.hist([Y_train, Y_dev, Y_test], label = ['training', 'cross-validation', 'test'], density = True)\n plt.xlabel(\"average Fe coordination number\")\n plt.ylabel(\"frequency\")\n plt.legend()\n plt.show()\n\n # below is graphing for the charge data, as opposed to the 
averaged spectrum data\n [X_train1, X_dev1, X_test1, _, _, _, Y_train1, Y_dev1, Y_test1, numOutputNodes1] = load_data('multi_task')\n traindev1 = np.concatenate((Y_train1, Y_dev1), 1)\n traindevtest1 = np.concatenate((traindev1, Y_test1), 1)\n tdt1 = traindevtest1.reshape(traindevtest1.shape[1],)\n\n Y_train1 = Y_train1.reshape(Y_train1.shape[1],)\n Y_dev1 = Y_dev1.reshape(Y_dev1.shape[1],)\n Y_test1 = Y_test1.reshape(Y_test1.shape[1],)\n\n sigma = np.round(np.std(tdt1), 3)\n mu = np.round(np.mean(tdt1), 3)\n\n # plots histogram of all data together, indicating values of mean and standard deviation\n plt.figure(3)\n plt.hist(tdt1)\n plt.title(\"{} data points, mu = {}, sigma = {}\".format(tdt1.size, mu, sigma))\n plt.xlabel(\"charge\")\n plt.ylabel(\"frequency\")\n plt.show()\n\n # plots histogram where the training, cross-validation, and test sets have separate bars\n plt.figure(4)\n plt.hist([Y_train1, Y_dev1, Y_test1], label = ['training', 'cross-validation', 'test'], density = True)\n plt.xlabel(\"charge\")\n plt.ylabel(\"frequency\")\n plt.legend()\n plt.show()\n\n return None", "def print_stats(self):\n if self.df_avg is None:\n self.collect_stats()\n\n print(\"Simulation Results\")\n print(tabulate(self.df_avg, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"FleetManager stats\")\n print(tabulate(self.manager_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Customer stats\")\n print(tabulate(self.customer_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Transport stats\")\n print(tabulate(self.transport_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Station stats\")\n print(tabulate(self.station_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))", "def pert_stats(df, verbose=False):\n df[df != 0] = 1\n df['num_features'] = df.sum(axis=1)\n df_mean, df_med, df_min, df_max =\\\n df['num_features'].mean(), df['num_features'].median(), df['num_features'].min(), df['num_features'].max()\n if verbose:\n print \"##########Pert Statistics#############\"\n print \"Mean: \", df_mean\n print \"Median: \", df_med\n print \"Min: \", df_min\n print \"Max: \", df_max\n print \"######################################\"\n return df_mean, df_med, df_min, df_max", "def simplePlots() -> None:\r\n \r\n # Univariate data -------------------------\r\n \r\n # Make sure that always the same random numbers are generated\r\n np.random.seed(1234)\r\n \r\n # Generate data that are normally distributed\r\n x = np.random.randn(500)\r\n \r\n # Other graphics settings\r\n # Set \" context='poster' \" for printouts, and \"set_fonts(32)\"\r\n sns.set(context='notebook', style='ticks', palette='muted')\r\n \r\n # Set the fonts the way I like them\r\n set_fonts(16)\r\n \r\n # Scatter plot\r\n plt.plot(x, '.', markersize=7)\r\n plt.xlim([0, len(x)])\r\n \r\n # Save and show the data, in a systematic format\r\n printout('scatterPlot.jpg', xlabel='Datapoints', ylabel='Values', title='Scatter')\r\n \r\n # Histogram\r\n plt.hist(x)\r\n printout('histogram_plain.jpg', xlabel='Data Values',\r\n ylabel='Frequency', title='Histogram, default settings')\r\n \r\n plt.hist(x, 25, density=True)\r\n printout('density_histogram.jpg', xlabel='Data Values', ylabel='Probability',\r\n title='Density Histogram, 25 bins')\r\n \r\n # Boxplot\r\n # The ox consists of the first, second (middle) and third quartile\r\n set_fonts(18)\r\n plt.boxplot(x, sym='*')\r\n printout('boxplot.jpg', xlabel='Values', title='Boxplot')\r\n \r\n 
plt.boxplot(x, sym='*', vert=False)\r\n plt.title('Boxplot, horizontal')\r\n plt.xlabel('Values')\r\n plt.show()\r\n \r\n # Errorbars\r\n x = np.arange(5)\r\n y = x**2\r\n errorBar = x/2\r\n plt.errorbar(x,y, yerr=errorBar, fmt='o', capsize=5, capthick=3)\r\n plt.xlim([-0.2, 4.2])\r\n plt.ylim([-0.2, 19])\r\n printout('Errorbars.jpg', xlabel='Data Values', ylabel='Measurements', title='Errorbars')\r\n\r\n # SD for two groups\r\n weight = {'USA':89, 'Austria':74}\r\n weight_SD_male = 12\r\n plt.errorbar([1,2], weight.values(), yerr=weight_SD_male * np.r_[1,1],\r\n capsize=5, LineStyle='', marker='o')\r\n plt.xlim([0.5, 2.5])\r\n plt.xticks([1,2], weight.keys())\r\n plt.ylabel('Weight [kg]')\r\n plt.title('Adult male, mean +/- SD')\r\n\r\n show_data('SD_groups.jpg', out_dir='.')\r\n \r\n # Barplot\r\n # The font-size is set such that the legend does not overlap with the data\r\n np.random.seed(1234)\r\n set_fonts(16)\r\n \r\n df = pd.DataFrame(np.random.rand(7, 3), columns=['one', 'two', 'three'])\r\n df.plot(kind='bar', grid=False, color=sns.color_palette('muted'))\r\n \r\n show_data('barplot.jpg')\r\n\r\n # Bivariate Plots\r\n df2 = pd.DataFrame(np.random.rand(50, 3), columns=['a', 'b', 'c'])\r\n df2.plot(kind='scatter', x='a', y='b', s=df2['c']*500);\r\n plt.axhline(0, ls='--', color='#999999')\r\n plt.axvline(0, ls='--', color='#999999')\r\n printout('bivariate.jpg')\r\n \r\n sns.set_style('ticks')\r\n\r\n # Pieplot\r\n txtLabels = 'Cats', 'Dogs', 'Frogs', 'Others'\r\n fractions = [45, 30, 15, 10]\r\n offsets =(0, 0.05, 0, 0)\r\n \r\n plt.pie(fractions, explode=offsets, labels=txtLabels,\r\n autopct='%1.1f%%', shadow=True, startangle=90,\r\n colors=sns.color_palette('muted') )\r\n plt.axis('equal')\r\n printout('piePlot.jpg', title=' ')", "def visualise_2d_data(self):\n self.__generate_output_data()\n if len(self.output_data[0]) != 2: # The output dimensions must be 2\n return\n f = Plot.figure()\n f.hold()\n plt.title('2D data')\n for c in sorted(set(self.class_indices)):\n class_mask = mat(self.class_indices).T.A.ravel() == c\n plt.plot(array(self.output_data)[class_mask, 0], array(self.output_data)[class_mask, 1], 'o')\n plt.legend(self.legend)\n plt.show()\n plt.savefig(self.path + '/2dplotlow.png', dpi=200)", "def print_stats(arr: Union[np.ndarray, List]):\n if len(arr) == 0:\n print(\"Error: `arr` has length 0.\")\n sys.exit(0)\n if not isinstance(arr, np.ndarray):\n arr = np.asarray(arr)\n\n mean = arr.mean()\n std = arr.std()\n median = np.median(arr)\n low = arr.min()\n high = arr.max()\n\n print(f\"Mean:\\t{mean}\")\n print(f\"Std:\\t{std}\")\n print(f\"Median:\\t{median}\")\n print(f\"Min:\\t{low}\")\n print(f\"Max:\\t{high}\")\n print(f\"N:\\t{len(arr)}\")", "def visualizeData(df):\n for column in df:\n df[column].value_counts().plot(kind = 'bar', rot = 'vertical', use_index = False)", "def write_stats(self, filestream):\n if not self.summary:\n self.summarize()\n\n print(self.scores, file=filestream)", "def visualize_data(data):\n\n # Instantiate a PCA object for the sake of easy visualisation\n pca = PCA(n_components=2)\n\n # Fit and transform x to visualise inside a 2D feature space\n x_vis = pca.fit_transform(data[data.columns[:-1]])\n y = data['Tumor'].as_matrix()\n\n # Plot the original data\n # Plot the two classes\n palette = sns.color_palette()\n\n plt.scatter(x_vis[y == 0, 0], x_vis[y == 0, 1], label=\"Normal\", alpha=0.5,\n edgecolor=ALMOST_BLACK, facecolor=palette[0], linewidth=0.15)\n plt.scatter(x_vis[y == 1, 0], x_vis[y == 1, 1], label=\"Tumor\", alpha=0.5,\n 
edgecolor=ALMOST_BLACK, facecolor=palette[2], linewidth=0.15)\n\n plt.legend()\n plt.show()", "def plot_dataset(features, labels, nb_classes: int) -> None:\n sns.scatterplot(x=features[:, 0], y=features[:, 1], hue=labels, markers=True)\n plt.title(f'Data from {nb_classes} classes')\n save_plot('mock_dataset')", "def classification_report(self, y_train, y_test):\n features = y_train.unique()\n testfeatures= y_test.unique()\n result = np.unique(np.concatenate((features,testfeatures),0))\n a = np.array(result.tolist())\n print (a)\n list=[]\n for i in a:\n st=str(i)\n list.append(st)\n #result = np.array2string(result, precision=2)\n\n print(classification_report(y_test, self.__dataset, target_names=list))", "def classify_report(scores, columns):\n if isinstance(columns, str):\n columns = [\"{}_0\".format(columns), \"{}_1\".format(columns)]\n\n print(\"{: <15}{: <10}{: <10}{: <10}{}\".format(\n \"Role\", \"Precision\", \"Recall\", \"F1\", \"Support\"), flush=True)\n for role in columns:\n p = scores[\"precision_{}\".format(role)]\n r = scores[\"recall_{}\".format(role)]\n f1 = scores[\"f1_{}\".format(role)]\n s = scores[\"support_{}\".format(role)]\n print(\"{: <15}{:.2f}{:10.2f}{:10.2f}{:10}\"\n .format(role, p, r, f1, s), flush=True)\n\n p, r, f1 = scores[\"precision\"], scores[\"recall\"], scores[\"f1\"]\n print(\"\\n{: <15}{:.2f}{:10.2f}{:10.2f}\".format(\"Total:\", p, r, f1), flush=True)\n\n print(\"AUC: {:.2f}\".format(scores[\"auc\"]), flush=True)\n print(\"Jaccard: {:.2f}\".format(scores[\"jaccard\"]), flush=True)\n print(\"Hamming Loss: {:.2f}\".format(scores[\"hamming_loss\"]), flush=True)", "def print_statistics(self) -> None:\n e = self.current_epoch\n if len(self.loss_history[\"test_loss\"]) > 0:\n template = 'Epoch: {} Training loss: {:.4f}, Test loss: {:.4f}'\n print(template.format(e, self.loss_history[\"training_loss\"][-1],\n self.loss_history[\"test_loss\"][-1]))\n else:\n template = 'Epoch: {} Training loss: {:.4f}'\n print(template.format(e, self.loss_history[\"training_loss\"][-1]))", "def multiclass_classification_summary(metrics, labels, n_obs):\n # Zip values with labels and index by fold\n data = {\n f\"fold{i}\": {k: dict(zip(labels, v)) for k, v in m.items()}\n for i, m in enumerate(metrics)\n }\n\n # Reformat nested dict in a format understood by pandas as a multi-index.\n reform = {\n fold_key: {\n (metrics_key, level): val\n for metrics_key, metrics_vals in fold_val.items()\n for level, val in metrics_vals.items()\n }\n for fold_key, fold_val in data.items()\n }\n # Then transpose to convert multi-index dataframe into hierarchical one\n # (multi-index on the columns).\n df = pd.DataFrame(reform).T\n\n # Append rows for average and stdev of every column\n df.loc[\"average\"] = df.mean()\n df.loc[\"stdev\"] = df.std()\n\n # Hierarchical dataframe to nested dict\n summary = {level: df.xs(level, axis=1).to_dict() for level in df.columns.levels[0]}\n\n summary[\"n_obs\"] = {f\"fold{i}\": int(n) for i, n in enumerate(n_obs)}\n return summary", "def print_info(df):\n\n # Data statistics\n # Number of total samples\n print('There are {n_samples} samples in total.'.format(n_samples=len(list(df.index.get_level_values(0).unique()))))\n\n # Count the different types of labels\n unique = df['label'].unique()\n count = []\n\n for label in unique:\n count.append(len(df.index.get_level_values(0)[df['label'] == label].unique()))\n\n count_dict = {unique[i]: count[i] for i in range(len(unique))}\n count_dict_percentage = {\n unique[i]: np.round(count[i] / 
len(list(df.index.get_level_values(0).unique())), decimals=2)\n for i in range(len(unique))}\n\n print('The types and counts of different labels : \\n {count_dict}'.format(count_dict=count_dict))\n print('The types and counts of different labels as percentage of the total data'\n ' : \\n {count_dict}'.format(count_dict=count_dict_percentage))", "def show_current_scattering_statistics(self, out=sys.stdout):\n print(\"\", file=out)\n print(\"Model and map statistics:\", file=out)\n print(\" mean mFo map height @ carbon: %s\" % format_value(\"%.2f\",\n flex.max(self.carbon_fo_values)), file=out)\n if (self.calpha_mean_two_fofc > 0):\n print(\" mean 2mFo-DFc map height @ C-alpha: %s\" % format_value(\n \"%.2f\", self.calpha_mean_two_fofc), file=out)\n print(\" mean B-factor: %s\" % format_value(\"%.2f\", self.b_mean_all), file=out)\n if (self.b_mean_calpha > 0):\n print(\" mean C-alpha B-factor: %s\" % format_value(\"%.2f\",\n self.b_mean_calpha), file=out)\n print(\" mean water B-factor: %s\" % format_value(\"%.2f\",\n self.b_mean_hoh), file=out)\n n_water_fofc_peaks = 0\n n_water_anom_peaks = 0\n water_sel = self.water_selection()\n print(\" %d water molecules\" % len(water_sel), file=out)\n for i_seq in water_sel :\n map_stats = self.map_stats(i_seq)\n if (map_stats.fofc >= 3.0):\n n_water_fofc_peaks += 1\n if (map_stats.anom is not None) and (map_stats.anom >= 3.0):\n n_water_anom_peaks += 1\n print(\" %d waters have mFo-DFc map >= 3.0 sigma\" % \\\n n_water_fofc_peaks, file=out)\n if (self.anomalous_flag):\n print(\" %d waters have anomalous map >= 3.0 sigma\" % \\\n n_water_anom_peaks, file=out)\n print(\"\", file=out)", "def ploter(self):\n if len(self.dataset[self.first_title]) != 2:\n print('plot is only avilable for two features')\n return\n x_axis = []\n y_axis = []\n for title in self.dataset:\n x_axis.append(self.dataset[title][0])\n y_axis.append(self.dataset[title][1])\n plt.plot(x_axis, y_axis, 'o')\n plt.show()", "def summarize_features(data, include_nan = True, print_vals = True):\n if(include_nan):\n # calculate the different measures including nan values\n means = np.mean(data, axis=0)\n medians = np.median(data, axis=0)\n stds = np.std(data, axis=0)\n mins = np.min(data, axis=0)\n maxs = np.max(data, axis=0)\n else:\n # calculate the different measures discarding nan values\n means = np.nanmean(data, axis=0)\n medians = np.nanmedian(data, axis=0)\n stds = np.nanstd(data, axis=0)\n mins = np.nanmin(data, axis=0)\n maxs = np.nanmax(data, axis=0)\n \n if(print_vals):\n # print the values obtained\n print()\n if(include_nan):\n print(\"summary variables, where nan values are not ignored:\")\n else:\n print(\"summary variables, where nan values are ignored:\")\n for idx, mean in enumerate(means):\n print(\"feature {idx}: mean={m:.3f} std={s:.3f} median={me:.3f} min={mi:.3f} max={ma:.3f}.\".format(\n idx=idx, m=mean, s=stds[idx], me=medians[idx], mi=mins[idx], ma=maxs[idx]))\n print()\n return means, stds, medians, mins, maxs", "def plot_features(data: np.array)->None:\n n_rows = np.size(data, 0)\n n_cols = np.size(data, 1)\n for i in range(n_cols):\n plt.hist(data[:,i])\n plt.show()", "def show(self):\n print \"Name: \"+str(self.name)\n ss = self.y.shape[0]\n for i in xrange(ss):\n print \"Actual: \"+str(self.y[i])\n print \"Prediction: \"+str(self.a[i])\n print \"\"\n print \"\\n\"", "def hist_of_numeric(X):\n figsize(10,3)\n for col in get_numeric(X):\n print(col)\n X[col].hist(bins=50)\n show()", "def plot_std_mean_of_each_neuron(self, title:str, show:bool=True, 
dest_path:str=None):\n d = Data(self.populations, self.path)\n d.split_trial_wise()\n X, x, Y, y = d.get_data()\n X = np.concatenate((X, x))\n Y = np.concatenate((Y, y))\n\n d = {}\n for i in range(len(Y)):\n if Y[i] in d:\n d[Y[i]].append(X[i])\n else:\n d[Y[i]] = [X[i]]\n\n for key in d.keys():\n d[key] = np.asarray(d[key], dtype=float)\n d[key] = np.std(d[key], axis=0)[::-1]\n\n c = {\"1->1\": \"magenta\", \"0->0\": \"cyan\", \"1->0\":\"red\", \"0->1\": \"green\"}\n fig, ax = plt.subplots()\n for key in d.keys():\n ax.plot(range(1, len(X[0]) + 1), d[key], color=c[key])\n \n plt.rcParams.update({'font.size': 30})\n plt.xlabel('{} most active Neurons'.format(len(X[0])))\n plt.ylabel(\"Neuron-wise standard-deviation per class\")\n plt.title(title)\n plt.legend()\n\n if show:\n plt.show()\n\n if dest_path !=None:\n plt.savefig(dest_path + '\\\\{}.png'.format(title))\n\n plt.clf()\n plt.cla()\n plt.close()", "def print_stat(self, returnTable=False):\n summary = PrettyTable([\"Set\", \"Name\", \"Number [-]\", \"Fraction [%]\"])\n summary.align = 'l'\n for name, df in self.subsets.items():\n summary.add_row([name, 'Normal', df[df.abnormal_XR == 0].shape[0], '{:.2%}'.format(df[df.abnormal_XR == 0].shape[0] / df.shape[0])])\n summary.add_row([name, 'Abnormal', df[df.abnormal_XR == 1].shape[0], '{:.2%}'.format(df[df.abnormal_XR == 1].shape[0] / df.shape[0])])\n summary.add_row([name, 'Normal known', df[df.semi_label == 1].shape[0], '{:.2%}'.format(df[df.semi_label == 1].shape[0] / df.shape[0])])\n summary.add_row([name, 'Abnormal known', df[df.semi_label == -1].shape[0], '{:.2%}'.format(df[df.semi_label == -1].shape[0] / df.shape[0])])\n summary.add_row([name, 'Unknown', df[df.semi_label == 0].shape[0], '{:.2%}'.format(df[df.semi_label == 0].shape[0] / df.shape[0])])\n if name != 'test' : summary.add_row(['----']*4)\n if returnTable:\n return summary\n else:\n print(summary)", "def train_and_plot_prediction_metrics(X_train, y_train, X_test, y_test, pipelines):\n\n scores = pd.DataFrame(columns=[\"Model\", \"MAE\", \"MSE\", \"R2\"])\n\n for modelname, pipeline in pipelines.items():\n pipeline.fit(X_train, y_train)\n y_pred = pipeline.predict(X_test)\n mae = mean_absolute_error(y_test, y_pred)\n mse = mean_squared_error(y_test, y_pred)\n r2 = r2_score(y_test, y_pred)\n scores = scores.append(\n {\"Model\": modelname, \"MAE\": mae, \"MSE\": mse, \"R2\": r2}, ignore_index=True\n )\n\n for metric in [\"MAE\", \"MSE\", \"R2\"]:\n ax = sns.barplot(x=\"Model\", y=metric, data=scores)\n ax.set_ylim(bottom=0)\n plt.title(\"Test data: \" + metric)\n plt.show()", "def display_comparison(self, X_val, y_val):\n import matplotlib.pyplot as plt\n x = []\n y = []\n for model_tuple in self.model_list:\n x.append(model_tuple[1])\n y.append(model_tuple[0].score(X_val, y_val))\n plt.scatter(x, y)\n plt.show()", "def summarize_model(clf_, X_tr, X_te, y_tr, y_te, tree=False):\n \n import sklearn.metrics as metrics\n import matplotlib.pyplot as plt\n import pandas as pd\n \n y_hat_tr, y_hat_te = fit_n_pred(clf_, X_tr, X_te, y_tr)\n print('Classification Report:')\n print(metrics.classification_report(y_te, y_hat_te))\n \n if tree:\n fig, ax = plt.subplots(figsize=(10,5), nrows=2)\n\n metrics.plot_confusion_matrix(clf_,X_te,y_te,cmap=\"YlOrRd\", normalize='true',\n ax=ax[0])\n ax[0].set(title='Confusion Matrix')\n ax[0].grid(False)\n\n plot_importance(clf_, X_tr, ax=ax[1])\n plt.tight_layout()\n \n else:\n clf_coef = pd.Series(clf_.coef_[0], index=X_tr.columns, name='Normal')\n abs_coef = pd.Series(abs(clf_.coef_[0]), 
index=X_tr.columns, name='Absolute')\n posi_coef = pd.Series((clf_coef > 0), name='Positive')\n coef_all = pd.concat([clf_coef, abs_coef, posi_coef], axis=1)\n coef_all.sort_values('Absolute', ascending=True, inplace=True)\n coef_all.tail(20)['Normal'].plot(kind='barh', color=coef_all['Positive'].map({True:'b',False:'r'})\n\n metrics.plot_confusion_matrix(clf_,X_te,y_te,cmap=\"YlOrRd\", normalize='true')\n plt.title('Confusion Matrix')\n plt.grid(False)\n plt.tight_layout()\n\ndef grid_searcher(clf_, params, X_tr, X_te, y_tr, y_te, cv=None, keep_t=False, train_score=True):\n \n \"\"\"Takes any classifier, train/test data for X/y, and dict of parameters to\n iterate over. Optional parameters select for cross-validation tuning, keeping\n time for running the gridsearch, and returning training scores when done.\n Default parameters only return the fitted grid search object. MUST HAVE Timer\n class imported.\"\"\"\n \n from sklearn.model_selection import GridSearchCV\n import numpy as np\n \n ## Instantiate obj. with our targets\n grid_s = GridSearchCV(clf_, params, cv=cv, return_train_score=train_score)\n \n ## Time and fit run the 'search'\n time = Timer()\n time.start()\n grid_s.fit(X_tr, y_tr)\n time.stop()\n \n ## Display results\n tr_score = np.mean(grid_s.cv_results_['mean_train_score'])\n te_score = grid_s.score(X_te, y_te)\n print(f'Mean Training Score: {tr_score :.2%}')\n print(f'Mean Test Score: {te_score :.2%}')\n print('Best Parameters:')\n print(grid_s.best_params_)\n \n ## Time keeping and grid obj\n if keep_t:\n lap = time.record().total_seconds()\n print('**********All done!**********')\n return grid_s, lap\n else:\n return grid_s", "def basic_statistics():\n print(train_data['revenue'].describe())\n plt.hist(train_data['revenue'], color = 'blue', edgecolor = 'black',\n bins = int(4))\n\n # Add labels\n plt.title('Histogram of Revenues')\n plt.xlabel('revenues')\n plt.ylabel('P(revenues)')\n plt.show()", "def plot_scatter_n_accuracy_joint(self, data_objects, labels, label_self, markers):\n dataframes = [self.df] + [data.df for data in data_objects]\n labels = [label_self] + labels\n\n acc = []\n n = []\n statistics = []\n for df, label in zip(dataframes, labels):\n acc = df.groupby('worker')['correct'].mean()\n n = df.groupby('worker')['question'].count()\n df_new = pd.concat([acc, n], axis=1)\n df_new['dataset'] = label\n statistics.append(df_new)\n\n df = pd.concat(statistics, axis=0)\n sns.lmplot('question', 'correct', data=df, hue='dataset',\n markers=markers, fit_reg=False)\n plt.xlabel('Number of questions answered')\n plt.ylabel('Accuracy')\n plt.xlim((0, None))\n plt.ylim((0, 1))\n plt.title('')\n return plt.gca()", "def _basic_data_info(X, y):\n num_samples, num_feats = X.shape # start with X properties\n\n # Compute distribution\n classes, counts, percs = _class_distribution(y)\n num_classes = classes.size\n\n # Return data info dictionary\n output_dic = {\n \"Num_samples\": num_samples,\n \"Num_feats\": num_feats,\n \"Num_classes\": num_classes,\n \"classes\": classes,\n \"counts\": counts,\n \"percs\": percs\n }\n\n return output_dic", "def print_summary(metrics_list, labels_list):\n for metric, name in zip(metrics_list, labels_list):\n print('*' * 108)\n print(name)\n mean_inc_acc = []\n for i in range(metric.shape[0]):\n print('\\t', end='')\n for j in range(metric.shape[1]):\n print('{:5.2f}% '.format(100 * metric[i, j]), end='')\n if np.trace(metric) == 0.0:\n if i > 0:\n avg = 100 * metric[i, :i].mean()\n mean_inc_acc += [avg]\n print('\\tAvg.:{:5.2f}% 
'.format(avg), end='')\n else:\n avg = 100 * metric[i, :i + 1].mean()\n mean_inc_acc += [avg]\n print('\\tAvg.:{:5.2f}% '.format(avg), end='')\n print()\n print()\n\n # Computing AIA across all incremental states (thus excluding the first non-incremental state)\n print('\\tMean Incremental Acc.: {:5.2f}%'.format(np.mean(mean_inc_acc[1:])))\n print('*' * 108)", "def prettySummary(self):\n #import pandas as pd\n temp = self.framed.describe().toPandas()\n temp.iloc[1:3,1:] = temp.iloc[1:3,1:].convert_objects(convert_numeric=True)\n pd.options.display.float_format = '{:,.2f}'.format\n \n return temp", "def decision_plot(self, X, y):\n\n y = self._slice_target_index(y=y)\n\n for index in range(_n_targets(y)):\n if sklearn.utils.multiclass.type_of_target(y) == 'continuous-multioutput':\n self.fit(X, y.iloc[:, index].values.ravel(order='K'))\n else:\n self.fit(X, y)\n explainer, shap_values = self.explainer(X=X)\n shap.decision_plot(base_value=explainer.expected_value, shap_values=shap_values,\n feature_names=list(X.columns), show=self.show)", "def plot2d(data, labels, feature0, feature1):\n for i in range(0, 4000):\n if labels[i] == 0:\n female = pl.scatter(data[i, feature0], data[i, feature1], c='r', marker='o')\n elif labels[i] == 1:\n male = pl.scatter(data[i, feature0], data[i, feature1], c='b', marker='+')\n pl.legend([female, male], ['Female', 'Male'])\n pl.title('4000 Samples for Female and Male')\n pl.show()", "def descriptive_table(data, column_name, fig_size=(8, 8)):\n\n # Set up figure dimensions and sub components.\n sheet, axs = plt.subplots(4, 1, figsize=fig_size)\n\n # Heights ratio is based on the number of rows in each\n # table, this relates to the number of statistics each\n # sub table will show.\n gs = gridspec.GridSpec(4, 1, height_ratios=[2, 2, 5, 9])\n\n # Assign all subplots based on figure dimensions.\n ax0 = plt.subplot(gs[0])\n ax1 = plt.subplot(gs[1])\n ax2 = plt.subplot(gs[2])\n ax3 = plt.subplot(gs[3])\n\n title_color = '#9099A2' # Dark grey\n plt.suptitle(\n 'Descriptive Statistics',\n fontsize=16,\n color=title_color,\n x=0.25\n )\n\n table_top(data, column_name, ax0)\n table_central_tend(data, ax1)\n table_disperssion(data, ax2)\n table_distribution(data, ax3)\n\n # Adjust the spacing so the title fits correctly.\n sheet.subplots_adjust(hspace=0.2, top=0.95)", "def _print_summary(results):\n if not len(results) > 0:\n print 'No results to show in summary.'\n return\n\n table = {}\n for res in results:\n for k, v in res.iteritems():\n table.setdefault(k, []).append(v)\n print tabulate(table, headers='keys', tablefmt=\"simple\")", "def print_classification_report(y_train,y_test,y_train_preds,y_test_preds): \n try:\n for i in range(y_train.shape[-1]):\n test = (y_test.iloc[:,i].values, y_test_preds[:,i])\n train = (y_train.iloc[:,i].values, y_train_preds[:,i])\n print(f\"---------------{y_train.columns[i]}------train:-------------\")\n print(classification_report(*train))\n print(f\"----TEST---\")\n print(classification_report(*test))\n except Exception as e:\n try:\n print(f\"--------train:-------------\")\n print(classification_report(y_train, y_train_preds))\n print(f\"---------TEST--------------\")\n print(classification_report(y_test, y_test_preds))\n except Exception as e2:\n print('could not do report',e, e2)", "def draw_num_classes_graphs():\n values = [10, 50, 100, 250, 1000, 4000]\n for num_classes in values:\n print(\"Training model on {} most common classes.\".format(num_classes))\n model = create_pretrained_model(num_classes=num_classes)\n histories = 
train(model, num_classes, epochs=50)\n run_name = get_run_name(\"{}classes\".format(num_classes))\n save_learning_curves(histories, run_name)\n csv_path = os.path.join(\"plots/\", run_name, \"data.csv\")\n ut.write_csv_dict(histories,\n keys=['loss', 'acc', 'val_loss', 'val_acc'],\n filename=csv_path)", "def plot_class_distribution(data):\n classes = [r[0] for r in data]\n plt.hist(classes)\n plt.xlabel('Labels')\n plt.ylabel('Counts')\n plt.title('Histogram of class counts')\n plt.show()", "def print_summary(stim_table):\n print(\n '{:<20}{:>15}{:>15}\\n'.format('Colname', 'No. conditions', 'Mean N/cond')\n )\n for colname in stim_table.columns:\n conditions, occurrences = np.unique(\n np.nan_to_num(stim_table[colname]), return_counts = True\n )\n print(\n '{:<20}{:>15}{:>15.1f}'.format(\n colname, len(conditions), np.mean(occurrences)\n )\n )", "def printStats(m2, m3, actual, top):\n print(\"\\nThe actual categories for this page are: %s\" % \", \".join(sorted(actual)))\n print(\"\\nThe suggested categories for this page are: %s\" % \", \".join(sorted([v for v, count in top])))\n print(\"\\nBOOLEAN MEASURE = %s\" %(m2 != 0))\n print(\"FRACTIONAL MEASURE = %0.2f\" %(m2))\n print(\"HIERARCHICAL MEASURE = %0.2f\\n\" %(m3))\n print(\"*\" * 150)", "def using_testset(X_trainset, y_trainset, X_testset, y_testset):\n\n i = 0\n x_score = []\n y_score = []\n\n for i in range(1, 11):\n classifier = linear_model.SGDClassifier(max_iter=1000, tol=1e-3)\n classifier.fit(X_trainset, y_trainset)\n print(\"\\n\\n\\n\\n\\n\\nResults using test set: \\n\", classifier.score(X_testset, y_testset))\n y_predict = classifier.predict(X_testset)\n\n print(\"\\n Statistics and Confusion matrix obtained with pandas_ml: \\n\")\n cm = ConfusionMatrix(y_testset, y_predict)\n stats = cm.stats()\n\n file = open(\"linear_classification_9000_testset_\" + str(i) + \".txt\", \"w\")\n file.write(str(stats))\n file.close()\n\n # cm.print_stats()\n # print confusion matrix\n cm.plot(normalized=True)\n plt.show()", "def show_score(clf, X_test, y_test):\n y_pred = predict(clf, X_test)\n print metrics.classification_report(y_test.astype(np.int), y_pred)", "def plot_columns(dataframe, title):\n sns.boxplot(x=dataframe['category_id'], y=dataframe['price'])\n plt.title(title)\n plt.xlabel('Category ID')\n plt.ylabel('Price')\n plt.show()", "def add_boxplotlike_data(stats, y_bottom,y_mid,y_top, y_label,method_index,statistic=\"mean_SD\"):\n if statistic==\"median_IQR\":\n x1,x2,x3=tuple(np.quantile(stats, q=[.25, .50, .75]))\n elif statistic==\"mean_SD\":\n sd = np.std(stats)\n x2 = np.mean(stats)\n x1 = x2 - sd\n x3 = x2 + sd\n elif statistic==\"meanall_replicatesd\": # when joining different fitfuns\n\n x2=np.mean(np.array(stats))\n sds=[np.std(stats[i]) for i in range(len(stats))]\n sd=np.mean(sds)\n x1= x2 - sd\n x3 = x2 + sd\n # assumes fitfun is first dimension of stats\n\n else:\n raise Exception(\"statistic %s not known\"%(statistic))\n\n y_bottom[y_label][method_index].append(x1)\n y_mid[y_label][method_index].append(x2)\n y_top[y_label][method_index].append(x3)" ]
[ "0.66654617", "0.6197573", "0.59729", "0.58434284", "0.58352256", "0.58352256", "0.58282775", "0.5750776", "0.57281625", "0.57217586", "0.5707557", "0.5698241", "0.56464404", "0.56453407", "0.5628074", "0.5584864", "0.55809045", "0.55757785", "0.55118465", "0.55106294", "0.5470058", "0.5456213", "0.54472476", "0.5429421", "0.5394317", "0.53916156", "0.5390674", "0.5370067", "0.5369377", "0.5353142", "0.5345816", "0.53389853", "0.53209937", "0.53066456", "0.53051865", "0.5293627", "0.5288341", "0.5284701", "0.52824783", "0.5280031", "0.5271343", "0.5265511", "0.5244857", "0.523444", "0.52341294", "0.5228477", "0.5225729", "0.5221067", "0.5218498", "0.52108663", "0.52011913", "0.5192138", "0.5170419", "0.5169932", "0.5162582", "0.51481545", "0.5147072", "0.5145711", "0.5145324", "0.5140306", "0.51396847", "0.51289797", "0.5125844", "0.51105016", "0.51099306", "0.5103831", "0.51028883", "0.5097322", "0.509428", "0.5091868", "0.5088728", "0.50885093", "0.5088299", "0.5087506", "0.5081107", "0.5074866", "0.50711864", "0.506829", "0.506535", "0.5062582", "0.50572574", "0.5051436", "0.50488853", "0.5047484", "0.5044105", "0.5041912", "0.50291395", "0.50276", "0.50232536", "0.50223446", "0.5013219", "0.5012465", "0.50120515", "0.50111365", "0.50110143", "0.50094545", "0.5002988", "0.49939486", "0.49879888", "0.49844587" ]
0.5336147
32
Shows a simple scatterplot of X, colored by the classes in y. Technically, this shows the first three principal components of X if X has more than 3 dimensions. If X only has 2 dimensions, then just a 2-dimensional scatterplot is returned. This will not produce a plot for 1-dimensional data.
def plot_data(X, y):
    x_dim = X.shape[1]

    # Ignore 1 dimensional data
    if x_dim == 1:
        print("plot_data not gonna bother with 1 dimensional data")
        return

    # For 2 dimensional data, just plot it
    if x_dim == 2:
        plt.scatter(X[:,0], X[:,1], c=y)
        plt.show()
        return

    # For at least 4 dimensions, do PCA
    if x_dim >= 4:
        pca = PCA(n_components=3)
        pca.fit(X)
        plot_x = pca.transform(X)
    else:
        plot_x = X

    # Assumes y is either 1 or 0
    pos_idxs = np.where(y == 1)[0]
    neg_idxs = np.where(y == 0)[0]

    # Plot the now 3 dimensional data
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    Xs = plot_x[neg_idxs, :]
    ax.scatter(Xs[:,0], Xs[:,1], Xs[:,2], color='orange')
    Xs = plot_x[pos_idxs, :]
    ax.scatter(Xs[:,0], Xs[:,1], Xs[:,2], color='purple')

    # Label plot
    if x_dim >= 4:
        ax.set_title("PCA of Generated Data")
        ax.set_xlabel("1st Principal Component")
        ax.set_ylabel("2nd Principal Component")
        ax.set_zlabel("3rd Principal Component")
    else:
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])

    # Display!
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scatter_plot(self):\n\n X = self.reduce_dimension(n_components=2)\n\n plt.figure()\n plt.scatter(X[:,0], X[:,1])\n\n return plt", "def plot_dataset(X, classes):\n data = pd.DataFrame(X, columns=['x', 'y'])\n data['dataset'] = classes\n sns.lmplot('x', 'y', data=data, hue='dataset', fit_reg=False, size=10,\n palette=sns.color_palette(\"Set3\", 10),\n scatter_kws={\"s\": 75})", "def scatterPlot2DMiddle(data, title, classes):\n fig = plt.figure(figsize=(8, 8))\n colormap = np.array([\"g\", \"b\"])\n if classes is not None:\n plt.scatter(data[:, 0], data[:, 1], c=colormap[classes], s=0.2)\n else:\n plt.scatter(data[:, 0], data[:, 1], s=1)\n plt.title(title, fontsize=18)\n plt.show()", "def scatter_plot(x_train, y_train, x_test, y_test, class1, class2):\n train_c0 = x_train[y_train == 0, :]\n train_c1 = x_train[y_train == 1, :]\n test_c0 = x_test[y_test == 0, :]\n test_c1 = x_test[y_test == 1, :]\n fig, a = plt.subplots(1, 2)\n fig.set_size_inches(11, 5)\n a[0].scatter(train_c0[:, 0], train_c0[:, 1], color='green', label=class1)\n a[0].scatter(train_c1[:, 0], train_c1[:, 1], color='red', label=class2)\n a[0].legend()\n a[0].set_title('Train Set')\n a[1].scatter(test_c0[:, 0], test_c0[:, 1], color='green', label=class1)\n a[1].scatter(test_c1[:, 0], test_c1[:, 1], color='red', label=class2)\n a[1].legend()\n a[1].set_title('Test Set')\n plt.show()", "def visualise_data_set(x_arr, y_arr):\n # Instantiate a PCA object for the sake of easy visualisation\n pca = PCA(n_components=3)\n\n # Fit and transform x to visualise inside a 3D feature space\n x_visualisation = pca.fit_transform(x_arr)\n\n figure = plt.figure()\n axis = Axes3D(figure)\n\n axis.scatter(x_visualisation[y_arr == 0, 0], x_visualisation[y_arr == 0, 1], x_visualisation[y_arr == 0, 2],\n label=\"Class #0\",\n edgecolor=almost_black, facecolor=palette[0], linewidth=0.3, marker=\"o\")\n axis.scatter(x_visualisation[y_arr == 1, 0], x_visualisation[y_arr == 1, 1], x_visualisation[y_arr == 1, 2],\n label=\"Class #1\",\n edgecolor=almost_black, facecolor=palette[2], linewidth=0.3, marker=\"^\")\n axis.set_title(\"PCA to 3 components\")\n\n plt.show()", "def plot_data(x: np.ndarray, y: np.ndarray) -> None:\n\n _, ax = plt.subplots()\n scatter = ax.scatter(x[:, 0], x[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)\n legend1 = ax.legend(*scatter.legend_elements(),\n loc=\"lower right\", title=\"Classes\")\n ax.add_artist(legend1)\n plt.xlim((min(x[:, 0]) - 0.1, max(x[:, 0]) + 0.1))\n plt.ylim((min(x[:, 1]) - 0.1, max(x[:, 1]) + 0.1))", "def plot(self):\n y = self.projection\n mpl.scatter(y[:, 0], y[:, 1], c=self.data_class)\n mpl.show()", "def plot_iris_dataset(data, classes, feature_names, target_names, title = \"Grafica de las caracteristicas y sus clases\"):\n\n # Tomo las coordenadas de la matriz de datos, es decir, separo coordenadas\n # x e y de una matriz de datos que contiene pares de coordenadas\n data = np.array(data)\n x_values = data[:, 0]\n y_values = data[:, 1]\n\n # Colores que voy a utilizar para cada una de las clases\n colormap = ['orange', 'black', 'green']\n\n # Separacion de indices. Con esto, consigo la lista de los indices de la\n # clase i-esima, cada uno en un vector distinto. 
Esto lo necesitare para\n # colorear cada clase de un color y ponerle de label el nombre de la planta\n first_class_indexes = np.where(classes == 0)\n second_class_indexes = np.where(classes == 1)\n third_class_indexes = np.where(classes == 2)\n\n # Asi puedo referirme a la primera clase como splitted_indixes[0] en vez\n # de usar el nombre de la variable (para acceder a los indices en el siguiente\n # bucle)\n splitted_indixes = [first_class_indexes, second_class_indexes, third_class_indexes]\n\n # Tomo estos elementos para hacer graficas elaboradas\n fig, ax = plt.subplots()\n\n # Itero sobre las clases\n for index, target_name in enumerate(target_names):\n\n # Tomo las coordenadas de la clase index-esima\n current_x = x_values[splitted_indixes[index]]\n current_y = y_values[splitted_indixes[index]]\n\n # Muestro la clase index-esima, con su color y su etiqueta correspondiente\n ax.scatter(current_x, current_y, c=colormap[index], label=target_name)\n\n # Titulo para la grafica\n plt.title(title)\n\n # Tomo los titulos de las caracteristicas y los asigno al grafico\n # Tomo la idea de: https://scipy-lectures.org/packages/scikit-learn/auto_examples/plot_iris_scatter.html\n x_legend = feature_names[0]\n y_legend = feature_names[1]\n ax.legend()\n\n plt.show()\n wait_for_user_input()", "def lda_scatter(X,Y, dim3=True):\n # Fit data\n lda = LDA()\n lda.fit(X, Y)\n X_r2 = lda.transform(X) \n\n # 3-D plot\n if dim3:\n fig = pylab.figure()\n ax = Axes3D(fig)\n ax.scatter3D(X_r2[:,0],X_r2[:,1],X_r2[:,2], c=Y)\n \n #2-D plot\n else:\n plt.scatter(X_r2[:,0], X_r2[:,1], c= Y )", "def visualize_data(data):\n\n # Instantiate a PCA object for the sake of easy visualisation\n pca = PCA(n_components=2)\n\n # Fit and transform x to visualise inside a 2D feature space\n x_vis = pca.fit_transform(data[data.columns[:-1]])\n y = data['Tumor'].as_matrix()\n\n # Plot the original data\n # Plot the two classes\n palette = sns.color_palette()\n\n plt.scatter(x_vis[y == 0, 0], x_vis[y == 0, 1], label=\"Normal\", alpha=0.5,\n edgecolor=ALMOST_BLACK, facecolor=palette[0], linewidth=0.15)\n plt.scatter(x_vis[y == 1, 0], x_vis[y == 1, 1], label=\"Tumor\", alpha=0.5,\n edgecolor=ALMOST_BLACK, facecolor=palette[2], linewidth=0.15)\n\n plt.legend()\n plt.show()", "def scatter_plot(x, y):\n mpl_fig = plt.figure()\n plt.scatter(x, y)\n return get_div_from_data(mpl_fig)", "def scatterPlot2DBig(data, title, classes):\n fig = plt.figure(figsize=(15, 15))\n colormap = np.array([\"g\", \"b\"])\n\n if classes is not None:\n plt.scatter(data[:, 0], data[:, 1], c=colormap[classes])\n else:\n plt.scatter(data[:, 0], data[:, 1])\n plt.title(title, fontsize=18)\n plt.show()", "def plot_classification(X,\n y,\n y_true,\n y_pred,\n metrics=(\"acc\", \"sen\", \"spe\"),\n fig_size=(12, 5),\n fig_show=True,\n save_as=\"figure.pdf\",\n x_label=\"x\",\n y_label=\"y\",\n **plot_kwargs):\n\n # Convert the input data to pd.Series\n if not isinstance(X, pd.Series):\n X = pd.Series(X.reshape((len(X), )))\n if not isinstance(y, pd.Series):\n y = pd.Series(y.reshape((len(y), )))\n if not isinstance(y_true, pd.Series):\n y_true = pd.Series(y_true.reshape((len(y_true), )))\n if not isinstance(y_pred, pd.Series):\n y_pred = pd.Series(y_pred.reshape((len(y_pred), )))\n\n # Compute the classification metrics\n computed_metrics = [(metric, round(classification_metric(metric, y_true, y_pred), 2)) for metric in metrics]\n\n # Prepare the temporary DataFrame\n df = pd.DataFrame({\"X\": X, \"y\": y, \"y_true\": y_true, \"y_pred\": y_pred, \"matches\": 
y_true == y_pred})\n\n # Create the figure\n fig = plt.figure(figsize=fig_size)\n\n # Plot the true labels scatter-plot\n ax = fig.add_subplot(1, 2, 1)\n sns.scatterplot(x=\"X\", y=\"y\", hue=\"y_true\", data=df, **plot_kwargs)\n\n ax.set_title(\"Ground truth\")\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n plt.tight_layout()\n\n # Plot the predicted labels scatter-plot\n ax = fig.add_subplot(1, 2, 2)\n sns.scatterplot(x=\"X\", y=\"y\", hue=\"y_pred\", size=\"matches\", data=df, **plot_kwargs)\n\n ax.set_title(\"Predicted ({})\".format(\" \".join([\"{} = {},\".format(m, v) for m, v in computed_metrics])))\n ax.set_xlabel(x_label)\n ax.set_ylabel(\"\")\n plt.tight_layout()\n\n # Store the figure\n if save_as:\n plt.savefig(save_as)\n\n # Show the graph (if enabled)\n if fig_show:\n plt.show()\n else:\n plt.close()", "def plot(model, samples):\n # compute responsiblity values\n resp = model.predict_proba(samples)\n\n # plot\n plt.axis('equal')\n plt.scatter(samples[:,0], samples[:,1], c=resp)\n plt.show()", "def vis(X, y = None, vis_noise = False):\n plt.figure()\n\n if y is None:\n plt.scatter(*X.T, s=1)\n else:\n color_noise = (1,1,1)\n if vis_noise:\n color_noise = (0.75, 0.75, 0.75)\n\n color_palette = sns.color_palette('deep', np.max(y).astype(int)+1)\n cluster_colors = [color_palette[y_i] if y_i >= 0\n else color_noise\n for y_i in y]\n\n plt.scatter(*X.T, s=1, c=cluster_colors)\n\n plt.show()", "def cluster_plot(self):\r\n train = StandardScaler().fit_transform(self.X)\r\n pca = PCA(n_components=3)\r\n pca_component = pca.fit_transform(self.X)\r\n fig = plt.figure(figsize=(10,8))\r\n sns.set_palette(sns.color_palette(\"cubehelix\", 8))\r\n ax = Axes3D(fig)\r\n ax.scatter(pca_component[:,0].tolist(),pca_component[:,1].tolist(),pca_component[:,2].tolist(),c=self.labels,marker='v')\r\n ax.legend()\r\n plt.show()", "def plot_dataset(features, labels, nb_classes: int) -> None:\n sns.scatterplot(x=features[:, 0], y=features[:, 1], hue=labels, markers=True)\n plt.title(f'Data from {nb_classes} classes')\n save_plot('mock_dataset')", "def scatterPlot2():\n N = 100\n x = np.random.rand(N)\n y = np.random.rand(N)\n colors = np.random.rand(N)\n\n plt.scatter(x, y, c=colors, alpha=0.5)\n plt.show()", "def plot(self):\n\t\tif (2 <= len(self.X) <= 3):\n\t\t\tvDataFrame(self.name, self.cursor).scatter(columns = self.X, catcol = \"dbscan_cluster\", max_cardinality = 100, max_nb_points = 10000)\n\t\telse:\n\t\t\traise ValueError(\"Clustering Plots are only available in 2D or 3D\")", "def plot_coefs(results):\n coefs_noisy = pd.concat([\n arr_to_df(results['obj_noisy'], n_arr, 'obj'),\n vec_to_df(results['dist_obj'], n_arr, 'obj'),\n arr_to_df(results['pos_noisy'], n_arr, 'pos'),\n vec_to_df(results['dist_pos'], n_arr, 'pos'),\n arr_to_df(results['neg_noisy'], n_arr, 'neg'),\n vec_to_df(results['dist_neg'], n_arr, 'neg')\n ])\n\n xlim = (min(n_arr), max(n_arr))\n ylim = (-1.1, 1.1)\n\n g = sns.FacetGrid(coefs_noisy, row = 'id', col = 'component', xlim = xlim,\n ylim = ylim)\n g.map(sns.pointplot, 'n', 'value', order = n_arr)\n g.set_xticklabels(rotation = 45)\n\n for i, val in enumerate(results['obj_true']):\n ax = g.axes[0, i]\n ax.hlines(val, *ax.get_xlim())\n for i, val in enumerate(results['pos_true']):\n ax = g.axes[1, i]\n ax.hlines(0, *ax.get_xlim(), linestyle = '--', color = 'red')\n ax.hlines(val, *ax.get_xlim())\n for i, val in enumerate(results['neg_true']):\n ax = g.axes[2, i]\n ax.hlines(0, *ax.get_xlim(), linestyle = '--', color = 'red')\n ax.hlines(val, *ax.get_xlim())", "def 
plot_data(x):\n if DATA_2D:\n plt.scatter(x[:, 0], x[:, 1])\n plt.show()\n else:\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x[:, 0], x[:, 1], x[:, 2])\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n plt.show()", "def generate_pca(X, y, cols, n_components, **kwargs):\n\n pca = PCA(n_components, **kwargs)\n pca_result = pca.fit_transform(X)\n pca_df = pd.DataFrame(pca_result, columns=cols, index=X.index)\n pca_df['label'] = y\n pca_plot = ggplot(pca_df, aes(x=\"PCA-1\", y=\"PCA-2\", color='label') ) + geom_point(size=100,alpha=0.8) + ggtitle(\"First and Second Principal Components colored by class\")\n return pca_plot", "def scatterplot(x, y):\n plt.figure(figsize=(14, 8), dpi=80)\n plt.scatter(x[:, 1], y, s=30, c='r', marker='x', linewidths=1)\n plt.grid(True)\n plt.xlim(4, 24)\n plt.ylabel('Profit ($10k)')\n plt.xlabel('Population (10k)')\n plt.show()\n plt.close()", "def scatter(xarr, yarr, xlbl=None, ylbl=None, pw=600, ph=400):\n p = figure(plot_width=pw, plot_height=ph)\n # Model\n p.circle(xarr, yarr, color='black')#, legend='data')\n # Label\n if xlbl is not None:\n p.xaxis.axis_label = xlbl\n if ylbl is not None:\n p.yaxis.axis_label = ylbl\n # Show\n show(p)", "def plot_cmatrix_wrapper(y_true, y_pred, classes, **kwargs):\n cm = confusion_matrix(y_true, y_pred)\n plot_confusion_matrix(cm, classes, **kwargs)", "def plot_data(self):\n if hasattr(self,'data'):\n plt.scatter(*self.data.T)\n plt.show()\n else:\n raise Exception('No 2d data of the instance has been loaded')", "def _bokeh_confusion_scatter(\n y_true: np.ndarray,\n y_pred: np.ndarray,\n class_names: Sequence[str],\n title_rows: Sequence[str],\n x_label_rotation: Union[str, float] = \"horizontal\",\n y_label_rotation: Union[str, float] = \"vertical\",\n) -> Callable[[], Figure]:\n if len(y_true) != len(y_pred):\n raise ValueError(\"y_true and y_pred must have the same length!\")\n\n def figure() -> Figure:\n\n p = plotting.figure(\n x_range=(-0.5, -0.5 + len(class_names)),\n y_range=(-0.5, -0.5 + len(class_names)),\n plot_height=350,\n plot_width=350,\n tools=TOOLS,\n toolbar_location=TOOLBAR_LOCATION,\n match_aspect=True,\n )\n\n def noise() -> np.ndarray:\n return (np.random.beta(1, 1, size=len(y_true)) - 0.5) * 0.6\n\n p.scatter(\n x=y_true + noise(),\n y=y_pred + noise(),\n size=scatter_plot_circle_size(\n num_points=len(y_true),\n biggest=4.0,\n smallest=1.0,\n use_smallest_when_num_points_at_least=5000,\n ),\n color=DARK_BLUE,\n fill_alpha=SCATTER_CIRCLES_FILL_ALPHA,\n line_alpha=SCATTER_CIRCLES_LINE_ALPHA,\n )\n\n add_title_rows(p, title_rows)\n apply_default_style(p)\n\n p.xaxis.axis_label = \"Ground Truth\"\n p.yaxis.axis_label = \"Prediction\"\n\n arange = np.arange(len(class_names))\n\n p.xaxis.ticker = arange\n p.yaxis.ticker = arange\n\n p.xaxis.major_label_overrides = {i: name for i, name in enumerate(class_names)}\n p.yaxis.major_label_overrides = {i: name for i, name in enumerate(class_names)}\n\n p.xaxis.major_label_orientation = x_label_rotation\n p.yaxis.major_label_orientation = y_label_rotation\n\n # grid between classes, not at classes\n p.xgrid.ticker = arange[0:-1] + 0.5\n p.ygrid.ticker = arange[0:-1] + 0.5\n\n p.xgrid.grid_line_width = 3\n p.ygrid.grid_line_width = 3\n\n # prevent panning to empty regions\n p.x_range.bounds = (-0.5, -0.5 + len(class_names))\n p.y_range.bounds = (-0.5, -0.5 + len(class_names))\n\n return p\n\n return figure", "def pairplot(data, target_col, columns=None, scatter_alpha='auto',\n scatter_size='auto'):\n if columns is None:\n 
columns = data.columns.drop(target_col)\n n_features = len(columns)\n fig, axes = plt.subplots(n_features, n_features,\n figsize=(n_features * 3, n_features * 3))\n axes = np.atleast_2d(axes)\n for ax, (i, j) in zip(axes.ravel(),\n itertools.product(range(n_features), repeat=2)):\n legend = i == 0 and j == n_features - 1\n if i == j:\n class_hists(data, columns[i], target_col, ax=ax.twinx())\n else:\n discrete_scatter(data[columns[j]], data[columns[i]],\n c=data[target_col], legend=legend, ax=ax,\n alpha=scatter_alpha,\n s=scatter_size)\n if j == 0:\n ax.set_ylabel(columns[i])\n else:\n ax.set_ylabel(\"\")\n ax.set_yticklabels(())\n if i == n_features - 1:\n ax.set_xlabel(_shortname(columns[j]))\n else:\n ax.set_xlabel(\"\")\n ax.set_xticklabels(())\n despine(fig)\n if n_features > 1:\n axes[0, 0].set_yticks(axes[0, 1].get_yticks())\n axes[0, 0].set_ylim(axes[0, 1].get_ylim())\n return axes", "def plot_scatter(x, y):\n\tplt.scatter(x, y)", "def visualise_two_data_sets(x_arr, y_arr, x_arr_two, y_arr_two):\n # Instantiate a PCA object for the sake of easy visualisation\n pca = PCA(n_components=3)\n\n # Fit and transform x to visualise inside a 3D feature space\n x_visualisation = pca.fit_transform(x_arr)\n\n figure = plt.figure()\n axis = Axes3D(figure)\n\n axis.scatter(x_visualisation[y_arr == 0, 0], x_visualisation[y_arr == 0, 1], x_visualisation[y_arr == 0, 2],\n label=\"Class #0\",\n edgecolor=almost_black, facecolor=palette[0], linewidth=0.3, marker=\"o\")\n axis.scatter(x_visualisation[y_arr == 1, 0], x_visualisation[y_arr == 1, 1], x_visualisation[y_arr == 1, 2],\n label=\"Class #1\",\n edgecolor=almost_black, facecolor=palette[2], linewidth=0.3, marker=\"^\")\n axis.set_title(\"PCA to 3 components - data-set 1\")\n\n x_visualisation_two = pca.transform(x_arr_two)\n figure_two = plt.figure()\n axis_two = Axes3D(figure_two)\n axis_two.scatter(x_visualisation_two[y_arr_two == 0, 0], x_visualisation_two[y_arr_two == 0, 1],\n x_visualisation_two[y_arr_two == 0, 2],\n label=\"Class #0\", edgecolor=almost_black,\n facecolor=palette[0], linewidth=0.3, marker=\"o\")\n axis_two.scatter(x_visualisation_two[y_arr_two == 1, 0], x_visualisation_two[y_arr_two == 1, 1],\n x_visualisation_two[y_arr_two == 1, 2],\n label=\"Class #1\", edgecolor=almost_black,\n facecolor=palette[2], linewidth=0.3, marker=\"^\")\n axis_two.set_title(\"PCA to 3 components - data-set 2\")\n\n plt.show()", "def simple_scatter(x,y,title=None, xlabel=None, ylabel=None, colours=None):\n fig = plt.scatter(x, y, c = colours, s = 5)\n if xlabel:\n plt.xlabel(xlabel)\n if ylabel:\n plt.ylabel(ylabel)\n if title:\n plt.title(title)\n x_lines = [0, -1, 1]\n y_lines = [1.3]\n for a in range (0,len(x_lines)):\n plt.axvline(x_lines[a], color='gray', linestyle='dashed', linewidth=1)\n for b in range(0,len(y_lines)):\n plt.axhline(y_lines[b], color='gray', linestyle='dashed', linewidth=1) # p-value of 0.05 is considered significant\n plt.grid(True)\n fig = plt.gcf()\n return fig\n #plt.show(fig)", "def plot_data(data, labels=None, markers = ['o', 's']):\n if labels is None:\n plt.scatter(data[:, 0], data[:, 1], c='b', s = 80, marker = markers[0])\n\n else:\n classes = np.sort(np.unique(labels))\n n_classes = classes.shape[0]\n color_blind_list = sns.color_palette(\"colorblind\", n_classes)\n sns.set_palette(color_blind_list)\n\n for i, l in enumerate(classes):\n plt.scatter(data[labels == l, 0],\n data[labels == l, 1],\n c=color_blind_list[i],\n s=80,\n marker=markers[i])", "def visualize_2d_data(X, y):\n assert len(X.shape) == 
len(y.shape) == 2, f\"Input/output pairs must be 2D-arrays. X: {X.shape}, y: {y.shape}\"\n (N, D) = X.shape\n assert N == y.shape[0], f\"Number of samples must match for input/output pairs. X: {N}, y: {y.shape[0]}\"\n assert D == 2, f\"Expected 2 features. Got: {D}\"\n assert y.shape[1] == 1, f\"Y must be a column vector. Got: {y.shape}\"\n\n # ====================================================\n # TODO: Implement your solution within the box\n plt.scatter(X[:, 0], X[:, 1], marker='o', c=y, s=len(X[:]), edgecolor='k')\n plt.show()\n # ====================================================", "def plot_scatter_points(self):\n self.plot(1)", "def visualize_2d_data(X, y):\n assert len(X.shape) == len(y.shape) == 2, f\"Input/output pairs must be 2D-arrays. X: {X.shape}, y: {y.shape}\"\n (N, D) = X.shape\n assert N == y.shape[0], f\"Number of samples must match for input/output pairs. X: {N}, y: {y.shape[0]}\"\n assert D == 2, f\"Expected 2 features. Got: {D}\"\n assert y.shape[1] == 1, f\"Y must be a column vector. Got: {y.shape}\"\n\n # ====================================================\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.grid(True, linestyle='-', color='0.75')\n\n x_v = X[:, 0]\n y_h = X[:, 1]\n z = y\n # scatter with colormap mapping to z value\n ax.scatter(x_v, y_h, s=20, c=z, marker='o', cmap=cm.jet)\n\n plt.show()\n\n # ====================================================", "def scatterplot(self, x = \"Predictor\", y = \"Response\", color = None, jitter = False, jitter_sd = .1,\n marg_x = None, marg_y = None, trendline = None, opacity = 1, template = \"ggplot2\",\n has_title = True, title = None):\n x_clean, df_clean = clean_varname(self._df, var = x)\n y_clean, df_clean = clean_varname(df_clean, var = y)\n\n if jitter:\n df_clean[x_clean] = df_clean[x_clean] + np.random.normal(0, jitter_sd, size=len(df))\n df_clean[y_clean] = df_clean[y_clean] + np.random.normal(0, jitter_sd, size=len(df))\n\n if color:\n color_clean, df_clean = clean_varname(df_clean, var = color)\n else:\n color_clean = color \n\n if has_title:\n if not title:\n title = f\"Scatter Plot of {x_clean} and {y_clean}\"\n \n fig = px.scatter(df_clean, x=x_clean, y=y_clean, color=color_clean, title = title,\n marginal_x = marg_x, marginal_y = marg_y, trendline = trendline, template = template, opacity = opacity)\n return fig", "def make_scatter():\n x = np.linspace(4, 8, 6)\n y = np.sin(x)\n plt.plot(x, y, 'o', color='black');\n plt.show()", "def plot_data (features : list, actual_labels : list, classified_labels : list = None,\n extra_lines : list = None, normalize=False):\n samples = np.array(features)\n if normalize:\n norms = np.linalg.norm(samples, axis=1)\n l=[]\n for i, s in enumerate(samples):\n l.append(s/norms[i])\n samples = np.array(l)\n \n plt.figure(figsize=(8, 8))\n for (idx_case, ((actual, classified), marker, color)) in enumerate(zip(cases, markers, colors)):\n mask = np.logical_and(np.equal(actual_labels, actual), \n np.equal(actual if classified_labels == None else classified_labels, classified))\n if not np.any(mask): continue\n plt.scatter(\n samples[mask, 0], samples[mask, 1],\n label = f\"Class {actual}\" if classified_labels == None else f\"Was {actual}, classified {classified}\",\n marker = marker, s = 300, c = [color],\n )\n # Add the lines to show the true classes boundaries, if provided\n if extra_lines != None:\n for line in extra_lines:\n plt.plot(line[0], line[1], color = 'gray')\n plt.legend()", "def scatter(x, colors):\n \n # We choose a color palette with seaborn.\n 
palette = np.array(sns.color_palette(\"hls\", 2))\n\n # We create a scatter plot.\n f = plt.figure(figsize=(10, 8))\n ax = plt.subplot(aspect='equal')\n sc = ax.scatter(x[:,0], x[:,1], lw=0, s=40,\n c=palette[colors.astype(np.int)])\n \n ax.axis('off') # the axis will not be shown\n ax.axis('tight') # makes sure all data is shown\n \n # set title\n plt.title(\"Featurespace Visualization Titanic\", fontsize=25)\n \n # legend with color patches\n survived_patch = mpatches.Patch(color=palette[1], label='Survived')\n died_patch = mpatches.Patch(color=palette[0], label='Died')\n plt.legend(handles=[survived_patch, died_patch], fontsize=20, loc=1)\n\n return f, ax, sc", "def pca_visual(X_data, Y_data, dict_CLnames, comp=False, clusters=None,):\n pca = PCA(2) # project from 72 to 2 dimensions\n X_pca = pca.fit_transform(X_data)\n\n #encode class labels into numeric values\n le = preprocessing.LabelEncoder()\n label_encoder = le.fit(Y_data)\n y = label_encoder.transform(Y_data)\n\n Xax=X_pca[:,0] #First Principal Component\n Yax=X_pca[:,1] #Second Principal Component\n labels= y\n cdict={0:'red',1:'green'} #dict with colors\n labl=dict_CLnames\n labl_cl = {0:'cluster 1',1:'cluster 2'}\n if comp == False:\n fig,ax=plt.subplots(figsize=(7,5))\n fig.patch.set_facecolor('white')\n for l in np.unique(labels):\n ix=np.where(labels==l)\n ax.scatter(Xax[ix],Yax[ix],c=cdict[l],s=40, label=labl[l])\n # for loop ends\n plt.xlabel(\"First Principal Component\",fontsize=14)\n plt.ylabel(\"Second Principal Component\",fontsize=14)\n plt.legend()\n plt.show()\n \n if comp == True:\n fig,axs =plt.subplots(nrows=1, ncols=2, figsize=(15,5))\n fig.patch.set_facecolor('white')\n ax = axs[0]\n for l in np.unique(labels):\n ix=np.where(labels==l)\n ax.scatter(Xax[ix],Yax[ix],c=cdict[l],s=40, label=labl[l])\n # for loop ends\n ax.set_xlabel(\"First Principal Component\",fontsize=14)\n ax.set_ylabel(\"Second Principal Component\",fontsize=14)\n ax.set_title('Original data')\n ax.legend()\n\n \n ax = axs[1]\n for l in np.unique(clusters):\n ix=np.where(clusters==l)\n ax.scatter(Xax[ix],Yax[ix],c=cdict[l],s=40, label=labl_cl[l])\n # for loop ends\n ax.set_xlabel(\"First Principal Component\",fontsize=14)\n ax.set_ylabel(\"Second Principal Component\",fontsize=14)\n ax.set_title('Clustered data')\n ax.legend()\n plt.show()", "def inclass1():\n import numpy as np\n import matplotlib.pyplot as plt\n\n\n N = 50\n x = np.random.rand(N)\n y = np.random.rand(N)\n colors = np.random.rand(N)\n area = np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radiuses\n\n plt.scatter(x, y, s=area, c=colors, alpha=0.5)\n plt.show()", "def plot_2d(data, y=None, w=None, alpha_choice=1):\n\n k = np.unique(y).shape[0]\n color_blind_list = sns.color_palette(\"colorblind\", k)\n sns.set_palette(color_blind_list)\n if y is None:\n labs = [\"\"]\n idxbyclass = [range(data.shape[0])]\n else:\n labs = np.unique(y)\n idxbyclass = [np.where(y == labs[i])[0] for i in range(len(labs))]\n\n for i in range(len(labs)):\n plt.scatter(data[idxbyclass[i], 0], data[idxbyclass[i], 1],\n c=color_blind_list[i], s=80, marker=symlist[i])\n plt.ylim([np.min(data[:, 1]), np.max(data[:, 1])])\n plt.xlim([np.min(data[:, 0]), np.max(data[:, 0])])\n mx = np.min(data[:, 0])\n maxx = np.max(data[:, 0])\n if w is not None:\n plt.plot([mx, maxx], [mx * -w[1] / w[2] - w[0] / w[2],\n maxx * -w[1] / w[2] - w[0] / w[2]],\n \"g\", alpha=alpha_choice)", "def plot_2d(data, y=None, w=None, alpha_choice=1):\n\n k = np.unique(y).shape[0]\n color_blind_list = 
sns.color_palette(\"colorblind\", k)\n sns.set_palette(color_blind_list)\n if y is None:\n labs = [\"\"]\n idxbyclass = [range(data.shape[0])]\n else:\n labs = np.unique(y)\n idxbyclass = [np.where(y == labs[i])[0] for i in range(len(labs))]\n\n for i in range(len(labs)):\n plt.scatter(data[idxbyclass[i], 0], data[idxbyclass[i], 1],\n c=color_blind_list[i], s=80, marker=symlist[i])\n plt.ylim([np.min(data[:, 1]), np.max(data[:, 1])])\n plt.xlim([np.min(data[:, 0]), np.max(data[:, 0])])\n mx = np.min(data[:, 0])\n maxx = np.max(data[:, 0])\n if w is not None:\n plt.plot([mx, maxx], [mx * -w[1] / w[2] - w[0] / w[2],\n maxx * -w[1] / w[2] - w[0] / w[2]],\n \"g\", alpha=alpha_choice)", "def plot_data(self, classA, classB):\n plt.scatter(classA[:,0], classA[:,1], color='cyan', alpha=0.7, s=7)\n plt.scatter(classB[:,0], classB[:,1], color='purple', alpha=0.7, s=7)\n plt.axis('tight')\n plt.show()", "def scatter(data, target, codes=None, title=None, axes_names=None):\n nclusters = len(target[:, 0])\n naxes = len(target[0, :])\n # Clustering graphs\n if title is None:\n title = \"Clustering\"\n if axes_names is None:\n axes_names = [\"La1\", \"La2\", \"La3\"]\n if codes is not None:\n colors = np.multiply(codes, np.ceil(255 / nclusters))\n else:\n colors = None\n\n if naxes == 2:\n fig = plt.figure()\n fig.suptitle(title)\n ax = fig.add_subplot(111)\n if colors is None:\n ax.scatter(data[:, 0], data[:, 1], alpha=0.7)\n else:\n ax.scatter(data[:, 0], data[:, 1], c=colors, alpha=0.7)\n ax.scatter(target[:, 0], target[:, 1], alpha=0.5, s=100)\n ax.set_xlabel(axes_names[0])\n ax.set_ylabel(axes_names[1])\n for i in range(nclusters):\n ax.annotate(i + 1, (target[i, 0], target[i, 1]))\n plt.draw()\n if naxes == 3:\n fig = plt.figure()\n fig.suptitle(title)\n ax = fig.gca(projection=\"3d\")\n if colors is None:\n ax.scatter(data[:, 0], data[:, 1], data[:, 2], alpha=0.7)\n else:\n ax.scatter(data[:, 0], data[:, 1], data[:, 2], c=colors, alpha=0.7, s=10)\n ax.scatter(target[:, 0], target[:, 1], target[:, 2], alpha=0.3, s=75)\n ax.set_xlabel(axes_names[0])\n ax.set_ylabel(axes_names[1])\n ax.set_zlabel(axes_names[2])\n for i in range(nclusters):\n ax.text(target[i, 0], target[i, 1], target[i, 2], i + 1)\n plt.draw()", "def plot(self, x_train, x_test=None, cmap='jet', s=15, title=None, fit=False):\n if self.comet_exp is not None:\n # If comet_exp is set, use different backend to avoid display errors on clusters\n matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\n import matplotlib.pyplot as plt\n\n if not fit:\n z_train = self.transform(x_train)\n else:\n z_train = self.fit_transform(x_train)\n\n y_train = x_train.targets.numpy()\n\n if z_train.shape[1] != 2:\n raise Exception('Can only plot 2D embeddings.')\n\n plt.figure(figsize=(3.5, 3.5))\n\n if title is not None:\n plt.title(title, fontsize=12)\n plt.xticks([])\n plt.yticks([])\n\n if x_test is None:\n plt.scatter(*z_train.T, c=y_train, cmap=cmap, s=s)\n else:\n # Train data is grayscale and Test data is colored\n z_test = self.transform(x_test)\n y_test = x_test.targets.numpy()\n plt.scatter(*z_train.T, c='grey', s=s / 10, alpha=.2)\n plt.scatter(*z_test.T, c=y_test, cmap=cmap, s=s)\n\n if self.comet_exp is not None:\n self.comet_exp.log_figure(figure=plt, figure_name=title)\n plt.clf()\n else:\n plt.show()", "def visclassifier(fun,xTr,yTr):\n\n yTr = np.array(yTr).flatten()\n \n symbols = [\"ko\",\"kx\"]\n marker_symbols = ['o', 'x']\n mycolors = [[0.5, 0.5, 1], [1, 0.5, 0.5]]\n classvals = np.unique(yTr)\n\n 
plt.figure()\n\n res=300\n xrange = np.linspace(min(xTr[:, 0]), max(xTr[:, 0]),res)\n yrange = np.linspace(min(xTr[:, 1]), max(xTr[:, 1]),res)\n pixelX = repmat(xrange, res, 1)\n pixelY = repmat(yrange, res, 1).T\n\n xTe = np.array([pixelX.flatten(), pixelY.flatten()]).T\n\n testpreds = fun(xTe)\n Z = testpreds.reshape(res, res)\n # Z[0,0] = 1 # optional: scale the colors correctly\n plt.contourf(pixelX, pixelY, np.sign(Z), colors=mycolors)\n\n for idx, c in enumerate(classvals):\n plt.scatter(xTr[yTr == c,0],\n xTr[yTr == c,1],\n marker=marker_symbols[idx],\n color='k'\n )\n\n plt.axis('tight')\n plt.show()", "def plot_synthetic_data(X_non_sensitive, y, sensitive_feat_array):\n num_to_draw = 200 # only draw a small number of points to avoid clutter\n x_draw = X_non_sensitive[:num_to_draw]\n y_draw = y[:num_to_draw]\n sensitive_feat_draw = sensitive_feat_array[:num_to_draw]\n\n X_s_0 = x_draw[sensitive_feat_draw == 0.0]\n X_s_1 = x_draw[sensitive_feat_draw == 1.0]\n y_s_0 = y_draw[sensitive_feat_draw == 0.0]\n y_s_1 = y_draw[sensitive_feat_draw == 1.0]\n plt.figure(figsize=(8, 6))\n plt.scatter(X_s_0[y_s_0==1.0][:, 0], X_s_0[y_s_0==1.0][:, 1], color='green', marker='x', s=30, linewidth=1.5, label= \"Unprivileged (female), Hired\")\n plt.scatter(X_s_0[y_s_0==-1.0][:, 0], X_s_0[y_s_0==-1.0][:, 1], color='red', marker='x', s=30, linewidth=1.5, label = \"Unprivileged (female), Not Hired\")\n plt.scatter(X_s_1[y_s_1==1.0][:, 0], X_s_1[y_s_1==1.0][:, 1], color='green', marker='o', facecolors='none', s=30, label = \"Privileged (male), Hired\")\n plt.scatter(X_s_1[y_s_1==-1.0][:, 0], X_s_1[y_s_1==-1.0][:, 1], color='red', marker='o', facecolors='none', s=30, label = \"Privileged (male), Not Hired\")\n \n plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')\n plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')\n plt.legend(loc=2, fontsize=15)\n plt.xlim((-15,10))\n plt.ylim((-10,15))\n plt.xlabel(\"Prior Income\")\n plt.ylabel(\"Years of Work Experience\")\n plt.show()", "def plotCifar():\n classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n num_classes = len(classes)\n samples_per_class = 7\n for y, cls in enumerate(classes):\n #print(type(y)) #<class 'int'>\n #print(y) # 0 to 9 - 10 Classes \n idxs = np.flatnonzero(y_train == y) ##FOO_BAR_TBD--\n #print(type(idxs)) # <class 'numpy.ndarray'> \n #Output array, containing the indices of the elements of a.ravel() that are non-zero.\n #print(idxs) #[ 29 30 35 ... 
49941 49992 49994]\n idxs = np.random.choice(idxs, samples_per_class, replace=False)\n for i, idx in enumerate(idxs):\n plt_idx = i * num_classes + y + 1\n plt.subplot(samples_per_class, num_classes, plt_idx)\n plt.imshow(X_train[idx].astype('uint8'))\n plt.axis('off')\n if i == 0:\n plt.title(cls)\n plt.show()", "def scatter_plot(self):\n sns.set_style('whitegrid')\n \n fig, ax = plt.subplots()\n cmap = sns.cubehelix_palette(8, start=.5, rot=-.75, as_cmap=True)\n \n \n plt.title('Benchmark and Trial Samples', fontsize=16)\n \n ax.xaxis.set_tick_params(labelsize=16, direction='inout', length=6, width=1, color='gray')\n ax.yaxis.set_tick_params(labelsize=16, direction='inout', length=6, width=1, color='gray')\n \n ax.scatter(self.x_benchmark[:,0], self.x_benchmark[:,1], c='magenta',\n alpha=0.5, marker='x',label='B sample')\n ax.scatter(self.x_trial[:,0],self.x_trial[:,1], c='blue',\n alpha=0.2, marker='s',label='T sample')\n \n plt.grid(True)\n plt.legend(loc='upper left', fontsize=14)\n # plt.show()\n plt.savefig(\"pyplot.png\")", "def show_scatter(self):\n plt.scatter(self.a1[:, 0], self.a1[:, 1], c=\"red\", alpha=0.5, s=10)\n plt.scatter(self.a2[:, 0], self.a2[:, 1], c=\"blue\", alpha=0.5, s=10)\n plt.scatter(0, 0, marker=\"D\", c=\"black\", alpha=0.8)\n plt.scatter(2, 2, marker=\"D\", c=\"black\", alpha=0.8)\n plt.show()", "def visualize_features_according_class(features: np.array, labels: np.array):\n # check if labels and features formats are correct\n if len(features.shape) != 2:\n raise AttributeError('Provided features must be 2-dimensional. Got %i.' % len(features.shape))\n if len(labels.shape) > 2:\n raise AttributeError('Provided labels must be 2- or 1-dimensional. Got %i.' % len(labels.shape))\n # reshape labels if they are 2-dimensional\n if len(labels.shape) == 2:\n labels = labels.reshape((-1,))\n # transform data via TSNE\n tsne = TSNE(n_components=2)\n features = tsne.fit_transform(features)\n # create support variables to create graph\n num_classes = np.unique(labels).shape[0]\n colors = [i for i in range(num_classes)]\n class_names = ['Neutral', 'Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise']\n # creating graph\n plt.figure(figsize=(10, 8))\n colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']\n for i, c, label in zip(range(num_classes), colors, class_names):\n plt.scatter(features[labels == i, 0], features[labels == i, 1], c=c, label=label)\n plt.legend()\n plt.show()", "def showData(data, labels, truelabels=None):\n\n n = data.shape[0]\n colors = np.dot(labels,np.arange(2)).reshape([-1]) # for color-coding labels\n\n plt.figure()\n plt.scatter(data[:,0],data[:,1], c=colors, s=40)\n\n\n # identify incorrectly labeled examples with an x colored with the correct class\n if truelabels is not None:\n incorrect_idx = []\n truecolors = np.dot(truelabels,np.arange(2)).reshape([-1])\n for i in range(n):\n if not isgoodprediction(labels[i,:], truelabels[i,:]):\n incorrect_idx.append(i)\n plt.scatter( data[incorrect_idx,0], data[incorrect_idx,1],s=50, c='k', marker='x',lw=5 ,label='misclassified')\n\n plt.legend()\n plt.axes().set_aspect('equal', 'datalim')\n plt.show()", "def plot(X, clusters, cluster_num):\n centroids = []\n X = np.asarray(X)\n for i in range(0,cluster_num):\n centroids.append(clusters[i].centroid)\n \n np_centroids = np.asarray(centroids)\n \n color = [\"g\", \"r\", \"b\", \"c\", \"m\", \"b\"]\n \n fig = figure()\n ax = fig.gca(projection='3d')\n for i in range(len(X)):\n ax.scatter(X[i][0], X[i][1], X[i][2], c=color)\n \n ax.scatter(np_centroids[:, 
0],np_centroids[:, 1],\n np_centroids[:, 2], marker = \"x\", s=150,\n linewidths = 5, zorder = 100, c=color\n )", "def plotData(X, y):\n plt.figure()\n\n # Find Indices of Positive and Negative Examples\n pos = np.where(y == 1, True, False).flatten()\n neg = np.where(y == 0, True, False).flatten()\n\n # Plot Examples\n plt.plot(X[pos, 0], X[pos, 1], 'k+', linewidth=1, markersize=7)\n plt.plot(X[neg, 0], X[neg, 1], 'ko', color='y', markersize=7)", "def plot_class_scatter(rows_of_class, class_name, max_value, min_value):\n fig = plt.figure(figsize=(30, 5))\n fig.suptitle(\"Components for class {}\".format(class_name))\n function_to_channel_plots = {function_name: [] for function_name in [\"Mean\", \"Min\", \"Max\"]}\n n_plots = 1\n # For each function\n for function_idx, function_name in enumerate(function_to_channel_plots):\n # For each channel\n for channel_idx in range(0, 4):\n plot = fig.add_subplot(1, 14, n_plots + function_idx)\n channel_number = ((n_plots - 1) % 4) + 1\n plot.set_title(\"{} of Channel {}\".format(function_name, channel_number))\n plot.set_xlabel(\"Components\")\n # Only need title for first graph for each function\n if channel_idx == 0:\n plot.set_ylabel(\"{} of 100 pulses\".format(function_name))\n\n plot.set_ylim((min_value, max_value))\n function_to_channel_plots[function_name].append(plot)\n n_plots += 1\n\n components_per_function = 256\n components_per_channel = 64\n for index, row in rows_of_class.iterrows():\n for function_idx, (function, channel_plots) in enumerate(function_to_channel_plots.items()):\n for channel_idx, channel_plot in enumerate(channel_plots):\n x = np.arange(0, components_per_channel)\n start = (function_idx * components_per_function) + (channel_idx * components_per_channel)\n end = start + components_per_channel\n y = row[start:end]\n channel_plot.scatter(x, y, alpha=0.8)\n\n plt.savefig(\"{}.png\".format(class_name))", "def plot_spiral_and_predicted_class(position_array, class_array, model, output_file_name, title):\n h = 0.02\n x_min, x_max = position_array[:, 0].min() - 1, position_array[:, 0].max() + 1\n y_min, y_max = position_array[:, 1].min() - 1, position_array[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n z = np.argmax(model.predict(np.c_[xx.ravel(), yy.ravel()]), axis=1)\n z = z.reshape(xx.shape)\n plt.close('all')\n fig = plt.figure()\n plt.contourf(xx, yy, z, cmap=plt.cm.coolwarm, alpha=0.8)\n plt.scatter(position_array[:, 0], position_array[:, 1], c=class_array, s=40, cmap=plt.cm.coolwarm)\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n\n plt.title(title)\n fig.savefig(output_file_name)", "def scatter_plot(group1, group2):\n plt.scatter(group1, group2)\n plt.show()", "def visualize(vis, features, label):\n if vis == 'PCA':\n #n_components = st.sidebar.slider(\"n_components\", 2, 10)\n #alpha = st.sidebar.slider(\"alpha\", 0.8, 2.0)\n #pca = PCA(n_components)\n pca = PCA(2)\n\n X_projected = pca.fit_transform(features)\n \n x1 = X_projected[:, 0]\n x2 = X_projected[:, 1]\n\n\n fig = plt.figure()\n plt.scatter(x1, x2, c=label, alpha=0.8, cmap='viridis')\n plt.xlabel(\"Principal Component 1\")\n plt.ylabel(\"Principal Component 2\")\n plt.colorbar()\n\n st.pyplot()", "def plot_cmatrix(self,\n X=None,\n y=None,\n ):\n if X is None and y is None:\n y = self.ytrain\n elif X is not None and y is not None:\n if isinstance(y, pd.Series):\n y = self.le.transform(y)\n else:\n raise ValueError\n\n ypred = self._predict(X)\n\n return helpers.plot_cmatrix(y,\n ypred,\n 
cmapper=self.le,\n )", "def vis_class(X, labels, title, file_path=None):\n unique_labels = set(labels)\n colors = [plt.cm.Spectral(each)\n for each in np.linspace(0, 1, len(unique_labels))]\n\n plt.figure(figsize=(15, 12))\n for k, col in zip(unique_labels, colors):\n if k == -1:\n # Black used for noise.\n col = [0, 0, 0, 1]\n\n class_member_mask = (labels == k)\n\n xy = X[class_member_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n markeredgecolor='k', markersize=14, label=k)\n plt.text(xy[0, 0], xy[0, 1], str(k), fontsize=18)\n\n # xy = X[class_member_mask & ~core_samples_mask]\n # plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n # markeredgecolor='k', markersize=6, label=k)\n plt.title(title)\n plt.legend()\n plt.tight_layout()\n if file_path:\n plt.savefig(file_path, dpi=300)", "def model_visualization(model,X,y,classifier):\n sns.set_context(context='notebook',font_scale=2)\n plt.figure(figsize=(16,9))\n from matplotlib.colors import ListedColormap\n X_set, y_set = X, y\n X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\n plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.6, cmap = ListedColormap(('green', 'blue')))\n plt.xlim(X1.min(), X1.max())\n plt.ylim(X2.min(), X2.max())\n for i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n color = ListedColormap(('turquoise', 'blue'))(i), label = j)\n plt.title(\"%s Model Set\" %(model))\n plt.xlabel('PC 1')\n plt.ylabel('PC 2')\n plt.legend()\n plt.savefig('images/{0}.png'.format(model))", "def scatterplot(x, c, label, square=False):\n \n z_c = _scatter_centroids(c, square)\n z_x, zrmax = _scatter_x_near_centroid(x, c, z_c, label)\n return z_x, z_c, zrmax", "def do_pca(X, y, components: int = 2, plot: bool = True):\n\n new_X = []\n for i in X:\n new_X.append(i.flatten())\n\n X = new_X\n\n # PCA Stuff?\n pca = PCA(n_components=components)\n pca.fit(X)\n\n # Transform input data based on eigenvectors\n X = pca.transform(X)\n\n # Get scatters\n x = [i[0] for i in X]\n w = [i[1] for i in X]\n\n # plot\n\n plt.scatter(x, w, c=y)\n plt.show()", "def plot_scatter(X, Y=None, legend=None, title='', xlabel='', ylabel='', markersize=5):\n global vis\n opts = dict(title=title, xlabel=xlabel, ylabel=ylabel, markersize=markersize, legend=legend)\n win = vis.scatter(X, Y, opts=opts)\n return win", "def plotClusters(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Identification of Cluster Particles with Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n\n strength = np.linspace(0, 0.8, len(self.unique_labels))\n np.random.shuffle(strength)\n colors = [plt.cm.nipy_spectral(each) for each in strength]\n np.random.shuffle(strength)\n colorsB = [plt.cm.nipy_spectral(each) for each in strength]\n\n for k, col, colB in zip(self.unique_labels, colors, colorsB):\n a = 1\n s = 3\n if k == -1:\n # Black used for noise.\n col = [1, 0, 0]\n a = 0.3\n s = 1\n\n class_member_mask = (self.labels == k)\n xy = self.data[class_member_mask]\n if len(xy) > 0:\n ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], c=np.reshape(np.array(col), (1, -1)),\n edgecolors=np.reshape(np.array(colB), (1, -1)), alpha=a, s=s, label='Cluster ' + str(k))", 
"def scatter_plot(self, independent, dependent, second_indep=None):\n\n try:\n if second_indep is None:\n x = self.df_input[independent]\n y = self.df_input[dependent]\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(x, y, color = 'red')\n ax.set_xlabel(independent)\n ax.set_ylabel(dependent)\n ax.axis('tight')\n plt.title(\"Scatter Plot of \" + dependent + \" and \" + independent)\n plt.show()\n else:\n x = self.df_input[[independent]]\n y = self.df_input[[dependent]]\n z = self.df_input[[second_indep]]\n\n # plot the results\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(x, y, z, color = 'red')\n ax.set_xlabel(independent)\n ax.set_ylabel(\"Number of cases of \" + dependent)\n ax.set_zlabel(second_indep)\n ax.axis('tight')\n plt.title(\"Scatter Plot of \" + dependent + \", \" + independent + \" and \" + second_indep)\n plt.show()\n except Exception as e:\n print(e)", "def make_2d_scatter_plot(self, xdata, ydata, xlabel=None, xunits=None,\n ylabel=None, yunits=None, title=None,\n subplotnum=None, num_rows=None,\n plot_cor=True, set_range=True):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n\n if not set_range:\n xlim = plt.gca().get_xlim()\n ylim = plt.gca().get_ylim()\n\n plt.scatter(xdata, ydata)\n\n # Adjust ranges unless told otherwise\n if set_range:\n if isinstance(xdata, list):\n hrange = max(xdata) - min(xdata)\n if hrange != 0.0:\n plt.xlim(min(xdata)-0.1*hrange,\n max(xdata)+0.1*hrange)\n elif isinstance(xdata, np.ndarray):\n hrange = xdata.max() - xdata.min()\n if hrange != 0.0:\n plt.xlim(xdata.min()-0.1*hrange,\n xdata.max()+0.1*hrange)\n if isinstance(ydata, list):\n vrange = max(ydata) - min(ydata)\n if vrange != 0.0:\n plt.ylim(min(ydata)-0.1*vrange,\n max(ydata)+0.3*vrange)\n elif isinstance(ydata, np.ndarray):\n vrange = ydata.max() - ydata.min()\n if vrange != 0.0:\n plt.ylim(ydata.min()-0.1*vrange,\n ydata.max()+0.3*vrange)\n else:\n plt.xlim(xlim)\n plt.ylim(ylim)\n if plot_cor:\n # Calculate correlation and annotate\n rho, pval = self.get_correlation_coefficient(\n xdata=xdata,\n ydata=ydata,\n xsystkey=xlabel,\n ysystkey=ylabel\n )\n if (len(set(xdata)) != 1) and (len(set(ydata)) != 1):\n if subplotnum is not None:\n if num_rows is None:\n raise ValueError(\n \"Need to know the number of rows in \"\n \"order to correctly place the correlation \"\n \"annotation on the subplot\"\n )\n row = int((subplotnum-1)/4)\n xtext = 0.25*0.25+((subplotnum-1)%4)*0.25\n ytext = 0.88-(1.0/num_rows)*0.9*row\n plt.figtext(\n xtext,\n ytext,\n 'Correlation = %.2f'%rho,\n fontsize='large'\n )\n else:\n plt.figtext(\n 0.15,\n 0.80,\n 'Correlation = %.2f'%rho,\n fontsize=16\n )\n\n # Set labels, if required\n if xlabel is not None:\n nice_xlabel = self.make_label(xlabel, xunits)\n plt.xlabel(nice_xlabel, fontsize=16)\n if ylabel is not None:\n nice_ylabel = self.make_label(ylabel, yunits)\n plt.ylabel(nice_ylabel, fontsize=16)\n if subplotnum is None and (title is not None):\n plt.title(title, fontsize=16)", "def scatter_plot_2d(filepath, X_valid_encoded, X_valid, y_valid, n_classes):\n fig, ax = plt.subplots()\n class_points_x = [[] for i in range(n_classes)]\n class_points_y = [[] for i in range(n_classes)]\n for i, (e, y) in enumerate(zip(X_valid_encoded, y_valid)):\n pp_e = postprocess(e, (1, 2))\n coord = pp_e.numpy().ravel()\n class_points_x[y].append(coord[0])\n class_points_y[y].append(coord[1])\n for label in range(n_classes):\n ax.scatter(class_points_x[label], class_points_y[label], label=\"%d\" % 
label)\n plt.legend()\n plt.savefig(filepath + \"/img/2d_encode.png\")\n plt.close(fig)", "def simple_scatter():\n\n # Make two datasets specifying scatter graph\n dataset_a = DataSet(random_2d_a,plot='scatter')\n dataset_b = DataSet(random_2d_b,plot='scatter')\n\n # Make plot object and add data sets\n plot = Plot()\n plot.add_dataset(dataset_a)\n plot.add_dataset(dataset_b)\n\n # Plot graph and display\n plot.plot()\n plot.save(name='./figures/2d_simple_scatter',fmt='png')\n plot.display()", "def draw_scatter_plot(x: pd.Series, y: pd.Series, x_label: str, y_label: str):\n\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.scatter(x, y)\n plt.title(\"Scatter plot of '%s' and '%s'\" % (x_label, y_label))\n\n lr_model = linear_regression(x, y)\n plt.plot(x, lr_model.predict(np.array(x).reshape(-1, 1)), color='red')\n\n plt.show()", "def perp_plots(X, labs, perp_vec, ncols = 3, verbose = False, \\\n cdict = {1: 'red', 2: 'mediumspringgreen', 3: 'royalblue'}):\n \n # Set dimensions of subplots\n nrows = math.ceil(len(perp_vec) / ncols)\n \n # Configure axes\n axes = []\n fig = plt.figure(figsize = (16, 3 * nrows))\n \n # Iteratively generate plots\n for perp in range(len(perp_vec)):\n low_d = tsne(X = X, perplexity = perp_vec[perp], verbose = verbose, optim = \"fastest\")\n axes.append(fig.add_subplot(nrows, ncols, perp + 1))\n axes[-1].set_title(\"Perplexity = \" + str(perp_vec[perp]))\n plt.scatter(x = low_d[-1, :, 0], y = low_d[-1, :, 1], \\\n edgecolor = None, alpha = 0.8, c = np.array(list(map(lambda x: cdict[x], labs))))\n axes[-1].set_xticklabels([])\n axes[-1].set_yticklabels([])\n axes[-1].xaxis.set_ticks_position('none')\n axes[-1].yaxis.set_ticks_position('none')", "def plot_scatter(\n xdata: np.ndarray,\n ydata: np.ndarray,\n ax=None,\n labelsize: int = 14,\n grid: bool = True,\n **kwargs,\n):\n if ax is None:\n ax = get_non_gui_ax()\n\n # Default plot options\n plot_opts = kwargs.copy()\n if \"c\" not in plot_opts:\n plot_opts[\"c\"] = \"grey\"\n if \"marker\" not in plot_opts:\n plot_opts[\"marker\"] = \"x\"\n if \"alpha\" not in plot_opts:\n plot_opts[\"alpha\"] = 0.8\n\n # Plot data\n ax.scatter(xdata, unp.nominal_values(ydata), **plot_opts)\n\n # Formatting\n ax.tick_params(labelsize=labelsize)\n ax.grid(grid)\n return ax", "def plot_iris_dataset():\n iris = sns.load_dataset(\"iris\")\n sns.pairplot(iris, hue='species')", "def plotly_scatter_plot_chart():\n df = read_dataset(Path('..', '..', 'iris.csv'))\n\n model_data = cluster_iris_dataset_again()\n df['clusters'] = model_data['clusters']\n\n fig = px.scatter(df, x=\"sepal_width\", y=\"sepal_length\", color=\"clusters\")\n\n return fig", "def scatter_it(x, y, x_label, y_label):\n\n fig, ax1 = plt.subplots()\n plt.scatter(x, y)\n ax1.set_xlabel(x_label)\n ax1.set_ylabel(y_label)\n plt.show()", "def _generateFig1():\n\n x0, y0 = make_blobs(n_samples=200, n_features=2, centers=[[13, 13]], cluster_std=1, random_state=45)\n x1, y1 = make_blobs(n_samples=1000, n_features=2, centers=[[5, 0]], cluster_std=3.7, random_state=45)\n y1 += 1\n X = np.vstack((x0, x1))\n y = np.hstack((y0, y1))\n # Visualize the test data\n fig0, ax0 = plt.subplots()\n for label in range(2):\n ax0.plot(X[y == label][:, 0], X[y == label][:, 1], '.',\n color=colors[label])\n ax0.set_xlim(-10, 20)\n ax0.set_ylim(-8, 16)\n # ax0.set_title('Test data: 200 points x3 clusters.')\n return X", "def plotSVMProfile(self, df_class=None, is_plot=True, **kwargs):\n # Use getimportance for each class and each clf to get average\n # importance value for each feature\n # 
Construct importance dataframes by class\n COLORS = [\"blue\", \"red\", \"green\", \"brown\"]\n if df_class is None:\n dfs = [self.makeImportanceDF(class_selection=c) for c in self.classes]\n else:\n dfs = []\n for cls in df_class.index:\n ser_X = df_class.loc[cls, :]\n # Accumulate the feature contribution for each classifier\n # over the class averaged values\n sers = [self.clf_desc.getFeatureContributions(\n c, self.columns, ser_X).loc[cls, :] for c in self.clfs]\n df_values = pd.concat(sers, axis=1)\n df = self._makeFeatureDF(df_values)\n dfs.append(df)\n ymin = 0.9*min([df.values.flatten().min() for df in dfs])\n ymax = 1.1*max([df.values.flatten().max() for df in dfs])\n ylim = [ymin, ymax]\n fig, axes = plt.subplots(1, len(dfs))\n is_first = True\n for cls, ax, df in zip(self.classes, axes, dfs):\n df_new = df.sort_index(ascending=False)\n self._plot(df_new, None, fig, ax, False, is_vertical=False,\n is_ygrid=False, color=COLORS,\n ylim=ylim, **kwargs)\n ax.plot([0, 0], [0, len(df)])\n if is_first:\n is_first = False\n else:\n ax.set_ylabel(\"\")\n ax.set_yticklabels([])\n if self._class_names is not None:\n title = self._class_names[cls]\n else:\n title = str(cls)\n ax.set_title(title)\n if is_plot:\n plt.show()", "def plot_samples(s):\r\n assert len(s[0, :]) >= 2, ('The Phase space dimensions are less than two.', ' Need at least two to plot.')\r\n fig = plt.figure(1)\r\n if len(s[0, :]) >= 3:\r\n ax = fig.add_subplot(111, projection='3d')\r\n ax.scatter(s[:, 0], s[:, 1], s[:, 2])\r\n fig = plt.figure(2)\r\n plt.scatter(s[:, 0], s[:, 1])\r\n plt.show()", "def decision_plot(self, X, y):\n\n y = self._slice_target_index(y=y)\n\n for index in range(_n_targets(y)):\n if sklearn.utils.multiclass.type_of_target(y) == 'continuous-multioutput':\n self.fit(X, y.iloc[:, index].values.ravel(order='K'))\n else:\n self.fit(X, y)\n explainer, shap_values = self.explainer(X=X)\n shap.decision_plot(base_value=explainer.expected_value, shap_values=shap_values,\n feature_names=list(X.columns), show=self.show)", "def plot_2D(df):\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,6))\n fig.clf()\n #Get the current Axes instance on the current figure matching the given \n #keyword args, or create one.\n ax = fig.gca()\n df.plot(kind = 'scatter', x = 'x', y = 'y', ax = ax, alpha = 0.5)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_title('X vs. 
Y')\n return 'Done'", "def scatter_plot(x_vals, y_vals, x_variable):\n if (x_variable == 'm'):\n x_variable = 'Mole Fraction A'\n elif (x_variable == 'p'):\n x_variable = 'Pressure (kPa)'\n elif (x_variable == 't'):\n x_variable = 'Temperature (K)'\n fig = plt.figure(figsize=FIG_SIZE)\n plt.scatter(x_vals, y_vals)\n plt.xlabel(x_variable)\n plt.ylabel('Electrical Conductivity')\n\n return fig", "def plot_data(data, fig):\n\n if data.shape[1] > 3:\n print(\"Warning: data dimension is larger than 3, dim is %s\" % (data.shape[1]))\n\n ax = fig.add_subplot(111, projection='3d')\n # ax.scatter(data[:, 0], data[:, 1], data[:, 2], marker='.', s=0.5)\n return ax", "def plotit(X,Y=None,clf=None, markers = ('s','o'), hold = False, transform = None):\n eps=1e-6\n minx, maxx = np.min(X[:,0]), np.max(X[:,0])\n miny, maxy = np.min(X[:,1]), np.max(X[:,1])\n \n if clf is not None:\n npts = 150\n x = np.linspace(minx,maxx,npts)\n y = np.linspace(miny,maxy,npts)\n t = np.array(list(itertools.product(x,y)))\n if transform is not None:\n t = transform(t)\n z = clf(t)\n z = np.reshape(z,(npts,npts)).T \n extent = [minx,maxx,miny,maxy]\n plt.contour(x,y,z,[-1+eps,0,1-eps],linewidths = [2],colors=('b','k','r'),extent=extent, label='f(x)=0')\n #plt.imshow(np.flipud(z), extent = extent, cmap=plt.cm.Purples, vmin = -2, vmax = +2); plt.colorbar()\n plt.pcolormesh(x, y, z,cmap=plt.cm.Purples,vmin=-2,vmax=+2);plt.colorbar()\n plt.axis([minx,maxx,miny,maxy])\n \n if Y is not None:\n \n plt.scatter(X[Y==1,0],X[Y==1,1],marker = markers[0], c = 'y', s = 30)\n plt.scatter(X[Y==-1,0],X[Y==-1,1],marker = markers[1],c = 'c', s = 30)\n plt.xlabel('$x_1$')\n plt.ylabel('$x_2$') \n \n else:\n plt.scatter(X[:,0],X[:,1],marker = '.', c = 'k', s = 5)\n if not hold:\n plt.grid()\n plt.show()", "def PlotData(data, true_labels):\n\tcolors = ['red' if l == 0 else 'blue' for l in true_labels]\n\tfig = plt.figure()\n\tplt.scatter(data[:, 0], data[:, 1], c=colors)\n\tplt.show()\n\treturn", "def plot_PCA():\n X, languages = prepare_data_matrix()\n #print(X)\n eigenvectors, eigenvalues=power_iteration_two_components(X)\n explain = explained_variance_ratio(X, eigenvectors, eigenvalues)\n X=project_to_eigenvectors(X,eigenvectors)\n\n #print(X)\n plt.title('Explained variance: %.3f' % explain)\n plt.scatter(X[:,0], X[:,1])\n for i in range(len(X)):\n plt.text(X[i,0], X[i,1], languages[i][:3])\n plt.show()", "def plot_scatter(dataframe, colx, coly, xlabel='', \n ylabel='', \n xlim=[0,15], ylim=[0,15], density=True):\n\n if not density : \n plt.scatter(dataframe[colx].values, dataframe[coly].values)\n else:\n xvals = dataframe[colx].values\n yvals = dataframe[coly].values\n xy = np.vstack([xvals, yvals])\n z = gaussian_kde(xy)(xy)\n plt.scatter(xvals, yvals, c=z, s=10, edgecolor='')\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.plot(np.linspace(xlim[0], xlim[1], 100), \n np.linspace(ylim[0], ylim[1], 100), \n color='black')\n plt.xlim(xlim)\n plt.ylim(ylim)\n plt.show();", "def plot_pred(xy, y_prime, N=10, groundtruth=True):\n \n fig,ax = plt.subplots()\n pred_seq = y_prime.shape[2]\n obs_seq = xy.shape[1] - pred_seq\n \n if groundtruth:\n for i in range(N):\n # plot observation\n ax.plot(xy[i, :obs_seq, 2], xy[i, :obs_seq, 3], color='k')\n # plot ground truth\n ax.plot(xy[i, obs_seq-1:, 2], xy[i, obs_seq-1:, 3], color='r')\n for j, pred in enumerate(y_prime[i]):\n # concate the first step for visulization purpose\n pred = np.concatenate((xy[i, obs_seq-1:obs_seq, 2:4], pred), axis=0) \n ax.plot(pred[:, 0], pred[:, 1], color='b') \n else:\n x = 
xy\n obs_seq = x.shape[1] \n for i in range(N):\n # plot observation\n ax.plot(x[i, :, 2], x[i, :, 3], color='k')\n for j, pred in enumerate(y_prime[i]):\n # concate the first step for visulization\n pred = np.concatenate((x[i, obs_seq-1:obs_seq, 2:4], pred), axis=0) \n ax.plot(pred[:, 0], pred[:, 1], color='b') \n ax.set_aspect(\"equal\")\n plt.show()\n plt.gcf().clear()\n plt.close()", "def create_scatter_plot(x,y,df,kmeans, X_scaled, scaler, name):\n \n plt.figure(figsize=(10, 6))\n sns.scatterplot(x = x, y = y, data = df, hue = name)\n centroids = pd.DataFrame(scaler.inverse_transform(kmeans.cluster_centers_), columns=X_scaled.columns)\n centroids.plot.scatter(y=y, x= x, ax=plt.gca(), alpha=.30, s=500, c='black')\n plt.legend(bbox_to_anchor=(1.2,.8))", "def scatterplot_matrix():\r\n\r\n # load data\r\n iris_dataset = load_iris()\r\n data = iris_dataset\r\n setosa = data['data'][data['target'] == 0]\r\n versicolor = data['data'][data['target'] == 1]\r\n virginica = data['data'][data['target'] == 2]\r\n\r\n # set picture frame\r\n num = 4\r\n fig, axes = plt.subplots(nrows=num, ncols=num, figsize=(18, 18))\r\n fig.subplots_adjust(hspace=0.5, wspace=0.25)\r\n\r\n # set scatter plot\r\n for i in range(0, num):\r\n for j in range(0, num):\r\n if i == j:\r\n continue\r\n axes[j, i].plot(setosa[:, j], setosa[:, i], color='navy', marker='o', linestyle='none')\r\n axes[j, i].plot(versicolor[:, j], versicolor[:, i], color='purple', marker='*', linestyle='none')\r\n axes[j, i].plot(virginica[:, j], virginica[:, i], color='pink', marker='s', linestyle='none')\r\n\r\n # set histgram on the diagram\r\n for i in range(0, num):\r\n axes[i, i].hist(setosa[:, i], color='navy')\r\n axes[i, i].hist(versicolor[:, i], color='purple')\r\n axes[i, i].hist(virginica[:, i], color='pink')\r\n\r\n axes[0, 0].set_title('Sepal length')\r\n axes[1, 1].set_title('Sepal width')\r\n axes[2, 2].set_title('Petal length')\r\n axes[3, 3].set_title('Petal width')\r\n\r\n plt.legend(('Setosa', 'Virginica', 'Versicolor')) # add legend\r\n\r\n # add Main title\r\n fig.suptitle('Iris Plots, measurements in cm', size=20)\r\n plt.show()", "def plot_class_distribution(data):\n classes = [r[0] for r in data]\n plt.hist(classes)\n plt.xlabel('Labels')\n plt.ylabel('Counts')\n plt.title('Histogram of class counts')\n plt.show()", "def Plot_predict(X,Y,model,X_path): \n labels = {0: 'CNV', 1: 'DME', 2: 'DRUSEN', 3: 'NORMAL'}\n Y_pred_classes = np.argmax(model.predict(X),axis = 1) \n Y_true = np.argmax(Y,axis = 1)\n \n fig = plt.figure(figsize=(40, 40)) \n for i in range(X.shape[0]):\n ax = fig.add_subplot(8, 4, i + 1, xticks=[], yticks=[])\n ax.set_title(\"Groundtruth : {} \\n Prediction : {}\".format(labels[Y_true[i]],labels[Y_pred_classes[i]]), \\\n color=(\"green\" if Y_true[i] == Y_pred_classes[i] else \"red\"),fontsize=20) \n img = image.load_img(X_path[i])\n ax.imshow(img)\n plt.show()\n return", "def plot_svm(N=5, std=0.60, kernel='linear'):\n X, y = make_blobs(n_samples=200, centers=2, random_state=0,\n cluster_std=std)\n\n X_train, y_train = X[:N], y[:N]\n X_test, y_test = X[N:], y[N:]\n\n clf = SVC(kernel=str(kernel))\n clf.fit(X_train, y_train)\n\n plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, s=50, cmap='spring')\n plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, s=50, cmap='spring',\n alpha=0.2)\n plt.xlim(-1, 4)\n plt.ylim(-1, 6)\n plot_svc_decision_function(clf, plt.gca())\n plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],\n s=200, facecolors='none')\n\n train_score = clf.score(X_train, 
y_train)\n test_score = clf.score(X_test, y_test) if len(X_test) > 0 else 'NA'\n plt.title('Train Accuracy = {0}; Test Accuracy = {1}'.format(train_score,\n test_score))", "def tsplot_clusters( X, y):\n for yit in list(set(y)):\n sns.tsplot( X[y==yit,:], color=plt.cm.rainbow(yit/max(y)))", "def plot_decision_regions(X, y, classifier, resolution=.02, test_idx=None):\n # setup marker generator & color map\n plt.figure()\n markers = ('x', 'o')\n colors = ('red', 'blue')\n\n # calculate and plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=.35, cmap=ListedColormap(colors=colors[:len(np.unique(y))]))\n plt.xlim(xx1.min(), xx2.max())\n plt.ylim(xx2.min(), xx2.max())\n\n # scatter plot all values of the data sets\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0],\n y=X[y == cl, 1],\n c=colors[idx],\n marker=markers[idx],\n label=cl,\n edgecolors='black')\n if test_idx:\n # circle test data\n X_test, y_test = X[test_idx, :], y[test_idx]\n plt.scatter(X_test[:, 0],\n X_test[:, 1],\n c='',\n edgecolors='black',\n alpha=1.0,\n linewidths=1,\n marker='o',\n s=100,\n label='test set')", "def scatterplot(loc: List[CrimeStatistics]) -> None: \n # return None #stub\n #template based on visualization\n \n x = enrollment_list(loc)\n y = crime_list(loc)\n \n \n pyplot.scatter(x,y)\n pyplot.xlabel(\"Enrollment\")\n pyplot.ylabel(\"Total crime per campus\")\n pyplot.title(\"correlation between enrollment and crimes committed\")\n \n \n \n pyplot.show()\n print(linregress(x,y))\n \n \n return None", "def _scatter_example_3(data):\n # Plot the data\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"datetime\")\n ch.plot.scatter(\n data_frame=data,\n x_column=\"date\",\n y_column=\"unit_price\",\n size_column=\"quantity\",\n color_column=\"fruit\",\n )\n ch.set_title(\"Scatterplot\")\n ch.set_subtitle(\"Optional 'color_column' argument for grouping by color.\")\n ch.show(_OUTPUT_FORMAT)", "def plot_data(x_plot, X_train, X_test, y_train, y_test, low, high):\n s = 15\n plt.plot(x_plot, ground_truth(x_plot), alpha=0.5, label='ground truth')\n plt.scatter(X_train, y_train, s=s, alpha=0.2)\n plt.scatter(X_test, y_test, s=s, alpha=0.2, color='red')\n plt.xlim((low, high))\n plt.ylabel('y')\n plt.xlabel('x')\n plt.legend(loc='upper left')\n plt.show()", "def plot_data(self, dataset, plt):\n cluster_markers = ['*', '+', 'o']\n cluster_color = ['b', 'g', 'r']\n for i in range(dataset.shape[0]):\n plt.scatter(*zip(*dataset[i]), marker=cluster_markers[i], c=cluster_color[i])\n\n return plt", "def single_feature_plt(features, targets, save_fig=''):\n alpha = 0.5\n color = ['red' if targets[i, 0] > 0.0 else 'blue' for i in range(len(targets))]\n num_dims = features.shape[1]\n\n if num_dims == 2:\n plt.scatter(features[:, 0].numpy(), features[:, 1].numpy(), c=color,\n alpha=alpha, linewidths=0)\n plt.tick_params(axis='both', which='both', bottom=False, top=False,\n labelbottom=False, right=False, left=False,\n labelleft=False)\n ax = plt.gca()\n elif num_dims == 3:\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(features[:, 0].numpy(), features[:, 1].numpy(),\n features[:, 2].numpy(), c=color, alpha=alpha,\n linewidths=0, s=80)\n ax.tick_params(axis='both', which='both', 
bottom=False, top=False,\n labelbottom=False, right=False, left=False,\n labelleft=False)\n\n ax.set_aspect(get_square_aspect_ratio(ax))\n\n if len(save_fig):\n fig.savefig(save_fig, format='png', dpi=200, bbox_inches='tight')\n plt.clf()\n plt.close()\n else:\n plt.show()" ]
[ "0.68430007", "0.66364133", "0.6596344", "0.6580677", "0.65084815", "0.645392", "0.6410972", "0.6298724", "0.6219223", "0.621192", "0.61827666", "0.61542743", "0.6125981", "0.60232717", "0.59843695", "0.590425", "0.59014237", "0.5894517", "0.58731264", "0.58717024", "0.58680373", "0.58463746", "0.58204675", "0.5802502", "0.58007133", "0.5799708", "0.57966316", "0.57835287", "0.5772385", "0.57693636", "0.57482725", "0.57299304", "0.5716847", "0.57091576", "0.5704499", "0.56821847", "0.5681956", "0.5681141", "0.5677385", "0.56540054", "0.564567", "0.56429595", "0.56429595", "0.5642916", "0.5640794", "0.5635225", "0.56302094", "0.5628986", "0.562556", "0.5613561", "0.5589416", "0.5582073", "0.55803126", "0.5563254", "0.5562985", "0.5560494", "0.55472416", "0.55376345", "0.5511388", "0.5506067", "0.54996526", "0.54900616", "0.54828036", "0.54816914", "0.54778475", "0.5423424", "0.5403997", "0.5399479", "0.539699", "0.53823334", "0.53769106", "0.5376302", "0.5375326", "0.5374202", "0.5371878", "0.5360661", "0.535968", "0.5354695", "0.5354", "0.5348595", "0.5346514", "0.5344474", "0.5343676", "0.5338667", "0.53332657", "0.5329242", "0.5327919", "0.5318983", "0.53118414", "0.53114706", "0.531129", "0.5310997", "0.53050363", "0.52958614", "0.52929497", "0.52879584", "0.52856106", "0.52852386", "0.52759105", "0.52714986" ]
0.7091436
0
This function reads the data from database where the clean data is stored.
def load_data(database_filepath): # load data from database engine = create_engine('sqlite:///' + database_filepath) df = pd.read_sql_table('figure-eight', engine) # There is only one classification class for child_alone which is 0 which indicates that there is no message classified into this class. del df['child_alone'] X = df.message.values Y = df[np.delete(df.columns.values, [0,1,2,3])].values category_names = np.delete(df.columns.values, [0,1,2,3]) return X,Y,category_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleandata():\n engine = create_engine('sqlite:///../data/disaster_db.db')\n df = pd.read_sql_table('disaster_db', engine)\n\n return df", "def read_db(self):\n with open(self.filename, 'r') as database:\n data = json.load(database)\n self.data = data", "def read_sql(self):\n pass", "def read_database():\n file = tables.open_file(glob.datafile)\n table_d = file.root.VelibData.dynamic\n table_s = file.root.VelibData.static\n n_rows = len(table_d)\n print \"Nrows in dynamic table:\", n_rows\n print \"N stations:\", len(table_d[0][\"last_update\"])\n print \"Time of most recent sampling:\", \\\n time.asctime(time.localtime(recover_time(table_d[-1][\"sample_time\"])))\n print \"Nbikes available at most recent sampling:\", \\\n table_d[n_rows-1][\"available_bikes\"]\n print \"Time of last_update at most recent sampling:\", \\\n time.asctime(\n time.localtime(recover_time(table_d[n_rows-1][\"last_update\"][0])))\n print \"Number arr\", table_s[0][\"number\"]\n file.close()", "def readData(self):\n dayToday = self.currentDay()\n \n loopDbInput = True\n \n while loopDbInput == True: #While there is an error\n try:\n self.c.execute(\"SELECT * FROM Enigma WHERE Datum = \" + str(dayToday)) #Select all the data in the record for the current day\n data = self.c.fetchall() #Store the selected data in this variable\n except:\n print('Error reading database. Please choose another database.') #Inform the user that there is an error connecting to the database \n self.reset() #Prompt the user to establish a new database connection\n else:\n loopDbInput = False #Otherwise continue with the program\n return data #And return the daily settings ", "def loaddata(self):\n # Connect to the db\n self.conn, self.c = self.connect_db(self.dbname)\n # create the bdefile table to \n self.c.execute(oeeutil.sql_create_bdefile_table)\n # Delete any previous records\n self.c.execute('DELETE FROM bdefile')\n # hold the content for analysis\n for item in self.content:\n self.c.execute('INSERT INTO bdefile VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)', item)\n self.c.executescript(oeeutil.sql_create_bdefile_view)\n self.conn.commit()", "def read_database(app):\n app.status.cursorToHourglass()\n app.central.closeAllSubWindows()\n app.database().scan()\n app.status.cursorToNormal() \n app.refresh()", "def read(self):\n file_path = os.path.join(self.query_path, self.filename + '.sql')\n with open(file_path, 'r') as f:\n self.raw_sql = f.read()", "def load(self):\n db = CrawlDBI.DBI(dbtype='crawler')\n if self.rowid is not None:\n rows = db.select(table='checkables',\n fields=['rowid',\n 'path',\n 'type',\n 'cos',\n 'cart',\n 'ttypes',\n 'checksum',\n 'last_check',\n 'fails',\n 'reported'],\n where=\"rowid = ?\",\n data=(self.rowid,))\n else:\n rows = db.select(table='checkables',\n fields=['rowid',\n 'path',\n 'type',\n 'cos',\n 'cart',\n 'ttypes',\n 'checksum',\n 'last_check',\n 'fails',\n 'reported'],\n where=\"path = ?\",\n data=(self.path,))\n if 0 == len(rows):\n self.in_db = False\n elif 1 == len(rows):\n self.in_db = True\n rz = list(rows[0])\n self.rowid = rz.pop(0)\n self.path = rz.pop(0)\n self.type = rz.pop(0)\n self.cos = rz.pop(0)\n self.cart = rz.pop(0)\n self.ttypes = rz.pop(0)\n self.checksum = rz.pop(0)\n self.last_check = rz.pop(0)\n try:\n self.fails = rz.pop(0)\n except IndexError:\n self.fails = 0\n try:\n self.reported = rz.pop(0)\n except IndexError:\n self.reported = 0\n self.dirty = False\n else:\n raise StandardError(\"There appears to be more than one copy \" +\n \"of %s in the database\" % self)\n\n 
db.close()", "def read_db():\n with open(\"config.json\") as f:\n config = json.load(f)\n \n conn = psycopg2.connect(dbname='cage_sc_db', user='cage_db_user', \n password='legend', host='10.66.193.71')\n cursor = conn.cursor()\n\n # cmd = \"SELECT value_raw, timestamp FROM numeric_data WHERE endpoint_name='krstc_baseline' AND timestamp>'2019-09-27T00:00';\"\n \n # cmd = \"SELECT * FROM endpoint_id_map;\"\n \n # cmd = \"SELECT value_cal, timestamp FROM numeric_data WHERE endpoint_name='cage_coldPlate_temp' AND timestamp>'2019-09-03T00:02';\"\n \n # cmd = \"SELECT value_cal, timestamp FROM numeric_data WHERE endpoint_name='cage_pressure' AND timestamp>'2019-09-27T00:00';\"\n \n cmd = \"SELECT value_cal, timestamp FROM numeric_data WHERE endpoint_name='cage_ln_level' AND timestamp>'2019-09-27T00:00';\"\n \n # cmd = \"SELECT value_raw, timestamp FROM string_data WHERE endpoint_name='krstc_hv_status' AND timestamp>'2019-08-01';\"\n \n cursor.execute(cmd)\n\n # retrieve data. returns a list of tuples.\n record = cursor.fetchall()\n \n # print(type(record[0]))\n \n # dt = record[0][1]\n \n # print(dt)\n \n for rec in record:\n print(rec)", "def load_data(database_filepath):\n \n engine = create_engine('sqlite:///data/DisasterResponse.db')\n df = pd.read_sql_query('select * from cleanDF', engine)\n X = df['message'].values\n Y = df.iloc[:,5:]\n category_names = Y.columns\n\n return X, Y, category_names", "def retrieve_from_db(self):\n pass", "def readDB():\n if not os.path.exists(filenameDB):\n return { }\n \n with open(filenameDB, \"r\") as csvfile:\n rows = csv.reader(csvfile)\n if rows:\n db = { }\n for r in rows:\n if len(r)==2 and isinstance(r[0],str) and isinstance(r[1],str):\n db[r[1]] = r[0]\n return db\n return { }", "def read_from_db(cursor):\n\tcursor.execute('''SELECT * FROM (\n\t\tSELECT * FROM Kombucha_data ORDER BY Time DESC LIMIT 20)\n\t\tORDER BY Time ASC;''')\n\n\trows = cursor.fetchall()\n\treturn rows", "def read_from_db(cursor):\n\tcursor.execute('''SELECT * FROM (\n\t\tSELECT * FROM Kombucha_data ORDER BY Time DESC LIMIT 20)\n\t\tORDER BY Time ASC;''')\n\n\trows = cursor.fetchall()\n\treturn rows", "def _get_db_data(self) -> None:\n if self._db_data:\n return\n with db(cursor=True) as cur:\n cur.execute('SELECT count, gender, age_start FROM age_groups WHERE district = ?', (self.district,))\n self._db_data = cur.fetchall()\n self._db_data = sorted([row for row in self._db_data if row['gender'] == self.gender],\n key=lambda x: (x['age_start'] is None, x['age_start']))", "def _read_data(self):", "def load_cleaned_data(self):\n try:\n self.train = pd.read_pickle('../input/train_clean.pkl')\n self.test = pd.read_pickle('../input/test_clean.pkl')\n except FileNotFoundError:\n self.load_raw_data()", "def read_database(self):\n # open the database\n f = open('KISS_LINES','r')\n # make a list which will contain lines\n tlc = []\n for row in f:\n tlc.append(f.readline())\n f.close()\n\n return tlc", "def readdatabase2(self):\n fname=\"/home/alice/rl/v/vme/ADCI/DB/INPUTS.txt\"\n try:\n database=open(fname,\"r\") \n except IOError:\n print \"Cannot open \",fname\n return None\n else:\n print \"File \",fname,\" open successfuly.\"\n #print \"database= \",database\n lines=database.readlines()\n database.close() \n #print lines,len(lines) \n dbinputs=[]\n for i in lines:\n if(i[0] != '#'):\n items=string.split(i)\n #print 'items= ',items,len(items)\n if(len(items)<6):\n print \"Error parsing database, not enough items in line:\"\n print items\n return None\n db={}\n db['number']=items[0]\n 
db['numberDIM']=items[1]\n db['level']=items[2]\n db['name']=items[3]\n db['detector']=items[4]\n db['signature']=items[5]\n dbinputs.append(db)\n return dbinputs", "def read_database(db_path, db_file, *args):\n\n db_filepath = os.path.join(db_path, db_file)\n\n # list to store loaded data\n data_imported = []\n conn = sqlite3.connect(db_filepath)\n\n for data_name in args:\n\n\n info = f'Reading {data_name} from database................'\n print(info, end=\"\")\n data_name_in_db = conn.execute(\n f\"\"\"SELECT name FROM sqlite_master WHERE type='table' \n AND name='{data_name}'; \"\"\").fetchall()\n if data_name_in_db:\n df = pd.read_sql(f\"select * from {data_name}\", con=conn)\n substitute_names(df)\n # revert single column DataFrame to Series\n if 'index' in df.columns:\n df.set_index('index', inplace=True)\n df = df.squeeze('columns')\n data_imported.append(df)\n print('ok')\n else:\n data_imported.append(None)\n print('no data')\n conn.close()\n return data_imported #if len(data_imported)>1 else data_imported[0]", "def clean_up_data(self):\n pass", "def load_data(self):", "def read_data(db_name, query_file):\r\n con = sqlite3.connect(db_name)\r\n cursor = con.cursor()\r\n\r\n sql = open(query_file,'r')\r\n query = sql.read()\r\n sql.close()\r\n\r\n data = pd.read_sql_query(query, con=con)\r\n data.drop_duplicates(subset=['Title'], inplace=True)\r\n data = data[data['Type']=='movie']\r\n data.set_index('imdbID', inplace=True)\r\n\r\n con.commit()\r\n con.close()\r\n\r\n return data", "def process_database(self):\n self.DBDict = self.data_df.to_dict(orient=\"index\")\n\n # calculate weight ratio\n self.DBDict = {k: self.calc_compound_weight_ratio(\n self.DBDict[k]) for k in self.DBDict}\n\n # export as dataframe\n self.converted_df = pd.DataFrame(self.DBDict).T\n\n unnest_list = [\"SMILES_wt_list\", \"structureList\",\n \"wt_ratio\", \"fp_list\", \"MWList\"]\n self.converted_df = unnest_dataframe(\n self.converted_df, unnest_list, axis=0)\n\n # unnest FP\n unNest_FP_list = list(self.converted_df.columns[[True if re.match(\n \"fp_list\", i) else False for i in self.converted_df.columns]])\n rename_dict = {k: k+\"_\" for k in unNest_FP_list}\n self.converted_df = self.converted_df.rename(columns=rename_dict)\n\n self.converted_df = unnest_dataframe(\n self.converted_df, rename_dict.values(), axis=0)", "def load_data(database_filepath):\n\n engine = create_engine('sqlite:///{}'.format(database_filepath))\n df = pd.read_sql_table('messages_cleaned', engine)\n X = df.message \n Y = df.drop(columns=['message', 'original', 'genre'])\n category_names = list(Y.columns.values)\n return X, Y, category_names", "def load_data(self) -> None:", "def clear_data():\n conn = get_connect()\n #conn.execute(\"DELETE from match\")\n #conn.execute(\"DELETE from account\")\n #conn.execute(\"DELETE from championMatchData\")\n conn.execute(\"DELETE from championData\")\n conn.commit()\n conn.close()\n print(\"all data in info.db has been cleared\")\n return", "def fill_db(self, data):\n check_input_params(data, self.DB)\n self.db = data[self.DB]", "def read_stock_codes_from_db():\n\n print('connecting to database...')\n Stocks = get_db()['Stocks']\n print('reading...')\n\n stocks = Stocks.find()\n return stocks", "def get_data_from_database(self, database_type):\n if database_type == \"render\":\n try:\n connection = sqlite3.connect(self.filepath_render_database)\n pointer=connection.cursor()\n\n pointer.execute(\"select * from render_information\")\n\n conntent= pointer.fetchall()\n connection.commit()\n 
print(conntent)\n return conntent\n except:\n print(\"was not able to read data\")\n return False\n if database_type == \"object\":\n try:\n connection = sqlite3.connect(self.filepath_object_database)\n pointer=connection.cursor()\n\n pointer.execute(\"select * from object_information\")\n\n conntent= pointer.fetchall()\n connection.commit()\n print(conntent)\n return conntent\n except:\n print(\"was not able to read data from object database\")\n return False \n pass\n\n if database_type == \"output\":\n try:\n connection = sqlite3.connect(self.filepath_output_database)\n pointer=connection.cursor()\n\n pointer.execute(\"select * from output_information\")\n\n conntent= pointer.fetchall()\n connection.commit()\n print(conntent)\n return conntent\n except:\n print(\"was not able to read data from output database\")\n return False \n pass", "def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')", "def load_data(database_filepath):\n engine = create_engine('sqlite:///'+database_filepath)\n df = pd.read_sql_table('clean_df', con=engine)\n X=df['message']\n Y = df.iloc[:, 4:]\n category_names = list(Y.columns)\n return X,Y,category_names", "def load_DB(self):\n\t\tprint 'Loadind Data Base...'\n\t\tstream = open(self.DB_file)\n\t\tself.DB = cPickle.load(stream)\n\t\tstream.close()\n\t\tprint \"Number of documents in the Data Base: \", self.DB.nb_doc_total\n\t\tprint 'Loading completed'\n\t\treturn", "def cleanData(self):\n clean_data = []\n for file in self.raw_data_file:\n data = RawData(file)\n data.clean()\n data.prepare()\n clean_data.append(data)\n return clean_data", "def populate_db():\n stdout.write('Emptying the tables...\\n')\n empty_tables()\n stdout.write('Populating Language records...\\n')\n populate_language()\n stdout.write('Populating Lemma, Wordform, and Definition records...\\n')\n populate_lexical()\n stdout.write('Populating ProperName records...\\n')\n populate_proper_names()", "def data_cleaning():\n conn = get_connect()\n conn.execute(\"DELETE FROM championMatchData WHERE kills < 2 AND deaths < 2 AND assists < 2\")\n conn.commit()\n conn.close()\n return", "def load_file_data_from_db(self):\n\n file_objs = self.file_queryset.filter(sip=self.sip, removedtime__isnull=True)\n for file_obj in self._batch_query(file_objs):\n self.file_events = get_file_events(file_obj)\n if not self.file_events:\n return\n try:\n # merge the map_file_data dict with the map_av_data\n mapped_file_info = merge_file_data_dicts(\n map_file_data(file_obj, self.file_events), map_av_data(file_obj)\n )\n self.md_info[\"files\"].append(mapped_file_info)\n self.md_info[\"premis:size\"] = create_package_size(\n mapped_file_info[\"premis:size\"]\n )\n self.md_info[\"amount_of_files\"] += 1\n failed_virus_checks = get_failed_virus_checks(self.file_events)\n if failed_virus_checks:\n self.md_info[\"virus_scan_info\"][\"failed_virus_checks\"].append(\n failed_virus_checks\n )\n passed_virus_checks = get_passed_virus_checks(self.file_events)\n # add info virus_scan_tools if they passed and respect\n # different tools and versions if needed.\n if (\n passed_virus_checks\n and passed_virus_checks\n not in self.md_info[\"virus_scan_info\"][\"virus_scan_tools\"]\n ):\n self.md_info[\"virus_scan_info\"][\"virus_scan_tools\"].append(\n passed_virus_checks\n )\n except KeyError:\n logger.info(\n \"File is no longer present on the filesystem: %s\",\n 
file_obj.currentlocation,\n )\n continue", "def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()", "def load_and_clean_data():\n\n def file2pd(path):\n\n # load a data file, remove comments, convert to list\n f = open(path, 'r').read().replace('# rsid', 'rsid').split('\\n')\n f = [x for x in f if len(x) and x[0] != '#']\n\n # get column names and values\n cols = f[0].split('\\t')\n f = [x.split('\\t') for x in f[1:]]\n\n # convert to DataFrame, convert position column to int\n df = pd.DataFrame(f, columns=cols)\n df['position'] = df['position'].astype(np.int64)\n\n return df\n\n return [file2pd(PATH_ONE), file2pd(PATH_TWO)]", "def initialize_database():\n db = Database(database_name)\n i, m, u, p = db.fetch_needed_data()\n\n return i, m, u, p", "def read_from_db():\n\t# prepare the query for reading from DB\n\tquery = \"SELECT * FROM tasks\"\n\n\t# connection to database\n\tconnection = pymysql.connect(user=\"root\", password=\"sysadmin\", host=\"localhost\", database=\"todolist\")\n\n\t# get a cursor\n\tcursor = connection.cursor()\n\n\t# execute query\n\tcursor.execute(query)\n\n\t# fetch result from query\n\tresults = cursor.fetchall()\n\n\t# close cursor and connection\n\tcursor.close()\n\tconnection.close()\n\n\ttask_list = list()\n\tfor result in results:\n\t\ttmp = {'id': result[0], 'description': result[1], 'urgent': result[2]}\n\t\ttask_list.append(tmp)\n\n\treturn task_list", "def load_conditions():\n\n print (\"conditions\")\n\n Condition.query.delete()\n\n with open(\"seed_data/disease_seed.psv\") as diseases:\n for row in diseases:\n condition = row.strip()\n\n condition = Condition(condition=condition)\n\n db.session.add(condition)\n\n db.session.commit()", "def recover(self):\n if self.get_info_from_db():\n logger.info(\"Recover by reading previous results\")\n self.check_items(self.get_user_results_from_db())\n else:\n self.create_info_in_db() # create record in axdb", "def update_db(db_name=_db_indicators, start=1950, end=dt.datetime.now().year, write_db=True):\n def read_indicators(pdfI=None, coutries=[], ctry_chunksize=50, write_db=True):\n print('UPDATE IMF: Start reading {0} indicators'.format(pdfI.shape[0]))\n #dct_not_data=dict()\n lst_ret=[]\n for k, v in pdfI.iterrows():\n\n lst_pdf=list()\n lst_not_country=list()\n tbl_name=k #'{0}_{1}'.format(k, freq)\n print('UPDATE IMF ({2}-{3}): reading {0}, tDS={1}\\t'.format(k, v['Dataset'], start, end), end='... ')\n for cs in cmm.iterate_group(coutries, ctry_chunksize):\n\n try:\n pdf = pds.read_imf(strDataSetID=v['Dataset'], indiID=k, countryCode=cs,\n frequency=v['Freq'], startDate=start, endDate=end)\n\n lst_pdf.append(pdf)\n lst_not_country+=pdf.not_country\n #print(pdf.name, pdf.shape, len(pdf.not_country))\n except ValueError as e:\n lst_not_country += cs\n\n #print(e, k, 0, 50)\n try:\n pdfC=pds.DataFrameDATA(pd.concat([ppdf for ppdf in lst_pdf if not ppdf.empty]))\n pdfC.name=tbl_name\n #dct_not_data.update({'IND_NOT':tbl_name, 'NOT_DATA':lst_not_country})\n print('read {name},\\tlen {len_df},\\tnot data countries - {nc}'.format(name=pdfC.name,\n len_df=pdfC.shape[0],\n nc=len(lst_not_country)), end='... 
')\n if write_db:\n print('write to DB...', end='')\n\n lstWrite=[c for c in pdfC.columns.tolist() if c !='mult']\n\n pdfC[lstWrite].to_sql(pdfC.name, coni, if_exists='upsert')\n cmm.write_status(db_name, k, pdfC.shape[0], mult=pdfC['mult'].unique()[0])\n\n print('done', end='\\n')\n pdfC['INDI']=k\n lst_ret.append(pdfC)\n #print(dct_not_data)\n except ValueError as e:\n print(e, 'not data for ', k, v['Dataset'], len(cs))\n\n return pd.concat(lst_ret)\n\n coni = sa.create_engine('sqlite+pysqlite:///{db_name}'.format(db_name=db_name))\n # pdfIndi=pd.read_sql('select * from INDICATORS where LastUpdateDateA is NULL', coni, index_col='Code')\n pdfIndi = pd.read_sql('select * from {INDI_NAME}'.format(INDI_NAME=cmm.strINDI_db_name), coni, index_col='Code')#.iloc[:40]\n pdfCountry = pd.read_sql('select * from {COUNTRY_NAME}'.format(COUNTRY_NAME=cmm.strCOUNTRY_db_name), coni, index_col='id')\n country_list = pdfCountry.index.tolist()\n print('UPDATE IMF: reading {0} countries'.format(len(country_list)))\n\n pdfQ=read_indicators(pdfI=pdfIndi.sort_index(), coutries=country_list, write_db=write_db)\n print('=' * 50)\n\n print('UPDATE IMF: all done')\n return pdfQ", "def load_data(db_handler):\n\n from random import seed\n from random import random\n \n seed(1)\n\n new_notes = []\n\n for i in range(1,10):\n\n new_notes.append({\n\n\n 'title': str(i) + str(random()),\n 'content': 'Lorem ipsum' + str(i),\n 'active': True,\n 'created_by':\"Cristhian\" + str(i),\n 'created_at': date.today(),\n 'edited_at':date.today(),\n \n })\n\n new_notes.append(\n {\n \"active\": False,\n \"content\": \"Jesenia\",\n \"edited_at\": \"2019-10-24\",\n \"title\": \"Jesenia La chica de al lado\",\n \"created_by\": \"Cristhian1\",\n \"created_at\": \"2019-10-24\"\n })\n\n new_notes.append(\n {\n \"active\": False,\n \"title\": \"La vida de los numeros\",\n \"content\": \"Lorem ipsum y los numeros de la muerte\",\n \"edited_at\": \"2019-10-25\",\n \"created_by\": \"Jesenia\",\n \"created_at\": \"2019-10-24\"\n })\n\n Note.insert_many(new_notes).execute()\n\n User(name=\"Cristhian\", email=\"[email protected]\",\n password=b'$2b$12$U/QjtHt/j0xRT4r8Hx3fOe93EssM6M0iiUaQJOrTd64RXbxvhw6Ii').save()", "def readAll():\n readTemperature()\n readAirHumidity()\n readSoilHumidity()\n print(\"Success! 
Temperature and humidity inserted into database.\\n\")\n # DEBUG: Uncomment here for debbuging\n # print(\"Temperatura: \" + read_temperature)\n # print(\"Umidade: \" + read_humidity)", "def load_data(self):\n df= self.read_file()\n for row,col in df.iterrows():\n Employeeid = int(col['Empolyeeid'])\n Employee_Name = col['Employee_Name']\n Age = col['Age']\n Salary = col['Salary']\n self.table.put_item(\n Item={\n \"Employeeid\":Employeeid,\n \"Employee_Name\": Employee_Name,\n \"Age\": Age,\n \"Salary\": Salary\n }\n )\n return True", "def clean_data():\n redis_db.flushdb()", "def db_values(self, db):", "def loadDB(self,dbfilename):\n \n db=[]\n with open(dbfilename,'r',encoding='ISO-8859-1') as dbfilename:\n dbreader= csv.reader(dbfilename,delimiter=self.sDelimiter )\n for lFields in dbreader:\n db.append(lFields)\n\n return db", "def data(self, refresh=False):\n self.logging.debug( \"data(%s)\" % (self.db) )\n\n if not self.db: self.validate()\n\n if refresh: self.update()\n\n return (self._clean_cache(self.cache), self._clean_cache(self.cache_error))", "def read(self, database ='project'):\n\t\tfile = open(self.file_name, \"r\")\n\n\t\ti = 1\n\t\tseptics = []\n\t\tfor line in file:\n\t\t\tif i > 2:\n\t\t\t\tval = line.split()\n\t\t\t\tself.check_cols(val, 13, 'septic')\n\n\t\t\t\tsep = {\n\t\t\t\t\t'name': val[0].lower(),\n\t\t\t\t\t'q_rate': val[1],\n\t\t\t\t\t'bod': val[2],\n\t\t\t\t\t'tss': val[3],\n\t\t\t\t\t'nh4_n': val[4],\n\t\t\t\t\t'no3_n': val[5],\n\t\t\t\t\t'no2_n': val[6],\n\t\t\t\t\t'org_n': val[7],\n\t\t\t\t\t'min_p': val[8],\n\t\t\t\t\t'org_p': val[9],\n\t\t\t\t\t'fcoli': val[10],\n\t\t\t\t\t'description': val[12] if val[12] != 'null' else None # 12 index because extra column\n\t\t\t\t}\n\t\t\t\tseptics.append(sep)\n\t\t\ti += 1\n\n\t\tif database == 'project':\n\t\t\tdb_lib.bulk_insert(project_base.db, project_parmdb.Septic_sep, septics)\n\t\telse:\n\t\t\tdb_lib.bulk_insert(datasets_base.db, datasets_parmdb.Septic_sep, septics)", "def read(self):\n if not self._objectid:\n raise Record.RecordIncomplete()\n\n if not self._table :\n #prepare meta-data if not available\n if not self.setupRecord():\n raise Record.RecordIncomplete()\n try:\n extra = map (lambda x: '{1} {0}'.format(x), self._extra_sql_columns.items() )\n row = CFG.CX.getrow ( CFG.DB.SCHEMA + \".\" + self._table.name, \n self._objectid, extra)\n except pg.DatabaseError, e: \n raise Record.RecordNotFound(self._objectid, e)\n \n self.feedDataRow(row)", "def distributed_clean_db(empty_db):\n team.load_file(GOOD_TEST_TEAM_FILE, False)\n game.load_file(GOOD_TEST_GAME_FILE)\n game.load_file(join(TEST_DATA_DIR, \"distribution2.csv\"))\n game.load_file(join(TEST_DATA_DIR, \"distribution3.csv\"))\n service.set_player_codes()\n team.set_matches()\n return empty_db", "def get_data(db_dir, command, args = None):\n with lite.connect((db_dir)) as conn:\n try:\n cursor = conn.cursor()\n if args:\n cursor.execute(command,args)\n else:\n cursor.execute(command)\n data = cursor.fetchall()\n #print '[sql management] got all of the data requested according to:\\n--- %s ---\\n the data: %s'%(command, data)\n return data\n except:\n return None", "def load_data():\n\tscores = pd.read_csv('../data/user_assessment_scores.csv')\n\tviews = pd.read_csv('../data/user_course_views.csv')\n\ttags = pd.read_csv('../data/course_tags.csv')\n\tinterests = pd.read_csv('../data/user_interests.csv')\n\n\tdb_file = '../db/usersim.sqlite'\n\ttry:\n\t\tengine = sqlite3.connect(db_file, timeout=10)\n\t\tscores.to_sql('scores', engine, 
if_exists='replace', index=False, index_label='user_handle')\n\t\tviews.to_sql('views', engine, if_exists='replace', index=False, index_label='user_handle')\n\t\ttags.to_sql('tags', engine, if_exists='replace', index=False, index_label='course_id')\n\t\tinterests.to_sql('interests', engine, if_exists='replace', index=False, index_label='user_handle')\n\texcept:\n\t\tprint('Error occured while inserting into database')\n\tfinally:\n\t\tif engine:\n\t\t\tengine.close()\n\treturn scores, views, tags, interests", "def read_data():\r\n\r\n\tdata = open(\"database.txt\", \"r\", encoding=\"UTF-8\") # opening the file\r\n\tfor line in data: # iterating through the database.txt file\r\n\t\tif line[0] == \"#\": # this is comment, so skip it\r\n\t\t\tcontinue\r\n\t\telse:\r\n\t\t\tvalues = line.split(\",\") # split line into values\r\n\r\n\t\t\tfor i in range(len(values)):\r\n\t\t\t\tvalues[i].strip() # removing spaces from values\r\n\r\n\t\t\t# values are separated by comma\r\n\t\t\t# line no. description\r\n\t\t\t# 0: type of payment (directDebit, standingOrder, cardPayment, income)\r\n\t\t\t# this will be used to create a respective Payment class object\r\n\t\t\t# 1: is the payment active\r\n\t\t\t# 2: description of the payment\r\n\t\t\t# 3: amount of the payment\r\n\t\t\t# 4: day of the payment\r\n\t\t\t# 5: how many payments is intended (0=indefinite, 1=one-off, x=number of payments)\r\n\t\t\t# 6: frequency: how often is the payment processed (1=weekly, 2=monthly, 4=four-weekly)\r\n\t\t\t# 7: count of how many payments has gone since the first payment\r\n\t\t\t# 8: lastPaid: the date of the last payment\r\n\r\n\t\t\t# setting the payment date\r\n\t\t\tpayment_day = int(values[4]) # payment day taken from database file\r\n\t\t\tfrequency = int(values[7]) # how often is it paid taken from database file\r\n\r\n\t\t\tif payment_day >= PAY_DAY:\r\n\t\t\t\tpayment_month = start_date.month\r\n\t\t\telse:\r\n\t\t\t\tpayment_month = end_date.month\r\n\r\n\t\t\tpayment_date = date(start_date.year, payment_month, payment_day)\r\n\r\n\t\t\tinsert_payment(payment_day, payment_date, values)\r\n\r\n\t\t\t# calendar_dict[calendar_key] = payment\r\n\t\t\tprint(calendar_dict)\r\n\r\n\t\t\t# getting the next payment date\r\n\t\t\tnext_payment = calculate_next_payment(frequency, payment_date, payment_month)\r\n\t\t\t# print(\"Calculated next payment - 1: \" + str(next_payment))\r\n\r\n\t\t\t# checking the next payment date\r\n\t\t\twhile start_date <= next_payment <= end_date:\r\n\t\t\t\tinsert_payment(payment_day, next_payment, values)\r\n\r\n\t\t\t\t# calendar_dict[calendar_key] = payment\r\n\t\t\t\tprint(calendar_dict)\r\n\r\n\t\t\t\tnext_payment = calculate_next_payment(frequency, next_payment, payment_month)\r\n\t\t\t\t# print(\"Calculated next payment - 2: \" + str(next_payment))\r\n\r\n\tdata.close()", "def clean_database(self):\n for name in list(self.database):\n self._remove_database_entry(name)", "def clean_data(self):\n return self.instance.data", "def load_data_sql(): \r\n conn = mysql.connect(**st.secrets[\"mysql\"])\r\n\r\n data = pd.read_sql('SELECT * FROM song_data', conn)\r\n lookup_table = pd.read_sql('SELECT * FROM lookup_table', conn)\r\n \r\n return data, lookup_table", "def main():\r\n if len(sys.argv)==4:\r\n\r\n # files path\r\n m_file_path,c_file_path,database = sys.argv[1:]\r\n\r\n # first, read the data\r\n print('Reading the data...')\r\n df = read_data(m_file_path,c_file_path)\r\n print('OK!')\r\n print(' ')\r\n \r\n # clean it\r\n print('Cleaning the data...')\r\n df = 
clean_data(df)\r\n print('OK!')\r\n print(' ')\r\n \r\n # save it\r\n print('Saving data...')\r\n save_data(df,database)\r\n print(' ')\r\n \r\n # when it's done\r\n print(f'Cleaned data is stored in {database[:-3]} database') \r\n\r\n else:\r\n print('Please provide the filepaths of the messages and categories '\\\r\n 'datasets as the first and second argument respectively, as '\\\r\n 'well as the filepath of the database to save the cleaned data '\\\r\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\r\n 'disaster_messages.csv disaster_categories.csv '\\\r\n 'DisasterResponse.db')", "def prepare_data(self):", "def load_data(database_filepath):\n # load data from database\n engine = sqlalchemy.create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql_table(\"disaster_clean_data\", con=engine)\n X = df['message']\n Y = df.drop(columns=['id', 'message', 'original', 'genre'])\n category_names = list(df.columns[4:])\n return X, Y, category_names", "def read_and_set(self):\n self.df = self.run_find(self.qry, self.hide_fields)\n return", "def check_database(self):\r\n\r\n sql_command = \"\"\"\r\n SELECT *\r\n FROM UserRecommendations\r\n \"\"\"\r\n self.controller.execute(sql_command)\r\n\r\n for col in self.controller.fetchall():\r\n print(col)", "def read_file(self):\n colspecs = [[0, 7]] # for the id\n names = ['id']\n for question in self.question_list:\n colspecs.extend(question.get_column_range())\n names.extend(question.get_column_names())\n\n self.data = pd.read_fwf(self.file, colspecs=colspecs, encoding=self.encoding, names=names, header=None)\n self.data.fillna(0, inplace=True)\n self.data = self.data.astype(int)\n return self.data", "def load_and_fix(self):\n # Read in json\n self.read_json()\n\n if self.size_to_load:\n self.data = self.data[:self.size_to_load]\n\n # Add names from database given _bsn:\n self.extend_dataframe_with_personnames()\n\n # Clean rows in the data_frame where the names column is empty - > thus no response from the database\n self.clean_none_response()\n\n # Fix path from A09.pdf to A09.json\n self.fix_path()\n\n # Get the correct names from the database response\n self.parse_names_from_response()\n\n print(\" --- Final Shape Data ---\")\n print(self.data.shape)\n print(list(self.data))\n\n # Save pickled object in ./data map\n self.save_obj(self.data, self.file_name_to_save)", "def _clean_data(self):\n if not path.exists('auto-mpg.data.txt'):\n logger.info('Could not find auto-mpg.data.txt in the current working directory')\n sys.exit()\n else:\n try:\n with open('auto-mpg.data.txt', 'r') as dirty_data:\n with open('auto-mpg.clean.txt', 'w') as clean_data:\n ## counter for row writes\n counter = 0\n for row in csv.reader(dirty_data):\n clean_data.write(row[0].expandtabs(1) + '\\n')\n counter +=1\n except Exception as e:\n logger.info('File error occurred: {e}. 
Exiting')\n sys.exit()", "def read_donor_db(self, filename=\"donor_db.txt\"):\n try:\n with open(filename, 'r') as fn:\n lines = fn.readlines()\n for line in lines:\n pieces = line.rstrip().split(\",\")\n name = pieces[0]\n if len(pieces) > 1:\n donations = pieces[1:]\n else:\n donations = 0\n self.add_donor(name=name, donation=donations)\n except IOError:\n print(\"Could not save donor database\")", "def load_database(self, main_class):\n main_class.database.delete_all(\"render\")\n main_class.database.delete_all(\"object\")\n #main_class.database.delete_all(\"output\")\n render_csv = os.path.join(self.filepath, \"Render_data.csv\")\n object_csv = os.path.join(self.filepath, \"Obj_data.csv\")\n main_class.database.import_excel(render_csv, \"render\")\n main_class.database.import_excel(object_csv, \"object\")\n\n render_dic=main_class.database.get_database_dict(\"render\")\n\n main_class.render_database = main_class.database.get_data_from_database(\"render\")\n main_class.object_database = main_class.database.get_data_from_database(\"object\")\n\n main_class.background_picture_list = main_class.database.get_background_pictures_names()\n main_class.packaging_picture_list = main_class.database.get_bubble_wrap_pictures_names()\n\n main_class.camera_settings.append([0, 0, 0, 0, 100])\n for obj in main_class.render_database:\n \"\"\"\n extracting Camerasetting from Database and set all important angles and distances\n \"\"\"\n if obj[render_dic[\"object_type\"]] == \"camera\":\n for i in range(0, int(obj[render_dic[\"polar_angle_segments\"]])):\n for j in range(0, int(obj[render_dic[\"azimuth_angle_segments\"]])):\n pol_min = obj[render_dic[\"polar_angle_min\"]]\n pol_max = obj[render_dic[\"polar_anglel_max\"]]\n pol_segments= obj[render_dic[\"polar_angle_segments\"]]\n pol_random=obj[render_dic[\"polar_angle_random_rad\"]]\n try:\n pol_min = float( pol_min.replace(',','.'))\n except:\n pass\n try:\n pol_max = float( pol_max.replace(',','.'))\n except:\n pass\n try:\n pol_segments = float( pol_segments.replace(',','.'))\n except:\n pass\n try:\n pol_random = float( pol_random.replace(',','.'))\n except:\n pass\n polar_angle = (pol_min + ((pol_max - pol_min)/(pol_segments))*i)\n\n azi_min = obj[render_dic[\"azimuth_angle_min\"]]\n azi_max = obj[render_dic[\"azimuth_angle_max\"]]\n azi_segments= obj[render_dic[\"azimuth_angle_segments\"]]\n azi_random= obj[render_dic[\"azimuth_angle_random_rad\"]]\n\n try:\n azi_min = float( azi_min.replace(',','.'))\n except:\n pass\n try:\n azi_max = float( azi_max.replace(',','.'))\n except:\n pass\n try:\n azi_segments = float( azi_segments.replace(',','.'))\n except:\n pass\n try:\n azi_random = float( azi_random.replace(',','.'))\n except:\n pass\n azimuth_angle = (azi_min + ((azi_max - azi_min)/(azi_segments))*j)\n\n position=[polar_angle, pol_random, azimuth_angle, azi_random, obj[render_dic[\"radius\"]] ]\n print(\"camera position added: \",position)\n main_class.camera_settings.append(position)\n \n if obj[render_dic[\"object_type\"]]==\"light\":\n\n if obj[render_dic[\"name\"]]==\"SUN\":\n radius= obj[render_dic[\"radius\"]]\n try:\n radius = float( radius.replace(',','.'))\n except:\n pass\n light_obj=[ obj[render_dic[\"name\"]] , [0,0, radius ] ]\n main_class.light_settings.append(light_obj)\n print(\"sun added to list\")\n\n if obj[render_dic[\"name\"]]==\"SPOT\":\n for i in range(0, int(obj[render_dic[\"polar_angle_segments\"]])):\n for j in range(0, int(obj[render_dic[\"azimuth_angle_segments\"]])):\n pol_min = 
obj[render_dic[\"polar_angle_min\"]]\n pol_max = obj[render_dic[\"polar_anglel_max\"]]\n pol_segments= obj[render_dic[\"polar_angle_segments\"]]\n pol_random=obj[render_dic[\"polar_angle_random_rad\"]]\n try:\n pol_min = float( pol_min.replace(',','.'))\n except:\n pass\n try:\n pol_max = float( pol_max.replace(',','.'))\n except:\n pass\n try:\n pol_segments = float( pol_segments.replace(',','.'))\n except:\n pass\n try:\n pol_random = float( pol_random.replace(',','.'))\n except:\n pass\n polar_angle = (pol_min + ((pol_max - pol_min)/(pol_segments))*i)\n\n azi_min = obj[render_dic[\"azimuth_angle_min\"]]\n azi_max = obj[render_dic[\"azimuth_angle_max\"]]\n azi_segments= obj[render_dic[\"azimuth_angle_segments\"]]\n azi_random= obj[render_dic[\"azimuth_angle_random_rad\"]]\n try:\n azi_min = float( azi_min.replace(',','.'))\n except:\n pass\n try:\n azi_max = float( azi_max.replace(',','.'))\n except:\n pass\n try:\n azi_segments = float( azi_segments.replace(',','.'))\n except:\n pass\n try:\n azi_random = float( azi_random.replace(',','.'))\n except:\n pass\n azimuth_angle = (azi_min + ((azi_max - azi_min)/(azi_segments))*j)\n position=[polar_angle, pol_random, azimuth_angle, azi_random, obj[render_dic[\"radius\"]] ]\n light_obj=[ obj[render_dic[\"name\"]] , position, obj[render_dic[\"tracking_obj\"]],1000 ]\n print(\"added light_obj: \", light_obj)\n main_class.light_settings.append(light_obj)\n main_class.max_loop_count=len(main_class.camera_settings)*len(main_class.light_settings)\n print(\"loop count is:\", main_class.max_loop_count)\n return", "def load_data(database_filepath):\n \n engine = create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql_table('df',engine)\n X = df['message']\n y = df.drop(columns=['id','message','original','genre'], axis=1)\n category_names = y.columns\n return X, y, category_names", "def read_stock(db, openfile):\n pass", "def _database(self):\n ...", "def read():\n with open(DBNAME) as f:\n foo = pickle.loads(f.read())\n print foo", "def build_input_db():\n build_input_database()", "def read_db():\n\n # Look for database in the same folder as this script\n script_dir = os.path.dirname(os.path.realpath(__file__))\n db_filepath = os.path.join(script_dir, 'cn_loads_database.dat')\n\n db = None\n if os.path.isfile(db_filepath):\n with open(db_filepath, 'r') as f:\n db = yaml.load(f.read())\n if db == None:\n db = dict()\n else:\n db = dict()\n\n return db", "def clean(self) -> Dict:\n form_data = super().clean()\n\n # # Process Excel file using pandas read_excel\n try:\n self.data_frame = services.load_df_from_excelfile(\n self.files['data_file'],\n form_data['sheet'])\n except Exception as exc:\n self.add_error(\n None,\n _('File could not be processed: {0}').format(str(exc)))\n return form_data\n\n # Check the validity of the data frame\n self.validate_data_frame()\n\n return form_data", "def load_data(database_filepath):\n engine = create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql_table('Disasters', engine)\n X = df['message']\n Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)\n category_names = Y.columns\n return X, Y, category_names", "def read_meta(self):\n meta = cPickle.load(open('../sugar_analysis_data/META-CABALLO2.pkl'))\n self.meta_sn_name_list = []\n self.meta_zcmb = []\n self.meta_x0 =[]\n self.meta_x0_err = []\n self.meta_x1 =[]\n self.meta_x1_err = []\n self.meta_c = []\n self.meta_c_err = []\n self.meta_mb = []\n self.meta_mb_err = []\n self.meta_cov_x0_x1 = [] \n self.meta_cov_x0_c = []\n 
self.meta_cov_x1_c = []\n self.meta_cov_mb_x1 = []\n self.meta_cov_mb_c = [] \n self.meta_zhl = []\n self.meta_zhl_err = []\n self.meta_idr = []\n for meta_sn_name in meta.keys(): \n \n if meta[meta_sn_name]['idr.subset'] != 'bad' and meta[meta_sn_name]['idr.subset'] != 'auxiliary':\n \n self.meta_sn_name_list.append(meta_sn_name)\n self.meta_zhl_err.append(meta[meta_sn_name]['host.zhelio.err'])\n self.meta_zhl.append(meta[meta_sn_name]['host.zhelio'])\n self.meta_zcmb.append(meta[meta_sn_name]['host.zcmb'])\n self.meta_x0.append(meta[meta_sn_name]['salt2.X0'])\n self.meta_x0_err.append(meta[meta_sn_name]['salt2.X0.err'])\n self.meta_x1.append(meta[meta_sn_name]['salt2.X1'])\n self.meta_x1_err.append(meta[meta_sn_name]['salt2.X1.err'])\n self.meta_c.append(meta[meta_sn_name]['salt2.Color'])\n self.meta_c_err.append(meta[meta_sn_name]['salt2.Color.err'])\n self.meta_mb.append(meta[meta_sn_name]['salt2.RestFrameMag_0_B'])\n self.meta_mb_err.append(meta[meta_sn_name]['salt2.RestFrameMag_0_B.err'])\n self.meta_cov_x0_x1.append(meta[meta_sn_name]['salt2.CovX0X1'])\n self.meta_cov_x0_c.append(meta[meta_sn_name]['salt2.CovColorX0'])\n self.meta_cov_x1_c.append(meta[meta_sn_name]['salt2.CovColorX1'])\n self.meta_cov_mb_x1.append(meta[meta_sn_name]['salt2.CovRestFrameMag_0_BX1'])\n self.meta_cov_mb_c.append(meta[meta_sn_name]['salt2.CovColorRestFrameMag_0_B'])\n self.meta_idr.append(meta[meta_sn_name]['idr.subset'])\n \n self.meta_idr = np.array(self.meta_idr)\n self.meta_zcmb = np.array(self.meta_zcmb)\n self.meta_zhl = np.array(self.meta_zhl)\n self.meta_zhl_err = np.array(self.meta_zhl_err)\n self.meta_x0 = np.array(self.meta_x0)\n self.meta_x0_err = np.array(self.meta_x0_err)\n self.meta_x1 = np.array(self.meta_x1)\n self.meta_x1_err = np.array(self.meta_x1_err) \n self.meta_c = np.array(self.meta_c)\n self.meta_c_err = np.array(self.meta_c_err)\n self.meta_mb = np.array(self.meta_mb)\n self.meta_mb_err = np.array(self.meta_mb_err)\n self.meta_cov_x0_x1 = np.array(self.meta_cov_x0_x1)\n self.meta_cov_x0_c = np.array(self.meta_cov_x0_c)\n self.meta_cov_x1_c = np.array(self.meta_cov_x1_c)\n self.meta_cov_mb_x1 = np.array(self.meta_cov_mb_x1)\n self.meta_cov_mb_c = np.array(self.meta_cov_mb_c)", "def readDataFromCosmosDB(self):\n self.cosmosdb.updateCollectionThroughput(\n self.config.get_database_name(), self.config.get_hash_table(), self.config.get_scaleup_cosmos(),\n self.config.get_key(),\n self.config.get_cosmos_account())\n\n # read all the data from cosmos DB with encrypted fields and store in a data frame\n df = spark.read.format(\"com.microsoft.azure.cosmosdb.spark\").options(\n **self.config.get_hash_readconfig()).load()\n\n # iterate over the dataframe and decrypt and replace all fields except the cosmos db system fields strating\n # with \"_\" and the key --> id field since its hashed not encrypted and also not the partition field\n df = df.repartition(160).cache()\n dec_udf = udf(decrypt)\n\n for columns in df.columns:\n if columns.startswith('_') or columns.startswith('id') or columns.startswith('partition'):\n print('not to be encrypted field: ' + columns)\n else:\n print('to be encrypted field: ' + columns)\n df = df.withColumn(columns, dec_udf(df[columns]))\n print(\"succesfully decrypted the fields in spark df data frame\")\n\n # Register the DataFrame as a SQL temporary view\n df = df.repartition(1).cache()\n # df.persist(StorageLevel.DISK_ONLY_2)\n df.createOrReplaceTempView(\"customer\")\n spark.sql(\"CACHE TABLE customer\").collect()\n\n print(\"succesfully read \" + 
str(df.count()) +\n \" records from CosmosDB and saved in spark df data frame\")\n self.cosmosdb.updateCollectionThroughput(\n self.config.get_database_name(), self.config.get_hash_table(), self.config.get_scaledown_cosmos(),\n self.config.get_key(),\n self.config.get_cosmos_account())\n\n return df", "def cleaning (data):", "def get_file_contents(self):\n with open(self.sql_file, 'r') as sql:\n text = sql.read()\n # text = text.replace('\\n', '\\n\\n')\n # text=sql.read()\n # TODO: fix some text replacement issues here\n # https://github.com/andialbrecht/sqlparse/issues/313\n return self.filter_text(text)", "def load_data(database_filepath):\n engine_name = 'sqlite:///' + database_filepath\n engine = create_engine(engine_name)\n df =pd.read_sql(\"SELECT * FROM messages_table\", engine)\n X = df['message']\n Y = df.drop(['id','message','original','genre'], axis=1)\n return X, Y", "def data_preparation(self) -> None:\n self.logger.info('data cleaning')\n self.logger.info('num of secs: {}, num of ipo_dates: {}, num of secs with prices: {}'.format(\n len(self.data),\n len(self.ipo_dates),\n len(self.prices)\n ))\n excluded = []\n excluded = [i.lower() for i in excluded]\n self.logger.info(f'number of excluded: {len(excluded)}')\n for i in excluded:\n self.data.pop(i)\n for s in self.data:\n # columns with empty assets sum (empty columns and other situations)\n self.data[s].dropna(axis='columns', how='any', subset=['A_0'], inplace=True)\n # columns with descriptions (polish and english names of values)\n self.data[s].drop(self.data[s].columns[[0, 1]], inplace=True, axis=1)\n\n self.logger.info(f'number of secs after cleaning: {len(self.data)}')\n data_list = [k for k in self.data.values()]\n self.uber_data = pd.concat(data_list, ignore_index=True, axis=1)\n self.uber_data = self.uber_data.transpose()\n self.uber_data = self.uber_data.loc[:, pd.notnull(self.uber_data.columns)]", "def test_PubChemDataSet_clean_load(self):\n\n df = PubChemDataSet(1).clean_load()\n assert len((df.Activity[df.Activity == 1])) == len((df.Activity[df.Activity == 0]))\n assert None not in [Chem.MolToSmiles(mol) if mol else None for mol in df.rdkit]", "def load_raw_records_from_db(db, mid, batch=2, clean=True):\n if not os.path.exists(db):\n raise SequenceError(\"Path does not exist.\")\n db = sqlite3.connect(db)\n sql = \"\"\"SELECT random_region, full_sequence FROM sequences\n WHERE batch_id = ? AND mid_id = ?\"\"\"\n if clean:\n sql += \"\"\" AND LENGTH(random_region) = 18\n AND random_region NOT LIKE '%N%'\"\"\"\n db.row_factory = sqlite3.Row\n with closing(db.cursor()) as cursor:\n results = cursor.execute(sql, (batch, mid,)).fetchall()\n return results", "def _load_data(self):\n\n def __correct_car_make(car_make):\n \"\"\" Corrects given make names to a standard make name. 
\"\"\"\n ## define model corrections\n correct_makes = {\n 'chevroelt': 'chevrolet',\n 'chevy': 'chevrolet',\n 'maxda': 'mazda',\n 'mercedes-benz': 'mercedes',\n 'toyouta': 'toyota',\n 'vokswagen': 'volkswagen',\n 'vw': 'volkswagen'\n }\n ## return corrected make\n return correct_makes[car_make] if car_make in correct_makes.keys() else car_make\n\n logger.debug('checking auto-mpg.data.txt')\n if not path.exists('auto-mpg.data.txt'):\n ## file not present, get it\n logger.debug('getting auto-mpg.data.txt')\n self._get_data()\n if not path.exists('auto-mpg.clean.txt'):\n ## file not present, clean it\n self._clean_data()\n \n ## we got the data and we cleaned it\n logger.debug('checking auto-mpg.clean.txt')\n try:\n with open('auto-mpg.clean.txt', 'r') as clean_data:\n logger.debug('auto-mpg.clean.txt exists')\n ## counter for auto objects\n counter = 0\n logger.debug('Parsing auto-mpg.clean.txt into AutoMPG objects')\n for auto_record in csv.reader(clean_data, delimiter= ' ', skipinitialspace= True):\n ## split the car name into 2 tokens\n split = auto_record[8].replace('\\'', '').split(' ', 1)\n ## handle the case for 'subaru'\n if len(split) < 2:\n make = f'{split[0]}'\n auto = Record(auto_record[0], auto_record[6], __correct_car_make(make), '')\n elif len(split) == 2:\n make = f'{split[0]}'\n model = f'{split[1]}'\n auto = Record(auto_record[0], auto_record[6], __correct_car_make(make), model)\n counter += 1\n ## append the auto object\n self.data.append(AutoMPG(auto.make, auto.model, auto.year, auto.mpg))\n except Exception as e:\n logger.info(f'Error occurred: {e}')", "def get_inform_from_db(database_file_name: str) -> list:\n global data\n con = sqlite3.connect(database_file_name)\n cur = con.cursor()\n master = 'sqlite_master'\n query = \"SELECT name FROM \" + master + \" WHERE type = 'table'\"\n cur.execute(query)\n data = cur.fetchall()\n return data", "def read_personal_data(self):\n self._filename = self.input_filename()\n try:\n new_list = pd.read_csv(\n self._filename,\n sep=\"\\s+\",\n names=['index'] + self.columns,\n index_col=['index'],\n parse_dates=['birthday'],\n dtype={'id':'object', 'grade':'object'}\n )\n\n self.merge_list(new_list)\n except pd.errors.EmptyDataError as e:\n print(f'The file is empty [{e!r}].')", "def load_renter_data():\n return pd.read_sql_query(_sql_query, _con)", "def _read_dataset(self):\n import pandas as pd\n\n freesolv_path = get_data_file_path(FREESOLV_PATH)\n\n freesolv_db = pd.read_csv(freesolv_path, delimiter=';',\n skipinitialspace=True,\n skiprows=[0, 1, 2], header=0,\n names=['compound id', 'SMILES',\n 'iupac name',\n 'experimental value',\n 'experimental uncertainty',\n 'calculated value (GAFF)',\n 'calculated uncertainty',\n 'experimental reference',\n 'calculated reference',\n 'notes'])\n\n compound_ids = freesolv_db['compound id'].to_list()\n smiles_tags = freesolv_db['SMILES'].to_list()\n experimental_v = freesolv_db['experimental value'].to_list()\n return compound_ids, smiles_tags, experimental_v", "def test_clean(self):\n\n to_write = {\n self.file_to_test: {\n \"example.com\": {\n \"included_at_epoch\": 0.0,\n \"included_at_iso\": \"1970-01-01T01:00:00\",\n \"last_retested_at_epoch\": 0.0,\n \"last_retested_at_iso\": \"1970-01-01T01:00:00\",\n \"status\": PyFunceble.STATUS.official.invalid,\n },\n },\n }\n\n PyFunceble.helpers.Dict(to_write).to_json_file(self.storage_file)\n\n self.inactive_db.load()\n self.inactive_db.parent = True\n self.inactive_db.clean()\n self.inactive_db.parent = False\n\n expected = {self.file_to_test: 
{}}\n\n self.assertEqual(expected, self.inactive_db.database)", "def read_DB(self):\n mgdb = mongodata.db\n client = MongoClient(mgdb)\n db = client.local\n db.authenticate(mongodata.user, mongodata.passwd)\n minLat, maxLat, minLon, maxLon = self.city[1]\n cityname = self.city[2]\n if type(self.application) != list:\n col = db[mongodata.collection[self.application]]\n\n c = col.find({'city': cityname,\n 'lat': {'$gt': minLat, '$lt': maxLat},\n 'lng': {'$gt': minLon, '$lt': maxLon},\n # 'time': {'$gt': intinit, '$lt': intend}\n }, {'lat': 1, 'lng': 1, 'time': 1, 'user': 1})\n\n qsize = c.count()\n self.dataset = np.zeros((qsize,), dtype='f8,f8,i4,S20')\n cnt = 0\n for val in c:\n if cnt < qsize:\n self.dataset[cnt][0] = val['lat']\n self.dataset[cnt][1] = val['lng']\n self.dataset[cnt][2] = val['time']\n self.dataset[cnt][3] = val['user']\n cnt += 1\n else:\n lcol = []\n lcount = []\n for app in self.application:\n col = db[mongodata.collection[app]]\n\n c = col.find({'city': cityname,\n 'lat': {'$gt': minLat, '$lt': maxLat},\n 'lng': {'$gt': minLon, '$lt': maxLon},\n # 'time': {'$gt': intinit, '$lt': intend}\n }, {'lat': 1, 'lng': 1, 'time': 1, 'user': 1})\n\n lcount.append(c.count())\n lcol.append(c)\n\n self.dataset = np.zeros((sum(lcount),), dtype='f8,f8,i4,S20')\n for c, qsize in zip(lcol, lcount):\n cnt = 0\n for val in c:\n if cnt < qsize:\n self.dataset[cnt][0] = val['lat']\n self.dataset[cnt][1] = val['lng']\n self.dataset[cnt][2] = val['time']\n self.dataset[cnt][3] = val['user']\n cnt += 1", "def load_data(database_filepath):\n \n # load data from database\n url ='sqlite:///'+database_filepath\n engine = create_engine(url)\n df = pd.read_sql_table('DisasterMessages',con=engine)\n X = df['message'].values\n Y = df.drop(['id','message','original','genre'], axis=1)\n category_names = Y.columns\n return X, Y, category_names", "def val_db(dbfile):\n\n conn = sqlite3.connect(dbfile)\n c = conn.cursor()\n c.execute('SELECT * FROM bringatrailer ORDER BY id')\n for elem in c.fetchall():\n print(elem)\n conn.close()", "def load_data(database_filepath):\n engine = create_engine('sqlite:///'+ database_filepath)\n df = pd.read_sql(\"SELECT * FROM DisasterResponse\", engine)\n #exclude colums that are not needed in model\n col=[i for i in df.columns if i not in ['id','original', 'genre']]\n X = df[\"message\"]\n Y = df.iloc[:,4:]\n #global category_names\n category_names = Y.columns\n return X, Y, category_names", "def readDataFromFile(self):\n #import pdb; pdb.set_trace()\n if self.wantAnyDbm:\n try:\n if os.path.exists(self.filepath):\n self.data = anydbm.open(self.filepath,'w')\n self.notify.debug('Opening existing anydbm database at: %s.' % \\\n (self.filepath,))\n else:\n self.data = anydbm.open(self.filepath,'c')\n self.notify.debug('Creating new anydbm database at: %s.' % \\\n (self.filepath,))\n except anydbm.error:\n self.notify.warning('Cannot open anydbm database at: %s.' % \\\n (self.filepath,))\n \n else:\n try:\n # Try to open the backup file:\n file = open(self.filepath + '.bu', 'r')\n self.notify.debug('Opening backup pickle data file at %s.' % \\\n (self.filepath+'.bu',))\n # Remove the (assumed) broken file:\n if os.path.exists(self.filepath):\n os.remove(self.filepath)\n except IOError:\n # OK, there's no backup file, good.\n try:\n # Open the real file:\n file = open(self.filepath, 'r')\n self.notify.debug('Opening old pickle data file at %s..' 
% \\\n (self.filepath,))\n except IOError:\n # OK, there's no file.\n file = None\n self.notify.debug('New pickle data file will be written to %s.' % \\\n (self.filepath,))\n if file:\n data = cPickle.load(file)\n file.close()\n self.data = data\n else:\n self.data = {}", "def read(self):\n self.connect()\n get_books = f\"select * from {self.book_table}\"\n try:\n self.cur.execute(get_books)\n self.con.commit()\n for i in self.cur:\n yield i\n except MySQLError as err:\n messagebox.showinfo(\"Failed to fetch files from database\")\n print(err)", "def get_clean_sets(clean_cycle_dict, file_name, database_name):\n clean_set_df = pd.DataFrame()\n #name = file_name.split('.')[0]\n #while '/' in file_name: \n while '/' in file_name:\n file_name = file_name.split('/', maxsplit = 1)[1]\n name = file_name.split('.')[0]\n \n for k, v in clean_cycle_dict.items():\n clean_set_df = clean_set_df.append(v, ignore_index = True)\n\n #clean_set_df = clean_set_df.sort_values(['Data_Point'], ascending = True)\n # clean_set_df.reset_index(drop = True)\n \n dbfs.update_database_newtable(clean_set_df, name + 'CleanSet', database_name)\n \n print('All clean cycles recombined and saved in database')\n return clean_set_df", "def rebuild(self):\n if self.drop_tables:\n self._drop_tables()\n\n # reload meta.json into db\n self._meta_json_to_database()\n\n processed_data_ids = []\n\n # Iterate through each row in the manifest then clean and validate\n for manifest_row in self.manifest:\n # Note: Incompletely filled out rows in the manifest can break the\n # other code\n # TODO: figure out a way to flag this issue early in loading\n # TODO: of manifest\n\n # only clean and validate data files flagged for use in database\n if manifest_row['include_flag'] == 'use':\n logging.info(\"{}: preparing to load row {} from the manifest\".\n format(manifest_row['unique_data_id'],\n len(self.manifest)))\n\n self._process_data_file(manifest_row=manifest_row)\n\n processed_data_ids.append(manifest_row['unique_data_id'])\n\n # build Zone Facts table\n self._create_zone_facts_table()\n\n return processed_data_ids" ]
[ "0.6639239", "0.65105474", "0.63020116", "0.6263526", "0.61224455", "0.61113507", "0.60522664", "0.59786123", "0.59628767", "0.5925373", "0.59235024", "0.5909515", "0.5893247", "0.5887125", "0.5887125", "0.58866704", "0.58852226", "0.5885046", "0.5872037", "0.5848737", "0.5835556", "0.5819982", "0.5815051", "0.5802814", "0.5793415", "0.5770611", "0.57281154", "0.57046413", "0.5699284", "0.56926847", "0.56887704", "0.56873196", "0.56858", "0.56850904", "0.5671902", "0.56631917", "0.5653827", "0.5630353", "0.56168586", "0.5616518", "0.56161726", "0.56104773", "0.5609989", "0.5608403", "0.5607317", "0.56021094", "0.55976576", "0.5593914", "0.5559597", "0.5553536", "0.5552815", "0.5552033", "0.553931", "0.5526547", "0.5525782", "0.5520574", "0.5511902", "0.5499393", "0.5492266", "0.54917204", "0.54892236", "0.5486113", "0.54808253", "0.54729766", "0.5470314", "0.54694545", "0.54694164", "0.54592574", "0.5445695", "0.5439749", "0.54373", "0.5432791", "0.5427536", "0.5417086", "0.54146916", "0.54133266", "0.5404509", "0.53976375", "0.5392289", "0.5384642", "0.5380147", "0.53800535", "0.5374615", "0.5357003", "0.5351228", "0.53448385", "0.5338466", "0.5337421", "0.53291786", "0.5327777", "0.5323418", "0.5316113", "0.53146344", "0.53102165", "0.53075784", "0.53074485", "0.53045243", "0.5297879", "0.52927166", "0.5290905", "0.52892524" ]
0.0
-1
This function tokenizes the input text and performs necessary cleaning.
def tokenize(text): url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+' detected_urls = re.findall(url_regex, text) for url in detected_urls: text = text.replace(url, "urlplaceholder") tokens = word_tokenize(text) lemmatizer = WordNetLemmatizer() clean_tokens = [] for tok in tokens: clean_tok = lemmatizer.lemmatize(tok).lower().strip() clean_tokens.append(clean_tok) return clean_tokens
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_text(text):\n global cleaned_text\n # remove numbers\n text_nonum = re.sub(r'\\d+', '', text)\n # remove punctuations and convert characters to lower case\n text_nopunct = \"\".join([char.lower() for char in text_nonum if char not in string.punctuation]) \n # substitute multiple whitespace with single whitespace\n # Also, removes leading and trailing whitespaces\n text_no_doublespace = re.sub('\\s+', ' ', text_nopunct).strip()\n #tokenise text\n tokenised_text = text_no_doublespace.split()\n for word in tokenised_text:\n if len(word) == 1:\n tokenised_text.remove(word)\n #if word is a stop word, remove it from the list\n elif word in stopwords.words('english'):\n tokenised_text.remove(word)\n #de-tokenise text\n cleaned_text = ' '.join(tokenised_text)\n return cleaned_text", "def clean_the_text(text):\n \n #Replace non-word characters with empty space\n text = re.sub('[^A-Za-z0-9\\s]', ' ', text)\n \n #Remove punctuation\n text = ''.join([word for word in text if word not in string.punctuation])\n \n #Bring text to lower case\n text = text.lower()\n \n #Tokenize the text\n tokens = re.split('\\W+', text)\n \n #Remove stopwords\n text = [word for word in tokens if word not in stopword]\n \n #Lemmatize the words\n text = [wn.lemmatize(word) for word in text]\n \n #Return text\n return text", "def process_text(input_txt):\r\n # if input is string\r\n tidy_txt = remove_pattern(input_txt,\"@[\\w]*\")\r\n ##=============================== if input is dataframe ====================##\r\n # tidy_txt = np.vectorize(remove_pattern)(input_txt,\"@[\\w]*\") #\r\n ##==========================================================================##\r\n # remove special characters\r\n tidy_txt = tidy_txt.replace(\"[^a-zA-Z#]\",\" \")\r\n # split into words\r\n tokenized_txt = tidy_txt.split()\r\n # perform stemming\r\n stemmer = PorterStemmer()\r\n tokenized_txt = [stemmer.stem(i) for i in tokenized_txt]\r\n print(tokenized_txt)\r\n # joining words back\r\n tokenized_txt = ' '.join(tokenized_txt)\r\n return tokenized_txt", "def text_cleaning(self, text):\n # remove string formatting '\\n' or '\\t'\n tmp_text = re.sub(r'\\n+', '. ', text)\n tmp_text = re.sub(r'\\t+', '. ', text)\n # remove words with non-ascii characters\n tmp_text = \" \".join([word for word in tmp_text.split() if self.is_ascii(word)])\n # remove email address\n tmp_text = \" \".join([word for word in tmp_text.split() if not word.startswith(\"@\")])\n # remove urls\n tmp_text = re.sub(r'http\\S+', '', tmp_text, flags=re.MULTILINE)\n tmp_text = re.sub(r'www\\S+', '', tmp_text, flags=re.MULTILINE)\n # remove punctuation but . 
(to split sentences)\n cleaned_text = re.sub('[^A-Za-z.,]+', ' ', tmp_text)\n # lowercase\n cleaned_text = cleaned_text.lower()\n\n return cleaned_text", "def clean_and_twokenize(text):\n cleaned_text = clean_twitter_tokens(text)\n twokenized_text = twokenize.tokenize(cleaned_text)\n\n return twokenized_text", "def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = handle_emojis(text)\n text = clean_number(text)\n text = spacing_punctuation(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n #text = stop(text)# if changing this, then chnage the dims \n #(not to be done yet as its effecting the embeddings..,we might be\n #loosing words)...\n return text", "def clean_text_for_skill_extraction(text):\n multi_space_regex = re.compile(r\"[,;?!()\\\\/]\", re.IGNORECASE)\n text = re.sub(multi_space_regex, ' ', text)\n\n text = clean_text_from_private_unicode(text)\n text = clean_text_from_geometrical_shape_unicode(text)\n\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n\n return text", "def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = clean_number(text)\n text = decontracted(text)\n text = correct_spelling(text)\n text = spacing_punctuation(text)\n text = spacing_some_connect_words(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n text = text.lower()\n return text", "def preprocess_input(self, text):\n text = re.sub(r\"([^a-zA-Z0-9 -]+ +[^a-zA-Z0-9 -]*|[^a-zA-Z0-9 -]*\" +\n \" +[^a-zA-Z0-9 -]+)\", ' ', text, flags=re.UNICODE)\n text = re.sub(r\"([^a-zA-Z0-9 -]+$|^[^a-zA-Z0-9 -]+)\", '', text)\n text = re.sub(r\"([a-zA-Z0-9 -]+?)([^a-zA-Z0-9 -])([a-zA-Z0-9 -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE)\n text = re.sub(r\"([\\x00-\\x7F -]+?)([^a-zA-Z0-9 -]+)([\\x00-\\x7F -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE).encode(\"utf-8\")\n return re.sub(r\"([^a-zA-Z0-9 \\-\\'])\", '', text, flags=re.UNICODE)", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text 
) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def _clean(text, remove_stopwords=False):\n text = _remove_between_square_brackets(text)\n text = _replace_contractions(text)\n \n words = nltk.word_tokenize(text)\n words = _remove_non_ascii(words)\n words = _to_lowercase(words)\n words = _remove_punctuation(words)\n words = _replace_numbers(words)\n\n if remove_stopwords:\n words = _remove_stopwords(words)\n\n return ' '.join(words)", "def normalize_text(text):\n\n text = text.lower().strip().replace(\"\\n\", \" \").replace(\"\\r\", \"\")\n\n text = replace_money_token(text)\n text = replace_urls_token(text)\n text = fix_unicode_quotes(text)\n text = format_large_numbers(text)\n text = pad_punctuation(text)\n return text.strip()", "def clean_article(self):\n # split into tokens by white space\n tokens = self.text.split(\" \")\n # remove punctuation from each token\n table = str.maketrans('', '', punctuation)\n tokens = [w.translate(table) for w in tokens] # type: List[Any]\n # remove remaining tokens that are not alphabetic\n tokens = [word for word in tokens if word.isalpha()]\n # filter out stop words\n stop_words = set(stopwords.words('english'))\n tokens = [w for w in tokens if not w in stop_words]\n # lemmatization and lowercase\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(w.lower()) for w in tokens]\n # filter out short tokens\n tokens = [word for word in tokens if len(word) > 1]\n return tokens", "def processText(text):\n\n no_punc = [word for word in text.split() if word.isalpha()] # and word not in stopwords.words('english')]\n #removes non-letter characters and only includes words not included in stopwords\n no_punc = \" \".join(no_punc) \n clean_words = nltk.word_tokenize(no_punc) #splits the punctuation marks from the real words\n return clean_words", "def preprocess_text(text, tokenize=False, ner=False, stem=False, stopw=False, all_lower=False, strip_punct=True):\n\n # Clean the text\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"cannot \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\"i\\.e\\.\", \"\", text)\n text = 
re.sub(r\"\\.\", \" . \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\/\", \" \", text)\n text = re.sub(r\"\\^\", \" ^ \", text)\n text = re.sub(r\"\\+\", \" + \", text)\n text = re.sub(r\"\\-\", \" - \", text)\n text = re.sub(r\"\\=\", \" = \", text)\n text = re.sub(r\"'\", \" \", text)\n text = re.sub(r'\"', \" \", text)\n text = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", text)\n text = re.sub(r\":\", \" : \", text)\n text = re.sub(r\" e g \", \" eg \", text)\n text = re.sub(r\"^e g \", \" eg \", text)\n text = re.sub(r\" b g \", \" bg \", text)\n text = re.sub(r\"^b g \", \" bg \", text)\n text = re.sub(r\" u s \", \" american \", text)\n text = re.sub(r\"^u s \", \" american \", text)\n text = re.sub(r\"\\0s\", \"0\", text)\n text = re.sub(r\" 9 11 \", \"911\", text)\n text = re.sub(r\"e - mail\", \"email\", text)\n text = re.sub(r\"j k\", \"jk\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n text = re.sub(r\"\\b[a-zA-Z]\\b\", \"\", text)\n\n if ner:\n tokenized_text = word_tokenize(text)\n tagged_text = pos_tag(tokenized_text)\n chunked_text = ne_chunk(tagged_text, binary=True)\n\n named_entities = extract_entity_names(chunked_text)\n for named_entity in named_entities:\n entity = named_entity.replace(\".\", \"\")\n entity = re.sub(r'\\s+', \"_\", entity)\n text = text.replace(named_entity, entity)\n\n if all_lower:\n text = text.lower()\n\n if stopw:\n global stops\n if stops is None:\n try:\n stops = set(stopwords.words(\"english\"))\n except Exception as e:\n print(\"%s - Please download english stopwords from NLTK\" % e)\n exit()\n text = [word.strip() for word in text.split() if word not in stops]\n text = \" \".join(text)\n\n if tokenize:\n text = word_tokenize(text)\n text = \" \".join(text)\n\n # shorten words to their stems\n if stem:\n text = text.split()\n stemmer = SnowballStemmer('english')\n stemmed_words = [stemmer.stem(word) for word in text]\n text = \" \".join(stemmed_words)\n\n if strip_punct:\n text = text.translate(str.maketrans('', '', string.punctuation))\n\n text = text.strip()\n\n # Empty string\n if text == '':\n return EMPTY_TOKEN\n\n return text", "def clean_text(text):\n text = text.lower()\n text = text.replace('\\xa0', ' ')\n text = text.replace('fls.', 'folhas ')\n text = text.replace('fl.', 'folha ')\n text = text.replace('arts.', 'artigos ')\n text = text.replace('art.', 'artigo ')\n text = re_tree_dots.sub('...', text)\n text = re.sub(r'\\.\\.\\.', ' ', text)\n text = re_remove_brackets.sub(' ', text)\n text = re_changehyphen.sub('-', text)\n text = re_remove_html.sub(' ', text)\n text = re_transform_numbers.sub('0', text)\n text = re_transform_url.sub('URL', text)\n text = re_transform_emails.sub('EMAIL', text)\n text = re_quotes_1.sub(r'\\1\"', text)\n text = re_quotes_2.sub(r'\"\\1', text)\n text = re_quotes_3.sub('\"', text)\n text = re.sub('\"', ' ', text)\n text = re_dots.sub('.', text)\n text = re_punctuation.sub(r'\\1', text)\n text = re_hiphen.sub(' - ', text)\n text = re_punkts.sub(r'\\1 \\2 \\3', text)\n text = re_punkts_b.sub(r'\\1 \\2 \\3', text)\n text = re_punkts_c.sub(r'\\1 \\2', text)\n text = re_doublequotes_1.sub('\\\"', text)\n text = re_doublequotes_2.sub('\\'', text)\n text = re_trim.sub(' ', text)\n return text.strip()", "def sanitize(text):\n \n # Convert text to lowercase\n text = text.lower()\n\n # Replace all whitespace with a single space\n text = re.sub(r'\\s+',' ',text)\n\n # Remove all links (e.g. 
[abc](xyz)def --> [abc]def)\n text = re.sub(r'(\\[.*\\])(\\(.*\\))', r'\\1', text)\n\n # Remove URLs\n text = re.sub(r'((http[s]?://)?www.\\S+)|(http[s]?://\\S+)', '', text) \n\n # Split text on single spaces\n words = text.split()\n \n # Separate external punctuation then remove non-ending and non-embedded punctuation\n tokens = []\n for word in words:\n \tseparate_tokens(word, tokens)\n \n parsed_text = \"\"\n unigrams = \"\"\n bigrams = \"\"\n trigrams = \"\"\n \n # Populate lists to return\n for index, token in enumerate(tokens):\n \tparsed_text += token + ' '\n \tif token not in common:\n \t\tunigrams += token + ' '\n \t\tif index + 1 <= len(tokens)-1 and tokens[index+1] not in common:\n \t\t\tbigram = token + '_' + tokens[index+1]\n \t\t\tbigrams += bigram + ' '\n \t\t\tif index + 2 <= len(tokens)-1 and tokens[index+2] not in common:\n \t\t\t\ttrigrams += bigram + '_' + tokens[index+2] + ' '\n \n return parsed_text.strip().split() + unigrams.strip().split() + bigrams.strip().split()+ trigrams.strip().split()", "def tokenize(text):\n text = re.sub('[^A-Za-z0-9]', ' ', text)\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "def processText(text_data):\n\tclean_texts = []\n\tfor text in text_data:\n\t\tno_parens = removeParentheses(text)\n\t\tonly_alpha_spaces = removeNonAlphabet(no_parens)\n\t\tone_space = removeExtraSpaces(only_alpha_spaces)\n\n\t\tstemmed_text = stemWords(one_space)\n\n\t\tclean_texts.append(stemmed_text.lower())\n\treturn clean_texts", "def scrub_words(text):\n \"\"\"Taken from https://github.com/kavgan/nlp-in-practice/blob/master/text-pre-processing/Text%20Preprocessing%20Examples.ipynb \"\"\"\n \n # remove html markup\n text=re.sub(\"(<.*?>)\",\"\",text)\n \n #remove non-ascii and digits\n text=re.sub(\"(\\\\W|\\\\d)\",\" \",text)\n \n # remove the extra spaces that we have so that it is easier for our split :) Taken from https://stackoverflow.com/questions/2077897/substitute-multiple-whitespace-with-single-whitespace-in-python\n text=re.sub(' +', ' ', text).strip()\n return text", "def initial_clean(text):\n text = re.sub(\"[^a-zA-Z ]\", \"\", text)\n text = text.lower() # lower case text\n text = nltk.word_tokenize(text)\n return (text)", "def tokenize(text):\n return text.split(' ')", "def text_clean(text):\n out = []\n # Define a punctuation dictionary so that we can replace each punctuation with an empty space.\n table = str.maketrans('', '', string.punctuation)\n stopWords = set(stopwords.words('senti')) # Set stop words language to English\n tokens = text.split() # Split each tweet into list of words.\n tokens = filter(lambda x: x[0] != '@', tokens) # Remove mentions\n tokens = [word.translate(table) for word in tokens] # Remove punctuation marks\n tokens = [word for word in tokens if word.isalpha()] # Remove any word that is not completely alphabetic.\n tokens = [word for word in tokens if len(word) > 1] # Remove any word that is shorter than two letters\n tokens = [word.lower() for word in tokens]\n tokens = [word for word in tokens if not word in stopWords] # Remove any stopwords\n token = \"\"\n for i in tokens:\n token += (i + \" \")\n out.append(token)\n return out", "def clean_text(text):\n text = text.lower() # Convert the text to lower case\n text = re.sub(\",\", \" \", text) # Replace commas with an extra space\n\n text = re.sub(\"<.*?>\", \"\", text) # Clean out any HTML 
tags\n text = re.sub(\"\\s+\", \" \", text) # Replace multiple spaces with\n\n text = text.split()\n\n text = [\n re.sub(\"[^\\w]\", \"\", i.rstrip()) for i in text if i not in all_stopwords\n ] # Clean out stopwords\n\n # text = engStem.stemWords(text)# English Stemming\n\n text = \" \".join(text)\n return text", "def tokenize(text):\n tokens=word_tokenize(text)\n lemmatizer=WordNetLemmatizer()\n \n clean_tokens=[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens\n pass", "def tokenize(text):\n text = re.sub(r\"[^a-zA-Z0-9]+\", \" \", text.lower())\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def clean_text(text):\n\n lemmizer = WordNetLemmatizer()\n stemmer = porter.PorterStemmer()\n\n stop = stopwords.words('english')\n stop += ['.', ',', ':', '...', '!\"', '?\"', \"'\", '\"', ' - ', ' — ', ',\"', '.\"', '!', ';', '♫♫', '♫', \\\n '.\\'\"', '[', ']', '—', \".\\'\", 'ok', 'okay', 'yeah', 'ya', 'stuff', ' 000 ', ' em ', \\\n ' oh ', 'thank', 'thanks', 'la', 'was', 'wa', '?', 'like', 'go', ' le ', ' ca ', ' I ', \" ? \", \"s\", \" t \",\n \"ve\", \"re\"]\n # stop = set(stop)\n\n cleaned_text = []\n\n for post in text:\n cleaned_words = []\n\n # remove parentheticals\n clean_parens = re.sub(r'\\([^)]*\\)', ' ', post)\n\n #clean_parens = [line.decode('utf-8').strip() for line in clean_parens]\n\n # tokenize into words\n for word in wordpunct_tokenize(clean_parens):\n\n\n # lowercase and throw out any words in stop words\n if word.lower() not in stop:\n\n # lemmatize to roots\n low_word = lemmizer.lemmatize(word)\n\n # stem and lowercase ( an alternative to lemmatize)\n # low_word = stemmer.stem(root.lower())\n\n # keep if not in stopwords (yes, again)\n if low_word.lower() not in stop:\n # put into a list of words for each document\n cleaned_words.append(low_word.lower())\n\n # keep corpus of cleaned words for each document\n cleaned_text.append(' '.join(cleaned_words))\n\n\n return cleaned_text", "def clean(text):\n\n # removing paragraph numbers\n text = re.sub('[0-9]+.\\t', '', str(text))\n # removing new line characters\n text = re.sub('\\n ', ' ', str(text))\n text = re.sub('\\n', ' ', str(text))\n # removing apostrophes\n text = re.sub(\"'s\", '', str(text))\n # removing hyphens\n text = re.sub(\"-\", '', str(text))\n text = re.sub(\"— \", '', str(text))\n # removing quotation marks\n text = re.sub('\\\"', '', str(text))\n # removing salutations\n text = re.sub(\"Mr\\.\", 'Mr', str(text))\n text = re.sub(\"Mrs\\.\", 'Mrs', str(text))\n # removing any reference to outside text\n text = re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", str(text))\n\n return text", "def preprocessing(raw_text):\n words_list = tokenize(raw_text)\n words_list = remove_stop_words(words_list)\n words_list = remove_punctuations(words_list)\n words_list = lemmatization(words_list)\n return words_list", "def preprocess_training_text(text, accented_chars=True, \n convert_num=False, extra_whitespace=True, \n lemmatization=True, lowercase=True, punctuations=True,\n remove_html=True, remove_num=True, special_chars=True, \n 
stop_words=True):\n \n\n \"\"\"preprocess text with default option set to true for all steps\"\"\"\n if remove_html == True: #remove html tags\n text = strip_html_tags(text)\n if extra_whitespace == True: #remove extra whitespaces\n text = remove_whitespace(text)\n if accented_chars == True: #remove accented characters\n text = remove_accented_chars(text)\n if lowercase == True: #convert all characters to lowercase\n text = text.lower()\n \n \n doc = nlp(text) #tokenise text\n\n\n clean_text = []\n for token in doc:\n flag = True\n edit = token.text\n # print(\"Word: \", edit, \" Type: \", token.pos_)\n # remove stop words\n if stop_words == True and token.is_stop and token.pos_ != 'NUM': \n flag = False\n # remove punctuations\n if punctuations == True and (token.pos_ == 'PUNCT') and flag == True: \n flag = False\n \n # remove 'X' characters:\n if token.pos_ == 'X':\n flag = False\n # remove special characters\n if special_chars == True and token.pos_ == 'SYM' and flag == True: \n flag = False\n # remove numbers\n if remove_num == True and (token.pos_ == 'NUM' or token.text.isnumeric()) \\\n and flag == True:\n flag = False\n # convert number words to numeric numbers\n if convert_num == True and token.pos_ == 'NUM' and flag == True:\n edit = w2n.word_to_num(token.text)\n # convert tokens to base form\n elif lemmatization == True and token.lemma_ != \"-PRON-\" and flag == True:\n edit = token.lemma_\n # append tokens edited and not removed to list \n if edit != \"\" and flag == True:\n clean_text.append(edit)\n \n # Convert back to string:\n new_text = ' '.join(clean_text)\n regex = re.compile('[^a-zA-Z]')\n new_text = regex.sub(' ', new_text)\n words = re.findall(r'\\w+.', new_text)\n return ' '.join(words)", "def tokenize(text):\n \n #regular expression to avoid pucntuations or any special character\n tokenizer = nltk.RegexpTokenizer(r\"\\w+\")\n \n #tokenizing text\n tokens = tokenizer.tokenize(text)\n \n #initiating lemmatizer\n lemmatizer = WordNetLemmatizer()\n \n #iteratating through each token\n clean_tokens = []\n for tok in tokens:\n \n #stop words are irrelevant in this context of classifying response\n if (tok.lower() not in stopwords.words(\"english\")):\n \n # lemmatizing, normalizing case, and removing leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def tokenize(text):\n \n #regular expression to avoid pucntuations or any special character\n tokenizer = nltk.RegexpTokenizer(r\"\\w+\")\n \n #tokenizing text\n tokens = tokenizer.tokenize(text)\n \n #initiating lemmatizer\n lemmatizer = WordNetLemmatizer()\n \n #iteratating through each token\n clean_tokens = []\n for tok in tokens:\n \n #stop words are irrelevant in this context of classifying response\n if (tok.lower() not in stopwords.words(\"english\")):\n \n # lemmatizing, normalizing case, and removing leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def clean_text(data):\r\n data = data.replace('\\n', ' ') #remove new lines\r\n replace_l = [\"'\",'!','/','\\\\','=',',',':', '<','>','?','.','\"',')','(','|','-','#','*','+', '_'] #list of characters to remove\r\n data = data.lower() #Convert all the words to lower case\r\n for i in replace_l:\r\n data = data.replace(i,' ') #replace words with blank character\r\n return data #return clean data\r", "def tokenize(self, text):\n # Ignore non-ASCII characters.\n text = 
remove_non_ascii(text)\n text = text.translate(Tokenizer.trans)\n tokens = [t for t in text.split() \n if len(t) >= self._config[u'min_len']\n and t not in self._config[u'stopwords']]\n self._counter.update(tokens)", "def sanitize_text(tokens, stopwords=None):\n\n tokens = [x.lower() for x in tokens]\n regex = re.compile('[^a-z]')\n\n for index in range(len(tokens)):\n tokens[index] = regex.sub('', tokens[index])\n if stopwords and tokens[index] in stopwords:\n tokens[index] = ''\n\n # remove empty elements\n tokens = [token for token in tokens if token != '']\n return tokens", "def process_text(text):\n no_split_dict = {'u . s': 'u.s', 'u . n': 'u.n', 'u . k': 'u.k', 'l . a': 'l.a', 'j . k': 'j.k', 'a . m': 'a.m',\n 'p . m': 'p.m', 'd . j': 'd.j', 'd . a': 'd.a'}\n\n text = re.sub(\".*--\", \"\", text, count=1) # Removing cnn from start of text\n if text.startswith('(CNN)'): # Remove cnn from articles that starts with only cnn\n text = re.sub('\\(CNN\\)', '', text, count=1)\n text = re.sub(r'(?<=[^?!.0-9])(?=[.,!?])', ' ', text) # 4\n text = re.sub(r'(?![0-9])(?<=[.,])(?=[^\\s])', r' ', text) # 4\n text = text.lower() # 2\n text = re.sub('[^A-Za-z0-9 .!?,øæå]+', '', text) # 3\n text = re.sub(r'((?<=[a-z])(?=[.]))|((?=[a-z])(?<=[.]))(?=[^\\s])', r' ', text) # space a-z.a-z\n text = re.sub(r'((?=[0-9])(?<=[a-z]))|((?=[a-z])(?<=[0-9]))(?=[^\\s])', r' ', text) # space 0-9a-z\n for key in no_split_dict:\n text = text.replace(key, no_split_dict[key]) # Fixing word splits\n text = re.sub('[0-9]', '#', text) # 8\n text = \" \".join(text.split()) # 5, 6, 7 - i think\n return text", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n \n return clean_tokens", "def clean_text(text):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? \", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text.strip().lower()", "def naive(self, text):\n\t\t#print(text)\n\t\ttokenizedText = []\n\t\tfor k in text: #look at each entity in one sentence\n\t\t\t\n\t\t\ta = \"\"#stores the current word \n\t\t\trun = []; #appends all words in a particular sentence\n\t\t\tfor i in range(len(k)):\n\t\t\t\t\n\t\t\t\tif(k[i] == ' ' or k[i] == '\t'): #tokenization at space or tab\n\t\t\t\t\t\n\t\t\t\t\tif(a!=\"\"):\n\t\t\t\t\t\tif(a[-1] == ',' or a[-1] == '-' or a[-1] == \"\\'\" or a[-1] == \";\" or a[-1] == \":\" or a[-1] ==\"!\" or a[-1] == \"?\" or a[-1] ==\"\\\"\") : #but remove mentioned punctuations from the end of the word, if present\n\t\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):#remove starting quotes\n\t\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\telif(i == len(k)-1): #remove the last punctuation mark, if present\n\t\t\t\t\t\n\t\t\t\t\ta = a+k[i];\n\t\t\t\t\t\n\t\t\t\t\tif(a[-1] == '.' 
or a[-1] == '\\\"' or a[-1] ==\"!\" or a[-1] == \"?\" or a[-1] ==\"\\'\" ):\n\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):\n\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\n\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\telse:\n\t\t\t\t\t\n\t\t\t\t\tif((k[i] == ',' or k[i] == ':' or k[i] == ';') and k[i+1]!= ' ' ): # for other punctuation marks followed by a space\n\t\t\t\t\t\t#print(k[i-1])\n\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\tif(a[-1] == '\\\"' or a[-1] ==\"!\" or a[-1] == \"?\" ):\n\t\t\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):\n\t\t\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\ta = a+k[i];\n\n\t\t\ttokenizedText.append(run)\t\t\n\n\t\t\n\t\t\t\n\n\n\n\n\t\t#Fill in code here\n\n\t\treturn tokenizedText", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "def preprocess(self, sentence, vocab_set=None):\n tokens = sentence.split()\n new_tokens = []\n for token in tokens:\n new_tokens += self.__clean(token)\n tokens = new_tokens\n\n tokens = self.__normalize_document(' '.join(tokens))\n\n return tokens", "def _preprocess_text(text: str) -> List[List[str]]:\n\n # replace all except characters_to_keep with space\n characters_to_keep = '[^\\n:-äÄöÖåÅA-Za-z0-9]'\n text = re.sub(characters_to_keep,' ', text )\n\n # split the whole text to list of strings\n sentences = text.splitlines()\n\n # split each string further to list of words\n sentences = [sentence.split(' ') for sentence in sentences if sentence.strip()]\n\n words = _analyze_sentences(sentences)\n return words", "def CleanText(text):\n\n pretty_issue = text.lower().strip()\n\n quoteless_issue = re.sub('\\'', '', pretty_issue)\n no_punctuation_issue = re.sub('[^\\w\\s]|_+', ' ', quoteless_issue)\n one_space_issue = ' '.join(no_punctuation_issue.split())\n\n return one_space_issue", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def tokenize(self, text):\n text = self._clean_text(text)\n text = self._tokenize_chinese_chars(text)\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case and token not in self.never_split:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n output_tokens = whitespace_tokenize(' '.join(split_tokens))\n return output_tokens", "def processes_and_tokenize(raw_document):\n\ttokenizer = RegexpTokenizer(r'\\w+')\n\ttokens = tokenizer.tokenize(raw_document.lower())\t\t# tokens = nltk.word_tokenize(corpus.lower()) # without removing punctiation\n\n\t#remove stop words\n\tstop_words = set(nltk.corpus.stopwords.words('english'))\n\t#stop_words = set(stopwords.words('english'))\n\tfiltered_tokens = [w for w in tokens if not w in stop_words]\n\treturn filtered_tokens", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", 
"def clean(text):\n\n lower_proper = src.utils.nlp.lower_with_proper(text)\n lemmas = src.utils.nlp.lemmatize(lower_proper)\n cleaned = src.utils.nlp.clean_stopwords_punctuation(lemmas)\n return cleaned", "def cleanup(text):\n\n\tRE_D = re.compile('\\d')\n\n\ttokens = text.split()\n\tnew_tokens = list()\n\tfor t in tokens:\n\t\tif RE_D.search(t):\n\t\t\tcontinue\n\t\tfor p in string.punctuation:\n\t\t\tif p == \".\":\n\t\t\t\tcontinue\n\t\t\tt=t.replace(p,\"\")\n\t\tnew_tokens.append(t.lower().strip())\n\n\treturn \" \".join(new_tokens)", "def preprocess(self,text):\n return preprocess.get_tokens(text)", "def preProcess(text):\n\ttext = text.lower() # lower case the text\n\t# Q4 replace the word with expanded contractions\n\tfor k,v in general_contraction.items():\n\t\tif k in text.split():\n\t\t\ttext = text.replace(k,v)\n\t# Q4 remove speacial char including all puncuattions and replace it with a space\n\ttext = re.sub('[^A-Za-z0-9]+',' ',text) \n\t# tokenise\n\ttokens = text.split()\n\t# stop word removal\n\ttokens = [w for w in tokens if w not in stopwords ]\n\t# Q4 Stemming\n\ttokens = [str(porter.stem(w)) for w in tokens]\n\t# if word is non-english return its english form # too much time-complexity\n\t# tokens = [porter.stem(w) if porter.stem(w) in set(words.words()) else w for w in tokens ]\n\t# for words having digits such as 12gb, 1st, etc expanding the token list\n\tfor k in tokens:\n\t\tif len(k) >2 and re.match(r'[0-9]+',k):\t\t\t\n\t\t\tif len(k) >2 and not k.isdigit():\n\t\t\t\tl = re.split(r'(\\d+)',k)\n\t\t\t\tl = [w for w in l if w is not '' ]\n\t\t\t\tif l and len(l) <= 3:\n\t\t\t\t\tfor i in l:\n\t\t\t\t\t\tif i in digit_contractions.keys():\n\t\t\t\t\t\t\tl = list(map(lambda b: b.replace(i,digit_contractions[i]), l))\n\t\t\t\t\ttokens.remove(k)\n\t\t\t\t\ttokens = tokens+l\n\t\t\t\telse:\n\t\t\t\t\ttokens.remove(k)\n\tfor k,v in digit_contractions.items():\n\t\tif k in tokens:\n\t\t\tif tokens[tokens.index(k)-1].isdigit():\t\n\t\t\t\ttokens = list(map(lambda b: b.replace(k,v), tokens))\n\t# remove tokens of size less than 2\n\ttokens = [t for t in tokens if len(t) > 2]\n\treturn tokens", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n\n return clean_tokens", "def clean_text(text):\n\n\n regex = re.compile('[\\.|\\-|\\,|\\?|\\_|\\:|\\\"|\\)|\\(\\)\\/|\\\\|\\>|\\<]')\n text = text.lower() # Turn everything to lower case\n text = regex.sub(' ', text).strip()\n out = re.sub(' +', ' ', text) # Reduce whitespace down to one\n \n return out", "def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n \n clean_tokens =[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def clean_text(txt):\n\n for symbol in \"\"\".,'?!()/-:;\"\"\":\n txt = txt.replace(symbol, '')\n txt = txt.lower()\n txt = txt.split()\n return txt", "def preprocess_text(text):\n # replace non characers with space and lower case\n temp = re.sub(r\"[/W/D/S.,-]+\", \" \", str(text).lower())\n # merge multiple spaces to a single one\n return re.sub(r\"[ ]+\", \" \", temp)", "def clean_data(self, data):\r\n data=data.lower()\r\n doc=nlp(data, disable=['parser', 'ner'])\r\n \r\n #Removing stopwords, digits and punctuation from data\r\n tokens = [token.lemma_ for token in doc if not (token.is_stop\r\n or 
token.is_digit\r\n or token.is_punct\r\n )]\r\n \r\n tokens = \" \".join(tokens)\r\n return tokens", "def text_prepare(text):\r\n\r\n replace_by_space_re = re.compile('[/(){}\\[\\]\\|@,;]')\r\n good_symbols_re = re.compile('[^0-9a-z #+_]')\r\n stopwords_set = set(stopwords.words('english'))\r\n\r\n text = text.lower()\r\n text = replace_by_space_re.sub(' ', text)\r\n text = good_symbols_re.sub('', text)\r\n text = ' '.join([x for x in text.split() if x and x not in stopwords_set])\r\n\r\n return text.strip()", "def tokenize(self, text):\n text = convert_to_unicode(text)\n text = self.clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._add_space_around_cjk_chars(text)\n\n orig_tokens = split_by_whitespace(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = remove_accents(token)\n split_tokens.extend(split_by_punctuation(token))\n\n output_tokens = split_by_whitespace(\" \".join(split_tokens))\n return output_tokens", "def clean_text(input: str) -> str:\n # 1. REMOVE ARTIFACTS\n cleaned_text = remove_nested_parentheses(input)\n # Remove section headings\n cleaned_text = re.sub(r'={2,}.*?={2,}', '', cleaned_text)\n\n # 2. REFORMAT REMAINING TEXT\n # Remove duplicate white spaces\n cleaned_text = \" \".join(cleaned_text.split()).strip()\n # Remove white space before comma - left by removal of other content\n cleaned_text = cleaned_text.replace(' , ', ', ')\n # Separate joined sentences eg \"end of one.Start of another\"\n # Only perform this when a new sentence starts with a capitalized word\n # will not catch sentences starting with single letters.\n cleaned_text = re.sub(r'\\.([A-Z][a-z]+)', r'. 
\\1', cleaned_text)\n\n return cleaned_text", "def clean_raw_data(self, text):\r\n return [token.lower() for token in nltk.word_tokenize(text)\r\n if token not in self.stop_words and token not in punctuation]", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "def cleanText(text):\n try:\n text = str(text)\n\n # remove contactions and stop words\n text = contractions(text)\n # remove html entities\n cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\n new_text = cleanr.sub('', text.strip())\n return re.sub(r'\\s+', ' ', re.sub(r'\\W+', \" \", new_text))\n # TAG_RE = re.compile(r'<[^>]+>')\n except:\n print(\"An exception occurred with: \" + text)\n return str(text)", "def tokenize(text):\n\n # Replace URLs\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n \n detected_urls = re.findall(url_regex, text)\n for url in detected_urls:\n text = text.replace(url, \"urlplaceholder\")\n\n # Remove non alphanumeric characters\n text = re.sub(pattern=r'[^A-Za-z0-9]+',repl=' ', string=text.lower().strip())\n \n # Tokenize words\n tokens = word_tokenize(text)\n \n # Remove stop words\n stop_words = set(stopwords.words('english'))\n filtered_tokens = [w for w in tokens if not w in stop_words]\n \n lemmatizer = WordNetLemmatizer()\n \n clean_tokens = []\n for token in filtered_tokens:\n new_token = lemmatizer.lemmatize(token)\n clean_tokens.append(new_token)\n \n return clean_tokens", "def tokenize(text):\n \n tokens = word_tokenize(text)\n \n STOPWORDS = list(set(stopwords.words('english')))\n # remove short words\n tokens = [token for token in tokens if len(token) > 2]\n # remove stopwords\n tokens = [token for token in tokens if token not in STOPWORDS]\n \n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def process_text(text, stem=True):\n exclude = set(string.punctuation)\n text = ''.join(ch for ch in text if ch not in exclude)\n #text = text.translate(None, string.punctuation)\n tokens = word_tokenize(text)\n if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n return tokens", "def tokenize(text):\n #Clean data, remove all character except character and number,such as punctuation etc.\n text = re.sub(r'[^a-zA-Z0-9]', ' ', text.lower())\n tokens = word_tokenize(text)\n tokens = [WordNetLemmatizer().lemmatize(word) for word in tokens if word not in ST_english]\n return tokens", "def cleaning(self, document):\n remove_punct = ''.join(i for i in document.lower() if i not in self.punctuation)\n tokenized = [i for i in remove_punct.split() if i not in self.stopwords]\n if self.lang is not 'chinese':\n # Lemmatizes if not chinese\n tokenized = [self.lemmatize.lemmatize(i) for i in tokenized]\n return tokenized", "def tokenize(self, text):\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. 
This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n if self.split_on_punc:\n split_tokens.extend(self._run_split_on_punc(token))\n else:\n split_tokens.append(token) # pragma: no cover\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def tokenize(self, text):\n text = utils.convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens", "def preprocess_sent(sent):\n #tokenized = word_tokenize(sent.lower())\n tokenizer = Tok()\n tokenized = tokenizer.tokenize(sent.lower())\n return tokenized", "def tokenize(self, text):\n # text = convert_to_unicode(text)\n\n output_tokens = []\n for token in split_by_whitespace(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens", "def pre_process(text):\n # replace (,.'\") with ''\n text = text.replace(',', '')\n text = text.replace('.', '')\n text = text.replace(\"'\", '')\n text = text.replace(\"\\\"\", '')\n\n # tokenize into words\n tokens = [word for sent in sent_tokenize(text) for word in word_tokenize(sent)]\n\n # remove stopwords\n stop = stopwords.words('english')\n tokens = [token for token in tokens if token not in stop]\n\n # remove words less than three letters\n tokens = 
[word for word in tokens if len(word) >= 3]\n\n # lower capitalization\n tokens = [word.lower() for word in tokens]\n\n # lemmatize\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(word) for word in tokens]\n\n return tokens", "def clean_text(self, text):\n words = SPLIT_TEXT.findall(text.lower())\n words = self.rm_stop_words(words)\n words = self.stem_words(words)\n return words", "def preprocess_text(self, input_text):\n input_text = self.clean_text(input_text)\n tokenization_list = self.tokenize_text(input_text)\n index_list = self.replace_token_with_index(tokenization_list, self.max_length_dictionary)\n index_list = self.pad_sequence(index_list, self.max_length_tweet)\n return index_list", "def _tokenize(self, text: str) -> List[str]:\n text = text.lower().strip()\n return self.bpe.tokenize(text)", "def clean_stopwords(text):\n tokens = tokenize(text)\n tokens = stopwordsRem(tokens)\n return tokens", "def tokenize(self, text):\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n ### joonho.lim @ 2019-03-15\n # if start > 0:\n # substr = \"##\" + substr\n # print ( '[substr]\\t%s\\t%s\\t%d\\t%d' % ( substr, substr in self.vocab, start, end))\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n output_tokens.insert(0, '[CLS]')\n output_tokens.append('[SEP]')\n return output_tokens", "def detokenize(tokens):\n pass", "def clean_whitespace(text):\n return text\n #return re.sub(r'\\r\\n|\\n', \"\\t\", text)", "def process_text(document):\n return preprocess_string(document,\n filters=[strip_tags, strip_punctuation,\n strip_multiple_whitespaces,\n strip_numeric, remove_stopwords,\n strip_short]\n )", "def detokenize(self, text):\n text = ' ' + text + ' '\n text = self._dash_fixes.sub(r' \\1-\\2 ', text)\n text = self._dash_fixes2.sub(r' \\1-\\2 ', text)\n text = self._currency_or_init_punct.sub(r' \\1', text)\n text = self._noprespace_punct.sub(r'\\1 ', text)\n text = self._contract.sub(r\" \\1'\\2\", text)\n text = self._contractions.sub(r\"\\1\", text)\n text = self._esses.sub(r\"s \", text)\n text = self.moses_detokenizer.detokenize(text.split())\n text = text.strip()\n # capitalize\n if not text:\n return ''\n return text", "def clean_text(some_text):\n # import re\n some_clean_text = re.sub(r'\\n|\\t', '', some_text) # Remove new line and tabs\n some_clean_text = re.sub(' +', ' ', some_clean_text) # Replace multiple spaces with one space\n return some_clean_text", "def set_clean_raw_text(raw_text):\n\tlogger.debug('Cleaning Text')\n\n\t#tokenize and lower sentence\n\ttokenizer = RegexpTokenizer(r'\\w+')\n\ttokens = tokenizer.tokenize(raw_text.lower())\t\t# tokens = nltk.word_tokenize(corpus.lower()) # without removing punctiation\n\n\t#remove stop words\n\ttokens = [w for w in tokens if not is_stopword(w)]\n\n\t#remove punctuation\n\ttokens = [w for w in tokens if not is_punctuation(w)]\n\n\t#remove short \n\ttokens = [w for w in tokens if not is_shorter(w)]\n\n\t#remove number\n\ttokens = [w for w in tokens if not 
is_number(w)]\n\n\t#stem words\n\ttokens = map(stem, tokens)\n\n\tlogger.debug('Cleaning Text Complete')\n\treturn set(tokens)", "def tokenize(self, text):\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token) # pragma: no cover\n continue # pragma: no cover\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr # pragma: no cover\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1 # pragma: no cover\n if cur_substr is None:\n is_bad = True # pragma: no cover\n break # pragma: no cover\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token) # pragma: no cover\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens", "def process_text(text):\n text = re.sub(r'<@>\\s+|<s>\\s+|</s>\\s+|<p>\\s+|</p>\\s+|\\s+\\,|\\'s|\\'|\\;|\\(|\\)|\\-\\-\\s+|\\s+\\.', '', text)\n text = re.sub(r'\\.\\,', '. ,', text)\n text = re.sub(r'\\,', '', text)\n text = re.sub(r'\\$', '$ ', text)\n text = re.sub(r'\\%', ' %', text)\n text = re.sub(r'\\s\\\"\\s', ' ', text)\n text = re.sub(r'\\.\\s+', '. ', text)\n text = text.lower()\n return text", "def txt(input):\n output=atpic.cleaner_alex.txtclean(input)\n return output", "def process_text(text, stemmer=SnowballStemmer(\"english\"), min_length=3):\n text = text.lower()\n text = re.sub('dictated.*', '', text, flags=re.MULTILINE|re.DOTALL)\n text = re.sub('.*:\\s+', '', text)\n text = re.sub('\\n', ' ', text)\n text = re.sub('\\[.*?\\]', '', text)\n text = re.sub('\\s\\s+', ' ', text)\n text = re.sub('[,.]', '', text)\n text = re.sub('[/-]', ' ', text)\n tokens = word_tokenize(text)\n return \" \".join([stemmer.stem(t) for t in tokens if t not in stop_words\n and len(t) >= min_length])", "def normalize_text_sentences(text,pad_punc='!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~',remove_punc='!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~'):\n normalize_text_list=[]\n for sent in list(sent_tokenize(text)):\n normalize_text_list.append(normalize_text(sent,pad_punc=pad_punc,remove_punc=remove_punc))\n return normalize_text_list", "def clean_text(text):\n text = str(text).lower()\n text = text.strip(string.punctuation)\n text = re.sub(\"&amp;\", '', text)\n text = re.sub(\"https\", '', text)\n text = re.sub('\\W\\s', '', text)\n text = re.sub('\\s,\\W', '', text)\n text = re.sub('[.!@#$%^&*()_,:;/-]', '', text)\n text = re.sub(\"\\d+\", '', text)\n\n return text", "def sanitise(text: str):\n # Removes new lines, weird characters and dialogue\n text = \" \" + text + \" \"\n\n lined_text = text.split(\"\\n\")\n text = \"\"\n # Remove dialogue\n for line in lined_text:\n if \":\" in line:\n if line.index(\":\") < 15:\n index = line.index(\":\") + 1\n else:\n index = 0\n else:\n index = 0\n text = text + \"\\n\" + line[index:]\n\n # Lower case everything\n text = text.lower()\n\n text = text.replace(\"'s\", \" is\")\n text = text.replace(\"'ve\", \" have\")\n text = text.replace(\"n't\", \" not\")\n text = text.replace(\"I'm\", \"I am\")\n text = text.replace(\"'re\", \" are\")\n text = text.replace(\"’s\", \" is\")\n text = text.replace(\"’ve\", \" have\")\n text = text.replace(\"n’t\", \" not\")\n text = text.replace(\"I’m\", \"I am\")\n text = text.replace(\"’re\", \" are\")\n\n # Remove weird characters 
and double spaces\n weird_characters = [\".\", \",\", \"?\", \"!\", \"'\", \"’\", \"\\\"\", \"\\n\", \"\\t\", \"-\", \"/\", \"[\", \"]\", \"(\", \")\", \":\", \"“\", \"”\"]\n for weird_character in weird_characters:\n text = text.replace(weird_character, \" \")\n\n while \" \" in text:\n text = text.replace(\" \", \" \")\n\n return text", "def preprocess(self, text):\r\n return text", "def preprocess(text):\r\n\r\n #Regex to remove URL and @ symbol\r\n regex = '@\\S*|http\\S*|www\\S*'\r\n preprocessed_text = re.sub(regex, '', text)\r\n preprocessed_text = deEmojify(preprocessed_text)\r\n preprocessed_text = strip_html(preprocessed_text)\r\n\r\n return preprocessed_text" ]
[ "0.75570065", "0.7090177", "0.7041805", "0.7033546", "0.6981678", "0.696288", "0.6919464", "0.691603", "0.6914198", "0.6900118", "0.6900118", "0.6900118", "0.6900118", "0.6900118", "0.6900118", "0.68975586", "0.6874092", "0.6874056", "0.6841664", "0.6819344", "0.6789257", "0.6749671", "0.67386216", "0.67343014", "0.67244786", "0.66964036", "0.66885895", "0.6679567", "0.66654545", "0.66498166", "0.66474956", "0.66444635", "0.66377246", "0.66299635", "0.6621089", "0.661687", "0.66069824", "0.66069824", "0.6594496", "0.6589167", "0.65867716", "0.65841645", "0.6581446", "0.6580692", "0.65779036", "0.6574262", "0.6574052", "0.65717846", "0.65554243", "0.65430444", "0.65410256", "0.65406126", "0.6538321", "0.6535239", "0.65271544", "0.65218604", "0.65172064", "0.65126777", "0.6511492", "0.65042025", "0.6496269", "0.64777786", "0.64678335", "0.6455111", "0.64544284", "0.64404696", "0.64388573", "0.64368653", "0.64323115", "0.6429918", "0.6426472", "0.64243144", "0.64165", "0.64002585", "0.6392665", "0.6386849", "0.63855904", "0.63809115", "0.63695157", "0.6362744", "0.6356027", "0.63540846", "0.6353814", "0.6352392", "0.6350395", "0.63445425", "0.63420117", "0.6340927", "0.6340844", "0.6330178", "0.632371", "0.6318466", "0.6318379", "0.6310673", "0.6307518", "0.63065076", "0.6302815", "0.63005567", "0.62982804", "0.6279196", "0.6276898" ]
0.0
-1
This function evaluates the new model and generates a classification report containing precision, recall, fscore and accuracy information for individual classes.
def evaluate_model(model, X_test, Y_test, category_names):
    y_pred = model.predict(X_test)
    for x in range(0, len(category_names)):
        print(category_names[x])
        print(classification_report(Y_test[:,x], y_pred[:,x]))
        print("Accuracy: " + str(accuracy_score(Y_test[:, x], y_pred[:, x])))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_model(model, X_test, Y_test, category_names):\n# Print out Precision , recall F1_score and support for each column using classification_report function\n y_pred_test = model.predict(X_test)\n print(classification_report(Y_test, y_pred_test, target_names=category_names))", "def evaluate_model(model, X_test, y_test, category_names):\n y_pred = model.predict(X_test)\n labels = np.unique(y_pred)\n print(labels)\n #print out score for each class and mean scores, including precision, recall, f1 score\n print(classification_report(y_test.values, y_pred, target_names=category_names.values))", "def mgcEval(self):\n import numpy as np\n def report_to_df(report):\n\n \"\"\"\n function to convert classification report to dataframe (for visualisation plot)\n \"\"\"\n\n report = re.sub(r\" +\", \" \", report).replace(\"avg / total\", \"avg/total\").replace(\"\\n \", \"\\n\")\n # update this due to sklearn classification report output change\n report = re.sub(r\" +\", \" \", report).replace(\"micro avg\", \"micro_avg\").replace(\"macro avg\", \"macro_avg\").replace(\"weighted avg\", \"weighted_avg\").replace(\"\\n \", \"\\n\")\n report_df = pd.read_csv(StringIO(\"Classes\" + report), sep=' ', index_col=0) \n return(report_df)\n \n #txt report to df\n class_rpttop1 = classification_report(self.y_true, self.y_pred)\n df_report = report_to_df(class_rpttop1)\n\n df_report = df_report.iloc[:self.nb_classes, :].copy()\n df_report.index = df_report.index.astype(int)\n \n\n # classifier prediction metrics\n def classMetrics(averagex):\n precision, recall, fscore, support = score(self.y_true, self.y_pred, average=averagex)\n \n return(\n print(''), \n print('-------------{0:}--------------------'.format(averagex)), \n print('precision: {0:.4f}'.format(precision)),\n print('recall: {0:.4f}'.format(recall)),\n print('fscore: {0:.4f}'.format(fscore)),\n print(''),\n print('kappa score: {0:.4f}'.format(cohen_kappa_score(self.y_true, self.y_pred))),\n print('accuracy score: {0:.4f}'.format(accuracy_score(self.y_true, self.y_pred))))\n \n def predSamp():\n\n correct = np.nonzero(self.y_pred==self.y_true)[0]\n incorrect = np.nonzero(self.y_pred!=self.y_true)[0]\n\n # quick check of the number of correct prediction from validation set\n print(\"\")\n print(\"correct/total = {0: .4f}\".format(len(correct)/(len(correct)+len(incorrect))))\n print(\"total correct sample = {0: .0f}\".format(len(correct)))\n print('------------------------------------------------------------------')\n \n def classReport():\n print('----------------------------- Classfication Report -------------------------------')\n print(classification_report(pd.Series(self.y_true).map(self.dict_label), pd.Series(self.y_pred).map(self.dict_label)))\n \n self.class_rpt = pd.concat([pd.DataFrame(pd.Series(df_report.index.tolist()).map(self.dict_label), columns = ['label']), df_report], axis = 1)\n \n self.classMetricsMac = classMetrics(\"macro\")\n self.classMetricsMic = classMetrics(\"micro\")\n self.predSample = predSamp()\n self.class_rptTop1 = classReport()\n \n return self", "def evaluate_model(model, X_train, y_train, X_test, y_test):\n model = model\n model.fit(X_train, y_train)\n\n y_pred = model.predict(X_test)\n\n report = classificationreport(y_test, y_pred, target_names= [\"0\", \"1\"], output_dict=True)\n\n return report", "def testClassifier(x_train, y_train, x_test, y_test, clf):\n #metrics = []\n start = dt.now()\n clf.fit(x_train, y_train)\n end = dt.now()\n print 'training time: ', (end - start)\n \n # add training time to 
metrics\n #metrics.append(end-start)\n \n start = dt.now()\n yhat = clf.predict(x_test)\n end = dt.now()\n print 'testing time: ', (end - start)\n \n # add testing time to metrics\n #metrics.append(end-start)\n \n print 'classification report: '\n# print classification_report(y_test, yhat)\n pp(classification_report(y_test, yhat))\n \n print 'f1 score'\n print f1_score(y_test, yhat, average='macro')\n \n print 'accuracy score'\n accuracy = accuracy_score(y_test, yhat)\n print accuracy\n #metrics.append(accuracy)\n #precision = precision_score(y_test, yhat, average=None)\n #recall = recall_score(y_test, yhat, average=None)\n \n # add precision and recall values to metrics\n #for p, r in zip(precision, recall):\n # metrics.append(p)\n # metrics.append(r)\n \n \n #add macro-averaged F1 score to metrics\n #metrics.append(f1_score(y_test, yhat, average='macro'))\n \n print 'confusion matrix:'\n print confusion_matrix(y_test, yhat)\n \n # plot the confusion matrix\n plt.imshow(confusion_matrix(y_test, yhat), interpolation='nearest')\n plt.show()\n \n return accuracy", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n print(classification_report(Y_test, y_pred, target_names=Y_test.keys()))", "def evaluate_classifications(self):\n test_labels = open('./digitdata/testlabels', 'r')\n self.init_confusion_matrix()\n i = 0\n class_stats = {0:[0,0], 1:[0,0], 2:[0,0], 3:[0,0], 4:[0,0], 5:[0,0], 6:[0,0], 7:[0,0], 8:[0,0], 9:[0,0]}\n total_correct = 0\n num_labels = 1000\n for label in test_labels:\n int_label = int(label)\n if int_label == self.solutions[i]:\n class_stats[int_label][0] += 1\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n else:\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n class_stats[int_label][1] += 1\n i += 1\n for k in class_stats:\n print \"Class \" + str(k) + \": \" + str(float(class_stats[k][0])/class_stats[k][1])\n total_correct += float(class_stats[k][0])\n print \"Overall Accuracy: \" + str(total_correct/num_labels) \n for l in range(0,10):\n for w in range(0,10):\n self.confusion_matrix[l][w] = float(self.confusion_matrix[l][w]) / class_stats[l][1]\n \n s = [[str(e) for e in row] for row in self.confusion_matrix]\n lens = [len(max(col, key=len)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print '\\n'.join(table)\n #self.print_confusion_matrix() ", "def evaluate_test(model, history, class_labels, train_X, test_X, train_y, test_y):\n train_loss, train_acc = model.evaluate(train_X, train_y, verbose=0)\n test_loss, test_acc = model.evaluate(test_X, test_y, verbose=0)\n print('Accuracy \\n Train: %.3f, Test: %.3f' % (train_acc, test_acc))\n print('Loss \\n Train: %.3f, Test: %.3f \\n' % (train_loss, test_loss))\n # plot loss during training\n plt.subplots_adjust(hspace = .5, wspace = 0.5)\n plt.subplot(211)\n plt.title('Loss', weight='bold')\n plt.plot(history.history['loss'], label='train')\n plt.plot(history.history['val_loss'], label='val')\n plt.legend()\n # plot accuracy during training\n plt.subplot(212)\n plt.title('Accuracy', weight='bold')\n plt.plot(history.history['acc'], label='train')\n plt.plot(history.history['val_acc'], label='val')\n plt.legend()\n plt.show()\n print('\\n')\n # predict probabilities for test set\n yhat_probs = model.predict(test_X, verbose=0)\n # predict classes for test set\n yhat_classes = model.predict_classes(test_X, verbose=0)\n # reduce to 1d array\n yhat_probs = yhat_probs[:, 0]\n yhat_classes = 
yhat_classes[:, 0]\n # calculate metrics\n report = metrics.classification_report(test_y, yhat_classes, target_names=class_labels)\n confusion_matrix = metrics.confusion_matrix(test_y, yhat_classes)\n plot_confusion_matrix(confusion_matrix, class_labels)\n print('\\n')\n return report", "def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1", "def evaluate_model(self, model, testX_norm, testY_bin, batch_size, label_names, n_epochs, output_filename):\n # Predictions\n predictions = model.predict(testX_norm, batch_size=batch_size)\n \n # Classification report\n classification = classification_report(testY_bin.argmax(axis=1),\n predictions.argmax(axis=1),\n target_names=label_names)\n \n # Print classification report\n print(classification)\n \n # Save classification report\n output_path = os.path.join(\"..\", \"output\", output_filename)\n with open(output_path, 'w', encoding='utf-8') as f:\n f.write(f\"Below are the classification metrics for the trained model. Batch size = {batch_size} and number of epochs = {n_epochs}.\\n\\n {classification}\")", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n print(classification_report(Y_test, y_pred, target_names=category_names))\n pass", "def evaluate_model(y_pred, y_true, X_test, y_test, clf, target_names, X_train, y_train, print_scores = False, document=None, fname=None):\n if print_scores:\n ######################################################\n # accuracy\n print(\"Accuracy: \", accuracy_score(y_true, y_pred))\n ###################################################\n # balanced accuracy\n print(\"Balanced accuracy score: \", balanced_accuracy_score(y_true, y_pred))\n #########################\n # cohen_kappa_score\n \"\"\"\n The kappa score is a number between -1 and 1. 
Scores above .8 are generally considered good agreement; zero or lower means no agreement (practically random labels)\n \"\"\"\n print(\"cohen kappa score: \",cohen_kappa_score(y_true, y_pred), \"above 0.8 is good agreement\")\n ##############################\n # plot confusion matrix\n plot_confusion(clf, X_test, y_test, [\"HD\", \"WT\"])\n ####################################\n # classification report\n\n print(\"classification report: \\n\", classification_report(y_true, y_pred, target_names=target_names))\n #########################################\n # general metrics\n print(\"Precision: \",metrics.precision_score(y_true, y_pred, average=\"binary\", pos_label=\"HD\"))\n\n print(\"Recall:\", metrics.recall_score(y_true, y_pred, average=\"binary\", pos_label=\"HD\"))\n\n print(\"F1:\",metrics.f1_score(y_true, y_pred, average=\"binary\", pos_label=\"HD\"))\n\n print(\"F beta, beta-0.5\", metrics.fbeta_score(y_true, y_pred, beta=0.5,average=\"binary\", pos_label=\"HD\"))\n\n print(\"F beta, beta-1\",metrics.fbeta_score(y_true, y_pred, beta=1,average=\"binary\", pos_label=\"HD\"))\n\n print(\"F beta, beta-2\",metrics.fbeta_score(y_true, y_pred, beta=2,average=\"binary\", pos_label=\"HD\"))\n\n print(\"precision recall fscore support\", metrics.precision_recall_fscore_support(y_true, y_pred, beta=0.5,average=\"binary\", pos_label=\"HD\"))\n\n\n # ROC curve\n y_scores = clf.predict_proba(X_test)[:, 1]\n precision, recall, threshold = precision_recall_curve(y_true, y_scores, pos_label=\"HD\")\n\n\n print(\"Average precision score: \", average_precision_score(y_true, y_scores, pos_label=\"HD\"))\n\n if document is not None:\n if fname is None:\n raise NameError(\"Provide a filename to save this document\")\n document.add_heading(\"Test Metrics\", level=2)\n document.add_paragraph((\"Accuracy: {}\".format(accuracy_score(y_true, y_pred))), style = \"List Bullet\")\n document.add_paragraph((\"Balanced accuracy score: {}\".format(balanced_accuracy_score(y_true, y_pred))), style = \"List Bullet\")\n document.add_paragraph((\"Cohen kappa score: {} \".format(accuracy_score(y_true, y_pred))), style = \"List Bullet\")\n p=document.add_paragraph(\"\", style = \"List Bullet\")\n p.add_run('(The kappa score is a number between -1 and 1. 
Scores above .8 are generally considered good agreement; zero or lower means no agreement (practically random labels)).').italic = True\n\n\n # confusion matricies\n document.add_heading(\"Confusion Matrices\", level=2)\n\n np.set_printoptions(precision=2)\n\n # Plot confusion matrices\n titles_options = [(\"Confusion matrix, without normalization\", None),\n (\"Normalized confusion matrix\", 'true')]\n for title, normalize in titles_options:\n memfile = io.BytesIO()\n disp = plot_confusion_matrix(clf, X_test, y_test,\n display_labels=[\"HD\", \"WT\"],\n cmap=plt.cm.Blues,\n normalize=normalize)\n disp.ax_.set_title(title)\n\n plt.savefig(memfile)\n document.add_picture(memfile, width=Inches(5))\n memfile.close()\n\n # classification report\n document.add_heading(\"Classification report\", level=2)\n document.add_paragraph(\"{}\".format(classification_report(y_true, y_pred, target_names=target_names)))\n\n # Precision/recall\n document.add_heading(\"Precision/Recall Scores\", level=2)\n document.add_paragraph(\"Precision: {}\".format(metrics.precision_score(y_true, y_pred, average=\"binary\", pos_label=\"HD\")), style= \"List Bullet\")\n document.add_paragraph(\"Recall: {}\".format(metrics.recall_score(y_true, y_pred, average=\"binary\", pos_label=\"HD\")), style= \"List Bullet\")\n document.add_paragraph(\"F1 {}\".format(metrics.f1_score(y_true, y_pred, average=\"binary\", pos_label=\"HD\")), style= \"List Bullet\")\n\n # Decision boundaries plot\n\n document.add_heading(\"Decision Surface of model - training\")\n memfile = io.BytesIO()\n m = clf\n pca_clf = plot_decision_boundaries.DecisionBoundaries(model=m, name=fname).plot(X_train, y_train, memfile)\n plt.savefig(memfile)\n document.add_picture(memfile, width=Inches(5))\n memfile.close()\n\n \"\"\"\n # todo - Krutik, I can't imagine I will have time to finish this section. 
If you want to create figures to show the test data on the decision surface, i think you need to adjust the dimensions of the plot within plot_decision_boundaries.DecisionBoundaries(), so they are the same as on the first plot, thus, the decision surface will be comparable for both plots \n \n document.add_heading(\"Decision Surface of model - testing\")\n memfile2 = io.BytesIO()\n plot_decision_boundaries.DecisionBoundaries(model=pca_clf, name=fname).test_plot(pca_clf, X_test, y_test, memfile2, X_train, y_train)\n plt.savefig(memfile2)\n document.add_picture(memfile2, width=Inches(5))\n memfile2.close()\n\n \"\"\"\n\n # feature importance\n\n memfile = io.BytesIO()\n y = permutation_based_feature_importance(clf, X_test, y_test, X_train.columns, X_train, y_train, save=True, filename = memfile)\n document.add_picture(memfile, width=Inches(5))\n memfile.close()\n\n document.save(r'../../ML/Classifiers/{}.docx'.format(fname))\n print(\"Saved {}.docx\".format(fname), \"in ../../ML/Classifiers/\")", "def evaluate_model(model, X_test, Y_test, category_names):\n \n Y_pred = model.predict(X_test)\n \n print(classification_report(Y_test.values, Y_pred, target_names=category_names))", "def evaluate_model(model, X_test, Y_test, category_names): \n \n Y_pred = model.predict(X_test)\n print(classification_report(Y_test, Y_pred))\n display_results(Y_test, Y_pred)", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred_grid = model.predict(X_test)\n print(\n classification_report(Y_test.values, y_pred_grid, target_names=category_names)\n )", "def model_analysis(self, model_name: str, history) -> None:\n # probabilites\n y_pred_prob = self.recognizer.predict(self.X_test)\n # most likely class\n y_pred = np.argmax(y_pred_prob, axis=1)\n # compare true and predicted classes on test set\n\n # path handling for writing to file\n output_dir = Path(os.environ[\"MODEL_DATA\"]) / model_name\n out_name = \"classification_report.txt\"\n out_path = output_dir / out_name\n\n acc = history.history[\"accuracy\"]\n val_acc = history.history[\"val_accuracy\"]\n loss = history.history[\"loss\"]\n val_loss = history.history[\"val_loss\"]\n\n epochs = range(1, len(acc) + 1)\n\n # plot accuracies and losses with respect to epochs\n plt.plot(epochs, acc, \"r\", label=\"Train accuracy\")\n plt.plot(epochs, val_acc, \"b\", label=\"Val accuracy\")\n\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy\")\n plt.legend()\n\n plt.savefig(output_dir / \"acc-plot\")\n\n plt.figure()\n plt.plot(epochs, loss, \"r\", label=\"Training loss\")\n plt.plot(epochs, val_loss, \"b\", label=\"Val loss\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.legend()\n\n plt.savefig(output_dir / \"loss-plot\")\n\n # create, print and write to file a sklearn classification report\n print(set(self.y_test) - set(y_pred))\n report = classification_report(self.y_test, y_pred)\n print(report)\n with open(out_path, \"w\") as f:\n f.write(report)\n\n self.make_heatmap(y_pred, output_dir)", "def evaluate_model(model, X_test, Y_test, category_names):\n \n y_preds = model.predict(X_test)\n predictions = pd.DataFrame(data=y_preds, columns=Y_test.columns, index=Y_test.index)\n for col in Y_test.columns:\n print(classification_report(predictions[col],Y_test[col]))", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = pd.DataFrame(data = model.predict(X_test), columns = category_names)\n\n precision, recall, f1_score = [], [], []\n\n for category in category_names:\n scores = classification_report(Y_test[category], 
y_pred[category])\n precision.append([x for x in scores.strip().split(\"avg / total\")[1].strip().split(\" \") \n if len(x) > 0][:3][0])\n recall.append([x for x in scores.strip().split(\"avg / total\")[1].strip().split(\" \") \n if len(x) > 0][:3][1])\n \n model_metric = pd.concat([\n pd.DataFrame(data = [precision, recall], index = [\"precision\", \"recall\"], \n columns = category_names),\n (Y_test.reset_index() == y_pred.reset_index()).mean()[1:].to_frame(\"accuracy\").T\n ])\n\n for col in model_metric.columns:\n model_metric[col] = model_metric[col].astype(float)\n\n return model_metric", "def _eval_classifier(self):\n\n y_pred_baseline = self.df_baseline[self.score_column]\n y_pred_sample = self.df_sample[self.score_column]\n\n y_label_baseline = self.df_baseline[self.label_column]\n y_label_sample = self.df_sample[self.label_column]\n\n precision_baseline = precision_score(y_label_baseline, y_pred_baseline)\n recall_baseline = recall_score(y_label_baseline, y_pred_baseline)\n acc_baseline = accuracy_score(y_label_baseline, y_pred_baseline)\n f1_baseline = f1_score(y_label_baseline, y_pred_baseline)\n try:\n auc_baseline = roc_auc_score(y_label_baseline, y_pred_baseline)\n except ValueError:\n auc_baseline = \"NA\"\n\n precision_sample = precision_score(y_label_sample, y_pred_sample)\n recall_sample = recall_score(y_label_sample, y_pred_sample)\n acc_sample = accuracy_score(y_label_sample, y_pred_sample)\n f1_sample = f1_score(y_label_sample, y_pred_sample)\n try:\n auc_sample = roc_auc_score(y_label_sample, y_pred_sample)\n except ValueError:\n auc_sample = \"NA\"\n\n metrics_df = pd.DataFrame(\n {\n \"Accuracy\": [acc_baseline, acc_sample],\n \"Precision\": [precision_baseline, precision_sample],\n \"Recall\": [recall_baseline, recall_sample],\n \"F1\": [f1_baseline, f1_sample],\n \"AUC\": [auc_baseline, auc_sample],\n },\n index=[\"baseline\", \"sample\"],\n )\n\n self.performance_comparison = metrics_df", "def evaluate_model(model, X_test, Y_test, category_names):\n\n logging.info(\"run evaluate_model\")\n\n # find current foler path for savings\n folder_path = os.path.dirname(__file__)\n\n # predict outputs on test data\n Y_pred = model.predict(X_test)\n\n # create classification report with precision, recall, and F1 score for each categories\n clf_report_df = pd.DataFrame(classification_report(Y_test, Y_pred,\n target_names=category_names, output_dict=True)).T\n clf_report_df.to_markdown(buf=os.path.join(folder_path,'test','classification_report.md'), mode='w')\n\n # calculate confusion matrix for each categories and save corresponding heatmap plots\n conf_matrix_df = multilabel_confusion_matrix(Y_test, Y_pred)\n plot_confusion_matrix(conf_matrix_df, category_names,\n os.path.join(folder_path,'test','confusion_matrix.png'))", "def run_classification_models(train,test,metric_file_path,classes):\n metric_names = ['accuracy','weightedRecall','weightedPrecision']\n f = open(metric_file_path,'w')\n f.write('model,'+','.join(metric_names)+'\\n')\n name = 'Logistic Regression'\n model = LogisticRegression()\n param_grid = ParamGridBuilder()\\\n .addGrid(model.regParam,[0,.25,.5]) \\\n .addGrid(model.elasticNetParam,[0,.25,.5])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel\n print name\n print '\\t Best regParam (lambda): %.2f'%best_model._java_obj.getRegParam()\n print '\\t Best elasticNetparam (alpha): 
%.2f'%best_model._java_obj.getElasticNetParam()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Decision Tree'\n model = DecisionTreeClassifier(seed=7)\n param_grid = ParamGridBuilder()\\\n .addGrid(model.maxDepth,[5,10,15]) \\\n .addGrid(model.maxBins,[8,16,32])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel \n print name\n print '\\t Best maxDepth: %d'%best_model._java_obj.getMaxDepth()\n print '\\t Best maxBins: %d'%best_model._java_obj.getMaxBins()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Random Forest'\n model = RandomForestClassifier(seed=7)\n param_grid = ParamGridBuilder()\\\n .addGrid(model.maxDepth,[5,10,15]) \\\n .addGrid(model.numTrees,[10,15,20])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel \n print name\n print '\\t Best maxDepth: %d'%best_model._java_obj.getMaxDepth()\n print '\\t Best numTrees: %d'%best_model._java_obj.getNumTrees()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n name = 'One vs Rest'\n model = OneVsRest(classifier=LogisticRegression()).fit(train)\n print name\n eval_model(f,name,model,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Naive Bayes'\n model = NaiveBayes()\n param_grid = ParamGridBuilder()\\\n .addGrid(model.smoothing,[.5,1,2])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel \n print name\n print '\\t Best smoothing: %.1f'%best_model._java_obj.getSmoothing()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n if classes == 2:\n name = 'Gradient Boosted Trees'\n model = GBTClassifier(seed=7).fit(train)\n print name\n eval_model(f,name,model,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Linear Support Vector Machine'\n model = LinearSVC().fit(train)\n print name\n eval_model(f,name,model,test,MulticlassClassificationEvaluator,metric_names) \n f.close()", "def test_model (self, text_test, labels_test):\n print(classification_report(labels_test, self.classify(text_test)))", "def evaluate_model(model, X_test, Y_test, category_names):\n\n # Predict labels using model\n Y_pred = model.predict(X_test)\n\n # Generate accuracy report\n report = pd.DataFrame.from_dict(classification_report(Y_test, Y_pred,\n target_names=category_names, output_dict=True))\n report = pd.DataFrame.transpose(report)\n\n print(report)", "def evaluate_model(model, X_test, y_test, category_names):\n # Predict for test set\n y_pred = model.predict(X_test)\n \n print(\"**** Scores for each category *****\\n\")\n for i in range(36):\n print(\"Scores for '{}':\".format(category_names[i]))\n print(classification_report(y_test.values[:,i], y_pred[:,i]))", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n # print the metrics\n for i, col in enumerate(category_names):\n print('{} category metrics: '.format(col))\n print(classification_report(Y_test.iloc[:,i], y_pred[:,i]))", "def start_training(self):\n self.training()\n \n images, 
true_labels, pred_labels, pred_probs = self.evaluate_model(proba=True)\n \n metrics = Metrics(images, true_labels, pred_labels, pred_probs, self.classes)\n\n cm = metrics.get_confusion_matrix()\n print('The confusion matrix is:\\n', cm)\n print('*'*100)\n \n cr = metrics.get_classification_report()\n print('The classification report is:\\n', cr)\n print('*'*100)", "def classification_report(self):\n print('Classification Report ...')\n cr = classification_report(self.y_test, self.y_pred, output_dict=True)\n df = pd.DataFrame(cr)\n df.to_csv('csv/cr/' + self.model_name + '_' + self.label + '_cr.csv')\n print(cr)", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n return classification_report(Y_test, y_pred, target_names = category_names)", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred=model.predict(X_test)\n acc=[]\n for i,c in enumerate(Y_test.columns):\n print(c)\n print(classification_report(Y_test[c], Y_pred[:,i]))\n acc.append(accuracy_score(Y_test[c], Y_pred[:,i]))\n print('Accuracy :',np.mean(acc))\n\n pass", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred = model.predict(X_test)\n Y_pred = pd.DataFrame(Y_pred, columns=category_names)\n \n # calculate summary stats on test data\n results = pd.DataFrame()\n for column_name in Y_pred.columns:\n col_report = classification_report(y_true=Y_test[[column_name]], y_pred=Y_pred[[column_name]], output_dict=True)\n accuracy = col_report['accuracy']\n precision = col_report['macro avg']['precision']\n recall = col_report['macro avg']['recall']\n results[column_name] = [accuracy, precision, recall]\n results.index = ['accuracy', 'precision', 'recall']\n results.mean(axis=1) \n \n # save results to local csv file\n model_name = type(model.best_params_['clf']).__name__\n avg_accuracy = results.mean(axis=1)['accuracy']\n avg_precision = results.mean(axis=1)['precision']\n avg_recall = results.mean(axis=1)['recall']\n params = model.best_params_\n stored_results = pd.DataFrame({'Model': [model_name], 'Accuracy': [avg_accuracy], 'Precision': [avg_precision], \n 'Recall': [avg_recall], 'Parameters': [params]})\n\n add_header = not os.path.isfile('models/model_results.csv')\n with open('models/model_results.csv', 'a') as f:\n stored_results.to_csv(f, header=add_header, index=False)", "def evaluate_model(model, X_test, Y_test, category_names):\n\n print(\"Testing Performance\")\n print(classification_report(Y_test, model.predict(X_test), target_names=category_names))\n\n #Todo cat names", "def evaluate_model(model, X_test, Y_test, category_names):\n\n # Predict labels using model\n y_pred1 = model.predict(X_test)\n\n # Generate accuracy report\n accuracy = [[(y_pred1[:, i] == Y_test.values[:, i]).mean(),\n *precision_recall_fscore_support(\n Y_test.values[:, i], y_pred1[:, i], average='weighted')]\n for i in range(y_pred1.shape[1])]\n accuracy = np.array(accuracy)[:, :-1]\n accuracy = (accuracy * 10000).astype(int) / 100\n scores1= pd.DataFrame( data=accuracy, index=list(Y_test), columns=['Accuracy', 'Precision', 'Recall', 'F-score'])\n print(scores1)\n return scores1", "def evaluate(test_set, predictions):\n full_report = metrics.classification_report(test_set.labels, predictions,\n labels=range(len(test_set.index2label)),\n target_names=test_set.index2label, digits=3)\n pre, rec, f1, support = metrics.precision_recall_fscore_support(test_set.labels, predictions, average='weighted')\n return pre, rec, f1, support, full_report", "def evaluate_model(model, X_test, 
Y_test, category_names): \n # predict on the X_test\n y_pred = model.predict(X_test)\n \n # build classification report on every column\n performances = []\n for i in range(len(category_names)):\n performances.append([f1_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro'),\n precision_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro'),\n recall_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro')])\n # build dataframe\n performances = pd.DataFrame(performances, columns=['f1 score', 'precision', 'recall'],\n index = category_names) \n return performances", "def classify(self, verbose=True, print_scores=False):\n if verbose:\n print(\"%s: Training model ...\" % self.clf_name)\n self.clf.fit(self.X_train, self.y_train)\n\n if verbose:\n print(\"%s: Calculating probablities ... \" % self.clf_name)\n y_proba = self.clf.predict_proba(self.X_test)\n\n if verbose:\n print(\"%s: Making predictions\" % self.clf_name)\n y_pred = self.clf.predict(self.X_test)\n\n if verbose:\n print(\"%s: Calculating metrics ...\" % self.clf_name)\n res = ClassificationResult(self.clf, self.y_test, y_pred, y_proba[:, 1])\n res.calculate_scores()\n\n # Print result if print_scores == True\n if print_scores:\n res.print_metrics\n \n return res", "def evaluate_model(model, X_test, Y_test, category_names):\n\n Y_pred = pd.DataFrame(model.predict(X_test))\n Y_pred.columns = category_names\n Y_test = pd.DataFrame(Y_test)\n Y_test.columns = category_names\n\n for column in category_names:\n print('** {} **'.format(column).upper())\n print(classification_report(Y_test[column], Y_pred[column]))", "def evaluate(\n self,\n test_data=None,\n print_report=True,\n save_path=\"ktrain_classification_report.csv\",\n class_names=[],\n ):\n return self.validate(\n val_data=test_data,\n print_report=print_report,\n save_path=save_path,\n class_names=class_names,\n )", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_prediction = model.predict(X_test)\n Y_prediction_df = pd.DataFrame(Y_prediction, columns=category_names)\n \n for col in category_names:\n print(f\"category:{col}\")\n print(classification_report(Y_test[col], Y_prediction_df[col]))\n print('------------------------------------------------------')\n \n accuracy = np.mean(Y_prediction == Y_test.values)\n print(f\"Accuracy: {accuracy:.2%}\")", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n y_actu = Y_test.values\n\n results_dict = {}\n for i in range(1, 37):\n predicted = \"pred_\" + str(i)\n actual = \"actu_\" + str(i)\n pred_values = []\n actu_values = []\n for ii in range(len(y_pred)):\n\n pred_values.append(int(y_pred[ii][i-1]))\n actu_values.append(int(y_actu[ii][i-1]))\n\n results_dict[predicted] = pred_values\n results_dict[actual] = actu_values\n\n for i in range(1, 37):\n pred = results_dict['pred_' + str(i)]\n actu = results_dict['actu_' + str(i)]\n\n print(\"\\n### \" + category_names[i-1] + \" ###\\n\")\n print(classification_report(pred, actu))", "def evaluate(ground_truth, prediction):\n\n def prfs_to_dict(prfs):\n \"\"\"Returns a precision_recall_fscore_support() result as a dict.\"\"\"\n return {\"precision\": prfs[0], \"recall\": prfs[1], \"fscore\": prfs[2]}\n\n results = {}\n items_count = len(ground_truth)\n\n # accuracy\n accuracy = accuracy_score(ground_truth, prediction)\n results[\"accuracy\"] = accuracy\n\n # confusion matrix\n categories = set(ground_truth) | set(prediction)\n confusions = {\n gold: {pred: 0 for pred in categories} for gold in categories\n 
}\n for g, p in zip(ground_truth, prediction):\n confusions[g][p] += 1\n results[\"confusions\"] = confusions\n\n # class wise precision, recall & f1\n classwise = precision_recall_fscore_support(\n ground_truth, prediction, average=None, warn_for=()\n )\n results[\"true_cat_dist\"] = list(classwise[-1])\n results[\"classwise\"] = {\n str(cl): prfs_to_dict(\n [classwise[0][cl], classwise[1][cl], classwise[2][cl]]\n )\n for cl in categories\n }\n\n # average precision, recall & f1\n results[\"macro_avg\"] = prfs_to_dict(\n precision_recall_fscore_support(\n ground_truth,\n prediction,\n average=\"macro\",\n pos_label=None,\n warn_for=(),\n )\n )\n results[\"micro_avg\"] = prfs_to_dict(\n precision_recall_fscore_support(\n ground_truth,\n prediction,\n average=\"micro\",\n pos_label=None,\n warn_for=(),\n )\n )\n results[\"weigh_avg\"] = prfs_to_dict(\n precision_recall_fscore_support(\n ground_truth,\n prediction,\n average=\"weighted\",\n pos_label=None,\n warn_for=(),\n )\n )\n\n # marginals\n gold_category_distribution = {\n g: sum([confusions[g][p] for p in categories]) for g in categories\n }\n pred_category_distribution = {\n p: sum([confusions[g][p] for g in categories]) for p in categories\n }\n\n # kappa\n expected_agreement_fleiss = sum(\n [\n (\n (gold_category_distribution[c] + pred_category_distribution[c])\n / (2.0 * items_count)\n )\n ** 2\n for c in categories\n ]\n )\n expected_agreement_cohen = sum(\n [\n (float(gold_category_distribution[c]) / items_count)\n * (float(pred_category_distribution[c]) / items_count)\n for c in categories\n ]\n )\n kappa_fleiss = (\n 1.0\n * (accuracy - expected_agreement_fleiss)\n / (1 - expected_agreement_fleiss)\n )\n kappa_cohen = (\n 1.0\n * (accuracy - expected_agreement_cohen)\n / (1 - expected_agreement_cohen)\n )\n results[\"k_fleiss\"] = {\n \"k\": kappa_fleiss,\n \"AE\": expected_agreement_fleiss,\n \"AO\": accuracy,\n }\n results[\"k_cohen\"] = {\n \"k\": kappa_cohen,\n \"AE\": expected_agreement_cohen,\n \"AO\": accuracy,\n }\n\n return results", "def classification_evaluation(self, test_set, predicted_values, certainty):\r\n\r\n percent_accuracy = self.percent_accuracy(test_set, predicted_values)\r\n one_zero = self.one_zero_loss(test_set, predicted_values)\r\n log_loss = self.log_loss(test_set, predicted_values, certainty)\r\n print(f\"Percent correct:\\t{percent_accuracy * 100:.2f}%\")\r\n print(f\"1/0 Loss:\\t\\t\\t{one_zero:.2f}\")\r\n print(\"Log Loss: \", log_loss)", "def evaluate_model(model, X_test, Y_test, category_names):\n \n \n yPredictorTest = model.predict(X_test)\n \n for idx, col in enumerate(Y_test):\n print(col, classification_report(Y_test[col], yPredictorTest[:, idx]))", "def evaluate_model(clf_, X_tr, X_te, y_tr, y_te, cls_rpt_tr=False, show=True, cls_labels=None, binary=False):\n \n import sklearn.metrics as metrics\n import matplotlib.pyplot as plt\n from yellowbrick.classifier import ROCAUC\n \n ## Fit and predict \n y_hat_trn, y_hat_tes = fit_n_pred(clf_, X_tr, X_te, y_tr)\n \n if show:\n ## Classification Report / Scores\n if cls_rpt_tr:\n print('Classification Report Train')\n print(metrics.classification_report(y_tr,y_hat_trn))\n else:\n print('Classification Report Test')\n print(metrics.classification_report(y_te,y_hat_tes))\n\n ## Confusion Matrix\n fig, ax = plt.subplots(figsize=(10,5), ncols=2)\n \n metrics.plot_confusion_matrix(clf_,X_te,y_te,cmap=\"YlOrRd\",\n normalize='true',ax=ax[0])\n ax[0].set(title='Confusion Matrix Test Data')\n ax[0].grid(False) \n\n roc = ROCAUC(clf_, 
classes=cls_labels, ax=ax[1])\n roc.fit(X_tr, y_tr)\n roc.score(X_te, y_te)\n roc.finalize()\n \n plt.tight_layout()\n plt.show()\n \n if binary:\n try:\n imps = plot_importance(clf_, X_tr)\n except:\n imps = None\n \n else:\n return y_hat_trn, y_hat_tes", "def clf_eval():\n y_true = np.random.randint(2, size=10000)\n y_pred = np.clip(np.random.normal(0.25, 0.3, size=y_true.shape) + y_true * 0.5, 0, 1)\n\n model_eval = ClassificationEvaluation(\n y_true=y_true,\n y_pred=y_pred,\n class_names=['a', 'b'],\n model_name='foo',\n )\n return model_eval", "def classification(features, scores, n_classes, model_type=0, save_path='results/',\n lr=.01, batch_size=10, n_epochs=20, test_size=.3,\n verbose=False, save_results=False, normalize=True):\n # features, scores = read_data_from_csv()\n verbose_opc = 0\n if verbose:\n print(\"[INFO] Shuffle Data\")\n verbose_opc = 1\n\n features, scores = shuffle(features, scores, random_state=0)\n\n if normalize:\n if verbose:\n print(\"[INFO] Normalizing Data\")\n scaler = StandardScaler()\n scaler.fit(features)\n features = scaler.transform(features)\n\n if verbose:\n print(\"[INFO] Splitting data into train and test sets\")\n x_train, x_test, y_train, y_test = train_test_split(features, scores, test_size=test_size)\n\n\n\n if verbose:\n print(\"[INFO] Creating the machine learning model\")\n\n model = None\n if model_type == 0:\n model = res_model(x_train.shape[1:], n_classes)\n elif model_type == 1:\n model = simple_model(x_train.shape[1:], n_classes)\n elif model_type == 2:\n model = sklearn.svm.SVC(gamma='auto')\n elif model_type == 3:\n model = RandomForestClassifier()\n elif model_type == 4:\n model = AdaBoostClassifier()\n elif model_type == 5:\n model = xgb.XGBClassifier(objective=\"multi:softprob\", random_state=42)\n\n h = None\n if model_type >= 0 and model_type <= 1:\n # classes 0.0 ,0.5, 1.0, 1.5, 2.0\n y_cat_train = to_categorical(y_train, n_classes)\n y_cat_test = to_categorical(y_test, n_classes)\n\n model.compile(loss=\"logcosh\",\n #optimizer=keras.optimizers.SGD(lr=lr, momentum=.3),\n optimizer=\"adamax\",\n metrics=['accuracy'])\n\n h = model.fit(x_train, y_cat_train,\n batch_size=batch_size,\n epochs=n_epochs,\n validation_data=(x_test, y_cat_test),\n verbose=verbose_opc)\n\n evaluate_model(x_test, y_cat_test, batch_size, model, n_epochs, h, n_classes, folder_name=save_path,\n save_results=save_results, is_rna=True)\n else:\n model.fit(x_train, y_train)\n\n evaluate_model(x_test, y_test, batch_size, model, n_epochs, h, n_classes, folder_name=save_path,\n save_results=save_results)\n\n return model", "def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)", "def evaluate(self, test_dir='data/dev', target='real'):\n test_data = {c: os.path.join(test_dir, c) for c in self.classes}\n if not target in test_data:\n print('Error: target class does not exist in test data.')\n return\n outcomes = {'TP': 0, 'TN': 0, 'FP': 0, 'FN': 0}\n # >>> YOUR ANSWER HERE\n data = []\n for c in test_data:\n docs = open(test_data[c]).readlines()\n for doc in docs:\n preprocess_doc = doc.strip()\n data.append((c, preprocess_doc))\n for item in data:\n predict_ans = self.predict(item[1])\n if item[0] == 'real':\n if predict_ans == 'real':\n outcomes['TP'] += 1\n else:\n outcomes['FN'] += 1\n else:\n if predict_ans == 'real':\n outcomes['FP'] += 1\n else:\n outcomes['TN'] += 1\n precision = outcomes['TP'] / 
(outcomes['TP'] + outcomes['FP']) # replace with equation for precision\n recall = outcomes['TP'] / (outcomes['TP'] + outcomes['FN']) # replace with equation for recall\n f1_score = 2 * ((precision * recall) / (precision + recall)) # replace with equation for f1\n # >>> END YOUR ANSWER\n return precision, recall, f1_score", "def evaluate_model(model, X_test, y_test, category_names):\n try:\n y_pred = model.predict(X_test)\n y_test_avg_labels = round(np.mean(y_test.sum(axis=1)), 2)\n \n print(\"Printing classification report...\\n\")\n y_pred = model.predict(X_test).todense()\n\n i = -1\n for col in category_names:\n i += 1\n ytrue = y_test[col]\n ypred = y_pred[:,i]\n print(col)\n print(classification_report(ytrue, ypred)) \n print('-' * 60)\n\n print(\"\\n Printing coverage error...\\n\")\n print(round(coverage_error(y_test, y_pred), 2))\n print(f\"\\n Average number of true labels per sample in test sample: {y_test_avg_labels}\")\n except:\n raise Exception(\"Could not evaluate model.\")", "def evaluate_model():\n\n # Get the processed data (in proper format to evaluate the NER model)\n data = get_json_from_file_path(PROCESSED_DATA_PATH)\n # Split the dataset for training and test as we did for training\n train_data, test_data = train_test_split(data, train_size=0.7, \n random_state=4)\n\n # Load the model trained\n try:\n ner_model = spacy.load(OUTPUT_MODEL_PATH)\n except Exception as err:\n msg = f'Could not load the model. Error: {err}'\n raise Exception(msg)\n\n # Compute evaluation scores\n print('Computing metrics...')\n scores = evaluate(ner_model, test_data)\n # General metrics of the model\n f_score = scores.get('ents_f')\n precision = scores.get('ents_p')\n recall = scores.get('ents_r')\n print('\\nScoring:')\n print(f'F-score: {f_score}')\n print(f'Precision: {precision}')\n print(f'Recall: {recall}')\n\n # Get the specific scores for each entity \n scores_per_entity = scores.get('ents_per_type')\n # Get the F-score of the entities\n f_scores_of_entities = []\n for entity_scores in scores_per_entity.values():\n f_scores_of_entities.append(entity_scores['f'])\n # Compute the macro averaged F-score\n macro_avg_f_score = sum(f_scores_of_entities)/len(f_scores_of_entities)\n print(f'Macro averaged F-score: {macro_avg_f_score}')\n \n print('\\nScores per entity;')\n print('{:<15} {:<10} {:<10} {:<10}'.format('Entity','F-score','Precision','Recall'))\n for key, value in scores_per_entity.items():\n entity = key\n f, p, r = value['f'], value['p'], value['r']\n print('{:<15} {:<10.2f} {:<10.2f} {:<10.2f}'.format(entity, f, p, r))", "def evaluate(probs, y_test, output_folder, file_prefix='test', model_names=None):\n colours = ['b', 'g', 'm', 'c', 'y', 'r', 'k']\n\n if not os.path.isdir(output_folder):\n os.makedirs(output_folder)\n test_log = open(output_folder + '/' + file_prefix + '.log', 'w+')\n\n fprs, tprs, aucs = [], [], []\n for prob, model_name in zip(probs, model_names):\n test_log.write(model_name + \"\\n\\n\")\n pred = prob.argmax(axis=1)\n test_log.write(str(classification_report(y_test, pred)) + '\\n')\n test_log.write('\\n' + ' Predicted' + '\\n')\n test_log.write(str(confusion_matrix(y_test, pred)) + '\\n')\n\n fpr, tpr, thr = roc_curve(y_test, prob[:, 1])\n ## find best threshold : http://www.medicalbiostatistics.com/roccurve.pdf\n dist = np.sqrt((1. 
- tpr) ** 2 + (fpr) ** 2)\n best_thr = thr[np.argmin(dist)]\n best_thr_pred = (prob[:,1] > best_thr) * 1\n\n test_log.write('\\n' + \"Accuracy : \" + str((accuracy_score(y_test, pred))) + '\\n')\n test_log.write(\"F1 score : \" + str(f1_score(y_test, pred)) + '\\n')\n test_log.write(\"F1 score (thrs : {:.3f}) : \".format(best_thr) + str(f1_score(y_test, best_thr_pred)) + '\\n')\n test_log.write(\"Recall : \" + str(recall_score(y_test, pred)) + '\\n')\n test_log.write(\"Precision : \" + str(precision_score(y_test, pred)) + '\\n\\n')\n\n roc_auc = auc(fpr, tpr)\n fprs.append(fpr)\n tprs.append(tpr)\n aucs.append(roc_auc)\n\n if len(probs) > 1:\n model_names.extend(['mean', 'geom_mean'])\n test_log.write(\"Ensemble (mean)\\n\\n\")\n prob = (np.array(probs).sum(axis=0) / 2)\n pred = prob.argmax(axis=1)\n test_log.write(str(classification_report(y_test, pred)) + '\\n')\n test_log.write('\\n' + ' Predicted' + '\\n')\n test_log.write(str(confusion_matrix(y_test, pred)) + '\\n')\n\n test_log.write('\\n' + \"Accuracy : \" + str((accuracy_score(y_test, pred))) + '\\n')\n test_log.write(\"F1 score : \" + str(f1_score(y_test, pred)) + '\\n')\n test_log.write(\"Recall : \" + str(recall_score(y_test, pred)) + '\\n')\n test_log.write(\"Precision : \" + str(precision_score(y_test, pred)) + '\\n\\n')\n\n fpr, tpr, _ = roc_curve(y_test, prob[:, 1])\n roc_auc = auc(fpr, tpr)\n fprs.append(fpr)\n tprs.append(tpr)\n aucs.append(roc_auc)\n\n test_log.write(\"Ensemble (geom. mean)\\n\\n\")\n prob = (np.array(probs).prod(axis=0) / np.array(probs).prod(axis=0).sum(axis=1)[:, np.newaxis])\n pred = prob.argmax(axis=1)\n test_log.write(str(classification_report(y_test, pred)) + '\\n')\n test_log.write('\\n' + ' Predicted' + '\\n')\n test_log.write(str(confusion_matrix(y_test, pred)) + '\\n')\n\n test_log.write('\\n' + \"Accuracy : \" + str((accuracy_score(y_test, pred))) + '\\n')\n test_log.write(\"F1 score : \" + str(f1_score(y_test, pred)) + '\\n')\n test_log.write(\"Recall : \" + str(recall_score(y_test, pred)) + '\\n')\n test_log.write(\"Precision : \" + str(precision_score(y_test, pred)) + '\\n\\n')\n\n fpr, tpr, _ = roc_curve(y_test, prob[:, 1])\n roc_auc = auc(fpr, tpr)\n fprs.append(fpr)\n tprs.append(tpr)\n aucs.append(roc_auc)\n\n #plt.figure(figsize=(15, 15))\n for fpr, tpr, roc_auc, col, name in zip(fprs, tprs, aucs, colours, model_names):\n plt.plot(fpr, tpr, col, label='[%s] AUC = %0.5f' % (name, roc_auc))\n\n plt.legend(loc='lower right')\n plt.plot([0, 1], [0, 1], 'r--')\n plt.xlim([0, 1])\n plt.ylim([0, 1])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.savefig(output_folder + '/' + file_prefix + '_auc.png')\n plt.close()\n\n test_log.close()", "def evaluate_model(model,test_inputs,test_labels,model_mode):\n\n if model_mode == \"classification\":\n y_pred = model.predict(test_inputs)\n print(\"Accuracy score: \", accuracy_score(test_labels, y_pred))\n #print(\"F1 score: \", f1_score(test_labels,y_pred, average='weighted'))\n\n conf_mx = confusion_matrix(test_labels, y_pred)\n #print(conf_mx)\n plt.matshow(conf_mx, cmap = plt.cm.jet)\n plt.show()\n\n if model_mode == \"regression\":\n y_pred = model.predict(test_inputs)\n print(\"Mean absolute error: \", mean_absolute_error(test_labels, y_pred))", "def classifiction_metric(preds, labels, label_list):\n\n acc = metrics.accuracy_score(labels, preds)\n\n labels_list = [i for i in range(len(label_list))]\n\n report = metrics.classification_report(\n labels, preds, labels=labels_list, target_names=label_list, digits=5, 
output_dict=True)\n\n return acc, report", "def full_multiclass_report(y_true, y_pred, classes, png_output=None, show=True):\n\n # Print accuracy score\n print(\"Accuracy : \"+ str(accuracy_score(y_true,y_pred)))\n \n print(\"\")\n \n # Print classification report\n print(\"Classification Report\")\n Eval.classification_report(y_true,y_pred,digits=5)\n \n # Plot confusion matrix\n cnf_matrix = confusion_matrix(y_true,y_pred)\n print(cnf_matrix)\n\n Eval.plot_confusion_matrix(cnf_matrix,classes=classes, png_output=png_output, show=show)", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def evaluate_model(model, X_test, Y_test, category_names):\n\n #predict on test data\n y_pred = model.predict(X_test)\n y_pred_pd = pd.DataFrame(y_pred, columns = category_names)\n\n print(\"\\nBest Parameters:\", model.best_params_)\n\n for column in category_names:\n print('--------------------------------------------------------\\n')\n print(str(column))\n print(classification_report(Y_test[column], y_pred_pd[column]))", "def evaluate(model, x_test, y_test,y_predict):\n \n \n print( \"================================================================================\")\n print (\"Summary of Results:\")\n \n print (\"Forest Size :\" , FOREST_SIZE)\n \n print(\"Accuracy Score: 
\",accuracy_score(y_test,y_predict))\n print(\"Mse: \",mean_squared_error(y_test,y_predict))\n #average_precision = average_precision_score(y_test, y_predict)\n #print(average_precision)\n \n #fpr, tpr, thresholds = roc_curve(y_test, y_predict, pos_label=2)\n fpr, tpr, thresholds = metrics.roc_curve(y_test, y_predict, pos_label=2)\n print(\"auc\",metrics.auc(fpr, tpr))\n \n \n \n #print (\"ROC : \", roc_curve(y_test,y_predict))\n #print (\"AUC : \", auc(y_test,y_predict,reorder=False))\n \n\n \n print (\"================================================================================\")\n #print(average_precision=average_precision_score(Y_test, y_predict))\n #print(\"Average precision-recall score:\", average_precision)\n print()\n print (\"================================================================================\")\n print( \"Feature importance for Random Forest Classifier:\\n\")\n names=['client_id','host_name','page_path','click_time']\n print (sorted(zip(map(lambda x: round(x, 4), model.feature_importances_), names), reverse=True))\n \n print (\"================================================================================\")\n print (\"Done with evaluation\")\n return None", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred = model.predict(X_test)\n\n for i, column in enumerate(category_names):\n y_true = Y_test.values[:, i]\n y_pred = Y_pred[:, i]\n target_names = ['not {}'.format(column), '{}'.format(column)]\n print(classification_report(\n y_true, y_pred, target_names=target_names))", "def evaluate_model(model, X_test, Y_test, category_names):\n\n y_pred = model.predict(X_test)\n Y_test_as_array = np.array(Y_test)\n for i in range(len(category_names)):\n print(\"{} accuracy {} precision {} recall {} f1 {}\".format(\n category_names[i],\n (y_pred[:, i] == Y_test_as_array[:, i]).mean(), # accuracy\n precision_score(Y_test_as_array[:, i], y_pred[:, i], average=None), # precision\n recall_score(Y_test_as_array[:, i], y_pred[:, i], average=None), # recall\n f1_score(Y_test_as_array[:, i], y_pred[:, i], average=None) # f1\n ))\n print(\"mean accuracy {}\".format((y_pred == Y_test_as_array).mean().mean()))", "def detection_analysis(y_pred, y_true):\n print(\"Precision: \", sm.precision_score(y_pred, y_true))\n print(\"Recall: \", sm.recall_score(y_pred, y_true))\n print(\"Accuracy: \", sm.accuracy_score(y_pred, y_true))\n print(\"\\n\")", "def get_classification_metrics(features, true_output, model):\n accuracy = model.score(features, true_output)\n guess = model.predict(features)\n precision = metrics.precision_score(true_output, guess)\n recall = metrics.recall_score(true_output, guess)\n return accuracy, precision, recall", "def model_metrics(X, y, model, data_set = 'data_set'):\n score = model.score(X, y)\n matrix = confusion_matrix(y, model.predict(X))\n tpr = matrix[1,1] / (matrix[1,1] + matrix[1,0])\n fpr = matrix[0,1] / (matrix[0,1] + matrix[0,0])\n tnr = matrix[0,0] / (matrix[0,0] + matrix[0,1])\n fnr = matrix[1,0] / (matrix[1,1] + matrix[1,0])\n prc = matrix[1,1] / (matrix[1,1] + matrix[0,1])\n \n print(f'{data_set} accuracy score: {score:.2%}')\n print(f'{data_set} precision score {prc:.2%}')\n print(f'{data_set} recall score: {tpr:.2%}\\n')\n class_report = classification_report(y, model.predict(X), zero_division=True)\n print('-------------------------------')\n print(f'classification report')\n print(class_report)\n print ('-------------------------------\\n')\n print('confusion matrix')\n print(f'{matrix}\\n')\n print(f'{data_set} model 
metrics')\n print('---------------------------------')\n print(f'True positive rate for the model is {tpr:.2%}')\n print(f'False positive rate for the model is {fpr:.2%}')\n print(f'True negative rate for the model is {tnr:.2%}')\n print(f'False negative rate for the model is {fnr:.2%}\\n')", "def train_predict_and_results(data, clf):\n tra_x, tst_x, tra_y, tst_y = data\n clf.fit(tra_x, tra_y)\n prd_y = clf.predict(tst_x)\n cnf = confusion_matrix(tst_y, prd_y)\n print (\"Classifier: %s \\tAccuracy score:%7.2f %%\"\n \"\\tTN:%5d FP:%5d FN:%5d TP:%5d\"\n % (clf.name, accuracy_score(tst_y, prd_y) * 100,\n cnf[0][0], cnf[0][1], cnf[1][0], cnf[1][1]))", "def evaluate_model(model, X_test_input, y_test_input):\r\n pred_class = [model.classes_[i] for i in model.predict_proba(X_test_input).argmax(axis=-1)]\r\n pred_accuracy = np.sum(np.array(y_test_input)==np.array(pred_class))/len(pred_class)\r\n return pred_class, pred_accuracy", "def trainAndEvaluate(trainDataFile, devDataFile, classifier, average):\n\n ids, instances, labels, features, classes = readArffFile(trainDataFile)\n\n startTime = time.time()\n\n classifier = classifier.lower()\n if classifier == \"svc\" or classifier == \"svm\":\n print(\"Using SVM\")\n clf = LinearSVC()\n elif classifier == \"nb\":\n print(\"Using Naive Bayes\")\n clf = MultinomialNB()\n elif classifier.lower() == \"nbboost\" or classifier.lower() == \"nbboosted\":\n print(\"Using Boosted Naive Bayes\")\n clf = MultinomialNB()\n clf = AdaBoostClassifier(clf)\n elif classifier == \"1r\":\n print(\"Sorry, 1R / LinearRegression isn't working right now\")\n exit()\n clf = LinearRegression(copy_X=False,fit_intercept=True, normalize=False)\n elif classifier == \"0r\":\n print(\"Using 0R\")\n from collections import Counter\n mostCommonTrainingClass = Counter(labels).most_common(1)[0][0]\n else:\n print(\"Invalid classifier choice.\")\n return\n\n print(\"Training the model\")\n\n if classifier != \"0r\":\n clf.fit(instances, labels)\n\n timeForTrain = time.time() - startTime\n numTrainInstances = len(instances)\n\n \"\"\"\n Testing and evaluating the model\n \"\"\"\n\n # Throw away the features and classes, we've already read them in.\n ids, instances, labels, _, _ = readArffFile(devDataFile)\n\n startTime = time.time()\n\n print(\"Testing the model\")\n numCorrect = 0\n numWrong = 0\n lenInstances = len(instances)\n predicted = []\n for i in range(lenInstances):\n # Status update of how it's going.\n if i % 1000 == 0:\n print(\"\\r\" + str(i).zfill(len(str(lenInstances))) + \"/\" + str(lenInstances) + \" \", end=\"\")\n instance = instances[i]\n label = labels[i]\n\n if classifier == \"0r\":\n res = mostCommonTrainingClass\n else:\n res = predictPrint(clf, instance)\n predicted.append(res)\n # print(\"-- Predicted label: {} || Correct label: {} --\". 
format(res, label))\n if res == label:\n numCorrect += 1\n else:\n numWrong += 1\n print()\n\n timeForTest = time.time() - startTime\n\n predicted = np.array(predicted)\n outName = outputFileName + classifier.upper() + dataSet + \".csv\"\n writeOutput(ids, predicted, outName)\n numDevInstances = len(instances)\n\n\n \"\"\"\n Printing various evaluation metrics.\n \"\"\"\n # report = classification_report(labels, predicted, target_names=classes)\n report = parameterizableReport(labels, predicted, beta=0.5, target_names=classes, averageType=average)\n print(report)\n print()\n # print(classification_report(labels, predicted, target_names=classes))\n\n \"\"\"\n print(\"Number of training instances: {}\".format(numTrainInstances))\n print(\"Number of dev instances: {}\".format(numDevInstances))\n print()\n\n print(\"Number of correct classifications: {}\".format(numCorrect))\n print(\"Number of wrong classifications: {}\".format(numWrong))\n print(\"Percentage of correct classifications: {0:.2f}%\".format(numCorrect*100/(numCorrect+numWrong)))\n print()\n \"\"\"\n\n print(\"Time taken to train the model: {0:.2f} sec\".format(timeForTrain))\n print(\"Time taken to test the model: {0:.2f} sec\".format(timeForTest))\n print()\n\n confMatrix = confusion_matrix(labels, predicted)\n if classifier == \"nb\":\n title = \"Naive Bayes\"\n elif classifier == \"svm\" or classifier == \"svc\":\n title = \"Support Vector Machine\"\n title += \" \" + dataSet\n plot_confusion_matrix(confMatrix, classes, title=title, normalize=True)", "def compute_and_print_eval_metrics(self):\n s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', '[email protected]:.95')\n precision, recall, f1, mean_precision, mean_recall, map50, map = 0., 0., 0., 0., 0., 0., 0.\n ap = []\n eval_stats = [np.concatenate(x, 0) for x in zip(*self.eval_stats)]\n if len(eval_stats) and eval_stats[0].any():\n precision, recall, ap, f1, ap_class = ap_per_class(*eval_stats)\n precision, recall, ap50, ap = precision[:, 0], recall[:, 0], ap[:, 0], ap.mean(1)\n mean_precision, mean_recall, map50, map = precision.mean(), recall.mean(), ap50.mean(), ap.mean()\n nt = np.bincount(eval_stats[3].astype(np.int64), minlength=len(self.class_names)) # number of targets per class\n else:\n nt = np.zeros(1)\n\n pf = '%20s' + '%12.5g' * 6 # print format\n print(\"\\n EVALUTAION \\n\")\n print(s)\n print(pf % ('all', self.seen, nt.sum(), mean_precision, mean_recall, map50, map))\n if self.cfg.eval.verbose:\n for indx, cls in enumerate(ap_class):\n print(pf % (self.class_names[cls], self.seen, nt[cls], precision[indx], recall[indx], ap50[indx], ap[indx]))", "def compute(self) -> None:\n \n self.model.eval()\n \n with torch.no_grad():\n for (input, target, _) in self.loader:\n\n # self.model = self.model.train(False) # TEST @lacoupe\n output, _ = self.model(input)\n \n output = (output >= 0.5)\n \n for out, tar in zip(output, target):\n \n tar = bool(tar)\n \n if out and tar:\n self.confusion['true_positive'] += 1\n elif not out and not tar:\n self.confusion['true_negative'] += 1\n elif out and not tar:\n self.confusion['false_positive'] += 1\n elif not out and tar:\n self.confusion['false_negative'] += 1\n \n self.accuracy = (self.confusion['true_positive'] + self.confusion['true_negative']) \\\n / sum(list(self.confusion.values()))\n \n if (self.confusion['true_positive'] + self.confusion['false_positive']) == 0.:\n self.precision = 0.\n else:\n self.precision = self.confusion['true_positive'] \\\n / 
(self.confusion['true_positive'] + self.confusion['false_positive'])\n \n if (self.confusion['true_positive'] + self.confusion['false_negative']) == 0.:\n self.recall = 0.\n else:\n self.recall = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_negative'])\n \n if (self.precision + self.recall) == 0.:\n self.f1_score = 0.\n else:\n self.f1_score = 2 * self.precision * self.recall / (self.precision + self.recall)", "def evaluate_preds_classification(y_true, y_preds):\n accuracy = accuracy_score(y_true, y_preds)\n precision = precision_score(y_true, y_preds)\n recall = recall_score(y_true, y_preds)\n f1 = f1_score(y_true, y_preds)\n metric_dict = {\"accuracy\": round(accuracy, 2),\n \"precision\": round(precision, 2),\n \"recall\": round(recall, 2),\n \"f1\": round(f1, 2)}\n print(f\"Accuracy: {accuracy * 100:.2f}%\")\n print(f\"Precision: {precision}\")\n print(f\"Recall: {recall}\")\n print(f\"F1 Score: {f1} \\n\")\n return metric_dict", "def train_and_test_model(self, X_train, y_train, X_test, y_test):\n\n\t\t# Fit the classification model on the whole training set (as opposed to cross-validation)\n\t\t# print(\"Y TRAIN: \", y_train[:10])\n\t\t# print(\"x TRAIN: \", X_train[:10])\n\t\tself.classifier.fit(X_train, y_train)\n\t\ty_train_predicted = self.classifier.predict(X_train)\n\t\tprint(\"np.mean Accuracy TRAINING: %s\" % np.mean(y_train_predicted == y_train))\n\n\t\t''' Predict the outcome on the test set\n\t\t\tNote that the clf classifier has already been fit on the training data.\n\t\t'''\n\t\ty_predicted = self.classifier.predict(X_test)\n\n\t\tprint(\"%.2f seconds: Finished training the model and predicting class labels for the test set\" % time.process_time())\n\n\t\t# Simple evaluation using numpy.mean\n\t\t# print(\"np.mean Accuracy: %s\" % np.mean(y_predicted == y_test))\n\n\t\t# Log the classification report\n\t\t# print(\"Classification report:\\n%s\" % metrics.classification_report(y_test, y_predicted))\n\n\t\t# The confusion matrix\n\t\t# confusion_matrix = metrics.confusion_matrix(y_test, y_predicted)\n\t\t# print(\"Confusion matrix:\\n%s\" % confusion_matrix)", "def evaluate(train, train_labels, test, test_labels):\n \n # Use the same model for each training set for now\n model = RandomForestClassifier(n_estimators = 100, \n random_state = 50, n_jobs = -1)\n \n train = train.replace({np.inf: np.nan, -np.inf: np.nan})\n test = test.replace({np.inf: np.nan, -np.inf:np.nan})\n \n feature_names = list(train.columns)\n \n # Impute the missing values\n imputer = Imputer(strategy = 'median', axis = 1)\n train = imputer.fit_transform(train)\n test = imputer.transform(test)\n \n cv_score = 1 * cross_val_score(model, train, train_labels, \n scoring = \"f1\", \n cv = 5)\n \n # Fit on the training data and make predictions\n model.fit(train, train_labels)\n preds = model.predict(test)\n \n # Calculate the performance\n f1 = f1_score(test_labels, preds)\n print('5-fold CV F1: {:.2f} with std: {:.2f}'.format(cv_score.mean(),cv_score.std()))\n print('Test F1: {:.2f}.'.format(f1))\n \n feature_importances = pd.DataFrame({'feature': feature_names, \n 'importance': model.feature_importances_})\n \n return preds, feature_importances", "def test_report_classification_multi(self):\n t = init_tensors()\n metrics_dict = create_metrics_dict(3)\n metrics_dict = report_classification(t['pred_multi'], \n t['lbl_multi'], \n batch_size=2, \n metrics_dict=metrics_dict,\n ignore_index=-1)\n assert \"{:.6f}\".format(metrics_dict['precision'].val) == 
\"0.327083\"\n assert \"{:.6f}\".format(metrics_dict['recall'].val) == \"0.312500\"\n assert \"{:.6f}\".format(metrics_dict['fscore'].val) == \"0.314935\"", "def test(self):\n\t\treturn classification_report(self.test_labels, self.predict(self.test_data), target_names=self.le.classes_)", "def evaluate_individual(predictions, test_files, models):\n\n print(\"\\nAccuracy for individual models\\n\")\n \n # Fix Location\n correct_predictions = [0, 0, 0]\n total_predictions = [0, 0, 0]\n num_failed_predictions = 0\n\n for prediction in predictions:\n if prediction[\"correct_data\"][\"correct_location\"] == prediction[\"predicted_location\"]:\n correct_predictions[FixType[prediction[\"correct_data\"][\"correct_type\"]].value] = correct_predictions[FixType[\n prediction[\"correct_data\"][\"correct_type\"]].value] + 1\n if prediction[\"predicted_location\"] is None:\n num_failed_predictions = num_failed_predictions + 1\n total_predictions[FixType[prediction[\"correct_data\"][\"correct_type\"]].value] = total_predictions[FixType[\n prediction[\"correct_data\"][\"correct_type\"]].value] + 1\n\n for i in range(3):\n if total_predictions[i] == 0: # If the type was never predicted\n accuracy = 0\n else:\n accuracy = correct_predictions[i] / total_predictions[i]\n print(f\"Fix Location accuracy for class {FixType(i).name}: {accuracy * 100} %\")\n\n accuracy = sum(correct_predictions) / (len(predictions) - num_failed_predictions)\n print(f\"Fix Location accuracy overall is {accuracy * 100} %\")\n \n # Fix type\n correct_predictions = [0, 0, 0]\n total_predictions = [0, 0, 0]\n num_failed_predictions = 0\n\n for prediction in predictions:\n if prediction[\"correct_data\"][\"correct_type\"] == prediction[\"predicted_type\"]:\n correct_predictions[FixType[prediction[\"predicted_type\"]].value] = correct_predictions[FixType[\n prediction[\"predicted_type\"]].value] + 1\n if prediction[\"predicted_type\"] is None:\n num_failed_predictions = num_failed_predictions + 1\n total_predictions[FixType[prediction[\"predicted_type\"]].value] = total_predictions[FixType[\n prediction[\"predicted_type\"]].value] + 1\n\n for i in range(3):\n if total_predictions[i] == 0: # If the type was never predicted\n accuracy = 0\n else:\n accuracy = correct_predictions[i] / total_predictions[i]\n print(f\"Fix Type accuracy for class {FixType(i).name}: {accuracy * 100} %\")\n\n accuracy = sum(correct_predictions) / (len(predictions) - num_failed_predictions)\n print(f\"Fix Type accuracy overall is {accuracy * 100} %\")\n \n # We repeat the predictions to evaluate the insert and modify models individually, regardless of the predicted fix type \n\n raw_training_samples = []\n\n if test_files.endswith(\".json\"): # Single JSON file\n with open(test_files) as file:\n logging.info(\"Source ending in .json. 
Predicting on single JSON file.\")\n raw_training_samples = json.load(file)\n else: # Folder path\n for filename in listdir(test_files):\n with open(test_files + filename) as file:\n raw_training_samples.extend(json.load(file))\n \n correct_predictions_insert = 0\n total_predictions_insert = 0\n correct_predictions_modify = 0\n total_predictions_modify = 0\n insert_tokens = []\n modify_tokens = []\n\n for sample in raw_training_samples:\n # Insert\n if sample[\"metadata\"][\"fix_type\"] == \"insert\":\n actual_sample, tokens = IOProcessor.preprocess(sample[\"wrong_code\"])\n pred = predict_single(actual_sample, models[2])\n token = IOProcessor.postprocess(pred, 2)\n if token == sample[\"metadata\"][\"fix_token\"]: # Correct Prediction\n correct_predictions_insert = correct_predictions_insert + 1\n else: # Incorrect prediction\n insert_tokens.append([token, sample[\"metadata\"][\"fix_token\"]])\n total_predictions_insert = total_predictions_insert + 1\n # Modify\n if sample[\"metadata\"][\"fix_type\"] == \"modify\":\n actual_sample, tokens = IOProcessor.preprocess(sample[\"wrong_code\"])\n pred = predict_single(actual_sample, models[3])\n token = IOProcessor.postprocess(pred, 3)\n if token == sample[\"metadata\"][\"fix_token\"]: # Correct Prediction\n correct_predictions_modify = correct_predictions_modify + 1\n else: # Incorrect prediction\n modify_tokens.append([token, sample[\"metadata\"][\"fix_token\"]])\n total_predictions_modify = total_predictions_modify + 1\n\n insert_accuracy = correct_predictions_insert / total_predictions_insert\n modify_accuracy = correct_predictions_modify / total_predictions_modify\n print(f\"Fix Token accuracy for insert is {insert_accuracy * 100} %\")\n print(f\"Fix Token accuracy for modify is {modify_accuracy * 100} %\")\n\n # The following code may be used to create a swarm plot of the erroneous predictions for fix locations\n # This does, however, require the installation of the pandas, seaborn, and matplotlib libraries.\n \n # import seaborn as sns\n # import matplotlib.pyplot as plt\n # import pandas as pd\n # location_distance_array = []\n # for prediction in predictions:\n # actual_sample, tokens = IOProcessor.preprocess(prediction[\"correct_data\"][\"wrong_code\"])\n # label = get_token_index(prediction[\"correct_data\"][\"wrong_code\"], tokens, prediction[\"correct_data\"][\"correct_location\"])\n # if prediction[\"predicted_token_location\"] - label == 0:\n # pass\n # else:\n # location_distance_array.append([prediction[\"predicted_token_location\"] - label, prediction[\"correct_data\"][\"correct_type\"]])\n \n # df = pd.DataFrame(data=location_distance_array)\n # sns.set_theme(style=\"whitegrid\")\n # f, ax = plt.subplots(figsize=(6, 4))\n # sns.despine(bottom=True, left=True)\n # sns.swarmplot(y=0, x=1, data=df, palette=\"dark\", size=6)\n # ax.set_xlabel('')\n # ax.set_ylabel('')\n # plt.ylim([-15, 16])\n \n # plt.savefig('line_plot.pdf', bbox_inches='tight', pad_inches=0)", "def make_clf(x_train, y_train, x_test, y_test, clf, clf_name, level):\n print('----------{} at {} level ----------'.format(clf_name, level))\n totalTP, totalFP, totalFN, totalTN = 0, 0, 0, 0\n\n # apply SMOTE, train and test the model\n x_train, y_train = SMOTE(sampling_strategy=0.5).fit_resample(x_train, y_train)\n clf.fit(x_train, y_train)\n y_predict = clf.predict(x_test)\n\n for i in range(len(y_predict)):\n if y_test[i] and y_predict[i]:\n totalTP += 1\n if not y_test[i] and y_predict[i]:\n totalFP += 1\n if y_test[i] and not y_predict[i]:\n totalFN += 1\n if not 
y_test[i] and not y_predict[i]:\n totalTN += 1\n\n recall = totalTP / (totalTP + totalFN)\n return recall", "def model_evaluation(X_train, y_train, X_test, y_test, k=16):\n print(\">>>>>>> x.shape\", X_train.shape)\n p_matrix, X_reduce = dimension_reduction(X_train, k=k)\n print(\"model training ...\")\n bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2), n_estimators=30, learning_rate=1)\n bdt.fit(X_reduce, y_train)\n print(\"fit succeed\")\n\n X_test = np.dot(X_test, p_matrix)\n y_pred = bdt.predict(X_test)\n print(classification_report(y_test, y_pred, target_names=['benign', 'gafgyt', 'miari'], digits=4))", "def runClassifier(clf,title,xtrain,ytrain,xtest,ytest):\n # train the model using the classifier's fit function\n # use a dummy variable to avoid gibberish being printed\n clf.fit(xtrain, ytrain)\n\n # use the model to predict labels for the test set\n # note: this step is redundant if you just want the score\n #predictions = clf.predict(xtest)\n\n # the score function will run the predict method and then calculate\n # the accuracy based on the labels it calculates and the actual labels\n score = clf.score(xtest, ytest)\n\n # print the accuracy of our model on the test data\n print \"%s Accuracy: %0.2f%%\" % (title,(100.0 * score))\n\n # return the predictions in case the caller is interested\n #return predictions", "def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def run_analyses(y_predict_train, y_train, y_predict, y_test):\n # calculate metrics\n _, training_error = output_error(y_predict_train, y_train)\n (precision, recall, f1, _), testing_error = output_error(y_predict, y_test)\n \n # print out metrics\n print 'Average Precision:', np.average(precision)\n print 'Average Recall:', np.average(recall)\n print 'Average F1:', np.average(f1)\n print 'Training Error:', training_error\n print 'Testing Error:', testing_error", "def main_stats_model(y_train: pd.DataFrame, y_test: pd.DataFrame, y_pred: np.ndarray,\n model_name: str = '',\n model_parameters: dict = None,\n model_preprocessing: str = '',\n sequence_origin: str = '',\n primers_origin: str = '',\n taxonomy_level: Union[List[int], int] = '',\n selected_primer: Union[List[str], str] = '',\n test_size: float = 0.2,\n feature_importances: np.ndarray = None,\n k: int = 4,\n save_csv: bool = False,\n xgb_model=None,\n rf_model=None,\n save_model=False,\n save_tree: int = 0):\n model_path = folder_paths['model_results'] + model_name + '{}'.format(slash)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n\n folder_number = get_new_model_folder_number(model_name=model_name)\n analysis_path = model_path + '{:0>5d}_analysis_{}_{}{}'.format(folder_number, selected_primer, taxonomy_level, slash)\n os.makedirs(analysis_path)\n\n log_path = analysis_path + 'model_results.txt'\n logger = StatLogger(log_path=log_path)\n\n # Basic information on configuration\n test_size = get_model_info(y_test, model_name, model_parameters, model_preprocessing, sequence_origin,\n primers_origin, taxonomy_level, selected_primer, test_size, logger)\n\n # Metrics of model results\n main_class_prop, accuracy = get_metrics_model(y_train, y_test, y_pred, logger, feature_importances, k, save_tree,\n xgb_model,\n analysis_path=analysis_path)\n\n if save_csv:\n add_optimal_model_params(folder_number, 
selected_primer, taxonomy_level, accuracy, model_parameters,\n model_path=model_path)\n\n if save_model:\n if xgb_model is not None:\n xgb_model.save_model(analysis_path+'0001.model')\n if rf_model is not None:\n filename = analysis_path+'0001.model'\n pickle.dump(rf_model, open(filename, 'wb'))\n\n logger.close_file()\n\n return test_size, main_class_prop, accuracy", "def evaluate_clf(\n clf, X, y, k=None, test_size=0.5, scoring=\"f1_weighted\", feature_names=None\n):\n X_train, X_test, y_train, y_true = model_selection.train_test_split(\n X, y, test_size=test_size\n )\n\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n\n print(\"Accuracy Score: %f\" % metrics.accuracy_score(y_true, y_pred))\n print()\n\n print(\"Classification report\")\n print(metrics.classification_report(y_true, y_pred))\n print()\n\n print(\"Confussion matrix\")\n print(metrics.confusion_matrix(y_true, y_pred))\n print()\n\n if hasattr(clf, \"feature_importances_\"):\n print(\"Feature importances\")\n if not feature_names:\n feature_names = [\"%d\" % i for i in range(X.shape[1])]\n for f, imp in zip(feature_names, clf.feature_importances_):\n print(\"%20s: %s\" % (f, round(imp * 100, 1)))\n print()\n\n if k:\n print(\"Cross validation\")\n kf = model_selection.KFold(n_splits=k)\n scores = model_selection.cross_val_score(clf, X_train, y_train, cv=kf, scoring=scoring)\n print(scores)\n print(\n \"%d-fold Cross Validation Accuracy: %0.2f (+/- %0.2f)\"\n % (k, scores.mean() * 100, scores.std() * 200)\n )", "def classification_report(y_test:list, y_predict:list) -> str:\n return classification_report(y_test, y_predict)", "def predict_class(clf, X_test, Y_test, labels=None, stats_fname=None):\n expected = Y_test\n if isinstance(clf, KerasModel):\n char_probs = clf.predict(X_test)\n predicted = np.argmax(char_probs, axis=1)\n\n if len(Y_test.shape) > 1:\n expected = np.argmax(Y_test, axis=1)\n else:\n predicted = clf.predict(X_test)\n\n conf_mat = metrics.confusion_matrix(\n expected, predicted, labels=range(len(labels))\n )\n\n stats = {\n 'Accuracy': metrics.accuracy_score(expected, predicted),\n 'F1': metrics.f1_score(expected, predicted, average='weighted'),\n 'Precision': metrics.precision_score(expected, predicted,\n average='weighted'),\n 'Recall': metrics.recall_score(expected, predicted,\n average='weighted')\n }\n print('Accuracy: %f' % stats['Accuracy'])\n print('F1: %f' % stats['F1'])\n print('percision: %f' % stats['Precision'])\n print('recall: %f' % stats['Recall'])\n\n save_conf_mat(conf_mat, stats, labels, stats_fname)\n\n return predicted", "def summarize_model(clf_, X_tr, X_te, y_tr, y_te, tree=False):\n \n import sklearn.metrics as metrics\n import matplotlib.pyplot as plt\n import pandas as pd\n \n y_hat_tr, y_hat_te = fit_n_pred(clf_, X_tr, X_te, y_tr)\n print('Classification Report:')\n print(metrics.classification_report(y_te, y_hat_te))\n \n if tree:\n fig, ax = plt.subplots(figsize=(10,5), nrows=2)\n\n metrics.plot_confusion_matrix(clf_,X_te,y_te,cmap=\"YlOrRd\", normalize='true',\n ax=ax[0])\n ax[0].set(title='Confusion Matrix')\n ax[0].grid(False)\n\n plot_importance(clf_, X_tr, ax=ax[1])\n plt.tight_layout()\n \n else:\n clf_coef = pd.Series(clf_.coef_[0], index=X_tr.columns, name='Normal')\n abs_coef = pd.Series(abs(clf_.coef_[0]), index=X_tr.columns, name='Absolute')\n posi_coef = pd.Series((clf_coef > 0), name='Positive')\n coef_all = pd.concat([clf_coef, abs_coef, posi_coef], axis=1)\n coef_all.sort_values('Absolute', ascending=True, inplace=True)\n 
coef_all.tail(20)['Normal'].plot(kind='barh', color=coef_all['Positive'].map({True:'b',False:'r'})\n\n metrics.plot_confusion_matrix(clf_,X_te,y_te,cmap=\"YlOrRd\", normalize='true')\n plt.title('Confusion Matrix')\n plt.grid(False)\n plt.tight_layout()\n\ndef grid_searcher(clf_, params, X_tr, X_te, y_tr, y_te, cv=None, keep_t=False, train_score=True):\n \n \"\"\"Takes any classifier, train/test data for X/y, and dict of parameters to\n iterate over. Optional parameters select for cross-validation tuning, keeping\n time for running the gridsearch, and returning training scores when done.\n Default parameters only return the fitted grid search object. MUST HAVE Timer\n class imported.\"\"\"\n \n from sklearn.model_selection import GridSearchCV\n import numpy as np\n \n ## Instantiate obj. with our targets\n grid_s = GridSearchCV(clf_, params, cv=cv, return_train_score=train_score)\n \n ## Time and fit run the 'search'\n time = Timer()\n time.start()\n grid_s.fit(X_tr, y_tr)\n time.stop()\n \n ## Display results\n tr_score = np.mean(grid_s.cv_results_['mean_train_score'])\n te_score = grid_s.score(X_te, y_te)\n print(f'Mean Training Score: {tr_score :.2%}')\n print(f'Mean Test Score: {te_score :.2%}')\n print('Best Parameters:')\n print(grid_s.best_params_)\n \n ## Time keeping and grid obj\n if keep_t:\n lap = time.record().total_seconds()\n print('**********All done!**********')\n return grid_s, lap\n else:\n return grid_s", "def classificationOutput(clf, X, Y):\n n_samples = 36\n\n print \"\\n\\nClassifier: \\n %s\" % (clf)\n print \"#\" * 79\n # classifier_gnb = naive_bayes.GaussianNB() # initiating the classifier\n\n clf.fit(X[:n_samples], Y[:n_samples]) # train on first n_samples and test on last 10\n\n expected = Y[n_samples:]\n predicted = clf.predict(X[n_samples:])\n print(\"Classification report:\\n%s\\n\" % (metrics.classification_report(expected, predicted)))\n print(\"\\nConfusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))", "def classification_report(\n y_true, y_pred, labels=None, target_names=None,\n sample_weight=None, digits=2):\n\n import numpy as np\n from sklearn.metrics import precision_recall_fscore_support\n from sklearn.utils.multiclass import unique_labels\n\n if labels is None:\n labels = unique_labels(y_true, y_pred)\n else:\n labels = np.asarray(labels)\n\n last_line_heading = 'avg / total'\n\n if target_names is None:\n target_names = ['%s' % l for l in labels]\n name_width = max(len(cn) for cn in target_names)\n width = max(name_width, len(last_line_heading), digits)\n\n headers = [\"precision\", \"recall\", \"f1-score\", \"support\"]\n fmt = '%% %ds' % width # first column: class name\n fmt += ' '\n fmt += ' '.join(['% 9s' for _ in headers])\n fmt += '\\n'\n\n headers = [\"\"] + headers\n report = fmt % tuple(headers)\n report += '\\n'\n\n p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,\n labels=labels,\n average=None,\n sample_weight=sample_weight)\n\n for i, label in enumerate(labels):\n values = [target_names[i]]\n for v in (p[i], r[i], f1[i]):\n values += [\"{0:0.{1}f}\".format(v, digits)]\n values += [\"{0}\".format(s[i])]\n report += fmt % tuple(values)\n\n report += '\\n'\n values = [\"weighted \" + last_line_heading]\n for v in (np.average(p, weights=s),\n np.average(r, weights=s),\n np.average(f1, weights=s)):\n values += [\"{0:0.{1}f}\".format(v, digits)]\n values += ['{0}'.format(np.sum(s))]\n report += fmt % tuple(values)\n\n p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,\n labels=labels,\n 
average=\"macro\",\n sample_weight=sample_weight)\n\n # compute averages\n values = [\"macro \" + last_line_heading]\n for v in (p, r, f1):\n values += [\"{0:0.{1}f}\".format(v, digits)]\n values += ['{0}'.format(np.sum(s))]\n report += fmt % tuple(values)\n\n p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,\n labels=labels,\n average=\"micro\",\n sample_weight=sample_weight)\n\n # compute averages\n values = [\"micro \" + last_line_heading]\n for v in (p, r, f1):\n values += [\"{0:0.{1}f}\".format(v, digits)]\n values += ['{0}'.format(np.sum(s))]\n report += fmt % tuple(values)\n return report", "def classification(trainData, trainLabels, testData, method):\n\n nClass = 2\n classLabels = [0,1]\n\n trainLabelsUnqArr = np.unique(trainLabels)\n\n if method == 'NaiveBayes':\n classifier = GaussianNB()\n model = classifier.fit(trainData, trainLabels)\n result = model.predict(testData)\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n elif method == 'knnVoting':\n\n classifier = KNeighborsClassifier(5)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'RandomForests':\n\n classifier = RandomForestClassifier(max_depth=10, random_state=0)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n ############################################\n importances = model.feature_importances_\n std = np.std([tree.feature_importances_ for tree in model.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n # Print the feature ranking\n print(\"Feature ranking:\")\n for f in range(trainData.shape[1]):\n print(\"%d. feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\n # Plot the feature importances of the forest\n plt.figure()\n plt.title(\"Feature importances\")\n plt.bar(range(trainData.shape[1]), importances[indices],\n color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(range(trainData.shape[1]), indices)\n plt.xlim([-1, trainData.shape[1]])\n plt.show()\n\n elif method == 'SVM':\n\n classifier = svm.SVC(C=3, gamma=0.003, probability=True)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'AdaBoost':\n\n classifier = AdaBoostClassifier()\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n ############################################\n importances = model.feature_importances_\n std = np.std([tree.feature_importances_ for tree in model.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n # Print the feature ranking\n print(\"Feature ranking:\")\n for f in range(trainData.shape[1]):\n print(\"%d. 
feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\n # Plot the feature importances of the forest\n plt.figure()\n plt.title(\"Feature importances\")\n plt.bar(range(trainData.shape[1]), importances[indices],\n color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(range(trainData.shape[1]), indices)\n plt.xlim([-1, trainData.shape[1]])\n plt.show()\n\n elif method == 'NeuralNetwork':\n classifier = MLPClassifier(alpha=1)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'LogisticRegression':\n classifier = LogisticRegression()\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'LinearSVM':\n classifier = LinearSVC(random_state=0)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n ############################################\n importances = model.coef_\n # std = np.std([tree.feature_importances_ for tree in model.estimators_],\n plt.plot(importances.shape[1])\n plt.ylabel('some numbers')\n plt.show()\n elif method == 'kNN':\n\n # logger.info(model.coef_)\n # proba = model.predict_proba(testData)\n # proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n # probaDf = pd.DataFrame(data=proba, columns=classLabels)\n neigh = KNeighborsClassifier(n_neighbors=3)\n neigh.fit(trainData, trainLabels)\n\n result=neigh.predict(testData)\n probaDf=neigh.predict_proba(testData)\n\n # logger.info(method)\n\n return result, probaDf", "def classify(self):\r\n Classify(os.path.join(self.__path,'test.csv'),self.__rang,self.__numeric,self.__statistics,self.__k,self.__classes,self.__abs_n,self)\r\n self.view.Build_Button.configure(state=\"active\")", "def model_evaluate(model,x_train,n_y_array,x_val, vald_array):\n\n scores = model.evaluate(x_train, n_y_array, verbose=1)\n\n scores2 = model.evaluate(x_val, vald_array, verbose=1)\n\n\n print(\"for traininf set\")\n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores[0]))\n\n\n\n print(\"for validation set : \") \n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores2[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores2[0]))", "def classify(data, labels, (train_idx, test_idx), classifier=None):\r\n\r\n assert classifier is not None, \"Why would you pass not classifier?\"\r\n\r\n # Data scaling based on training set\r\n scaler = SupervisedStdScaler() #SupervisedRobustScaler() # # \r\n scaler.fit(data[train_idx,:], labels[train_idx], label=-1)\r\n #scaler.fit(data[train_idx,:], labels[train_idx])\r\n data_train = scaler.transform(data[train_idx,:])\r\n data_test = scaler.transform(data[test_idx,:])\r\n try:\r\n classifier.fit(data_train, labels[train_idx])\r\n \r\n \r\n confMat = confusion_matrix(labels[test_idx],\r\n classifier.predict(data_test))\r\n if confMat.shape == (1,1):\r\n if all(labels[test_idx] == -1):\r\n confMat = np.array([[confMat[0], 0], [0, 0]], dtype=confMat.dtype)\r\n else:\r\n confMat = np.array([[0, 0], [0, confMat[0]]], dtype=confMat.dtype)\r\n confMatRate = confMat / np.tile(np.sum(confMat, axis=1).astype('float'), (2,1)).transpose()\r\n totalErr = (confMat[0, 1] + 
confMat[1, 0]) / float(confMat.sum())\r\n #if type(classifier) not in [type(None), DummyClassifier]:\r\n if hasattr(classifier,'param_grid'): \r\n #isinstance(classifier, GridSearchCV) or \\\r\n # isinstance(classifier, RandomizedSearchCV):\r\n fitted_model = classifier.best_estimator_\r\n else:\r\n fitted_model = copy.copy(classifier) \r\n return confMatRate, totalErr, fitted_model\r\n except np.linalg.linalg.LinAlgError as e:\r\n # sahil added statement to raise the error instead of returning nun values\r\n print e.message\r\n raise e\r\n # return np.array([[np.nan, np.nan], [np.nan, np.nan]]), np.nan, None\r", "def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score", "def show_score(clf, X_test, y_test):\n y_pred = predict(clf, X_test)\n print metrics.classification_report(y_test.astype(np.int), y_pred)", "def performance_metrics(model, X_train, y_train, X_test, y_test, train=True, cv=True):\n from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, f1_score \n from sklearn.metrics import precision_score, recall_score, roc_auc_score\n from sklearn.model_selection import cross_validate, cross_val_score, StratifiedKFold\n scoring = {'acc': 'accuracy',\n 'prec_micro': 'precision_micro',\n 'rec_micro': 'recall_micro',\n 'f1_micro': 'f1_micro',\n 'auc':'roc_auc'} \n if train==True:\n if cv==True:\n kfold=StratifiedKFold(n_splits=10, random_state=42)\n scores = cross_validate(model, X_train, y_train, scoring=scoring, cv=kfold)\n ypredTrain = model.predict(X_train)\n Acc_train = scores['test_acc'].mean()\n Precision_train = scores['test_prec_micro'].mean()\n Recall_train = scores['test_rec_micro'].mean()\n F1_train = scores['test_f1_micro'].mean()\n AUC_train = scores['test_auc'].mean()\n conf_matrix_train = confusion_matrix(y_train, ypredTrain)\n class_report = classification_report(y_train, ypredTrain)\n print(\"TRAIN:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_train:.2f}\\n\")\n print(f\"CV - Precision: {Precision_train:.2f}\\n\")\n print(f\"CV - Recall: {Recall_train:.2f}\\n\")\n print(f\"CV - F1 score: {F1_train:.2f}\\n\") \n print(f\"CV - AUC score: {AUC_train:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_train}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\") \n elif cv==False:\n scores = cross_validate(model, X_train, y_train, scoring=scoring)\n ypredTrain = model.predict(X_train)\n Acc_train = scores['test_acc'].mean()\n Precision_train = 
scores['test_prec_micro'].mean()\n Recall_train = scores['test_rec_micro'].mean()\n F1_train = scores['test_f1_micro'].mean()\n AUC_train = scores['test_auc'].mean()\n conf_matrix_train = confusion_matrix(y_train, ypredTrain)\n class_report = classification_report(y_train, ypredTrain)\n print(\"TRAIN:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_train:.2f}\\n\")\n print(f\"CV - Precision: {Precision_train:.2f}\\n\")\n print(f\"CV - Recall: {Recall_train:.2f}\\n\")\n print(f\"CV - F1 score: {F1_train:.2f}\\n\") \n print(f\"CV - AUC score: {AUC_train:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_train}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\")\n elif train==False:\n if cv==True:\n kfold=StratifiedKFold(n_splits=10, random_state=42)\n scores = cross_validate(model, X_test, y_test, scoring=scoring, cv=kfold)\n ypredTest = model.predict(X_test)\n Acc_test = scores['test_acc'].mean()\n Precision_test = scores['test_prec_micro'].mean()\n Recall_test = scores['test_rec_micro'].mean()\n F1_test = scores['test_f1_micro'].mean()\n AUC_test = scores['test_auc'].mean()\n conf_matrix_test = confusion_matrix(y_test, ypredTest)\n class_report = classification_report(y_test, ypredTest) \n print(\"TEST:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_test:.2f}\\n\")\n print(f\"CV - Precision: {Precision_test:.2f}\\n\")\n print(f\"CV - Recall: {Recall_test:.2f}\\n\")\n print(f\"CV - F1 score: {F1_test:.2f}\\n\") \n print(f\"CV - AUC score: {AUC_test:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_test}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\")\n elif cv==False:\n scores = cross_validate(model, X_test, y_test, scoring=scoring)\n ypredTest = model.predict(X_test)\n Acc_test = scores['test_acc'].mean()\n Precision_test = scores['test_prec_micro'].mean()\n Recall_test = scores['test_rec_micro'].mean()\n F1_test = scores['test_f1_micro'].mean()\n AUC_test = scores['test_auc'].mean()\n conf_matrix_test = confusion_matrix(y_test, ypredTest)\n class_report = classification_report(y_test, ypredTest) \n print(\"TEST:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_test:.2f}\\n\")\n print(f\"CV - Precision: {Precision_test:.2f}\\n\")\n print(f\"CV - Recall: {Recall_test:.2f}\\n\")\n print(f\"CV - F1 score: {F1_test:.2f}\\n\") \n print(f\"CV - AUC score: {AUC_test:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_test}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\")", "def __train__(self):\n if (self.type_camf == 'CAMF_CI'):\n #users, items, context, ratings\n ci = camf_ci.CI_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = ci.fit()\n elif (self.type_camf == 'CAMF_CU'):\n cu = camf_cu.CU_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = cu.fit()\n elif (self.type_camf == 'CAMF_C'):\n c = camf_c.C_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = c.fit()\n\n dummy_pred = np.zeros((predictions.shape))\n for r, pred_array in enumerate(predictions):\n for c, pred in enumerate(pred_array):\n dummy_pred[r][c] = self.__check_ratings__(pred)\n predictions = dummy_pred\n #save a plot with a loss function\n plots = prs.PlotRSData()\n 
#print(losses)\n plots.plot_loss_cars(losses, self.type_camf, self.__save_prefix__+\"_loop\"+str(self.loop))\n pd.DataFrame(losses).to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ +\"losses_loop\"+str(self.loop)+\".csv\")\n print('Saving the feature matrix...')\n # set predictions back to the pivot table\n self.__utility_saved_training__(predictions) \n # save results\n self.utility_predictions.to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ + \"_SGD_predictions_loop\"+str(self.loop)+\".csv\")", "def eval(self):\n target_truth_labels = self.get_target_labels()\n for key in self.id_uncertainty_measures.keys():\n # deep copy needed as we mutate confidence values later on\n decision_fn_value = np.concatenate((copy.deepcopy(self.id_uncertainty_measures[key]),\n copy.deepcopy(self.ood_uncertainty_measures[key])),\n axis=0)\n # negation needed for confidence, as confidence is indicator of label=0 samples\n # i.e for correct classified samples.\n # But we need scores for label=1 samples i.e misclassified samples\n # to be higher, so we negate.\n if key == UncertaintyMeasuresEnum.CONFIDENCE or key == UncertaintyMeasuresEnum.PRECISION:\n decision_fn_value *= -1.0\n\n aupr, auroc = ClassifierPredictionEvaluator.compute_pr_roc_curves(\n decision_fn_value, target_truth_labels, self.result_dir, key._value_)\n\n with open(os.path.join(self.result_dir, 'results.txt'), 'a') as f:\n f.write('AUPR using ' + key._value_ + \": \" +\n str(np.round(aupr * 100.0, 1)) + '\\n')\n f.write('AUROC using ' + key._value_ + \": \" +\n str(np.round(auroc * 100.0, 1)) + '\\n')", "def confusion_metrics(experiment, plot_matrix = False):\n\n # y_train = experiment['y_train']\n # y_train_prediction = experiment['y_train_prediction']\n y_test = experiment['y_test']\n y_test_prediction = experiment['y_test_prediction']\n\n # precision1, recall1, fbeta1, support1 = precision_recall_fscore_support(y_train, y_train_prediction)\n precision2, recall2, fbeta2, support2 = precision_recall_fscore_support(y_test, y_test_prediction)\n\n # accuracy1 = accuracy_score(y_train, y_train_prediction)\n accuracy2 = accuracy_score(y_test, y_test_prediction)\n\n # TPR and TNR (TPR should equal to recall)\n # TPR1 = np.mean(y_train[y_train_prediction == 1])\n # NPR1 = 1. - np.mean(y_train[y_train_prediction == 0])\n TPR2 = np.mean(y_test[y_test_prediction == 1])\n NPR2 = 1. 
- np.mean(y_test[y_test_prediction == 0])\n\n # experiment['SCORES_train'] = (precision1, recall1, fbeta1, support1, accuracy1, TPR1, NPR1)\n experiment['SCORES_test'] = (precision2, recall2, fbeta2, support2, accuracy2, TPR2, NPR2)\n\n print('')\n print('+++++++++++++++++++++++++++++++++++++++++++++++++++++')\n print('Testing Results:')\n print(' Class 0')\n print(' Precision = {:,.2f}%'.format(precision2[0] * 100))\n print(' Recall = {:,.2f}%'.format(recall2[0] * 100))\n print(' F1-Score = {:,.2f}%'.format(fbeta2[0] * 100))\n print(' Support = {:,.0f}'.format(support2[0]))\n print(' Class 1')\n print(' Precision = {:,.2f}%'.format(precision2[1] * 100))\n print(' Recall = {:,.2f}%'.format(recall2[1] * 100))\n print(' F1-Score = {:,.2f}%'.format(fbeta2[1] * 100))\n print(' Support = {:,.0f}'.format(support2[1]))\n print('True positive rate = {:,.2f}'.format(TPR2 * 100))\n print('True negative rate = {:,.2f}'.format(NPR2 * 100))\n print('Accuracy = {:,.2f}%'.format(accuracy2 * 100))\n print('+++++++++++++++++++++++++++++++++++++++++++++++++++++')\n print('')\n\n if plot_matrix:\n cnf_matrix = confusion_matrix(y_test, y_test_prediction)\n plot_confusion_matrix(cnf_matrix, ['$H_0$', '$H_1$'])\n\n return experiment", "def multiclass_metrics(modelname, y_test, y_pred):\n multiclass_metrics = {\n 'Accuracy' : metrics.accuracy_score(y_test, y_pred),\n 'macro F1' : metrics.f1_score(y_test, y_pred, average='macro'),\n 'micro F1' : metrics.f1_score(y_test, y_pred, average='micro'),\n 'macro Precision' : metrics.precision_score(y_test, y_pred, average='macro'),\n 'micro Precision' : metrics.precision_score(y_test, y_pred, average='micro'),\n 'macro Recall' : metrics.recall_score(y_test, y_pred, average='macro'),\n 'micro Recall' : metrics.recall_score(y_test, y_pred,average='macro'),\n }\n \n df_metrics = pd.DataFrame.from_dict(multiclass_metrics, orient='index')\n df_metrics.columns = [model]\n\n \n \n return df_metrics", "def evaluate_classifier(classifier, data_x, data_y, matrix_title='', show=True):\n pred_y = classifier.predict(data_x)\n confusion_matrix = metrics.confusion_matrix(data_y, pred_y)\n f1_score = metrics.f1_score(data_y, pred_y, average='macro')\n print('\\nTest set F1 macro score: %0.4f .\\n' % f1_score)\n if show:\n show_confusion_matrix(confusion_matrix, f1_score, matrix_title)\n return f1_score", "def eval_classifier(clf, X, y_correct, classes, plot_cm=True):\n y_pred = clf.predict(X)\n return get_accuracy_and_plot_confusion(y_correct, list(y_pred), classes, plot=plot_cm)", "def evaluate(model, X, y):\n\tmodel.eval()\n\n\t# make the predictions\n\ty_hat = predict(model,X)\n\n\t# convert to cpu\n\ty_hat = y_hat.detach().cpu()\n\ty = y.detach().cpu()\n\n\t# compute evaluation metrics\n\taccuracy = accuracy_score(y, y_hat)\n\tprf \t = precision_recall_fscore_support(y, y_hat, labels=[0,1], average='macro')\n\n\treturn accuracy, prf", "def classify(train=None, test=None, data=None, res_dir=\"res/\", disp=True, outfilename=None):\n utils.print_success(\"Comparison of differents classifiers\")\n if data is not None:\n train_features = data[\"train_features\"]\n train_groundtruths = data[\"train_groundtruths\"]\n test_features = data[\"test_features\"]\n test_groundtruths = data[\"test_groundtruths\"]\n else:\n train = utils.abs_path_file(train)\n test = utils.abs_path_file(test)\n train_features, train_groundtruths = read_file(train)\n test_features, test_groundtruths = read_file(test)\n if not utils.create_dir(res_dir):\n res_dir = utils.abs_path_dir(res_dir)\n classifiers = {\n 
\"RandomForest\": RandomForestClassifier()\n # \"RandomForest\": RandomForestClassifier(n_estimators=5),\n # \"KNeighbors\":KNeighborsClassifier(3),\n # \"GaussianProcess\":GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),\n # \"DecisionTree\":DecisionTreeClassifier(max_depth=5),\n # \"MLP\":MLPClassifier(),\n # \"AdaBoost\":AdaBoostClassifier(),\n # \"GaussianNB\":GaussianNB(),\n # \"QDA\":QuadraticDiscriminantAnalysis(),\n # \"SVM\":SVC(kernel=\"linear\", C=0.025),\n # \"GradientBoosting\":GradientBoostingClassifier(),\n # \"ExtraTrees\":ExtraTreesClassifier(),\n # \"LogisticRegression\":LogisticRegression(),\n # \"LinearDiscriminantAnalysis\":LinearDiscriminantAnalysis()\n }\n for key in classifiers:\n utils.print_success(key)\n clf = classifiers[key]\n utils.print_info(\"\\tFit\")\n clf.fit(train_features, train_groundtruths)\n utils.print_info(\"\\tPredict\")\n predictions = clf.predict(test_features)\n\n if outfilename is not None:\n with open(outfilename, \"w\") as filep:\n for gt, pred in zip(test_groundtruths, predictions):\n filep.write(gt + \",\" + pred + \"\\n\")\n\n # Global\n data = [key]\n data.append(str(precision_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(recall_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(f1_score(test_groundtruths, predictions, average='weighted')))\n data = \",\".join(data)\n if disp:\n print(data)\n else:\n with open(res_dir + \"global.csv\", \"a\") as filep:\n filep.write(data + \",\\n\")\n # Local\n for index, tag in enumerate(list(set(train_groundtruths))):\n precision = precision_score(test_groundtruths, predictions, average=None)\n recall = recall_score(test_groundtruths, predictions, average=None)\n f1 = f1_score(test_groundtruths, predictions, average=None)\n line = key + \",\" + str(precision[index]) + \",\" + str(recall[index]) + \",\" + str(f1[index])\n if disp:\n print(line)\n else:\n with open(res_dir + \"tag_\" + tag + \".csv\", \"a\") as filep:\n filep.write(line + \",\\n\")\n return predictions", "def print_report(\n m, X_valid, y_valid, t=0.5, X_train=None, y_train=None, show_output=True\n):\n # X_train = X_train.values\n # X_valid = X_valid.values\n\n if isinstance(m, list):\n probs_valid = predict_ensemble(m, X_valid)\n y_val_pred = adjusted_classes(probs_valid, t)\n\n if X_train is not None:\n probs_train = predict_ensemble(m, X_train)\n y_train_pred = adjusted_classes(probs_train, t)\n else:\n probs_valid = m.predict_proba(X_valid)[:, 1]\n y_val_pred = adjusted_classes(probs_valid, t)\n\n if X_train is not None:\n probs_train = m.predict_proba(X_train)[:, 1]\n y_train_pred = adjusted_classes(probs_train, t)\n\n res = [\n roc_auc_score(y_valid, probs_valid),\n f1_score(y_valid, y_val_pred),\n confusion_matrix(y_valid, y_val_pred),\n ]\n result = f\"AUC valid: {res[0]} \\nF1 valid: {res[1]}\"\n\n if X_train is not None:\n res += [\n roc_auc_score(y_train, probs_train),\n f1_score(y_train, y_train_pred),\n ]\n result += f\"\\nAUC train: {res[3]} \\nF1 train: {res[4]}\"\n\n acc_train = m.score(X_train, y_train)\n acc_valid = m.score(X_valid, y_valid)\n\n if show_output:\n logging.info(f\"train acc: {acc_train}\")\n logging.info(f\"test acc: {acc_valid} \")\n\n logging.info(result)\n plot_confusion_matrix(\n m, X_valid, y_valid, display_labels=y_valid.unique()\n )\n logging.info(classification_report(y_valid, y_val_pred))\n plt.show()\n return {\n \"train\": {\"AUC\": res[3], \"F1\": res[4], \"acc\": acc_train},\n \"test\": {\"AUC\": res[0], \"F1\": res[1], 
\"acc\": acc_valid},\n }" ]
[ "0.78544277", "0.7645207", "0.7580781", "0.7521916", "0.73978734", "0.7377645", "0.7346552", "0.73453677", "0.73044485", "0.7295265", "0.7271339", "0.7254532", "0.7244622", "0.72413814", "0.7187265", "0.7177157", "0.71704125", "0.71259546", "0.7120069", "0.7119113", "0.70895916", "0.7085942", "0.7073982", "0.706928", "0.70627475", "0.7043767", "0.70430213", "0.70378673", "0.7009756", "0.70044833", "0.69974035", "0.69951373", "0.69936496", "0.6989337", "0.6989123", "0.69368076", "0.6925968", "0.6917329", "0.6908766", "0.69021845", "0.6882814", "0.6861898", "0.686082", "0.6835119", "0.6832791", "0.6822228", "0.6808347", "0.6797577", "0.67697793", "0.6761746", "0.67291504", "0.6710448", "0.67023355", "0.66962487", "0.66914994", "0.66911507", "0.6690207", "0.6666918", "0.6650763", "0.6639322", "0.6630064", "0.66274476", "0.66182655", "0.66126233", "0.66118515", "0.6608687", "0.6607164", "0.6597794", "0.659335", "0.65852255", "0.6568183", "0.65649277", "0.6506131", "0.6500344", "0.64960796", "0.64931655", "0.64899826", "0.64891744", "0.64859515", "0.648009", "0.6477387", "0.6466164", "0.64625204", "0.6459228", "0.6457932", "0.64496386", "0.6442863", "0.6428476", "0.6416875", "0.6411643", "0.640028", "0.63931155", "0.63920647", "0.63903606", "0.6380065", "0.63777417", "0.6375305", "0.63707143", "0.63642406", "0.6359153" ]
0.70245725
28
This function packages the trained model into the pickle file.
def save_model(model, model_filepath): # save the classifier with open(model_filepath, 'wb') as fid: pkl.dump(model, fid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_model(self):\n\n # =============================================================\n # Default : pickle the trained model. Change this (and the load\n # function, below) only if the library you used does not support\n # pickling.\n # self.Model_made.save(\"Model_made.h5\")\n # self.Model_claim.save(\"Model_claim.h5\")\n # Model_made = self.Model_made\n # Model_claim = self.Model_claim\n # self.Model_made = None\n # self.Model_claim = None\n with open('pricing_model.p', 'wb') as target:\n pickle.dump(self, target)\n\n # self.Model_made = Model_made\n # self.Model_claim = Model_claim\n\n # zipObj = ZipFile(\"model.zip\",\"w\")\n # zipObj.write(\"Model_made.h5\")\n # zipObj.write(\"Model_claim.h5\")\n # zipObj.write(\"pricing_model.p\")\n # zipObj.close()", "def pickle_model(self, filename):\n with open(filename, 'wb') as pkl:\n pickle.dump(self.model, pkl)", "def save_model(model, model_filepath, protocol=0):\n # using pickle to store trained classifier\n #pickle.dump(model,open(model_filepath,'wb'))\n \n file = gzip.GzipFile(model_filepath, 'wb')\n file.write(pickle.dumps(model, protocol))\n file.close()\n \n pass", "def save_model(self):\n pickle.dump(self, open(\"Logistic_Regression_Model.pkl\", \"wb\"))", "def save(self, path=None):\n if path is None:\n path = os.path.join(logger.get_dir(), \"model.pkl\")\n\n with tempfile.TemporaryDirectory() as td:\n save_state(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n cloudpickle.dump((model_data, self._act_params), f)", "def save_model(self):\n\n self.check_model()\n\n with open(self.filename, 'wb') as file:\n pickle.dump({'model': self.model, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)", "def save_model(self, filename):\n\t\tpickle.dump(self, open(filename, 'wb'))\n\t\tprint('Model saved in',filename)", "def savemodel(self, fname):\n if not fname.endswith('.gz'):\n fname += '.gz'\n D = {'clf':self.clf, 'vocab':self.vocab,\n 'idxlabelmap':self.labelmap}\n with gzip.open(fname, 'w') as fout:\n dump(D, fout)\n print 'Save model into file: {}'.format(fname)", "def save(self, model_file):\n pickle.dump(self, open(model_file, 'wb'))", "def save_model(model, filepath):\n try:\n dump(model, filepath)\n except Exception as e:\n print(e)\n print('Failed to pickle model.')", "def save_model(self):\n joblib.dump(self.pipeline, \"model.joblib\")", "def save_model(learn, name):\n# callback_fns = learn.callback_fns # preserve wandb callback and others\n# callbacks = learn.callbacks\n \n# learn.callback_fns = [] # clean callbacks\n# learn.callbacks = []\n \n learn.save(PATH_TO_MODELS / name) # save only weights, adds .pth automatically\n learn.export(PATH_TO_MODELS / f\"{name}.pkl\") # serialize entire model, need to add .pkl", "def save(self, model_name = 'mr-senti'):\n\n\t\tjoblib.dump(self.classifier, os.path.join('model', model_name + '.pkl'))", "def save_model(self):\n joblib.dump(self.pipeline, 'model.joblib')\n print(colored('model.joblib saved locally', 'green'))", "def save(self, pickle_fp):\n if isinstance(self.model, KerasClassifier) and self.model_trained:\n model_fp = os.path.splitext(pickle_fp)[0]+\".h5\"\n 
self.model.model.save(model_fp)\n current_model = self.model.__dict__.pop(\"model\", None)\n with open(pickle_fp, \"wb\") as fp:\n dill.dump(self, fp)\n setattr(self.model, \"model\", current_model)\n else:\n dill.dump(self, fp)", "def save_model(model, file_name):\n with open(file_name, 'wb') as file:\n pickle.dump(model, file)", "def serialize_model(model, X, y):\n\n # Fitting the model to the full dataset\n model.fit(X, y)\n # Pickling\n pkl_filename = 'rehosp_model.pkl'\n with open(pkl_filename, 'wb') as file:\n pickle.dump(model, file)\n\n return", "def dumpme(self) :\n fileName = \"./data/oP4_ModelBuilder.dump\"\n with open(fileName,\"wb\") as dumpedFile:\n oPickler = pickle.Pickler(dumpedFile)\n oPickler.dump(self)", "def serialize_model(file_name: str, model):\n file_name = path.join(MODEL_DIR, file_name)\n print(file_name)\n pickle.dump(model, open(file_name, 'wb'))", "def store_pickle(model):\n\n model_file = \"gridsearch_model.sav\"\n with open(model_file, mode='wb') as m_f:\n pickle.dump(model, m_f)\n print(f\"Model saved in: {os.getcwd()}\")\n return model_file", "def save_model(self, step):\n\n # file_name = params['name']\n # pickle.dump(self, gzip.open(file_name, 'wb'))", "def save_model(model):\n model.to_disk(\"../model/custom_ner_model\")", "def save_model(self, file_name):\n with open(file_name, 'wb') as file:\n pickle.dump(self.lin_reg, file)", "def _save_model(self):\n with open(self.filepath, 'wb') as file:\n pickle.dump(self.cmodel, file)", "def save_model(model, model_filepath):\n\n with open(model_filepath , 'wb') as file:\n pickle.dump(model, file)", "def serialize(self, path):\r\n newModelFitter = self.copy()\r\n with open(path, \"wb\") as fd:\r\n rpickle.dump(newModelFitter, fd)", "def save_model(self, path):\n try:\n # os.makedirs(osp.dirname(path), exist_ok=1)\n joblib.dump(self.model, path)\n except Exception as e:\n print(e)\n print(\"Couldn't save scikit learn model on path {}!\".format(path))", "def save_model(self, filename):\r\n pass", "def save_model(self, model_name):\n\n # Set up the main destination folder for the model\n dst_root = './data/LinearSVM/{0:s}'.format(model_name)\n if not os.path.exists(dst_root):\n os.makedirs(dst_root)\n print(f'No folder for LinearSVM model {model_name} storage found')\n print(f'Make folder to store model at')\n\n # Dump the model into the designated folder\n file_name = \"{0:s}_{1:s}.pkl\".format(model_name, self.amine)\n with open(os.path.join(dst_root, file_name), \"wb\") as f:\n pickle.dump(self, f)", "def dump(self, model_path):\n pickle.dump(self.scaler, gzip.open(os.path.join(model_path, 'scaler.pkl.gz'), 'w'),\n protocol=pickle.HIGHEST_PROTOCOL)\n# pickle.dump(self.mapper, gzip.open(os.path.join(model_path, 'mapper.pkl.gz'),'w'),\n# protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.batcher, gzip.open(os.path.join(model_path, 'batcher.pkl.gz'), 'w'),\n protocol=pickle.HIGHEST_PROTOCOL)", "def save_model(self, file_name: str):\n os.makedirs(self.checkpoint_path, exist_ok=True)\n to_save = {'params': self.params, 'opt_state': self.opt_state}\n path = os.path.join(self.checkpoint_path, file_name)\n with open(path, 'wb') as f:\n pickle.dump(to_save, f)", "def save_model(model, model_filepath): \n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(self):\n torch.save(self.get_params(), 'code/lr-model.pt')", "def load_trained_model(filename = 'pricing_model.p'):\n # with ZipFile(\"model.zip\",\"r\") as w:\n # w.extractall()\n \n with open(filename, 'rb') as model:\n pricingmodel = 
pickle.load(model)\n \n # pricingmodel.Model_made = tf.keras.models.load_model(\"Model_made.h5\")\n # pricingmodel.Model_claim = tf.keras.models.load_model(\"Model_claim.h5\")\n \n \n return pricingmodel", "def save(self, main_dir):\n with open(f'{main_dir}/models/model_N{self.N}.pkl', 'wb') as f:\n pickle.dump(self.model, f)", "def save_model(model, model_filepath):", "def save_model(model, model_filepath): \n \n model_file = open(model_filepath,\"wb\")\n pickle.dump(model, model_file)\n model_file.close()", "def save(self, path):\n with tempfile.TemporaryDirectory() as td:\n U.save_state(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n dill.dump((model_data, self._act_params), f)", "def save_model(self, fname):\n self.get_booster().save_model(fname)", "def save(model, filepath='bnlearn_model.pkl', overwrite=False, verbose=3):\n if (filepath is None) or (filepath==''):\n filepath = 'bnlearn_model.pkl'\n if filepath[-4:] != '.pkl':\n filepath = filepath + '.pkl'\n filepath = str(Path(filepath).absolute())\n\n # Store data\n # storedata = {}\n # storedata['model'] = model\n # Save\n status = pypickle.save(filepath, model, overwrite=overwrite, verbose=verbose)\n # return\n return status", "def save_model(trained_model):\n twitter_user_gender_classifier_pkl_filename = 'twitter_user_gender_classifier_NB.pkl'\n # Open the file to save as pkl file\n trained_model_pickle_file = open(twitter_user_gender_classifier_pkl_filename, 'wb')\n pickle.dump(trained_model, trained_model_pickle_file)\n # Close the pickle instances\n trained_model_pickle_file.close()", "def save_trained_model(self):\n save_keras_sequential(self.model, self.RELATIVE_DATA_DIRECTORY, self.get_name())\n logger.info(f\"DQL Trader: Saved trained model\")", "def save_model(self, model_path: str):", "def save_model(model, model_filepath):\n pickle.dump( model, open( model_filepath, \"wb\" ) )", "def save_model(model, model_path):\n pickle.dump(model.best_estimator_,open(model_path,'wb'))", "def save_model(self, file_name):\n\t\tself.model.save_weights(file_name)", "def save_pipeline(model_to_persist):\n\n save_file_name = 'model.pkl'\n save_path = configuracion.TRAINED_MODEL_DIR / save_file_name\n joblib.dump(model_to_persist, save_path)\n\n print('saved pipeline')", "def model_save(model, name):\n extension = \".pickle\"\n\n save_model_name = os.path.join(MODEL_SAVE_DIR, name + extension)\n pk.dump(model, save_model_name)", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, \"wb\"))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, \"wb\"))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n 
pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(self, filename='model.pt'):\n checkpoint = {\n 'input_size': self.linear_layers[0].in_features,\n 'output_size': self.linear_layers[-1].out_features,\n 'hidden_layers': [layer.out_features for layer in self.linear_layers[:-1]],\n 'state_dict': self.state_dict()}\n torch.save(checkpoint, filename)", "def save_model(model, model_filepath):\n\n outfile = open('model_filepath','wb')\n pickle.dump(model, outfile)\n outfile.close()", "def save_model(self, name): \n torch.save(dict(params=self.model.encoder.state_dict()), osp.join(self.args.save_path, name + '.pth'))", "def save_model(self, model, model_filepath):\n joblib.dump(model, model_filepath)", "def dump_model(model, filename):\n import pickle\n logging.info(\"Dumping model into model.pkl\")\n with open(filename, 'w') as dump_file:\n pickle.dump(model, dump_file)", "def save_model(model, model_filepath):\n dump(model, model_filepath)", "def save_model(model, model_filepath):\n joblib.dump(model, model_filepath)", "def save_model(model, model_filepath):\n joblib.dump(model, model_filepath)", "def save_model(model, model_filepath):\n joblib.dump(model, model_filepath)", "def _save_model(self):\n save_generic(self.model, self.model_pkl_fname)", "def save(self, model_path):\n self.encoder.save(model_path)", "def save_model(clf, save_folder, filename):\n import pickle\n path = save_folder + filename\n with open(path, 'wb') as handle:\n pickle.dump(clf, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def saveModel(self, save_path):\n if not os.path.exists('/'.join(os.path.split(save_path)[:-1])):\n os.makedirs('/'.join(os.path.split(save_path)[:-1]))\n with open(save_path, 'wb') as fw:\n pickle.dump(self.clf, fw)", "def model() -> Any:\n with open(\"airbnb_regressor.pickle\",\"rb\") as f:\n model = pickle.load(f)\n return model", "def save_data_pickle(self, save_full=False):\n self.train.to_pickle('../input/train_mod.pkl')\n self.test.to_pickle('../input/test_mod.pkl')\n if save_full:\n self.train_full.to_pickle('../input/train_full_mod.pkl')", "def save_model(self):\n saved_path = self.config.path_tmp / self.model.model_name\n saved_path.mkdir(parents=True, exist_ok=True)\n self.model.save_weights(str(saved_path / 'model.vec'))", "def save_model(self):\n print(\"\\nModels are integrated to be multi scale.\\nSaving to disk.\")\n self.column_names = [ \"x_\" + str(x) for x in range(self.embedding.shape[1])]\n self.embedding = pd.DataFrame(self.embedding, columns = self.column_names)\n self.embedding.to_csv(self.args.output, index = None)", "def save_model(model, filename):\n with open(filename, 'wb') as f:\n joblib.dump(model, f)", "def save_model(self, filename):\n with open(filename + \".dil\", 'wb') as f:\n dill.dump(self.social_distancing_func, f)\n self.clear_social_distancing_func()\n with open(filename + \".pkl\", 'wb') as f:\n pickle.dump(self, f)", "def save_model(model, model_filepath):\n\n with open(model_filepath, 'wb') as f:\n pickle.dump(model, f)", "def test_pickle(self):\n X = self.generate_X()\n task = mmRDTR()\n task.fit(X)\n with tempfile.TemporaryFile(mode='w+b') as tf:\n cPickle.dump(task, tf)", "def _save_model_and_checkpoint(self, save_model_class=False):\n import os\n\n try:\n import 
cloudpickle\n except ImportError:\n cloudpickle = None\n\n logger.info(\"Saving model...\")\n output_dir = os.path.join(\n self.args.output_dir, f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\"\n )\n\n # save model parameters\n self._save_checkpoint(self.model, trial=None, metrics=None)\n # save the serialized model\n if save_model_class:\n # TODO : fix serialization of DatasetSchema object\n if cloudpickle is None:\n raise ValueError(\"cloudpickle is required to save model class\")\n\n with open(os.path.join(output_dir, \"model_class.pkl\"), \"wb\") as out:\n cloudpickle.dump(self.model.module, out)", "def save_model(self, filename, overwrite=None):\n return\n pickle.dump(\n obj=self._model,\n file=open(filename, \"wb\")\n )", "def save_trained_model(self, filename):\n d = self.pack_npz()\n with open(filename, 'wb') as f:\n np.savez(f, base_str=super(SpatialGP, self).__repr_base_params__(), **d)", "def save_model(model, model_filepath):\n try:\n filename = f'{model_filepath}.pkl'\n joblib.dump(model,\n open(filename, 'wb'),\n compress=3)\n except:\n raise Exception(\"Could not save model.\")", "def save_model(model, filename):\n model_dir = \"models\"\n os.makedirs(model_dir,exist_ok=True) #create only if model directory dosent exists\n filePath = os.path.join(model_dir, filename)\n logging.info(filePath)\n joblib.dump(model, filePath)", "def save_model(self, dir=\"\", **kwargs):\n ckpt_fn = os.path.join(dir, f\"model.pkl\")\n torch.save(\n {\n \"global_step\": self.global_step_,\n \"epoch\": self.epoch_,\n \"model\": self.net_.state_dict(),\n \"optimizer\": self.optimizer_.state_dict(),\n \"sampler_state\": self.sampler.state_dict(),\n \"model_samples\": list(self.model_samples_),\n \"ais_state\": self.ais_loss.state_dict(),\n \"replay_prob\": self.replay_prob,\n \"max_replay\": self.max_replay,\n },\n ckpt_fn,\n )\n return ckpt_fn", "def save(self, model_path: str) -> None:\n metadata_string = json.dumps({ \"classes\": self.classes })\n with open(os.path.join(model_path, \"metadata.json\"), \"w\") as metadata_file:\n metadata_file.write(metadata_string)\n with self.graph.as_default():\n with self.session.as_default():\n self.model.save_weights(os.path.join(model_path, \"weights.h5\"))", "def export_model(model, name):\n\tpath = \"data/{}/\".format(name)\n\tfilename = \"{}.model\".format(name)\n\tif os.path.isdir(path):\n\t\tprint(\"model already exists\")\n\t\treturn\n\telse:\n\t\tos.mkdir(path)\n\t\tjoblib.dump(model, path + filename)", "def save(self, model_name):\n\n with tempfile.TemporaryDirectory() as dirpath:\n\n # Save the Keras models\n if self.mol_to_latent_model is not None:\n self.mol_to_latent_model.save(dirpath + \"/mol_to_latent_model.h5\")\n\n self.latent_to_states_model.save(dirpath + \"/latent_to_states_model.h5\")\n self.batch_model.save(dirpath + \"/batch_model.h5\")\n\n # Exclude unpicklable and unwanted attributes\n excl_attr = [\n \"_DDC__mode\",\n \"_DDC__train_gen\",\n \"_DDC__valid_gen\",\n \"_DDC__mol_to_latent_model\",\n \"_DDC__latent_to_states_model\",\n \"_DDC__batch_model\",\n \"_DDC__sample_model\",\n \"_DDC__multi_sample_model\",\n \"_DDC__model\",\n ]\n\n # Cannot deepcopy self.__dict__ because of Keras' thread lock so this is\n # bypassed by popping and re-inserting the unpicklable attributes\n to_add = {}\n # Remove unpicklable attributes\n for attr in excl_attr:\n to_add[attr] = self.__dict__.pop(attr, None)\n\n # Pickle metadata, i.e. 
almost everything but the Keras models and generators\n pickle.dump(self.__dict__, open(dirpath + \"/metadata.pickle\", \"wb\"))\n\n # Zip directory with its contents\n shutil.make_archive(model_name, \"zip\", dirpath)\n\n # Finally, re-load the popped elements for the model to be usable\n for attr in excl_attr:\n self.__dict__[attr] = to_add[attr]\n\n print(\"Model saved.\")", "def pickle_dump_files():\n with open('data/' + dataset_name + '_' + model_name + '_' + 'predictions', 'wb') as f:\n pickle.dump(predictions, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'state_sentences', 'wb') as f:\n pickle.dump(final_state_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'decoded_sentences', 'wb') as f:\n pickle.dump(final_decoded_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'ids', 'wb') as f:\n pickle.dump(idx, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'exemplars', 'wb') as f:\n pickle.dump(exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'counter_exemplars', 'wb') as f:\n pickle.dump(counter_exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_exemplar_words', 'wb') as f:\n pickle.dump(top_exemplar_words, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_counter_exemplar_words', 'wb') as f:\n pickle.dump(top_counter_exemplar_words, f)", "def save_model(self, output_path):\n joblib.dump(self.dtr, output_path)", "def save(self, filename='nn_model.pkl'):\n seconds = time.time()\n\n directory = os.path.join(os.curdir, 'models')\n filepath = os.path.join(directory, str(seconds)+'_'+filename)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n with open(filepath, 'wb') as f:\n pickle.dump(self, f)\n f.close()", "def save(self,model_path):\n pass\n # filename = \"Models/\"+model_path+\"1.sav\"\n # pickle.dump(self.crf_model, open(filename, 'wb'))", "def saveModel(self):\n self._model.save_weights('./my_model')\n return None", "def trainAndSaveModels():\n print \"\\nTraining models...\"\n\n #Use the best-performed train and test splitted data \n X_train = pickle.load(open('X_train.sav','rb'))\n X_test = pickle.load(open('X_test.sav','rb'))\n Y_train = pickle.load(open('Y_train.sav','rb'))\n \n #train models\n lassoModel = LassoPrediction(X_train, X_test, Y_train)\n forestModel = RandomForestPrediction(X_train, X_test, Y_train)\n boostingModel = GradientBoosting(X_train, X_test, Y_train)\n \n #save the modes\n pickle.dump(lassoModel,open('lasso_Model.sav','wb'))\n pickle.dump(forestModel,open('forest_Model.sav','wb'))\n pickle.dump(boostingModel,open('sgb_Model.sav','wb'))", "def save(self, save_dir='models'):\n with open(os.path.join(save_dir, 'model_expert_predictor.pkl'), 'wb') as f:\n pickle.dump(self.model, f)\n with open(os.path.join(save_dir, 'vectorizer_expert_predictor.pkl'), 'wb') as f:\n pickle.dump(self.vectorizer, f)\n with open(os.path.join(save_dir, 'userid2name.pkl'), 'wb') as f:\n pickle.dump(self.userid2name, f)\n with open(os.path.join(save_dir, 'name2userid.pkl'), 'wb') as f:\n pickle.dump(self.name2userid, f)", "def save_model(self, model_filename):\n self.model.save_weights(model_filename)", "def fit_store(X, y):\n print(\"Fitting model to training set...\")\n model = pr.build_model.fit_model(X, y)\n pickle.dump(model, open(\"models/\" + \"model\" + \".pkl\", \"wb\"))", "def saveModel(model, file_name):\n with open(SAVE_PATH + file_name, \"wb\") as out_file:\n # wo do not want to save 
redundant data, so keys and vals are excluded\n pickle.dump(model, out_file)\n print(\"model save to\", SAVE_PATH + file_name)" ]
[ "0.7843905", "0.7577629", "0.73923266", "0.73051053", "0.727403", "0.7219735", "0.71813995", "0.71445173", "0.7117435", "0.7111384", "0.710956", "0.71073145", "0.7095668", "0.70784366", "0.7034732", "0.7018957", "0.7018404", "0.70135003", "0.70103824", "0.700899", "0.6993362", "0.6974658", "0.6932523", "0.68955374", "0.6891684", "0.6890408", "0.6871358", "0.68677545", "0.6866925", "0.68601435", "0.68375725", "0.6830314", "0.6829109", "0.68273133", "0.6803765", "0.67858714", "0.67786187", "0.6771936", "0.67694813", "0.6755291", "0.6754523", "0.67417395", "0.67301637", "0.67294407", "0.672671", "0.6722887", "0.6716626", "0.6713919", "0.6713861", "0.6713861", "0.6709813", "0.6709813", "0.6709813", "0.6709813", "0.6709813", "0.6709813", "0.6709813", "0.6709813", "0.6709813", "0.6709813", "0.6709299", "0.67077667", "0.6701834", "0.66893655", "0.6687512", "0.66759056", "0.66726446", "0.66726446", "0.66726446", "0.6664999", "0.666131", "0.6642351", "0.663403", "0.662304", "0.6620862", "0.6617317", "0.66101056", "0.66081583", "0.6605052", "0.6600094", "0.65952927", "0.65785986", "0.65663975", "0.65656567", "0.65601724", "0.65589404", "0.65559965", "0.6554681", "0.6550649", "0.6547321", "0.6545276", "0.65441924", "0.65375775", "0.6536721", "0.6532579", "0.65264076", "0.65208805", "0.65063566", "0.65032345", "0.6499116" ]
0.67822295
36
Get current date and time string.
def now_short(_format="%Y%m%d-%H%M%S"):
    return time.strftime(_format, time.localtime()) + "\t"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_datetime_string ( ) :\n return get_current_datetime( ).strftime( \"%Y%m%d-%H%M%S\" )", "def get_current_time():\n return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())", "def get_now():\r\n now = dt.datetime.now()\r\n now_str = now.strftime(\"%d/%m %H:%M\")\r\n return now_str", "def _getCurrentDateString(self):\n currentDateTime = datetime.now()\n return currentDateTime.strftime(\"%Y%m%d_%H%M\")", "def current_time():\n now = datetime.datetime.now()\n time = now.strftime(\"%Y-%m-%d %H:%M:%S:%f\")\n return time", "def str_current_time():\n return strftime(\"%Y_%m_%d_%H_%M_%S_%Z\", gmtime())", "def _get_current_time() -> str:\n return datetime.now().strftime(\"%FT%H:%M:%S\")", "def get_now():\n\treturn datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")", "def time_now() -> str:\n return datetime_to_str(datetime_now())", "def now_datetime():\n now = datetime.datetime.now()\n return now.strftime('%Y%m%d%H%M%S')", "def time_now():\n cur_time = str(datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\"))\n return cur_time", "def current_time():\n now = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S.%f\")\n return now", "def now():\n return time.strftime(\"%Y_%m_%d_%H_%M_%S\")", "def get_datetime_string():\n return datetime.now().strftime(DATETIME_FORMAT)", "def get_current_date():\n return datetime.datetime.today().strftime(constants.DATE_FORMAT)", "def get_current_time():\n dateTime = datetime.datetime.now()\n # \"%Y-%m-%d %H:%M:%S:%f\" is default formatting with everything\n dateTime = dateTime.strftime(\"%m-%d-%y %H:%M:%S\")\n\n logger.logger.debug(\"Getting current time: {}\".format(dateTime))\n\n return dateTime", "def get_current_timestamp_str(self):\n return str(time.mktime(datetime.datetime.now().timetuple()))", "def get_date_time():\n date_time = datetime.now()\n date_time_string = date_time.strftime(\"%b-%d-%Y (%H:%M:%S)\")\n return date_time_string", "def nowdt():\n from datetime import datetime\n\n now = datetime.now()\n return now.strftime(\"%d/%m/%Y %H:%M:%S\")", "def get_time():\n\n time_format = \"%Y-%m-%d %H:%M:%S\"\n now = str(datetime.datetime.now().strftime(time_format))\n\n return now", "def get_now():\n right_now = datetime.datetime.now()\n return (\"%04d%02d%02d-%02d:%02d:%02d\"\n % (right_now.year, right_now.month, right_now.day,\n right_now.hour, right_now.minute, right_now.second))", "def now(self):\n return time.strftime(r'[%d/%b/%Y:%H:%M:%S]')", "def date_time():\n\n\treturn datetime.now().strftime(\"%d%m%Y_%H%M%S\")", "def now():\n now = datetime.datetime.now()\n return \"%04d-%02d-%02d %02d:%02d:%02d.%03d\" % ( now.year, now.month,now.day,\n now.hour,now.minute,now.second,int(now.microsecond/1e3))", "def time():\n return datetime.datetime.now().strftime(\"%Y%m%dT%H%M%SZ\")", "def get_datetime_str():\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())", "def get_current_time():\n return datetime.now()", "def time_str(self):\n return datetime.now().strftime('%c')", "def now(self):\n os.environ['TZ'] = conf.timezone\n time.tzset()\n return time.strftime(\"%B %d %Y %H:%M:%S IST\", time.localtime())", "def now(self):\r\n return time.ctime(time.time())", "def get_date():\n return datetime.now().strftime(\"%c\")", "def get_current_time():\n return datetime.datetime.now()", "def get_now_time():\r\n return '[' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f') + ']: '", "def get_current_date(fmt=\"%Y-%m-%d\"):\n return datetime.datetime.now().strftime(fmt)", "def time_now():\n ts = datetime.datetime.now().timetuple()\n return '{wday} 
{day} {month} {year} {hour}:{minute:0>2d}:{second:0>2d} UTC'.format(\n year=ts.tm_year, month=calendar.month_name[ts.tm_mon],\n day=ts.tm_mday, wday=calendar.day_name[ts.tm_wday],\n hour=ts.tm_hour, minute=ts.tm_min, second=ts.tm_sec)", "def now():\n print(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))", "def get_time_stamp_str() -> str:\n return datetime.datetime.now().strftime(DateFormat)", "def now():\n return datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')", "def timestr():\n return dt.strftime(dt.now(),'%H:%M:%S')", "def now():\n return datetime.now().strftime(FORMAT)", "def _today() -> str:\n return strftime(DATE_FORMAT, gmtime())", "def time_date(self):\r\n from datetime import date\r\n from datetime import datetime\r\n\r\n self.today = date.today() # Output is: 2020-05-19\r\n # Need to change that in a format 19/05/2020\r\n self.today_format = self.today.strftime(\"%d/%m/%Y\") #Output is: 19/05/2020\r\n\r\n self.now = datetime.now()\r\n self.current_time = self.now.strftime(\"%I:%M:%S %p\") # %I- Hour 12hr foemat %M- minitue %S- sec %p- AM/PM\r\n # Both self.current_time and self.today_format are in string format\r\n\r\n return self.current_time, self.today_format", "def date_now():\n return datetime.today().strftime('%c')", "def get_now() -> str:\n global _NOW\n if _NOW is None:\n _NOW = str(datetime.now().replace(microsecond=0))\n return _NOW", "def timestamp_now():\n return datetime.now().strftime(\"%A, %B %d, %Y, %I:%M %p\")", "def currentTime():\n zone='America/Bogota'\n getDate = datetime.now(pytz.timezone(zone));\n #Format -> d/m/Y H:M:S\n return getDate", "def current_date_time_stamp():\n return datetime.now().strftime('%Y.%m.%d %H:%M:%S.%f')[:-7]", "def time(self):\r\n now = datetime.datetime.now()\r\n month = rfc822._monthnames[now.month - 1].capitalize()\r\n return ('[%02d/%s/%04d:%02d:%02d:%02d]' %\r\n (now.day, month, now.year, now.hour, now.minute, now.second))", "def getdate():\r\n import datetime\r\n return datetime.datetime.now()", "def get_date():\n return str(datetime.now()).split(' ')[0]", "def get_date_time():\n d = datetime.datetime.now().date()\n t = datetime.datetime.now().time()\n date = str(d.day) + '-' + str(d.month) + '-' + str(d.year)\n time = str(t.hour) + ':' + str(t.minute)\n date_time = date + ' ' + time\n return date_time", "def date() -> str:\n\n return datetime.strftime(datetime.today(), _fmt)", "def getTimeString():\n\tfrom time import strftime\n\treturn strftime(\"%d-%m-%Y__%H-%M-%S\")", "def currentTime():\n return strftime(\"%H:%M:%S\", time.localtime())", "def get_current_datetime ( ) :\n return datetime.datetime.now( )", "def time_stamp():\n \n today = datetime.datetime.now()\n return today.strftime(\"%Y-%m-%d %a %H:%M\")", "def _time_string():\n os.environ['TZ'] = config.time_zone\n time.tzset()\n return time.asctime()", "def formatted_time() -> datetime.datetime:\r\n return datetime.datetime.now()", "def get_date_today() -> str:\n return datetime.now().strftime(\"%Y-%m-%d\")", "def getDate():\n current_time = datetime.datetime.now()\n day = current_time.day\n month = current_time.month\n year = current_time.year\n date = \"{dd}-{mm}-{yyyy}\".format(dd=day,mm=month,yyyy=year)\n return date", "def getdate():\n return strftime(\"%A %B %d, %I:%M %p\")", "def __get_timestamp() -> str:\n return str(datetime.now().astimezone())", "def __get_timestamp() -> str:\n return str(datetime.now().astimezone())", "def dtstr():\n return dt.strftime(dt.now(),'%Y %m %d, %H:%M:%S')", "def current_time():\n return time.time()", "def tnow():\n return 
datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')", "def getCurrentTime():\n\tnow = datetime.datetime.now()\n\thr = now.hour\n\tgreeting = \"\"\n\tampm = \"\"\n\tif (hr < 12): #morning\n\t\thr = hr\n\t\tgreeting = \"morning\"\n\t\tampm = \"am\"\n\telif (hr >= 12 and hr < 1): #afternoon\n\t\thr = hr\n\t\tgreeting = \"afternoon\"\n\t\tampm = \"noon\"\n\telif (hr > 12 and hr < 19): #evening\n\t\thr = hr - 12\n\t\tgreeting = \"evening\"\n\t\tampm = \"pm\"\n\telse: #night\n\t\thr = hr - 12\n\t\tgreeting = \"night\"\n\t\tampm = \"pm\"\n\treturn str(hr) + ':' + str(now.minute),ampm, ' in the ', greeting", "def get_time():\r\n \r\n dt = datetime.datetime.now()\r\n dt_parsed = dt.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n return dt_parsed", "def get_time_date(self):\n return time.strftime(\"%m-%d-%Y %H:%M\")", "def get_timestamp():\n return datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")", "def get_time():\n return time.strftime(\"%Y%m%d-%H%M%S\")", "def get_time():\r\n return datetime.datetime.now().strftime(\"%H\")+\":\"+datetime.datetime.now().strftime(\"%M\")+\":\"+datetime.datetime.now().strftime(\"%S\")", "def time_stamper() :\n\treturn datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")", "def getNowStr():\n\n thisStr = str(datetime.datetime.now())\n thisStr = thisStr.replace(' ','_')\n thisStr = thisStr.replace('-','_')\n thisStr = thisStr.replace(':','_')\n thisStr = thisStr.replace('.','_')\n return thisStr", "def get_date():\n return datetime.datetime.now()", "def get_current_time(self):\n return self.time", "def get_time_stamp():\n\n return datetime.now().strftime('%y-%m-%d_%H-%M-%S')", "def get_date():\n\n return datetime.datetime.utcnow().isoformat()", "def get_date():\n return (datetime.now() - TIMEDELTA).isoformat()", "def get_time(self):\n return time.strftime(\"%d/%m/%y %M:%H:%S\", self.time)", "def getTimeStamp(self):\n\n\t\ttime_stamp = datetime.datetime.now();\n\n\t\treturn str(time_stamp);", "def get_time():\n return datetime.datetime.now()", "def today():\n today_object = datetime.utcnow()\n today_string = today_object.strftime('%m/%d/%Y')\n return today_string", "def get_time(self):\n x = time.localtime()\n return ''.join([\n str(x[0]).rjust(4, '0'), '/', str(x[1]).rjust(2, '0'), '/',\n str(x[2]).rjust(2, '0'), ' ', str(x[3]).rjust(2, '0'), ':',\n str(x[4]).rjust(2, '0'), ':', str(x[5]).rjust(2, '0')])", "def now(format='%m/%d/%Y %H:%M:%S'):\n return datetime.datetime.now().strftime(format)", "def time_now():\n return datetime.datetime.now().time()", "def getNow(self):\n sql = \"SELECT DATE_FORMAT(NOW(), '%Y-%m-%d %H:%i:%S');\"\n results = self.selectOpt(sql)\n now = ''\n for r in results:\n now = r[\"DATE_FORMAT(NOW(), '%Y-%m-%d %H:%i:%S')\"]\n return now", "def timestamp():\n my_date_object = datetime.utcnow()\n my_date_string = my_date_object.strftime('%d-%m-%Y %H:%M:%S')\n return my_date_string", "def timestamp():\n return datetime.now().strftime('%H:%M:%S %m-%d')", "def get_date():\n dt = datetime.now()\n return dt.strftime(\"%Y-%m-%d\")", "def now_s():\n return calendar.timegm(now_dt().utctimetuple())", "def now(time):\n a = datetime.fromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S')\n return a", "def now(time):\n a = datetime.fromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S')\n return a", "def get_current_time(format='%Y-%m-%d %H:%M:%S.%f %z'):\n return datetime.utcnow().replace(\n tzinfo=pytz.utc\n ).strftime(format)", "def get_now_time() -> str | None:\n if now_time := arg_to_datetime('now'):\n return now_time.strftime(DATE_FORMAT)\n else:\n return None", "def 
todayDate(self):\n return time.strftime(\"%m/%d/%Y\", time.localtime())", "def _timestamp_now(self) -> str:\n date_now = datetime.utcnow().replace(tzinfo=timezone(\"UTC\"))\n if self._app_conf[\"aiscalator\"]:\n pst = timezone(self.app_config().timezone)\n else:\n pst = timezone('Europe/Paris')\n return date_now.astimezone(pst).strftime(\"%Y%m%d%H%M%S\")", "def timestamp():\n return datetime.datetime.now().strftime(\"%Y-%m-%d-T%H-%M-%S\")", "def get_date():\n now = datetime.now()\n date = now.strftime(\"%Y%m%d\")\n return date", "def timestamp():\n return datetime.now().strftime(\"%Y%m%dT%H%M%S\")", "def get_date():\n now=datetime.now()\n s=\"%s%s%s\" % (now.year, str(now.month).zfill(2), str(now.day).zfill(2))\n return (now, s)" ]
[ "0.8746683", "0.8295901", "0.8274269", "0.8213029", "0.81925744", "0.81858903", "0.8185167", "0.81688607", "0.80856556", "0.8042373", "0.7992371", "0.78575885", "0.7848898", "0.78472054", "0.78358054", "0.779838", "0.7789519", "0.77884716", "0.77856934", "0.7751212", "0.7739476", "0.7722707", "0.7718662", "0.765722", "0.76480603", "0.7634317", "0.7623871", "0.75942546", "0.7577508", "0.75624025", "0.7561861", "0.7552124", "0.75339204", "0.75258535", "0.7521899", "0.7521217", "0.75179136", "0.74957395", "0.748498", "0.74832034", "0.7478886", "0.7474068", "0.7464331", "0.74618196", "0.7446579", "0.74159664", "0.74152565", "0.740164", "0.73673576", "0.7352795", "0.73353505", "0.7334515", "0.732054", "0.7307405", "0.73049676", "0.729625", "0.7291599", "0.72693956", "0.7267526", "0.72643626", "0.72583514", "0.7249469", "0.7249469", "0.72387934", "0.72177035", "0.7178084", "0.71682125", "0.71663576", "0.7165831", "0.7151759", "0.71432847", "0.71145064", "0.7106884", "0.7102412", "0.7097192", "0.7084623", "0.70841557", "0.7077532", "0.70710254", "0.70626307", "0.7054052", "0.70467776", "0.7040078", "0.7033438", "0.70331174", "0.70314455", "0.70276767", "0.70041555", "0.6997813", "0.6993925", "0.69874287", "0.6984903", "0.6984903", "0.6970173", "0.6965047", "0.69592386", "0.6957966", "0.6957932", "0.6956999", "0.69384813", "0.6937817" ]
0.0
-1
Log and assert based on condition. If condition True, log message as PASS to testcase log file. If condition False, Assert and Print message with status FAIL.
def logfile_assert_message(s, condition, message):
    if not condition:
        s.log_to_file += now_short() + message + ": FAIL\r\n"
        assert 0, message + ": FAIL\r\n"
    else:
        log_message(s, message + ": PASS")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is a critical message\")\n self.info(\"LOGGING: Testing log messages COMPLETE\")\n return", "def Assert(condition):\n try:\n assert TestStepsTools.Eval(condition)\n except AssertionError:\n _LOGGER.error('Condition %s is not True', condition)\n raise\n\n return True", "def do_assert(self, str_arg):\n arg = validateString(str_arg)\n if arg not in ('true', 'false'):\n self.resultFlag = False\n raise ValueError('Bad parameter.')\n if (arg == 'true' and self.resultFlag) or (arg == 'false' and not self.resultFlag):\n printLog(self.threadName + '[ASSERT PASS]', logging.DEBUG)\n self.resultFlag = True\n else:\n # printLog(self.threadName+'[status=%s]' % self.resultFlag)\n printLog(self.threadName + '[ASSERT FAIL!]', logging.DEBUG)\n self.resultFlag = False\n raise AssertionError()", "def assertTrue(self, statement, message):\n prefix = \"In component %s: \" % self.name\n if not statement:\n error(prefix + str(message))", "def _assert(condition, message):\n if not condition:\n raise AssertionError(message)", "def test_assert_truth(self):\n\n # Confused? This video should help:\n #\n # http://bit.ly/about_asserts\n\n self.assertTrue(True) # This should be true", "def print(self):\n if self.passed():\n self.print_passed()\n else:\n self.print_failed()", "def print_result(testcase, passed):\n print '{:<6}{}'.format('{}:'.format(testcase), \"allowed\" if passed else \"blocked\")", "def test_log_success(self, mock_info):\n\n with utils.log_activity(\"for test\"):\n pass\n\n mock_info.assert_any_call(\"[jaxline] %s starting...\", \"for test\")\n mock_info.assert_any_call(\"[jaxline] %s finished.\", \"for test\")", "def test_02_log_something(self):\n logger = get_logger(self)\n logger.info('Info in test_02')\n logger.debug('Debug in test_02')\n logger.warn('Warn in test_02')\n logfiles = glob.glob(os.path.join(self.LOG_FOLDER,\n '{}*.log'.format(self.scenario)))\n assert logfiles\n print(logfiles)\n for logfile in logfiles:\n with open(logfile) as f:\n for line in f:\n print(line.strip())", "def unitTest(self, _strMessage=\"\"):\n self.edLogging.unitTest(_strMessage)", "def test_case_01(self):\n if True:\n self.fail()", "def print_tcase_success(self,testcaseName,reasonPassed):\n\n # go throuht the test case objects\n\tfor t in self.testcases:\n\t\t\n\t\ttName = t.name\n\t\tif tName == testcaseName:\n\t\t\t#print tName\n\t\t\tt.status = \"Passed\"\n\t\t\tt.reasonPassed = reasonPassed\n return 1\n\tprint_green(\"=\" * 80)\n\ttrace_success(\"TESTCASE: PASSED %s,reason '%s'\"%(testcaseName,reasonPassed))\n\tprint_green(\"=\" * 80)\n \n\traise ViriValuePassedError(\"Testcase '%s' doesnt seem to be run but print success called\"%testcaseName)", "def ASSERT(self, _strMessage):\n self.edLogging.ASSERT(_strMessage)", "def test_xfail_with_run_false_and_with_reason():\n pass", "def test_failed():\n assert False", "def test_common_case(self):\n loglevel_from_command_line = \"WARNING\"\n assert output(self.msg, \"INFO\", loglevel_from_command_line)", "def test1(self):\n\n log.info('This is a test')\n self.assertTrue((random.randint(0,9) % 2) == 0)#! 
/usr/bin/env python", "def test_condition_split(self):\n self.write_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected:\n if product == \"content_shell\": FAIL\n \"\"\")\n self.update(\n {\n 'run_info': {\n 'product': 'content_shell',\n 'os': 'mac',\n 'port': 'mac12',\n },\n 'results': [{\n 'test': '/fail.html',\n 'status': 'TIMEOUT',\n 'expected': 'FAIL',\n }],\n }, {\n 'run_info': {\n 'product': 'content_shell',\n 'os': 'win',\n 'port': 'win11',\n },\n 'results': [{\n 'test': '/fail.html',\n 'status': 'FAIL',\n 'expected': 'FAIL',\n }],\n }, {\n 'run_info': {\n 'product': 'chrome',\n 'os': 'linux',\n 'port': 'trusty',\n },\n 'results': [{\n 'test': '/fail.html',\n 'status': 'PASS',\n 'expected': 'PASS',\n }],\n },\n overwrite_conditions='yes')\n path = self.finder.path_from_web_tests('external', 'wpt',\n 'fail.html.ini')\n lines = self.tool.filesystem.read_text_file(path).splitlines()\n expected = textwrap.dedent(\"\"\"\\\n [fail.html]\n expected:\n if (product == \"content_shell\") and (os == \"win\"): FAIL\n if (product == \"content_shell\") and (os == \"mac\"): TIMEOUT\n \"\"\")\n # TODO(crbug.com/1299650): The branch order appears unstable, which we\n # should fix upstream to avoid create spurious diffs.\n self.assertEqual(sorted(lines, reverse=True), expected.splitlines())", "def Checktest(self, expectedoutput):\n\n if expectedoutput == 0:\n result = self.runner.invoke(yoda.cli, [\"setup\", \"check\"])\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"The configuration file does not exist.\", result.output)\n return\n\n if expectedoutput == 1:\n result = self.runner.invoke(yoda.cli, [\"setup\", \"check\"])\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"Name: Name\", result.output)\n self.assertIn(\"Email: [email protected]\", result.output)\n self.assertIn(\"Github username: GhUser\", result.output)", "def _check(self, expected, actual):\n\n assert expected == actual, 'Assert fail. expected={} but actual={}'.format(expected, actual)", "def self_test(message):\n global failed_tests\n\n if result != correct:\n failed_tests += 1\n print module_banner\n print \"test failed:\", message\n print \" correct:\", correct\n print \" result: \", result\n print", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number\r\n if did_pass:\r\n msg = \"Test at line {0} is ok\".format(linenum)\r\n else:\r\n msg = \"Test at line {0} is FAILED\".format(linenum)\r\n print(msg)", "def visit_assert(self: Parser, node: doc.Assert) -> None:\n cond = self.eval_expr(node.test)\n msg = self.eval_expr(node.msg)\n frame = T.Assert(cond, msg)\n frame.add_callback(partial(frame.__exit__, None, None, None))\n frame.__enter__()", "def assert_verbose(actual, expected):\n assert expected == actual, f\"Expected value: {expected}. 
But actual value is {actual}\"", "def test_execute_or_bail_ok(self):\n with self.assertLogs(level=\"INFO\") as cm:\n with etl.commands.execute_or_bail(\"unittest\"):\n pass\n self.assertEqual(len(cm.output), 1)\n self.assertTrue(\"finished successfully\" in cm.output[0])", "def test_level_error(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=logging.ERROR)), \":exclamation: **test**\")", "def test_the_tests():\n\n assert True is True", "def _run_test(level_name):\n input_data, expected_output = _data_for_level(level_name)\n\n print 'Running test %s' % level_name\n program_output = bundler.bundle_mail(input_data)\n\n passed, message = _verify_output(program_output, expected_output,\n input_data)\n\n if len(message) > 0:\n print ''\n print message\n\n print '----------------------------'\n if passed:\n print 'Success!'\n else:\n print 'Fail'\n\n return passed", "def _test():\n try:\n print 'Test for Loging'\n # Establish Logging at the beginning of the script\n fh = establish(lvl='DEBUG', logName='TestLog.txt', logPath='', backups=0)\n\n # Supply log functions with message as a STRING\n info('TEST - Info lvl')\n debug('TEST - Debug lvl')\n warning('TEST - Warning lvl')\n error('TEST - Error lvl')\n exception('TEST - Exception. See the exception below this line.')\n info('Would any of this be logged to ArcPy: {0}'.format(_logToArcpyMessagingWindow))\n\n except:\n exception('Error in main function of script')\n print 'ERROR WITH SCRIPT: {0}'.format(traceback.format_exc())\n finally:\n # Ensure to Shut-down the Logging\n info('Script Completed')\n shutdown(fh)\n print 'Test Complete'", "def test(actual, expected):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if (expected == actual):\n msg = \"Test on line {0} passed.\".format(linenum)\n else:\n msg = (\"Test on line {0} failed. Expected '{1}', but got '{2}'.\"\n .format(linenum, expected, actual))\n print(msg)", "def test(actual, expected):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if (expected == actual):\n msg = \"Test on line {0} passed.\".format(linenum)\n else:\n msg = (\"Test on line {0} failed. 
Expected '{1}', but got '{2}'.\"\n .format(linenum, expected, actual))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno\n if did_pass:\n msg = 'Test at line {0} ok.'.format(linenum)\n else:\n msg = 'Test at line {0} FAILED.'.format(linenum)\n print(msg)", "def test_level_debug(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=logging.DEBUG)), \":detective: **test**\")", "def test_always_succeed():\n assert True", "def test_logging(self):\n self._verify_logging()", "def assertSuccessStatus(self, options, arguments):\n self.assertFailStatus(0, options, arguments)", "def test_should_be_ok(self):\n self.assertTrue(True)", "def doit_assert(\n cond, emsg = \"Assertion failed\", exc = DoItAssertionError, nframe = 1\n):\n\n if not cond:\n raise exc(\"%s: %s\" % (__get_caller_name(sys._getframe(nframe)), emsg))", "def testif(b, testname, msgOK=\"\", msgFailed=\"\"):\n if b:\n print(\"Success: \", testname, \"; \", msgOK)\n else:\n print(\"Failed: \", testname, \"; \", msgFailed)\n return b", "def self_test(message):\n global failed_tests\n\n if result != correct:\n failed_tests += 1\n print(module_banner)\n print(\"test failed:\", message)\n print(\" correct:\", correct)\n print(\" result: \", result)", "def test_execution(self):\n self.assertTrue(True)", "def notify_result(self, test_case, success, message):\n self.num_successes += 1 if success else 0\n self.num_failures += 0 if success else 1\n counter_string = str(self.num_successes + self.num_failures) + '/' + str(\n self.num_tests)\n print('%-10s %-40s ' % (counter_string, test_case.test.name()) +\n ('Passed' if success else '-Failed-'))\n if not success:\n print(' '.join(test_case.command))\n print(message)", "def test_04_fail(self):\n if y == 2:\n self.fail('This is a custom fail message')", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def excecute(self):\r\n self.initialize()\r\n self.addteststeps()\r\n for teststep in self.test_steps_list:\r\n if teststep.run() == TestStatus.PASS:\r\n logging.info(\"test {} passed the test\".format(teststep.stepname))\r\n self.result = TestStatus.PASS\r\n else:\r\n logging.warn(\"test {} failed the test\".format(teststep.stepname))\r\n self.result = TestStatus.FAIL\r\n self.cleanup()\r\n return self.result", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test_case(self):\n log.e('error日志')\n log.d('debug日志')\n log.i('info日志')\n log.w('warning日志')", "def test_condition_keep(self):\n self.write_contents(\n 'external/wpt/pass.html.ini', \"\"\"\\\n [pass.html]\n [subtest]\n expected:\n if (product == \"content_shell\") and (os == \"win\"): PASS\n FAIL\n \"\"\")\n self.update(\n {\n 'run_info': {\n 'product': 'content_shell',\n 'os': 'win'\n },\n 'results': [{\n 'test':\n '/pass.html',\n 'status':\n 'TIMEOUT',\n 'expected':\n 'OK',\n 'subtests': [{\n 'name': 'subtest',\n 'status': 'TIMEOUT',\n 'expected': 'PASS',\n }],\n }],\n }, {\n 'run_info': {\n 'product': 'content_shell',\n 'os': 'mac'\n },\n 'results': [],\n }, {\n 'run_info': {\n 'product': 'chrome',\n 'os': 'linux'\n },\n 'results': [],\n })\n # Without result replay, the `FAIL` expectation is erroneously deleted,\n # which 
will give either:\n # expected: TIMEOUT\n #\n # with a full update alone (i.e., `--overwrite-conditions=yes`), or\n # expected:\n # if os == \"win\": TIMEOUT\n #\n # without a full update (i.e., `--overwrite-conditions=no`).\n self.assert_contents(\n 'external/wpt/pass.html.ini', \"\"\"\\\n [pass.html]\n expected:\n if (product == \"content_shell\") and (os == \"win\"): TIMEOUT\n [subtest]\n expected:\n if (product == \"content_shell\") and (os == \"win\"): TIMEOUT\n FAIL\n \"\"\")", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def testit(did_pass):\n\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} 
FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILIED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILIED.\".format(linenum))\n print(msg)", "def test_01_pass(self):\n if x==1:\n pass", "def test_01_pass(self):\n if x==1:\n pass", "def test_02_pass(self):\n if x==1:\n pass", "def runTest(self):\n import logging\n lg_name = expector.logger_name\n lg = logging.getLogger(lg_name)\n start_level = logging.getLevelName('DEBUG_9')\n end_level = logging.getLevelName('CRITICAL_0')\n for lvl in range(start_level, end_level):\n lg.log(lvl, 'MATCH-START %s %d(%s) MATCH-END',\n lg_name, lvl, logging.getLevelName(lvl))", "def test_DDSim_runIt_failure_LogFile(self):\n self.ddsim.platform = \"Windows\"\n self.ddsim.applicationLog = self.logFileName\n self.ddsim.InputFile = \"pairs.hepmc\"\n self.ddsim.ignoreapperrors = False\n ## side effect for Script, userlibs, log, logAfter\n with patch(\"os.path.exists\", new=Mock(side_effect=[False, False, False, False] ) ):\n res = self.ddsim.runIt()\n self.assertIn( \"did not produce the expected log\", res['Message'] )", "def assertion_passed(self, func):", "def test_completed():\n assert complete == 1\n assert errorflag == 0", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def assertTrue(self, expr, msg=None, data=None):\n if not expr:\n # GET ERROR MESSAGE FROM YAML\n msg = self.get_error_msg(msg)\n # CHECK FOR DATA\n if data is not None:\n import pprint\n msg += \" WITH DATA {}\".format(data)\n # COMPOSE MESSAGE AND RAISE\n msg = self._formatMessage(msg, f\"{safe_repr(expr)} is not true\")\n raise self.failureException(msg)", "def test_deploy_log_messages(deploy_result: Result, namespace: str) -> None:\n expected_lines = [\n \"cfngin.yml:deploy (in progress)\",\n \"dependent-rollback-parent:submitted (creating new stack)\",\n f\"{namespace}-dependent-rollback-parent:roll back reason: \"\n \"The following resource(s) failed to create: [BrokenWaitCondition]. \"\n \"Rollback requested by user.\",\n \"dependent-rollback-child:failed (dependency has failed)\",\n \"The following steps failed: dependent-rollback-parent, dependent-rollback-child\",\n ]\n for line in expected_lines:\n assert f\"[runway] {line}\" in deploy_result.stdout, (\n \"stdout is missing expected line\\n\\nEXPECTED:\\n\"\n f\"{line}\\n\\nSTDOUT:\\n{deploy_result.stdout}\"\n )", "def show_text_test_case_succeeded(test_condition) -> None:\n\n test_cases_success_outputs = {\n 'input_search_field': 'Input search field found. OK.',\n 'suggest_table': 'Table with suggestions for searching found. OK.',\n 'table_with_search_results': 'Table with results found. OK.',\n 'result_links': 'Search results have variants. OK.',\n 'looking_for_link': 'Looking for link found in given range. 
OK.'\n }\n print(test_cases_success_outputs[test_condition])", "def test_setup_logging_debug(self) -> None:\n # set the log level high to ensure they are properly being change by setup_logging\n self.f_logger.setLevel(LogLevels.CRITICAL)\n self.boto3_logger.setLevel(LogLevels.CRITICAL)\n self.botocore_logger.setLevel(LogLevels.CRITICAL)\n\n with self.assertLogs(self.f_logger, LogLevels.DEBUG) as setup_ctx:\n setup_logging(LogLevels.DEBUG)\n\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.CRITICAL))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.ERROR))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.SUCCESS))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.WARNING))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.NOTICE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.INFO))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.VERBOSE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertTrue(self.boto3_logger.isEnabledFor(LogLevels.DEBUG))\n self.assertTrue(self.botocore_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertEqual(setup_ctx.output,\n [f'DEBUG:f-cli:Initalized logging for f-cli version {__version__}'])", "def validate_Assert(result, _dummy_condition):\n return result", "def run_test_tool(self, cmd, expected_status=0 ):\n\n status, output = self.target.run(cmd)\n self.assertEqual(status, expected_status, msg='\\n'.join([cmd, output]))", "def test_level_warning(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=logging.WARNING)), \":warning: **test**\")", "def print_test_results(func_tested, expected, actual):\r\n\r\n if not callable(func_tested):\r\n raise Exception(\"{} is not a function\".format(func_tested))\r\n\r\n func_name = func_tested.__name__\r\n desc = func_tested.__doc__\r\n\r\n if expected == actual:\r\n print \"PASSED: {}\".format(func_name)\r\n else:\r\n print \"FAILED: {}\".format(func_name)\r\n print \"Expect: {}\".format(expected)\r\n print \"Actual: {}\".format(actual)\r\n print \"Desc: {}\".format(desc)\r\n\r\n print \"\"", "def check(steps: Steps, error_message: str, condition) -> bool:\n if steps:\n step = steps.get_last_step()\n if not callable(condition):\n raise ValueError(\"The 'condition' argument \"\n \"must be a callable object\")\n else:\n if not condition():\n raise ValueError(error_message)\n else:\n step.set_status(Status.PASSED)\n return True\n\n return False", "def test_03_pass(self):\n if x==1:\n pass", "def check_asserts(con, host, warning, critical,perf_data):\n warning = warning or 1\n critical = critical or 10 \n data=get_server_status(con)\n\n asserts=data['asserts']\n \n #{ \"regular\" : 0, \"warning\" : 6, \"msg\" : 0, \"user\" : 12, \"rollovers\" : 0 } \n regular=asserts['regular']\n warning_asserts=asserts['warning']\n msg=asserts['msg']\n user=asserts['user']\n rollovers=asserts['rollovers']\n\n err,delta=maintain_delta([regular,warning_asserts,msg,user,rollovers],host,\"asserts\")\n \n if err==0:\n if delta[5]!=0:\n #the number of rollovers were increased\n warning=-1 # no matter the metrics this situation should raise a warning\n # if this is normal rollover - the warning will not appear again, but if there will be a lot of asserts \n # the warning will stay for a long period of time\n # although this is not a usual situation\n \n regular_ps=delta[1]/delta[0]\n warning_ps=delta[2]/delta[0]\n msg_ps=delta[3]/delta[0]\n user_ps=delta[4]/delta[0]\n rollovers_ps=delta[5]/delta[0]\n total_ps=regular_ps+warning_ps+msg_ps+user_ps\n 
message = \"Total asserts : %.2f ps\" % total_ps \n message+=performance_data(perf_data,[(total_ps,\"asserts_ps\",warning,critical),(regular_ps,\"regular\"),\n (warning_ps,\"warning\"),(msg_ps,\"msg\"),(user_ps,\"user\")])\n return check_levels(total_ps,warning,critical,message)\n else:\n return exit_with_general_warning(\"problem reading data from temp file\")", "def assertLogged(self, **kwargs):\n self.logging_handler.assertLogged(**kwargs)", "def assertTrue(self, expr, msg=None):\r\n if not expr:\r\n msg = self._formatMessage(msg, \"%s is not True\" % safe_repr(expr))\r\n raise self.failureException(msg)", "def test_xfailed_but_passed():\n pass", "def test1(self):\n self.assertTrue(True)", "def eval_assertions(run_data, test_unit, testconfig):\n\n tests_passed = True\n passed = 0\n failed = 0\n changed = 0\n for name, entry in run_data['return'].iteritems():\n if entry['result']:\n passed = passed + 1 \n else:\n failed = failed + 1\n if entry['changes'] != {}:\n changed = changed + 1 \n\n assert_passed = testconfig['assert']['passed']\n assert_changed = testconfig['assert']['changed']\n assert_failed = testconfig['assert']['failed']\n assert_total = testconfig['assert']['total']\n total = passed + failed\n\n def assert_test(name, expect, value): \n if expect is not None and expect != value:\n print('FAIL ({}): expected {} {} states; got {} instead'\\\n .format(test_unit, name, expect, value))\n tests_passed = False\n\n assert_test('passed', assert_passed, passed)\n assert_test('changed', assert_changed, changed)\n assert_test('failed', assert_failed, failed)\n assert_test('total', assert_total, total)\n\n return tests_passed", "def test_level_unknown(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=999)), \"**test**\")", "def test_unit(self):\n self.assertTrue(return_true())", "def test_deploy_log_messages(deploy_result: Result) -> None:\n expected_lines = [\n \"deployment_1:processing deployment (in progress)\",\n \"deployment_1:processing regions sequentially...\",\n \"\",\n \"deployment_1.test_raw_cfn:processing module in us-east-1 (in progress)\",\n \"cfngin.yml:init (in progress)\",\n \"skipped; cfngin_bucket not defined\",\n \"cfngin.yml:init (complete)\",\n \"cfngin.yml:deploy (in progress)\",\n \"raw-template-vpc:submitted (creating new stack)\",\n \"raw-template-vpc:complete (creating new stack)\",\n \"cfngin.yml:deploy (complete)\",\n \"deployment_1.test_raw_cfn:processing module in us-east-1 (complete)\",\n \"deployment_1:processing deployment (complete)\",\n ]\n expected = \"\\n\".join(f\"[runway] {msg}\" for msg in expected_lines)\n assert expected in deploy_result.stdout, (\n \"stdout does not match expected\\n\\nEXPECTED:\\n\"\n f\"{expected}\\n\\nSTDOUT:\\n{deploy_result.stdout}\"\n )", "def passed(test: bool) -> str:\n return 'passed' if test else 'failed'", "def expecting(self, flag, errstring='null'):\n self.tests += 1\n if flag:\n self.successes += 1\n print(\"***\", self.successes, 'of', self.tests, 'tests passed', end=' ')\n if not flag:\n print('**FAILURE**', self.reason + errstring)\n else:\n print()\n if self.limit > 0 and self.tests - self.successes >= self.limit:\n print(\"Halting because of too many errors\")\n exit(1)", "def test_if(self):", "def test_log(self):\r\n # expected result when no result_path is provided\r\n self.default_app(\r\n seq_path=self.tmp_seq_filepath,\r\n result_path=None,\r\n log_path=self.tmp_log_filepath,\r\n )\r\n\r\n # open the actual log file and the expected file, and pass into lists\r\n with 
open(self.tmp_log_filepath) as f:\r\n obs = [l.strip() for l in list(f)]\r\n exp = rdp_test1_log_file_contents.split('\\n')\r\n # sort the lists as the entries are written from a dict,\r\n # so order may vary\r\n obs.sort()\r\n exp.sort()\r\n self.assertEqual(obs, exp)", "def test_healthcheck(self):\n self.assertEqual(\"OK\", \"OK\")", "def test_tc():\n assert 1 == 1", "def print_tcase_failed(self,testcaseName,reasonFailed):\n\n # go throuht the test case objects\n\tfor t in self.testcases:\n\t\t\n\t\ttName = t.name\n\t\tif tName == testcaseName:\n\t\t\t#print tName\n\t\t\tt.status = \"Failed\"\n\t\t\tt.reasonPassed = reasonFailed\n self.print_summary()\n raise TestCaseFailed (\"Testcase '%s' Failed, reason '%s\"%(testcaseName,reasonFailed))\n sys.exit(1)\n return 1\n\n\traise ViriValuePassedError(\"Testcase '%s' doesnt seem to be run but print failed called\"%testcaseName)" ]
[ "0.6288022", "0.61218476", "0.6107574", "0.60947496", "0.5991102", "0.5953129", "0.5885416", "0.587344", "0.5861646", "0.58180374", "0.5753832", "0.5752025", "0.5750852", "0.5717795", "0.57175326", "0.570502", "0.5684395", "0.5653539", "0.5651103", "0.5643155", "0.5638683", "0.5637379", "0.5637224", "0.5632616", "0.56316113", "0.56235796", "0.5623042", "0.5621439", "0.56190467", "0.56172097", "0.5597962", "0.5597962", "0.55845714", "0.5578266", "0.5570718", "0.55698144", "0.55474883", "0.5542186", "0.5526071", "0.5524527", "0.55215824", "0.55116147", "0.54913884", "0.549107", "0.5479768", "0.5477799", "0.547031", "0.54677", "0.5466317", "0.5460526", "0.5460526", "0.5460526", "0.5460526", "0.5460526", "0.5460526", "0.5460526", "0.54600066", "0.545848", "0.545848", "0.545848", "0.545848", "0.545848", "0.5451401", "0.5451401", "0.54507345", "0.54507345", "0.54507315", "0.54462725", "0.5435542", "0.54308975", "0.54275376", "0.54265535", "0.54265535", "0.5423789", "0.54113764", "0.54001826", "0.5390165", "0.53899926", "0.538307", "0.5378096", "0.537596", "0.53754824", "0.53601694", "0.53499764", "0.533134", "0.53245354", "0.532269", "0.5314568", "0.5310584", "0.53096", "0.53084815", "0.530373", "0.52913475", "0.5287751", "0.52749133", "0.5272337", "0.5267813", "0.52669525", "0.52668357" ]
0.78782284
1
Write detailed log file for given test.
def write_test_log(t, output_dir):
    if t.log_to_file is not None and hasattr(t, "stop_time"):
        filename = type(t).__name__ + "-" + time.strftime("%Y%m%d-%H%M%S") + ".txt"
        testtime = t.stop_time - t.start_time
        with open(os.path.join(output_dir, filename), "w") as log:
            log.write("\t=======================================================")
            log.write(f"\n\tTest case ID: {type(t).__name__}")
            log.write(f"\n\tTest case Description: {type(t).__doc__}")
            log.write("\n\t=======================================================\n")
            log.write(t.log_to_file)
            log.write("\n\t=======================================================")
            log.write(f"\n\t{type(t).__name__} test result: {t.result_grade}")
            log.write(f"\n\tTotal test time: {testtime} seconds")
            log.write("\n\t=======================================================")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_test_log(t, output_dir):\n if t.log_to_file is not None and hasattr(t, \"stop_time\"):\n filename = type(t).__name__ + \"-\" + time.strftime(\"%Y%m%d-%H%M%S\") + \".txt\"\n testtime = t.stop_time - t.start_time\n with open(os.path.join(output_dir, filename), \"w\") as log:\n log.write(\"\\t=======================================================\")\n log.write(\"\\n\\tTest case ID: %s\" % (type(t).__name__))\n log.write(\"\\n\\tTest case Description: %s\" % (type(t).__doc__))\n log.write(\"\\n\\t=======================================================\\n\")\n log.write(t.log_to_file)\n log.write(\"\\n\\t=======================================================\")\n log.write(\"\\n\\t%s test result: %s\" % (type(t).__name__, t.result_grade))\n log.write(\"\\n\\tTotal test time: %s seconds\" % testtime)\n log.write(\"\\n\\t=======================================================\")", "def _dump_test_parser_log(self):\n\t\tFileSystem.dump_to(self._result_directory_name + \"/\" + \"Test_Parser.log\", self._form_test_parser_log())", "def writeToLogFile(self, event):\n outPutStr = '{:013}'.format(0)\n logOutPutStr = outPutStr + '\\t' + '{:.2f}'.format (time ()) + '\\t' + event + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ')\n printOutPutStr = outPutStr + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ') + '\\t' + event\n print (printOutPutStr)\n if self.logFP is not None:\n self.logFP.write(logOutPutStr + '\\n')\n self.logFP.flush()", "def write_test(path, test):\n with open(path, 'w') as f:\n f.write('test = ')\n pprint.pprint(test, f, indent=4, width=200, depth=None)", "def logToFile(output, file): \r\n print( output, file=file )", "def create_log_file(path):\n with open(path, 'w'):\n pass", "def write_to_file(output, test_case_name, path):\n path_to_store = OutputWrite.make_test_dir(path, test_case_name)\n time_stamp = OutputWrite.get_time_stamp()\n try:\n LOG.debug('Changing the dir to {0}'.format(path_to_store))\n os.chdir(path_to_store)\n except Exception as _ex_:\n LOG.exception('Error :{0}'.format(_ex_))\n else:\n file_name = os.path.join(path_to_store, test_case_name +\n time_stamp)\n LOG.debug('The file name after joining = {0}'.format(file_name))\n try:\n LOG.debug('Writing Test case output to the file')\n with open(file_name, 'w') as file_obj:\n file_obj.write(output)\n except FileNotFoundError as _ex_:\n LOG.exception('Error : {0}'.format(_ex_))", "def _log_to_file(self, message):\n if self.log is not None:\n message = \"[%s] %s\" % (datetime.datetime.utcnow().strftime('%H:%M:%S'), message)\n self.log.write(\"%s\\n\" % (message,))\n self.log.flush()\n print message", "def logsave(self):\n log_file = open(self.conf[\"output_prefix\"] + \"_log.txt\", \"w\")\n try:\n log_file.write(self.log)\n finally:\n log_file.close()", "def logger_test():\n test_logger = Logger(True)\n test_dir = r'{}/logger_test'.format(os.getcwd())\n header = ['x', 'y', 'z']\n test_logger.new('test', header)\n for i in range(10):\n data = np.random.random((3,))\n test_logger.add('test', data)\n test_logger.save('test', test_dir)", "def write_to_file(self, *args, **kwargs) -> None:\n with open(self._log_file, 'a') as file:\n print(file=file, *args, **kwargs)", "def write_to_file(train_file, test_file, log_dict):\n i = 0\n train_events = []\n test_events = []\n\n for key in log_dict:\n trace = log_dict[key]\n if random.randint(0,1) == 0: # Add file to training set with 50% chance\n for e_idx in range(len(trace)):\n train_events.append(\",\".join([str(x) for x in trace[e_idx]]) + 
\",\" + str(key) + \",0,None\")\n else: # Add file to test set\n if random.randint(0,100) > 50: # No anomaly injection with 50% chance\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Anomaly injection\n trace, types = introduce_anomaly(trace, single=False)\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",1,\\\"\" + str(types) + \"\\\"\")\n\n with open(train_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in train_events:\n fout.write(e + \"\\n\")\n\n with open(test_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in test_events:\n fout.write(e + \"\\n\")", "def log(self, loginfo):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s:%(message)s',\n datefmt='%d %b %Y %H:%M:%S',\n filename=self.logfilepath,\n filemode='w')\n filelog = logging.FileHandler(self.logfilepath)\n logging.getLogger('Functest').addHandler(filelog)\n logging.info(loginfo)", "def WriteLog(self, content, file_name=None):\n file_path = ''\n if file_name is None:\n file_path = tempfile.NamedTemporaryFile(dir=self.events_dir,\n delete=False).name\n else:\n file_path = os.path.join(self.events_dir, file_name)\n with open(file_path, 'a') as f:\n f.write(content)", "def save_log(self, test_status: str = Status.FAILED):\n self.__log.close()\n sys.stdout = self.__original_stdout\n if test_status == Status.PASSED and Logger.__KEEP_LOG_FLAG not in sys.argv:\n if os.path.isfile(self.__log_file_path):\n os.remove(self.__log_file_path)\n print(Colors.OKBLUE + \"\\nLog file has been removed\\n\" + Colors.ENDC)\n return\n\n if os.path.isfile(self.__log_file_path):\n print(Colors.OKBLUE + \"\\nLog file has been kept at: {}\\n\".format(self.__log_file_path) + Colors.ENDC)", "def test_02_log_something(self):\n logger = get_logger(self)\n logger.info('Info in test_02')\n logger.debug('Debug in test_02')\n logger.warn('Warn in test_02')\n logfiles = glob.glob(os.path.join(self.LOG_FOLDER,\n '{}*.log'.format(self.scenario)))\n assert logfiles\n print(logfiles)\n for logfile in logfiles:\n with open(logfile) as f:\n for line in f:\n print(line.strip())", "def _createLogFile(LogFile,date,LocalPath,ShowTagsResult):\n try:\n LOG = open(LogFile,\"w\")\n if _verbose:\n print(\"Writing Production Host, Location, Release and Tags information in %s\" % LogFile) \n LOG.write(\"These performance tests were executed on host %s and published on %s\" % (HOST,date))\n LOG.write(\"They were run in %s\" % LocalPath)\n LOG.write(\"Results of showtags -r in the local release:\\n%s\" % ShowTagsResult)\n LOG.close()\n except IOError as detail:\n print(\"WARNING: Can't create log file\") \n print(detail)", "def log_event_to_file(event):\n with open('eventlogs/{}.json'.format(time.time()), 'w') as event_write:\n event_write.write(json_dumpstring(event))\n pass", "def create_writer(self, session: tf.Session):\n self.logger.writer = tf.summary.FileWriter(str(self.info.summary_path), session.graph)", "def write_log_to_file(filename, content):\n append_to_file(filename, content)", "def saveLogFile(self, fname = \"data/status.txt\"):\n with open(fname, 'w') as f:\n f.write(\"<br>\\n\".join(self.logLines))\n self.log(\"wrote \"+fname)", "def log_to_file(text, status='INFO'):\n 
outfile = open(LogName, 'a')\n outfile.write(timestamp()+' - '+status+' - '+str(text)+'\\n')\n outfile.close()", "def printToLogfile (self, text):\n if self.logFile is not None:\n self.logFile.write(text)\n self.logFile.flush()", "def main(): \n suite = unittest.TestLoader().discover(unitTestDirectory) \n os.chdir(os.path.join(os.getcwd(), unitTestDirectory)) #need to change cwd if the unit test runs files that it doesn't just import\n \n f = open('log_file.txt', 'w')\n testRunner = unittest.TextTestRunner(f, verbosity=2).run(suite) #diverts stderr to the log_file when running the test suite\n f.close()", "def dump_to_log(self, log_dir, log_filename):\n\t\twith open(os.path.join(log_dir, log_filename), \"w\") as f:\n\t\t\tf.write(\"================ Arguments ==================\\n\")\n\t\t\tfor k, v in vars(self).items():\n\t\t\t\tf.write(\"{} : {}\\n\".format(k, v))\n\t\t\tf.write(\"=============================================\\n\")", "def log(msg=\"\"):\n print(msg)\n sys.stdout.flush()\n f = open(\"/target/testdriver.log\", \"a\")\n f.write('{:%Y-%m-%d %H:%M:%S.%s} :: '.format(datetime.datetime.now()))\n f.write(f\"{msg}\\n\")\n f.close()", "def write_tests(project_name, root_dir):\r\n test_path = get_file_path(root_dir, \"tests\", \"%s_tests.py\" % project_name) #Get the path for setup.py\r\n test_content = get_test_text(project_name)\r\n \r\n test_file = open(test_path, 'w')\r\n test_file.write(test_content)\r\n test_file.close()\r\n print_file(test_path)", "def test_04_logs(self):\n\n file_name = 'train-test.log'\n request_json = {'file':'train-test.log'}\n r = requests.get('http://localhost:{}/logs/{}'.format(port,file_name))\n\n with open(file_name, 'wb') as f:\n f.write(r.content)\n \n self.assertTrue(os.path.exists(file_name))\n\n if os.path.exists(file_name):\n os.remove(file_name)", "def writeLog(self):\n if self.logBuffer != None and self.logging :\n f = open(self.logfileName, 'w')\n self.logBuffer += \"Final Fitness: %f\\n\" % self.getTotalReward()\n self.logBuffer += \"\\n\"\n f.write(self.logBuffer)\n f.close()", "def write_log(message: str, base_url, path=\"logs/\"):\n print(message)\n url_filename = url_to_filename(base_url)\n filename = f\"{path}LOG-{url_filename}.txt\"\n\n if os.path.exists(filename):\n append_write = \"a\"\n else:\n append_write = \"w\"\n\n f = open(filename, append_write)\n f.write(message)\n f.close()", "def log(logfile, st):\n with open(logfile, 'a') as f:\n f.write(st + '\\n')\n print(st)", "def set_log_file(filename):\n pass", "def log_data(self):\n\n self.check_dir()\n with open(self.log_file, \"a\") as logger_file:\n logger_file.write(\"{}, {}\\n\".format(self.time, self.msg))", "def __logtofile(self, log_name):\n logger = logging.getLogger(log_name)\n\n file_path = os.path.join(self.log_file_path, log_name + '.txt')\n\n formatter = logging.Formatter('<%(asctime)s> %(levelname)-8s %(message)s',\n datefmt='%y-%m-%d %H:%M:%S')\n self.file_handlers[logger] = logging.FileHandler(file_path, mode='w')\n self.file_handlers[logger].setFormatter(formatter)\n self.file_handlers[logger].setLevel(logging.DEBUG)\n logger.addHandler(self.file_handlers[logger])\n\n logger.info('SAVING LOGS IN: %s' % file_path)", "def dump(log, file):\n file.write('FSH|%s|PyDL7|ZXU|%s|\\n' %\n (log.metadata.get('encoding_characters', '^~<>{}'),\n log.created.strftime('%Y%m%d%H%M%S')))\n file.write('ZRH|%s|%s|%s|%s|%s|%s|%s|%s|\\n' %\n (log.metadata.get('encoding_characters', '^~<>{}'),\n log.computer_model,\n log.computer_serial,\n log.depth_pressure_unit,\n 
log.altitude_unit,\n log.temperature_unit,\n log.tank_pressure_unit,\n log.tank_volume_unit))\n for dive in log.dives:\n file.write('ZDH|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\\n' %\n (dive.metadata.get('export_sequence', dive.sequence_number),\n dive.sequence_number,\n dive.metadata.get('record_type', 'M'),\n dive.recording_interval,\n dive.leave_surface_time.strftime('%Y%m%d%H%M%S'),\n dive.air_temperature,\n dive.tank_volume,\n dive.O2_mode,\n dive.rebreather_diluent_gas,\n dive.altitude))\n if dive.record:\n file.write('ZDP{\\n')\n for detail in dive.record:\n file.write('|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\\n' %\n (detail.elapsed_time,\n detail.depth,\n detail.gas_switch,\n detail.current_PO2,\n str(detail.ascent_rate_violation)[0],\n str(detail.decompression_violation)[0],\n detail.current_ceiling,\n detail.water_temperature,\n detail.warning_number,\n detail.main_cylinder_pressure,\n detail.diluent_cylinder_pressure,\n detail.oxygen_flow_rate,\n detail.CNS_toxicity,\n detail.OUT,\n detail.ascent_rate))\n file.write('ZDP}\\n')\n file.write('ZDT|%s|%s|%s|%s|%s|%s|\\n' %\n (dive.metadata.get('export_sequence', dive.sequence_number),\n dive.sequence_number,\n dive.max_depth,\n dive.reach_surface_time.strftime('%Y%m%d%H%M%S'),\n dive.min_water_temperature,\n dive.pressure_drop))", "def write_log(self, log_output):\r\n with open(self.log_link, \"a\") as log_file:\r\n log_file.writelines(log_output + \"\\n\")", "def write_ulog(self, path):\n with open(path, \"wb\") as ulog_file:\n # Definition section\n self._write_file_header(ulog_file)\n self._write_flags(ulog_file)\n self._write_format_messages(ulog_file)\n self._write_info_messages(ulog_file)\n self._write_info_multiple_message(ulog_file)\n self._write_initial_parameters(ulog_file)\n self._write_default_parameters(ulog_file)\n\n # Data section\n self._write_logged_message_subscriptions(ulog_file)\n self._write_data_section(ulog_file)", "def _log_trial(self, is_add: bool):\n try:\n with open(str(self.info.trials_log_file), \"r\") as file:\n trials = util.yaml_load(file.read())\n except FileNotFoundError:\n trials = []\n\n if is_add:\n trials.append(self.trial.to_dict())\n else:\n trials[-1] = self.trial.to_dict()\n\n with open(str(self.info.trials_log_file), \"w\") as file:\n file.write(util.yaml_dump(trials))", "def log(self, event):\n\n log_message = '{} - {} file: {}'.format(\n datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n event.event_type.capitalize(),\n event.src_path\n )\n\n if hasattr(event, 'dest_path'):\n log_message += ' => {}'.format(event.dest_path)\n\n sys.stdout.write(log_message + '\\n')\n sys.stdout.flush()", "def save_test(self,test_id):\n l = test_id.split('.')\n if len(l) > 1:\n self.suites[l[:1][0]].save_test('.'.join(l[1:]))\n else:\n suite_id = self.get_id()\n if suite_id:\n test_id = '.'.join((suite_id,test_id))\n test = [t for t in self.tests if t.id == test_id]\n if len(test) >= 0:\n test = test[0]\n else:\n raise Exception(\"Unknown test '%s'\" % test_id)\n testfile = test.id.split('.')[-1:][0]+'.fbt'\n try:\n f = open(os.path.join(self.path,testfile),'w')\n f.write(test.as_expression())\n f.write('\\n')\n finally:\n f.close()", "def _create_logfile(self):\r\n if not self.console_redirect:\r\n return None\r\n\r\n # PCU_logs.robot need a timestamp for console logs as can be run several times\r\n if self.name == self.log_test.replace('.robot', ''):\r\n return open('{0}\\{1}_console_log_{2}'.format(\r\n self.output_dir_path, self.name, datetime.now().strftime(\"%m%d%H%M\")), \"w+\")\r\n else:\r\n return 
open('{0}\\{1}_console_log'.format(self.output_dir_path, self.name), \"w+\")", "def write_log(output_dir, texts, new_file=False):\n if new_file:\n f = open(os.path.join(output_dir, \"std.log\"), \"w\")\n else:\n f = open(os.path.join(output_dir, \"std.log\"), \"a\")\n f.write(str(texts) + \"\\n\")\n f.close()", "def create_file_logs(config, board, tests_to_run, logger):\n combined_list = []\n\n def add_to_combined_list(log, name, combined_list=combined_list):\n for line in log.split(\"\\r\\n\"):\n try:\n if line == \"\":\n continue\n if line.startswith(\"\\n\"):\n line = line[1:]\n if line.startswith(\" [\"):\n line = line[1:]\n ts, text = line.split(\"]\", 1)\n timestamp = float(ts[1:-1])\n else:\n text = line\n timestamp = 0.0\n combined_list.append(\n {\"time\": timestamp, \"text\": str(text), \"name\": name}\n )\n except Exception as error:\n logger.error(error)\n logger.debug(\"Failed to parse log line = %s\" % repr(line))\n\n idx = 1\n console_combined = []\n for console in board.consoles:\n with open(os.path.join(config.output_dir, \"console-%s.log\" % idx), \"w\") as clog:\n clog.write(console.log)\n add_to_combined_list(console.log, \"console-%s\" % idx)\n add_to_combined_list(console.log_calls, \"console-%s\" % idx)\n add_to_combined_list(console.log, \"\", console_combined)\n idx = idx + 1\n\n def write_combined_log(combined_list, fname):\n with open(os.path.join(config.output_dir, fname), \"w\") as clog:\n for e in combined_list:\n try:\n if e[\"name\"] == \"\":\n clog.write(\"[%s]%s\\r\\n\" % (e[\"time\"], repr(e[\"text\"])))\n else:\n clog.write(\n \"%s: [%s] %s\\n\" % (e[\"name\"], e[\"time\"], repr(e[\"text\"]))\n )\n except Exception as error:\n logger.error(error)\n logger.debug(\"failed to parse line: %s\" % repr(e))\n\n import operator\n\n console_combined.sort(key=operator.itemgetter(\"time\"))\n write_combined_log(console_combined, \"console-combined.log\")\n\n for device in config.devices:\n with open(os.path.join(config.output_dir, device + \".log\"), \"w\") as clog:\n d = getattr(config, device)\n if hasattr(d, \"log\"):\n clog.write(d.log)\n add_to_combined_list(d.log, device)\n add_to_combined_list(d.log_calls, device)\n\n for test in tests_to_run:\n if hasattr(test, \"log\") and test.log != \"\":\n with open(\n os.path.join(config.output_dir, \"%s.log\" % test.__class__.__name__), \"w\"\n ) as clog:\n clog.write(test.log)\n if hasattr(test, \"log_calls\"):\n add_to_combined_list(test.log_calls, test.__class__.__name__)\n\n combined_list.sort(key=operator.itemgetter(\"time\"))\n write_combined_log(combined_list, \"all.log\")", "def test_passing_log_fname(self):\n\n log_env_file = \"test.log\"\n log_file = \"test_2.log\"\n whole_env_log_file = os.path.join(LOG_FOLDER, log_env_file)\n whole_log_file = os.path.join(LOG_FOLDER, log_file)\n\n # remove both files if they exist\n for file in (whole_env_log_file, whole_log_file):\n if os.path.exists(file):\n os.remove(file)\n\n os.environ[ENV_WORK_DIR] = TMP_DIR\n os.environ[ENV_LOG_FNAME] = log_env_file\n\n logger = pgo_logger.get_logger(log_file_name=log_file)\n assert logger is not None\n\n logger.info(\"test\")\n\n assert os.path.exists(whole_log_file) is True\n assert os.path.isfile(whole_log_file) is True\n assert os.path.exists(whole_env_log_file) is False", "def write_log_file(output_dir,\r\n input_fasta_fp,\r\n fasta_report):\r\n\r\n output_fp = join(output_dir,\r\n split(input_fasta_fp)[1] + \"_report.log\")\r\n\r\n output_f = open(output_fp, \"w\")\r\n\r\n output_f.write(\"# fasta file %s validation 
report\\n\" % input_fasta_fp)\r\n\r\n output_f.write(\"Percent duplicate labels: %s\\n\" %\r\n fasta_report['duplicate_labels'])\r\n output_f.write(\"Percent QIIME-incompatible fasta labels: %s\\n\" %\r\n fasta_report['invalid_labels'])\r\n output_f.write(\"Percent of labels that fail to map to SampleIDs: %s\\n\" %\r\n fasta_report['nosample_ids_map'])\r\n output_f.write(\"Percent of sequences with invalid characters: %s\\n\" %\r\n fasta_report['invalid_seq_chars'])\r\n output_f.write(\"Percent of sequences with barcodes detected: %s\\n\" %\r\n fasta_report['barcodes_detected'])\r\n output_f.write(\"Percent of sequences with barcodes detected at the \" +\r\n \"beginning of the sequence: %s\\n\" % fasta_report['barcodes_at_start'])\r\n output_f.write(\"Percent of sequences with primers detected: %s\\n\" %\r\n fasta_report['linkerprimers_detected'])\r\n\r\n # Optional tests\r\n\r\n if fasta_report['same_seq_lens']:\r\n output_f.write(\"Sequence lengths report\\n\")\r\n output_f.write(\"Counts of sequences, followed by their sequence \" +\r\n \"lengths:\\n\")\r\n for len_data in fasta_report['same_seq_lens']:\r\n output_f.write(\"%s\\t%s\\n\" % (len_data[0], len_data[1]))\r\n\r\n # need to make an explicit check for true as there can be other non-boolean\r\n\r\n # values stored under this key in the dictionary; this needs to be fixed\r\n\r\n if fasta_report['all_ids_found']:\r\n output_f.write(\"Sample ID in fasta sequences report\\n\")\r\n if fasta_report['all_ids_found'] == True:\r\n\r\n output_f.write(\"All SampleIDs found in sequence labels.\\n\")\r\n else:\r\n output_f.write(\"The following SampleIDs were not found:\\n\")\r\n for curr_id in fasta_report['all_ids_found']:\r\n output_f.write(\"%s\\n\" % curr_id)\r\n\r\n if fasta_report['tree_subset']:\r\n output_f.write(\"Fasta label subset in tree tips report\\n\")\r\n if fasta_report['tree_subset'] == True:\r\n\r\n output_f.write(\"All fasta labels were a subset of tree tips.\\n\")\r\n else:\r\n output_f.write(\"The following labels were not in tree tips:\\n\")\r\n for curr_id in fasta_report['tree_subset']:\r\n output_f.write(\"%s\\n\" % curr_id)\r\n\r\n if fasta_report['tree_exact_match']:\r\n output_f.write(\"Fasta label/tree tip exact match report\\n\")\r\n if fasta_report['tree_exact_match'][0] == True:\r\n\r\n output_f.write(\"All fasta labels found in tree tips.\\n\")\r\n else:\r\n output_f.write(\"The following labels were not in tree tips:\\n\")\r\n for curr_label in fasta_report['tree_exact_match'][0]:\r\n output_f.write(\"%s\\n\" % curr_label)\r\n if fasta_report['tree_exact_match'][1] == True:\r\n\r\n output_f.write(\"All tree tips found in fasta labels.\\n\")\r\n else:\r\n output_f.write(\"The following tips were not in fasta labels:\\n\")\r\n for curr_tip in fasta_report['tree_exact_match'][1]:\r\n output_f.write(\"%s\\n\" % curr_tip)\r\n\r\n if fasta_report['duplicate_ids']:\r\n output_f.write(\"Duplicate labels found:\\n\")\r\n for id in fasta_report['duplicate_ids']:\r\n output_f.write(\"%s\\n\" % id)", "def test_006_log_append(self):\n __test = chess_storage.ChessStorage()\n __test_data = list(range(consts.TEST_LIST_LENGHT))\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __dir_game_log = os.path.join(__dir_game_saves, \"log\")\n __test_logname = consts.TEST_FILENAME + \"_log.txt\"\n __dir_game_logfile = os.path.join(__dir_game_log, __test_logname)\n # pylint: disable = protected-access\n __log_test = __test._ChessStorage__log_append(__dir_game_logfile, 
__test_data)\n # pylint: enable = protected-access\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])", "def dump_tests(test_dir, assignment, log=None):\n # TODO(albert): prettyify string formatting by using triple quotes.\n # TODO(albert): verify that assign_copy is serializable into json.\n info = formatting.prettyjson(assignment.serialize())\n with open(os.path.join(test_dir, INFO_FILE), 'w') as f:\n if log:\n log.info('Dumping %s', INFO_FILE)\n f.write('info = ' + info)\n\n # TODO(albert): writing causes an error halfway, the tests\n # directory may be left in a corrupted state.\n # TODO(albert): might need to delete obsolete test files too.\n # TODO(albert): verify that test_json is serializable into json.\n for test in assignment.tests:\n test_json = formatting.prettyjson(test.serialize())\n with open(os.path.join(test_dir, test.name + '.py'), 'w') as f:\n if log:\n log.info('Dumping %s', test.name)\n f.write('test = ' + test_json)", "def create_test(self, test_case, file_name):\n with open(os.path.join(self.tests, file_name), 'w+') as f:\n f.write(test_case)", "def writeLog(self, log_path):\r\n f = open(log_path, 'w')\r\n f.write(str(self))\r\n f.close()", "def create_file_logs(config, board, tests_to_run, logger):\n combined_list = []\n\n def add_to_combined_list(log, name, combined_list=combined_list):\n for line in log.split(\"\\r\\n\"):\n try:\n if line == \"\":\n continue\n if line.startswith(\"\\n\"):\n line = line[1:]\n if line.startswith(\" [\"):\n line = line[1:]\n ts, text = line.split(\"]\", 1)\n timestamp = float(ts[1:-1])\n else:\n text = line\n timestamp = 0.0\n combined_list.append(\n {\"time\": timestamp, \"text\": str(text), \"name\": name}\n )\n except Exception as error:\n logger.error(error)\n logger.debug(f\"Failed to parse log line = {repr(line)}\")\n\n idx = 1\n console_combined = []\n for console in board.consoles:\n with open(os.path.join(config.output_dir, f\"console-{idx}.log\"), \"w\") as clog:\n clog.write(console.log)\n add_to_combined_list(console.log, f\"console-{idx}\")\n add_to_combined_list(console.log_calls, f\"console-{idx}\")\n add_to_combined_list(console.log, \"\", console_combined)\n idx = idx + 1\n\n def write_combined_log(combined_list, fname):\n with open(os.path.join(config.output_dir, fname), \"w\") as clog:\n for e in combined_list:\n try:\n if e[\"name\"] == \"\":\n clog.write(f\"[{e['time']}]{repr(e['text'])}\\r\\n\")\n else:\n clog.write(f\"{e['name']}: [{e['time']}] {repr(e['text'])}\\n\")\n except Exception as error:\n logger.error(error)\n logger.debug(f\"failed to parse line: {repr(e)}\")\n\n import operator\n\n console_combined.sort(key=operator.itemgetter(\"time\"))\n write_combined_log(console_combined, \"console-combined.log\")\n\n for device in config.devices:\n with open(os.path.join(config.output_dir, device + \".log\"), \"w\") as clog:\n d = getattr(config, device)\n if hasattr(d, \"log\"):\n clog.write(d.log)\n add_to_combined_list(d.log, device)\n add_to_combined_list(d.log_calls, device)\n\n for test in tests_to_run:\n if hasattr(test, \"log\") and test.log != \"\":\n with open(\n os.path.join(config.output_dir, f\"{test.__class__.__name__}.log\"), \"w\"\n ) as clog:\n clog.write(test.log)\n if hasattr(test, \"log_calls\"):\n add_to_combined_list(test.log_calls, test.__class__.__name__)\n\n combined_list.sort(key=operator.itemgetter(\"time\"))\n write_combined_log(combined_list, \"all.log\")", "def log_write(log_f, text, action='a'):\n\n f = open(log_f, action)\n f.write(text)\n f.close()", "def 
print_test_details(scenario, days, test_name, buf):\n for d in days:\n if scenario not in days[d]:\n continue\n\n for n in days[d][scenario][test_name]:\n print(f\"\\n{n}:\", file=buf)\n for test in days[d][scenario][test_name][n]:\n if \"start_time\" not in test:\n start_time = \"\"\n else:\n start_time = datetime.fromtimestamp(test[\"start_time\"]).strftime(\"%m/%d/%Y %H:%M:%S\")\n\n if \"elapsed_time\" not in test:\n elapsed_time = 0\n else:\n elapsed_time = test[\"elapsed_time\"]\n\n # Get the result message\n msg = test[\"result\"].rsplit(\"FAILED:\")[-1]\n print(f' {start_time} ({elapsed_time}s): {msg}', file=buf)", "def write_log(self, log_filename, data):\n open(log_filename, 'a').write(str(data))", "def write_telemetry(self, telemetry):\n\n _id = telemetry['id']\n _type = telemetry['type']\n\n # If there is no log open for the current ID check to see if there is an existing (closed) log file, and open it.\n if _id not in self.open_logs:\n _search_string = os.path.join(self.log_directory, \"*%s_%s*_sonde.log\" % (_id, _type))\n _existing_files = glob.glob(_search_string)\n if len(_existing_files) != 0:\n # Open the existing log file.\n _log_file_name = _existing_files[0]\n self.log_info(\"Using existing log file: %s\" % _log_file_name)\n # Create entry in open logs dictionary\n self.open_logs[_id] = {'log':open(_log_file_name,'a'), 'last_time':time.time()}\n else:\n # Create a new log file.\n _log_suffix = \"%s_%s_%s_%d_sonde.log\" % (\n datetime.datetime.utcnow().strftime(\"%Y%m%d-%H%M%S\"),\n _id,\n _type,\n int(telemetry['freq_float']*1e3) # Convert frequency to kHz\n )\n _log_file_name = os.path.join(self.log_directory, _log_suffix)\n self.log_info(\"Opening new log file: %s\" % _log_file_name)\n # Create entry in open logs dictionary\n self.open_logs[_id] = {'log':open(_log_file_name,'a'), 'last_time':time.time()} \n\n\n # Produce log file sentence.\n _log_line = self.telemetry_to_string(telemetry)\n\n # Write out to log.\n self.open_logs[_id]['log'].write(_log_line)\n self.open_logs[_id]['log'].flush()\n # Update the last_time field.\n self.open_logs[_id]['last_time'] = time.time()\n self.log_debug(\"Wrote line: %s\" % _log_line.strip())", "def data_log(self, file, spectra):\n if self.datalogflag:\n with open(file, 'a') as f:\n f.write('{0}, '.format(spectra))\n self.vprint(\n 2, 'Writing spectra to data log at {}'.format(file))", "def _print_log(self, step, data=None):\n \n # Set mode to append to log file\n mode = 'a'\n\n if self.logfile is None:\n # Increment log counter for the class. 
Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SKLearn Log {}.txt'.format(self.log_no))\n \n if step == 1:\n # Output log header\n output = \"\\nSKLearnForQlik Log: {0} \\n\\n\".format(time.ctime(time.time()))\n # Set mode to write new log file\n mode = 'w'\n \n elif step == 2:\n # Output the parameters\n output = \"Model Name: {0}\\n\\n\".format(self.model.name)\n output += \"Execution arguments: {0}\\n\\n\".format(self.exec_params)\n \n try:\n output += \"Scaler: {0}, missing: {1}, scale_hashed: {2}, scale_vectors: {3}\\n\".format(\\\n self.model.scaler, self.model.missing,self.model.scale_hashed, self.model.scale_vectors)\n output += \"Scaler kwargs: {0}\\n\\n\".format(self.model.scaler_kwargs)\n except AttributeError:\n output += \"scale_hashed: {0}, scale_vectors: {1}\\n\".format(self.model.scale_hashed, self.model.scale_vectors)\n\n try:\n if self.model.dim_reduction:\n output += \"Reduction: {0}\\nReduction kwargs: {1}\\n\\n\".format(self.model.reduction, self.model.dim_reduction_args)\n except AttributeError:\n pass\n \n output += \"Estimator: {0}\\nEstimator kwargs: {1}\\n\\n\".format(self.model.estimator, self.model.estimator_kwargs)\n \n elif step == 3: \n # Output the request dataframe\n output = \"REQUEST: {0} rows x cols\\nSample Data:\\n\\n\".format(self.request_df.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.request_df.head().to_string(), self.request_df.tail().to_string())\n \n elif step == 4:\n # Output the response dataframe/series\n output = \"RESPONSE: {0} rows x cols\\nSample Data:\\n\\n\".format(self.response.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.response.head().to_string(), self.response.tail().to_string())\n \n elif step == 5:\n # Print the table description if the call was made from the load script\n output = \"\\nTABLE DESCRIPTION SENT TO QLIK:\\n\\n{0} \\n\\n\".format(self.table)\n \n elif step == 6:\n # Message when model is loaded from cache\n output = \"\\nModel {0} loaded from cache.\\n\\n\".format(self.model.name)\n \n elif step == 7:\n # Message when model is loaded from disk\n output = \"\\nModel {0} loaded from disk.\\n\\n\".format(self.model.name)\n \n elif step == 8:\n # Message when cache is updated\n output = \"\\nCache updated. Models in cache:\\n{0}\\n\\n\".format([k for k,v in self.__class__.model_cache.items()])\n \n elif step == 9:\n # Output when a parameter grid is set up\n output = \"Model Name: {0}, Estimator: {1}\\n\\nGrid Search Arguments: {2}\\n\\nParameter Grid: {3}\\n\\n\".\\\n format(self.model.name, self.model.estimator, self.model.grid_search_args, self.model.param_grid)\n \n elif step == 10:\n # self.model.estimator_kwargs['architecture']\n output = \"\\nKeras architecture added to Model {0}:\\n\\n{1}\\n\\n\".format(self.model.name,\\\n self.model.architecture.to_string())\n\n elif step == 11:\n # Output after adding lag observations to input data\n output = \"Lag observations added ({0} per sample). 
New input shape of X is {1}.\\n\\n\".format(self.model.lags, data.shape)\n output += \"Feature Definitions:\\n{0}\\n\\n\".format(self.model.features_df.to_string())\n output += \"Sample Data:\\n{0}\\n...\\n{1}\\n\\n\".format(data.head(5).to_string(), data.tail(5).to_string())\n \n sys.stdout.write(output)\n with open(self.logfile, mode, encoding='utf-8') as f:\n f.write(output)", "def log_to_file(text: str, target: Union[str, Path]) -> None:\n with open(target, \"a\" if Path(target).exists() else \"w\", encoding=\"utf-8\") as file:\n file.write(text)", "def write_debug_log(self, msg):\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n with open(self.debug_log, 'a+') as logfile:\n logfile.write(\"%s: %s\\n\" % (now, msg))", "def write_to_log(self, log_file, log_data):\n with open(self.gamelogs_path + log_file, 'a') as f:\n writer = csv.writer(f)\n writer.writerow(log_data)\n f.close()", "def test_case(self):\n log.e('error日志')\n log.d('debug日志')\n log.i('info日志')\n log.w('warning日志')", "def test_writing(self):\n with contextlib.closing(logfile.LogFile(self.name, self.dir)) as log:\n log.write(\"123\")\n log.write(\"456\")\n log.flush()\n log.write(\"7890\")\n\n with open(self.path) as f:\n self.assertEqual(f.read(), \"1234567890\")", "def generate_test_txt(name, path):\n with open(path + '/test.txt', 'a') as file:\n file.write('data/test/' + name + '\\n')", "def _write_log(self, log_data):\n # for data in log_data:\n # self.log_file.write(\"{}\\n\".format(data).encode('utf-8'))\n self.log_file.write(\"{}\\n\".format(log_data).encode('utf-8'))\n self.log_file.flush()", "def write_summary_file(records, path):\n logfile = open(path, \"w\")\n for i in range(0, len(records)):\n transaction = records[i][0]\n if transaction == \"transfer\":\n logfile.write(\"TR {} {} {} ***\\n\".format(records[i][2],\n records[i][1], records[i][3]))\n elif transaction == \"deposit\":\n logfile.write(\"DE {} 00000000 {} ***\\n\".format(records[i][1],\n records[i][2]))\n elif transaction == \"withdraw\":\n logfile.write(\"WD 00000000 {} {} ***\\n\".format(records[i][1],\n records[i][2]))\n elif transaction == \"create\":\n logfile.write(\"CR {} 00000000 000 {}\\n\".format(records[i][1],\n records[i][2]))\n else:\n logfile.write(\"DL {} 00000000 000 {}\\n\".format(records[i][1],\n records[i][2]))\n logfile.write('ES 00000000 00000000 000 ***\\n')\n logfile.close()\n return", "def set_experiment_logger(path_out, file_name=FILE_LOGS, reset=True):\n log = logging.getLogger()\n log.setLevel(logging.DEBUG)\n if reset:\n release_logger_files()\n path_logger = os.path.join(path_out, file_name)\n fh = logging.FileHandler(path_logger)\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(LOG_FILE_FORMAT)\n log.addHandler(fh)", "def set_experiment_logger(path_out, file_name=FILE_LOGS, reset=True):\n log = logging.getLogger()\n log.setLevel(logging.DEBUG)\n if reset:\n release_logger_files()\n path_logger = os.path.join(path_out, file_name)\n fh = logging.FileHandler(path_logger)\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(LOG_FILE_FORMAT)\n log.addHandler(fh)", "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def render_log(self, 
template):\n\n # only write to the log file if it exists\n if self._selenium_log_file:\n\n id = self.id()\n description = self.shortDescription()\n\n # grab the stack frame info from test_* method\n (obj, filename, lineno, function, code_context, index) \\\n = self.get_test_frame()\n\n # render the test case debug\n html = render_to_string(\n template, {\n 'id': id,\n 'description': description,\n 'filename': filename,\n 'lineno': lineno,\n 'function': function,\n 'code_context': code_context,\n 'index': index,\n 'png': self.get_image_uri(),\n 'text': self.get_visible_text()})\n\n # write it to the file\n self._selenium_log_file.write(html.encode('utf8'))", "def __write_measurement(self, measurement):\n with self.__filename.open(mode='a') as history_file:\n history_file.write(measurement + '\\n')", "def log_to_file(log_path, logroot=True):\n\n # LOGGING FORMAT\n fmt = '[%(asctime)s %(filename)18s] %(levelname)-7s - %(message)7s'\n date_fmt = '%Y-%m-%d %H:%M:%S'\n formatter = logging.Formatter(fmt, datefmt=date_fmt)\n\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(formatter)\n log.addHandler(file_handler)\n\n if logroot:\n root_logger.addHandler(file_handler)\n root_logger.setLevel(logging.DEBUG)", "def to_log(self, namefile=None):\n if namefile is None:\n namefile = self.name.replace(' ', '_')+'.log'\n f = open(namefile, 'w')\n f.write(self.__str__())\n f.close()", "def notest_file(text):\n if debug == 2:\n print(text)\n with open(\"info_file.txt\", \"a\", encoding=\"utf-8\", ) as f:\n f.write(text + \"\\n\")\n elif debug == 1:\n with open(\"info_file.txt\", \"a\", encoding=\"utf-8\", ) as f:\n f.write(text + \"\\n\")", "def _write_log(self, log_data):\n self.log_file.write(ensure_bytes(log_data + \"\\n\"))\n self.log_file.flush()", "def test(self, f):\n self.log(f)", "def write_log(text):\n write_file(read_file(log_file), log + '\\n' + text)", "def test_fp_logger(self):\n log_path = self.log_paths['tcs.file.file_parser']\n log = logging.getLogger('tcs.file.file_parser')\n ctrl = self.md5(log_path)\n log.debug(\"test\")\n assert self.md5(log_path) != ctrl", "def write_log(path):\n time.sleep(0.01)\n with open(path, 'w') as fid:\n for line in FAKE_CHECKOUT_SUCCESS.splitlines():\n fid.write(line + '\\n')\n time.sleep(0.01)", "def log_to_file(self, filename=None):\n if not filename:\n filename = '%s/../../output/sentimentpy.log' % os.path.dirname(os.path.realpath(__file__))\n file_handler = RotatingFileHandler(filename, 'a', 1000000, 1)\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(self.formatter)\n self.log.addHandler(file_handler)\n return self", "def _logToFile(logsLst, resultJSON=None, logFile=\"logFile.txt\"):\n if not LOGGING_TO_FILE: return\n with open(logFile, \"a+\") as file:\n message = \"\\n\".join(logsLst)\n file.write(\"------------------Logging--------------------\\n\")\n file.write(str(datetime.datetime.now()) + \"\\n\")\n # file.write(str(datetime.datetime.utcnow()) + \"\\n\")\n file.write(\"---------------------------------------------\\n\")\n file.write(message + \"\\n\")\n if resultJSON is not None:\n file.write(\"resulting JSON after comparison:\\n\")\n file.write(resultJSON)\n file.write(\"\\n\")", "def save_log(file_path=None, stat=\"\"):\n fp = open(file_path, mode=\"a+\")\n fp.write(stat + \"\\n\")\n fp.close()", "def getAndSetLogfile(self,device,outputFile = None):\n\n dirResults = \"/var/www/html/sqa/testResults/%s/\"%self.build\n httpPath = \"http://sysmaster/sqa/testResults/%s/\"%self.build\n if self.isCardSerialSet 
== 0:\n \n self.setCardSerialAndType(device)\n \n # add number to the end, so it wont complain\n if outputFile:\n outputFile = outputFile + \"-0\"\n \n if not outputFile:\n outputFile = \"%s-%s-%s-%s-%s-%s-0\"%(self.host.name,get_device_letter(device),self.cardType,self.cardSerial,self.host.cat_etc_issue(),self.testcaseStr)\n \n outputFile = dirResults + outputFile\n \n #outputFile = outputFile + \".html\"\n \n outputFile = incrementFileNameIfExists(outputFile,ext = \"html\")\n \n \n self.logFile = outputFile\n \n m = re.search(\".*/(\\S+)\",outputFile)\n outputFile = m.group(1)\n self.logFileHttpPath = httpPath + outputFile\n return self.logFile", "def _write_test_dump(pcap_file, dump):\n\t\tfor packet in dump:\n\t\t\twrpcap(pcap_file, packet, append=True)", "def init_log(path):\n file = open(path, 'w+')\n file.close()", "def write_test(test_contents, new_test_host_path):\n with open(new_test_host_path, 'w') as f:\n f.write(test_contents)", "def log(self):\n\t\tfilename = '/var/log/postunsuspendacct-%s.log' % self.argv.get('user')\n\t\tfileobj = open(filename, 'w');\n\t\tfileobj.write(self.title)\n\t\tfor (key, value) in self.argv.items():\n\t\t\tfileobj.write('%s: %s\\n' % (key, value))\n\t\tfileobj.close()\n\t\tprint \"[%s] Log saved '%s'\" % (ctime(), filename)", "def test_writing(self):\n with contextlib.closing(RiggedDailyLogFile(self.name, self.dir)) as log:\n log.write(\"123\")\n log.write(\"456\")\n log.flush()\n log.write(\"7890\")\n\n with open(self.path) as f:\n self.assertEqual(f.read(), \"1234567890\")", "def recordLog(project, status, memo):\n path = getPath(project)\n log = open(path, 'a')\n writer = csv.writer(log, lineterminator='\\n')\n writer.writerow((time.time(), status, memo))\n log.close()\n if status == 'a':\n print(\"Tracking your time on \" + project)\n if status == 's':\n print(\"Tracking suspended on \" + project)\n if status == 't':\n print(\"Time shifted on \" + project)\n if not path == '.sourglass':\n store = open(os.path.join(basepath, 'last'), 'w')\n store.write(project)\n store.close", "def get_log_file(self):\n self.log_file = os.path.join(\n self.directory,\n \"ts\",\n self.ts.reaction_label,\n \"conformers\",\n \"{}_{}_{}.log\".format(self.ts.reaction_label, self.ts.direction, self.ts.index))\n return self.log_file", "def write_data(self, data):\n print \"Writing data...\"\n # Write data into log\n self.log.write_file(data)\n\n # Close log so information can be sent\n self.log.close_log()", "def recordLogsToFile(logpath):\n ret = True\n global LOGLIST\n if not os.path.exists(logpath):\n os.makedirs(logpath)\n\n f = open(logpath+'/TesterUpdatelogs.log','wb')\n LOGLIST = [line+'\\n' for line in LOGLIST]\n try:\n f.truncate()\n f.writelines(LOGLIST)\n except Exception:\n print 'Write logs to path %s failed!' 
%logpath\n print Exception\n ret = False\n finally:\n f.close()\n return ret", "def write_result_to_file(self):\n self.__test_result[Result.__RUN] = self.__run\n with open(self.__json_file_path, \"w+\") as outfile:\n json.dump(self.__test_result, outfile,\n ensure_ascii=False, indent=2)", "def open_logfile(self):\r\n if self.output_option == 2:\r\n self.ER_file = open(self.result_filename, 'w')", "def test_log(self):\r\n # expected result when no result_path is provided\r\n self.default_app(\r\n seq_path=self.tmp_seq_filepath,\r\n result_path=None,\r\n log_path=self.tmp_log_filepath,\r\n )\r\n\r\n # open the actual log file and the expected file, and pass into lists\r\n with open(self.tmp_log_filepath) as f:\r\n obs = [l.strip() for l in list(f)]\r\n exp = rdp_test1_log_file_contents.split('\\n')\r\n # sort the lists as the entries are written from a dict,\r\n # so order may vary\r\n obs.sort()\r\n exp.sort()\r\n self.assertEqual(obs, exp)", "def writeToLog(logName, message, writeOrAppend):\r\n\r\n\twith open(logName, writeOrAppend) as out:\r\n\t\tout.write(message)", "def report(LOGDIR, epoch, e_dict, saver, sess, fh_log):\n # print loss\n print (\"Epoch: %i; Loss: %f; KLd: %f; CE %f\" % (epoch, e_dict[\"loss\"][-1], e_dict[\"KLd\"][-1], e_dict[\"CE\"][-1]))\n fh_log.write(\"%i\\t%0.5e\\t%0.5e\\t%0.5e\\n\" % (epoch, e_dict[\"loss\"][-1], e_dict[\"KLd\"][-1], e_dict[\"CE\"][-1]))", "def write_results_to_file(test_results_dict, results_json_file, test_summary, summary_file):\n try:\n logging.info(\"Removing previous version of results file...\")\n if os.path.exists(results_json_file):\n os.remove(results_json_file)\n if os.path.exists(summary_file):\n os.remove(summary_file)\n except Exception as e:\n logging.error(\"Deleting file failed with error '{ERROR}'\".format(ERROR=e))\n\n try:\n logging.info(\"Writing test results to JSON file '{FILE}'...\".format(FILE=results_json_file))\n with open(results_json_file, 'w', encoding='utf-8') as results_json:\n json.dump(test_results_dict, results_json, ensure_ascii=False, indent=4)\n\n logging.info(\"Writing test summary to file '{FILE}'...\".format(FILE=summary_file))\n f = open(summary_file, \"w\")\n f.write(test_summary)\n f.close()\n except Exception as e:\n logging.error(\"Writing test results to files failed with error '{ERROR}'\".format(ERROR=e))", "def write_log_file(log_df, log_file):\n if log_file.is_file():\n # if log file already exists append to it, without the column headers\n log_df.to_csv(log_file, mode='a', sep='\\t', index=False, header=False)\n else:\n # if log file doesn't exist create it, with column headers\n log_df.to_csv(log_file, sep='\\t', index=False)", "def make_log():\n log_file = os.path.join(phys_dir,'ge_phys2bids_'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")+'.log')\n with open(log_file,'w') as log:\n log.write('-------- GE phys2bids --------\\n\\n')\n log.write('DICOM directory: %s\\n'%dcm_dir)\n log.write('Physiology directory: %s\\n'%phys_dir)\n log.write('Output directory: %s\\n\\n'%out_dir)\n log.write('%d EPI files were found\\n\\n'%len(dcm_dict))\n for rn in dcm_dict.keys():\n log.write('------------------------------\\n')\n log.write('%s\\n'%dcm_dict[rn]['out_name'])\n log.write('Start time: %s\\n'%dcm_dict[rn]['start_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('End time: %s\\n'%dcm_dict[rn]['end_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('PPG file: %s\\n'%dcm_dict[rn]['ppg_file'])\n log.write('Respiration file: %s\\n'%dcm_dict[rn]['resp_file'])\n log.write('ECG file: 
%s\\n'%dcm_dict[rn]['ecg_file'])\n log.write('------------------------------\\n\\n')", "def log_transfer(function, pid, timestamp, timestampend):\n # see if we should log this\n enable = arizonaconfig.get_option(\"enablelogging\")\n logdir = arizonaconfig.get_option(\"logdir\")\n if not enable:\n return\n\n logfile= logdir + \"/\" + get_transfer_log_filename()\n\n # prepare the string\n writeout = function + \" \" + timestamp + \" \" + timestampend + \" \" + pid + \"\\n\"\n\n # try to append to the file\n try:\n tempfile = open(logfile, \"a\")\n tempfile.write(writeout)\n tempfile.close()\n except:\n pass" ]
[ "0.77759415", "0.6469761", "0.6405734", "0.63734347", "0.6358866", "0.6292069", "0.627844", "0.6241813", "0.62395686", "0.6190359", "0.618326", "0.6178866", "0.6163366", "0.61453724", "0.61151177", "0.6075739", "0.6044056", "0.6033274", "0.5966068", "0.59578186", "0.5957704", "0.59228593", "0.5915319", "0.5910582", "0.5851896", "0.5845745", "0.5840753", "0.5827593", "0.5812578", "0.58048826", "0.57903725", "0.57810026", "0.57790023", "0.57537746", "0.57533264", "0.5749405", "0.574907", "0.57302076", "0.5727805", "0.572757", "0.5723915", "0.57210857", "0.5718639", "0.57161427", "0.57035327", "0.5685404", "0.56819856", "0.5679531", "0.5679503", "0.5678928", "0.56785977", "0.5659147", "0.5658819", "0.56559753", "0.56522405", "0.5644431", "0.563679", "0.5636467", "0.5634217", "0.5633538", "0.5619306", "0.56172013", "0.56137425", "0.56102127", "0.56021094", "0.56021094", "0.5600504", "0.5599562", "0.55988777", "0.55953264", "0.55929226", "0.55901176", "0.5577847", "0.5568001", "0.5567881", "0.55613995", "0.5552699", "0.555176", "0.55513334", "0.55463386", "0.5539196", "0.5537683", "0.5534974", "0.55301964", "0.5511694", "0.5508422", "0.55048656", "0.5503447", "0.5501103", "0.5500632", "0.5481779", "0.54796636", "0.54729307", "0.5467632", "0.5465057", "0.54605514", "0.54584634", "0.54557633", "0.54516864", "0.5446913" ]
0.7797923
0
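For orientation, the record above (and the one that follows) uses the same field layout seen throughout this dump: a query string, a positive document, an objective metadata dict, a list of negative documents with string-encoded scores, and the positive's score and rank. Below is a minimal, illustrative Python sketch for inspecting one such record once it has been parsed into a dict; the function and variable names here are assumptions, not part of the dataset itself.

# Illustrative sketch (assumed names): inspect one record of this dump after it
# has been parsed into a Python dict with the fields shown above.
def summarize_record(record: dict) -> None:
    query = record["query"]
    positive = record["document"]
    negatives = record["negatives"]
    # negative_scores are stored as strings in this dump, so convert to floats.
    scores = [float(s) for s in record["negative_scores"]]

    print("query:", query)
    print("positive document:", len(positive), "chars,",
          "score", record["document_score"], "rank", record["document_rank"])

    # The highest-scoring negative is the "hardest" one for contrastive training.
    hardest = max(range(len(scores)), key=scores.__getitem__)
    print("hardest negative #%d (score %.4f):" % (hardest, scores[hardest]))
    print(negatives[hardest][:200])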
Write log messages to console and to a log file (with timestamp).
def log_message(s, msg, header=False):
    if s.log_to_file is None:
        s.log_to_file = ""
    line_sep = "=" * min(len(msg), 80)
    full_msg = "\n\t\t" + line_sep + "\n\t\t" + msg + "\n\t\t" + line_sep + "\n"
    if header:
        logger.debug("\n\n\t\t\t***" + msg + "***\n\n")
        s.log_to_file += now_short() + full_msg + "\r\n"
    else:
        logger.debug(full_msg)
        s.log_to_file += now_short() + msg + "\r\n"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _log_to_file(self, message):\n if self.log is not None:\n message = \"[%s] %s\" % (datetime.datetime.utcnow().strftime('%H:%M:%S'), message)\n self.log.write(\"%s\\n\" % (message,))\n self.log.flush()\n print message", "def writeToLogFile(self, event):\n outPutStr = '{:013}'.format(0)\n logOutPutStr = outPutStr + '\\t' + '{:.2f}'.format (time ()) + '\\t' + event + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ')\n printOutPutStr = outPutStr + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ') + '\\t' + event\n print (printOutPutStr)\n if self.logFP is not None:\n self.logFP.write(logOutPutStr + '\\n')\n self.logFP.flush()", "def log(self, message):\n timestamp = time.strftime(\"[%H:%M:%S]\", time.localtime(time.time()))\n self.file.write('%s %s\\n' % (timestamp, message))\n self.file.flush()", "def log(self, msg):\n current_datetime = self.get_date_time()\n self.file.write(\"%s %s\\n\" % (current_datetime, msg))", "def write_to_file(self, *args, **kwargs) -> None:\n with open(self._log_file, 'a') as file:\n print(file=file, *args, **kwargs)", "def printToLogfile (self, text):\n if self.logFile is not None:\n self.logFile.write(text)\n self.logFile.flush()", "def log(msg=\"\"):\n print(msg)\n sys.stdout.flush()\n f = open(\"/target/testdriver.log\", \"a\")\n f.write('{:%Y-%m-%d %H:%M:%S.%s} :: '.format(datetime.datetime.now()))\n f.write(f\"{msg}\\n\")\n f.close()", "def log(message):\n path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n with open(os.path.join(path, logfile_name), 'a+') as f:\n t = strftime(\"%d %b %Y %H:%M:%S\", gmtime())\n f.write(\"\\n\" + t + \" \" + str(message))", "def log(message):\n path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n with open(os.path.join(path, logfile_name), 'a+') as f:\n t = strftime(\"%d %b %Y %H:%M:%S\", gmtime())\n f.write(\"\\n\" + t + \" \" + message)", "def logToFile(message):\n\tif logTime:\n\t\tmessage = time.asctime()[11:20] + message\n\tif isinstance(message, unicode):\n\t\tmessage = message.encode(\"iso8859\")\n\tsys.stdout.write(message + \"\\n\")", "def outputlogMessage(message):\n global logfile\n timestr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime() )\n outstr = timestr +': '+ message\n print(outstr)\n f=open(logfile,'a')\n f.writelines(outstr+'\\n')\n f.close()", "def log(message):\n path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n with open(os.path.join(path, logfile_name), 'a+') as f:\n t = strftime(\"%d %b %Y %H:%M:%S\", gmtime())\n f.write(\"\\n\" + t + \" %s\" % message)", "def write_log(*args):\n\n with open(\"server.log\", 'a') as log_file:\n log_file.write(datetime.now().isoformat() + \"\\t\")\n log_file.write(\"\\n\".join(args))\n log_file.write(\"\\n\")", "def _log(self, log, message):\n log_entry = '[%s] %s\\n' % (time.strftime('%Y/%m/%d %H:%M:%S'), message)\n log.write(log_entry)\n if self.verbose:\n print log_entry.rstrip()", "def log(msg, logfile):\n print(msg)\n logfile.write(msg + \"\\n\")", "def write_log(self, logfile='./src/movement_log.txt'):\n # TODO: parameterize logfile name\n print('Writing logs...')\n f = open(logfile, \"w\")\n for command in self.log_arr:\n f.write(command + \"\\n\")\n print('Writing finished')", "def write(self, msg, flag_print=True):\n file = open(self.log_path, \"a\")\n insert_time=datetime.now().strftime('%H:%M:%S.%f')[:-3]\n current_time = \"[\"+insert_time+\"]\"\n log_msg = current_time + \" \" + msg + \"$\" +\"\\n\" \n file.write(log_msg)\n # if flag_print 
is True:\n print(log_msg)", "def log_to_file(text, status='INFO'):\n outfile = open(LogName, 'a')\n outfile.write(timestamp()+' - '+status+' - '+str(text)+'\\n')\n outfile.close()", "def write(self):\n with open(\"log.txt\", 'w') as f:\n for message in self.message_list:\n f.write(message + \"\\n\")", "def write_log(text):\n write_file(read_file(log_file), log + '\\n' + text)", "def saveToLogFile(self, msg):\n path = os.path.join(self.parent.progpath, \"logfile.txt\")\n fo = open(path, 'a')\n # prefix with current date and time from now variable\n msg = \"\\n#{0}\\n\".format(datetime.datetime.now()) + msg\n fo.write(msg)\n fo.close()", "def saveToLogFile(self, msg):\n path = os.path.join(self.parent.progpath, \"logfile.txt\")\n fo = open(path, 'a')\n # prefix with current date and time from now variable\n msg = \"\\n#{0}\\n\".format(datetime.datetime.now()) + msg\n fo.write(msg)\n fo.close()", "def log_message(self, message):\n with open(LOGFILE, \"a\") as f:\n currentDt = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S.%f)\")\n message = \"\\n\" + currentDt + '---' + message\n f.write(message)", "def log(self, txt):\n if self.logfile:\n self.logfile.write(txt)", "def logToFile(output, file): \r\n print( output, file=file )", "def log(self, *args):\n self.log_stdout(*args)\n print(*args, file=self.general_log_file.file)\n self.general_log_file.flush()", "def log(self, line):\n now = datetime.datetime.now()\n time = datetime.datetime.strftime(now, '(%d %b %Y %H:%M:%S)')\n with open(self.logfile, 'a') as log:\n log.write(time + ' ' + line + '\\n')", "def writeToLog(self,msg):\n\tlocaltime = \"%s \"%time.strftime(\"%H:%M:%S\",time.localtime())\n\tpid = \"%s \"%self.pid\n self.log.write(pid+localtime+'###### '+msg+'\\n')", "def write(self, message):\r\n now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S - ')\r\n self.terminal.write(message)\r\n self.log.write(message)", "def write_debug_log(self, msg):\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n with open(self.debug_log, 'a+') as logfile:\n logfile.write(\"%s: %s\\n\" % (now, msg))", "def _log2mylog(self, msg):\n time_str = mod_time.strftime(\n \"%Y-%m-%d %H:%M:%S\", mod_time.localtime(mod_time.time())\n )\n msg = str(msg)\n content = \"%s [%s]\\n\" % (time_str, msg)\n fa = open(self.mylogfile, \"a\")\n fa.write(content)\n fa.close()", "def log_to_file(self, filename=None):\n if not filename:\n filename = '%s/../../output/sentimentpy.log' % os.path.dirname(os.path.realpath(__file__))\n file_handler = RotatingFileHandler(filename, 'a', 1000000, 1)\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(self.formatter)\n self.log.addHandler(file_handler)\n return self", "def log_data(self):\n\n self.check_dir()\n with open(self.log_file, \"a\") as logger_file:\n logger_file.write(\"{}, {}\\n\".format(self.time, self.msg))", "def write_to_all(self, *args, **kwargs) -> None:\n print(*args, **kwargs)\n with open(self._log_file, 'a') as file:\n print(file=file, *args, **kwargs)", "def write_log(output_dir, texts, new_file=False):\n if new_file:\n f = open(os.path.join(output_dir, \"std.log\"), \"w\")\n else:\n f = open(os.path.join(output_dir, \"std.log\"), \"a\")\n f.write(str(texts) + \"\\n\")\n f.close()", "def to_log(*text):\n print(*text)\n with open('log.txt', 'a') as log:\n print(*text, file=log)", "def log(message):\n from tempfile import gettempdir\n from time import strftime\n from sys import stderr\n timestamp = strftime(\"%d-%b-%y %H:%M:%S\")\n if len(message) == 0 or message[-1] != \"\\n\": message += \"\\n\"\n 
stderr.write(\"%s: %s\" % (timestamp,message))\n logfile = gettempdir()+\"/beam_profiler.log\"\n file(logfile,\"a\").write(timestamp+\" \"+message)", "def logsave(self):\n log_file = open(self.conf[\"output_prefix\"] + \"_log.txt\", \"w\")\n try:\n log_file.write(self.log)\n finally:\n log_file.close()", "def _write_log(self, log_data):\n # for data in log_data:\n # self.log_file.write(\"{}\\n\".format(data).encode('utf-8'))\n self.log_file.write(\"{}\\n\".format(log_data).encode('utf-8'))\n self.log_file.flush()", "def log_and_print(self, message):\n self.f.write(message + \"\\n\")\n print message", "def create_file_logs(config, board, tests_to_run, logger):\n combined_list = []\n\n def add_to_combined_list(log, name, combined_list=combined_list):\n for line in log.split(\"\\r\\n\"):\n try:\n if line == \"\":\n continue\n if line.startswith(\"\\n\"):\n line = line[1:]\n if line.startswith(\" [\"):\n line = line[1:]\n ts, text = line.split(\"]\", 1)\n timestamp = float(ts[1:-1])\n else:\n text = line\n timestamp = 0.0\n combined_list.append(\n {\"time\": timestamp, \"text\": str(text), \"name\": name}\n )\n except Exception as error:\n logger.error(error)\n logger.debug(\"Failed to parse log line = %s\" % repr(line))\n\n idx = 1\n console_combined = []\n for console in board.consoles:\n with open(os.path.join(config.output_dir, \"console-%s.log\" % idx), \"w\") as clog:\n clog.write(console.log)\n add_to_combined_list(console.log, \"console-%s\" % idx)\n add_to_combined_list(console.log_calls, \"console-%s\" % idx)\n add_to_combined_list(console.log, \"\", console_combined)\n idx = idx + 1\n\n def write_combined_log(combined_list, fname):\n with open(os.path.join(config.output_dir, fname), \"w\") as clog:\n for e in combined_list:\n try:\n if e[\"name\"] == \"\":\n clog.write(\"[%s]%s\\r\\n\" % (e[\"time\"], repr(e[\"text\"])))\n else:\n clog.write(\n \"%s: [%s] %s\\n\" % (e[\"name\"], e[\"time\"], repr(e[\"text\"]))\n )\n except Exception as error:\n logger.error(error)\n logger.debug(\"failed to parse line: %s\" % repr(e))\n\n import operator\n\n console_combined.sort(key=operator.itemgetter(\"time\"))\n write_combined_log(console_combined, \"console-combined.log\")\n\n for device in config.devices:\n with open(os.path.join(config.output_dir, device + \".log\"), \"w\") as clog:\n d = getattr(config, device)\n if hasattr(d, \"log\"):\n clog.write(d.log)\n add_to_combined_list(d.log, device)\n add_to_combined_list(d.log_calls, device)\n\n for test in tests_to_run:\n if hasattr(test, \"log\") and test.log != \"\":\n with open(\n os.path.join(config.output_dir, \"%s.log\" % test.__class__.__name__), \"w\"\n ) as clog:\n clog.write(test.log)\n if hasattr(test, \"log_calls\"):\n add_to_combined_list(test.log_calls, test.__class__.__name__)\n\n combined_list.sort(key=operator.itemgetter(\"time\"))\n write_combined_log(combined_list, \"all.log\")", "def append_to_logfile(self):\n with open(self.path, \"a+\") as f:\n for item in self.logs:\n f.write(item)\n self.logs.clear()", "def log_write(log_f, text, action='a'):\n\n f = open(log_f, action)\n f.write(text)\n f.close()", "def writelog(self,*args):\n import sys\n print(' '.join([str(a) for a in args]),file=sys.stderr)", "def setup_logging(\n level,\n console_level,\n file_level,\n):\n global _LOGGING_INITIALIZED\n if _LOGGING_INITIALIZED:\n logging.debug('SetupLogging: logging system already initialized')\n return\n\n program_name = get_program_name()\n logging.addLevelName(LogLevel.DEBUG_VERBOSE, 'DEBUG_VERBOSE')\n 
logging.addLevelName(LogLevel.ALL, 'ALL')\n\n # Initialize the logging system:\n\n log_formatter = logging.Formatter(\n fmt='%(asctime)s %(levelname)s %(filename)s:%(lineno)s : %(message)s',\n )\n\n log_formatter.formatTime = _format_time\n\n logging.root.handlers.clear()\n logging.root.setLevel(level)\n\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(log_formatter)\n console_handler.setLevel(console_level)\n logging.root.addHandler(console_handler)\n\n # Initialize log dir:\n tstamp = timestamp()\n pid = os.getpid()\n\n if FLAGS.log_dir is None:\n tmp_dir = os.path.join('/tmp', getpass.getuser(), program_name)\n make_dir(tmp_dir)\n FLAGS.log_dir = tempfile.mkdtemp(\n prefix='%s.%d.' % (tstamp, pid),\n dir=tmp_dir)\n\n # Link current log dir to latest:\n latest_path = os.path.join(tmp_dir, \"latest\")\n remove(latest_path)\n os.symlink(src=os.path.basename(FLAGS.log_dir), dst=latest_path)\n\n logging.info('Using log dir: %s', FLAGS.log_dir)\n make_dir(FLAGS.log_dir)\n\n log_file = os.path.join(FLAGS.log_dir, '%s.%s.%d.log' % (program_name, tstamp, pid))\n\n # Link current log file to latest.log:\n latest_path = os.path.join(FLAGS.log_dir, \"latest.log\")\n remove(latest_path)\n os.symlink(src=log_file, dst=latest_path)\n\n file_handler = logging.FileHandler(filename=log_file)\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(file_level)\n logging.root.addHandler(file_handler)\n\n from base import log\n log.set_logger(log.Logger(level=log.Level.ALL))\n\n _LOGGING_INITIALIZED = True", "def _logToFile(logsLst, resultJSON=None, logFile=\"logFile.txt\"):\n if not LOGGING_TO_FILE: return\n with open(logFile, \"a+\") as file:\n message = \"\\n\".join(logsLst)\n file.write(\"------------------Logging--------------------\\n\")\n file.write(str(datetime.datetime.now()) + \"\\n\")\n # file.write(str(datetime.datetime.utcnow()) + \"\\n\")\n file.write(\"---------------------------------------------\\n\")\n file.write(message + \"\\n\")\n if resultJSON is not None:\n file.write(\"resulting JSON after comparison:\\n\")\n file.write(resultJSON)\n file.write(\"\\n\")", "def log(self, msg=None):\n f = open(self.logbook, 'a')\n # if send or receive, write message\n if msg: \n f.write(\" System time: \" + str(datetime.now()) + \n \" Logical clock time: \" + str(self.clock_time) + \n \" \" + str(msg) + '\\n')\n # if it is an internal event just write the system time and current\n # logical clock time\n else:\n f.write(\" System time: \" + str(datetime.now()) + \n \" Logical clock time: \" + str(self.clock_time) + '\\n')\n f.close()", "def write_to_log(self, log_file, log_data):\n with open(self.gamelogs_path + log_file, 'a') as f:\n writer = csv.writer(f)\n writer.writerow(log_data)\n f.close()", "def log(self, loginfo):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s:%(message)s',\n datefmt='%d %b %Y %H:%M:%S',\n filename=self.logfilepath,\n filemode='w')\n filelog = logging.FileHandler(self.logfilepath)\n logging.getLogger('Functest').addHandler(filelog)\n logging.info(loginfo)", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def write_log_to_file(filename, content):\n append_to_file(filename, content)", "def create_file_logs(config, board, tests_to_run, logger):\n combined_list = []\n\n def add_to_combined_list(log, name, combined_list=combined_list):\n for line in 
log.split(\"\\r\\n\"):\n try:\n if line == \"\":\n continue\n if line.startswith(\"\\n\"):\n line = line[1:]\n if line.startswith(\" [\"):\n line = line[1:]\n ts, text = line.split(\"]\", 1)\n timestamp = float(ts[1:-1])\n else:\n text = line\n timestamp = 0.0\n combined_list.append(\n {\"time\": timestamp, \"text\": str(text), \"name\": name}\n )\n except Exception as error:\n logger.error(error)\n logger.debug(f\"Failed to parse log line = {repr(line)}\")\n\n idx = 1\n console_combined = []\n for console in board.consoles:\n with open(os.path.join(config.output_dir, f\"console-{idx}.log\"), \"w\") as clog:\n clog.write(console.log)\n add_to_combined_list(console.log, f\"console-{idx}\")\n add_to_combined_list(console.log_calls, f\"console-{idx}\")\n add_to_combined_list(console.log, \"\", console_combined)\n idx = idx + 1\n\n def write_combined_log(combined_list, fname):\n with open(os.path.join(config.output_dir, fname), \"w\") as clog:\n for e in combined_list:\n try:\n if e[\"name\"] == \"\":\n clog.write(f\"[{e['time']}]{repr(e['text'])}\\r\\n\")\n else:\n clog.write(f\"{e['name']}: [{e['time']}] {repr(e['text'])}\\n\")\n except Exception as error:\n logger.error(error)\n logger.debug(f\"failed to parse line: {repr(e)}\")\n\n import operator\n\n console_combined.sort(key=operator.itemgetter(\"time\"))\n write_combined_log(console_combined, \"console-combined.log\")\n\n for device in config.devices:\n with open(os.path.join(config.output_dir, device + \".log\"), \"w\") as clog:\n d = getattr(config, device)\n if hasattr(d, \"log\"):\n clog.write(d.log)\n add_to_combined_list(d.log, device)\n add_to_combined_list(d.log_calls, device)\n\n for test in tests_to_run:\n if hasattr(test, \"log\") and test.log != \"\":\n with open(\n os.path.join(config.output_dir, f\"{test.__class__.__name__}.log\"), \"w\"\n ) as clog:\n clog.write(test.log)\n if hasattr(test, \"log_calls\"):\n add_to_combined_list(test.log_calls, test.__class__.__name__)\n\n combined_list.sort(key=operator.itemgetter(\"time\"))\n write_combined_log(combined_list, \"all.log\")", "def write_log(self, msg: str):\n self.cta_engine.write_log(msg, self)", "def message(msg):\n # Let's print to console too. 
Can remove if requested.\n print (\"{} - {}\\n\".format(time.asctime(), msg))\n with open(LOG_FILE, 'a') as log:\n log.write(\"{} - {}\\n\".format(time.asctime(), msg))", "def append_logfile(filename, file_str):\n file_str = \"[\" + get_datetime_str() + \"]\" + file_str\n write_file(filename, file_str, append=1)", "def write_log(self, log_filename, data):\n open(log_filename, 'a').write(str(data))", "def enable_log_file():\n\n file_handler = logging.FileHandler(\"run-{}.log\".format(get_time_str()))\n file_handler.setFormatter(FORMATTER)\n\n for logger in LOGGER_TABLE.values():\n logger.addHandler(file_handler)", "def write(self, message, print_to_stdout=True):\n with open(self.log_path, 'a') as log_file:\n log_file.write(message + '\\n')\n if print_to_stdout:\n print(message)", "def WriteLog(self, content, file_name=None):\n file_path = ''\n if file_name is None:\n file_path = tempfile.NamedTemporaryFile(dir=self.events_dir,\n delete=False).name\n else:\n file_path = os.path.join(self.events_dir, file_name)\n with open(file_path, 'a') as f:\n f.write(content)", "def __logtofile(self, log_name):\n logger = logging.getLogger(log_name)\n\n file_path = os.path.join(self.log_file_path, log_name + '.txt')\n\n formatter = logging.Formatter('<%(asctime)s> %(levelname)-8s %(message)s',\n datefmt='%y-%m-%d %H:%M:%S')\n self.file_handlers[logger] = logging.FileHandler(file_path, mode='w')\n self.file_handlers[logger].setFormatter(formatter)\n self.file_handlers[logger].setLevel(logging.DEBUG)\n logger.addHandler(self.file_handlers[logger])\n\n logger.info('SAVING LOGS IN: %s' % file_path)", "def append_logfile(message=None, logfile=log, path=cwd):\n if message is None:\n return\n # Wrap the text if it is greater than 80 - 25 = 55 characters.\n # Indent 25 spaces to on left to allow for width of time stamp\n wrapper = textwrap.TextWrapper()\n wrapper.initial_indent = \" \" * 25\n wrapper.subsequent_indent = \" \" * 25\n wrapper.width = 80\n message = wrapper.fill(message).lstrip()\n\n if debug: print(path + logfile)\n f = open(path + logfile, \"a\")\n # Truncate the 6 digit microseconds to be 3 digits of milli-seconds\n stamp = (\"{0:%Y-%m-%d %H:%M:%S}.{1}:\".format(datetime.datetime.now(),\n datetime.datetime.now().strftime(\"%f\")[:-3]))\n if debug: print(stamp + \" \" + message)\n f.write(stamp + \" \" + message + \"\\n\")", "def logToStdout(timestamp=False):\n logs['console'] = MyLogObserver(sys.stdout)\n if not timestamp:\n logs['console'].timeFormat = \"\" #get rid of that\n sys.stdout = StdioKabob(0)\n sys.stderr = StdioKabob(1)", "def _write_log(self, log_data):\n self.log_file.write(ensure_bytes(log_data + \"\\n\"))\n self.log_file.flush()", "def logFile(self):\n\n event = 'stim'\n mStr = '{:013}'.format(self.mouse.tag) + '\\t'\n outPutStr = mStr + \\\n datetime.fromtimestamp(int(time())).isoformat(' ') + '\\t' + event\n print (outPutStr)\n if self.textfp != None:\n outPutStr = mStr + '{:.2f}'.format(time()) + '\\t' + event\n self.textfp.write(outPutStr + '\\n')\n self.textfp.flush()", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def saveLogFile(self, fname = \"data/status.txt\"):\n with open(fname, 'w') as f:\n f.write(\"<br>\\n\".join(self.logLines))\n self.log(\"wrote \"+fname)", "def log_to_file(log_path, logroot=True):\n\n # LOGGING FORMAT\n fmt = '[%(asctime)s %(filename)18s] 
%(levelname)-7s - %(message)7s'\n date_fmt = '%Y-%m-%d %H:%M:%S'\n formatter = logging.Formatter(fmt, datefmt=date_fmt)\n\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(formatter)\n log.addHandler(file_handler)\n\n if logroot:\n root_logger.addHandler(file_handler)\n root_logger.setLevel(logging.DEBUG)", "def log(arguments, message):\n\n # Prints the message to console if verbose is set to True.\n if arguments.verbose:\n print(message)\n\n # Logs the message within a specific log file is defined.\n if arguments.log_dir != '':\n # Creates the directory for the log files.\n os.makedirs(arguments.log_dir, exist_ok=True)\n\n # Logs the message to the log file.\n print(message, file=open(os.path.join(arguments.log_dir, f\"{arguments.experiment}_log.txt\"), 'a'))", "def __write_logs_to_file(self, file_path):\n\n \"\"\"\n The following options are used to format the date/time of logs\n %Y Year with century as a decimal number.\n %m Month as a decimal number [01,12].\n %d Day of the month as a decimal number [01,31].\n\n %H Hour (24-hour clock) as a decimal number [00,23].\n %M Minute as a decimal number [00,59].\n \"\"\"\n\n log_path = \"Results/Script_Logs/merge_files_log.txt\"\n Log(\"Merged files to: {0}\".format(file_path),\n log_path=log_path,\n erase_file=False)", "def _logger(self):\r\n\r\n # Create filename for log\r\n filenameF = self._vna.getDateFormatted() + \".txt\"\r\n filenameF = \"Logs/\" + filenameF \r\n f = open(filenameF, \"a+\") # Log saved in directory named logs located in same directory as this file\r\n \r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._voltages)):\r\n # f.write('%s\\t\\t\\t' % self._voltages[i][0])\r\n # else:\r\n for i in range(len(self._voltages)):\r\n f.write('%s\\t\\t' % self._voltages[i][0])\r\n f.write('\\n')\r\n\r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._voltages[0])):\r\n # line = \"\"\r\n # for j in range(len(self._voltages)):\r\n # line = line + str(self._frequency[j][i]) + '\\t' + str(self._intensity[j][2*i]) + \\\r\n # str(self._intensity[j][2*i + 1]) + '\\t'\r\n # f.write(line)\r\n # f.write('\\n')\r\n # else: \r\n for i in range(len(self._voltages[0])):\r\n line = \"\"\r\n for j in range(len(self._voltages)):\r\n line = line + str(self._frequency[j][i]) + '\\t' + str(self._intensity[j][i]) + '\\t' \r\n f.write(line)\r\n f.write('\\n')", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def setup_log():\n\n #logging.basicConfig(filename='log.txt',filemode='a',format='%(asctime)s %(threadName)s %(filename)s %(funcName) %(lineno) %(levelname)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S')\n\n #get the root logger\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n #set up logging to console for INFO and worse\n sh = colorlog.StreamHandler()\n sh.setLevel(logging.INFO)\n #sh_formatter = colorlog.Formatter(fmt='%(log_color)s%(levelname):%(asctime)s\\n%(message)s', datefmt='%H:%M:%S')\n sh_formatter = colorlog.ColoredFormatter(\n \"%(log_color)s%(levelname)-8s - %(name)-25s - %(threadName)-15s - %(asctime)s - %(cyan)s \\n %(message)s\\n\",\n datefmt=None,\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red,bg_white',\n },\n secondary_log_colors={},\n style='%'\n)\n sh.setFormatter(sh_formatter)\n\n #set up logging to file for ALL messages\n #fh = 
logging.FileHandler('log.txt')\n # fh = logging.handlers.TimedRotatingFileHandler('log.txt', when='midnight', interval=1, backupCount=7)\n # fh.setLevel(logging.DEBUG)\n # fh_formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d - %(threadName)s - %(filename)s.%(funcName)s.%(lineno)s - %(levelname)s\\n%(message)s\\n\\n', datefmt='%Y/%m/%d %H:%M:%S')\n # fh.setFormatter(fh_formatter)\n\n #put the handlers to use\n logger.addHandler(sh)\n # logger.addHandler(fh)", "def init_log(log_level=logging.DEBUG):\n now = time.time()\n ts = datetime.datetime.fromtimestamp(now).strftime('%Y%m%d')\n file_name = os.path.abspath(os.path.join(os.getcwd(), '..', 'traffic_logs', f'{ts}_traffic.log'))\n folder, _ = os.path.split(file_name)\n Path(folder).mkdir(parents=True, exist_ok=True)\n\n # create formatter and add it to the handlers\n log_format = '[%(asctime)s][%(name)s][%(levelname)s] %(message)s'\n\n logging.basicConfig(filemode='a',\n format=log_format,\n datefmt='%H:%M:%S',\n level=logging.ERROR,\n stream=sys.stdout,\n # filename=file_handler\n )\n\n formatter = logging.Formatter(log_format)\n\n # create file handler which logs even debug messages\n file_handler = logging.FileHandler(file_name)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(log_level)\n\n std_out = logging.StreamHandler(sys.stdout)\n std_out.setFormatter(formatter)\n std_out.setLevel(log_level)\n\n # This for avoiding streams to log to root's stderr, which prints in red in jupyter\n root_logger = logging.getLogger()\n for handler in root_logger.handlers:\n # continue\n root_logger.removeHandler(handler)\n\n # add the handlers to the logger\n root_logger.addHandler(file_handler)\n\n # By default the install() function installs a file_handler on the root root_logger,\n # this means that log messages from your code and log messages from the\n # libraries that you use will all show up on the terminal.\n coloredlogs.install(level=log_level, fmt=log_format, stream=sys.stdout)", "def log_error(self, message):\n # log the datetime+message to error_log.txt\n curr_time = datetime.datetime.now().strftime(\"%H:%M:%S \"\n \"%Y-%m-%d\")\n with open(ERROR_FILE_PATH, \"a+\") as error_file:\n error_file.write(\"{} $ {}\\n\".format(curr_time, message))", "def log(message):\n\n print(message)\n\n with open(LOG_FILE, \"a\") as log:\n log.write(\n datetime.datetime.now().strftime(LOG_TIMESTAMP) + \" \" + message + \"\\n\"\n )\n\n # Remove the oldest lines if greater than LOG_SIZE\n with open(LOG_FILE) as f:\n contents = f.read()\n\n lines_num = contents.count(\"\\n\")\n if lines_num > LOG_SIZE:\n lines = contents.split(\"\\n\")\n line_index = lines_num - LOG_SIZE\n lines = lines[line_index:]\n\n with open(LOG_FILE, \"w\") as f:\n f.write(\"\\n\".join(lines))", "def append_to_log(file_name: str, text: str):\n if not log_file_exists(file_name):\n create_log_file(file_name)\n log = open(get_complete_file_name(file_name), 'a')\n log.write(text)\n log.write(\"\\n\")\n log.close()", "def qaPrint(log, message):\n # current date and time as string + message. 
example: [Oct 25 01:52:33.000001] TC1 - Passed\n log_message = getCurTime(\"[%b %d %H:%M:%S.%f]\") + \" \" + message\n # prints log_message\n print log_message\n # writes message to a log file\n log.write(log_message + \"\\n\")", "def _create_logfile(self):\r\n if not self.console_redirect:\r\n return None\r\n\r\n # PCU_logs.robot need a timestamp for console logs as can be run several times\r\n if self.name == self.log_test.replace('.robot', ''):\r\n return open('{0}\\{1}_console_log_{2}'.format(\r\n self.output_dir_path, self.name, datetime.now().strftime(\"%m%d%H%M\")), \"w+\")\r\n else:\r\n return open('{0}\\{1}_console_log'.format(self.output_dir_path, self.name), \"w+\")", "def log(self, msg, alwaysPrint = False):\n if self.fileObject is None or alwaysPrint:\n print msg\n if self.fileObject:\n self.fileObject.write( msg + '\\n' )", "def printlog(\n *objects, \n file_path='logs/default.log', \n linebreak=True, \n encoding='utf-8', \n creative=False, \n printable=True):\n if printable:\n print('{}: {}'.format(datetime.datetime.now().replace(microsecond=0).time(), *objects))\n if creative:\n if not os.path.isdir(os.path.dirname(file_path)):\n os.mkdir(os.path.dirname(file_path))\n if not os.path.isfile(file_path):\n open(file_path, 'a+').close()\n assert os.path.isdir(os.path.dirname(file_path)), 'Log.log: directory {} does not exist'.format(file_path)\n assert os.path.isfile(file_path), 'Log.log: file {} does not exist'.format(os.path.basename(file_path))\n with open(file_path, 'a') as file:\n if linebreak:\n file.write('{}: {}\\n'.format(datetime.datetime.now().time(), *objects))\n else:\n file.write('{}: {}'.format(datetime.datetime.now().time(), *objects))", "def SetupLogging(level=logging.WARNING, log_file_name=None):\n logging.basicConfig(\n format='%(levelname)-8s %(asctime)-8s %(message)s',\n datefmt='%H:%M:%S',\n level=level,\n **({'filename': log_file_name} if log_file_name else {}))\n logging.Formatter.converter = time.gmtime\n logging.info(time.strftime('%Y.%m.%d %Z', time.gmtime()))", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def log_tofile(self, inst):\n self._tick += 1\n if self._tick >= self._second:\n self.logger.log(inst)\n self._tick = 0", "def logger(start_time, file):\n with open(file, \"a\") as text:\n text.write(\"\"\"\n\n Current date and time: {}\n Program ran in {} seconds.\n \"\"\".format(datetime.datetime.now(), time.process_time() - start_time))\n\n return 'hello'", "def _log_to_file(path: str) -> None:\n if path:\n fh = logging.FileHandler(path)\n LOGGER.addHandler(fh)\n LOGGER.setLevel(logging.DEBUG)", "def create_log_file(path):\n with open(path, 'w'):\n pass", "def log(self, message):\n self._logger.write(message)", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def setup_logging(log_file_path,timestamp_filename=True,max_log_size=104857600):\n assert( len(log_file_path) > 1 )\n assert( type(log_file_path) == type(\"\") )\n global 
logger\n\n # Make sure output dir(s) exists\n log_file_folder = os.path.dirname(log_file_path)\n if log_file_folder is not None:\n if not os.path.exists(log_file_folder):\n os.makedirs(log_file_folder)\n\n # Add timetamp for filename if needed\n if timestamp_filename:\n # http://stackoverflow.com/questions/8472413/add-utc-time-to-filename-python\n # '2015-06-30-13.44.15'\n timestamp_string = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H.%M.%S%Z\")\n # Full log\n log_file_path = add_timestamp_to_log_filename(log_file_path,timestamp_string)\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n # 2015-07-21 18:56:23,428 - t.11028 - INFO - ln.156 - Loading page 0 of posts for u'mlpgdraws.tumblr.com'\n formatter = logging.Formatter(\"%(asctime)s - t.%(thread)d - %(levelname)s - ln.%(lineno)d - %(message)s\")\n\n # File 1, log everything\n # https://docs.python.org/2/library/logging.handlers.html\n # Rollover occurs whenever the current log file is nearly maxBytes in length; if either of maxBytes or backupCount is zero, rollover never occurs.\n fh = logging.handlers.RotatingFileHandler(\n filename=log_file_path,\n # https://en.wikipedia.org/wiki/Binary_prefix\n # 104857600 100MiB\n maxBytes=max_log_size,\n backupCount=10,# Ten should be enough to debug but not use too mcuh storage\n )\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # Console output\n ch = logging.StreamHandler()\n ch.setLevel(config.console_log_level)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n logging.info(\"Logging started.\")\n return logger", "def setup_logging(log_file_path,timestamp_filename=True,max_log_size=104857600):\n assert( len(log_file_path) > 1 )\n assert( type(log_file_path) == type(\"\") )\n global logger\n\n # Make sure output dir(s) exists\n log_file_folder = os.path.dirname(log_file_path)\n if log_file_folder is not None:\n if not os.path.exists(log_file_folder):\n os.makedirs(log_file_folder)\n\n # Add timetamp for filename if needed\n if timestamp_filename:\n # http://stackoverflow.com/questions/8472413/add-utc-time-to-filename-python\n # '2015-06-30-13.44.15'\n timestamp_string = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H.%M.%S%Z\")\n # Full log\n log_file_path = add_timestamp_to_log_filename(log_file_path,timestamp_string)\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n # 2015-07-21 18:56:23,428 - t.11028 - INFO - ln.156 - Loading page 0 of posts for u'mlpgdraws.tumblr.com'\n formatter = logging.Formatter(\"%(asctime)s - t.%(thread)d - %(levelname)s - ln.%(lineno)d - %(message)s\")\n\n # File 1, log everything\n # https://docs.python.org/2/library/logging.handlers.html\n # Rollover occurs whenever the current log file is nearly maxBytes in length; if either of maxBytes or backupCount is zero, rollover never occurs.\n fh = logging.handlers.RotatingFileHandler(\n filename=log_file_path,\n # https://en.wikipedia.org/wiki/Binary_prefix\n # 104857600 100MiB\n maxBytes=max_log_size,\n backupCount=10000,# Ten thousand should be enough to crash before we reach it.\n )\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # Console output\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n logging.info(\"Logging started.\")\n return logger", "def initLogging ( logFile ):\n logging.basicConfig(\n filename=logFile,\n level=logging.INFO,\n format='%(asctime)s %(levelname)-8s %(message)s',\n filemode='w'\n )", "def 
write_log(self, log_output):\r\n with open(self.log_link, \"a\") as log_file:\r\n log_file.writelines(log_output + \"\\n\")", "def log_print(message):\n print('[%s]: %s'%(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()), message))", "def config(file, log_level=logging.INFO):\n logging.basicConfig(level=log_level, format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S',\n filename=file, filemode='w')\n formatter = logging.Formatter('%(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n handler_console = logging.StreamHandler(sys.stdout)\n handler_console.setFormatter(formatter)\n handler_console.setLevel(log_level)\n logging.getLogger('').addHandler(handler_console)", "def add_line_in_log():\n logging.info(' ' + '-' * 60 + '\\n')", "def log(logfile, st):\n with open(logfile, 'a') as f:\n f.write(st + '\\n')\n print(st)", "def init_log(path):\n file = open(path, 'w+')\n file.close()", "def log_message(self, text):\n if self.message_log_file != -1:\n #open file in append mode and write line to file\n with open(self.message_log_file, 'a') as log_file:\n log_file.write(text+'\\n')\n return", "def write_log(self, level, message): \n \n level = level.lower()\n #print(level, message,str(self.logger))\n if level == 'debug':\n self.logger.debug('%s', message)\n elif level == 'error':\n self.logger.error('%s', message)\n elif level == 'critical':\n self.logger.critical('%s', message)\n elif level == 'warning':\n self.logger.warning('%s', message)\n else:\n self.logger.info('%s', message)", "def init_logging(to_file=False, filename=None):\n if to_file:\n if filename is None:\n filename = timestamp() + '.log'\n logging.basicConfig(level=logging.INFO, format='%(message)s', filename=filename)\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) # write to stdout + file\n print('Logging to:', filename)\n else:\n logging.basicConfig(level=logging.INFO, format='%(message)s')", "def _write(self, data, mode):\n check_path(self.config_path)\n\n with open(self.log_file, mode) as log:\n if mode == 'a' and self.add_time:\n msg = self.TIME_TEMPLATE.format(time=strftime('%c'), error_msg=data)\n else:\n msg = data\n\n log.write(msg.encode(self._encoding, 'ignore'))" ]
[ "0.76535463", "0.7256361", "0.7207056", "0.71898395", "0.7076056", "0.70700955", "0.70448047", "0.7034109", "0.70302784", "0.70248175", "0.69756", "0.69753206", "0.6955353", "0.6929519", "0.6922523", "0.6889003", "0.68705016", "0.67957056", "0.67859244", "0.6784583", "0.67739236", "0.67739236", "0.6710875", "0.6687953", "0.668233", "0.6679", "0.6676844", "0.66468", "0.6627842", "0.6624348", "0.66172165", "0.65891665", "0.65784854", "0.65758324", "0.65626824", "0.65600014", "0.6549325", "0.6540912", "0.6532669", "0.65133387", "0.65024203", "0.64859635", "0.6460058", "0.6436196", "0.6434271", "0.642963", "0.6425511", "0.64119107", "0.640954", "0.6407744", "0.6388949", "0.6386092", "0.6383041", "0.6381035", "0.63672775", "0.63351667", "0.63134366", "0.63017285", "0.6301207", "0.6296747", "0.6284195", "0.6278981", "0.6275181", "0.6231327", "0.62289166", "0.621978", "0.6203161", "0.6199678", "0.6197462", "0.61803824", "0.61496955", "0.61453414", "0.6139319", "0.6131728", "0.61298066", "0.61297196", "0.61288625", "0.6118865", "0.6106599", "0.61020577", "0.60982263", "0.6097288", "0.60970265", "0.6075417", "0.60736513", "0.6072878", "0.6069317", "0.60574603", "0.60528743", "0.6050139", "0.60485584", "0.6033375", "0.6031978", "0.6014795", "0.60124254", "0.6006895", "0.60034925", "0.60021746", "0.5998621", "0.5993947", "0.5989485" ]
0.0
-1
Instance initialisation to handle the output logging.
def __init__(self, parent, out, color):
    self.color = color or "white"
    self.out = out
    self.parent = parent
    self.first_write = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n\n self.logger = logging.getLogger('sound-count')\n\n self.logger.setLevel(logging.DEBUG)\n\n self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')\n\n self.stdout_handler = logging.StreamHandler()\n self.stdout_handler.setFormatter(self.formatter)\n\n self.file_handler = logging.FileHandler(config['LOG_PATH'])\n self.file_handler.setFormatter(self.formatter)\n\n self.logger.addHandler(self.stdout_handler)\n self.logger.addHandler(self.file_handler)", "def __init__(self):\n ## Creating the looger\n self.logger = logging.getLogger('iLiner_Logger')\n ## Setting the level for the logger\n self.logger.setLevel(logging.DEBUG)\n ## Creating the handler\n stdout_handler = logging.StreamHandler(sys.stdout)\n ## Creating the formatter\n formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\n stdout_handler.setFormatter(formatter)\n stdout_handler.setLevel(logging.DEBUG)\n self.logger.addHandler(stdout_handler)", "def __init__(self):\n\n self._logger = logging.getLogger(__name__)", "def _init_logging(self):\n # Setup logging variable\n self.log = logging.getLogger(\"collection-log\")\n self.log.setLevel(logging.INFO)\n self.formatter = logging.Formatter(\"%(asctime)s %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n\n # Log to stdout\n streamhandler = logging.StreamHandler()\n streamhandler.setLevel(logging.INFO)\n streamhandler.setFormatter(self.formatter)\n self.log.addHandler(streamhandler)", "def __init__(self):\n\n self.__logger = logging.getLogger()\n\n formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: ' +\n '%(message)s')\n\n file_handler = RotatingFileHandler('.log', 'a', 1000000, 1)\n\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(formatter)\n self.__logger.addHandler(file_handler)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.INFO)\n self.__logger.addHandler(stream_handler)", "def __init__(self):\n self.log = logging.getLogger()", "def __init__(self):\n self.logger = logger()", "def initLogger(self):\n loglevel = self.loglevels[self.loglevel]\n log_format = '%(asctime)s name=%(name)s loglevel=%(levelname)s message=%(message)s'\n logging.basicConfig(format=log_format,\n level=loglevel)\n \tmultiprocessing.log_to_stderr(loglevel)", "def __init__(self):\n self.logger = logging.getLogger(self.ASSIGNMENT)\n self.logger.setLevel(\"INFO\")\n consoleLog = logging.StreamHandler()\n self.logger.addHandler(consoleLog)", "def initialize_logger(self):\n\n # initialize logger\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # logger console handler\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(logging.Formatter(\"\"))\n logger.addHandler(console_handler)", "def initLogging(self):\n logging.basicConfig(level=self.loglevel, stream=sys.stderr)", "def __init__(self, default_level=logging.WARNING):\n # All loggers are an attr of self for tab completion in iPython\n # (with . 
replaced with _)\n self._loggerdict = logging.Logger.manager.loggerDict\n for name, logger in self._loggerdict.iteritems():\n attr = name.replace('.', '_')\n setattr(self, attr, logger)\n\n if len(logging.root.handlers) == 0:\n # The default level is INFO\n fmt='%(levelname)-7s | %(asctime)-23s | %(name)-8s | %(message)s'\n logging.basicConfig(format=fmt, level=default_level)\n logging.StreamHandler.emit = self._emit_wrap", "def setup_class(self):\n # Initialize instance variable(s)\n self.log = logging.getLogger()\n self.log.level = logging.DEBUG", "def __init__(self):\n\n self.log = logger.getLogger(name=\"directord\")", "def _init():\n global logger\n logger = logging.getLogger(\"Log\")", "def init_logger(self):\n\n if self.args.log_level:\n log_level = getattr(logging, self.args.log_level)\n if coloredlogs:\n coloredlogs.install(level=log_level, fmt=LOG_FMT)\n else:\n logging.basicConfig(level=log_level)\n ch = logging.StreamHandler()\n formatter = logging.Formatter(LOG_FMT)\n ch.setFormatter(formatter)\n elif coloredlogs:\n coloredlogs.install(level='INFO', fmt=LOG_FMT)\n\n if coloredlogs:\n effective_level = coloredlogs.get_level()\n else:\n effective_level = logger.getEffectiveLevel()\n\n # make sure warning and error display at any effective level\n if effective_level > logging.WARNING:\n self.warning = logger.critical\n else:\n self.warning = logger.warning\n\n if effective_level > logging.ERROR:\n self.error = logger.critical\n else:\n self.error = logger.error\n\n self.info = logger.info\n self.debug = logger.debug\n self.exception = logger.exception\n self.critical = logger.critical", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.log = logging.getLogger(logger_name(__name__))", "def _instanciate_logger(self):\n\t\tself._logger = logging.getLogger('main')\n\t\tself._logger.setLevel(logging.DEBUG)\n\t\tself._logger.addHandler(logging.StreamHandler())", "def __init__(self, level=logging.INFO):\r\n logging.basicConfig(handlers=[self.InterceptHandler()], level=level)\r\n logger.remove()\r\n logger.add(sys.stdout, level=level, enqueue=True)\r\n logger.log(level, f\"Level: {level}\")\r\n self.level = level\r\n self.ilogger = logger", "def __init__(self):\r\n self.logger = dict()", "def __init__(self):\n self.logger = logging.getLogger(FeatureEngineeringLogger.__name__)", "def __init__(self, name):\n self.mylog = logging.getLogger(name)\n self.handler = logging.StreamHandler()\n self.formatter = MyFormatter('%(levelname)s: %(message)s')\n self.handler.setFormatter(self.formatter)\n self.mylog.addHandler(self.handler)\n self.mylog.setLevel(logging.INFO)\n self.handler.setLevel(logging.INFO)\n self.debug_level = 0\n self.verbosity = False", "def setup_log(self):\n self.logger, _ = get_logger(\"datatransform\")", "def __init__(self):\n if 'LOG_LEVEL' in os.environ:\n log_level = os.environ['LOG_LEVEL']\n else:\n log_level = 'INFO'\n\n logging.basicConfig(\n format='%(levelname)s:%(message)s',\n level=log_level)\n\n if 'TOLERATE' in os.environ:\n self.tolerance_name(os.environ['TOLERATE'])\n else:\n self.tolerance_name('Medium')\n\n self._max_severity_level = 0\n self._filename = None\n self._show_all = False\n\n if 'SHOW_ALL_VULNERABILITIES' in os.environ:\n self.show_all(True)", "def __init__(self):\n\n #initiate logging\n file_name = os.path.splitext(sys.argv[0])\n tc_name = file_name[0].split('/')[-1]\n log_name = os.path.join(config.LOG_DIR, ''.join([tc_name, '.log']))\n log.init(log_name)\n self.logging = logging.getLogger('objects')", "def 
__init__(self, *args, **kwargs):\n super(BaseHandler, self).__init__(*args, **kwargs)\n self.log = logbook.Logger(self.LOGGER)", "def _init_logger(self):\n self.logger = logging.getLogger('WSClientAPILogger')\n self.logger.setLevel(logging.DEBUG)\n self.logger_handler = logging.FileHandler(self.__class__.__name__ + '.log')\n self.logger_handler.setLevel(logging.DEBUG)\n self.logger_formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%d-%m %H:%M:%S')\n self.logger_handler.setFormatter(self.logger_formatter)\n self.logger.addHandler(self.logger_handler)", "def __init__(self, logfilename='logfile.log'):\n # Create file handler (output to file)\n # \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n # \"[%(asctime)s %(process)d] %(message)s\"\n # fileFormatter = logging.Formatter(\"%(asctime)s : %(threadName)-12.12s : %(levelname)-5.5s : %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\")\n fileFormatter = logging.Formatter(\"%(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\")\n fileHandler = logging.FileHandler(filename=logfilename)\n fileHandler.setFormatter(fileFormatter)\n fileHandler.setLevel(logging.INFO)\n self.fileHandler = fileHandler\n\n # Create console handler (output to console/terminal)\n # consoleFormatter = logging.Formatter(\"%(name)-12s : %(levelname)-8s : %(message)s\")\n consoleFormatter = logging.Formatter(\"%(message)s\")\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(consoleFormatter)\n consoleHandler.setLevel(logging.INFO)\n self.consoleHandler = consoleHandler\n\n # Create logger and add handlers\n # logger = logging.getLogger(__name__)\n logger = logging.getLogger('')\n logger.setLevel(logging.INFO)\n logger.addHandler(fileHandler)\n logger.addHandler(consoleHandler)\n self.logger = logger\n\n # from combo (when use candle)\n # for log in [logger, uno_data.logger]:\n # log.setLevel(logging.DEBUG)\n # log.addHandler(fh)\n # log.addHandler(sh)\n\n self.logger.info('{}'.format('-' * 90))\n self.logger.info(datetime.now())\n self.logger.info(f'Machine: {platform.node()} ({platform.system()}, {psutil.cpu_count()} CPUs)')\n #return logger", "def __init__(self):\n self._logger = None\n self._enable_debug = False", "def setup_class(cls):\n if os.path.exists(logfilename):\n os.remove(logfilename)\n log = logutils.get_logger(__name__)\n log.root.handlers = []\n logutils.config(mode='standard', console_lvl='stdinfo',\n file_name=logfilename)", "def initialize_logging_handler(self):\n self._ch = logging.StreamHandler()\n self._ch.setLevel(logging.DEBUG)\n self._ch.setFormatter(logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'))", "def __init__(self, log=False):\n self.log = log", "def __init__(self):\n #path = \"/\".join( ( os.path.abspath( __file__ ).replace( \"\\\\\", \"/\" ) ).split( \"/\" )[:-1])\n #logging.config.fileConfig( os.path.join ( path, \"..\\\\resources\\\\config-log.ini\" ), disable_existing_loggers=False ) # working in local\n #logging.config.fileConfig ( \"config-log.ini\", disable_existing_loggers=False) # working in local\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n # create formatter and add it to the handlers\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n #fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # add the handlers to the logger\n self.logger.addHandler(ch)\n # TODO decorator it doesnt create 
file handler which logs even debug messages\n # fh = logging.FileHandler('spam.log')\n # fh.setLevel(logging.DEBUG)\n # create console handler with a higher log level\n # fh.setFormatter(formatter)\n # logger.addHandler(fh)", "def __setup_logging(self):\n\n loglevel = logging.INFO\n if self.config[\"verbose\"]:\n loglevel = logging.DEBUG\n\n FORMAT = '[%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s'\n if self.config[\"log\"]:\n logging.basicConfig(format=FORMAT, level=loglevel, filename=self.config[\"log\"])\n else:\n logging.basicConfig(format=FORMAT, level=loglevel)", "def __init__(self, UI, strm=None):\n logging.Handler.__init__(self)\n # replace Handler's instance-specific lock with the shared class lock\n # to ensure that only one instance of this handler can write to\n # the console at a time\n self.lock = TerminalHandler.sharedlock\n if strm is None:\n strm = sys.stderr\n self.stream = strm\n self.formatter = None\n self.UI = UI", "def __init__(self, out_queue):\n logging.Handler.__init__(self)\n self.oqueue = out_queue\n self.session = None", "def __init__(self):\r\n self.file_object = './ExecutionLogs/PredictFromModel.log'\r\n\r\n \"\"\" Initialize logger class for log writing \"\"\"\r\n self.log_writer = logger.logger(self.file_object)", "def __init__(self, log):\n self.log = log\n self.logger = logging.getLogger(self.__class__.__name__)", "def _initialize_logging(self):\n LOG_CFG = os.environ.get('LOG_CFG', 'LOCAL')\n configure_logging(LOG_CFG)\n self.logger = logging.getLogger(self.__class__.__name__)", "def _init_logger(self):\n # Create log directory, if it doesn't already exist.\n self._create_directory(directory=self._log_directory)\n log_filename = \"{0}/{1}.log\".format(self._log_directory, self._program)\n\n # Add the date to the log file names.\n logging.basicConfig(\n filename=log_filename,\n filemode='w',\n level=logging.DEBUG,\n format='%(asctime)s|%(name)s|%(levelname)-5s| %(message)s',\n datefmt='%Y-%m-%d %I:%M:%S %p')\n\n # define a Handler which writes LOG messages or higher to the sys.stderr\n console = logging.StreamHandler()\n #\n # Note: Anything above the logging level is displayed to stdout.\n #\n # Level Numeric value\n # CRITICAL\t50\n # ERROR \t40\n # WARNING\t30\n # LOG 25 (our log level)\n # INFO\t 20\n # DEBUG \t10\n # NOTSET\t0\n #\n # Add a logging level to always display to stderr.\n logging.addLevelName(self._LOG_LEVEL, self._LOG_NAME)\n if self._debug:\n console.setLevel(logging.DEBUG)\n else:\n console.setLevel(self._LOG_LEVEL)\n # Set a format which is simpler for console use.\n formatter = logging.Formatter('%(name)s|%(levelname)-5s| %(message)s')\n console.setFormatter(formatter)\n # Add the handler to the root logger.\n logging.getLogger('').addHandler(console)\n self._logger = logging.getLogger()", "def __init__(self):\n # The logging object. 
\n # Example: self.log.info(f\"Current value of var: {my_var}\")\n self.log = logging.getLogger()", "def __init__( self, out = None ):\n self._out = out if out is not None else sys.stdout", "def init() -> None:\n\n\t\tif Logging.logger:\n\t\t\treturn\n\n\t\tLogging.enableFileLogging \t\t= Configuration.get('logging.enableFileLogging')\n\t\tLogging.enableScreenLogging\t\t= Configuration.get('logging.enableScreenLogging')\n\t\tLogging.stackTraceOnError\t\t= Configuration.get('logging.stackTraceOnError')\n\t\tLogging.enableBindingsLogging\t= Configuration.get('logging.enableBindingsLogging')\n\t\tLogging.queueSize\t\t\t\t= Configuration.get('logging.queueSize')\n\n\t\tLogging._configureColors(Configuration.get('cse.console.theme'))\n\n\t\tLogging.logger\t\t\t\t\t= logging.getLogger('logging')\t\t\t# general logger\n\t\tLogging.loggerConsole\t\t\t= logging.getLogger('rich')\t\t\t\t# Rich Console logger\n\t\tLogging._console\t\t\t\t= Console()\t\t\t\t\t\t\t\t# Console object\n\t\tLogging._richHandler\t\t\t= ACMERichLogHandler()\n\n\t\tLogging.setLogLevel(Configuration.get('logging.level'))\t\t\t\t\t# Assign the initial log level\n\n\t\t# Add logging queue\n\t\tLogging.queue = Queue(maxsize = Logging.queueSize)\n\t\tLogging.queueOn()\n\n\t\t# List of log handlers\n\t\tLogging._handlers = [ Logging._richHandler ]\n\t\t#Logging._handlers = [ ACMERichLogHandler() ]\n\n\t\t# Log to file only when file logging is enabled\n\t\tif Logging.enableFileLogging:\n\t\t\tfrom ..services import CSE as CSE\n\n\t\t\tlogpath = Configuration.get('logging.path')\n\t\t\tos.makedirs(logpath, exist_ok = True)# create log directory if necessary\n\t\t\tlogfile = f'{logpath}/cse-{CSE.cseType.name}.log'\n\t\t\tlogfp = logging.handlers.RotatingFileHandler(logfile,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t maxBytes = Configuration.get('logging.size'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t backupCount = Configuration.get('logging.count'))\n\t\t\tlogfp.setLevel(Logging.logLevel)\n\t\t\tlogfp.setFormatter(logging.Formatter('%(levelname)s %(asctime)s %(message)s'))\n\t\t\tLogging.logger.addHandler(logfp) \n\t\t\tLogging._handlers.append(logfp)\n\n\t\t# config the logging system\n\t\tlogging.basicConfig(level = Logging.logLevel, format = '%(message)s', datefmt = '[%X]', handlers = Logging._handlers)\n\n\t\t# Start worker to handle logs in the background\n\t\tfrom ..helpers.BackgroundWorker import BackgroundWorkerPool\n\t\tLogging._logWorker = BackgroundWorkerPool.newActor(Logging.loggingActor, name = 'loggingWorker', ignoreException = True)\n\t\tLogging._logWorker.start()\t# Yes, this could be in one line but the _logworker attribute may not be assigned yet before the \n\t\t\t\t\t\t\t\t\t# actor callback is executed, and this might result in a None exception\n\n\t\t# React on config update. 
Only assig if it hasn't assigned before\n\t\tfrom ..services import CSE\n\t\tif not CSE.event.hasHandler(CSE.event.configUpdate, Logging.configUpdate):\t\t# type: ignore [attr-defined]\n\t\t\tCSE.event.addHandler(CSE.event.configUpdate, Logging.configUpdate)\t\t\t# type: ignore", "def log_setup(self):\n # Logger initialisation\n logger = logging.getLogger(self.app_name)\n logger.setLevel(logging.DEBUG)\n\n # Creating console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # Creating formatter\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n\n # Adding formatter to ch\n ch.setFormatter(formatter)\n\n # Adding ch to logger\n logger.addHandler(ch)\n\n # Setting the Logger Level (INFO)\n logger.setLevel(logging.INFO)\n\n return logger", "def __init__(self):\n self.config = configs.Configuration()\n self.log = logger.CustomLogger(__name__).get_logger()\n self.output_dir = self.config.getConfigValue('OUTPUT_DIR')\n self.s3_directory = self.config.getConfigValue('S3_FILE_PATH_TRANSFORM')", "def setupLogging(self):\n\t\ttry:\n\t\t\tself.logger = logging.getLogger(__name__)\n\t\t\thandler = RotatingFileHandler(self.logFile, maxBytes=500000, backupCount=5)\n\t\t\tformat = \"%(asctime)s %(levelname)-8s %(message)s\"\n\t\t\thandler.setFormatter(logging.Formatter(format))\n\t\t\thandler.setLevel(logging.INFO)\n\t\t\tself.logger.addHandler(handler)\n\t\t\tself.logger.setLevel(logging.INFO)\n\t\texcept Exception as err:\n\t\t\terrorStr = 'Error initializing log file, ',err\n\t\t\tprint(errorStr)\n\t\t\texit(1)", "def __init__(self,\r\n default_path = None,\r\n default_level = None,\r\n logging_dir = None,\r\n log_file = None,\r\n log_file_dir = None,\r\n log_conf_full = None\r\n ):\r\n self.logger_is_set = False\r\n\r\n '''\r\n Get ready to setup everything.\r\n TO DO: read from structure is badly needed. \r\n '''\r\n self.default_path = default_path\r\n self.default_level = default_level\r\n self.logging_dir = logging_dir\r\n self.log_file = log_file\r\n self.log_file_dir = log_file_dir\r\n self.log_conf_full = log_conf_full\r\n\r\n\r\n\r\n self.setup_logging(self.default_path,\r\n self.default_level,\r\n self.logging_dir,\r\n self.log_file,\r\n self.log_file_dir,\r\n self.log_conf_full\r\n )", "def _setup_default_logger(self):\n #print(f\"setup default logger is called by {self}\")\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter(\n '%(process)d-%(levelname)s-%(asctime)s.%(msecs)02d-%(message)s',\n datefmt='%Y-%m-%d,%H:%M:%S'))\n self.logger.addHandler(stream_handler)\n self.logger.propagate = True # don't propagate to the root logger! 
", "def initialize_log():\n logging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S',\n )", "def init_logger():\n logging.basicConfig(\n stream=sys.stdout,\n level=logging.INFO,\n format='%(asctime)s.%(msecs)03d %(name)s:%(lineno)d %(levelname)s %(message)s',\n datefmt='%m-%d %H:%M:%S',\n )", "def __init__(self):\n # Hardware initialization\n gpio.init()\n # Logging\n self._logger = logging.getLogger(' '.join([__name__, __version__]))\n self._logger.debug(\n 'Instance of %s created: %s',\n self.__class__.__name__,\n str(self)\n )", "def __init__(self, console_output, console_stream):\n self.console_output = console_output\n self.console_stream = console_stream", "def __init__(self):\n\n self.Connection = None\n self.logger = LogFactory().getLibLogger()", "def init_logging():\n\n logger = logging.getLogger()\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)", "def __init__(self):\n super(ForceBalanceTestResult,self).__init__()\n self.logger = forcebalance.output.getLogger('forcebalance.test.results')", "def init() -> None:\n log_format = logging.Formatter(\"%(levelname)s || %(name)s || %(asctime)s || %(message)s\")\n\n log_file = Path(\"logs\", \"rl_snake.log\")\n log_file.parent.mkdir(exist_ok=True)\n\n file_handler = handlers.RotatingFileHandler(\n log_file,\n maxBytes=3000000,\n backupCount=5\n )\n file_handler.setFormatter(log_format)\n\n root_logger = logging.getLogger()\n root_logger.addHandler(file_handler)\n root_logger.setLevel(logging.DEBUG if constants.Misc.debug else logging.INFO)\n\n root_logger.info(\"Root logger initilised\")", "def __init__(self, logger=logging.getLogger(\"dummy\")):\n super(ShowMap, self).__init__()\n self.logger = logger", "def init(self) -> None:\n logger.debug(f\"[nbtutorial]: Outdir is: {self.outdir}\")", "def configure(cls):\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n logger_handler = logging.StreamHandler()\n logger.addHandler(logger_handler)\n logger_handler.setFormatter(logging.Formatter('%(message)s'))\n cls.logger = logger", "def _configure_logging(self):\n pass", "def _configure_logging(self):\r\n self._logger = logging.getLogger('AWSIoTPythonSDK.core')\r\n self._logger.setLevel(logging.ERROR)\r\n self._streamHandler = logging.StreamHandler()\r\n self._formatter = logging.Formatter(\r\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n self._streamHandler.setFormatter(self._formatter)\r\n self._logger.addHandler(self._streamHandler)", "def __init__(self, log_dir):\n self.writer = SummaryWriter(log_dir)", "def __init_logging(self):\n\n logger = logging.getLogger('__name__')\n if os.path.exists(constants.LOG_FILE):\n logger.setLevel(logging.DEBUG)\n logger_file_handler = logging.FileHandler(constants.LOG_FILE)\n logger_formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')\n logger_file_handler.setFormatter(logger_formatter)\n logger.addHandler(logger_file_handler)\n else:\n logger.disabled = True", "def init_logging():\n logger.setLevel(logging.DEBUG)\n # set a common log format\n logFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\n # setup our rotating file handler and assign our common formatter to it\n rotating_file_handler = RotatingFileHandler('my_log.log', maxBytes=200000, backupCount=10)\n 
rotating_file_handler.setFormatter(logFormatter)\n logger.addHandler(rotating_file_handler)\n \n if DEBUG:\n # print to stdout if we are debugging\n stream_handler = logging.StreamHandler(sys.stdout)\n stream_handler.setFormatter(logFormatter)\n logger.addHandler(stream_handler)", "def __add_logger(self):\n #FIXME: adapt to the settings that are proper for you\n self.__logger = logging.getLogger('lib-autopilot')\n self.__logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n self.__logger.addHandler(ch) \n # TODO: CHANGE from Console to file handler\n # fh = logging.FileHandler('lib-autopilot.log')\n # fh.setLevel(logging.DEBUG)\n #fh.setFormatter(formatter)\n #self.__logger.addHandler(fh)", "def init_logger():\n lformat = \"%(asctime)s [%(levelname)-5.5s] [%(name)s] [%(threadName)-12.12s] %(message)s\"\n\n logging.basicConfig(\n level=logging.INFO,\n format=lformat,\n )\n\n file_handler = handlers.RotatingFileHandler(\n \"{0}/{1}.log\".format('.', 'meta-meta-hive'),\n maxBytes=(50*1024*1024),\n backupCount=7\n )\n file_handler.setFormatter(logging.Formatter(lformat))\n logging.getLogger().addHandler(file_handler)\n return", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def __init__(self):\n super(GithubCollector, self).__init__()\n config_file = ('collectors.cfg')\n log_file = self.config['Github']['log_file']\n logging.config.fileConfig(config_file,\n defaults={'GithubCollector': log_file}\n )\n self.logger = logging.getLogger('GithubCollector')\n self.elasticsearch = Elasticsearch(['localhost:9200'])\n self.redis = redis.Redis(host='127.0.0.1', port=6379, password='')\n self.timestamp = datetime.date.today().isoformat()", "def __init__(self, logname, loglevel, logger):\n\n self.logger = logging.getLogger(logger)\n self.logger.setLevel(loglevel)\n\n fh = logging.FileHandler(logname)\n fh.setLevel(loglevel)\n\n # ch = logging.StreamHandler()\n # ch.setLevel(logging.DEBUG)\n\n formatter = self.format_dict[int(loglevel)]\n fh.setFormatter(formatter)\n # ch.setFormatter(formatter)\n\n self.logger.addHandler(fh)\n # self.logger.addHandler(ch)", "def setup_logging(self):\n console_handler = logging.StreamHandler()\n request_logging.assign_request_filter(console_handler,\n self.additional_fields)\n logging.basicConfig(level=self.level,\n format=self.format_string,\n handlers=[console_handler])\n for handler in logging.root.handlers:\n handler.setFormatter(RedactionFormatter(handler.formatter))\n logger = logging.getLogger(__name__)\n logger.info('Established logging defaults')\n self._setup_log_levels()", "def __init__(self, params):\n self.logger = logging.getLogger(\"simple\")\n self.params = params", "def logger(self):\n pass", "def __init__(self):\r\n super(LogParser, self).__init__([CmdQueryHandler(),\r\n UpdateQueryHandler(),\r\n StandardQueryHandler(),\r\n TimeLineHandler()])", "def __init__(self):\n self.logger = logging.getLogger(__name__)\n self.logger.debug(\"Initializing Services for module - %s\"\n %(ENCRYPT_DECRYPT_MODULE_NAME)\n )", "def _setup_logger():\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n\n log_handle = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"[%(levelname)s] (%(asctime)s) - %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\"\n )\n log_handle.setFormatter(formatter)\n root.addHandler(log_handle)\n\n logging.info(\"Initializing snakes\")", "def 
init_logs() -> None:\n logging.basicConfig(\n filename=\"logs.txt\",\n filemode=\"w\",\n format=\"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\",\n level=logging.ERROR,\n )\n\n formatter = logging.Formatter(\n \"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\"\n )\n\n global logger\n logger = logging.getLogger(__name__)\n\n # simlogger = logging.getLogger(\"netsquid\")\n # simlogger.setLevel(logging.DEBUG)\n # fhandler = logging.FileHandler(\"simlogs.txt\", mode=\"w\")\n # fhandler.setFormatter(formatter)\n # simlogger.addHandler(fhandler)\n\n # shandler = logging.StreamHandler(stream=sys.stdout)\n # shandler.setLevel(logging.ERROR)\n # shandler.setFormatter(formatter)\n # simlogger.addHandler(shandler)", "def _set_output_file(self):\n dictConfig(self.DEFAULT_LOGGING)", "def __init__(self, level, general_log_path, outputs_folder):\n self.log_level = level\n\n # self.general_log_file = general_log_path.open('w')\n self.general_log_file = GCOpen(general_log_path, 'w')\n self.general_log_file.open()\n\n self.file_outputs_dir = outputs_folder / 'output_files'\n # self.file_outputs_dir.mkdir(exist_ok=True)\n\n exp_name = str(outputs_folder).split('/')[-1]\n\n self.summary_writer = SummaryWriter(log_dir=str(TEMP_FOLDER),\n filename_suffix='.' + exp_name)\n tf_filename = find_tf_event(exp_name)\n self.sw_local_path = Path(TEMP_FOLDER) / tf_filename\n self.sw_gc_path = outputs_folder / tf_filename\n\n self.log(\"Starting new experiment at \" +\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n self.log(\"User: \" + getpass.getuser())\n self.log(\"Host: \" + socket.gethostname())\n\n Logger.unique_logger = self", "def init_logs(self):\n\n handler = logging.FileHandler(self.app.config['LOG'])\n handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))\n self.app.logger.addHandler(handler)\n if self.app.config.get(\"LOG_LEVEL\") == \"DEBUG\":\n self.app.logger.setLevel(logging.DEBUG)\n elif self.app.config.get(\"LOG_LEVEL\") == \"WARN\":\n self.app.logger.setLevel(logging.WARN)\n else:\n self.app.logger.setLevel(logging.INFO)\n self.app.logger.info('Startup with log: %s' % self.app.config['LOG'])", "def __init__(self):\n super(StdoutWriter, self).__init__()", "def __init__(self):\n super(Log, self).__init__(namespace=\"Floranet\")", "def setup_logger():\n root = logging.getLogger()\n root.setLevel(LOGGING_LEVEL)\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(LOGGING_LEVEL)\n ch.setFormatter(formatter)\n root.addHandler(ch)", "def __init__(self, root_path=ROOT_PATH):\n self.root_path = root_path\n self.logger = Logger(self.__class__.__name__, self.root_path)\n self.log = self.logger.get_log()\n self.instance = None\n self.visualizers = []\n self.subprocess = {}", "def __init__(self, queue):\n logging.Handler.__init__(self)\n self.queue = queue", "def __init__(self, logger=logging.getLogger(\"dummy\")):\n super(OperatorObserver, self).__init__()\n self.logger = logger", "def log_init(self):\n\t\tlayer_id = \"Data\"\n\t\tself.graph[layer_id] = layer_id\n\t\tself.bottoms[layer_id] = None\n\t\tself.output_shape[layer_id] = \"\"\n\t\tself.cur_id = layer_id\n\t\tself.tmp_list = []", "def setup_logger(self):\n setup_logger(logger, 'mayavi.log', mode=self.log_mode)", "def __init__(self, terminal=None):\n # create logger\n self.logger = logging.getLogger('PlaylistMaker')\n self.logger.setLevel(logging.DEBUG)\n \n # create file handler which logs even debug messages\n fh = 
logging.FileHandler('playlistMaker.log')\n fh.setLevel(LogLevel.INFO)\n\n # create console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(LogLevel.DEBUG)\n\n # create formatter\n formatterScreen = logging.Formatter('%(message)s')\n formatterFile = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # add formatter to ch\n ch.setFormatter(formatterScreen)\n fh.setFormatter(formatterFile)\n\n # add ch to logger\n self.logger.addHandler(ch)\n self.logger.addHandler(fh)\n \n self.terminal = None\n if terminal is not None:\n self.terminal = terminal", "def __init__(self):\n self.timestamp = datetime.datetime.now().strftime(\n '%Y-%m-%d-%H%M%S%f')\n self.logfile_template = '{0}-{{0}}.log'.format(self._prog_name)\n self.default_logfile_name = self.logfile_template.format(\n self.timestamp)\n self.make_cli_parser()", "def init_logging(self, log_level):\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(log_level)\n formatter = logging.Formatter('%(asctime)s %(name)s [%(levelname)s] '\n '%(message)s')\n ch = logging.StreamHandler()\n ch.setLevel(log_level)\n ch.setFormatter(formatter)\n self.logger.addHandler(ch)\n self.logger.debug('logging initialized')", "def initialize_logging():\n\n print 'Setting up logging...'\n\n log_level = app.config['LOGGING_LEVEL']\n # Set up default logging for submodules to use STDOUT\n # datefmt='%m/%d/%Y %I:%M:%S %p'\n fmt = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'\n logging.basicConfig(stream=sys.stdout, level=log_level, format=fmt)\n\n # Make a new log handler that uses STDOUT\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(fmt))\n handler.setLevel(log_level)\n\n # Remove the Flask default handlers and use our own\n handler_list = list(app.logger.handlers)\n\n for log_handler in handler_list:\n app.logger.removeHandler(log_handler)\n\n app.logger.addHandler(handler)\n app.logger.setLevel(log_level)\n app.logger.info('Logging handler established')", "def __init__(self, api_path=None, log_path=None, log_level=\"DEBUG\"):\n\n # Construct the log path. \n if log_path:\n self.log_path = log_path\n else:\n defaultlog_path = \"~/Spirent/CTA/Logs/\"\n\n now = datetime.datetime.now()\n defaultlog_path += now.strftime(\"%Y-%m-%d-%H-%M-%S\")\n defaultlog_path += \"_PID\"\n defaultlog_path += str(os.getpid())\n defaultlog_path = os.path.expanduser(defaultlog_path)\n \n # The environment variable overwrites the default path. 
\n self.log_path = os.getenv(\"CTA_LOG_OUTPUT_DIRECTORY\", defaultlog_path) \n\n self.log_path = os.path.abspath(self.log_path)\n self.logfile = os.path.join(self.log_path, \"cta_python.log\") \n\n if not os.path.exists(self.log_path):\n os.makedirs(self.log_path)\n\n # NOTE: Consider limiting the number of log directories that are created.\n # It would mean deleting older directories.\n\n #16/05/18 11:03:53.717 INFO 3078268608 - user.scripting - stc::get automationoptions -suppressTclErrors\n #16/05/18 11:03:53.717 INFO 3078268608 - user.scripting - return false\n #2016-05-19 14:05:56,382 UserID =mjefferson\n #2016-05-19 14:05:56,382 Log Level=INFO\n\n if log_level == \"CRITICAL\":\n log_level = logging.CRITICAL\n elif log_level == \"ERROR\":\n log_level = logging.ERROR\n elif log_level == \"WARNING\":\n log_level = logging.WARNING\n elif log_level == \"INFO\": \n log_level = logging.INFO\n else:\n # DEBUG is the default log level.\n log_level = logging.DEBUG \n \n logging.basicConfig(filename=self.logfile, filemode=\"w\", level=log_level, format=\"%(asctime)s %(levelname)s %(message)s\")\n #logging.Formatter(fmt='%(asctime)s.%(msecs)03d',datefmt='%Y/%m/%d %H:%M:%S')\n # Add timestamps to each log message.\n #logging.basicConfig()\n # The logger is now ready. \n\n logging.info(\"Spirent TestCenter Conformance Application Python API is starting up...\")\n logging.info(\"OS Type = \" + os.name)\n logging.info(\"API Path = \" + api_path)\n logging.info(\"UserID = \" + getpass.getuser())\n logging.info(\"Log Level = \" + logging.getLevelName(log_level)) \n logging.info(\"Current Path = \" + os.path.abspath(os.getcwd())) \n logging.info(\"Log Path = \" + self.log_path)\n\n # Instantiate the Tcl interpreter.\n self.tcl = Tcl()\n\n self.tcl.eval(\"lappend ::auto_path {\" + api_path + \"}\")\n\n logging.info(\"Tcl Version = \" + self.tcl.eval(\"info patchlevel\"))\n logging.info(\"Tcl ::auto_path = \" + self.tcl.eval('set ::auto_path'))\n logging.info(\"Loading the Spirent TestCenter Conformance Application in the Tcl interpreter...\")\n self.Exec(\"package require SpirentTestCenterConformance\")\n\n return", "def initialize(self, context):\n self.logger = None\n pass", "def init_logging():\n global logger\n logger = logging.getLogger('autogen_quartus')", "def __init__(self, name, command, output, events,\n restart_process, repeats_output_when_opened):\n super(ProcessLogger, self).__init__()\n self.name = name\n self.command = command\n self.restart_process = restart_process\n self.repeats_output_when_opened = repeats_output_when_opened\n self.process = None\n self.lock = threading.Lock()\n self.looking = False\n\n # Compile the list of regexes that we're supposed to be looking for.\n self.events = []\n for event in events:\n self.events.append(ProcessLogger.EventScanner(event.name, self.name,\n event.regex))\n\n if output:\n stress_test_common.MakeDirsIfNeeded(os.path.dirname(output))\n self.output_fp = open(output, \"w\", encoding=\"utf-8\")\n logging.info(\"Logging device info to %s\", output)\n else:\n self.output_fp = None", "def setup_logging(self) -> None:\n logger.setup_logging(self.settings)\n base_format = self.settings.core.logging_format\n base_datefmt = self.settings.core.logging_datefmt\n\n # configure channel logging if required by configuration\n if self.settings.core.logging_channel:\n channel_level = self.settings.core.logging_channel_level\n channel_format = self.settings.core.logging_channel_format or base_format\n channel_datefmt = self.settings.core.logging_channel_datefmt 
or base_datefmt\n channel_params = {}\n if channel_format:\n channel_params['fmt'] = channel_format\n if channel_datefmt:\n channel_params['datefmt'] = channel_datefmt\n formatter = logger.ChannelOutputFormatter(**channel_params)\n handler = logger.IrcLoggingHandler(self, channel_level)\n handler.setFormatter(formatter)\n\n # set channel handler to `sopel` logger\n LOGGER = logging.getLogger('sopel')\n LOGGER.addHandler(handler)", "def init_logging(output_dir, exp):\n logging.basicConfig(level=logging.DEBUG,\n format=\"%(asctime)s %(message)s\",\n datefmt=\"%Y%m%d-%H:%M:%S\",\n filename=os.path.join(output_dir, str(exp) + \".log\"),\n filemode=\"w\")\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n logging.getLogger(\"\").addHandler(console)\n return logging", "def __init__(self):\n self.import_config()\n # Set up configuration\n\n # Register exit handler\n atexit.register(self.exit_handler)\n\n # Set up logging\n self.logger = logging.getLogger('unisonctrl')\n self.logger.setLevel(logging.INFO)\n\n # Set up main log file logging\n logFileFormatter = logging.Formatter(\n fmt='[%(asctime)-s] %(levelname)-9s : %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p'\n )\n\n # Size based log rotation\n if (self.config['rotate_logs'] == \"size\"):\n logfileHandler = logging.handlers.RotatingFileHandler(\n self.config['unisonctrl_log_dir'] + os.sep + 'unisonctrl.log',\n # maxBytes=50000000, # 50mb\n maxBytes=5000, # 50mb\n backupCount=20\n )\n\n # Timed log rotation\n elif (self.config['rotate_logs'] == \"time\"):\n logfileHandler = logging.handlers.TimedRotatingFileHandler(\n self.config['unisonctrl_log_dir'] + os.sep + 'unisonctrl.log',\n when=\"midnight\",\n backupCount=14, # Keep past 14 days\n )\n\n # No log rotation\n elif (self.config['rotate_logs'] == \"off\"):\n logfileHandler = logging.FileHandler()\n\n else:\n logfileHandler = logging.FileHandler()\n\n logfileHandler.setLevel(logging.DEBUG)\n logfileHandler.setFormatter(logFileFormatter)\n self.logger.addHandler(logfileHandler)\n\n # Send logs to console when running\n consoleFormatter = logging.Formatter('[%(asctime)-22s] %(levelname)s : %(message)s')\n consoleHandler = logging.StreamHandler()\n consoleHandler.setLevel(logging.INFO)\n consoleHandler.setFormatter(consoleFormatter)\n self.logger.addHandler(consoleHandler)\n\n # Disabling debugging on the storage layer, it's no longer needed\n self.data_storage = DataStorage(False, self.config)\n\n self.logger.info(\"UnisonCTRL Starting\")\n\n # Clean up dead processes to ensure data files are in an expected state\n self.cleanup_dead_processes()", "def __init__(self):\n # The logging object. \n # Example: log.info(f\"Current value of var: {my_var}\")", "def __init__(self, queue):\r\n logging.Handler.__init__(self)\r\n self.queue = queue", "def __init__(self):\n self._logger = logging.getLogger(__name__)\n self.step_name = \"OpenFDA\"" ]
[ "0.7915068", "0.78774565", "0.7758291", "0.7710645", "0.76501584", "0.76135844", "0.75693744", "0.7531081", "0.7513416", "0.7488386", "0.74827266", "0.74706954", "0.7453568", "0.7426138", "0.7380624", "0.73672324", "0.7357756", "0.7321987", "0.73012114", "0.7299283", "0.72518986", "0.72406065", "0.7221507", "0.72059256", "0.7200522", "0.71985316", "0.71796936", "0.7149724", "0.71495193", "0.71470594", "0.7146786", "0.71448326", "0.7125203", "0.7112651", "0.70930326", "0.70645976", "0.7058028", "0.69857466", "0.6980584", "0.6978113", "0.6971289", "0.6955229", "0.6951592", "0.69463265", "0.6900452", "0.68680835", "0.6863652", "0.6861857", "0.6854631", "0.6854477", "0.68392444", "0.6837403", "0.68299794", "0.68294764", "0.68081564", "0.6790303", "0.6779243", "0.6778754", "0.677595", "0.6772215", "0.675525", "0.6746338", "0.67371213", "0.6727247", "0.6724529", "0.6708715", "0.6707582", "0.6703703", "0.6701408", "0.6697723", "0.6694176", "0.6693873", "0.669279", "0.66854376", "0.667947", "0.66734195", "0.6667325", "0.66605574", "0.6657288", "0.6643959", "0.6643009", "0.66429627", "0.6638403", "0.6625209", "0.6619832", "0.66177624", "0.66175514", "0.66101444", "0.6608345", "0.66071296", "0.6600468", "0.6597295", "0.6589709", "0.65697515", "0.6567229", "0.65643567", "0.6550911", "0.6547321", "0.65400237", "0.6534181", "0.65270257" ]
0.0
-1
Write input messages to stdout, in color if defined. Create the file if not already present.
def write(self, string):
    if self.out is not None:
        if self.first_write:
            self.first_write = False
            string = "\r\n" + string
        if self.color is not None:
            self.out.write(colored(string, self.color))
        else:
            self.out.write(string)
    current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    # check for the split case
    if (
        len(self.parent.log) > 1
        and self.parent.log[-1] == "\r"
        and string[0] == "\n"
    ):
        string = f"\n{current_time} {string[1:]}"
    to_log = re.sub("\r\n", f"\r\n{current_time} ", string)
    self.parent.log += to_log
    if hasattr(self.parent, "test_to_log"):
        self.parent.test_to_log.log += re.sub(
            r"\r\n\[", f"\r\n{self.parent.test_prefix}: [", to_log
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def color_print(*args, **kwargs):\n file = kwargs.get('file', sys.stdout)\n\n end = kwargs.get('end', '\\n')\n\n write = file.write\n if file.isatty():\n for i in range(0, len(args), 2):\n msg = args[i]\n if i + 1 == len(args):\n color = ''\n else:\n color = args[i + 1]\n\n if color:\n msg = _color_text(msg, color)\n\n # Some file objects support writing unicode sensibly on some Python\n # versions; if this fails try creating a writer using the locale's\n # preferred encoding. If that fails too give up.\n if not PY3 and isinstance(msg, bytes):\n msg = _decode_preferred_encoding(msg)\n\n write = _write_with_fallback(msg, write, file)\n\n write(end)\n else:\n for i in range(0, len(args), 2):\n msg = args[i]\n if not PY3 and isinstance(msg, bytes):\n # Support decoding bytes to unicode on Python 2; use the\n # preferred encoding for the locale (which is *sometimes*\n # sensible)\n msg = _decode_preferred_encoding(msg)\n write(msg)\n write(end)", "def style_output(msg='{}'):\n green_code = '\\033[0;32m'\n return text_color(msg, green_code)", "def create_output(self, messages):", "def write(text, output_file=None, fg=None, bg=None): # pylint: disable=unused-argument\n if output_file is None:\n output_file = sys.stdout\n output_file.write(text)", "def init_writing():\n\n # This module is a quick workaround for Unicode \n # varying byte length in windows. \n win_unicode_console.enable()\n colorama.init(convert=True)", "def save_default_error(name):\n name_output = colored(name, attrs=['bold'])\n output_1 = colored(' - Error: Version name ', 'red')\n output_2 = colored(' is not allowed\\n', 'red')\n return output_1 + name_output + output_2", "def add_color_emit_ansi(fn):\n def new(*args):\n \"\"\"Method overload.\"\"\"\n if len(args) == 2:\n new_args = (args[0], copy(args[1]))\n else:\n new_args = (args[0], copy(args[1]), args[2:])\n if hasattr(args[0], 'baseFilename'):\n return fn(*args)\n levelno = new_args[1].levelno\n if levelno >= 50:\n color = '\\x1b[31;5;7m\\n ' # blinking red with black\n elif levelno >= 40:\n color = '\\x1b[31m' # red\n elif levelno >= 30:\n color = '\\x1b[33m' # yellow\n elif levelno >= 20:\n color = '\\x1b[32m' # green\n elif levelno >= 10:\n color = '\\x1b[35m' # pink\n else:\n color = '\\x1b[0m' # normal\n try:\n new_args[1].msg = color + str(new_args[1].msg) + ' \\x1b[0m'\n except Exception as reason:\n print(reason) # Do not use log here.\n return fn(*new_args)\n return new", "def print_green(msg: str = None) -> None:\n if msg is None:\n raise Exception(\"msg was not defined\")\n\n print(Fore.GREEN + msg)\n print(Style.RESET_ALL + \"\", end=\"\")", "def color_print(message, color, newline='\\n'):\n sys.stderr.write('%s%s%s%s' % (color, message, ANSI_NORMAL, newline))", "def file_exists_error(pth):\n\n output_1 = colored(' - Error: File already exists\\n', 'red')\n output_2 = path(pth)\n return output_1 + output_2", "def make_logger(name=str(os.getpid())):\n if not sys.platform.startswith(\"win\") and sys.stderr.isatty():\n def add_color_emit_ansi(fn):\n \"\"\"Add methods we need to the class.\"\"\"\n def new(*args):\n \"\"\"Method overload.\"\"\"\n if len(args) == 2:\n new_args = (args[0], copy(args[1]))\n else:\n new_args = (args[0], copy(args[1]), args[2:])\n if hasattr(args[0], 'baseFilename'):\n return fn(*args)\n levelno = new_args[1].levelno\n if levelno >= 50:\n color = '\\x1b[31;5;7m\\n ' # blinking red with black\n elif levelno >= 40:\n color = '\\x1b[31m' # red\n elif levelno >= 30:\n color = '\\x1b[33m' # yellow\n elif levelno >= 20:\n color = 
'\\x1b[32m' # green\n elif levelno >= 10:\n color = '\\x1b[35m' # pink\n else:\n color = '\\x1b[0m' # normal\n try:\n new_args[1].msg = color + str(new_args[1].msg) + ' \\x1b[0m'\n except Exception as reason:\n print(reason) # Do not use log here.\n return fn(*new_args)\n return new\n log.StreamHandler.emit = add_color_emit_ansi(log.StreamHandler.emit)\n log_file = os.path.join(gettempdir(), str(name).lower().strip() + \".log\")\n log.basicConfig(level=-1, filemode=\"w\", filename=log_file)\n log.getLogger().addHandler(log.StreamHandler(sys.stderr))\n adrs = \"/dev/log\" if sys.platform.startswith(\"lin\") else \"/var/run/syslog\"\n try:\n handler = log.handlers.SysLogHandler(address=adrs)\n except:\n log.debug(\"Unix SysLog Server not found, ignored Logging to SysLog.\")\n else:\n log.getLogger().addHandler(handler)\n log.debug(\"Logger created with Log file at: {0}.\".format(log_file))\n return log", "def save_file_error(pth):\n\n output_1 = colored(' - Error: Failed to save file\\n', 'red')\n output_2 = path(pth)\n return output_1 + output_2", "def color_style():\n if (sys.platform == 'win32' or sys.platform == 'Pocket PC'\n or sys.platform.startswith('java') or not sys.stdout.isatty()):\n return no_style()\n class dummy: pass\n style = dummy()\n style.ERROR = termcolors.make_style(fg='red', opts=('bold',))\n style.ERROR_OUTPUT = termcolors.make_style(fg='red', opts=('bold',))\n style.NOTICE = termcolors.make_style(fg='red')\n style.SQL_FIELD = termcolors.make_style(fg='green', opts=('bold',))\n style.SQL_COLTYPE = termcolors.make_style(fg='green')\n style.SQL_KEYWORD = termcolors.make_style(fg='yellow')\n style.SQL_TABLE = termcolors.make_style(opts=('bold',))\n return style", "def create_status_file(self, contents):\r\n with open(settings.STATUS_MESSAGE_PATH, 'w') as f:\r\n f.write(contents)", "def console(self, msg, color):\r\n if self.__isInit != True:\r\n return", "def write(self, *params):\n if system() == 'Windows':\n windll.kernel32.SetConsoleTextAttribute(windll.kernel32.GetStdHandle(-11),\n self.COLORS[CONSOLE_MESSAGES[params[0]][0]])\n getattr(self._logger, CONSOLE_MESSAGES[params[0]][0].lower())(CONSOLE_MESSAGES[params[0]][1].\n format(*params[1:]))\n if system() == 'Windows':\n windll.kernel32.SetConsoleTextAttribute(windll.kernel32.GetStdHandle(-11), self.COLORS['DEFAULT'])", "def color_print(\n *messages,\n default_color=Color.NORMAL,\n sep=' ',\n end='\\n',\n file=stdout,\n flush=False,\n):\n\n string = []\n print_colors = file.isatty()\n if print_colors:\n string.append(str(default_color))\n\n messages_iter = iter(messages)\n # Print first message and deal with 'sep' later\n first = next(messages_iter)\n is_color = isinstance(first, Color)\n if is_color and print_colors or not is_color:\n string.append(str(first))\n\n # Print sep only when message is a string\n for m in messages_iter:\n is_color = isinstance(m, Color)\n if is_color and print_colors:\n string.append(str(m))\n elif not is_color:\n string.append(f'{sep}{m}')\n\n # Back to normal\n if print_colors:\n string.append(str(Color.NORMAL))\n\n print(''.join(string), end=end, flush=flush, file=file)", "def writec(text, color='black', style='normal'):\n\n sys.stdout.write(strc(text, color, style))", "def write(self, msg):\n\n self.clear()\n if not msg.endswith(\"\\n\"):\n sys.stderr.write(msg+\"\\n\")\n else:\n sys.stderr.write(msg)\n self.draw()", "def open_file_error(pth):\n\n output_1 = colored(' - Error: Failed to open file\\n', 'red')\n output_2 = path(pth)\n return output_1 + output_2", "def print_red(msg: str 
= None) -> None:\n if msg is None:\n raise Exception(\"msg was not defined\")\n\n print(Fore.RED + msg)\n print(Style.RESET_ALL + \"\", end=\"\")", "def log(self, message, *,\n color: List[ANSICode] or ANSICode or None = None,\n new_line=True):\n\n message = self.ansi_code(message, color)\n\n self.current_line.append(message)\n\n if new_line:\n end_char = '\\n'\n else:\n end_char = ''\n\n text = \"\".join(self.current_line)\n lim = self.__count_text(self.over_write_line,\n self.__count_text(text))\n\n if lim < len(self.over_write_line):\n text += self.over_write_line[lim:]\n self.over_write_line = text\n\n print(\"\\r\" + text, end=end_char, flush=True)\n if new_line:\n self.current_line = []\n self.over_write_line = \"\"", "def setup_log():\n\n #logging.basicConfig(filename='log.txt',filemode='a',format='%(asctime)s %(threadName)s %(filename)s %(funcName) %(lineno) %(levelname)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S')\n\n #get the root logger\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n #set up logging to console for INFO and worse\n sh = colorlog.StreamHandler()\n sh.setLevel(logging.INFO)\n #sh_formatter = colorlog.Formatter(fmt='%(log_color)s%(levelname):%(asctime)s\\n%(message)s', datefmt='%H:%M:%S')\n sh_formatter = colorlog.ColoredFormatter(\n \"%(log_color)s%(levelname)-8s - %(name)-25s - %(threadName)-15s - %(asctime)s - %(cyan)s \\n %(message)s\\n\",\n datefmt=None,\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red,bg_white',\n },\n secondary_log_colors={},\n style='%'\n)\n sh.setFormatter(sh_formatter)\n\n #set up logging to file for ALL messages\n #fh = logging.FileHandler('log.txt')\n # fh = logging.handlers.TimedRotatingFileHandler('log.txt', when='midnight', interval=1, backupCount=7)\n # fh.setLevel(logging.DEBUG)\n # fh_formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d - %(threadName)s - %(filename)s.%(funcName)s.%(lineno)s - %(levelname)s\\n%(message)s\\n\\n', datefmt='%Y/%m/%d %H:%M:%S')\n # fh.setFormatter(fh_formatter)\n\n #put the handlers to use\n logger.addHandler(sh)\n # logger.addHandler(fh)", "def label(self, message, fg = None, bg = None, bold = None, blink = None):\n self.savepos()\n self.out.write(self._colorize(message, fg, bg, bold, blink))\n self.restorepos()", "def write(self, text, output_file=None, fg=None, bg=None):\n if output_file is None:\n output_file = sys.stdout\n\n text = self._ansi_wrap(text, fg, bg)\n output_file.write(text)", "def print_success_message(message):\n try:\n import colorama\n print(colorama.Fore.GREEN + message + colorama.Fore.RESET)\n except ImportError:\n print(message)", "def create_procesed_file(msg, filename, path):\n write_path_txt = os.path.join(path, filename)\n with open(write_path_txt, 'w') as file:\n file.write(str(msg))", "def color(color):\n if sys.platform == \"win32\":\n if color == \"green\":\n set_text_attr(FOREGROUND_GREEN | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"yellow\":\n set_text_attr(FOREGROUND_YELLOW | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"red\":\n set_text_attr(FOREGROUND_RED | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"blue\":\n set_text_attr(FOREGROUND_BLUE | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"reset\":\n set_text_attr(FOREGROUND_GREY | get_text_attr() & 0x0070)\n else :\n if color == \"green\":\n sys.stdout.write('\\033[92m')\n elif color == \"red\":\n sys.stdout.write('\\033[91m')\n 
elif color == \"blue\":\n sys.stdout.write('\\033[94m')\n elif color == \"reset\":\n sys.stdout.write('\\033[0m')", "def charcolor(message):\n try:\n print(c.clear)\n while True:\n print_colored(c.clear + c.multi + \"Hello\" + \" \" + who + \"!\")\n except KeyboardInterrupt:\n exit()", "def create_css():\r\n background_color, font, paragraph_color, head_color = prompt_style()\r\n style = \"\"\r\n file = open(TEMPLATE_FILE)\r\n for line in file:\r\n search = True\r\n while search is True:\r\n if \"@BACKCOLOR\" in line:\r\n line = line.split(\"@BACKCOLOR\")\r\n line = line[0] + background_color + line[1]\r\n search = True\r\n elif \"@HEADCOLOR\" in line:\r\n line = line.split(\"@HEADCOLOR\")\r\n line = line[0] + head_color + line[1]\r\n search = True\r\n elif \"@FONTSTYLE\" in line:\r\n line = line.split(\"@FONTSTYLE\")\r\n line = line[0] + font + line[1]\r\n search = True\r\n elif \"@FONTCOLOR\" in line:\r\n line = line.split(\"@FONTCOLOR\")\r\n line = line[0] + paragraph_color + line[1]\r\n search = True\r\n else:\r\n style += line\r\n search = False\r\n style += '\\n'\r\n file.close()\r\n return style", "def redtext(mesg):\n if sys.platform == 'win32':\n import win32console\n handle = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)\n reset = handle.GetConsoleScreenBufferInfo()['Attributes']\n handle.SetConsoleTextAttribute(12)\n sys.stdout.writelines(mesg+'\\n')\n handle.SetConsoleTextAttribute(reset)\n else:\n sys.stdout.write('\\033[91m'+mesg+'\\033[0m\\n')", "def print_with_color(message: str, color: str):\n import sys\n print(color + message + constant.Color.ENDC, file=sys.stderr)", "def print_with_color(message, color):\n if color in colors:\n print(colors[color] + message + '\\x1b[0m')\n else:\n print(message)", "def __output(self,msg,status):\n status = int(status)\n if status:\n print \"%s-----------\\033[1;37;42m%s\\033[0m\" % (format(msg,\"<15\"),\"OK\")\n else:\n print \"%s***********\\033[1;37;41m%s\\033[0m\" % (format(msg,\"<15\"),\"ERROR\")", "def echo_style(message, no_color, fg='yellow'):\n if no_color:\n click.echo(message)\n else:\n click.secho(message, fg=fg)", "def create_file(name, text=None):\n\n if os.path.exists(config_tools.full_dest+name):\n print(f\"{name} уже существует, для записи текста введите его ниже, для завершения команды введите 'no': \")\n answer = input()\n if answer != \"no\":\n with open(config_tools.full_dest + name, 'a', encoding='utf-8') as fi:\n fi.write(answer)\n print(f\"В {name} успешно записан текст\")\n elif answer == \"no\":\n quit()\n else:\n with open(config_tools.full_dest + name, 'w', encoding='utf-8') as fi:\n print(f\"{name} успешно создан\")\n if text:\n fi.write(text)\n print(f\"В {name} успешно записан текст\")", "def print_yellow(msg: str = None) -> None:\n if msg is None:\n raise Exception(\"msg was not defined\")\n\n print(Fore.YELLOW + msg)\n print(Style.RESET_ALL + \"\", end=\"\")", "def style_info(msg='{}'):\n blue_code = '\\033[0;34m'\n return text_color(msg, blue_code)", "def yaml_file(yml):\n\n return colored(yml, 'cyan')", "def err(msg):\n print(colored.red(\"[ERROR]: {0}\".format(msg)))", "def write_message(f, readme_txt, outdir):\n name = f.split(\".gz\")[0].split('/')[-1]\n rf = open(outdir + \"/\" + str(name) + \"_README.txt\", \"a\")\n to_write = \" \" + str(readme_txt.pandascheck) + \" \" + str(readme_txt.samplesize) + \" \" + str(readme_txt.namecol)\n rf.write(str(f) + to_write + \"\\n\")\n rf.close()\n\n # Remove file if there is FAILED in error message\n if 'FAILED' in to_write:\n name = 
f.split(\".gz\")[0].split('/')[-1]\n if os.path.exists(outdir + \"/\" + name + \"_cols_edit.gz\"):\n os.remove(outdir + \"/\" + name + \"_cols_edit.gz\")", "def success():\n sys.stdout.write('%s[ pass ]%s\\n' % (colors.GREEN, colors.RESET))", "def _insertErrorMsg(self, ErrorMessage, outputFileObject):\n outputFileObject.write('<font color=\"' + AutoGrader.Const.ERROR_COLOR + '\">')\n outputFileObject.write (ErrorMessage)\n outputFileObject.write('</font>')", "def write_with_colors_win_py3(stream, outfile, flush):\n color = b'\\x1b['\n encoding = outfile.encoding\n for chunk in stream:\n if color in chunk:\n outfile.write(chunk.decode(encoding))\n else:\n outfile.buffer.write(chunk)\n if flush:\n outfile.flush()", "def afficher_message(self, message, color='black'):\n self.messages['foreground'] = color\n self.messages['text'] = message", "def info(msg):\n sys.stdout.write('%s[ INFO ]%s %s\\n' % (colors.GREEN, colors.RESET , msg))", "def write(self, msg):\n\n if _activeBar is not None:\n # The c++ logger sends spurious empty lines,\n # just gobble them up.\n if msg.strip():\n _activeBar.write(msg)\n else:\n sys.stderr.write(msg)", "def idle_writer(output, color=None):\n if isinstance(output, str):\n if color is None:\n sys.stdout.shell.write(output, \"stderr\") # noqa\n else:\n sys.stdout.shell.write(output, color) # noqa\n return\n for fragment in output:\n if isinstance(fragment, str):\n sys.stdout.shell.write(fragment, \"stderr\") # noqa\n elif len(fragment) == 2:\n sys.stdout.shell.write(fragment[0], fragment[1]) # noqa\n else:\n sys.stdout.shell.write(fragment[0], \"stderr\") # noqa", "def printToFile(self, message=''):\n self._outputFile.write(str(message) + '\\n')", "def write(self, text: str):\n if self.color:\n text_color = self.edit.textColor()\n self.edit.setTextColor(text_color)\n if self.out:\n self.out.write(text)\n self.edit.moveCursor(QtGui.QTextCursor.End)\n self.edit.insertPlainText(text)", "def error(msg):\n sys.stdout.write('%s[ ERROR ]%s %s\\n' % (colors.RED, colors.RESET, msg))", "def test_colorify(self):\n\n # pylint: disable=protected-access\n\n expected = False\n actual = self.file_instance.exists()\n\n self.assertEqual(expected, actual)\n\n # Test with a template that is not designed for colorify\n expected = self.to_print[\"basic_string\"]\n actual = Prints(None, \"Hehehe\", output_file=None, only_on_file=False)._colorify(\n self.to_print[\"basic_string\"]\n )\n\n self.assertEqual(expected, actual)\n\n # Test with a template that is designed for colorify + Status is UP\n expected = Fore.BLACK + Back.GREEN + self.to_print[\"basic_string\"]\n actual = Prints(\n [\"This is a test\", PyFunceble.STATUS.official.up],\n \"Generic\",\n output_file=None,\n only_on_file=False,\n )._colorify(self.to_print[\"basic_string\"])\n\n self.assertEqual(expected, actual)\n\n # Test with a template that is designed for colorify + Status is DOWN\n expected = Fore.BLACK + Back.RED + self.to_print[\"basic_string\"]\n actual = Prints(\n [\"This is a test\", PyFunceble.STATUS.official.down],\n \"Generic\",\n output_file=None,\n only_on_file=False,\n )._colorify(self.to_print[\"basic_string\"])\n\n self.assertEqual(expected, actual)\n\n # Test with a template that is designed for colorify + Status is\n # UNKNOWN or INVALID\n expected = Fore.BLACK + Back.CYAN + self.to_print[\"basic_string\"]\n actual = Prints(\n [\"This is a test\", PyFunceble.STATUS.official.invalid],\n \"Generic\",\n output_file=None,\n only_on_file=False,\n )._colorify(self.to_print[\"basic_string\"])\n\n 
self.assertEqual(expected, actual)", "def error(message='Ops, there are some error...'):\n print(colorful_text(message, Fore.RED))", "def print_illegal_color_format_screen( enteredBGColor, enteredFGColor, convertedBGColor, convertedFGColor ):\n print \"\"\n print \"Error: are the passed in colors valid?\"\n print \" - passed in background-color '\" + enteredBGColor + \"' was converted to '\" + convertedBGColor + \"'.\"\n print \" - passed in foreground-color '\" + enteredFGColor + \"' was converted to '\" + convertedFGColor + \"'.\"\n print \"\"", "def style(command, checkonly=False):\n black(command, checkonly=checkonly)\n isort(command, checkonly=checkonly)\n lint(command)\n # Only prints if doesn't exit from the above not failing out\n print(\n \"\"\"\nAll Style Checks Passed Successfully\n====================================\n\"\"\"\n )", "def print_ok_blue(message: str):\n print_with_color(message, constant.Color.OKBLUE)", "def print_ok_green(message: str):\n print_with_color(message, constant.Color.OKGREEN)", "def style_error(msg='{}'):\n red_code = '\\033[0;31m'\n return text_color(msg, red_code)", "def _print(self, message, level, color):\n if (self.level >= level):\n sys.stdout.write(color)\n try: sys.stdout.write(\"%s\\n\" % message)\n except: sys.stdout.write(encode(\"%s\\n\" % message))\n sys.stdout.write(COLOR_RESET)\n sys.stdout.flush()\n return message", "def init_logger_color():\n if os.environ.get('COLOREDLOGS_LOG_LEVEL') is None:\n os.environ['COLOREDLOGS_LOG_LEVEL'] = 'INFO'\n if os.environ.get('COLOREDLOGS_LOG_FORMAT') is None:\n os.environ['COLOREDLOGS_LOG_FORMAT'] = '%(asctime)s '\n '[%(levelname)s] %(message)s'\n if os.environ.get('COLOREDLOGS_DATE_FORMAT') is None:\n os.environ['COLOREDLOGS_LOG_DATE_FORMAT'] = '%Y-%m-%d %H:%M:%S'\n coloredlogs.install()", "def test_cli_demo_with_formatters(self):\n with CaptureOutput() as capturer:\n main('coloredlogs', '--demo-with-formatter')\n output = capturer.get_text()\n # Make sure the output contains all of the expected logging level names.\n for name in 'debug', 'info', 'error', 'critical':\n assert name.upper() in output", "def error_message(message='Ops, there are some error...'):\n print(colorful_text(message, Fore.RED))", "def text_writer(plaintext: str, output_file: Optional[Union[IO[str], Path]] = sys.stdout):\n if hasattr(output_file, \"write_text\"):\n output_file.write_text(plaintext)\n logging.info(\"File written is: \", str(output_file))\n elif hasattr(output_file, \"write\"):\n output_file.write(plaintext)\n logging.info(\"File written is: \", output_file.name)", "def create_file():\n with open(\"example.txt\", \"w\") as file:\n file.write(\"\")", "def error_logging(filename, cloud, msg):\n with open(filename, 'a') as f:\n f.write(cloud + \" \" + msg + '\\n')\n f.write('\\n')", "def success(self, message=''):\n print(colored(message, 'green'))", "def print_success(text):\n\n print(colorize(text, Colors.SUCCESS))", "def auto_color(stream=sys.stdin):\n term_name = os.environ.get(\"TERM\", \"\").lower()\n if (stream.isatty()\n and (term_name in KNOWN_TERMINAL_TYPES or \"xterm\" in term_name)):\n return VtColor()\n return NoColor()", "def __init__(self):\n self.colors = (\n 'BLACK', 'RED', 'GREEN', 'YELLOW',\n 'BLUE', 'MAGENTA', 'CYAN', 'WHITE'\n )\n\n self.disable_color = True\n\n if sys.stdout.isatty():\n self.disable_color = False", "def _write_message_files(lang, command='update'):\n BabelCLI().run(['', command, '-i', os.path.join(I18N_PATH, 'messages.pot'), '-d', I18N_PATH,\n '-l', lang])", "def log(prefix, 
msg):\n msg = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S [%%s] %%s\") % (prefix, msg)\n print(COLOR_MAP.get(prefix, DEFAULT) + msg + DEFAULT)", "def msg(self, msg):\n if hasattr(self.output, 'writeline'):\n self.output.writeline(msg)\n elif hasattr(self.output, 'writelines'):\n self.output.writelines(msg + \"\\n\")\n pass\n return", "def is_invalid():\n print(colored('Invalid input\\n', 'red', attrs=['bold']))", "def error(text):\n print(red(\"✘ {0}\".format(text)))\n sys.stdout.flush()", "def setup_logging_theme(handler, colors=\"light\"):\n if colors not in (\"light\", \"dark\"):\n logging.getLogger(\"delfick_logging\").warning(\n lc( \"Told to set colors to a theme we don't have\"\n , got=colors\n , have=[\"light\", \"dark\"]\n )\n )\n return\n\n # Haven't put much effort into actually working out more than just the message colour\n if colors == \"light\":\n handler._column_color['%(message)s'][logging.INFO] = ('cyan', None, False)\n else:\n handler._column_color['%(message)s'][logging.INFO] = ('blue', None, False)", "def colored_formatter(record):\n\n colours = {\n \"info\": (\"blue\", \"normal\"),\n \"debug\": (\"magenta\", \"normal\"),\n \"warning\": (\"yellow\", \"normal\"),\n \"print\": (\"green\", \"normal\"),\n \"error\": (\"red\", \"bold\"),\n }\n\n levelname = record.levelname.lower()\n\n if levelname == \"error\":\n return\n\n if levelname.lower() in colours:\n levelname_color = colours[levelname][0]\n header = color_text(\"[{}]: \".format(levelname.upper()), levelname_color)\n\n message = record.getMessage()\n\n if levelname == \"warning\":\n warning_category_groups = re.match(r\"^\\w*?(.+?Warning) (.*)\", message)\n if warning_category_groups is not None:\n warning_category, warning_text = warning_category_groups.groups()\n\n warning_category_colour = color_text(\n \"({})\".format(warning_category), \"cyan\"\n )\n message = \"{} {}\".format(\n color_text(warning_text, \"\"), warning_category_colour\n )\n\n sys.__stdout__.write(\"{}{}\\n\".format(header, message))\n sys.__stdout__.flush()\n\n return", "def _write(message: Optional[str] = None) -> None:\n if message is not None:\n stdout(\"%s\\n\" % message)\n else:\n stdout(\"\\n\")", "def output(self, msg):", "def run_file(self, value=None):\n self.save_file()\n self.p = Popen(\"./Project/myfile.py\", stdout=PIPE, stderr=PIPE)\n output, errors = self.p.communicate()\n self.my_output.delete(\"1.0\", END)\n self.my_output.insert(\"1.0\", output)\n if errors != \"\":\n print_to_log(errors)\n self.my_output.configure(fg=\"red\")\n else:\n self.my_output.configure(fg=\"white\")\n self.my_output.insert(\"1.0\", errors)", "def print_success(msg):\n colour.cprint(msg, 'success')\n sys.stdout.flush()", "def ask(message, ofile=sys.stderr, ifile=sys.stdin, style=Fore.MAGENTA,\r\n noecho=False, accept_empty=True):\r\n if noecho and ifile != sys.stdin:\r\n raise ValueError(\"noecho option implies input from stdin\")\r\n\r\n while True:\r\n with ScopedColoredStream(ofile, style, flush_on_exit=True) as stream:\r\n stream.write(message)\r\n\r\n if noecho:\r\n ans = getpass.getpass(prompt=\"\", stream=ofile)\r\n else:\r\n ans = ifile.readline().rstrip(\"\\n\\r\")\r\n\r\n if not accept_empty and not ans.strip():\r\n continue\r\n return ans", "def setup_logger(log_level, log_file=None, console_out=True):\n level = getattr(logging, log_level.upper(), None)\n if not level:\n color_print(\"Invalid log level: %s\" % log_level, \"RED\")\n sys.exit(1)\n\n # hide traceback when log level is INFO/WARNING/ERROR/CRITICAL\n if level >= logging.INFO:\n # 
sys.tracebacklimit = 0\n pass\n\n if(console_out==True):\n formatter = ColoredFormatter(\n #modified by zhengchun 20180607 message的颜色由log_color控制\n # u\"%(log_color)s%(bg_white)s%(levelname)-8s%(reset)s %(asctime)s - %(filename)s.%(funcName)s().%(lineno)dL %(log_color)s%(message)s\",\n u\"%(log_color)s%(levelname)-8s%(reset)s %(asctime)s - %(log_color)s%(message)s\",\n datefmt=None,\n reset=True,\n log_colors=log_colors_config\n )\n\n handler_console = logging.StreamHandler()\n handler_console.setFormatter(formatter)\n logging.root.addHandler(handler_console)\n\n if log_file:\n formatter_file = ColoredFormatter(\n u\"%(asctime)s - %(levelname)-8s - %(message)s\",\n reset=False,\n log_colors={}\n )\n\n head, tail=os.path.split(log_file)\n if head and tail and not os.path.exists(head):\n os.makedirs(head)\n handler_file = logging.FileHandler(log_file, encoding=\"utf-8\")\n handler_file.setFormatter(formatter_file)\n logging.root.addHandler(handler_file)\n\n logging.root.setLevel(level)", "def setup_quiet_build(env):\r\n # colors\r\n c = dict()\r\n c['cyan'] = '\\033[96m'\r\n c['purple'] = '\\033[95m'\r\n c['blue'] = '\\033[94m'\r\n c['bold_blue'] = '\\033[94;1m'\r\n c['green'] = '\\033[92m'\r\n c['yellow'] = '\\033[93m'\r\n c['red'] = '\\033[91m'\r\n c['magenta']= '\\033[35m'\r\n c['bold_magenta']= '\\033[35;1m'\r\n c['inverse']= '\\033[7m'\r\n c['bold'] = '\\033[1m'\r\n c['rst'] = '\\033[0m'\r\n\r\n # if the output is not a terminal, remove the c\r\n # also windows console doesn't know about ansi c seems\r\n if not sys.stdout.isatty() or re.match('^win.*', plat_id()):\r\n for key, value in c.iteritems():\r\n c[key] = ''\r\n\r\n compile_cxx_msg = '%s[CXX]%s %s$SOURCE%s' % \\\r\n (c['blue'], c['rst'], c['yellow'], c['rst'])\r\n\r\n compile_c_msg = '%s[CC]%s %s$SOURCE%s' % \\\r\n (c['cyan'], c['rst'], c['yellow'], c['rst'])\r\n\r\n compile_shared_msg = '%s[SHR]%s %s$SOURCE%s' % \\\r\n (c['bold_blue'], c['rst'], c['yellow'], c['rst'])\r\n\r\n link_program_msg = '%s[LNK exe]%s %s$TARGET%s' % \\\r\n (c['bold_magenta'], c['rst'], c['bold'] + c['yellow'] + c['inverse'], c['rst'])\r\n\r\n link_lib_msg = '%s[LIB st]%s %s$TARGET%s' % \\\r\n ('', c['rst'], c['cyan'], c['rst'])\r\n\r\n ranlib_library_msg = '%s[RANLIB]%s %s$TARGET%s' % \\\r\n ('', c['rst'], c['cyan'], c['rst'])\r\n\r\n link_shared_library_msg = '%s[LNK shr]%s %s$TARGET%s' % \\\r\n (c['bold_magenta'], c['rst'], c['bold'], c['rst'])\r\n\r\n env['CXXCOMSTR'] = compile_cxx_msg\r\n env['SHCXXCOMSTR'] = compile_shared_msg\r\n env['CCCOMSTR'] = compile_c_msg\r\n env['SHCCCOMSTR'] = compile_shared_msg\r\n env['ARCOMSTR'] = link_lib_msg\r\n env['SHLINKCOMSTR'] = link_shared_library_msg\r\n env['LINKCOMSTR'] = link_program_msg\r\n env['RANLIBCOMSTR']= ranlib_library_msg", "def main(in_file, out_file, verbose):\n if verbose:\n logging.basicConfig(\n format='%(levelname)s %(filename)s: %(message)s',\n level=logging.DEBUG\n )\n else:\n # Log info and above to console\n logging.basicConfig(\n format='%(levelname)s: %(message)s',\n level=logging.INFO\n )\n\n if not out_file:\n out_file = in_file.replace(\".less\", \".sass\")\n\n if \".less\" in out_file:\n out_file = in_file.replace(\".less\", \".sass\")\n\n with open(in_file, 'r') as file_input:\n less_string = file_input.read()\n\n sass_string = convert(less_string)\n\n with open(out_file, 'w') as file_out:\n file_out.write(sass_string)", "def render(console: Console) -> None:\n console.print(Rule(\"[bold blue]CLI File Manager\", style=\"red\"))\n console.print(Panel(\"[white]Welcome to The 
[bold]BETTER[/bold] File manager\\nFor help type: `help` or `h`\",\n style=\"green\"))", "def create(self, messages):\n output = self.create_output(messages)\n if output:\n output.name = self.name\n\n return output", "def ferrmsg(msg, proc=''):\r\n\tf = open(ERROR_LOG, 'a')\r\n\tf.write(\"<%s>: %s\\n\" % (proc,msg))\r\n\tf.close()", "def message(msg):\n # Let's print to console too. Can remove if requested.\n print (\"{} - {}\\n\".format(time.asctime(), msg))\n with open(LOG_FILE, 'a') as log:\n log.write(\"{} - {}\\n\".format(time.asctime(), msg))", "def create_file(file_name: str, startup_text: str) -> None:\n with open(file_name, 'w') as f:\n f.write(startup_text)", "def report(self, file=None, color=True, **kwargs):\n status = self.get_status(**kwargs)\n\n if file is None and color:\n col = self._report_colors[status]\n status = col + str(status) + self._end_color\n\n s = ' {:<50} - Status : {}'.format(self._TASK_NAME, status)\n\n file = file if file is not None else sys.stdout\n print(s, file=file)", "def pr(*args, c=None, sep=' ', end='\\n'):\n\n msg = \"\"\n cnt = 0\n for i in args:\n cnt += 1\n if c is None:\n msg += str(i) + sep\n else:\n color = get_color_from_str(c, cnt)\n if color == 'w':\n msg += WHITE.format(i) + sep\n elif color == 'r':\n msg += RED.format(i) + sep\n elif color == 'g':\n msg += GREEN.format(i) + sep\n elif color == 'y':\n msg += YELLOW.format(i) + sep\n elif color == 'b':\n msg += BLUE.format(i) + sep\n elif color == 'm':\n msg += MAGENTA.format(i) + sep\n elif color == 'c':\n msg += CYAN.format(i) + sep\n else:\n msg += str(i) + sep\n msg += end\n print(msg, sep='', end='')", "def print_failure_message(message):\n try:\n import colorama\n print(colorama.Fore.RED + message + colorama.Fore.RESET,\n file=sys.stderr)\n except ImportError:\n print(message, file=sys.stderr)", "def style(cline, write=False):\n check_arg = \"\" if write else \"--check\"\n print(\"Checking code style with Black.\")\n cline.run(f\"python3 -m black {check_arg} .\")", "def info(msg):\n print(colored.green(\"[INFO]: {0}\".format(msg)))", "def message(self):\n self.add_file_string('Message file')\n self.make_dangerous('Message file')", "def print_success_msg(msg):\n click.secho(msg, fg='green', file=sys.stdout)", "def format(self, record):\n\n\t\t# Use copy.copy - c.f. 
https://stackoverflow.com/a/7961390\n\t\tcolored_record = copy.copy(record)\n\n\t\tcolor = None\n\t\ttry:\n\t\t\tcolor = record.color\n\t\texcept AttributeError as e:\n\t\t\tpass\n\n\t\tif color is not None:\n\t\t\tif color is None or not color or color == \"none\":\n\t\t\t\tpass\n\t\t\telif color == \"white\":\n\t\t\t\twhite = \"\\033[37m\"\n\t\t\t\tclear = \"\\033[0;0m\"\n\t\t\t\tcolored_record.msg = \"{0:s}{1:s}{2:s}\".format(\n\t\t\t\t\twhite,\n\t\t\t\t\tcolored_record.msg,\n\t\t\t\t\tclear,\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\traise WCMIError(\"error: ConsoleFilter: unrecognized color `{0:s}'.\".format(str(color)))\n\n\t\treturn super().format(colored_record)", "def show_msg(self):\n if self.result and self.success_msg:\n print color_str('g', '\\n'.join(self.success_msg))\n elif self.result == False and self.fail_msg:\n print color_str('r', '\\n'.join(self.fail_msg))\n if self.stat_msg:\n print color_str('b', '\\n'.join(self.stat_msg))", "def _post(level, tag, message=None):\n if message == None:\n message = tag\n tag = \"hotword\"\n\n message = \"%s%s\\033[0;37;40m\" % (Log.COLOURS[level], message)\n\n logger = Log._get_logger(level, tag)\n method = getattr(logger, level)\n method(Log._message(message))", "def main(argv):\n if not scribus.haveDoc(): #do we have a doc?\n scribus.messageBox(\"csv2color\", \"No document to import colors \\n Please open one, first.\")\n sys.exit()\n else:\n filename=scribus.fileDialog(\"csv2color\", \"CSV files(*.csv *.CSV *.txt *.TXT)\")\n while os.path.isdir(filename):\n filename=scribus.fileDialog(\"csv2color\", \"CSV files(*.csv *.CSV *.txt *.TXT)\") #proper filename?\n else:\n try:\n colorlist=getColorsFromCsv(filename)\n messagestring = \"You are going to import %i colors \\n This may take a while\" % len(colorlist)\n answer = scribus.messageBox(\"csv2color\", messagestring, button1=scribus.BUTTON_OK, button2=scribus.BUTTON_CANCEL)\n if answer != scribus.BUTTON_OK:\n sys.exit()\n else:\n importColors(colorlist)\n scribus.docChanged(True)\n scribus.messageBox(\"csv2color\", \"Colors imported! \\n Thank you for using csv2color and Scribus!\")\n except:\n scribus.messageBox(\"csv2color\", \"Could not import file!\", icon=scribus.ICON_WARNING)\n sys.exit()", "def create_css():\n \n cssFile = \"tests.css\"\n cf = open(cssFile, 'w')\n cf.write(cssContents)\n cf.close()" ]
[ "0.664176", "0.58514845", "0.573148", "0.56499183", "0.5588023", "0.5570282", "0.55647844", "0.55574226", "0.5555951", "0.5462549", "0.5433804", "0.542239", "0.53665817", "0.5349737", "0.5295903", "0.5280991", "0.52719", "0.5243998", "0.52435213", "0.52247983", "0.5207164", "0.5199744", "0.51898885", "0.5183002", "0.51728386", "0.5172187", "0.51529694", "0.514009", "0.51183593", "0.5116439", "0.5089575", "0.5087736", "0.5081504", "0.5060403", "0.5040091", "0.5037478", "0.5023935", "0.5023023", "0.5010492", "0.50103194", "0.4986069", "0.49619746", "0.49593237", "0.49553144", "0.49418435", "0.4938494", "0.4936215", "0.49357668", "0.49297616", "0.49264187", "0.49216667", "0.49062216", "0.4902591", "0.4895872", "0.48940083", "0.48895627", "0.48850033", "0.48801413", "0.48758653", "0.48715487", "0.4871426", "0.48512733", "0.48430693", "0.48427528", "0.4841084", "0.4837253", "0.48360327", "0.48337865", "0.48300236", "0.48145178", "0.48053932", "0.48031652", "0.47915402", "0.47910294", "0.47862068", "0.47854212", "0.4784689", "0.47698852", "0.47624665", "0.47618103", "0.47588265", "0.47580028", "0.4757566", "0.47540197", "0.475011", "0.47499898", "0.47435886", "0.47425765", "0.4735815", "0.47305578", "0.4722455", "0.47205436", "0.47204694", "0.47194523", "0.47174472", "0.47147486", "0.471208", "0.47020176", "0.4698423", "0.46971652", "0.4694123" ]
0.0
-1
Add process time with the log messages.
def extra_log(self, string):\n    if hasattr(self.parent, "log"):\n        self.parent.log += f"\r\n[{time.process_time()}] "\n        self.parent.log += string + "\r\n"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_line_in_log():\n logging.info(' ' + '-' * 60 + '\\n')", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def extra_log(self, string):\n if hasattr(self.parent, \"log\"):\n self.parent.log += \"\\r\\n[%s] \" % time.process_time()\n self.parent.log += string + \"\\r\\n\"", "def writeToLog(self,msg):\n\tlocaltime = \"%s \"%time.strftime(\"%H:%M:%S\",time.localtime())\n\tpid = \"%s \"%self.pid\n self.log.write(pid+localtime+'###### '+msg+'\\n')", "def log(self, line):\n now = datetime.datetime.now()\n time = datetime.datetime.strftime(now, '(%d %b %Y %H:%M:%S)')\n with open(self.logfile, 'a') as log:\n log.write(time + ' ' + line + '\\n')", "def log(self, msg):\n current_datetime = self.get_date_time()\n self.file.write(\"%s %s\\n\" % (current_datetime, msg))", "def log(self, msg=\"\"):\n if len(msg):\n msg = \"[%.03fs] %s\" % (time.time()-self.timeStart, msg)\n print(msg)\n self.logLines.append(msg)", "def add_message(self, message):\n message_time = t.process_time()\n self.log_cache.append((message, message_time))\n if len(self.log_cache) % 20 == 0:\n self._commit_log_db()", "def task_display_funny_time():\n print(\"funny time is %s\" % datetime.datetime.now())\n logger.info(\"Hurray its working\")", "def writeLog(pid):\n\tglobal processes,logfile,strikes,sleep\n\tproc = processes[pid]\n\tlogfile.write('[%s] %d %s %f%%cpu %f%%mem (over %d s): %s\\n'%(time.strftime('%b %d %H:%M:%S'),pid,proc.user,proc.cpu,proc.mem,proc.count*sleep,proc.command))", "def log_time(name):\n if DEBUG:\n now = time.time()\n logging.debug('emcc step \"%s\" took %.2f seconds', name, now - TimeLogger.last)\n TimeLogger.update()", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def logStats(self, msg):\n self.logLinesStats.append(msg)", "async def log_time(self, event):\n sender = await event.get_sender()\n user = utils.get_display_name(sender)\n\n message = event.message\n\n time = message.date.astimezone(self.__to_zone).time().hour\n\n logging.debug(\"Got the following message: \\\"\" + event.raw_text + \"\\\" at time \" + str(time))\n\n self.__contact_times.labels(user).observe(time)", "def _reportExecTime(self, exec_time, outputFile):\n f=self.openFile(outputFile, \"a\") #open for appending\n f.write ('<font face=\"verdana\" color=\"' + AutoGrader.Const.ANALYTICS_COLOR2 + '\">[Execution Time: ' + format(\"%0.4f\" % exec_time) + ' sec.]</font><br>\\n')\n f.close()", "def appendMsg(self, msg):\n # self.message += msg\n theTime = self.logger.mytime()\n # self.message += theTime + \" \" + str( msg )\n self.message = str(self.message) + str(theTime) + \" \" + str(msg)", "def log(message):\n print(\"{0}: {1}\".format(acm.Time.TimeNow(), message))", "def exec_time_processor(self):\n with open(join(self.logs_dir, \"clock_time.dat\"), 'w') as fh:\n fh.write(\"Time ExecutionTime ClockTime\\n\")\n while True:\n rexp = (yield)\n fh.write(self.time_str + \"\\t\" +\n \"\\t\".join(x for x in rexp.groups()) + \"\\n\")\n self._tick = True", "def _log(self, runtime, extra):\n\t\tif extra is None:\n\t\t\tdebug(\"Timer - %s took %d ms\" % (self._item, 1000 * runtime))\n\t\telse:\n\t\t\tdebug(\"Timer - %s [%s] took %d ms\" % (self._item, str(extra), 
1000 * runtime))\n\t\treturn self", "def LogProcess(self):\n time = datetime.today().strftime('%a %Y%b%d %X')\n# Get user name.\n f = os.popen(\"whoami\",\"r\")\n user = f.read().strip()\n f.close()\n\n entry = '%s\\t%s\\t%s\\t%s\\n' % (time, self.topdir, user, self.version)\n\n if ismounted(c.exams_file):\n# Append info to the exams file.\n try:\n f = open(c.exams_file,'a+')\n f.seek(0, 2)\n f.write(entry)\n f.close()\n except:\n# Not a huge problem if this doesn't work.\n pass", "def Log(self, times):\n\n print '--'\n print times.PrettyPrintLog()\n\n return", "def __call__(self, msg='', total=False):\r\n if not total:\r\n time_lapse = time.time() - self.last_time\r\n full_msg = \"%s: %s\" % (msg, format_time(time_lapse))\r\n else:\r\n # FIXME: Too much logic duplicated\r\n time_lapse = time.time() - self.start_time\r\n full_msg = \"%s: %.2fs, %.1f min\" % (msg, time_lapse,\r\n time_lapse / 60)\r\n print(full_msg, file=sys.stderr)\r\n if self.logfile is not None:\r\n try:\r\n with open(self.logfile, 'a') as f:\r\n print(full_msg, file=f)\r\n except:\r\n \"\"\" Multiprocessing writing to files can create race\r\n conditions. Rather fail silently than crash the\r\n calculation.\r\n \"\"\"\r\n # XXX: We actually need a debug flag to disable this\r\n # silent failure.\r\n self.last_time = time.time()", "def _log2mylog(self, msg):\n time_str = mod_time.strftime(\n \"%Y-%m-%d %H:%M:%S\", mod_time.localtime(mod_time.time())\n )\n msg = str(msg)\n content = \"%s [%s]\\n\" % (time_str, msg)\n fa = open(self.mylogfile, \"a\")\n fa.write(content)\n fa.close()", "def log(self, message):\n timestamp = time.strftime(\"[%H:%M:%S]\", time.localtime(time.time()))\n self.file.write('%s %s\\n' % (timestamp, message))\n self.file.flush()", "def _log_update_time(self, *_):\n import time\n if not hasattr(self, '_time'):\n setattr(self, '_time', time.time())\n _time = time.time()\n debug('Time since last call: {:.6f}s'.format(_time - getattr(self, '_time')))\n setattr(self, '_time', _time)", "def trace(msg):\n import datetime\n print('[{:%Y-%m-%d %H:%M:%S}]: '.format(datetime.datetime.now()) + msg)", "def add_log(conn, task, start_time):\n cursor = conn.cursor()\n cursor.execute('INSERT INTO timelogs (task, start_time) VALUES (?, ?);', (task, start_time))", "def do_timestamp_messages(self, messages):\n timestamp = self.env.now\n self.reception_records[timestamp] = messages\n log.debug(\"{} recorded {}\".format(self, self.reception_records))", "def print_log (self, n = None):\r\n\t\tif n is None:\r\n\t\t\tn = len(self.log)\r\n\t\t\r\n\t\tfor i in range(-n,0):\r\n\t\t\tprint('@ {0: 8.1f} ms, {1} : {2}'.format(1000*self.log[i]['proctime'], self.log[i]['type'], self.log[i]['desc']) )", "def internal_event(self):\n # log activity\n self.log_activity(LogEntry(\n sys_time=time(),\n logical_time=self.logical_clock,\n action=\"work\"\n ))", "def _log(self, log, message):\n log_entry = '[%s] %s\\n' % (time.strftime('%Y/%m/%d %H:%M:%S'), message)\n log.write(log_entry)\n if self.verbose:\n print log_entry.rstrip()", "def internal_event (self):\n self.clock_time += 1\n self.log()", "def log(self, extra=None):\n\t\tself.stop()\n\t\treturn self._log(self.time(), extra)", "async def add_process_time_header(request: Request, call_next):\n start_time = time.time()\n response = await call_next(request)\n process_time = time.time() - start_time\n response.headers[\"X-Process-Time\"] = str(process_time)\n return response", "def log_time(log_method, msg, *args):\n start_time = time.time()\n\n try:\n yield\n finally:\n duration = 
time.time() - start_time\n args = args + (duration,)\n log_method(msg, *args)", "def log_message(self, message):\n with open(LOGFILE, \"a\") as f:\n currentDt = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S.%f)\")\n message = \"\\n\" + currentDt + '---' + message\n f.write(message)", "def log(message):\n path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n with open(os.path.join(path, logfile_name), 'a+') as f:\n t = strftime(\"%d %b %Y %H:%M:%S\", gmtime())\n f.write(\"\\n\" + t + \" \" + message)", "def log_date_time(self, log_date_time):\n\n self._log_date_time = log_date_time", "def log_time(label: str) -> None:\n print(label, datetime.now())", "def log(message):\n path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n with open(os.path.join(path, logfile_name), 'a+') as f:\n t = strftime(\"%d %b %Y %H:%M:%S\", gmtime())\n f.write(\"\\n\" + t + \" \" + str(message))", "def log(message):\n from tempfile import gettempdir\n from time import strftime\n from sys import stderr\n timestamp = strftime(\"%d-%b-%y %H:%M:%S\")\n if len(message) == 0 or message[-1] != \"\\n\": message += \"\\n\"\n stderr.write(\"%s: %s\" % (timestamp,message))\n logfile = gettempdir()+\"/beam_profiler.log\"\n file(logfile,\"a\").write(timestamp+\" \"+message)", "def add_process_date(record):\n process_date = pd.Timestamp.now().strftime('%Y-%m-%d %H:%M:%S')\n record['process_date'] = process_date\n return record", "def log(self, msg):\n logging.info(\"Logging Message\")\n ml = self.monk_logs\n today = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n ml.insert({today: {\"log\": msg,\n \"sentiment\": self.get_sentiment(msg),\n \"weather\": Weather.get_weather()}})", "def log(self, msg=None):\n f = open(self.logbook, 'a')\n # if send or receive, write message\n if msg: \n f.write(\" System time: \" + str(datetime.now()) + \n \" Logical clock time: \" + str(self.clock_time) + \n \" \" + str(msg) + '\\n')\n # if it is an internal event just write the system time and current\n # logical clock time\n else:\n f.write(\" System time: \" + str(datetime.now()) + \n \" Logical clock time: \" + str(self.clock_time) + '\\n')\n f.close()", "def log(message):\n path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n with open(os.path.join(path, logfile_name), 'a+') as f:\n t = strftime(\"%d %b %Y %H:%M:%S\", gmtime())\n f.write(\"\\n\" + t + \" %s\" % message)", "def write(self, message):\r\n now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S - ')\r\n self.terminal.write(message)\r\n self.log.write(message)", "def write(self, msg, flag_print=True):\n file = open(self.log_path, \"a\")\n insert_time=datetime.now().strftime('%H:%M:%S.%f')[:-3]\n current_time = \"[\"+insert_time+\"]\"\n log_msg = current_time + \" \" + msg + \"$\" +\"\\n\" \n file.write(log_msg)\n # if flag_print is True:\n print(log_msg)", "def log_print(message):\n print('[%s]: %s'%(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()), message))", "def Create_log():\r\n \"\"\"And Maintain log file to the current date in MMM_DD_YY format\"\"\"\r\n \r\n name = multiprocessing.current_process().name\r\n config = config_create()\r\n Stream = config.get('Log', 'Log1')\r\n Tweet = config.get('Log', 'Log2')\r\n OverallLog = config.get('Log', 'Log3')\r\n \r\n uscore = '_'\r\n txtn = '.txt'\r\n StreamL = uscore +Stream+ txtn\r\n TweetL = uscore +Tweet+ txtn\r\n OverallLogL = OverallLog+txtn\r\n \r\n \r\n \r\n name = multiprocessing.current_process().name\r\n StreamFileName = 
time.strftime(\"%b_%d_%y\")+StreamL\r\n TweetFileName = time.strftime(\"%b_%d_%y\")+TweetL\r\n config.set('Latest_Log', 'currentstreamlog',StreamFileName)\r\n config.set('Latest_Log', 'currenttweetlog',TweetFileName)\r\n config.set('Latest_Log', 'overalllog',OverallLogL)\r\n \r\n with open('botconfig.ini', 'w') as x:\r\n config.write(x)\r\n if os.path.isfile(StreamFileName) is False:\r\n open(StreamFileName, 'w')\r\n \r\n if os.path.isfile(OverallLogL) is False:\r\n open(OverallLogL, 'w')\r\n \r\n if os.path.isfile(TweetFileName) is False:\r\n twfile = open(TweetFileName, 'w')\r\n ## Edit this or comment to change first line entered upon\r\n ## File creation\r\n twfile.write('0 ComicTweetBot')\r\n #time.sleep(1)\r\n #Create_log()\r", "def log(msg=\"\"):\n print(msg)\n sys.stdout.flush()\n f = open(\"/target/testdriver.log\", \"a\")\n f.write('{:%Y-%m-%d %H:%M:%S.%s} :: '.format(datetime.datetime.now()))\n f.write(f\"{msg}\\n\")\n f.close()", "def _consolidate_mp_logs(self):\n for i, fn in enumerate(self.logfiles):\n with open(fn) as f:\n logger.info(\"Log from thread {0}:\\n{1}\".format(i, f.read()))\n open(fn, \"w\").write(\"\")", "def log(self, label, times, overlapping=False):\r\n self._timings.append(Timing(label, times, overlapping))", "def log_schedule(self):\n self.logger.log_schedule(self.params.schedule)", "def log(text):\n print \"%s: %s\" % (str(datetime.datetime.now()), text)", "def writeToLog(self, type, line):\r\n self._log.append((type, time(), line))\r\n if len(self._log[self._logIndex:]) >= self.config.get('logging', 'loginterval'):\r\n self.logFlush()\r\n return True\r\n return False", "def exec_time(pl, segment_info):\n\n execution_time = getattr(segment_info.get(\"args\", None), \"execution_time\", 0)\n\n if execution_time:\n return [{\"contents\": f\"{execution_time:.2f}s\", \"highlight_groups\": [\"exec_time\"]}]", "def write_debug_log(self, msg):\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n with open(self.debug_log, 'a+') as logfile:\n logfile.write(\"%s: %s\\n\" % (now, msg))", "def _log(self, lvl, msg):\n log.log(lvl, \"Proc[{0}] : {1}\".format(self.name, msg))", "def SetTimestampLogging(new_timestamp=True):\n global _log_time\n _log_time = new_timestamp", "def log(self):\n logs = TaskLog.objects.filter(task=self)\n seconds = 0\n\n for log in logs:\n seconds += log.seconds\n\n return str(datetime.timedelta(seconds=seconds))", "def debug_started(self, command):\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n lines = [\n \"%s\\n\" % (\"*\" * self.line_lenght),\n \"Command: %s\\n\" % command,\n \"DateTime: %s\\n\" % now,\n \"%s\\n\" % (\"*\" * self.line_lenght)\n ]\n\n with open(self.debug_log, 'a+') as logfile:\n logfile.writelines(lines)", "def set_times(self, p, f):\n self._dot_print_time = p\n self._dot_feed_time = f", "def main(argv) :\n\n # default\n logfilename = ''\n debug = 0\n\n try:\n opts, args = getopt.getopt(argv, \"\", [\"help\", \"debug\", \"logfile=\"])\n except getopt.GetoptError:\n print main.__doc__\n sys.exit(2)\n\n # check command line parameter\n for opt, arg in opts :\n if opt == \"--help\" :\n print main.__doc__\n sys.exit()\n elif opt == \"--debug\" :\n debug = 1\n elif opt == \"--logfile\" :\n logfilename = arg\n\n if logfilename == '' :\n print main.__doc__\n sys.exit(2)\n \n # instantiate dictionaries and counters\n timeEvent = 0\n timeEventCount = 0\n timeModuleDict = {}\n timeModuleCount = {}\n\n\n for filename in glob.glob(logfilename) :\n try:\n logfile = open(filename)\n except IOError:\n print ''\n print 'Could not 
open logfile: ',logfilename\n print main.__doc__\n sys.exit(2)\n\n line = logfile.readline()\n tempeventtime = 0\n addedmoduletime = 0\n while line :\n line = line.strip()\n if line.count('TimeEvent>') > 0 :\n if debug :\n print 'TimeEvent line:',line\n linearray = line.split()\n try:\n tempeventtime = float(linearray[-1])\n if tempeventtime < 1000000000. :\n timeEvent += tempeventtime\n timeEventCount += 1\n except ValueError:\n print 'line:',line,'could not be used to extract time of the event'\n if line.count('TimeModule>') > 0 :\n if debug :\n print 'TimeModule line:',line\n try:\n linearray = line.split()\n if len(linearray) == 6 :\n temptime = float(linearray[-1])\n addedmoduletime += temptime\n tempmodule = linearray[-3]\n if tempmodule in timeModuleDict.keys() :\n timeModuleDict[tempmodule] += temptime\n else :\n timeModuleDict[tempmodule] = temptime\n if tempmodule in timeModuleCount.keys() :\n timeModuleCount[tempmodule] += 1\n else :\n timeModuleCount[tempmodule] = 1\n except ValueError:\n print 'line:',line,'could not be used to extract time of a module'\n line = logfile.readline()\n if tempeventtime != 0 :\n tempeventtime = 0\n addedmoduletime = 0\n\n for module in timeModuleDict.keys() :\n print 'module: %30s time: %7.5f percentage: %03.1f' % (module, timeModuleDict[module]/timeModuleCount[module], (timeModuleDict[module]/timeModuleCount[module])/(timeEvent/timeEventCount)*100)\n\n print 'events: %30d time: %7.5f' % (timeEventCount, timeEvent/timeEventCount)\n\n # calculate time per event for RS and Ckf\n\n rsmodules = ['roadSearchSeeds','rawRoadSearchClouds','cleanRoadSearchClouds','rsTrackCandidates','rsWithMaterialTracks']\n ckfmodules = ['globalMixedSeeds','ckfTrackCandidates','ctfWithMaterialTracks']\n\n rsTimePerEvent = 0.\n for rsmodule in rsmodules :\n if rsmodule in timeModuleDict.keys() :\n rsTimePerEvent += timeModuleDict[rsmodule]/timeModuleCount[rsmodule]\n\n ckfTimePerEvent = 0.\n for ckfmodule in ckfmodules :\n if ckfmodule in timeModuleDict.keys() :\n ckfTimePerEvent += timeModuleDict[ckfmodule]/timeModuleCount[ckfmodule]\n\n print ''\n if rsTimePerEvent != 0 :\n print 'module: %30s time: %7.5f percentage: %03.1f' % ('RS', rsTimePerEvent, rsTimePerEvent/(timeEvent/timeEventCount)*100)\n if ckfTimePerEvent != 0 :\n print 'module: %30s time: %7.5f percentage: %03.1f' % ('CKF', ckfTimePerEvent, ckfTimePerEvent/(timeEvent/timeEventCount)*100)\n\n # column for overview table\n print ''\n print 'Column for overview table, orderer after:'\n print 'globalMixedSeeds ckfTrackCandidates ctfWithMaterialTracks roadSearchSeeds rawRoadSearchClouds cleanRoadSearchClouds rsTrackCandidates rsWithMaterialTracks'\n print ''\n module = 'globalMixedSeeds'\n print timeModuleDict[module]/timeModuleCount[module]\n module = 'ckfTrackCandidates'\n print timeModuleDict[module]/timeModuleCount[module]\n module = 'ctfWithMaterialTracks'\n print timeModuleDict[module]/timeModuleCount[module]\n module = 'roadSearchSeeds'\n print timeModuleDict[module]/timeModuleCount[module]\n module = 'rawRoadSearchClouds'\n print timeModuleDict[module]/timeModuleCount[module]\n module = 'cleanRoadSearchClouds'\n print timeModuleDict[module]/timeModuleCount[module]\n module = 'rsTrackCandidates'\n print timeModuleDict[module]/timeModuleCount[module]\n module = 'rsWithMaterialTracks'\n print timeModuleDict[module]/timeModuleCount[module]", "def qaPrint(log, message):\n # current date and time as string + message. 
example: [Oct 25 01:52:33.000001] TC1 - Passed\n log_message = getCurTime(\"[%b %d %H:%M:%S.%f]\") + \" \" + message\n # prints log_message\n print log_message\n # writes message to a log file\n log.write(log_message + \"\\n\")", "def logger(start_time, file):\n with open(file, \"a\") as text:\n text.write(\"\"\"\n\n Current date and time: {}\n Program ran in {} seconds.\n \"\"\".format(datetime.datetime.now(), time.process_time() - start_time))\n\n return 'hello'", "def log(content):\n\n now = datetime.datetime.now().strftime(\"%c\")\n now_time = time.time()\n # msg_last = '{} - {: >5.1f} seconds - {}'.format(now, now_time - TIME_LAST, content)\n\n if Logger._time_last is not None:\n msg_last = Logger.human_seconds(now_time - Logger._time_last)\n else:\n msg_last = ' ' * 13\n\n msgs = [now, msg_last, content]\n\n msg = \" │ \".join(msgs)\n\n msg_lines = [\"─\" * len(content) for content in msgs]\n\n msg_top = \"─┬─\".join(msg_lines)\n msg_lower = \"─┴─\".join(msg_lines)\n\n print(\" ┌─{}─┐\".format(msg_top))\n print(\" │ {} │\".format(msg))\n print(\" └─{}─┘\".format(msg_lower))\n\n Logger._time_last = time.time()", "def log(self, i):\n if i % 100 == 0:\n sys.stderr.write(\"process \" + str(os.getpid()) + \": at timestep \" + str(i) + \"\\n\")", "def save_last_build_processed_timestamp(log_file):\n with open(log_file, \"w\") as process_file:\n process_file.write(str(time.time()))", "def log_data(self):\n\n self.check_dir()\n with open(self.log_file, \"a\") as logger_file:\n logger_file.write(\"{}, {}\\n\".format(self.time, self.msg))", "def on_start(self):\r\n self.log()", "def timer_callback(self):\n # There're 5 logger-level in ROS 2 get_logger() System.\n # Try out and watch whats difference.\n self.get_logger().debug(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().info(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().warn(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().error(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().fatal(f'==== Hello ROS 2 : {self.count}====')\n\n self.count += 1", "def _update_cmd_time_info(self, end=False):\n time_stamp = time.time()\n time_passed = time_stamp - self._start_time\n if end:\n docs_proc_now = self._docs_processed % self._file_write_threshhold\n if docs_proc_now == 0:\n msg = ('Written {} documents to file in total. '\n 'Time passed: {:2f}')\n print(msg.format(self._docs_processed, time_passed))\n else:\n msg = ('Writing {} documents to file. '\n 'Written {} documents to file in total. '\n 'Time passed: {:2f}')\n print(msg.format(\n docs_proc_now, self._docs_processed, time_passed))\n else:\n msg = ('Writing {} documents to file. '\n 'Written {} documents to file in total. 
'\n 'Time passed: {:2f}')\n print(msg.format(self._file_write_threshhold,\n self._docs_processed, time_passed))", "def writeToLogFile(self, event):\n outPutStr = '{:013}'.format(0)\n logOutPutStr = outPutStr + '\\t' + '{:.2f}'.format (time ()) + '\\t' + event + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ')\n printOutPutStr = outPutStr + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ') + '\\t' + event\n print (printOutPutStr)\n if self.logFP is not None:\n self.logFP.write(logOutPutStr + '\\n')\n self.logFP.flush()", "def process_log_line(self, line):\n int_map = self.int_map\n timestamp = line[0:26]\n if len(timestamp) >= 26:\n msg = {}\n try:\n # %Y-%m-%d %H:%M:%S.%f - 2017-06-27 13:46:10.048844\n day = int_map[timestamp[8:10]]\n hour = int_map[timestamp[11:13]]\n minute = int_map[timestamp[14:16]]\n second = int_map[timestamp[17:19]]\n usecond = int_map[timestamp[20:22]] * 10000 + \\\n int_map[timestamp[22:24]] * 100 + int_map[timestamp[24:26]]\n event_time = (hour * 3600.0 + minute * 60.0 + second) + (usecond / 1000000)\n if day == self.start_day:\n elapsed = event_time - self.start_time\n else:\n elapsed = event_time + (float(3600 * 24) - self.start_time)\n msg['timestamp'] = elapsed\n if msg['timestamp'] >= 0:\n offset = line.find(']: ', 32)\n if offset >= 0:\n try:\n thread = line[34:offset]\n separator = thread.find(':')\n if separator >= 0:\n thread = thread[separator + 1:].strip()\n msg['thread'] = thread\n msg['level'] = line[offset + 3:offset + 4]\n msg_start = line.find(' ', offset + 5)\n if msg_start >= 0:\n msg['category'] = line[offset + 5:msg_start]\n msg['message'] = line[msg_start + 1:]\n if msg['category'] == 'nsHttp':\n if msg['thread'] == 'Main Thread':\n self.main_thread_http_entry(msg)\n elif msg['thread'] == 'Socket Thread':\n self.socket_thread_http_entry(msg)\n elif msg['category'] == 'nsSocketTransport':\n self.socket_transport_entry(msg)\n elif msg['category'] == 'nsHostResolver':\n self.dns_entry(msg)\n except Exception:\n logging.exception('Error processing log line')\n except Exception:\n pass", "def time(self, start_time):\n \n TIME_LIST.append((time.time() - start_time))\n print(\"--- %s seconds ---\" % (time.time() - start_time))", "def _add_log_data(self, data):\n self.solver._notify_new_log(data)\n if self.log_enabled:\n if self.log_print:\n write_checking_unicode_errors(self.log_output, data)\n self.log_output.flush()\n if self.log_data is not None:\n self.log_data.append(data)\n # Update statistics\n self.process_infos.incr(CpoProcessInfos.TOTAL_LOG_DATA_SIZE, len(data))", "def check_time(start, message):\n\n logger.info(\" {} -> took {}\".format(message, clock() - start))", "def modlog_msg(self, msg: str) -> str:\n return '`[{0.hour:02d}:{0.minute:02d}]` {1}'.format(datetime.datetime.utcnow(), msg)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def log(self, event_cls, *args, **kw):\n args = list(args)\n args.append(self.time)\n args.append(self)\n args.append(self.worldview.locality_copy())\n self.diary.log(self.time, event_cls(*args, **kw))", "def logWork(self, id, logData):\n\t\tnow = datetime.datetime.now()\n\t\ttimeText = now.strftime(LOG_DATE_FORMAT)\n\n\t\tlogToEdit = self.LOGS.get(id).get(\"log\")\n\t\t#If inside this case and there is already a log entry for this time\n\t\talreadyEntryForThisTime = False\n\t\tfor entry in logToEdit:\n\t\t\tif timeText == entry[0]:\n\t\t\t\tentry[1] += logData\n\t\t\t\talreadyEntryForThisTime = 
True\n\n\t\tif not alreadyEntryForThisTime:\n\t\t\tlogToEdit.append([timeText, logData])\n\t\t\tself.logByDate(now.strftime(CASE_DATE_FORMAT), id)\n\n\t\tself.save_logs(\"c\")\n\t\tself.autoCommit()\n\t\treturn 0", "def OnProcessTimer(self, event):\r\n\r\n # This event runs only when a compilation process is alive\r\n # When the process finishes (or dies), we simply stop the timer\r\n if self.process is not None:\r\n # Defer to the Process class the message scanning\r\n self.process.HandleProcessMessages()", "def print_time(self):\n now = time.time()\n running_time = now - self.start_time\n print('\\nCurrent session running time: ' + str(running_time) + 's')\n\n total = self.get_running_time() + running_time\n print('Total running time: ' + str(total) + 's')\n self.set_running_time(total)", "def saveToLogFile(self, msg):\n path = os.path.join(self.parent.progpath, \"logfile.txt\")\n fo = open(path, 'a')\n # prefix with current date and time from now variable\n msg = \"\\n#{0}\\n\".format(datetime.datetime.now()) + msg\n fo.write(msg)\n fo.close()", "def saveToLogFile(self, msg):\n path = os.path.join(self.parent.progpath, \"logfile.txt\")\n fo = open(path, 'a')\n # prefix with current date and time from now variable\n msg = \"\\n#{0}\\n\".format(datetime.datetime.now()) + msg\n fo.write(msg)\n fo.close()", "def add_time(t):\n\n times.append(t)\n\n # update number display to show real time\n number_display.time = t\n number_display.update()\n\n # generate new scramble and update scramble_image\n new_scramble = generate_scramble(int(settings['puzzle']),\n int(settings['scramble-length']))\n scrambles.append(new_scramble)\n scramble_image.clear()\n scramble_image.chars = char(new_scramble)\n\n ao5, ao12 = update_stats()\n\n with open(session_file.string, 'a') as f:\n if len(times) == 1:\n f.write(f'{add_zero(t)}\\t{ao5}\\t{ao12}\\t{new_scramble}')\n else:\n f.write(f'\\n{add_zero(t)}\\t{ao5}\\t{ao12}\\t{new_scramble}')", "def log(self):\n f = open(self.log_dir + 'parsed.log', 'a')\n try:\n # Write: local time | CurrentCost \"time\" | id | temp/C | power/W \n f.write(\"%s\\t%s\\t%s\\t%s\\t%s\\n\" \n % (self.ts('now'), self.ts('cc'), self.id, self.temp, self.watts))\n finally:\n f.close()", "def logDebug(self, text):\n time = datetime.now().strftime(\"%H:%M:%S \")\n self.log(time + \"(DBG):\\t\", text)", "def record_time(times, enabled, *args):\n if not enabled:\n yield\n else:\n start = time.time()\n yield\n end = time.time()\n times.append((' '.join(args), start, end))", "def log_cpu(level: int = INFO, msg: str = \"\", *args, **kwargs) -> None:\n if msg:\n DefaultLogger.log(level, \"%s: %s\", msg, microgp4_process_time())\n else:\n DefaultLogger.log(level, \"%s\", msg, microgp4_process_time())", "def InsertLog():", "def recordLog(project, status, memo):\n path = getPath(project)\n log = open(path, 'a')\n writer = csv.writer(log, lineterminator='\\n')\n writer.writerow((time.time(), status, memo))\n log.close()\n if status == 'a':\n print(\"Tracking your time on \" + project)\n if status == 's':\n print(\"Tracking suspended on \" + project)\n if status == 't':\n print(\"Time shifted on \" + project)\n if not path == '.sourglass':\n store = open(os.path.join(basepath, 'last'), 'w')\n store.write(project)\n store.close", "def add_to_error_log(message):\n f = open(Filenames.ERROR_LOG, \"a\")\n f.write((\"------------- %s --------------\\n\" % time.ctime()) + message)\n f.close()", "def timedebug(self, function, message):\n ##TODO timing is off by one!\n now = datetime.datetime.now()\n 
print(message, end=\" \")\n function()\n delta = now - self.lasttime\n delta = delta.total_seconds()\n print(\"this took: \" + str(delta) + \" seconds\")\n self.lasttime = datetime.datetime.now()", "def add(self, message, time):\n if message not in self.results.keys():\n self.results[message] = [time]\n\n self.results[message].append(time)", "def set_log_time(enable_time=False):\n THE_LOGGER.set_formatter(enable_time)", "def outputlogMessage(message):\n global logfile\n timestr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime() )\n outstr = timestr +': '+ message\n print(outstr)\n f=open(logfile,'a')\n f.writelines(outstr+'\\n')\n f.close()", "def print_log(msg):\n print(\"[{}] {}\".format(datetime.now(), msg))", "def time_stamp(self, line):\n time_delta = datetime.now() - self._start_time\n return '(' + ':'.join(str(time_delta).split(':')[1:]) + ') ' + line", "def update_log(self, message):\n self.LogOutput_Field.appendPlainText(message)" ]
[ "0.67774904", "0.6688818", "0.6468294", "0.64525807", "0.6431555", "0.6324461", "0.6312171", "0.6251729", "0.6245165", "0.62316775", "0.6187182", "0.61647475", "0.6160086", "0.61188847", "0.6091051", "0.60706383", "0.6057694", "0.60288566", "0.60258675", "0.60083884", "0.60078716", "0.5989666", "0.5983083", "0.59767365", "0.59348774", "0.59277207", "0.5915972", "0.59032565", "0.5900044", "0.58637893", "0.58627445", "0.5851981", "0.5842129", "0.5838131", "0.5824232", "0.5802807", "0.5771361", "0.57648194", "0.5761163", "0.5749871", "0.57427955", "0.5737419", "0.5734398", "0.57234865", "0.5715195", "0.5713573", "0.57000947", "0.5698829", "0.56960094", "0.5675872", "0.5664181", "0.56612325", "0.5659197", "0.56410414", "0.563974", "0.56253254", "0.5619084", "0.5617193", "0.5613601", "0.56125015", "0.56001693", "0.5578171", "0.5575221", "0.5575144", "0.55747634", "0.5553991", "0.55288774", "0.5522862", "0.5503612", "0.5501814", "0.5499234", "0.54987264", "0.54873437", "0.5485765", "0.5485707", "0.54782057", "0.54779494", "0.5474302", "0.5471055", "0.5470338", "0.54636973", "0.54564565", "0.5456165", "0.5452123", "0.5452123", "0.5450497", "0.5444398", "0.5432589", "0.54233915", "0.5420843", "0.5419752", "0.5413629", "0.54119754", "0.54113907", "0.5389572", "0.53844255", "0.5382233", "0.5380458", "0.5359922", "0.535669" ]
0.66905326
1
Flushes the buffer storage in the console before pexpect.
def flush(self): if self.out is not None: self.out.flush()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _flush_buffer(self):\n self.pexpect_child.logfile = None\n flushedStuff = \"\"\n while self.pexpect_child.expect([pexpect.TIMEOUT, r\".+\"], timeout=1):\n flushedStuff += self.pexpect_child.match.group(0)\n self.pexpect_child.logfile = self.log_file", "def _flush():\n libtcod.console_flush()", "def wipe(self):\n self.console.clear()", "def flush(self):\n self.old_stdout.flush()", "def __flush(self):\n self.session.buffer=''\n self.session.before=''", "def _flush_buffer(self):\n pass", "def clear():\n sys.stdout.write('\\033[2J')\n sys.stdout.write('\\033[H')\n sys.stdout.flush()", "def flush(self):\n self.old_stderr.flush()", "def flush(self) -> None:\n if not self._buffer:\n # Only flush stdout buffer. (It could be that Python still has\n # something in its buffer. -- We want to be sure to print that in\n # the correct color.)\n self.stdout.flush()\n return\n\n data = \"\".join(self._buffer)\n\n if _DEBUG_RENDER_OUTPUT:\n self.LOG.write((\"%r\" % data).encode(\"utf-8\") + b\"\\n\")\n self.LOG.flush()\n\n # Print characters one by one. This appears to be the best solution\n # in order to avoid traces of vertical lines when the completion\n # menu disappears.\n for b in data:\n written = DWORD()\n\n retval = windll.kernel32.WriteConsoleW(\n self.hconsole, b, 1, byref(written), None\n )\n assert retval != 0\n\n self._buffer = []", "def clear_console(cls):\n print('\\n' * 200)", "def clear_console():\n import os\n clear = lambda: os.system('cls')\n clear()\n return None", "def flush_output():\n if len(buffered) == 1:\n code.add_line(\"append_result(%s)\" % buffered[0])\n elif len(buffered) > 1:\n code.add_line(\"extend_result([%s])\" % \", \".join(buffered))\n del buffered[:]", "def _flush(self):\n pass", "def flushBuffer():\n\tif settings.dry_run or settings.force_sync == True:\n\t\treturn\n\tif settings.debug:\n\t\tsettings._counters['flush'] += 1\n\t\n\tsendData('NOOP', read=True, flush=True)", "def flush():\n None", "def _flush(self):", "def clear():\n # TODO: this should actually create a stack of output so I can test each screen\n lines.clear()", "def _clear_window(self):\n self.buf[:] = []", "def clear():\n clear_output()", "def clear_console():\n os.system('cls' if os.name == 'nt' else 'clear')", "def consoleClear() -> None:\n\t\tLogging._console.clear()", "def screen_clear():\n from subprocess import call\n import os\n call('clear' if os.name == 'posix' else 'cls')", "def reset(self):\n\t\tself.buf = []", "def clear_console():\n os.system('cls' if os.name == 'nt' else \"clear\")", "def flush(self):\r\n # this flush method is needed for python 3 compatibility.\r\n # this handles the flush command by doing nothing.\r\n pass", "def flush_buffer(self):\n if self._pipeline:\n self._pipeline.execute()", "def clear_screen():\n\tprint(\"\\033[H\\033[J\")", "def flush(self):", "def flush(self):", "def flush(self):", "def flush(self):", "def flush(self):", "def flush(self):", "def flush(self):\n pass", "def flush(self):\n pass", "def flush(self):\n pass", "def flush(self):\n pass", "def flush(self):\n if self.stderr:\n sys.__stderr__.flush()\n else:\n sys.__stdout__.flush()", "def clear_screen():\n print('\\n' * TERMINAL_HEIGHT)", "def flush(self):\n if not self.handle:\n raise RuntimeError(\"No handle to flush!\")\n\n before = []\n while True:\n LOGGER.debug(f\"{self}flushing...\")\n try:\n self.handle.expect(self.prompt, timeout=1)\n before.extend(self.handle.before.decode().splitlines())\n except pexpect.TIMEOUT:\n LOGGER.info(\"Got timeout, done flushing...\")\n break\n\n return 
before", "def clear_screen(self):\n logging.info(\"Clearing screen for console_ip %s port %s\", self.console_ip, self.act_port)\n console = pexpect.spawn('telnet %s %s'%(self.console_ip,self.act_port))\n console.logfile = self.log\n console.send('\\003')\n console.close()\n\n if self.stnd_port and self.stnd_console_ip:\n logging.info(\"Clearing screen for console_ip %s port %s\", self.stnd_console_ip, self.stnd_port)\n console = pexpect.spawn('telnet %s %s'%(self.stnd_console_ip,self.stnd_port))\n console.logfile = self.log\n console.send('\\003')\n console.close()\n logging.info('done clear screen') \n return", "def print_flush(msg):\n print(msg, end='')\n sys.stdout.flush()", "def _flush(self):\n tempbuf = self.databuffer\n self.databuffer = []\n self.database.runInteraction(self._executemany, tempbuf)", "def _done_sending():\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def clearBuffer(self):\n self.buffer = b''\n self._linesReceived = []\n self._lineBuffer = b''", "def clear():\n sub.call('cls', shell=True)", "def clear_output():\n print(\"\\n\" * 20)", "def purge_buffer(self):\n self._buf = b''", "def __flush_input(self):\n self.device.reset_input_buffer()", "def flush(self) -> None:\r\n if self.file is not None:\r\n self.file.flush()\r\n\r\n self.stdout.flush()", "def reset(self):\n while (True):\n index = self.expect([pexpect.TIMEOUT, SHELL_PROMPT], timeout=1)\n if index == 0:\n break", "def _clear(self):\n self._calculate_bar_width()\n # sys.stdout.write(\"\\033[K\")\n # to fix bug when logging to console\n print(\" \" * self._tw, end='\\r')\n # sys.stdout.write(\"\\033[K\")", "def clear_screen():\n os.system('cls')", "def flush(self):\n self._send()", "def tearDown(self):\n sys.stdout = self.stdout\n if self.oldColumns is not None:\n os.environ['COLUMNS'] = self.oldColumns", "def limpa_console() -> None:\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')", "def _done_sending(self):\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def flush(self) -> None:\n pass", "def __del__(self):\n\n if sys.stdout.isatty():\n sys.stdout.write(self.term.normal)\n sys.stdout.write(self.term.move(self.linecount, 0))\n sys.stdout.flush()", "def reset_output_buffer(self):\n self._main_buffer = BufferUtils.create_buffer()\n self._secondary_buffer = BufferUtils.create_buffer()", "def _clear_screen():\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')", "def flush_cmds(self):\r\n if self.select_cmd is not None:\r\n self.do_cmd()", "def _buffer_flush(self, event):\n self._out_buffer_lock.acquire()\n _out_buffer = self._out_buffer\n self._out_buffer = []\n self._out_buffer_lock.release()\n self.write(''.join(_out_buffer), refresh=False)", "def clear_terminal(self):\n os.system('clear')", "def _do_clear(self):\n print()\n print()\n console.rule()\n os.system(\"cls\" if os.name in (\"nt\", \"dos\") else \"clear\")\n self.history_manager.remove_items(n=1)", "def flush(self):\n return", "async def flush(self):\n\t\tpass", "def clearScreen():\n pass", "def cls():\n os.system(\"cls\")\n os.system(\"clear\") # clear the moniter function", "def console_clear(wait_time):\n\n sleep(wait_time) # Produces a delay based on input passed through console_clear()\n\n # These commands only work in the terminal\n try:\n system(\"cls\") # Clears console for users on Windows operating system\n\n except:\n system(\"clear\") # Clears console for users on Mac and Linux operating systems", "def flush(self, mode=None):\r\n pass", "def clear(self):\n 
sys.stderr.write(self._FRONT+self._CLEAR)", "def clear():\n print(chr(27) + \"[2J\")\n print(\"\\033[0;0H\")", "def clear_screen(self):\n if self.x:\n self.move_cur_up((self.prev_x+1)/self.get_col_width())\n self.clear_line(self.get_num_lines(self.prev_lines) +\n self.get_num_lines(['>' + self.prev_str + ' ']))\n #time.sleep(2)", "def flush_tx_buffer(self):\n pass", "def clearConsole():\r\n\r\n command = 'clear' # command for console clearing\r\n if os.name in ('nt', 'dos'): # if the machine is running on Windows, then use cls\r\n command = 'cls'\r\n os.system(command) # othen than Windows, use clear\r", "def _flush(self):\n self._d = {}", "def clear_screen():\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')", "def clear_screen() -> None:\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")", "def resetTerminal():\n sys.stdout.write('\\n\\n') # add a few blank lines\n sys.stdout.flush()\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')", "def clear_screen(self,):\n sys.stdout.write('\\033[2J')\n sys.stdout.write('\\033[H')\n sys.stdout.flush()\n print \"\\n\\t\\tDo To - %s\\n\\n\" % self.user", "def flush(self):\n pass", "def clear_screen():\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear_screen():\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear_player_screen():\n print('\\n' * get_terminal_size().lines, end='')\n\n return None", "def test_console_output_size_flush(self):\n self.render_config_template(\n console={\n \"pretty\": \"false\",\n \"bulk_max_size\": 1,\n }\n )\n\n proc = self.start_beat(logging_args=[\"-e\"])\n self.wait_until(lambda: self.log_contains(\"Mockbeat is alive\"),\n max_timeout=2)\n proc.check_kill_and_wait()", "def clear_input_buffer(ser):\n sys.stderr.write(\"\\n\")\n LOGGER.warning(\"***** Unprocessed input buffer content *****\")\n sys.stderr.write(\"\\n\")\n capture = \"\"\n rx = 1\n while rx:\n rx = ser.read(ser.in_waiting or 1)\n if rx:\n capture += rx.decode(errors=\"replace\")\n if capture != \"\":\n LOGGER.info(capture.strip())\n sys.stderr.write(\"\\n\")\n LOGGER.warning(\"*\" * 44)\n sys.stderr.write(\"\\n\")\n ser.reset_input_buffer()", "def flush_buffer(self):\n t1 = time.time()\n while True:\n t2 = time.time()\n if t2-t1>0.03:\n break\n t1 = t2\n self.cam.read()", "def flush_on_term(self):\n self.influx.flush()", "def clear():\n try:\n try:\n # For Macs and Linux\n os.system('clear');\n except:\n # For Windows REPORTED BUG: Sometimes does not work on 64 bit Windows\n os.system('cls');\n except:\n # If nothing else works, a hacky, non optimal solution\n for i in range(50): print(\"\")", "def clear_screen(self):\n self.ClearAll()\n self.new_prompt(self.input_prompt_template.substitute(\n number=(self.last_result['number'] + 1)))", "def _flush(self):\n for x in xrange(0,10):\n self._eng.iterate()", "def clear_screen():\n os.system(\"cls\" if os.name == 'nt' else 'clear')", "def flush(self):\n self.write(self.ASCII_FF)", "def cls(self):\n os.system('clear')", "def flush(self):\r\n lines = []\r\n\r\n # Append new buffer to last stored line\r\n total_buffer = self.lines[-1] + self.buffer\r\n\r\n width = self.size[COLS]\r\n for block in total_buffer.split('\\n'):\r\n # If the line fits, just add it as is. Otherwise use the textwrap\r\n # tool to wrap. 
We don't use textwrap on every line because it will strip trailing spaces\r\n if len(block) < width:\r\n lines.append(block)\r\n else:\r\n for line in textwrap.wrap(block,width-1): # Formatting works better with a 1-character buffer on right\r\n lines.append(line)\r\n\r\n first_line=True\r\n for line in lines:\r\n if not first_line:\r\n self._add_row()\r\n first_line=False\r\n self.lines[self.cursor_row] = line\r\n \r\n if total_buffer.endswith('\\n') and first_line:\r\n self.add_row()\r\n self.buffer=''\r\n self.draw()", "def clear():\r\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear_screen():\n print('now printing 25 lines')\n for _ in range(2):\n nine_lines()\n for _ in range(2):\n three_lines()\n new_line()\n return", "def reset(self):\n self.at_cmd('Z')", "def clear_screen():\r\n if os.name in ('nt','dos'):\r\n os.system(\"cls\")\r\n elif os.name in ('linux','osx','posix'):\r\n os.system(\"clear\")\r\n else:\r\n print(\"\\n\") * 120", "def _Flush(self):\n self._ignore_width = False\n if self._fill:\n self._out.write('\\n')\n self._blank = False\n self._fill = 0" ]
[ "0.7308612", "0.7289075", "0.7045256", "0.6931466", "0.6834416", "0.6816106", "0.6713305", "0.66353625", "0.6627915", "0.65735334", "0.65354127", "0.64686835", "0.6459456", "0.6449715", "0.6398675", "0.6393837", "0.6390865", "0.6362393", "0.6325738", "0.6310141", "0.6291992", "0.6291771", "0.62693614", "0.6254413", "0.6237783", "0.6217993", "0.6216942", "0.61265165", "0.61265165", "0.61265165", "0.61265165", "0.61265165", "0.61265165", "0.6117929", "0.6117929", "0.6117929", "0.6117929", "0.60963875", "0.6080119", "0.60755265", "0.60701746", "0.604213", "0.60307", "0.60201335", "0.6019614", "0.6010845", "0.6009288", "0.6006174", "0.59943277", "0.59924525", "0.5988196", "0.5985925", "0.5977759", "0.59767413", "0.5975299", "0.5963056", "0.5954869", "0.59445876", "0.59285754", "0.5911804", "0.59093815", "0.5893125", "0.58888996", "0.5880834", "0.5875964", "0.5864838", "0.58632785", "0.5856933", "0.58369917", "0.58283746", "0.582624", "0.5825889", "0.58255136", "0.58184725", "0.58109224", "0.57987535", "0.57976174", "0.5793134", "0.57919574", "0.5791681", "0.57909495", "0.5782801", "0.576356", "0.576356", "0.5763555", "0.576024", "0.5760142", "0.57497126", "0.5747303", "0.574396", "0.5741837", "0.5740656", "0.5737883", "0.57314396", "0.5730537", "0.57244587", "0.5720022", "0.5717155", "0.57102805", "0.5708817", "0.57078767" ]
0.0
-1
Add and write log messages to a combined list.
def create_file_logs(config, board, tests_to_run, logger): combined_list = [] def add_to_combined_list(log, name, combined_list=combined_list): for line in log.split("\r\n"): try: if line == "": continue if line.startswith("\n"): line = line[1:] if line.startswith(" ["): line = line[1:] ts, text = line.split("]", 1) timestamp = float(ts[1:-1]) else: text = line timestamp = 0.0 combined_list.append( {"time": timestamp, "text": str(text), "name": name} ) except Exception as error: logger.error(error) logger.debug(f"Failed to parse log line = {repr(line)}") idx = 1 console_combined = [] for console in board.consoles: with open(os.path.join(config.output_dir, f"console-{idx}.log"), "w") as clog: clog.write(console.log) add_to_combined_list(console.log, f"console-{idx}") add_to_combined_list(console.log_calls, f"console-{idx}") add_to_combined_list(console.log, "", console_combined) idx = idx + 1 def write_combined_log(combined_list, fname): with open(os.path.join(config.output_dir, fname), "w") as clog: for e in combined_list: try: if e["name"] == "": clog.write(f"[{e['time']}]{repr(e['text'])}\r\n") else: clog.write(f"{e['name']}: [{e['time']}] {repr(e['text'])}\n") except Exception as error: logger.error(error) logger.debug(f"failed to parse line: {repr(e)}") import operator console_combined.sort(key=operator.itemgetter("time")) write_combined_log(console_combined, "console-combined.log") for device in config.devices: with open(os.path.join(config.output_dir, device + ".log"), "w") as clog: d = getattr(config, device) if hasattr(d, "log"): clog.write(d.log) add_to_combined_list(d.log, device) add_to_combined_list(d.log_calls, device) for test in tests_to_run: if hasattr(test, "log") and test.log != "": with open( os.path.join(config.output_dir, f"{test.__class__.__name__}.log"), "w" ) as clog: clog.write(test.log) if hasattr(test, "log_calls"): add_to_combined_list(test.log_calls, test.__class__.__name__) combined_list.sort(key=operator.itemgetter("time")) write_combined_log(combined_list, "all.log")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_list(self,list_,level='INFO'):\r\n logger.write('\\n'.join(self._log_list(list_)),level)", "def CombineLogFiles(list_of_lists, logger):\n cur_device_log = ['']\n for cur_file, cur_file_lines in list_of_lists:\n # Ignore files with just the logcat header\n if len(cur_file_lines) < 2:\n continue\n common_index = 0\n # Skip this step if list just has empty string\n if len(cur_device_log) > 1:\n try:\n line = cur_device_log[-1]\n # Used to make sure we only splice on a timestamped line\n if re.match(r'^\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d{3} ', line):\n common_index = cur_file_lines.index(line)\n else:\n logger.warning('splice error - no timestamp in \"%s\"?', line.strip())\n except ValueError:\n # The last line was valid but wasn't found in the next file\n cur_device_log += ['***** POSSIBLE INCOMPLETE LOGCAT *****']\n logger.info('Unable to splice %s. Incomplete logcat?', cur_file)\n\n cur_device_log += ['*'*30 + ' %s' % cur_file]\n cur_device_log.extend(cur_file_lines[common_index:])\n\n return cur_device_log", "def write(self):\n with open(\"log.txt\", 'w') as f:\n for message in self.message_list:\n f.write(message + \"\\n\")", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def log(self, *lst):\n self.print2file(self.logfile, self.debug, True, *lst)\n if 'Error' in '\\n'.join([str(x) for x in lst]):\n self.caught_error = '\\n'.join([str(x) for x in lst])", "def _consolidate_mp_logs(self):\n for i, fn in enumerate(self.logfiles):\n with open(fn) as f:\n logger.info(\"Log from thread {0}:\\n{1}\".format(i, f.read()))\n open(fn, \"w\").write(\"\")", "def log_list(msg, items, level=logging.INFO):\n\n max_len = 1024 - len(msg % \"\")\n cur_len = 0\n cur_items = list()\n\n while [ i[:max_len] for i in items]:\n i = items.pop()\n if cur_len + len(i) + 2 > max_len:\n logging.info(msg % \", \".join(cur_items))\n cur_len = 0\n cur_items = list()\n\n cur_items.append(i)\n cur_len += len(i) + 2\n\n logging.log(level, msg % \", \".join(cur_items))", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def append_to_logfile(self):\n with open(self.path, \"a+\") as f:\n for item in self.logs:\n f.write(item)\n self.logs.clear()", "def add_game_log(self, game_log: list) -> None:\n self.game_logs.append(game_log)", "def merge_logs(self):\n ourlog = LogData()\n for l in self.data_set:\n ourlog.entries = ourlog.entries + l.entries\n ourlog.sort_time()\n self.finalized_data = ourlog", "def log(listfile,line): \r\n\tif listfile != 0:\r\n\t\t#print line\r\n\t\tif line.startswith(\"ERROR\") or line.startswith(\"WARNING\"):\r\n\t\t\tprint line\r\n\r\n\t\tif not line.endswith('\\n'):\r\n\t\t\tline += '\\n'\r\n\t\t#listfile.write(line)\r\n\t\tlogfile.append(line)\r\n\t\tif len(logfile) >= 1: #:\r\n\t\t\tfor i in range(len(logfile)):\r\n\t\t\t\tlistfile.write(logfile[i])\r\n\t\t\tlogfile[:] = []\r\n\telse:\r\n\t\tprint line", "def append_log_message(self, text):\n self._new_logs.append(text)", "def get_and_append_log_events(self):\n\n log_events = self.get_log_events()\n\n # Write log events to file.\n if len(log_events) > 0:\n self.write_log_events(log_events)", "def send_append_entries(self):\n\n assert self.role == Role.Leader\n\n for peer in self.peers:\n prev_index = self.next_index[peer]\n\n self.set_rpc_timeout(peer)\n\n # After the rpc, the node will have the entire log\n self.next_index[peer] = len(self.log)\n\n 
self.orchestrator.send_to_broker(\n AppendEntries(\n self.name,\n [peer],\n self.current_term,\n self.leader,\n self.next_index[peer] - 1,\n self.log.term(self.next_index[peer] - 1),\n self.log.entries[prev_index : self.next_index[peer]],\n self.commit_index,\n )\n )", "def do_append(self, level, msg, *args, **kwargs):\n record = self.log.makeRecord(\"poller\", level, \"(fn)\", 0, msg, args, None, \"()\", None)\n s = self.buf_formatter.format(record)\n self.buf.append(s)", "def write(self, msg):\n self._current_stream.write(msg)\n if type(self._log) == list:\n self._log.append(msg)", "def process_log_files(source_name, log_file_list):\n\n result_list = []\n out_fname = create_out_fname(source_name, suffix='_sum', ext=\".csv\")\n\n for log_file in log_file_list:\n result_list += process_log(log_file)\n\n if len(result_list) == 0:\n warning(\"Found no lammps log data to process from: {}\".format(source_name))\n else:\n write_csv(result_list, out_fname, LOG_FIELDNAMES, extrasaction=\"ignore\")", "def combining_alternate_element(log):\n log_list = []\n count = 0\n temp = \"\"\n for content in log:\n if count % 2 == 0:\n temp = content\n else:\n temp = temp + content\n log_list.append(temp)\n temp = \"\"\n count += 1\n return log_list", "def _log_append(self, msg):\n\t\tp = self._edit.get_buffer()\n\t\tstart,end = p.get_bounds()\n\t\tp.insert(end, msg)\n\t\tself._trunc_lines()\n\t\tself._edit.scroll_to_iter(p.get_end_iter(), 0.0)", "def add_external_logs(self, logs) -> None:\n if logs is None:\n return\n\n if type(logs) == list:\n data: List[LogModel] = []\n for log in logs:\n if isinstance(log, LogModel):\n data.append(log)\n\n if data.__len__() > 0:\n self.__logs.extend(logs)\n elif isinstance(logs, LogModel):\n self.__logs.append(logs)", "def write_logs():\n global log_queue\n global maxcount\n\n # Process the first set\n for count, msg in enumerate(log_queue):\n\n loginfo = {}\n print msg\n for entry in msg['log'].keys():\n\n loginfo[entry] = {}\n\n for key in msg['log'][entry].keys():\n loginfo[entry][key] = msg['log'][entry][key]\n\n with open(msg['info'], 'r') as f:\n metadata = json.load(f)\n\n try:\n metadata[msg['run']]\n\n except(KeyError):\n metadata[msg['run']] = {}\n\n if msg['cmd'] == 'write':\n for key in loginfo.keys():\n metadata[msg['run']][key] = loginfo[key]\n\n elif msg['cmd'] == 'reset':\n metadata[msg['run']] = {}\n\n with open(msg['info'], 'w') as f:\n f.write(json.dumps(metadata, indent=2, sort_keys=True))\n\n log_queue.remove(msg)\n\n if count > maxcount:\n break", "def merge_messages(self, msg_list):\n return self.recv_instance.empty_msg.join(msg_list)", "def logStats(self, msg):\n self.logLinesStats.append(msg)", "def add_to_logs(self, msg):\n\n\t\tself.setRowCount(self.__cnt + 1)\n\n\t\tself.setItem(self.__cnt, 0, QTableWidgetItem(msg['time']))\n\t\tself.setItem(self.__cnt, 1, QTableWidgetItem(msg['src']))\n\t\tself.setItem(self.__cnt, 2, QTableWidgetItem(msg['dest']))\n\t\tself.setItem(self.__cnt, 3, QTableWidgetItem(msg['proto']))\n\t\tself.setItem(self.__cnt, 4, QTableWidgetItem(msg['facility']))\n\t\tself.setItem(self.__cnt, 5, QTableWidgetItem(msg['severity']))\n\t\tself.setItem(self.__cnt, 6, QTableWidgetItem('YES' if(msg['is_threat']) else 'NO'))\n\t\tself.setItem(self.__cnt, 7, QTableWidgetItem(msg['msg']))\n\n\t\t# increment count\n\t\tself.__cnt += 1\n\n\t\tself.another_msg.emit(msg['desc'])\n\n\t\tif msg['is_threat']:\n\t\t\tself.new_alert.emit(msg['src'][:msg['src'].rfind(':')])\n\t\t\tself.another_anom_msg.emit()", "def _logToFile(logsLst, 
resultJSON=None, logFile=\"logFile.txt\"):\n if not LOGGING_TO_FILE: return\n with open(logFile, \"a+\") as file:\n message = \"\\n\".join(logsLst)\n file.write(\"------------------Logging--------------------\\n\")\n file.write(str(datetime.datetime.now()) + \"\\n\")\n # file.write(str(datetime.datetime.utcnow()) + \"\\n\")\n file.write(\"---------------------------------------------\\n\")\n file.write(message + \"\\n\")\n if resultJSON is not None:\n file.write(\"resulting JSON after comparison:\\n\")\n file.write(resultJSON)\n file.write(\"\\n\")", "def add_log(self,txt):\n try:\n now=datetime.datetime.now()\n new_item=QtWidgets.QListWidgetItem(now.strftime('%Y/%m/%d %H:%M:%S')+\": \"+txt)\n self.ui.logger_list.addItem(new_item)\n if self.h5saver.h5_file.isopen:\n self.h5saver.append(self.h5saver.logger_array, now.strftime('%Y/%m/%d %H:%M:%S')+\": \"+txt)\n\n except:\n pass", "def insertall_message(self, text):\n return self.insertall([{'logging': text}])", "def test_006_log_append(self):\n __test = chess_storage.ChessStorage()\n __test_data = list(range(consts.TEST_LIST_LENGHT))\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __dir_game_log = os.path.join(__dir_game_saves, \"log\")\n __test_logname = consts.TEST_FILENAME + \"_log.txt\"\n __dir_game_logfile = os.path.join(__dir_game_log, __test_logname)\n # pylint: disable = protected-access\n __log_test = __test._ChessStorage__log_append(__dir_game_logfile, __test_data)\n # pylint: enable = protected-access\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])", "def write(lst):\n # TODO", "def do_handle_log(self, workunit, level, *msg_elements):\r\n pass", "def __updateLog(self):\n try:\n while True:\n self.__logWindow.addRow({\"log\": self.__logs.pop()})\n except IndexError:\n # All log messages read\n pass", "def add_new_logs(self, new_logs=''):\n old_logs = self.log_frame.get(\"1.0\", END)\n self.log_frame.delete(\"1.0\", END)\n\n final_logs = new_logs.__str__() + \"\\n\" + old_logs\n self.log_frame.insert(END, final_logs)", "def mix():\n\n with open(\"output.log\", 'w') as outfile:\n log_file = [container.logs(timestamps=True).split(\",\") for container in\n CLIENT.containers.list()]\n for c_log in log_file:\n outfile.write(\" \".join(map(str, c_log)) + '\\n')\n click.secho('Log output of each container has been written to output.log.',\n bg='blue', fg='white')", "def _write_log(self, log_data):\n # for data in log_data:\n # self.log_file.write(\"{}\\n\".format(data).encode('utf-8'))\n self.log_file.write(\"{}\\n\".format(log_data).encode('utf-8'))\n self.log_file.flush()", "def update(self, log_ids: list, dest='logs'):\n self.logs_updated = []\n for i in range(len(log_ids)):\n self.logs_updated.append(dict(\n filename=self.logs[i].get('filename'),\n data=log_ids[i],\n filesize=len(self.logs[i].get('data')) if self.logs[i].get('data') else 0,\n ))\n\n for item in self.items:\n item[dest] = self.logs_updated", "def CHANGE_appendAll(self):\r\n # Separate new files to be loaded\r\n FoI = list(set(self.listenFiles)-set(self.logFiles))\r\n FoI.sort()\r\n for file in FoI:\r\n print(\"Loading {}\".format(file))\r\n filePath = os.path.join(self.listenDir, file)\r\n\r\n try:\r\n (newProj, newAngle) = self.read_projection_image(filePath)\r\n\r\n self.logTiltAngles = np.append(self.logTiltAngles, newAngle)\r\n\r\n # Invert Contrast for BF-TEM\r\n if self.invert:\r\n newProj *= -1\r\n\r\n newProj = self.background_subtract(newProj)\r\n\r\n # Apply Center of Mass (if 
selected)\r\n if self.alignMethod == 'CoM':\r\n newProj = self.center_of_mass_align(newProj)\r\n\r\n # Account for Python's disdain for AxAx1 arrays\r\n # (compresses to 2D)\r\n if (len(self.logTiltSeries0) == 0):\r\n dataDim = np.shape(newProj)\r\n self.logTiltSeries0 = np.zeros([dataDim[0], dataDim[1], 1])\r\n self.logTiltSeries0[:, :, 0] = newProj\r\n self.wbp = wbp.WBP(dataDim[0], dataDim[1], 1)\r\n else:\r\n self.logTiltSeries0 = np.dstack((self.logTiltSeries0,\r\n newProj))\r\n\r\n self.logFiles = np.append(self.logFiles, file)\r\n\r\n except Exception:\r\n print('Could not read : {}, will proceed with reconstruction\\\r\n and re-download on next pass'.format(file))\r\n break\r\n\r\n # Apply Cross-Correlation after reading images (if selected)\r\n if self.alignMethod == 'xcor':\r\n self.logTiltSeries = self.xcorr_align(self.logTiltSeries0)\r\n # update tilt angles and sinogram\r\n self.wbp.set_tilt_series(self.logTiltSeries, self.logTiltAngles)\r\n # re-center tilt axis\r\n self.logTiltSeries = self.shift_tilt_axis(self.logTiltSeries,\r\n self.logTiltAngles)\r\n else:\r\n self.logTiltSeries = self.logTiltSeries0", "def _writeLog (self, item):\n self.log.write (item.encode (self.logEncoding))\n self.log.write (b'\\n')\n # instead of locking, check we’re running in the main thread\n if self.log.tell () > self.maxLogSize and \\\n threading.current_thread () is threading.main_thread ():\n self._flushLogEntries ()", "def fLOG (*l, **p) :\n path_add = p.get (\"LogPathAdd\", [] )\n\n lock = p.get(\"Lock\", None)\n if lock is not None : sys.hal_log_values[\"Lock\"] = lock\n \n if \"LogFile\" in p and \"LogPath\" in p : init (p [\"LogPath\"], p [\"LogFile\"])\n elif \"LogFile\" in p : init (filename = p [\"LogFile\"], path_add = path_add)\n elif \"LogPath\" in p : init (path = p [\"LogPath\"], path_add = path_add)\n \n def myprint(s): print(s)\n \n if \"OutputPrint\" in p : \n Print (p [\"OutputPrint\"])\n \n if \"LogFile\" in p :\n logfile = GetLogFile(True)\n \n dt = datetime.datetime (2009,1,1).now ()\n if len (l) > 0 :\n def _str_process (s) :\n if isinstance (s, str) : return s\n elif isinstance(s, bytes) : return s.decode(\"utf8\")\n else : \n try:\n return str (s)\n except Exception as e :\n raise Exception(\"unable to convert s into string: type(s)=\" + str(type(s))) from e\n \n message = str (dt).split (\".\")[0] + \" \" + \" \".join ( [_str_process(s) for s in l ] ) + sys.hal_log_values [\"__log_file_sep\"]\n \n if sys.hal_log_values [\"__log_display\"] : \n try :\n myprint (message.strip (\"\\r\\n\"))\n except UnicodeEncodeError :\n try :\n myprint (\"\\n\".join (repr (message.strip (\"\\r\\n\")).split (\"\\\\n\")))\n except UnicodeEncodeError :\n try :\n rr = repr (message.strip (\"\\r\\n\")).split (\"\\\\n\")\n for r in rr :\n myprint (r.encode(\"utf8\"))\n except UnicodeEncodeError :\n myprint (\"look error in log file\")\n GetLogFile ().write (message)\n st = \" \"\n else :\n st = str (dt).split (\".\")[0] + \" \"\n \n for k,v in p.items () :\n if k == \"OutputPrint\" and v : continue\n message = st + \"%s = %s%s\" % (str (k), str (v), sys.hal_log_values [\"__log_file_sep\"])\n if \"INNER JOIN\" in message :\n break\n GetLogFile ().write (message)\n if sys.hal_log_values [\"__log_display\"] : \n try :\n myprint (message.strip (\"\\r\\n\"))\n except UnicodeEncodeError :\n myprint (\"\\n\".join (repr (message.strip (\"\\r\\n\")).split (\"\\\\n\")))\n GetLogFile ().flush ()", "def add_loggers(self, loggers, stdout_level=logging.NOTSET, file_level=logging.NOTSET):\n self._enabled = 
True\n self._loggers.append((loggers, _sanitize_level(stdout_level), _sanitize_level(file_level)))", "def _add_log_data(self, data):\n self.solver._notify_new_log(data)\n if self.log_enabled:\n if self.log_print:\n write_checking_unicode_errors(self.log_output, data)\n self.log_output.flush()\n if self.log_data is not None:\n self.log_data.append(data)\n # Update statistics\n self.process_infos.incr(CpoProcessInfos.TOTAL_LOG_DATA_SIZE, len(data))", "def add_row_to_logfile(output_dir, *args):\n with open(os.path.join(output_dir, LOGFILE), 'a') as f:\n args_as_strings = map(str, args)\n f.write('\\n' + ', '.join(args_as_strings))", "def publish_list(self, messages: list) -> None:", "def publish_list(self, messages: list) -> None:", "def collect_log_output(activity_log, result):\n output = activity_log.get('emittedOutput')\n if output:\n result.append(output['_value'])\n else:\n subsections = activity_log.get('subsections')\n if subsections:\n for subsection in subsections['_values']:\n collect_log_output(subsection, result)", "def appendMsg(self, msg):\n # self.message += msg\n theTime = self.logger.mytime()\n # self.message += theTime + \" \" + str( msg )\n self.message = str(self.message) + str(theTime) + \" \" + str(msg)", "def log(self, workunit, level, *msg_elements):\r\n with self._lock:\r\n for reporter in self._reporters.values():\r\n reporter.handle_log(workunit, level, *msg_elements)", "def log_request(ingredient_list):\n logPath = getLogPath()\n ingredient_string = \"\".join([str(i) for i in ingredient_list])\n with open(logPath, 'a') as log:\n log.write(ingredient_string)\n log.write(\"\\n\")", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def after_rotate_logs(msg, config, checklist):\n return []", "def add_loggers(self):\n pass", "def _process_new_logged_info(self):\n wiz = self.wizard()\n\n while self._new_logs:\n # We're going to check the progress and chapter on each iteration\n # so we catch anything that was sent out way without it having to\n # wait for us to get through adding all of the log messages.\n if self._progress or self._chapter:\n self.__progress_on_main_thread(self._chapter, self._progress)\n self._chapter = None\n self._progress = None\n\n wiz.ui.progress_output.appendHtml(self._new_logs.pop(0))\n cursor = wiz.ui.progress_output.textCursor()\n cursor.movePosition(cursor.End)\n cursor.movePosition(cursor.StartOfLine)\n wiz.ui.progress_output.setTextCursor(cursor)\n wiz.ui.progress_output.ensureCursorVisible()\n\n # One last check of the progress and chapter. 
We've been checking it above\n # in the loop that's adding logs, but we might not have had any of those to\n # process this time.\n if self._progress or self._chapter:\n self.__progress_on_main_thread(self._chapter, self._progress)\n self._chapter = None\n self._progress = None", "def add_log_entry(self, log_entry):\n self.log_entries.append(log_entry)", "def append(self, log: str, action: str):\n self.logs.append(log)\n self.actions.append(action)\n self.debug.update(pip_version=get_pip_version(name=self.name))", "def append(self, items):\n self.__add__(items)", "def send(self):\n for output in self.outputs:\n output.send(self.logger)", "def __writeToFile(self, filePath, lst): \n \n if not self.outDir is None: \n filePath = os.path.join(self.outDir, filePath) \n \n open(filePath,'a').writelines(lst)", "def add_log(self):\n self.stack = []\n diff = self.diff(self.original, self.doc)\n entry = {\"_id\": utils.get_iuid(),\n \"doctype\": constants.DOCTYPE_LOG,\n \"docid\": self.doc[\"_id\"],\n \"diff\": diff,\n \"timestamp\": utils.get_time()}\n self.modify_log_entry(entry)\n if hasattr(flask.g, \"current_user\") and flask.g.current_user:\n entry[\"username\"] = flask.g.current_user[\"username\"]\n else:\n entry[\"username\"] = None\n if flask.has_request_context():\n entry[\"remote_addr\"] = str(flask.request.remote_addr)\n entry[\"user_agent\"] = str(flask.request.user_agent)\n else:\n entry[\"remote_addr\"] = None\n entry[\"user_agent\"] = os.path.basename(sys.argv[0])\n flask.g.db.put(entry)", "def _combine(self, results_list):\n pass", "def log_data(self):\n\n self.check_dir()\n with open(self.log_file, \"a\") as logger_file:\n logger_file.write(\"{}, {}\\n\".format(self.time, self.msg))", "def compose_logfile_lines(start_time, db_format_time, blast_time, option_lines,\r\n formatdb_cmd, blast_results, options, all_ids,\r\n hit_ids, removed_hit_ids,\r\n included_ids, DEBUG):\r\n\r\n log_lines = []\r\n log_lines.append(\"Sequence exclusion analysis run on %s\" % strftime(\"%c\"))\r\n log_lines.append(\r\n \"Formatting subject database took %2.f seconds\" %\r\n (db_format_time))\r\n log_lines.append(\r\n \"BLAST search took %2.f minute(s)\" %\r\n ((blast_time) / 60.0))\r\n log_lines.append(\r\n \"Total analysis completed in %2.f minute(s)\" %\r\n ((time() - start_time) / 60.0))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Options |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.extend(option_lines)\r\n log_lines.append(\"Subject database formatted with command: %s\"\r\n % formatdb_cmd)\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Results |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\"BLAST results above e-value threshold:\")\r\n log_lines.append(\r\n \"\\t\".join([\"Query id\", \"Subject id\", \"percent identity\", \"alignment length\",\r\n \"mismatches\", \"gap openings\", \"q. start\", \"q. end\", \"s. start\", \"s. 
end\", \"e-value\", \"bit score\"]))\r\n\r\n for line in blast_results:\r\n if line.startswith(\"#\"):\r\n continue\r\n else:\r\n log_lines.append(line)\r\n\r\n log_lines.append(\r\n \"Hits matching e-value and percent alignment filter: %s\" %\r\n ','.join(sorted(hit_ids)))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Summary |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\"Input query sequences: %i\" % len(all_ids))\r\n log_lines.append(\r\n \"Query hits from BLAST: %i\" %\r\n (len(hit_ids) + len(removed_hit_ids)))\r\n log_lines.append(\r\n \"Query hits from BLAST lacking minimal percent alignment: %i\" %\r\n len(removed_hit_ids))\r\n log_lines.append(\"Final hits: %i\" % len(hit_ids))\r\n log_lines.append(\"Output screened sequences: %i\" % len(included_ids))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Output |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\r\n \"Writing excluded sequences (hits matching filters) to: %s\" %\r\n join(options.outputdir, \"matching.fna\"))\r\n log_lines.append(\r\n \"Writing screened sequences (excluding hits matching filters) to: %s\" %\r\n join(options.outputdir, \"non-matching.fna\"))\r\n log_lines.append(\r\n \"Writing raw BLAST results to: %s\" %\r\n join(options.outputdir, 'raw_blast_results.txt'))\r\n\r\n # format for printing\r\n revised_log_lines = []\r\n for line in log_lines:\r\n line = line + \"\\n\"\r\n revised_log_lines.append(line)\r\n\r\n if DEBUG:\r\n for line in log_lines:\r\n print line\r\n\r\n return revised_log_lines", "def log_msg(cls, format_string, format_field_objects=None):\n if cls._logger is None:\n cls._logger = []\n assert isinstance(format_string, str)\n if format_field_objects is None:\n format_field_objects = []\n elif isinstance(format_field_objects, core.Term):\n format_field_objects = [format_field_objects]\n assert isinstance(format_string, collections.abc.Sequence)\n cls._logger.append((format_string, format_field_objects))", "def add_to_queue(self, items):\n\n for i in items:\n self.r.rpush(self.joblist, i)", "def consolidate_messages(self, msg):", "def write_entries(self, entries):\n for entry in entries:\n self.write(entry)", "def add_entries(self, entries):\n\n # If entries is a single entry, put in list for processing below\n if isinstance(entries, str):\n entries = [entries]\n\n for entry in entries:\n #Check if entries already exist\n try:\n self.entries[entry]\n # Entry doesn't already exist\n except KeyError:\n # Validate that entry is either an attribute of owner or in SystemLogEntries\n if not entry in SystemLogEntries and not entry in self.owner.__dict__:\n raise LogError(\"{0} is not an attribute of {1} or in SystemLogEntries\".\n format(entry, self.owner.name))\n # Add entry to self.entries dict\n self.entries[entry] = []\n\n # Entry exists\n else:\n # Issue warning and ignore\n warnings.warn(\"{0} is already an entry in log for {1}; use \\\"log_entry\\\" to add a value\".\n format(entry,self.owner.name))", "def write(self, *items):\n for item in items:\n if type(item) == list:\n for _item in item:\n self.write(_item)\n elif type(item) == str:\n self.include(item)\n else:\n item.write(self)", "def log_batch(self, measurements):\n for m in measurements:\n logger.info(m)\n self.log(metric=m.metric, value=m.value, source=m.source, timestamp=m.timestamp)", "def logs_add_message(self, level, message):\n pass", "def publish_list(self, messages: list) -> None:\n if __debug__:\n logger.warning(\n \"WARN: Unnecessary call on publish on 
FileDistroStream\"\n )", "def write(self, message):\n\n self.__thread_local_ctx.write_count += 1\n\n try:\n if self.__thread_local_ctx.write_count > 1:\n return\n\n # For each line in the buffer ending with \\n, output that line to\n # the logger\n msgs = (self.buffer + message).split('\\n')\n self.buffer = msgs.pop(-1)\n for m in msgs:\n self.log_orig(m, echo=True)\n finally:\n self.__thread_local_ctx.write_count -= 1", "def __call__(self, message, *tags):\n self.append(message, *tags)", "def log_result(self, result_list, directory=None):\n if not result_list:\n result = \"No cleaning was required\"\n else:\n if self.dry_run:\n file_verb = (\"file\" if len(result_list) == 1 else \"files\") + \" would be\"\n else:\n file_verb = \"file was\" if len(result_list) == 1 else \"files were\"\n directory_verb = f\" to directory {directory}\" if directory else \"\"\n result = f\"{len(result_list)} {file_verb} moved{directory_verb}:\\n{', '.join(result_list)}\"\n print(result)", "def system_log_separation(log_type, log):\n satellite_log = []\n if log_type == \"satellite-log\":\n satellite_log = re.split(r\"(\\[DEBUG \\d*-\\d*-\\d*\\w\\d*:\\d*:\\d* \\w*\\])\", log)\n elif (log_type == \"messages\") or (log_type == \"candelpin-log\"):\n satellite_log = re.split(r\"(\\w{1,3} \\d{1,2} \\d{1,2}:\\d{1,2}:\\d{1,2})\", log)\n if satellite_log:\n satellite_log.pop(0)\n log = Common.combining_alternate_element(satellite_log)\n return log", "def on_check_append_messages_toggled(self, checkBox):\n\t\tself.logView.set_append_messages(checkBox.get_active())\n\t\tself.emit('append-messages-changed')", "def do_handle_log(self, workunit, level, *msg_elements):\r\n content = '<span class=\"%s\">%s</span>' % \\\r\n (HtmlReporter._log_level_css_map[level], self._render_message(*msg_elements))\r\n\r\n # Generate some javascript that appends the content to the workunit's div.\r\n args = {\r\n 'content_id': uuid.uuid4(), # Identifies this content.\r\n 'workunit_id': workunit.id, # The workunit this reporting content belongs to.\r\n 'content': content, # The content to append.\r\n }\r\n s = self._renderer.render_name('append_to_workunit', args)\r\n\r\n # Emit that javascript to the main report body.\r\n self._emit(s)", "def _log_message(self, message):\n\t\tif message not in self._logged_messages:\n\t\t\twith open(self._logfile, \"a\") as f:\n\t\t\t\tf.write(message + \"\\n\")\n\t\tself._logged_messages.append(message)", "def logRecordHandler(self, logrecord):\n logrecords = self._logrecords\n logrecords.append(logrecord)\n if len(logrecords) > self._queuesize:\n logrecords.pop(0)\n self._logRecordsTotal += 1", "def write_to_log(self, log_file, log_data):\n with open(self.gamelogs_path + log_file, 'a') as f:\n writer = csv.writer(f)\n writer.writerow(log_data)\n f.close()", "def handle_log(self, workunit, level, *msg_elements):\r\n if level <= self.settings.log_level:\r\n self.do_handle_log(workunit, level, *msg_elements)", "def flush(self):\n\n # group buffered LogRecords by their source code line and message\n grouping_dict = OrderedDict()\n for record in self.buffer:\n identifying_tuple = (record.module,\n record.lineno,\n getattr(record, 'filename_', None),\n record.getMessage())\n if identifying_tuple not in grouping_dict:\n grouping_dict[identifying_tuple] = []\n grouping_dict[identifying_tuple].append(record)\n\n aggregated_buffer = []\n # for each list of same-message records\n for record_list in grouping_dict.values():\n # make a dict to collect the fields for the aggregate record\n aggregated_field_dict = {}\n # 
for each field found in (the first of) the records\n for field_name in record_list[0].__dict__:\n # collect the values found for this field across the records.\n # Use the keys of an OrderedDict, as OrderedSet is for some\n # reason not to be found in the Python standard library.\n field_values = OrderedDict((record.__dict__[field_name], None)\n for record in record_list)\n # if this field has the same value in all records\n if len(field_values) == 1:\n # use that value in the new dict\n aggregated_field_dict[field_name] = field_values.popitem()[0]\n else:\n # set a <field>_list field instead\n aggregated_field_dict[field_name + '_list'] = \\\n list(field_values.keys())\n\n # add a new log record with these fields tot the output buffer\n aggregated_buffer.append(\n logging.makeLogRecord(aggregated_field_dict))\n\n # replace the buffer with the aggregated one and flush\n self.buffer = aggregated_buffer\n super(CollapsingLogMessageHandler, self).flush()", "def construct_merged_log_message(url, revnums):\n messages = ['']\n longest_sep = ''\n for r in revnums.sorted():\n message = get_commit_log(url, r)\n if message:\n message = re.sub(r'(\\r\\n|\\r|\\n)', \"\\n\", message)\n message = rstrip(message, \"\\n\") + \"\\n\"\n messages.append(prefix_lines(LOG_LINE_PREFIX, message))\n for match in LOG_SEPARATOR_RE.findall(message):\n sep = match[1]\n if len(sep) > len(longest_sep):\n longest_sep = sep\n\n longest_sep += LOG_SEPARATOR + \"\\n\"\n messages.append('')\n return longest_sep.join(messages)", "def log_error(self, error): \n # add the error to the list\n self.error_log.append(error)", "def do_handle_log(self, workunit, level, *msg_elements):\r\n # If the element is a (msg, detail) pair, we ignore the detail. There's no\r\n # useful way to display it on the console.\r\n elements = [e if isinstance(e, basestring) else e[0] for e in msg_elements]\r\n msg = '\\n' + ''.join(elements)\r\n if self.settings.color:\r\n msg = _maybe_color(msg)\r\n self._emit(msg)", "def addLog(self, userId):\n user = [str(userId), 0]\n self.requestLog.append(user)", "def getLogs():", "def getLogs():", "def log_elements(self, log_elements):\n\n self._log_elements = log_elements", "def write_list(self, data, delimiter=\"\\n\"):\n if self.check_list_exists(data):\n with opened_w_error(self.filename, self.lock, \"a\") as (f, err):\n if err:\n logging.error(\"File '%s'. 
Error: %s\", self.filename, err)\n else:\n f.write(delimiter.join(data))\n else:\n logging.error(\"Data isn't list or it's not contains elements\")", "def create_file_logs(config, board, tests_to_run, logger):\n combined_list = []\n\n def add_to_combined_list(log, name, combined_list=combined_list):\n for line in log.split(\"\\r\\n\"):\n try:\n if line == \"\":\n continue\n if line.startswith(\"\\n\"):\n line = line[1:]\n if line.startswith(\" [\"):\n line = line[1:]\n ts, text = line.split(\"]\", 1)\n timestamp = float(ts[1:-1])\n else:\n text = line\n timestamp = 0.0\n combined_list.append(\n {\"time\": timestamp, \"text\": str(text), \"name\": name}\n )\n except Exception as error:\n logger.error(error)\n logger.debug(\"Failed to parse log line = %s\" % repr(line))\n\n idx = 1\n console_combined = []\n for console in board.consoles:\n with open(os.path.join(config.output_dir, \"console-%s.log\" % idx), \"w\") as clog:\n clog.write(console.log)\n add_to_combined_list(console.log, \"console-%s\" % idx)\n add_to_combined_list(console.log_calls, \"console-%s\" % idx)\n add_to_combined_list(console.log, \"\", console_combined)\n idx = idx + 1\n\n def write_combined_log(combined_list, fname):\n with open(os.path.join(config.output_dir, fname), \"w\") as clog:\n for e in combined_list:\n try:\n if e[\"name\"] == \"\":\n clog.write(\"[%s]%s\\r\\n\" % (e[\"time\"], repr(e[\"text\"])))\n else:\n clog.write(\n \"%s: [%s] %s\\n\" % (e[\"name\"], e[\"time\"], repr(e[\"text\"]))\n )\n except Exception as error:\n logger.error(error)\n logger.debug(\"failed to parse line: %s\" % repr(e))\n\n import operator\n\n console_combined.sort(key=operator.itemgetter(\"time\"))\n write_combined_log(console_combined, \"console-combined.log\")\n\n for device in config.devices:\n with open(os.path.join(config.output_dir, device + \".log\"), \"w\") as clog:\n d = getattr(config, device)\n if hasattr(d, \"log\"):\n clog.write(d.log)\n add_to_combined_list(d.log, device)\n add_to_combined_list(d.log_calls, device)\n\n for test in tests_to_run:\n if hasattr(test, \"log\") and test.log != \"\":\n with open(\n os.path.join(config.output_dir, \"%s.log\" % test.__class__.__name__), \"w\"\n ) as clog:\n clog.write(test.log)\n if hasattr(test, \"log_calls\"):\n add_to_combined_list(test.log_calls, test.__class__.__name__)\n\n combined_list.sort(key=operator.itemgetter(\"time\"))\n write_combined_log(combined_list, \"all.log\")", "def save(*messages):\n data = Parser.parse_texts(*messages[1:])\n hour = time.strftime(\"_%H_%M_%S\")\n today = time.strftime(\"_%d_%m_%Y\")\n title = Parser.parse_text(messages[0])\n\n file = open(\"./logs/\"+threading.currentThread().getName()+today+\".log\",'a+')\n file.write(\"\\n==\"+title+hour+\"==\\n\")\n if type(data) is dict: #Dictionary with each value being a triplet. 
From get_all_items\n for key in data.keys():\n file.write(Parser.parse_text(key) + \" -> \"+ Parser.parse_text(str(data[key].x)) +\"\\n\")\n elif type(data) is list: #From get_item, market item, attribute listings\n for listing in data:\n file.write(str(listing.id)+\" - \"+str(listing.price/100)+\" euros\\n\")\n else: #plain text\n file.write(Parser.parse_text(data))\n file.write(\"=====================================\\n\")\n file.close()", "def addEntry(self, entry):\n \n with open(self.current_log, 'ab') as a:\n logAppender = csv.writer(a, delimiter=\"|\")\n logAppender.writerow(entry)", "def _log_added(*args):\n if len(args) > 0:\n fields = \"'\" + \"', '\".join(args) + \"'\"\n LOG.debug(\" Added fields to results: \" + fields)", "def log(self,str_msg):\n\n assert type(str_msg) in [str,list]\n if type(str_msg) is list:\n assert all([type(v) is str for v in str_msg])\n\n self.obj_log.write(m)\n if type(str_msg) is str:\n m = str_msg\n elif type(str_msg) is list:\n m = \"\\n\".join(str_msg)\n else:\n m = \"str_msg must be either be a str or a list of str\"\n raise TypeError(m)\n self.obj_log.write(m)", "def log(level, logger_name, *msgs):\n logger = logging.getLogger(logger_name)\n for msg in msgs:\n if (msg != ''):\n logger.log(level, msg)", "def set_append_messages(self, flag):\n\t\tself.checkAppendMessages.set_active(flag)", "def log(self, reward, action):\n self.logs.append([reward, action])", "def log(self, msg):\n logging.info(\"Logging Message\")\n ml = self.monk_logs\n today = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n ml.insert({today: {\"log\": msg,\n \"sentiment\": self.get_sentiment(msg),\n \"weather\": Weather.get_weather()}})", "def _log2mylog(self, msg):\n time_str = mod_time.strftime(\n \"%Y-%m-%d %H:%M:%S\", mod_time.localtime(mod_time.time())\n )\n msg = str(msg)\n content = \"%s [%s]\\n\" % (time_str, msg)\n fa = open(self.mylogfile, \"a\")\n fa.write(content)\n fa.close()", "def addAll(self, *args):\n pass" ]
[ "0.64969385", "0.6466005", "0.62025017", "0.6156893", "0.6059565", "0.6025947", "0.60147756", "0.5977898", "0.5976505", "0.5960277", "0.59393877", "0.5859047", "0.5806393", "0.57645357", "0.5670112", "0.56171393", "0.5605485", "0.55584806", "0.55563587", "0.5537255", "0.55362606", "0.55350035", "0.5527676", "0.5491615", "0.5473621", "0.54605955", "0.5432735", "0.54314625", "0.5415326", "0.5411068", "0.53654784", "0.5349575", "0.5324341", "0.52691734", "0.5255323", "0.5247512", "0.52412784", "0.5229811", "0.5211115", "0.51928437", "0.51743543", "0.5170199", "0.5166396", "0.5166396", "0.5161187", "0.5148984", "0.51409817", "0.5139346", "0.5131723", "0.5122912", "0.5122272", "0.5119042", "0.5109111", "0.51009804", "0.50953716", "0.5091301", "0.5088717", "0.50867116", "0.5085494", "0.5064002", "0.50617766", "0.5056612", "0.503947", "0.50378525", "0.50217134", "0.5006487", "0.5005284", "0.5003015", "0.5000505", "0.49991706", "0.49896955", "0.4988427", "0.49874824", "0.49862555", "0.49787185", "0.49732995", "0.4970847", "0.49684224", "0.49629992", "0.49618372", "0.49609205", "0.49602613", "0.4959267", "0.4957442", "0.49526122", "0.49494687", "0.49494687", "0.49450654", "0.49402928", "0.49392647", "0.4939189", "0.49390182", "0.493697", "0.49335948", "0.49296424", "0.49217808", "0.49169168", "0.49118596", "0.48985833", "0.48984164" ]
0.5024609
64
Set members based on inventory.
def _configure(self): from .topology import FieldBase Component._configure(self) mapBasis = { "simplex": FieldBase.SIMPLEX_BASIS, "tensor": FieldBase.TENSOR_BASIS, "default": FieldBase.DEFAULT_BASIS, } self.cellBasis = mapBasis[self.inventory.cellBasisStr] mapSpace = { "polynomial": FieldBase.POLYNOMIAL_SPACE, "point": FieldBase.POINT_SPACE, } self.feSpace = mapSpace[self.inventory.feSpaceStr] return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def members(self, members):\n\n self._members = members", "def members(self, members: object):\n\n self._members = members", "def members(self, items):\n pass", "def inventory_items(self, inventory_items):\n\n self._inventory_items = inventory_items", "def inventory(self, inventory):\n\n self._inventory = inventory", "def setHgMembers(self, members):\n self.huntGroup.setHgMembers(members)", "def setHgMembers(self, membersToAdd):\n self.members = membersToAdd", "def update_members_from_preferences(self, **parameters):\n super(Sequence, self).update_members_from_preferences(**parameters)\n\n for i, item in enumerate(self.items):\n para = parameters['item_{}'.format(i)]\n item.update_members_from_preferences(**para)", "def members(self, members: \"List[str]\"):\n self._attrs[\"members\"] = members", "def members(self, members: \"List[str]\"):\n self._attrs[\"members\"] = members", "def members(self, members: \"List[str]\"):\n self._attrs[\"members\"] = members", "def assign_members(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"assign_members\"), kwargs)", "def test_update_inventory(self):\n pass", "def _add_to_inv(self, block_):\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1", "def process_members(self, members):\n seq = (list, tuple, set)\n assert isinstance(members, seq), (f\"The members argument must be one\"\n f\"of '{seq}', found '{members}'.\")\n assert all([isinstance(member, dict) for member in members]), (\n f\"The members object must be a list of dicts, found {members}\")\n assert all([field in self.PROCESS_MEMBERS_FIELDS\n for member in members for field in member.keys()]), (\n f\"Invalid fields in dict, must have these keys \"\n f\"{self.PROCESS_MEMBERS_FIELDS}, members {members}\"\n )\n wanted_user_pks = [item['user'].pk for item in members]\n current_user_pks = [inst.user.pk for inst in self.memberships.all()]\n # Delete unwanted Membership objects.\n rem_user_pks = list(set(current_user_pks) - set(wanted_user_pks))\n self.memberships.select_related('user').filter(\n user__pk__in=rem_user_pks).delete()\n # Add new members.\n add_user_pks = list(set(wanted_user_pks) - set(current_user_pks))\n common_pks = list(set(wanted_user_pks) & set(current_user_pks))\n\n for item in members:\n if item['user'].pk in add_user_pks:\n # Create any new members.\n kwargs = {}\n kwargs['project'] = self\n kwargs['user'] = item['user']\n kwargs['role_text'] = item['role_text']\n obj = Membership(**kwargs)\n obj.save()\n elif item['user'].pk in common_pks:\n # Update any comment members.\n role = Membership.ROLE_MAP_REV[item['role_text']]\n self.memberships.filter(user=item['user']).update(role=role)", "def update_guild_members(name, server):\n url = base_wow + guild+\"/\"+ server+\"/\"+ name+\"?\"+ method + locale + api\n r = requests.get(url)\n data = r.json()\n guilde = data['name']\n for member in data[\"members\"]:\n add_member(guilde, member['character']['name'], member['rank'], member['character']['level'])", "def _configure(self):\n Values._configure(self)\n self.values = [self.inventory.one, self.inventory.two]\n return", "def load_inventory(self):\n for item in self.items:\n self.rooms[int(item.initial_room_id) - 1].inventory.add(item)", "def entityUpdates(self, *args):\n\t\tfor entity in self.members.values():\n\t\t\tentity.update(*args)", "def setUp(self):\n self.new_inv_item = ['1', 'Knife Set', 10, 'n', 'n']\n self.new_furn_item = ['2', 'Couch', 25, 'y', 'Cloth', 'L']\n self.new_elec_item = ['3', 
'Dryer', 100, 'n', 'y', 'Samsung', 12]", "def __init__(__self__, *,\n members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if members is not None:\n pulumi.set(__self__, \"members\", members)", "def __init__(self, pos, inventory=None):\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)", "def assign_crew(self, crew):\n\n for key, val in crew.items():\n self.crew_members[key] = val", "def test_setitem(self):\n\n vec = Vec3(4, 5, 6)\n\n # Set the values with __setitem__\n vec[0] = 14\n vec[1] = 15\n vec[2] = 16\n\n # Ensure the values got set.\n self.assertEqual(14, vec[0])\n self.assertEqual(15, vec[1])\n self.assertEqual(16, vec[2])", "def test_update_member(self):\r\n resource = 'member'\r\n cmd = member.UpdateMember(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'myname',\r\n '--tags', 'a', 'b'],\r\n {'name': 'myname', 'tags': ['a', 'b'], })", "def __init__(self, name, location, health):\n self.name = name\n self.location = location\n self.inventory = []\n self.weapon = []\n self.health = health", "def addToInventory(modList, item):\r\n modList.append(item)", "def add_members(self, members):\n self.__add_remove_members(members)", "def __init__(self, pos, inventory=None):\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {} # Changes to the world", "def add_to_inventory(self, item):\n\t\tif item in self.inventory:\n\t\t\tself.inventory[item] += 1\n\t\telse:\n\t\t\tself.inventory[item] = 1", "def _handle_member_chunk(self, members: list):\n if self._chunks_left >= 1:\n # We have a new chunk, so decrement the number left.\n self._chunks_left -= 1\n\n for member_data in members:\n id = int(member_data[\"user\"][\"id\"])\n if id in self._members:\n member_obj = self._members[id]\n else:\n member_obj = dt_member.Member(self._bot, **member_data)\n\n member_obj.nickname = member_data.get(\"nick\", member_obj.nickname)\n member_obj.guild_id = self.id\n\n self._members[member_obj.id] = member_obj", "def test_members_are_set(self):\n\n v = Vec3(5.0, 6.0, 7.0)\n\n self.assertEqual(5.0, v.x)\n self.assertEqual(6.0, v.y)\n self.assertEqual(7.0, v.z)", "def __init__(self, ctx) -> None:\n members = ctx.guild.members\n assert len(members) >= 4, \"Member count must be more than 4\"\n \n generator = randint(0, len(members) - 1)\n self.members = ctx.guild.members[generator:(generator + 4)]\n\n missing = (len(self.members) - 4) * -1\n for i in range(missing):\n self.members.append(members[i])\n del members\n self.ctx = ctx\n self.correct_order = randint(0, 3)\n self.alpha = list(\"ABCD\")", "def memberize(self, accounts, members):\n accounts.connect()\n accounts.verify_connection() # Pre-flight test of member database\n members.check_sanity()\n members.decrypt_and_verify() # Check that the member change document is trustable.\n\n accounts_not_current_members = self.add_or_update_accounts(accounts, members)\n self.make_accounts_non_members(accounts, accounts_not_current_members)", "def update_one_set_inventory(set_num):\n set_inv = reapi.pull_set_inventory(set_num)", "def give_item(self,item):\n self.inv[item.alias] = item.desc", "def member_list(self, member_list):\n\n self._member_list = member_list", "def add_to_inv(self, item):\n for obj in self.inv:\n if obj.name == item.name:\n self.inv[obj] += 1\n break\n else:\n self.inv[item] = 1", "async def _set(self, ctx: commands.Context, to: discord.Member, creds: SetParser):\r\n author = 
ctx.author\r\n currency = await bank.get_currency_name(ctx.guild)\r\n\r\n try:\r\n if creds.operation == \"deposit\":\r\n await bank.deposit_credits(to, creds.sum)\r\n msg = _(\"{author} added {num} {currency} to {user}'s account.\").format(\r\n author=author.display_name,\r\n num=humanize_number(creds.sum),\r\n currency=currency,\r\n user=to.display_name,\r\n )\r\n elif creds.operation == \"withdraw\":\r\n await bank.withdraw_credits(to, creds.sum)\r\n msg = _(\"{author} removed {num} {currency} from {user}'s account.\").format(\r\n author=author.display_name,\r\n num=humanize_number(creds.sum),\r\n currency=currency,\r\n user=to.display_name,\r\n )\r\n else:\r\n await bank.set_balance(to, creds.sum)\r\n msg = _(\"{author} set {user}'s account balance to {num} {currency}.\").format(\r\n author=author.display_name,\r\n num=humanize_number(creds.sum),\r\n currency=currency,\r\n user=to.display_name,\r\n )\r\n except (ValueError, errors.BalanceTooHigh) as e:\r\n await ctx.send(str(e))\r\n else:\r\n await ctx.send(msg)", "def __setitem__(self, key, val):\n self.members[key] = val\n pair = self.pair\n for i in range(key):\n pair = pair.cdr\n pair.car = val", "def do_put(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToStore = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n \r\n # Nice little easter egg :) \r\n if itemToStore == 'troll in bag':\r\n print(bcolors.start + \"You cannot put troll in bag, troll is a creature.\" + bcolors.end)\r\n return\r\n\r\n # find out if the player doesn't have that item\r\n if itemToStore not in invDescWords:\r\n print('You want to put \"%s\" in what?!' % (itemToStore))\r\n return\r\n \r\n\r\n # get the item name that the player's command describes\r\n item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n if item != None:\r\n print('You put %s. in the container.' 
% (worldItems[item][SHORTDESC]))\r\n inventory.remove(item) # remove from inventory\r\n worldRooms[location][ITEMINV].append(item) # add to the container\r", "def do_inventory(self, arg):\r\n\r\n if len(inventory) == 0:\r\n print('Inventory:\\n (nothing)')\r\n return\r\n\r\n # first get a count of each distinct item in the inventory\r\n itemCount = {}\r\n for item in inventory:\r\n if item in itemCount.keys():\r\n itemCount[item] += 1\r\n else:\r\n itemCount[item] = 1\r\n\r\n # get a list of inventory items with duplicates removed:\r\n print('Inventory:')\r\n for item in set(inventory):\r\n if itemCount[item] > 1:\r\n print(' %s (%s)' % (item, itemCount[item]))\r\n else:\r\n print(' ' + item)", "def inventory_id(self, inventory_id):\n\n self._inventory_id = inventory_id", "def _addProteins(self, proteinIds, containerNames):\n proteinIds = AUX.toList(proteinIds)\n for containerName in containerNames:\n proteinContainer = getattr(self, containerName)\n proteinContainer.update(proteinIds)", "async def mute(self, ctx: Context, members: commands.Greedy[discord.Member], reason=\"no reason\"):\n\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n member_display = []\n\n for i, member in enumerate(members):\n if role in member.roles:\n await ctx.send(f\"guild member `{member.display_name}` is already muted\", delete_after=8)\n del members[i]\n\n if role is None:\n permissions = discord.Permissions()\n permissions.change_nickname = True\n permissions.send_messages = False\n permissions.read_message_history = True\n role = await ctx.guild.create_role(name=\"Muted\", permissions=permissions)\n\n await self.set_perms(ctx.guild, role)\n\n for member in members:\n\n if await self.hiearchy_check(ctx, member):\n continue\n\n member_display.append(str(member))\n await member.add_roles(role, reason=reason)\n\n member_display = \", \".join(member_display)\n\n if not member_display:\n member_display = \"no one\"\n\n await ctx.send(f\"> {ctx.author.name} muted {member_display}\")", "def vitamins(self, vitamins: List[RecipeObjectNutrientsCalories]):\n\n self._vitamins = vitamins", "def set_ingredients(self, ingredients: [Ingredient]):\n self.ingredients = ingredients", "def test_add_item(self):\n self.inv.add_item(self.item_helmet)\n str_inventory = self.inv.pretty\n str_item = self.item_helmet.pretty\n\n self.rebuild_instance()\n str_unequipped = self.inv.unequipped[0].pretty\n\n assert str_inventory == self.inv.pretty\n assert str_item == str_unequipped", "def _update(self):\n path = \"/members/%s\" % self._dict['member_id']\n data = self.extract()\n if self._dict['member_status_id'] in (\n MemberStatus.Active, MemberStatus.Error, MemberStatus.OptOut):\n data['status_to'] = self._dict['member_status_id']\n if not self.account.adapter.put(path, data):\n raise ex.MemberUpdateError()", "def setContents(self, item):\n if item == None:\n self.pot.a(None, 0)\n else:\n self.pot.a(CraftMagicNumbers.getItem(item.getItemType()), item.getData())\n # PAIL: rename", "def populate_initial_inventory(self):\r\n\r\n weapons_file = open('initial-inventory.json', \"r\")\r\n json_data = json.loads(weapons_file.read())\r\n weapons_file.close()\r\n\r\n weapons = json_data['weapons']\r\n for weapon in weapons:\r\n requests.post(\"http://\" + self.ip_address + \":3000/Weapons\", data=weapon)", "async def update(self) -> None:\n data = await self._state.http.get_user_inventory(self.owner.id64, self.game.app_id, self.game.context_id)\n self._update(data)", "def test_setitem(self):\n\n vec = Vec3(4, 5, 6)\n\n # Set the values 
with .set()\n result = vec.set(7, 8, 9)\n\n # Ensure the values got set.\n self.assertEqual(Vec3(7, 8, 9), vec)\n # Ensure the object was also returned\n self.assertIs(result, vec)", "def change(self, ids, **kwargs):\n args = {}\n for key, value in kwargs.iteritems():\n argument = make_rpc_name(key)\n (arg, val) = argument_value_convert('torrent-set'\n , argument, value, self.rpc_version)\n args[arg] = val\n\n if len(args) > 0:\n self._request('torrent-set', args, ids, True)\n else:\n ValueError(\"No arguments to set\")", "def reset(self):\n self.members = []\n self.membertypes = []\n self.size = 0\n self.maxsize = 2+rollDie(6)\n self.alive = True", "def replenish(self, amount: int):\n self._inventory += amount", "def __init__(self, x, y, name, **kwargs):\n super(Actor, self).__init__(x, y, name, **kwargs)\n self.walkable = False\n self.actions = Actions()\n self._life = None\n self.inventory = Inventory(host=self)\n self.equipment = Equipment(host=self)", "def getitem(self):\n self.inventory += 1", "def get_inventory(self, node):", "def member(self, member: object):\n\n self._member = member", "async def setRoles(self, ctx: Context, person: Member, roles: Greedy[Role]):\n roles = remove_dupe_roles(roles)\n\n await person.edit(roles=roles)\n await ctx.send(f\"Setting {roles_str(person, roles)}\")", "def setup_members(self):\n ### cell\n self.cell_size = 8\n self.cell_row = 80\n self.cell_col = 100\n self.color_alive = \"black\"\n self.color_dead = \"white\"\n\n ### world\n self.init_modes = {} # read modes from json file\n self.init_world = {} # begining status\n self.world = {} # world's map\n # current status of world\n self.world_status = GOL(self.cell_row, self.cell_col)\n self.world_setable = True\n self.world_alive = False\n\n # widgets\n self.toolbar_height = 40\n self.world_size = [self.cell_size * self.cell_row,\n self.cell_size * self.cell_col]\n self.window_size = self.world_size\n self.window_size[0] += self.toolbar_height\n\n # resource\n self.saver_icon = \"save.gif\"\n self.run_icon = \"run.gif\"\n self.pause_icon = \"pause.gif\"\n self.stop_icon = \"stop.gif\"\n self.modes_file = \"gol.json\"\n self.modes_names = []", "def set_keys(self):\n self.inventory_dict['csah'] = {'hosts': '{}'.format(socket.getfqdn()), 'vars': {}}", "def openinv(cls): #THIS DOESN'T NEED TO BE MODIFIED!\n\n while True:\n inventory_items = {thing.id: thing.name for thing in cls.inventory}\n inventory_items[\"exit\"] = \"Exit Inventory\"\n inventory_items[\"newln\"] = \"\"\n inventory_items[\"playername\"] = str(gray('\"{}\"'.format(cls.name)))\n inventory_items[\"lv\"] = str(gray(\"LV: {}\".format(cls.lv)))\n inventory_items[\"hp\"] = str(gray(\"HP: {}/{}\".format(cls.hp, cls.max_hp)))\n inventory_items[\"exp\"] = str(gray(\"EXP: {}/40\".format(cls.exp)))\n\n choice = Menu.menu(\n title = \"Inventory\",\n contents = inventory_items \n )\n if choice == \"exit\":\n Terminal.clear_all()\n return\n while True:\n displayed_item = next((thing for thing in cls.inventory if thing.id == choice), None)\n final_choice = Menu.menu(\n title = displayed_item.name,\n contents = {\n \"interact\":displayed_item.interact_label,\n \"inspect\":\"Inspect\",\n \"drop\":\"Drop\",\n \"back\":\"Back\"\n }\n )\n if final_choice == \"back\":\n break\n if final_choice == \"interact\":\n use = displayed_item.interact()\n Terminal.clear_all()\n print(use[\"message\"])\n if \"heal_\" in use[\"action\"]:\n cls.hp += int(use[\"action\"].replace(\"heal_\", ''))\n if cls.hp > cls.max_hp:\n cls.hp = cls.max_hp\n 
cls.inventory.remove(displayed_item)\n Game.standard_wait()\n break\n if final_choice == \"inspect\":\n Terminal.clear_all()\n print(displayed_item)\n Game.standard_wait()\n continue\n if final_choice == \"drop\":\n Terminal.clear_all()\n print(\"You dropped the {}\".format(displayed_item.name))\n cls.inventory.remove(displayed_item)\n Game.standard_wait()\n break", "def set_all_from_json(self, value:list):\n self.clear()\n for item in value:\n relation_id = item['relation_id']\n members = item['members']\n self[relation_id] = members", "async def vouch(ctx, *, member_name=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n server = ctx.message.server\n member_roles = ctx.message.author.roles\n member_admin = discord.utils.find(lambda r: r.name.lower() in admin_roles, member_roles)\n if member_admin is not None:\n member = discord.utils.find(lambda c: c.name.lower() == member_name.lower(), server.members)\n roles = member.roles\n new_role = discord.utils.find(lambda r: r.name.lower() == required_role, server.roles)\n roles.append(new_role)\n await amor_manager.replace_roles(member, *roles)\n await amor_manager.say('{0} granted citizenship'.format(member.name))", "def setup(player, level):\n display('f1', 'inventory', player._inventory)\n\n maze(callback=partial(image, 'stone'))\n\n player.keys(right = 'd', left = 'a', up = 'w', down = 's')\n\n # randomly pick a background\n background()\n\n player.take(Punch(call='1'))\n player.take(FlameThrower(call='2'))\n player.take(Grenade(call='3', distance=6, radius=10))\n player.take(MustardGas(call='4', distance=10, radius=20))\n player.take(AirGun(call='space'))\n player.take(MachineGun(call='5', distance=15, repeat=3))\n player.take(Landmine(call='6', delay=1))\n player.take(C4(call='7', detonate='8', distance=8, radius=10))\n player.take(NuclearBomb(call='n'))\n\n player.take(WallBuster())\n #wall = partial(image, 'stone')\n #player.take(WallBuilder(left='left', right='right', front='up', back='down', wall=wall))\n display('f1', 'inventory', player._inventory)\n\n def drink(soda, player):\n soda.destroy()\n player.energy = 10\n fill(partial(image,'sprite', size=1.0), 0.05, player, drink)\n\n def claim(coin, player):\n coin.destroy()\n player.wealth = 5\n fill(partial(image,'coin', size=1.0), 0.25, player, claim)", "def set_attributes(self, argv):\n if argv[0] == \"population\" or argv[0] == \"pop\":\n if argv[1] == \"ttl\":\n self.population[int(argv[2])].ttl = int(argv[3])", "def inventory(self):\n\n #when the item list is 0 , print out having no items \n if len(self.items) == 0:\n \n print('The player has no items')\n\n #if not, print out the item list \n else:\n print(self.items)", "def __init__(self, members):\n require_type(isa(members, list),\n 'the parameter of list must be a list of objects')\n self.members = members\n self.pair = self._list(members)", "def clean_up_inventory(self):\n self.inventory = [i for i in self.inventory if i.quantity != 0]", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def place_items(self):\n for item in self.item_kit:\n coords = self.maze.random_coordinates()\n item(coords, self.scale)", "def put(self, request, pool_id):\n # Assemble the lists of member id's to add and remove, if any exist\n request_member_data = request.DATA.get('members', [])\n\n conn = get_sdk_connection(request)\n existing_members = _sdk_object_to_list(\n conn.load_balancer.members(pool_id))\n\n (members_to_add, members_to_delete) = get_members_to_add_remove(\n request_member_data, 
existing_members)\n\n if members_to_add or members_to_delete:\n kwargs = {'existing_members': existing_members,\n 'members_to_add': members_to_add,\n 'members_to_delete': members_to_delete,\n 'pool_id': pool_id}\n update_member_list(request, **kwargs)", "def getMembers():", "def getMembers():", "def getMembers():", "def getMembers():", "def setValue(self,variable,value):\n for adjective_key in value:\n variable.adjectives[adjective_key].membership = value[adjective_key]\n return None", "def setitems(self, items):\n self.clear()\n # FIXME: this allows you to pass in an OrderedDict as well :-)\n self.update(items)", "def test_list_members(self):\n pass", "def f_set(self, *args, **kwargs):\n if args and self.v_name is None:\n raise AttributeError(\n \"Cannot set positional value because I do not have a name!\"\n )\n for idx, arg in enumerate(args):\n valstr = self.f_translate_key(idx)\n self.f_set_single(valstr, arg)\n\n for key, arg in kwargs.items():\n self.f_set_single(key, arg)", "def test_members(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.run_function(\"user.add\", [self._user])\n self.run_function(\"user.add\", [self._user1])\n m = \"{},{}\".format(self._user, self._user1)\n ret = self.run_function(\"group.members\", [self._group, m])\n self.assertTrue(ret)\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertIn(self._user, str(group_info[\"members\"]))\n self.assertIn(self._user1, str(group_info[\"members\"]))", "def test_vault_update_vault_item(self):\n pass", "def __init__(self, location):\n self.name = 'Billy Boy'\n self.spirit = 'Boar'\n self.inventory = []\n self.location = location\n self.actions = {'1': self.search, '2': self.grab, '3': self.gurgle}", "def update(self, **vars):\n for name in vars:\n # Use __setitem__ for all effects\n self[name] = vars[name]", "def interact_with(arguments, player):\n inputted_item = \" \".join(arguments)\n current_loc = world.tile_exists(player.location_x, player.location_y)\n\n inventory_names = []\n for item in player.inventory:\n for name in item.name:\n inventory_names.append(name.lower())\n room_names = []\n for item in current_loc.items:\n for name in item.name:\n room_names.append(name.lower())\n\n # If it's in player inventory\n if inputted_item in inventory_names:\n for i, item in enumerate(player.inventory):\n if inputted_item in [name.lower() for name in item.name]:\n player.inventory[i].interact(player)\n return\n # If it's in the room\n elif inputted_item in room_names:\n for i, item in enumerate(current_loc.items):\n if inputted_item in [name.lower() for name in item.name]:\n current_loc.items[i].interact(player)\n return\n # If it's not in inventory or room\n else: #TODO: POSSIBLE ERROR - WHAT IF THERE'S AN IDENTICALLY NAMED ITEM IN THE INVENTORY AND ROOM?\n print(\"Can't do that\")\n return", "async def test_voice_kick(self):\n await self.cog.cog_load()\n\n # Create a regular member, and one member for each of the moderation roles\n moderation_members = [MockMember(roles=[MockRole(id=role)]) for role in MODERATION_ROLES]\n members = [MockMember(), *moderation_members]\n\n channel = MockVoiceChannel(members=members)\n await self.cog._kick_voice_members(channel)\n\n for member in members:\n if member in moderation_members:\n member.move_to.assert_not_called()\n else:\n self.assertEqual((None,), member.move_to.call_args_list[0].args)", "def update_group_profile_members(instance, sender, **kwargs):\n from geonode.groups.models import GroupProfile, GroupMember\n if not 
instance.groups:\n return\n\n if instance == instance.get_anonymous():\n # The invited user cannot be anonymous\n return\n\n member_joined = []\n\n for user_group in instance.groups.all():\n try:\n group = GroupProfile.objects.get(group=user_group)\n member, created = GroupMember.objects.get_or_create(\n group=group,\n user=instance)\n # Give member role as default\n if not member.role:\n member.role = GroupMember.MEMBER\n member.save()\n member_joined.append(member)\n except GroupProfile.DoesNotExist:\n continue\n\n for group_member in GroupMember.objects.filter(user=instance):\n if group_member not in member_joined:\n group_member.delete()", "def __setitem__(self, idx, fragments):\n # handle case if hsps is a list of hsp\n if isinstance(fragments, (list, tuple)):\n for fragment in fragments:\n self._validate_fragment(fragment)\n else:\n self._validate_fragment(fragments)\n\n self._items[idx] = fragments", "def __setitem__(self, item, value):\r\n debug.write(\"[SourceRPG] Assigning attribute %s with the value of %s to player %s\" % (item, value, self.name), 3)\r\n if item in self.currentAttributes:\r\n debug.write(\"Value is in current attributes, assign to the currentAttributes dict\", 4)\r\n self.currentAttributes[item] = value\r\n elif item in self.currentSkills or item in skills:\r\n debug.write(\"Value is in skills, assign to the currentSkills dict\", 4)\r\n self.currentSkills[item] = value\r\n else:\r\n debug.write(\"Value is not in any dictionary, assign to the custom playerAttributes dict\", 4)\r\n self.playerAttributes[item] = value\r\n debug.write(\"[SourceRPG] Value updated\", 3)", "def update(self, *args, **kwargs):\n sqrlist = [\"id\", \"size\", \"x\", \"y\"]\n if args and len(args) != 0:\n for i in range(len(sqrlist)):\n if i < len(args):\n # call to setter method\n setattr(self, sqrlist[i], args[i])\n else:\n if kwargs and len(kwargs) != 0:\n for k in sqrlist:\n for key, value in kwargs.items():\n if k == key:\n setattr(self, key, value)", "def add_vars(inventory):\n etc_hosts = format_hosts_dict(inventory)\n inventory.setdefault('all', dict()).\\\n setdefault('vars', dict()).\\\n setdefault('prometheus', dict())['etc_hosts'] = etc_hosts\n inventory['all']['vars'].\\\n setdefault('grafana', dict())['admin_password'] = 'admin'\n return inventory", "def setArmor(self, armor):\n self.av = armor", "def part(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)", "def turn_on_member_workspaces(context):\n portal = context.getSite()\n \n if \"Members\" in portal.objectIds():\n portal.manage_delObjects(ids=[\"Members\"])\n\n if \"membership\" not in portal.objectIds(): \n # create members container folder and set default properties\n pt = portal[\"portal_types\"]\n members = portal[portal.invokeFactory(\"Folder\",id=\"membership\")]\n members.setTitle(\"membership\")\n members.setDescription(\"Member workspaces container.\")\n members._getWorkflowTool().doActionFor(members, \"publish\" \"\")\n members.setExcludeFromNav(True) \n members.reindexObject() \n \n # set members folder\n pm = portal['portal_membership']\n pm.memberareaCreationFlag = 1\n pm.setMembersFolderById('membership') \n logger.info(\"Members container created.\")", "def _set_boxes(self, listOfBoxes):\n self._boxes = listOfBoxes", "def set(self, **kwargs):\n field_names = self.get_field_names()\n for name, value in kwargs.iteritems():\n if name in field_names:\n setattr(self, name, value)", "def update_members_from_preferences(self, **parameters):\n super(RootSequence, 
self).update_members_from_preferences(**parameters)\n\n para = parameters['context']\n self.context.update_members_from_preferences(**para)", "def set_group(self, name, members):\n if name in self._groups:\n return self._groups[name].add_members(members)\n self._groups[name] = BUIaclGroup(name, members)\n return self._groups[name].members", "def modify_membership(self, gfd_info):\n for member, status in gfd_info.items():\n if status:\n if member not in self.membership:\n self.membership.append(member)\n else:\n self.membership.remove(member)\n\n # Send change_replica_ips request to the client \n self.send_replica_IPs()\n\n # Elect a new primary if running on passive mode.\n if self.mode == 'passive':\n if member == self.primary:\n self.pick_primary()\n print(\"\\n The current membership is :\")\n print(self.membership)\n \n return" ]
[ "0.6458913", "0.6443288", "0.64391935", "0.63899845", "0.6246272", "0.6177047", "0.6176741", "0.6058151", "0.6013703", "0.6013703", "0.6013703", "0.58846337", "0.56970346", "0.56590396", "0.5608252", "0.5567245", "0.5558885", "0.55403244", "0.5475591", "0.54485273", "0.5444364", "0.5387036", "0.53305507", "0.53223455", "0.5307633", "0.52854764", "0.5273208", "0.5267437", "0.5257615", "0.5209839", "0.5195018", "0.5188799", "0.5158636", "0.5146021", "0.51269186", "0.5123682", "0.51232606", "0.51014245", "0.50936353", "0.5090786", "0.5089652", "0.5081388", "0.5050235", "0.50294966", "0.50218016", "0.5014207", "0.5007251", "0.49886972", "0.4981144", "0.49798748", "0.49747995", "0.49703857", "0.49701723", "0.4948762", "0.4944046", "0.4942789", "0.49315596", "0.492222", "0.4915328", "0.49152735", "0.49098918", "0.49094158", "0.49067584", "0.48956946", "0.48676944", "0.4865641", "0.48496237", "0.48275685", "0.4816645", "0.48090217", "0.48088023", "0.4805804", "0.47937638", "0.47932294", "0.47913226", "0.47913226", "0.47913226", "0.47913226", "0.47901654", "0.47860634", "0.47857228", "0.4781859", "0.4779227", "0.47744387", "0.47724196", "0.4770983", "0.4770931", "0.47663936", "0.47646233", "0.47620368", "0.47587222", "0.47570416", "0.47550118", "0.4752042", "0.4749718", "0.4748394", "0.47471428", "0.47445124", "0.47431698", "0.47421157", "0.47356918" ]
0.0
-1
Factory for subfield items.
def subfieldFactory(name): from pythia.pyre.inventory import facility return facility(name, family="subfield", factory=Subfield)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subfield():\n return Subfield()", "def __init__(self, *args, **kwargs):\n super(ListFieldType, self).__init__(*args, **kwargs)\n\n self.item_info = self.field_info.get('items')", "def create_subspecialty(sub_data):\n return get_or_create_object(sub_data, Subspecialty)", "def test_customWidgetFactory(self):\n\n value_type = TextLine(__name__='bar')\n self.field = List(__name__='foo', value_type=value_type)\n request = TestRequest()\n\n # set up the custom widget factory and verify that it works\n sw = CustomWidgetFactory(ListSequenceWidget)\n widget = sw(self.field, request)\n assert widget.subwidget is None\n assert widget.context.value_type is value_type\n\n # set up a variant that specifies the subwidget to use and verify it\n class PollOption:\n pass\n ow = CustomWidgetFactory(ObjectWidget, PollOption)\n sw = CustomWidgetFactory(ListSequenceWidget, subwidget=ow)\n widget = sw(self.field, request)\n assert widget.subwidget is ow\n assert widget.context.value_type is value_type", "def GetSubfieldDef(fielddef):\n\n format_, addrdef, datadef, arraydef, validate, cmd, converter = GetFieldDef(fielddef, fields='format_, addrdef, datadef, arraydef, validate, cmd, converter')\n\n # create new arraydef\n if len(arraydef) > 1:\n arraydef = arraydef[1:]\n else:\n arraydef = None\n\n # create new datadef\n if isinstance(datadef, tuple):\n if cmd is not None:\n datadef = (arraydef, validate, cmd)\n else:\n datadef = (arraydef, validate)\n else:\n datadef = arraydef\n\n # set new field def\n subfielddef = None\n if converter is not None:\n subfielddef = (format_, addrdef, datadef, converter)\n else:\n subfielddef = (format_, addrdef, datadef)\n\n return subfielddef", "def add_sub_factories(self) -> None:\n for field in get_model_fields(self.model, base=False, foreign=True, m2m=False):\n if not hasattr(self.factory, field.name):\n factory_name = self._get_factory_name_for_model(field.related_model)\n if field.related_model == self.model:\n _factory = SelfFactory(factory=self.factory, required=not field.null)\n else:\n _factory = SubFactory(\n factory=factory_name,\n required=not field.null,\n related_model=field.related_model\n )\n setattr(self.factory, field.name, _factory)", "def txnSubCollectionFactory(txnSubCollection, txn):\n subCollection = txnSubCollection.cloneMetaData()\n subCollection.append(txn)\n return subCollection", "def multivalue_field_factory(field_class):\n class NewField(field_class):\n widget = forms.SelectMultiple\n\n def to_python(self, value):\n if not value:\n return []\n return [\n # Only append non-empty values (this avoids e.g. 
trying to cast '' as an integer)\n super(field_class, self).to_python(v) for v in value if v\n ]\n\n return type('MultiValue{}'.format(field_class.__name__), (NewField,), dict())", "def test_subwidget(self):\n self.field = List(__name__='foo',\n value_type=TextLine(__name__='bar'))\n request = TestRequest()\n\n class PollOption:\n pass\n ow = CustomWidgetFactory(ObjectWidget, PollOption)\n widget = SequenceWidget(\n self.field, self.field.value_type, request, subwidget=ow)\n assert widget.subwidget is ow", "def make_instance(self, include_optional):\n # model = rcc.models.crf_item_definition.CRFItemDefinition() # noqa: E501\n if include_optional :\n return CRFItemDefinition(\n item_document_file = rcc.models.file_base64.FileBase64(\n value = '0', \n file_name = '0', \n content_type = '0', \n file_size = 56, ), \n item_data_type = '0', \n measurement_unit_name = '0', \n variable_name = '0', \n label = '0', \n label_plain_text = '0', \n phi_status = True, \n left_alignment = True, \n rc_oid = '0', \n field_width = 56, \n info_text = '0', \n min_value = '0', \n max_value = '0', \n show_validator = True, \n soft_validation = True, \n calc_field_equation = '0', \n custom_info1 = '0', \n custom_info2 = '0', \n warning_when_left_empty = '0', \n stratification_variable = True, \n study_dictionary = '0', \n default_value = '0', \n subject_group = True, \n creation_source = '0', \n promis_oid = '0', \n promis_final_score = True, \n item_fhir_metadata = '0', \n required_query_description = '0', \n crfitem_metadata = rcc.models.crf_item_metadata.CRFItemMetadata(\n item_metadata_oid = '0', \n column_number = 56, \n page_number_label = '0', \n question_number_label = '0', \n left_item_text = '0', \n right_item_text = '0', \n regexp = '0', \n regexp_error_msg = '0', \n ordinal = 56, \n required = True, \n response_layout = '0', \n width_decimal = '0', \n show_item = True, \n code_ref = '0', \n group_oid = '0', \n is_required = True, \n disp_sequence = 56, \n branching_equation = '0', \n crf_version_oid = '0', \n hide_from_survey = True, \n position_row = 56, \n position_column = 56, \n item_data_type = '0', \n measurement_unit_name = '0', \n variable_name = '0', \n label = '0', \n label_plain_text = '0', \n phi_status = True, \n left_alignment = True, \n field_width = 56, \n info_text = '0', \n min_value = '0', \n max_value = '0', \n show_validator = True, \n soft_validation = True, \n calc_field_equation = '0', \n custom_info1 = '0', \n custom_info2 = '0', \n stratification_variable = True, \n show_response_set_value_too = True, \n study_dictionary = '0', \n default_value = '0', \n subject_group = True, \n required_query_description = '0', \n warning_when_left_empty = '0', \n dynamic_list_rs_values_eq = '0', \n dynamic_list_type = '0', \n dynamic_list_no_duplicates = True, \n used_in_dys_fields = True, \n econsent_signature = True, ), \n crfitem_metadata_group = rcc.models.crf_item_metadata_group.CRFItemMetadataGroup(\n crfitem_metadata = [\n rcc.models.crf_item_metadata.CRFItemMetadata(\n item_metadata_oid = '0', \n column_number = 56, \n page_number_label = '0', \n question_number_label = '0', \n left_item_text = '0', \n right_item_text = '0', \n regexp = '0', \n regexp_error_msg = '0', \n ordinal = 56, \n required = True, \n response_layout = '0', \n width_decimal = '0', \n show_item = True, \n code_ref = '0', \n group_oid = '0', \n is_required = True, \n disp_sequence = 56, \n branching_equation = '0', \n crf_version_oid = '0', \n hide_from_survey = True, \n position_row = 56, \n position_column = 
56, \n item_data_type = '0', \n measurement_unit_name = '0', \n variable_name = '0', \n label = '0', \n label_plain_text = '0', \n phi_status = True, \n left_alignment = True, \n field_width = 56, \n info_text = '0', \n min_value = '0', \n max_value = '0', \n show_validator = True, \n soft_validation = True, \n calc_field_equation = '0', \n custom_info1 = '0', \n custom_info2 = '0', \n stratification_variable = True, \n show_response_set_value_too = True, \n study_dictionary = '0', \n default_value = '0', \n subject_group = True, \n required_query_description = '0', \n warning_when_left_empty = '0', \n dynamic_list_rs_values_eq = '0', \n dynamic_list_type = '0', \n dynamic_list_no_duplicates = True, \n used_in_dys_fields = True, \n econsent_signature = True, )\n ], )\n )\n else :\n return CRFItemDefinition(\n item_document_file = rcc.models.file_base64.FileBase64(\n value = '0', \n file_name = '0', \n content_type = '0', \n file_size = 56, ),\n )", "def subtable(\n field: str,\n table: Type[ModelledTable],\n subfield: Optional[str] = None,\n pivot: Optional[str] = None,\n selectors: Optional[Dict[str, PrimitiveTypes]] = None,\n) -> Callable[[Type[SecondTable]], Type[SecondTable]]:\n\n if not subfield:\n subfield = field\n\n if not selectors:\n selectors = dict()\n\n sub: SubTable[ModelledTable] = SubTable(table, subfield, pivot, selectors)\n\n def _subtable(cls: Type[SecondTable]) -> Type[SecondTable]:\n \"\"\"Adds a subtable key to a Table\"\"\"\n\n if not issubclass(cls, Table):\n raise Exception(f\"{cls.__name__} is not a sub class of Table\")\n\n subtables: Dict[str, SubTable[ModelledTable]] = getattr(cls, _SUBTABLES, {})\n subtables[field] = sub\n setattr(cls, _SUBTABLES, subtables)\n\n return cls\n\n return _subtable", "def extend_or_add_fields(cls, subfields, dbmanager, flag_mixin_atroot, propname, proplabel):\n import mdbmodel_fieldset\n if (flag_mixin_atroot):\n # prepare extra fields that will be added at root; this doesnt actually create any prerequisites\n cls.extend_fields(subfields)\n else:\n # add a special sub table that will contain some fields, using a helper class object attached to us\n # create (AND REGISTER) the new helper object\n backrefname = cls.get_dbtablename_pure()\n mdbmodel_fieldset.MewloDbFieldset.make_fieldset_dbobjectclass(cls, propname, proplabel, backrefname, dbmanager, subfields)", "def __getitem__(self, item: dict) -> 'Field':\n raise NotImplementedError(self)", "def _subtable(cls: Type[SecondTable]) -> Type[SecondTable]:\n\n if not issubclass(cls, Table):\n raise Exception(f\"{cls.__name__} is not a sub class of Table\")\n\n subtables: Dict[str, SubTable[ModelledTable]] = getattr(cls, _SUBTABLES, {})\n subtables[field] = sub\n setattr(cls, _SUBTABLES, subtables)\n\n return cls", "def Item(self) -> object:", "def Item(self) -> object:", "def _make_subset(cls, name, data, **kwargs):\r\n return cls(name, data, **kwargs)", "def _create_item(self, item_id: str, data: dict) -> Pipeline:\n return Pipeline(id=item_id, **data)", "def NewItems(self) -> _n_1_t_7:", "def _video(self, parent, group):\r\n return ItemFactory.create(\r\n parent_location=parent.location,\r\n category=\"video\",\r\n display_name=\"Group {} Sees This Video\".format(group),\r\n )", "def _make_item(self, parent_item: FolderTreeItem, folder: dict, level=0) -> FolderTreeItem:\n for sub_folder in folder['folders']:\n sub_folder_item = FolderTreeItem(Folder(sub_folder), parent_item)\n item = self._make_item(sub_folder_item, sub_folder, level + 1)\n parent_item.append_child(item)\n for query 
in folder['queries']:\n item = QueryTreeItem(Query(query), parent_item)\n parent_item.append_child(item)\n return parent_item", "def build_schema(self, spec, **kwargs):\n item_body = spec['items']\n item_builder = self.builder_provider.get_builder(item_body['type'])\n return fields.List(item_builder.build_schema(item_body), **self.translate_args(spec, **kwargs))", "def __init__(self, parent_=None, instance_name_=None, **values):\n self.__parent = parent_\n self.__instance_name = instance_name_\n\n self._factories = {}\n\n for name, field in self._get_fields().items():\n if isinstance(field, fields.Factory):\n # for factory fields, we need to create a new factory with the given factory_type\n value = field.factory_type(field.type, name_=name, parent_instance_=self)\n self._factories[name] = value\n else:\n value = values.get(name, field.from_raw(field.default))\n\n # accept raw as a default value\n # and set inner value, so it should be availale from the start\n setattr(self, f\"__{name}\", value)", "def item_subadres_adapter(obj, request):\n return {\n 'id': obj.id,\n 'subadres': obj.subadres,\n 'postadres': obj.postadres,\n 'status': {\n 'id': obj.status.id,\n 'naam': obj.status.naam,\n 'definitie': obj.status.definitie\n },\n 'aard': {\n 'id': obj.aard.id,\n 'naam': obj.aard.naam,\n 'definitie': obj.aard.definitie\n },\n 'metadata': {\n 'begin_tijd': obj.metadata.begin_tijd,\n 'begin_datum': obj.metadata.begin_datum,\n 'begin_bewerking': {\n 'id': obj.metadata.begin_bewerking.id,\n 'naam': obj.metadata.begin_bewerking.naam,\n 'definitie': obj.metadata.begin_bewerking.definitie\n },\n 'begin_organisatie': {\n 'id': obj.metadata.begin_organisatie.id,\n 'naam': obj.metadata.begin_organisatie.naam,\n 'definitie': obj.metadata.begin_organisatie.definitie\n }\n }\n }", "def __init__(self, item_data):\n self.order_item_id = item_data['OrderItemRowId']\n self.quantity = item_data['Quantity']\n self.stock_id = item_data['pkStockItemId']\n self.sku = item_data['SKU']\n self.title = item_data['ItemTitle']\n self.despatch_unit_cost = item_data['DespatchUnitCost']\n self.cost_ex_tax = item_data['CostExTax']\n self.cost_inc_tax = item_data['CostIncTax']\n self.per_unit_inc_tax = item_data['PerUnitIncTax']\n self.per_unit_ex_tax = item_data['PerUnitExTax']\n self.tax_rate = item_data['TaxRate']\n self.total_tax = item_data['TotalTax']\n self.line_discount = item_data['LineDiscount']\n self.tax_cost_inclusive = item_data['TaxCostInclusive']\n self.note = item_data['Note']\n self.parent_item_id = item_data['ParentItemRowId']\n self.has_children = item_data['HasChildren']\n self.child_items = item_data['ChildItems']\n self.has_options = item_data['HasOptions']\n self.options = item_data['Options']", "def create_sub_time_series_one_item(sub_data: pandas.core.frame.DataFrame, item: str, store: str):\n ## create sub dataset\n sub_data = sub_data[sub_data['SKU'] == item]\n sub_data = sub_data[sub_data['Store'] == store]\n sub_data = sub_data.sort_values(by=\"Date\")\n return sub_data", "def make(self, item):\n self.name = item.get(\"name\", \"\")\n self.description = item.get(\"description\", \"\")\n self.type = item.get(\"type\", \"filler\")\n if not isinstance(self.type, str) or self.type is None:\n self.usable = NotUsable\n elif len(self.type) > 1:\n self.set_usable(self.type)\n else:\n self.usable = NotUsable", "def make_field(field):\n\n if \"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)", "def _create_item(self, 
parent_location, category, display_name, **kwargs):\n return ItemFactory.create(\n parent_location=parent_location,\n category=category,\n display_name=display_name,\n publish_item=False,\n user_id=self.user.id,\n **kwargs\n )", "def _create_item(self, category, name, data, metadata, parent_category, parent_name, draft=True, split=True):\r\n location = self.old_course_key.make_usage_key(category, name)\r\n if not draft or category in DIRECT_ONLY_CATEGORIES:\r\n mongo = self.old_mongo\r\n else:\r\n mongo = self.draft_mongo\r\n mongo.create_and_save_xmodule(location, data, metadata, self.runtime)\r\n if isinstance(data, basestring):\r\n fields = {'data': data}\r\n else:\r\n fields = data.copy()\r\n fields.update(metadata)\r\n if parent_name:\r\n # add child to parent in mongo\r\n parent_location = self.old_course_key.make_usage_key(parent_category, parent_name)\r\n if not draft or parent_category in DIRECT_ONLY_CATEGORIES:\r\n mongo = self.old_mongo\r\n else:\r\n mongo = self.draft_mongo\r\n parent = mongo.get_item(parent_location)\r\n parent.children.append(location)\r\n mongo.update_item(parent, self.userid)\r\n # create pointer for split\r\n course_or_parent_locator = BlockUsageLocator(\r\n course_key=self.split_course_key,\r\n block_type=parent_category,\r\n block_id=parent_name\r\n )\r\n else:\r\n course_or_parent_locator = self.split_course_key\r\n if split:\r\n self.split_mongo.create_item(course_or_parent_locator, category, self.userid, block_id=name, fields=fields)", "def _deserialize_item(self, data: dict) -> Pipeline:\n return Pipeline(**data)", "def test_item_factory(self):\r\n course = CourseFactory.create()\r\n item = ItemFactory.create(parent_location=course.location)\r\n self.assertIsInstance(item, SequenceDescriptor)", "def __init__(self, items):\n self.items = items", "def __init__(self):\n self._data = PositionalList() # list of Item instances", "def make_fields_of_study(\n *,\n n_fields_of_study_per_level: int,\n faker: Faker,\n n_levels: int = 6,\n min_title_length: int = 1,\n max_title_length: int = 3,\n) -> FieldOfStudyList:\n\n fields_of_study = []\n fos_id_ = 0\n for level in range(n_levels):\n for _ in range(n_fields_of_study_per_level):\n n_words_ = random.randint(min_title_length, max_title_length)\n name_ = faker.sentence(nb_words=n_words_)\n fos_ = FieldOfStudy(fos_id_, name=name_, level=level)\n fields_of_study.append(fos_)\n fos_id_ += 1\n\n return fields_of_study", "def test_create_item(self):\n item = self.item\n\n self.assertTrue(isinstance(item, Item))\n self.assertEqual(item.name, \"Test Item\")", "def __init__(self, item_name):\r\n \r\n self.name = item_name\r\n self.description = \"\"", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def _generate_items_for_subtask(item_queryset, item_fields, total_num_items, total_num_subtasks, items_per_query, items_per_task):\r\n num_queries = int(math.ceil(float(total_num_items) / float(items_per_query)))\r\n last_pk = item_queryset.order_by('pk')[0].pk - 1\r\n num_items_queued = 0\r\n available_num_subtasks = total_num_subtasks\r\n all_item_fields = 
list(item_fields)\r\n all_item_fields.append('pk')\r\n\r\n for query_number in range(num_queries):\r\n # In case total_num_items has increased since it was initially calculated\r\n # include all remaining items in last query.\r\n item_sublist = item_queryset.order_by('pk').filter(pk__gt=last_pk).values(*all_item_fields)\r\n if query_number < num_queries - 1:\r\n item_sublist = list(item_sublist[:items_per_query])\r\n else:\r\n item_sublist = list(item_sublist)\r\n\r\n last_pk = item_sublist[-1]['pk']\r\n num_items_this_query = len(item_sublist)\r\n\r\n # In case total_num_items has increased since it was initially calculated just distribute the extra\r\n # items among the available subtasks.\r\n num_tasks_this_query = min(available_num_subtasks, int(math.ceil(float(num_items_this_query) / float(items_per_task))))\r\n available_num_subtasks -= num_tasks_this_query\r\n\r\n chunk = int(math.ceil(float(num_items_this_query) / float(num_tasks_this_query)))\r\n for i in range(num_tasks_this_query):\r\n items_for_task = item_sublist[i * chunk:i * chunk + chunk]\r\n yield items_for_task\r\n\r\n num_items_queued += num_items_this_query\r\n\r\n # Because queueing does not happen in one transaction the number of items in the queryset may change\r\n # from the initial count. For example if the queryset is of the CourseEnrollment model students may\r\n # enroll or unenroll while queueing is in progress. The purpose of the original count is to estimate the\r\n # number of subtasks needed to perform the requested task.\r\n if num_items_queued != total_num_items:\r\n TASK_LOG.info(\"Number of items generated by chunking %s not equal to original total %s\", num_items_queued, total_num_items)", "def __getitem__(self, item):\n self._check_iterable()\n return FixtureRefItem(self, item)", "def make_item_body(self, item):\n raise NotImplementedError", "def init_items(self):\r\n raise NotImplementedError()", "def defineDownItemFieldset(dash_instance):\n\t\n\tdown_nodes = getDownNodes()\n\tdown_groups = getDownGroups()\n\tdown_nodes = [node['Caption'] for node in down_nodes]\n\tdown_groups = [group['Name'] for group in down_groups]\n\t\n\tnode_mappings = resolveEntitiesToUrls(down_nodes, \"node\")\n\tgroup_mappings = resolveEntitiesToUrls(down_groups, \"group\")\n\t\n\t# Create Navigation pane for both nodes and groups\n\tdown_nodes_nav_label = html.Label(\"Down Nodes\", style=dict(margin=\"25px auto\", fontWeight=\"bold\", textDecoration=\"underline\", fontSize=24))\n\tdown_nodes_nav_list = html.Ul(\n\t\tchildren=[html.Li(html.A(key, href=value, target=\"_blank\")) for key, value, in node_mappings.items()])\n\tdown_groups_nav_label = html.Label(\"Down Groups\", style=dict(margin=\"25px auto\", fontWeight=\"bold\", textDecoration=\"underline\", fontSize=24))\n\tdown_groups_nav_list = html.Ul(\n\t\tchildren=[html.Li(html.A(key, href=value, target=\"_blank\", style=dict(wordBreak=\"break-all\"))) for key, value, in group_mappings.items()])\n\t\n\t# Create containers for nav menus\n\tdown_nodes_nav = html.Div(\n\t\tchildren=[down_nodes_nav_label, down_nodes_nav_list],\n\t\tstyle=dict(display=\"inline-block\", textAlign=\"center\", float=\"left\", marginRight=\"50px\"))\n\tdown_groups_nav = html.Div(\n\t\tchildren=[down_groups_nav_label, down_groups_nav_list],\n\t\tstyle=dict(display=\"inline-block\", textAlign=\"center\", float=\"left\", width=\"200px\"))\n\tdown_nav = html.Div(\n\t\tchildren=[down_nodes_nav, down_groups_nav],\n\t\tstyle=dict(maxWidth=\"600px\", margin=\"0 auto\"))\n\t\n\t# Add the containers to a 
fieldset\n\tfield_legend = html.Legend(\n\t\t\t\t\t\t\"Down Items\", \n\t\t\t\t\t\tstyle=dict(fontSize=36, fontWeight=\"bold\", position=\"relative\", top=\"8px\"))\n\tfieldset = html.Fieldset(\n\t\t\t\t\t[field_legend, down_nav], \n\t\t\t\t\ttitle=\"Down Items\",\n\t\t\t\t\tstyle=dict(width=\"60%\", backgroundColor=\"white\", border=\"1px solid black\", marginLeft=\"auto\", marginRight=\"auto\"))\n\treturn fieldset", "def customize_fields(self, fields):\n\n for field in fields.values():\n\n field_type = type(field.field)\n\n if field_type is List or field_type is Set:\n field.widgetFactory = CheckBoxFieldWidget\n\n elif field_type is Choice:\n field.widgetFactory = RadioFieldWidget", "def __init__(self):\n # Item Dictionary (key: call number, value: Item object)\n self.item_list = {}\n\n # Add some items manually for testing purposes.\n book1 = Book(\"In praise of Idleness\", \"B-1\", 3, \"bertrand russell\")\n book2 = Book(\"Breaking the Code\", \"B-2\", 1, \"Pat Matter\")\n dvd = DVD(\"Breaking Bad\", \"D-1\", 2, \"2019-01-05\", \"CA\")\n self._add_item_by_item(book1)\n self._add_item_by_item(book2)\n self._add_item_by_item(dvd)", "def FromJSONData(self, data) -> AbstractItemList:\n ret = self.NewList()\n ret._name = data['name']\n ret._desc = data['desc']\n return ret", "def create(self, validated_data):\n variants_data = validated_data.pop('variants')\n item = Item.objects.create(**validated_data)\n\n # loop through the variant data and create a variant.\n for variant_data in variants_data:\n properties_data = variant_data.pop('properties')\n variant = Variant.objects.create(item=item, **variant_data)\n variant.last_modified_by = item.last_modified_by\n variant.save()\n\n # loop through the property for a variant and create.\n for property_data in properties_data:\n property = Property.objects.create(variant=variant,\n **property_data)\n property.last_modified_by = item.last_modified_by\n property.save()\n\n return item", "def __init__(self):\n self._data = PositionalList() # list of _Item instances", "def __init__(self, item_type=None):\n super(List, self).__init__()\n self.item_type = item_type or Type()", "def __new__(subtype, input):\n # input should be a record array\n if input.dtype.subdtype is None:\n self = np.recarray.__new__(\n subtype, input.shape, input.dtype, buf=input.data\n )\n else:\n self = np.recarray.__new__(\n subtype, input.shape, input.dtype, buf=input.data, strides=input.strides\n )\n\n self._init()\n if self.dtype.fields:\n self._nfields = len(self.dtype.fields)\n\n return self", "def meta_files_from_item(cls, rel_item_path: pl.Path) -> tt.PathGen:\n pass", "def create_item(project: str, category: str, iso3: str, popyear: str,\n metadatas: List[Any]) -> Union[Item, None]:\n\n # Get the specific metadata for a popyear\n metadata_popyear = [m for m in metadatas if m[\"popyear\"] == popyear]\n if len(metadata_popyear) == 0:\n print(f\"No metadata found for {project}/{category}/{iso3}/{popyear}\")\n return None\n metadata = metadata_popyear[0]\n\n # Get raster metadata\n # Use FTP server because HTTPS server doesn't work with rasterio.open\n tif_url = metadata[\"files\"][0].replace(\"https://data\", \"ftp://ftp\")\n with rasterio.open(tif_url) as src:\n bbox = src.bounds\n shape = src.shape\n transform = src.transform\n wkt = src.crs.wkt\n epsg = src.meta[\"crs\"].to_epsg()\n nodata = src.nodata\n dtype = src.dtypes[0]\n\n # Create bbox and geometry\n if epsg != WORLDPOP_EPSG:\n raise AssertionError(\n f\"Expecting EPSG={WORLDPOP_EPSG} but got EPSG={epsg} for 
{project}/{category}\"\n )\n polygon = box(*bbox, ccw=True)\n coordinates = [list(i) for i in list(polygon.exterior.coords)]\n geometry = {\"type\": \"Polygon\", \"coordinates\": [coordinates]}\n\n # Item properties\n properties = {\n \"title\": metadata[\"title\"],\n \"description\": metadata[\"desc\"],\n \"start_datetime\": f\"{popyear}-01-01T00:00:00Z\",\n \"end_datetime\": f\"{popyear}-12-31T00:00:00Z\",\n \"gsd\": COLLECTIONS_METADATA[project][category][\"gsd\"],\n }\n\n # Create item\n item = Item(\n id=f\"{iso3}_{popyear}\",\n geometry=geometry,\n bbox=bbox,\n datetime=str_to_datetime(f\"{popyear}, 1, 1\"),\n properties=properties,\n )\n\n # Create summary link\n item.add_link(\n Link(\n rel=\"child\",\n target=metadata[\"url_summary\"],\n title=\"Summary Page\",\n ))\n\n # Include thumbnail\n item.add_asset(\n \"thumbnail\",\n Asset(\n href=metadata[\"url_img\"],\n media_type=MediaType.PNG,\n roles=[\"thumbnail\"],\n title=\"WorldPop Thumbnail\",\n ),\n )\n\n # Include JSON metadata\n item.add_asset(\n \"metadata\",\n Asset(\n href=f\"{API_URL}/{project}/{category}?iso3={iso3}\",\n media_type=MediaType.JSON,\n roles=[\"metadata\"],\n title=\"WorldPop Metadata\",\n ),\n )\n\n # Incluce scientific information\n sci_ext = ScientificExtension.ext(item, add_if_missing=True)\n sci_ext.doi = metadata[\"doi\"]\n sci_ext.citation = metadata[\"citation\"]\n\n # Include projection information\n proj_ext = ProjectionExtension.ext(item, add_if_missing=True)\n proj_ext.epsg = epsg\n proj_ext.transform = transform\n proj_ext.bbox = bbox\n proj_ext.wkt2 = wkt\n proj_ext.shape = shape\n\n # Create data assets\n for href in sorted(metadata[\"files\"]):\n try:\n media_type = {\n \"tif\": MediaType.GEOTIFF,\n \"zip\": \"application/zip\"\n }[href[-3:].lower()]\n except KeyError:\n print(f\"Unknown media type for {href}\")\n title = os.path.basename(href)[:-4]\n data_asset = Asset(href=href,\n media_type=media_type,\n roles=[\"data\"],\n title=title)\n\n item.add_asset(title, data_asset)\n\n # Include raster information\n sampling: Any = \"area\"\n rast_band = RasterBand.create(nodata=nodata,\n data_type=dtype,\n sampling=sampling)\n rast_ext = RasterExtension.ext(data_asset, add_if_missing=True)\n rast_ext.bands = [rast_band]\n\n return item", "def create(data):\n \n return Partlist(\n list_id = data['id'],\n name = data['name'],\n pieces = data['num_parts'])", "def meta_fields(item):\n return scom.meta_fields(item)", "def __init__(self, jsondict=None, strict=True):\n \n self.adjudication = None\n \"\"\" Added items detail adjudication.\n List of `ClaimResponseItemAdjudication` items (represented as `dict` in JSON). \"\"\"\n \n self.factor = None\n \"\"\" Price scaling factor.\n Type `float`. \"\"\"\n \n self.modifier = None\n \"\"\" Service/Product billing modifiers.\n List of `CodeableConcept` items (represented as `dict` in JSON). \"\"\"\n \n self.net = None\n \"\"\" Total item cost.\n Type `Money` (represented as `dict` in JSON). \"\"\"\n \n self.noteNumber = None\n \"\"\" Applicable note numbers.\n List of `int` items. \"\"\"\n \n self.productOrService = None\n \"\"\" Billing, service, product, or drug code.\n Type `CodeableConcept` (represented as `dict` in JSON). \"\"\"\n \n self.quantity = None\n \"\"\" Count of products or services.\n Type `Quantity` (represented as `dict` in JSON). \"\"\"\n \n self.unitPrice = None\n \"\"\" Fee, charge or cost per item.\n Type `Money` (represented as `dict` in JSON). 
\"\"\"\n \n super(ClaimResponseAddItemDetailSubDetail, self).__init__(jsondict=jsondict, strict=strict)", "def Item(self) -> ITypeDescriptor:", "def test_item_factory_class():\n # __init__()\n factory = ItemFactory()\n pizza_menuitem = MenuItem(\"cheese\", \"Pizzas\", True, 10.0, 1)\n drink_menuitem = MenuItem(\"fanta\", \"Drinks\", True, 10.0, 1)\n side_menuitem = MenuItem(\"fries\", \"Sides\", True, 10.0, 1)\n none_menuitem = MenuItem(\"oreo\", \"oreo\", True, 10.0, 1)\n medium = MenuItem(\"medium\", \"size\", False, 4.0, 1)\n\n # create_item()\n expected_pizza = Pizza(pizza_menuitem, medium)\n expected_drink = Drink(drink_menuitem, medium)\n expected_side = Side(side_menuitem)\n pizza = factory.create_item(pizza_menuitem, medium)\n assert pizza == expected_pizza\n assert factory.create_item(drink_menuitem, medium) == expected_drink\n assert factory.create_item(side_menuitem) == expected_side\n assert not factory.create_item(none_menuitem, medium)", "def _get_list_item(self, document):\n list_item = Item()\n list_item.document = document\n # TODO: call callables?\n list_item.display_fields = [self._prepare_field(document, field) for field in self.list_display]\n return list_item", "def split_item(self, *args):\n return _ida_hexrays.vdui_t_split_item(self, *args)", "def __init__(\n self, trait=None, minlen=0, maxlen=six.MAXSIZE, has_items=True\n ):\n self.item_trait = trait_from(trait)\n self.minlen = max(0, minlen)\n self.maxlen = max(minlen, maxlen)\n self.has_items = has_items", "def parse_items(self):", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def create_submodule(self, *args: Any, **kwargs: Any) -> Submodule:\n return Submodule.add(self, *args, **kwargs)", "def add_new_item_field(*fields, **keywords):\n\n for field in fields:\n print \"Creating {0} custom field...\".format(field)\n doc = frappe.get_doc({\n \"doctype\": \"Custom Field\",\n \"dt\": \"Item\",\n \"fieldtype\": \"Data\",\n \"label\": field,\n \"insert_after\": keywords['insert_after']\n })\n doc.insert()\n\n print \"-----\"\n print \"Finished creating custom fields...\"\n print \"-----\"", "def __init__(self, items=None):\n if items is None:\n self.items = []\n elif isinstance(items, list):\n self.items = items\n elif isinstance(items, SQLParam):\n self.items = [items]\n elif isinstance(items, SQLQuery):\n self.items = list(items.items)\n else:\n self.items = [items]\n \n # Take care of SQLLiterals\n for i, item in enumerate(self.items):\n if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):\n self.items[i] = item.value.v", "def __init__(self, field_raw):\n self.name = \"pages\"\n self.value = self.parse_field(field_raw)", "def subscription_factory_fixture():\n def _factory(capability):\n sub = Subscription()\n sub.capability = capability\n return sub\n return _factory", "def field_subtype(f, default=MISSING, *, unwrap=True):\n return _field_type(f, SUBTYPE, default, unwrap=unwrap)", "def createSubListOfSpeciesFeatures(self):\n return _libsbml.MultiSpeciesPlugin_createSubListOfSpeciesFeatures(self)", "def __init__(self):\r\n self._items = [[] for _ in range(20)]", "def sidebar_menu_subitems(item, selected):\n\n return {\n 'item': item,\n 'selected': selected,\n }", 
"def CompositeFactoryTableCell(factory_col_list_arg):\n\n class FactoryClass(TableCell):\n factory_col_list = factory_col_list_arg\n\n def __init__(self, art, **kw):\n TableCell.__init__(self, CELL_TYPE_UNFILTERABLE, [])\n\n for sub_factory, sub_col in self.factory_col_list:\n kw['col'] = sub_col\n sub_cell = sub_factory(art, **kw)\n self.non_column_labels.extend(sub_cell.non_column_labels)\n self.values.extend(sub_cell.values)\n return FactoryClass", "def format_data(self, _item_fields, special=None):\n\n if special:\n _item_fields[\"special\"] = special\n\n return _item_fields", "def __init__(self,name,value,*args,**kargs):\n self.ndim = len(value)\n if 'fields' in kargs:\n fields = kargs['fields']\n else:\n fields = [ str(i) for i in range(self.ndim) ]\n\n self.input = QtGui.QWidget(*args)\n InputItem.__init__(self,name,*args,**kargs)\n #self.layout().insertWidget(1,self.input)\n\n #layout = QtGui.QHBoxLayout(self)\n #self.input.setLayout(layout)\n layout = self.layout()\n self.fields = []\n for fld,val in zip(fields,value):\n f = InputInteger(fld,val)\n self.fields.append(f)\n layout.addWidget(f)", "def get_item_subclass(self, region, namespace, class_id, subclass_id, **filters):\n filters['namespace'] = namespace\n params = [class_id, subclass_id]\n resource = 'data/wow/item-class/{0}/item-subclass/{1}'\n return self.get_resource(resource, region, *params, **filters)", "def intialize_from_fields(self):\n raise NotImplementedError", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title} :\" + s\n\n return s", "def serialItem(form, fieldname, item_value, doc=None, prefix='', nest_datagrid=True, render=True):\n req = []\n db = form.getParentDatabase()\n field = form.getFormField(fieldname)\n fieldtype = '' if not field else field.getFieldType()\n\n # custom DATE field type are considered as DATETIME stansard ones\n if fieldtype == 'DATE':\n fieldtype = 'DATETIME'\n\n if fieldtype == 'DOCLINK':\n\n if nest_datagrid:\n sub_req = []\n\n for link in item_value or []:\n sub_doc = db.getDocument(link)\n # I choose not to follow nested doclink, from now on follow_doclink is false\n el = dict(serialDoc(sub_doc, nest_datagrid=True, serial_as=False,\n field_list=[], render=render, follow_doclink=False))\n if nest_datagrid:\n sub_req.append(el)\n else:\n req += [('%s.%s' % (fieldname, k), v) for k,v in el.items()]\n\n if nest_datagrid:\n req.append((fieldname, sub_req))\n\n elif fieldtype == 'DATAGRID':\n grid_form = db.getForm(field.getSettings().associated_form)\n grid_field_names = field.getSettings().field_mapping.split(',')\n\n if nest_datagrid:\n sub_req = []\n\n for row in item_value or []:\n el = {}\n for idx,sub_field_name in enumerate(grid_field_names):\n sub_item_value = row[idx]\n\n if nest_datagrid:\n el[sub_field_name] = sub_item_value\n else:\n prefix = '%s.' 
% fieldname\n req += serialItem(grid_form, sub_field_name,\n sub_item_value, prefix=prefix, nest_datagrid=False)\n\n if nest_datagrid:\n sub_req.append(el)\n\n if nest_datagrid:\n req.append((fieldname, sub_req))\n\n else:\n # if I need data representation (or metadata) for printing porposes\n if item_value and render and fieldtype not in ('TEXT', 'NUMBER', ):\n # not worth it to call the template to render text and numbers\n # it is an expensive operation\n fieldtemplate = db.getRenderingTemplate('%sFieldRead' % fieldtype) \\\n or db.getRenderingTemplate('DefaultFieldRead')\n renderedValue = fieldtemplate(fieldname=fieldname,\n fieldvalue = item_value,\n selection = field.getSettings().getSelectionList(doc),\n field = field,\n doc = doc\n ).strip()\n # if I need data value\n else:\n if not item_value:\n renderedValue = ''\n elif fieldtype == 'NUMBER':\n custom_format = field.getSettings('format')\n renderedValue = str(item_value) if not custom_format else custom_format % rendered_value\n elif fieldtype == 'DATETIME':\n custom_format = field.getSettings('format') or db.getDateTimeFormat()\n renderedValue = item_value.strftime(custom_format)\n else:\n # in order to prevent TypeError for unknown not JSON serializable objects\n try:\n json_dumps(item_value)\n except TypeError:\n renderedValue = '%s' % item_value\n else:\n renderedValue = item_value\n key = prefix + fieldname\n req.append((key, renderedValue, ))\n return req", "def __init__(self, field: str):\n super().__init__()\n self.field = field", "def create_items(sender, instance, **kwargs):\n if instance.item_id is None and instance.item is None:\n item = Item()\n if hasattr(instance, 'active'):\n item.active = getattr(instance, 'active')\n item.save()\n instance.item = item", "def __init__(self, name, **properties):\n # Initialize the base class.\n fields = [UnicodeField(name = \"pattern\",\n title = \"Pattern\",),\n UnicodeField(name = \"replacement\",\n title = \"Replacement\")]\n qm.fields.TupleField.__init__(self, name, fields, **properties)", "def new_varItem(self):\n newInd = (len(pQt.getTopItems(self)) + 1)\n newItem = QtGui.QTreeWidgetItem()\n newItem.setText(0, str(newInd))\n newItem._treeParent = self\n newItem._wdgtParent = self.treeParent\n newItem.wdgEnabled = self.new_varEnabledWidget()\n newItem.wdgLabel = self.new_varTextWidget()\n newItem.wdgType = self.new_varTypeWidget()\n newItem.wdgValue = self.new_varTextWidget()\n newItem.wdgComment = self.new_varTextWidget()\n return newItem", "def from_fields(cls, field_list):\n field_dict = dict(field_list)\n return cls(**field_dict)", "def create_sample_item(name, price, data_only):\n item_info = {\n 'name': name,\n 'price': price\n }\n if data_only:\n return item_info\n\n else:\n item_obj = Item.objects.create(**item_info)\n return item_obj, item_info", "def generate_bw_parent_field(parent_id):\n new_field = etree.Element(\"{http://www.loc.gov/MARC21/slim}datafield\")\n new_field.set(\"ind1\", \" \")\n new_field.set(\"ind2\", \" \")\n new_field.set(\"tag\", \"ADF\")\n subfield = etree.SubElement(new_field, \"{http://www.loc.gov/MARC21/slim}subfield\")\n subfield.set(\"code\", \"a\")\n subfield.text = parent_id\n return new_field", "def create(self, action_type):\n res = self.ITEM_TYPE()\n self.Create(action_type, res)\n return res.subtype", "def _make_field(index, field_desc, names):\n field_schema = schema_from_json_data(\n json_data=field_desc['type'],\n names=names,\n )\n other_props = (\n dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS)))\n return 
Field(\n data_type=field_schema,\n name=field_desc['name'],\n index=index,\n has_default=('default' in field_desc),\n default=field_desc.get('default', _NO_DEFAULT),\n order=field_desc.get('order', None),\n doc=field_desc.get('doc', None),\n other_props=other_props,\n )", "def getFieldValue(self, field, no_html=False, external_id=True, depth=1, optimize=False):\n if field[\"type\"] == \"category\":\n if field[\"config\"][\"settings\"][\"multiple\"]:\n values = []\n for category in field[\"values\"]:\n values.append(category[\"value\"][\"text\"])\n return values\n else:\n return field[\"values\"][0][\"value\"][\"text\"]\n elif field[\"type\"] == \"image\":\n values = []\n for image in field['values']:\n values.append([image[\"value\"][\"mimetype\"], image[\"value\"][\"file_id\"]])\n return values\n elif field[\"type\"] == \"date\":\n return field[\"values\"][0]\n elif field[\"type\"] == \"app\":\n itemID = field[\"values\"][0][\"value\"][\"item_id\"]\n appID = field[\"values\"][0][\"value\"][\"app\"][\"app_id\"]\n if depth<=0:\n return itemID\n else:\n if optimize:#Si es necesario optimizar la carga del item\n try: #Intenta buscar la lista de items como un atributo en self\n items = getattr(self, str(appID))\n except AttributeError:\n #Como no los encontró, crea una nueva PodioAPI con la appID de destino y le pide los items\n if self.client:\n nested_api = self\n else:\n try:\n nested_api = self.__class__(appID)\n except: #TODO: Especificar la excepcion que es de tipo \"DoesNotExist\"\n raise Exception(\"Hubo un error creando el nuevo objeto 'PodioApi' para el item relacionado con app_id %s. Por favor agregar el app_id y el app_token de esa aplicacion a la base de datos\" % appID)\n items = nested_api.get_filtered_items(None, depth=depth-1)\n #Luego crea el atributo para que esta llamada no se repita\n setattr(self, str(appID), items)\n #Ya teniendo a todos los items, busca entre la lista aquel cuya ID es igual al item ID de la referencia, y lo pone como valor del campo.\n item = None\n for i in items:\n if i[\"item\"] == int(itemID):\n item = i\n\n else:\n data = self._client.Item.find(int(itemID))\n if not external_id:\n item = self.make_dict(data, external_id=external_id, depth=depth-1)\n else:\n item = self.makeDict(data, nested=True)\n return item\n elif field[\"type\"] == \"text\":\n text = field[\"values\"][0][\"value\"]\n if no_html and field[\"config\"][\"settings\"][\"format\"] == 'html':\n print (text.encode('utf-8'))\n html_text = BeautifulSoup(text, \"html5lib\")\n for p_tag in html_text.find_all('p'):\n p_tag.unwrap()\n for br_tag in html_text.find_all('br'):\n br_tag.name=\"text:line-break\"\n html_text.find('html').unwrap()\n html_text.find('head').unwrap()\n html_text.find('body').unwrap()\n text = unicode(html_text)\n #text = strip_tags(text)\n return text\n elif field[\"type\"] == \"embed\":\n return field[\"values\"][0][\"embed\"][\"url\"]\n else:\n #print field[\"type\"]\n return field[\"values\"][0][\"value\"]", "def child(self, p_int, int_column=0): # real signature unknown; restored from __doc__\r\n return QStandardItem", "def fake_get_display_items():\r\n return [FakeChild()]", "def __init__(self, name, field_path):\n self.name = name\n self.field_path = field_path", "def __init__(self, field_info):\n self.field_info = field_info", "def __getitem__(self, item):\n return self.fields[item]", "def add(self, field_create_information):\n field = Field.create_field_from_type(self.context, field_create_information)\n self.add_child(field)\n qry = CreateEntityQuery(self, 
field, field)\n self.context.add_query(qry)\n return field", "def create_field(self, field, dim_translation=None):\n raise NotImplementedError", "def invoiceitems(self):\r\n return InvoiceItems(self)", "def split(items):\n return {\n \"class\": \"split\",\n \"items\": items\n }", "def __init__(self, item, item_clazz):\n self.key = item['key']\n self.stat = item['value']\n self.item_clazz = item_clazz" ]
[ "0.7079664", "0.60948706", "0.57870966", "0.5703622", "0.5617625", "0.56146836", "0.56075746", "0.5595222", "0.554094", "0.546303", "0.54049605", "0.53629017", "0.53175354", "0.5301199", "0.5221799", "0.5221799", "0.51944876", "0.51886076", "0.5142889", "0.5127228", "0.5126402", "0.5116837", "0.5099944", "0.50934803", "0.50798714", "0.5075126", "0.50698483", "0.5046927", "0.5024666", "0.49899125", "0.49823135", "0.49242845", "0.49220717", "0.49199012", "0.4918851", "0.48893052", "0.4871825", "0.48715928", "0.48632172", "0.48488712", "0.48379928", "0.48223123", "0.4816033", "0.4814029", "0.4808172", "0.47937173", "0.4789603", "0.47754118", "0.47647485", "0.47508958", "0.47456747", "0.47441262", "0.47236037", "0.47200826", "0.47041982", "0.46900702", "0.4687634", "0.46839783", "0.46801722", "0.46727994", "0.466984", "0.46415392", "0.46329376", "0.46329376", "0.46078956", "0.4607372", "0.46036875", "0.45983127", "0.459653", "0.45902258", "0.45899194", "0.4588133", "0.45877442", "0.4587358", "0.45860267", "0.4582024", "0.45787963", "0.45787567", "0.45782346", "0.45773637", "0.45744687", "0.45695415", "0.45674607", "0.45640916", "0.45628083", "0.45625287", "0.456163", "0.45546967", "0.45523894", "0.45521176", "0.45470494", "0.45469713", "0.45450565", "0.45426008", "0.45388618", "0.45377892", "0.45363855", "0.45361787", "0.4529748", "0.452919" ]
0.7673134
0
Factory associated with Subfield.
def subfield(): return Subfield()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subfieldFactory(name):\n from pythia.pyre.inventory import facility\n return facility(name, family=\"subfield\", factory=Subfield)", "def add_sub_factories(self) -> None:\n for field in get_model_fields(self.model, base=False, foreign=True, m2m=False):\n if not hasattr(self.factory, field.name):\n factory_name = self._get_factory_name_for_model(field.related_model)\n if field.related_model == self.model:\n _factory = SelfFactory(factory=self.factory, required=not field.null)\n else:\n _factory = SubFactory(\n factory=factory_name,\n required=not field.null,\n related_model=field.related_model\n )\n setattr(self.factory, field.name, _factory)", "def GetSubfieldDef(fielddef):\n\n format_, addrdef, datadef, arraydef, validate, cmd, converter = GetFieldDef(fielddef, fields='format_, addrdef, datadef, arraydef, validate, cmd, converter')\n\n # create new arraydef\n if len(arraydef) > 1:\n arraydef = arraydef[1:]\n else:\n arraydef = None\n\n # create new datadef\n if isinstance(datadef, tuple):\n if cmd is not None:\n datadef = (arraydef, validate, cmd)\n else:\n datadef = (arraydef, validate)\n else:\n datadef = arraydef\n\n # set new field def\n subfielddef = None\n if converter is not None:\n subfielddef = (format_, addrdef, datadef, converter)\n else:\n subfielddef = (format_, addrdef, datadef)\n\n return subfielddef", "def create_subspecialty(sub_data):\n return get_or_create_object(sub_data, Subspecialty)", "def subtable(\n field: str,\n table: Type[ModelledTable],\n subfield: Optional[str] = None,\n pivot: Optional[str] = None,\n selectors: Optional[Dict[str, PrimitiveTypes]] = None,\n) -> Callable[[Type[SecondTable]], Type[SecondTable]]:\n\n if not subfield:\n subfield = field\n\n if not selectors:\n selectors = dict()\n\n sub: SubTable[ModelledTable] = SubTable(table, subfield, pivot, selectors)\n\n def _subtable(cls: Type[SecondTable]) -> Type[SecondTable]:\n \"\"\"Adds a subtable key to a Table\"\"\"\n\n if not issubclass(cls, Table):\n raise Exception(f\"{cls.__name__} is not a sub class of Table\")\n\n subtables: Dict[str, SubTable[ModelledTable]] = getattr(cls, _SUBTABLES, {})\n subtables[field] = sub\n setattr(cls, _SUBTABLES, subtables)\n\n return cls\n\n return _subtable", "def _subtable(cls: Type[SecondTable]) -> Type[SecondTable]:\n\n if not issubclass(cls, Table):\n raise Exception(f\"{cls.__name__} is not a sub class of Table\")\n\n subtables: Dict[str, SubTable[ModelledTable]] = getattr(cls, _SUBTABLES, {})\n subtables[field] = sub\n setattr(cls, _SUBTABLES, subtables)\n\n return cls", "def subscription_factory_fixture():\n def _factory(capability):\n sub = Subscription()\n sub.capability = capability\n return sub\n return _factory", "def make_field(field):\n\n if \"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)", "def test_customWidgetFactory(self):\n\n value_type = TextLine(__name__='bar')\n self.field = List(__name__='foo', value_type=value_type)\n request = TestRequest()\n\n # set up the custom widget factory and verify that it works\n sw = CustomWidgetFactory(ListSequenceWidget)\n widget = sw(self.field, request)\n assert widget.subwidget is None\n assert widget.context.value_type is value_type\n\n # set up a variant that specifies the subwidget to use and verify it\n class PollOption:\n pass\n ow = CustomWidgetFactory(ObjectWidget, PollOption)\n sw = CustomWidgetFactory(ListSequenceWidget, subwidget=ow)\n widget = sw(self.field, request)\n assert widget.subwidget is ow\n assert 
widget.context.value_type is value_type", "def test_subwidget(self):\n self.field = List(__name__='foo',\n value_type=TextLine(__name__='bar'))\n request = TestRequest()\n\n class PollOption:\n pass\n ow = CustomWidgetFactory(ObjectWidget, PollOption)\n widget = SequenceWidget(\n self.field, self.field.value_type, request, subwidget=ow)\n assert widget.subwidget is ow", "def field_subtype(f, default=MISSING, *, unwrap=True):\n return _field_type(f, SUBTYPE, default, unwrap=unwrap)", "def __init__(self, parent_=None, instance_name_=None, **values):\n self.__parent = parent_\n self.__instance_name = instance_name_\n\n self._factories = {}\n\n for name, field in self._get_fields().items():\n if isinstance(field, fields.Factory):\n # for factory fields, we need to create a new factory with the given factory_type\n value = field.factory_type(field.type, name_=name, parent_instance_=self)\n self._factories[name] = value\n else:\n value = values.get(name, field.from_raw(field.default))\n\n # accept raw as a default value\n # and set inner value, so it should be availale from the start\n setattr(self, f\"__{name}\", value)", "def get_factory():", "def childFactory(self, request, childName):\n return None", "def factory(self):\n raise NotImplementedError()", "def multivalue_field_factory(field_class):\n class NewField(field_class):\n widget = forms.SelectMultiple\n\n def to_python(self, value):\n if not value:\n return []\n return [\n # Only append non-empty values (this avoids e.g. trying to cast '' as an integer)\n super(field_class, self).to_python(v) for v in value if v\n ]\n\n return type('MultiValue{}'.format(field_class.__name__), (NewField,), dict())", "def _new_field(self):\n field = self.domain.new_field()\n return field", "def __createField(self, field):\n name = field['name']\n fType = field['type']\n fieldLength = None\n if 'shape' in name.lower():\n return\n elif \"String\" in fType:\n fieldType = \"TEXT\"\n fieldLength = field['length']\n elif \"Date\" in fType:\n fieldType = \"DATE\"\n elif \"SmallInteger\" in fType:\n fieldType = \"SHORT\"\n elif \"Integer\" in fType:\n fieldType = \"LONG\"\n elif \"Double\" in fType:\n fieldType = \"DOUBLE\"\n elif \"Single\" in fType:\n fieldType = \"FLOAT\"\n else:\n fieldType = \"Unknown\"\n featureClass = self.featureClassLocation + \"\\\\\" + self.name\n validatedName = arcpy.ValidateFieldName(name, self.featureClassLocation)\n arcpy.AddField_management(in_table=featureClass, field_name=name, field_type=fieldType, field_length=fieldLength)", "def factory(self):\n return self._factory", "def factory(self):\n return self._factory", "def extend_or_add_fields(cls, subfields, dbmanager, flag_mixin_atroot, propname, proplabel):\n import mdbmodel_fieldset\n if (flag_mixin_atroot):\n # prepare extra fields that will be added at root; this doesnt actually create any prerequisites\n cls.extend_fields(subfields)\n else:\n # add a special sub table that will contain some fields, using a helper class object attached to us\n # create (AND REGISTER) the new helper object\n backrefname = cls.get_dbtablename_pure()\n mdbmodel_fieldset.MewloDbFieldset.make_fieldset_dbobjectclass(cls, propname, proplabel, backrefname, dbmanager, subfields)", "def _make_subset(cls, name, data, **kwargs):\r\n return cls(name, data, **kwargs)", "def factory_method(self):\n pass", "def factory_method(self):\n pass", "def txnSubCollectionFactory(txnSubCollection, txn):\n subCollection = txnSubCollection.cloneMetaData()\n subCollection.append(txn)\n return subCollection", "def 
getFactorys(self) -> List[ghidra.app.util.viewer.field.FieldFactory]:\n ...", "def __init__(self, field: str):\n super().__init__()\n self.field = field", "def get_factory(self):\n\n return Factory(type(self), self.kwargs)", "def create_field(self, field, dim_translation=None):\n raise NotImplementedError", "def create_field(dj_field, **kwargs):\n if isinstance(dj_field, dj_models.OneToOneField):\n return field.OneToOne.from_dj_field(dj_field, **kwargs)\n elif isinstance(dj_field, dj_models.ForeignKey):\n return field.ForeignKey.from_dj_field(dj_field, **kwargs)\n elif isinstance(dj_field, dj_models.ManyToManyField):\n return field.ManyToMany.from_dj_field(dj_field, **kwargs)\n else:\n return field.Field.from_dj_field(dj_field, **kwargs)", "def format_sub_part(cls, field, length):\n try:\n if not length:\n raise ValueError\n\n length = int(length)\n return \"`%s`(%d)\" % (field, length)\n\n except ValueError:\n return \"`%s`\" % (field,)", "def build_standard_field(self, field_name, model_field_type):\n field_mapping = self.serializer_field_mapping\n field_class = field_mapping[model_field_type]\n field_kwargs = get_field_kwargs(field_name, model_field_type)\n\n if \"choices\" in field_kwargs:\n # Fields with choices get coerced into `ChoiceField`\n # instead of using their regular typed field.\n field_class = self.serializer_choice_field\n # Some model fields may introduce kwargs that would not be valid\n # for the choice field. We need to strip these out.\n # Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES)\n valid_kwargs = {\n \"read_only\",\n \"write_only\",\n \"required\",\n \"default\",\n \"initial\",\n \"source\",\n \"label\",\n \"help_text\",\n \"style\",\n \"error_messages\",\n \"validators\",\n \"allow_null\",\n \"allow_blank\",\n \"choices\",\n }\n for key in list(field_kwargs):\n if key not in valid_kwargs:\n field_kwargs.pop(key)\n\n if not issubclass(field_class, fields.CharField) and not issubclass(\n field_class, fields.ChoiceField\n ):\n # `allow_blank` is only valid for textual fields.\n field_kwargs.pop(\"allow_blank\", None)\n\n return field_class, field_kwargs", "def __init__(self, field: FT):\n self.field: Final[FT] = field", "def getUuidDocSubfield(coll, uuid, field, subField):\n mongoColl = mongoConfigColls[coll]\n query = { 'field' : field, 'apiviz_front_uuid' : uuid }\n result = mongoColl.find_one(query)\n log_app.debug(\"getUuidDocSubfield / result : %s\", result )\n return result[subField]", "def new_detector(self, name=\"detector\", entry=\"entry\", subentry=\"pyFAI\"):\n entry_grp = self.new_entry(entry)\n pyFAI_grp = self.new_class(entry_grp, subentry, \"NXsubentry\")\n pyFAI_grp[\"definition_local\"] = numpy.string_(\"pyFAI\")\n # pyFAI_grp[\"definition_local\"].attrs[\"version\"] = numpy.string_(version)\n det_grp = self.new_class(pyFAI_grp, name, \"NXdetector\")\n return det_grp", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def _create_sub(name, rostype, topic_callback, *args, **kwargs):\n # counting subscriber instance per topic name\n if name in TopicBack.sub_instance_count.keys():\n TopicBack.sub_instance_count[name] += 1\n else:\n TopicBack.sub_instance_count[name] = 1\n\n return 
rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)", "def make_fields_of_study(\n *,\n n_fields_of_study_per_level: int,\n faker: Faker,\n n_levels: int = 6,\n min_title_length: int = 1,\n max_title_length: int = 3,\n) -> FieldOfStudyList:\n\n fields_of_study = []\n fos_id_ = 0\n for level in range(n_levels):\n for _ in range(n_fields_of_study_per_level):\n n_words_ = random.randint(min_title_length, max_title_length)\n name_ = faker.sentence(nb_words=n_words_)\n fos_ = FieldOfStudy(fos_id_, name=name_, level=level)\n fields_of_study.append(fos_)\n fos_id_ += 1\n\n return fields_of_study", "def __init__(self, field):\n super().__init__()\n self.field = str(field)", "def __init__(field, form, content):", "def __init__(field, form, content):", "def __init__(self, field):\n\n self.field = field.lstrip('-')\n self.raw_field = field", "def __init__(__self__, *,\n data_factory_id: Optional[pulumi.Input[str]] = None,\n fqdns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n subresource_name: Optional[pulumi.Input[str]] = None,\n target_resource_id: Optional[pulumi.Input[str]] = None):\n if data_factory_id is not None:\n pulumi.set(__self__, \"data_factory_id\", data_factory_id)\n if fqdns is not None:\n pulumi.set(__self__, \"fqdns\", fqdns)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if subresource_name is not None:\n pulumi.set(__self__, \"subresource_name\", subresource_name)\n if target_resource_id is not None:\n pulumi.set(__self__, \"target_resource_id\", target_resource_id)", "def test_make_form_field():", "def __init__(self, field_info):\n self.field_info = field_info", "def createSubListOfSpeciesFeatures(self):\n return _libsbml.MultiSpeciesPlugin_createSubListOfSpeciesFeatures(self)", "def makeField(self,field_name,field_type,field_precision,field_scale,field_length):\n \n new_field = self.GP.CreateObject(\"field\")\n new_field.Name = field_name\n new_field.Type = field_type\n new_field.Precision = field_precision\n new_field.Scale = field_scale\n new_field.Length = field_length\n new_field.IsNullable = True\n \n return new_field", "def introFieldWidgetFactory(field, request):\n return widget.FieldWidget(field, IntroWidget(request))", "def get_sub_collection(self, sub_id):\n sub_html = self._get_html_for_subject_main(sub_id)\n ep_html = self._get_html_for_subject_eps(sub_id)\n sub_coll = BangumiSubjectCollectionFactory.from_html(sub_html,\n ep_html)\n sub_coll.session = self\n return sub_coll", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title} :\" + s\n\n return s", "def factory(container, name, factory):", "def create_factory(cls, *args):\n raise NotImplementedError", "def newDynaField(field, base, passthrough):\n\n # pass only known accepted arguments to super\n init_args = dicts.filter(field, passthrough)\n\n properties = field.copy()\n\n # all pass through arguments are handled by super\n for key in passthrough:\n if key in properties:\n del properties[key]\n\n # pylint: disable=E1002\n class DynaField(base):\n \"\"\"The dynamically created Field class.\n \"\"\"\n\n __metaclass__ = DynaFieldMetaclass\n dynaproperties = properties\n\n def __init__(self):\n \"\"\"Pass through the init args to super.\n \"\"\"\n\n super(DynaField, self).__init__(**init_args)\n\n return DynaField", "def intialize_from_fields(self):\n raise NotImplementedError", "def __init__(__self__, *,\n 
data_factory_id: pulumi.Input[str],\n target_resource_id: pulumi.Input[str],\n fqdns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n subresource_name: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"data_factory_id\", data_factory_id)\n pulumi.set(__self__, \"target_resource_id\", target_resource_id)\n if fqdns is not None:\n pulumi.set(__self__, \"fqdns\", fqdns)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if subresource_name is not None:\n pulumi.set(__self__, \"subresource_name\", subresource_name)", "def __new__(subtype, input):\n # input should be a record array\n if input.dtype.subdtype is None:\n self = np.recarray.__new__(\n subtype, input.shape, input.dtype, buf=input.data\n )\n else:\n self = np.recarray.__new__(\n subtype, input.shape, input.dtype, buf=input.data, strides=input.strides\n )\n\n self._init()\n if self.dtype.fields:\n self._nfields = len(self.dtype.fields)\n\n return self", "def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n\n # virtual fields (for wrappers around hard coded models) don't do db management\n if self.virtual:\n super(Field, self).save(force_insert, force_update, using, update_fields)\n return\n\n exists = self.id\n\n if exists:\n existing = Field.objects.get(pk=self.id)\n if not self._data_equality(existing):\n\n super(Field, self).save(force_insert, force_update, using, update_fields)\n\n # if the field type has changed we need to delete the old subfield data and create\n # the appropriate new subtype\n if existing.type != self.type:\n existing.specific.delete()\n\n # create subfield data settings\n field_type = field_registry.field_type(self.type)\n field = field_type(field=self)\n field.save()\n\n existing_field = self.model.model._meta.get_field_by_name(existing.name)[0]\n new_field = self._db_field()\n\n # make the required changes to the database\n self.model._model_remove_field(existing)\n self.model._model_add_field(self)\n\n self.model.alter_field(existing_field, new_field)\n\n else:\n super(Field, self).save(force_insert, force_update, using, update_fields)\n\n # create subfield data settings\n field_type = field_registry.field_type(self.type)\n field = field_type(field=self)\n field.save()\n\n self.model.add_field(self)\n\n if not self.sort_order:\n self.sort_order = self.id\n self.save()", "def add(self, field_create_information):\n field = Field.create_field_from_type(self.context, field_create_information)\n self.add_child(field)\n qry = CreateEntityQuery(self, field, field)\n self.context.add_query(qry)\n return field", "def getFactorys(self, row: int) -> List[ghidra.app.util.viewer.field.FieldFactory]:\n ...", "def base_field(self):\n return F", "def __init__(self, name, field_path):\n self.name = name\n self.field_path = field_path", "def build(self, factory, *factory_args, **factory_kw):\n return self._instantiate(\"\", factory, factory_args, factory_kw)", "def _make_field(index, field_desc, names):\n field_schema = schema_from_json_data(\n json_data=field_desc['type'],\n names=names,\n )\n other_props = (\n dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS)))\n return Field(\n data_type=field_schema,\n name=field_desc['name'],\n index=index,\n has_default=('default' in field_desc),\n default=field_desc.get('default', _NO_DEFAULT),\n order=field_desc.get('order', None),\n doc=field_desc.get('doc', None),\n other_props=other_props,\n )", "def createSpeciesFeature(self):\n return 
_libsbml.SubListOfSpeciesFeatures_createSpeciesFeature(self)", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def __init__(self, field: \"SchemaTypeField\", settings: Settings):\n from qlient import helpers\n self.settings = settings\n self.name = field.name\n self.description = field.description\n self.arguments = helpers.adapt_arguments(field.args)\n self.return_type = field.type\n self._return_fields: Union[Tuple[SelectedField], None] = None", "def createField(schemaName, field):\n# print(field.domain)\n# print(field.name, field.domain if isinstance(field.domain, str) else field.domain.type)\n# print(field.__dict__)\n return \"\\\"{name}\\\" {type_}\".format(\n name = field.name,\n type_ = '\"' + schemaName + '\".\"' + field.domain + '\"' if isinstance(field.domain, str) else getType(field.domain)\n )", "def customize_fields(self, fields):\n\n for field in fields.values():\n\n field_type = type(field.field)\n\n if field_type is List or field_type is Set:\n field.widgetFactory = CheckBoxFieldWidget\n\n elif field_type is Choice:\n field.widgetFactory = RadioFieldWidget", "def factory(cls, user):\n if cls.__name__.startswith(user.type): # Children class naming convention is important\n return cls(user)\n for sub_cls in cls.__subclasses__():\n result = sub_cls.factory(user)\n if result is not None:\n return result", "def to_field(obj):\r\n\r\n\r\n if isinstance(obj, Field):\r\n field = obj\r\n else:\r\n d = { \"storage_type\": \"unknown\" }\r\n\r\n if isinstance(obj, basestring):\r\n d[\"name\"] = obj\r\n elif type(obj) == tuple or type(obj) == list:\r\n d[\"name\"] = obj[0]\r\n try:\r\n d[\"storage_type\"] = obj[1]\r\n try:\r\n d[\"analytical_type\"] = obj[2]\r\n except:\r\n pass\r\n except:\r\n pass\r\n else: # assume dictionary\r\n d[\"name\"] = obj[\"name\"]\r\n d[\"label\"] = obj.get(\"label\")\r\n d[\"storage_type\"] = obj.get(\"storage_type\")\r\n d[\"analytical_type\"] = obj.get(\"analytical_type\")\r\n d[\"adapter_storage_type\"] = obj.get(\"adapter_storage_type\")\r\n\r\n if \"analytical_type\" not in d:\r\n storage_type = d.get(\"storage_type\")\r\n if storage_type:\r\n deftype = default_analytical_types.get(storage_type)\r\n d[\"analytical_type\"] = deftype or \"typeless\"\r\n else:\r\n d[\"analytical_type\"] = \"typeless\"\r\n\r\n field = Field(**d)\r\n return field", "def auxiliary_subfields():\n return AuxSubfieldsAbsorbingDampers()", "def __init__(self, field):\n ScalingFunctional.__init__(self, field, 1.0)", "def field(\n resolver: Optional[_RESOLVER_TYPE] = None,\n *,\n name: Optional[str] = None,\n is_subscription: bool = False,\n description: Optional[str] = None,\n permission_classes: Optional[List[Type[BasePermission]]] = None,\n federation: Optional[FederationFieldParams] = None,\n deprecation_reason: Optional[str] = None,\n default: Any = UNSET,\n default_factory: Union[Callable, object] = UNSET,\n) -> StrawberryField:\n\n field_ = StrawberryField(\n python_name=None,\n graphql_name=name,\n type_=None,\n description=description,\n is_subscription=is_subscription,\n permission_classes=permission_classes or [],\n federation=federation or FederationFieldParams(),\n deprecation_reason=deprecation_reason,\n default=default,\n default_factory=default_factory,\n )\n\n if resolver:\n return field_(resolver)\n return field_", "def field(base : SetupVal, field_name : str) -> SetupVal:\n if not isinstance(base, SetupVal):\n raise ValueError('field expected a SetupVal, but 
got {base!r}')\n if not isinstance(field_name, str):\n raise ValueError('field expected a str, but got {field_name!r}')\n return FieldVal(base, field_name)", "def subs(self, pre, post):\n return SubbedBasisFunction(self, pre, post)", "def create_field(self, label, value_type, key=None):\n payload = self._build_params(label=label, value_type=value_type, key=key)\n return Field.deserialize(self._post('fields', None, payload))", "def extend(self, fieldname, valuefactory):\n names = {}\n values = {}\n typename = self._type.__doc__.split('(')[0]\n newtype = collections.namedtuple( typename, list(self._type._fields) + [ fieldname ] )\n for number, value in self._values.items():\n value = newtype( *(list(value) + [ valuefactory(value) ]) )\n names[value.name] = value\n values[number] = value\n \n self._type = newtype\n self._names = names\n self._values = values", "def get_field(self):\n return self", "def __init__(self, field, derived_field = None):\r\n super(TextSubstituteNode, self).__init__()\r\n\r\n self.field = field\r\n self.derived_field = derived_field\r\n self.substitutions = []", "def __init__(self, parent, field_id, altparent=None):\n log.debug(\"RecordField %s\"%(field_id))\n # assert altparent, \"RecordField instantiated with no altparent\"\n super(RecordField, self).__init__(parent, field_id, altparent=altparent)\n return", "def get_factory():\n # Get config from Django settings\n sdk_config = settings.SPLITIO\n api_key = settings.SPLITIO.get('apiKey', '')\n\n return get_splitio_factory(\n api_key,\n config=sdk_config,\n **{k: sdk_config[k] for k in ('sdk_api_base_url', 'events_api_base_url', 'auth_api_base_url', 'streaming_api_base_url') if k in sdk_config}\n )", "def create_message_sub(msg_sub_as_str):\n msg_comps = msg_sub_as_str.split('\\n')\n from_id = msg_comps[1]\n origin_id = msg_comps[2]\n message_id = msg_comps[3]\n\n subs_map = {}\n for i in range(4, len(msg_comps)):\n sub_comps = msg_comps[i].split(\":\")\n topic = sub_comps[1]\n if sub_comps[0] == \"sub\":\n subs_map[topic] = True\n else:\n subs_map[topic] = False\n return MessageSub(from_id, origin_id, subs_map, message_id)", "def createSubmodel(self):\n return _libsbml.CompModelPlugin_createSubmodel(self)", "def __sub__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec - other.elec\n p.magn[:] = self.magn - other.magn\n return p\n else:\n raise DataError(\"Type error: cannot subtract %s from %s\" % (type(other), type(self)))", "def AddressWithUserdefinedFieldFactory(\n FieldFactory, UpdateablePersonFactory, PostalAddressFactory):\n def _create_user_defined_field(address_book, field_type, field_value):\n \"\"\"Create a user defined field.\"\"\"\n field_name = FieldFactory(\n address_book, IPostalAddress, field_type, u'distance').__name__\n return PostalAddressFactory(\n UpdateablePersonFactory(address_book),\n **{field_name: field_value, 'set_as_default': True})\n return _create_user_defined_field", "def __init__(self, name, description, field_type_processor, required=False):\n FieldDescriptor.__init__(self, name, description, \n field_type_processor.extract, required)\n # add an adapt method\n self.adapt = field_type_processor.adapt", "def __init__(self, name, description, field_type_processor, required=False):\n FieldDescriptor.__init__(self, name, description, \n field_type_processor.extract, required)\n # add an adapt method\n self.adapt = field_type_processor.adapt", "def __init__(self, 
sub_category: SubCategory, name: str, price_str: str, discount_str: str) -> None:\n\n self.sub_category = sub_category\n self.name = name\n self.price_per_unit, self.unit = self._extract_price_and_unit(price_str=price_str)\n self.discount_strategy = Entity.factory_for_discount(discount_str)(discount_str)", "def generate_bw_parent_field(parent_id):\n new_field = etree.Element(\"{http://www.loc.gov/MARC21/slim}datafield\")\n new_field.set(\"ind1\", \" \")\n new_field.set(\"ind2\", \" \")\n new_field.set(\"tag\", \"ADF\")\n subfield = etree.SubElement(new_field, \"{http://www.loc.gov/MARC21/slim}subfield\")\n subfield.set(\"code\", \"a\")\n subfield.text = parent_id\n return new_field", "def factory(self):\n return self.F.SuiteFactory", "def wfpdoc_factory(**kwargs):\n # it seems we cannot use django-dynamic-fixtures with django-polymorphic\n # therefore we create the fixture the old fashion way\n wfpdoc_number = random.randint(0, 1000)\n title = kwargs.pop('title', None)\n if not title:\n title = 'Static map N. %s' % wfpdoc_number\n abstract = 'Abstract for static map N. %s' % wfpdoc_number\n # we need to upload a file\n imgfile = StringIO.StringIO(\n 'GIF87a\\x01\\x00\\x01\\x00\\x80\\x01\\x00\\x00\\x00\\x00ccc,\\x00'\n '\\x00\\x00\\x00\\x01\\x00\\x01\\x00\\x00\\x02\\x02D\\x01\\x00;')\n doc_file = SimpleUploadedFile(\n '%s.gif' % wfpdoc_number,\n imgfile.read(),\n 'image/gif')\n owner = rol_capooti()\n wfpdoc = WFPDocument(title=title, abstract=abstract, owner=owner, doc_file=doc_file)\n # associate a layer. TODO also associate maps in place of layers\n id_list = list(xrange(Layer.objects.all().count()))\n random.shuffle(id_list)\n layer = Layer.objects.all()[id_list[0]]\n layer_ct = ContentType.objects.get(app_label=\"layers\", model=\"layer\")\n wfpdoc.content_type = layer_ct\n wfpdoc.object_id = layer.id\n wfpdoc.save()\n\n # append some (0 to 3) categories\n id_list = list(xrange(Category.objects.all().count()))\n random.shuffle(id_list)\n for i in range(0, 3):\n category = Category.objects.all()[id_list[i]]\n wfpdoc.categories.add(category)\n\n # set permissions\n perm_spec = {\n \"users\": {\n \"admin\": [\n \"change_resourcebase\",\n \"change_resourcebase_permissions\",\n \"view_resourcebase\"]},\n \"groups\": {}}\n wfpdoc.set_permissions(perm_spec)\n\n return wfpdoc", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n data_factory_id: Optional[pulumi.Input[str]] = None,\n fqdns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n subresource_name: Optional[pulumi.Input[str]] = None,\n target_resource_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __new__(subtype,parent,name,typecode,dimensions,**kwds):\n if 'values' in kwds.keys():\n result=kwds.pop('values')\n else:\n shape=[]\n for d in dimensions:\n dim = parent.dimensions[d]\n\n # Adding support for netCDF3 dimension objects\n if not isinstance(dim, int):\n dim = len(dim)\n shape.append(dim)\n\n result=np.zeros(shape,typecode)\n \n result=result[...].view(subtype)\n\n result.typecode = lambda: typecode\n result.dimensions = tuple(dimensions)\n result._ncattrs = ()\n for k,v in kwds.items():\n setattr(result,k,v)\n return result", "def __init__(self, raw_field: Dict):\n self.name = raw_field.get(\"name\")\n self.description = raw_field.get(\"description\")\n self.args: Dict[str, Argument] = Schema.parse_arguments(raw_field.get(\"args\", []))\n self.type: TypeDefer = TypeDefer(raw_field.get(\"type\")) if 
raw_field.get(\"type\") is not None else None\n self.is_deprecated: bool = raw_field.get(\"isDeprecated\")\n self.deprecation_reason: str = raw_field.get(\"deprecationReason\")", "def product_field(self, field_id):\r\n return products.ProductField(self, field_id)", "def createInstance (self, factory, **kw):\n kw.update(self.__kw)\n return factory(*self.__args, **kw)", "def __init__(self, for_field, *args, **kwargs):\n self.for_field = for_field\n kwargs.setdefault(\"db_index\", True)\n kwargs.setdefault(\"editable\", False)\n kwargs.setdefault(\"max_length\", 255)\n\n # For use in pre_save()\n self.max_length = kwargs[\"max_length\"]\n\n super().__init__(**kwargs)", "def createField(selected_layer, newFieldName, newFieldType):\r\n field = ogr.FieldDefn(newFieldName, newFieldType)\r\n selected_layer.CreateField(field)", "def __init__(self, name: str, owner_subtype_of: Sequence[Any] = ()):\n self.name = name\n self.subtype_of = tuple(owner_subtype_of)" ]
[ "0.8500797", "0.66517395", "0.6441253", "0.6377734", "0.59815156", "0.58264554", "0.5662838", "0.5660655", "0.56087625", "0.5576944", "0.5576053", "0.5484294", "0.54726523", "0.5440579", "0.5439738", "0.5400487", "0.528335", "0.52721506", "0.52566725", "0.52566725", "0.5199021", "0.5194737", "0.51229864", "0.51229864", "0.51133466", "0.50936073", "0.5090678", "0.50849265", "0.5080003", "0.5052508", "0.5036895", "0.5023241", "0.50217694", "0.50176144", "0.5015954", "0.50032985", "0.50032985", "0.49965444", "0.49817026", "0.49639326", "0.49631095", "0.49631095", "0.49613565", "0.4958596", "0.49572718", "0.4944499", "0.49340758", "0.49267033", "0.4918487", "0.49110883", "0.49092403", "0.49025044", "0.489939", "0.48743597", "0.48679587", "0.48676437", "0.48516244", "0.48495892", "0.48475277", "0.48266497", "0.48220557", "0.4802839", "0.47997966", "0.4790873", "0.47898215", "0.47835875", "0.47541395", "0.4749304", "0.47460103", "0.47293526", "0.4724308", "0.4716947", "0.47153354", "0.47128418", "0.47048467", "0.4704749", "0.4679459", "0.46777925", "0.46706077", "0.4667992", "0.46679074", "0.4652177", "0.46449402", "0.46405056", "0.46347263", "0.46318424", "0.46304023", "0.46304023", "0.46177575", "0.46128178", "0.4608278", "0.460033", "0.45951763", "0.4594977", "0.45847547", "0.4580584", "0.4570764", "0.4567396", "0.45663255", "0.45643604" ]
0.77598864
1
Gets the short path name of a given long path.
def get_short_path_name(long_name: str): output_buf_size = _GetShortPathNameW(long_name, None, 0) if output_buf_size <= 0: return None output_buf = ctypes.create_unicode_buffer(output_buf_size) needed = _GetShortPathNameW(long_name, output_buf, output_buf_size) assert 0 < needed < output_buf_size return output_buf.value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_short_path_name(long_name):\n output_buf_size = 0\n while True:\n output_buf = ctypes.create_unicode_buffer(output_buf_size)\n needed = _GetShortPathNameW(long_name, output_buf, output_buf_size)\n if output_buf_size >= needed:\n return output_buf.value\n else:\n output_buf_size = needed", "def get_short_path(content):", "def get_short_name(self):\n return self.full_name.split(' ')[0]", "def short_name(self, length: int = 25) -> str:\n dir_name, file_name = os.path.split(self.name)\n file_root, file_ext = os.path.splitext(file_name)\n return (file_root[:length] + '..') if len(file_root) > length else file_root", "def get_folder_short_name_for_location(self, location):\n _method_name = 'get_folder_short_name_for_location'\n _logger.entering(location.get_folder_path(), class_name=_class_name, method_name=_method_name)\n folder_dict = self.__get_dictionary_for_location(location, False)\n result = ''\n if SHORT_NAME in folder_dict:\n result = folder_dict[SHORT_NAME]\n _logger.exiting(class_name=_class_name, method_name=_method_name, result=result)\n return result", "def short_filename(path: Path,\n length: int = MAX_FILENAME_LENGTH) -> str:\n shorted_name = Path(path).name\n if len(shorted_name) > length:\n shorted_name = ''.join(\n shorted_name[:length // 2].strip() +\n '...' +\n shorted_name[-length // 2:].strip())\n return shorted_name", "def full_path_to_name(self):\n return self._full_path_to_name", "def getShortName(self) -> str:\n return self.short_name", "def path_name(self):\n return self.full_name", "def findShortestPath(self):\r\n pass", "def short_name(self):\n return self.get(\"short_name\", decode=True)", "def shortname(self):\n return self.get(\"shortName\")", "def short_name(self) -> str:\n return self.name_components[-1]", "def shortpath(path):\r\n import os\r\n if path.startswith(base_dir):\r\n return path[len(base_dir) + len(os.path.sep) : ]\r\n return path", "def _shortpath(abspath):\r\n b = os.path.dirname(os.path.normpath(sys.modules[settings.SETTINGS_MODULE].__file__))\r\n p = os.path.normpath(abspath)\r\n return p[len(os.path.commonprefix([b, p])):]", "def getLongName(self) -> str:\n return self.long_name", "def full_name(self):\n path = [str(p) for p in self.path]\n # TODO add further checks, the mapping should only occur on stdlib.\n try:\n path[0] = self._mapping[path[0]]\n except KeyError:\n pass\n for key, repl in self._tuple_mapping.items():\n if tuple(path[:len(key)]) == key:\n path = [repl] + path[len(key):]\n\n return '.'.join(path if path[0] else path[1:])", "def path_shorten(str_path, length = 80) -> str:\n if length < 0:\n length = os.get_terminal_size().columns + length\n if len(str_path) > length:\n l_parts = list(pathlib.PurePath(str_path).parts)\n l_copy = l_parts.copy()\n max = len(l_parts)\n offset = -1\n center = max // 2\n while len(str_path) > length:\n offset += 1\n l_shorten = [i % (max + 1) for i in range( center - offset,\n center + offset + 1)]\n for prt in l_shorten: l_copy[prt] = '...'\n str_path = str(pathlib.PurePath(*l_copy))\n return str_path", "def shorten_path(path, length):\n if len(path) < length:\n return path\n if os.path.sep not in path:\n return shorten_string(path, length)\n\n short_base = \"\"\n if path.startswith(os.path.sep):\n short_base = os.path.sep\n path = path[1:]\n parts = path.split(os.path.sep)\n short_base += os.path.sep.join([p[0] for p in parts[:-1]])\n if len(short_base) > length:\n short_base = \"\"\n\n # Shorten the last part:\n short_name = parts[-1]\n last_length = length - len(short_base)\n if 
short_base:\n last_length = last_length - 1\n short_name = shorten_string(short_name, last_length)\n return os.path.join(short_base, short_name)", "def get_short_name(self):\n\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def name_from_path(path):\n return path[0:-3]", "def get_short_code():\n return rh.get_short_code(request)", "def get_short_name(self):\r\n return self.name", "def longName(self):\n return self.name()", "def full_name(self):\n return \"%s.%s\" % (self._dj._wid, self._job)", "def file_path_short(self):\r\n if not hasattr(self, '_file_path_short'):\r\n if self.file_path:\r\n result = None\r\n\r\n for path in sys.path:\r\n candidate = os.path.relpath(self.file_path, path)\r\n if not result or (len(candidate.split('/')) < len(result.split('/'))):\r\n result = candidate\r\n\r\n self._file_path_short = result\r\n else: \r\n self._file_path_short = None\r\n\r\n return self._file_path_short", "def get_full_path(self):\n return self.path_display", "def shortHostname(self) -> str:\n\t\treturn self.hostname[0]", "def longify (self, path):\r\n pass", "def path_name(self, path):\r\n ind = path.rfind(\"/\") + 1\r\n return (path[:ind], path[ind:])", "def get_short_name(self):\n\n return self.name", "def get_short_name(self):\n\n return self.name", "def shorter_name(key):\n key_short = key\n for sep in ['#', '/']:\n ind = key_short.rfind(sep)\n if ind is not None:\n key_short = key_short[ind+1:]\n else:\n key_short = key_short\n return key_short.replace('-', '_').replace('.', '_')", "def full_name(self) -> str:\n # return self.separator.join(map(lambda x: x.name, self.path()))\n return self.separator.join(map(lambda x: x.tagged_name, self.path()))", "def short_displayname(self):\n return self.get_short_displayname()", "def get_display_name(self, short=False):\n if self.filename is None:\n return '[New file]'\n elif short:\n return os.path.basename(self.filename)\n else:\n return self.filename", "def shorten_path(path):\n # On Windows, the filenames are not case-sensitive\n # and the way Python displays filenames may vary.\n # To properly compare, we convert everything to lowercase\n # However, we ensure that the shortened path retains its cases\n ext = os.path.splitext(path)[1]\n\n path_lower = path.lower()\n path_lower = os.path.splitext(path_lower)[0]\n\n if path_lower.startswith(PYTHON):\n path = \"PYTHON:\" + path[len(PYTHON) : -len(ext)]\n elif path_lower.startswith(IDEAS):\n path = \"IDEAS:\" + path[len(IDEAS) : -len(ext)]\n elif path_lower.startswith(TESTS):\n path = \"TESTS:\" + path[len(TESTS) : -len(ext)]\n elif path_lower.startswith(SITE_PACKAGES):\n path = \"SITE-PACKAGES:\" + path[len(SITE_PACKAGES) : -len(ext)]\n elif path_lower.startswith(HOME):\n path = \"~\" + path[len(HOME) : -len(ext)]\n return path", "def get_name(path):\n return path.rsplit('/',1)[1]", "def constructShortestPath(self):", "def _ref_name_from_path(self, path: str) -> str:\n prefix = \"%s/\" % self._path\n assert path.startswith(prefix)\n return path[len(prefix) :]", "def get_short_name(self):\n return self.last_name", "def get_mds_fullname(node):\n return str(node.getFullPath()).lower()", "def get_target_group_name(self, short_name):\n app_env = 
self.get_current_env()\n full_name = self.get_target_group_fully_qualified_name(short_name)\n namespace = self.config['namespace']\n\n if len(full_name) <= 32:\n return full_name\n elif len(namespace) + 10 <= 32:\n env_target_hash = hashlib.md5((short_name + app_env).encode()).hexdigest()[:9]\n return '{}-{}'.format(namespace, env_target_hash)\n else:\n return hashlib.md5(full_name.encode()).hexdigest()", "def pretty_name(self) -> str:\n try:\n return self._names_from_attrs('pretty_name')\n except AttributeError: # todo: what exception\n warnings.warn('pretty name not found in metadata, fallback to globals.py')\n if self.__short_name in globals._dataset_pretty_names.keys():\n return globals._dataset_pretty_names[self.__short_name]\n else:\n warnings.warn('pretty name also not found in globals.py, use short name')\n return self.__short_name", "def validate_short_path(short_path):", "def fully_qualified_friendly_name(self) -> str:\n return pulumi.get(self, \"fully_qualified_friendly_name\")", "def shpname(self):\n _, tail = os.path.split(self.url)\n return self.folder + ('/' + tail[:-4]) * 2", "def get_full_path(self, reg_path: str, reg_id: str) -> str:\n return '{}.{}'.format(reg_path, reg_id)", "def short_name_or_full(self):\n return self.short_name or self.title", "def get_target_group_fully_qualified_name(self, short_name):\n return '{}-{}'.format(\n self.get_balancer_name(),\n short_name,\n )", "def longname(self):\n if not self.parent:\n return self.name\n return '%s.%s' % (self.parent.longname, self.name)", "def get_module_short_name(klass):\n return klass.__module__.rsplit('.', 1)[-1]", "def getWindowsShortPathName(filename):\n import ctypes.wintypes\n\n GetShortPathNameW = ctypes.windll.kernel32.GetShortPathNameW\n GetShortPathNameW.argtypes = (\n ctypes.wintypes.LPCWSTR,\n ctypes.wintypes.LPWSTR,\n ctypes.wintypes.DWORD,\n )\n GetShortPathNameW.restype = ctypes.wintypes.DWORD\n\n output_buf_size = 0\n while True:\n output_buf = ctypes.create_unicode_buffer(output_buf_size)\n needed = GetShortPathNameW(\n os.path.abspath(filename), output_buf, output_buf_size\n )\n\n if needed == 0:\n # Windows only code, pylint: disable=I0021,undefined-variable\n\n # Permission denied.\n if ctypes.GetLastError() == 5:\n return filename\n\n raise WindowsError(\n ctypes.GetLastError(), ctypes.FormatError(ctypes.GetLastError())\n )\n\n if output_buf_size >= needed:\n # Short paths should be ASCII. Don't return unicode without a need,\n # as e.g. 
Scons hates that in environment variables.\n if str is bytes:\n return output_buf.value.encode(\"utf8\")\n else:\n return output_buf.value\n else:\n output_buf_size = needed", "def pathtitle(path):\n return thing_from_path(path).title", "def get_short_name(self) -> str:\n return self.first_name", "def __parse_full_path(path):\n dir = path[:path.rfind('/') + 1]\n name = path[path.rfind('/') + 1:]\n return dir, name", "def shorten_path(self, full_path, max_length=70):\n if len(full_path) <= max_length:\n return full_path\n else:\n need_to_save = len(full_path) - max_length\n\n shortened_path = []\n for index, folder in enumerate(full_path.split('/')):\n if index == 0:\n shortened_path.append(folder)\n continue\n\n elif index+1 == len(full_path.split('/')):\n shortened_path.append(folder)\n continue\n\n else:\n if need_to_save > 0:\n shortened_path.append(folder[0])\n need_to_save = need_to_save - len(folder) + 1\n else:\n shortened_path.append(folder)\n\n return '/'.join(shortened_path)", "def niceName(self, path):\n logger.debug(\"Func: niceName\")\n\n basename = os.path.split(path)[1]\n return os.path.splitext(basename)[0]", "def _get_path_infomation(self):\n long_identifier = self._device_path.split('/')[4]\n protocol, remainder = long_identifier.split('-', 1)\n identifier, _, device_type = remainder.rsplit('-', 2)\n return (protocol, identifier, device_type)", "def just_the_name(path):\n return os.path.splitext(os.path.basename(path))[0]", "def short_branch_name(branch):\n return branch.replace('refs/heads/', '')", "def just_the_name(path):\n name = os.path.splitext(os.path.basename(path))[0]\n return name", "def long_displayname(self):\n return self.get_long_displayname()", "def get_info(from_: Path = None,\n to_: Path = None,\n *,\n short: bool = False) -> str:\n short = short_filename if short else (lambda path: path)\n res = ''\n\n if from_ is not None:\n res += f\"'{short(from_)}', {get_size(from_)}MB\"\n if to_ is not None:\n res += bool(from_) * ' to '\n res += f\"'{short(to_)}'\" + to_.exists() * f\"{get_size(to_)}MB\"\n\n return res", "def get_shortest_path(self, src, dst):\n \n return self.get_sorted_paths(src, dst)[0]", "def long_path_formatter(line, max_width=pd.get_option(\"max_colwidth\")):\n\n if len(line) > max_width:\n\n tokens = line.split(\".\")\n trial1 = \"%s...%s\" % (tokens[0], tokens[-1])\n\n if len(trial1) > max_width:\n\n return \"...%s\" % (tokens[-1][-1 : -(max_width - 3)])\n\n else:\n\n return trial1\n\n else:\n\n return line", "def name(self):\n return self.path.stem", "def __str__(self):\n return '{0}'.format(self.path.name[2:])", "def getShortname(input_id):\n \n name = formatDesignation(input_id)\n \n return name[0:6] + name[10:15]", "def short_fname (self):\n self.fname = self.fname.split('/')\n self.trial_date = self.fname[-2]\n self.fname = self.fname[-1]", "def get_content_from_custom_short_path(short_path):", "def get_word(path):\n\treturn path.split('.')[0]", "def path_name(self):\n return u'{0}-{1}'.format(self.plugin.name, self._major_version)", "def name(self) -> str:\n if '/' in self.path.strip('/'):\n basename: str = os.path.basename(self.path)\n return basename\n return self.path", "def get_short(self, cls, long):\n if cls == \"Website\":\n result = self.__session.query(Website).filter(Website.name == long).first()\n return result", "def getComponentByLongName(self, longName):\n result = None\n steps = longName.split('.')\n lastStep = steps[-1]\n currentNode = self\n for step in steps:\n currentNode = currentNode.getChild(step)\n if not 
currentNode:\n result = None\n break\n if step == lastStep:\n result = currentNode\n return result", "def get_custom_short_paths(content):", "def get_short_species_abbreviation(self, taxon_id): # noqa # will be okay after removing old method\n short_species_abbreviation = 'Alliance'\n try:\n short_species_abbreviation = self.rdh2.get_short_name(taxon_id)\n except KeyError:\n self.logger.critical(\"Problem looking up short species name for %s\", taxon_id)\n\n return short_species_abbreviation", "def path2name(path,\n slash=\"/\",\n hid_char=\".\",\n extension=False):\n if extension is True:\n return str(path.split(slash)[-1].strip(hid_char))\n else:\n return str(path.split(slash)[-1].strip(hid_char).split(\".\")[0])", "def resolved(path: Union[str, Path]) -> str:\n return os.path.basename(os.path.abspath(path))", "def get_path(name: str) -> str:\n return _pooch.fetch(name)", "def get_path(name: str) -> str:\n return _pooch.fetch(name)", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name" ]
[ "0.8374927", "0.692538", "0.6779394", "0.6752157", "0.6703952", "0.65418506", "0.6538999", "0.6492663", "0.64758646", "0.6439164", "0.63933027", "0.6386224", "0.636427", "0.6346333", "0.6328241", "0.63225263", "0.6192011", "0.61536336", "0.6148895", "0.6138921", "0.61234444", "0.61234444", "0.61234444", "0.61234444", "0.61234444", "0.61234444", "0.61234444", "0.61234444", "0.61044645", "0.60989374", "0.6067144", "0.60616606", "0.60576326", "0.60393566", "0.6020389", "0.6004298", "0.5997364", "0.5996585", "0.59937525", "0.59937525", "0.5990152", "0.59789675", "0.59626704", "0.5961362", "0.59509695", "0.594276", "0.5922122", "0.5920666", "0.5910166", "0.5899804", "0.58934784", "0.58898914", "0.58895606", "0.5846867", "0.5837293", "0.5836862", "0.58156264", "0.58100224", "0.5804019", "0.57963806", "0.5779406", "0.57793075", "0.57547444", "0.57545733", "0.5751596", "0.5734259", "0.5730957", "0.573004", "0.5728075", "0.5723611", "0.5720091", "0.57110196", "0.5701686", "0.5690984", "0.56789184", "0.5677206", "0.5669046", "0.5665182", "0.5657701", "0.5649", "0.5634642", "0.56318414", "0.56162167", "0.56156796", "0.56147003", "0.5610854", "0.5590981", "0.55885935", "0.5587371", "0.5587371", "0.5567462", "0.5567462", "0.5567462", "0.5567462", "0.5567462", "0.5567462", "0.5567462", "0.5567462", "0.5567462", "0.5567462" ]
0.8415998
0
get img_type file list such as get jpg files
def get_path_list(self, suffix=img_type): img_list = list(filter(lambda x: x.endswith(suffix), self.path_list)) return img_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_input_files():\n\n raw_list = abspath(get('input_files'))\n valid_types = ['image/jpeg', 'image/tiff']\n images = [x for x in raw_list if mimetypes.guess_type(x)[0] in valid_types]\n print('* Input images: {}'.format(len(images)))\n return images", "def img_extensions():\n return [\".JPG\"]", "def collect_image_files():\n negs = [] # Non image files found\n for filename in os.listdir('.'):\n if filename.lower().endswith('.jpg') or filename.lower().\\\n endswith('.jpeg'):\n jpg_files.append(filename)\n elif filename.lower().endswith('.gif'):\n gif_files.append(filename)\n elif filename.lower().endswith('.png'):\n png_files.append(filename)\n else:\n negs.append(filename)\n return negs", "def getfiletype(self):\n d = magic.from_file(os.path.join(self.path,self.name))\n d = re.sub(', ',',',d)\n e = d.split(',')\n filetype = e[0]\n array = [False,False]\n if filetype == 'data':\n array = ['ARM','BIN']\n elif filetype == 'HIT archive data':\n array = ['MSP430', 'BIN']\n elif re.search('ELF',filetype):\n arch = e[1]\n if arch == 'ARM':\n array = ['ARM','ELF']\n elif arch == 'TI msp430':\n array = ['MSP430','ELF']\n else:\n pass\n else:\n pass\n\n return array", "def getSupportedFileFormats():\n return {\"Bitmap\":[\"*.bmp\", \"*.dib\"], \"JPEG\": [\"*.jpeg\", \"*.jpg\", \"*.jpe\"], \"JPEG 2000\": [\"*.jp2\"],\"Portable Network Graphics\" : [\"*.png\"], \"WebP\": [\"*.webp\"], \"Portable Image Formats\":[\"*.pbm\", \"*.pgm\", \"*.ppm\"], \"Sun Rasters\":[\"*.sr\", \"*.ras\"], \"TIFF Files\": [\"*.tiff\",\"*.tif\"] }", "def get_file_type(image_folder_path: str, allowed_extensions: Optional[List] = None):\n if allowed_extensions is None:\n allowed_extensions = [\".jpg\", \".png\", \".jpeg\"]\n\n file_list = os.listdir(image_folder_path)\n extension_type = [os.path.splitext(file)[-1].lower() for file in file_list]\n extension_dict = Counter(extension_type)\n assert (\n len(extension_dict.keys()) == 1\n ), \"The extension in the folder should all be the same, \"\n \"but found {} extensions\".format(extension_dict.keys)\n extension_type = list(extension_dict.keys())[0]\n assert extension_type in allowed_extensions\n return extension_type", "def list_images(img_dir) -> Iterable[str]:\n extensions = (\".png\", \".jpg\", \".jpeg\", \".tif\", \".tiff\")\n\n paths = Path(img_dir).glob(\"**/*\")\n paths = filter(lambda p: p.is_file() and p.suffix.lower() in extensions, paths)\n return (str(p) for p in paths)", "def get_image_list(source_dir):\n\n dir_list = os.path.os.listdir(source_dir)\n# print(dir_list)\n image_list = []\n os.chdir(source_dir)\n for file in dir_list:\n print(\"Inspecting.... 
: {}\".format(file))\n\n try:\n if Image.open(file).format:\n image_list.append(file)\n print(\"{} : is an image\".format(file))\n except Exception as e:\n print(\"{} : failed the imageness test.i \\n {}\".format(file, e))\n continue\n\n# print(image_list)\n return image_list", "def get_images(path):\r\n\texts = ['.png', '.jpg', '.jpeg']\r\n\timages = []\r\n\tif os.path.isfile(path):\r\n\t\tfile_name, file_ext = os.path.splitext(path)\r\n\t\tif file_ext in exts:\r\n\t\t\treturn [path]\r\n\telse:\r\n\t\tfiles = get_files(path)\r\n\t\tfor file in files:\r\n\t\t\tfile_name, file_ext = os.path.splitext(file)\r\n\t\t\tif file_ext in exts:\r\n\t\t\t\timages.append(file)\r\n\t\treturn images", "def get_imlist_png(path):\n \n return [os.path.join(path,f) for f in os.listdir(path) if f.endswith('.png')]", "def get_imlist(path):\n\treturn [os.path.join( path, f) for f in os.listdir(path) if f.endswith('.jpg')]", "def getimgs():", "def get_image_formats(self):\n return [ 'raw', 'qcow', 'qcow2' ]", "def getMimeTypeFileExtensions(mimeType):\n #getMimeTypeFileExtensions body\n\n if mimeType == applicationzlib:\n return [ \"zz\" ]\n\n if mimeType == applicationzstd:\n return [ \"zst\" ]\n\n if mimeType == applicationxzoo:\n return [ \"zoo\" ]\n\n if mimeType == applicationvndhandheldentertainment_xml:\n return [ \"zmm\" ]\n\n if mimeType == applicationvndzul:\n return [ \"zir\", \"zirz\" ]\n\n if mimeType == applicationzip:\n return [ \"zip\", \"zipx\" ]\n\n if mimeType == applicationxopenzim:\n return [ \"zim\" ]\n\n if mimeType == applicationvndzzazzdeck_xml:\n return [ \"zaz\" ]\n\n if mimeType == applicationxzmachine:\n return [ \"z1\", \"z2\", \"z3\", \"z4\", \"z5\", \"z6\", \"z7\", \"z8\" ]\n\n if mimeType == applicationxcompress:\n return [ \"z\" ]\n\n if mimeType == videovndyoutubeyt:\n return [ \"yt\" ]\n\n if mimeType == textxsuseymp:\n return [ \"ymp\" ]\n\n if mimeType == applicationyin_xml:\n return [ \"yin\" ]\n\n if mimeType == applicationyang:\n return [ \"yang\" ]\n\n if mimeType == applicationxyaml:\n return [ \"yaml\", \"yml\" ]\n\n if mimeType == applicationxxz:\n return [ \"xz\" ]\n\n if mimeType == chemicalxxyz:\n return [ \"xyz\" ]\n\n if mimeType == imagexxwindowdump:\n return [ \"xwd\" ]\n\n if mimeType == applicationvndmozillaxul_xml:\n return [ \"xul\" ]\n\n if mimeType == applicationxspf_xml:\n return [ \"xspf\" ]\n\n if mimeType == applicationvndsyncml_xml:\n return [ \"xsm\" ]\n\n if mimeType == applicationxslt_xml:\n return [ \"xsl\", \"xslt\" ]\n\n if mimeType == applicationprsxsf_xml:\n return [ \"xsf\" ]\n\n if mimeType == applicationvndinterconformnet:\n return [ \"xpw\", \"xpx\" ]\n\n if mimeType == applicationvndmsxpsdocument:\n return [ \"xps\" ]\n\n if mimeType == applicationvndisxpr:\n return [ \"xpr\" ]\n\n if mimeType == imagexxpixmap:\n return [ \"xpm\" ]\n\n if mimeType == applicationxproc_xml:\n return [ \"xpl\" ]\n\n if mimeType == applicationxxpinstall:\n return [ \"xpi\" ]\n\n if mimeType == applicationxop_xml:\n return [ \"xop\" ]\n\n if mimeType == applicationvndolpcsugar:\n return [ \"xo\" ]\n\n if mimeType == applicationxcapns_xml:\n return [ \"xns\" ]\n\n if mimeType == applicationxml:\n return [ \"xml\", \"xbl\", \"xsd\", \"rng\" ]\n\n if mimeType == textxxmi:\n return [ \"xmi\" ]\n\n if mimeType == audioxxmf:\n return [ \"xmf\" ]\n\n if mimeType == audioxxm:\n return [ \"xm\" ]\n\n if mimeType == applicationvndopenxmlformatsofficedocumentspreadsheetmltemplate:\n return [ \"xltx\" ]\n\n if mimeType == applicationvndmsexceltemplatemacroenabled12:\n 
return [ \"xltm\" ]\n\n if mimeType == applicationvndopenxmlformatsofficedocumentspreadsheetmlsheet:\n return [ \"xlsx\" ]\n\n if mimeType == applicationvndmsexcelsheetmacroenabled12:\n return [ \"xlsm\" ]\n\n if mimeType == applicationvndmsexcelsheetbinarymacroenabled12:\n return [ \"xlsb\" ]\n\n if mimeType == applicationvndmsexcel:\n return [ \"xls\", \"xlc\", \"xll\", \"xlm\", \"xlw\", \"xla\", \"xlt\", \"xld\" ]\n\n if mimeType == applicationxliff_xml:\n return [ \"xlf\", \"xliff\" ]\n\n if mimeType == applicationvndmsexceladdinmacroenabled12:\n return [ \"xlam\" ]\n\n if mimeType == imagevndxiff:\n return [ \"xif\" ]\n\n if mimeType == audioxxi:\n return [ \"xi\" ]\n\n if mimeType == applicationxhtml_xml:\n return [ \"xhtml\", \"xht\", \"html\", \"htm\" ]\n\n if mimeType == applicationvndpwgxhtmlprint_xml:\n return [ \"xhtm\" ]\n\n if mimeType == applicationvndxfdl:\n return [ \"xfdl\" ]\n\n if mimeType == applicationvndadobexfdf:\n return [ \"xfdf\" ]\n\n if mimeType == applicationpatchopserror_xml:\n return [ \"xer\" ]\n\n if mimeType == applicationxenc_xml:\n return [ \"xenc\" ]\n\n if mimeType == applicationxcapel_xml:\n return [ \"xel\" ]\n\n if mimeType == applicationvndfujixeroxdocuworks:\n return [ \"xdw\" ]\n\n if mimeType == applicationdssc_xml:\n return [ \"xdssc\" ]\n\n if mimeType == applicationvndadobexdp_xml:\n return [ \"xdp\" ]\n\n if mimeType == applicationvndsyncmldm_xml:\n return [ \"xdm\" ]\n\n if mimeType == applicationxcapdiff_xml:\n return [ \"xdf\" ]\n\n if mimeType == applicationcalendar_xml:\n return [ \"xcs\" ]\n\n if mimeType == imagexcompressedxcf:\n return [ \"xcfgz\", \"xcfbz2\" ]\n\n if mimeType == imagexxcf:\n return [ \"xcf\" ]\n\n if mimeType == applicationxcapcaps_xml:\n return [ \"xca\" ]\n\n if mimeType == imagexxbitmap:\n return [ \"xbm\" ]\n\n if mimeType == applicationxxbel:\n return [ \"xbel\" ]\n\n if mimeType == applicationvndfujixeroxdocuworksbinder:\n return [ \"xbd\" ]\n\n if mimeType == applicationxmsxbap:\n return [ \"xbap\" ]\n\n if mimeType == applicationxcapatt_xml:\n return [ \"xav\" ]\n\n if mimeType == applicationxxar:\n return [ \"xar\", \"pkg\" ]\n\n if mimeType == applicationxsilverlightapp:\n return [ \"xap\" ]\n\n if mimeType == applicationxaml_xml:\n return [ \"xaml\" ]\n\n if mimeType == imagexsigmax3f:\n return [ \"x3f\" ]\n\n if mimeType == modelx3d_vrml:\n return [ \"x3dv\", \"x3dvz\" ]\n\n if mimeType == modelx3d_binary:\n return [ \"x3db\", \"x3dbz\" ]\n\n if mimeType == modelx3d_xml:\n return [ \"x3d\", \"x3dz\" ]\n\n if mimeType == modelvndparasolidtransmittext:\n return [ \"x_t\" ]\n\n if mimeType == modelvndparasolidtransmitbinary:\n return [ \"x_b\" ]\n\n if mimeType == applicationxwwf:\n return [ \"wwf\" ]\n\n if mimeType == audioxwavpackcorrection:\n return [ \"wvc\" ]\n\n if mimeType == audioxwavpack:\n return [ \"wv\", \"wvp\" ]\n\n if mimeType == applicationvndwebturbo:\n return [ \"wtb\" ]\n\n if mimeType == applicationwspolicy_xml:\n return [ \"wspolicy\" ]\n\n if mimeType == applicationwsdl_xml:\n return [ \"wsdl\" ]\n\n if mimeType == applicationxwonderswancolorrom:\n return [ \"wsc\" ]\n\n if mimeType == applicationxwonderswanrom:\n return [ \"ws\" ]\n\n if mimeType == applicationxmswrite:\n return [ \"wri\" ]\n\n if mimeType == applicationvndwqd:\n return [ \"wqd\" ]\n\n if mimeType == applicationvndmswpl:\n return [ \"wpl\" ]\n\n if mimeType == applicationxwpg:\n return [ \"wpg\" ]\n\n if mimeType == applicationvndwordperfect:\n return [ \"wp\", \"wp4\", \"wp5\", \"wp6\", \"wpd\", \"wpp\" ]\n\n if 
mimeType == fontwoff2:\n return [ \"woff2\" ]\n\n if mimeType == fontwoff:\n return [ \"woff\" ]\n\n if mimeType == applicationxmswmz:\n return [ \"wmz\" ]\n\n if mimeType == videoxmswmv:\n return [ \"wmv\" ]\n\n if mimeType == applicationvndwapwmlscriptc:\n return [ \"wmlsc\" ]\n\n if mimeType == textvndwapwmlscript:\n return [ \"wmls\" ]\n\n if mimeType == applicationvndwapwmlc:\n return [ \"wmlc\" ]\n\n if mimeType == textvndwapwml:\n return [ \"wml\" ]\n\n if mimeType == imagewmf:\n return [ \"wmf\" ]\n\n if mimeType == applicationxmswmd:\n return [ \"wmd\" ]\n\n if mimeType == audioxmswma:\n return [ \"wma\" ]\n\n if mimeType == videoxmswm:\n return [ \"wm\" ]\n\n if mimeType == applicationxpartialdownload:\n return [ \"wkdownload\", \"crdownload\", \"part\" ]\n\n if mimeType == applicationxmswim:\n return [ \"wim\", \"swm\" ]\n\n if mimeType == applicationwatcherinfo_xml:\n return [ \"wif\" ]\n\n if mimeType == applicationwidget:\n return [ \"wgt\" ]\n\n if mimeType == applicationvndpmiwidget:\n return [ \"wg\" ]\n\n if mimeType == imagewebp:\n return [ \"webp\" ]\n\n if mimeType == applicationmanifest_json:\n return [ \"webmanifest\" ]\n\n if mimeType == videowebm:\n return [ \"webm\" ]\n\n if mimeType == applicationxwebappmanifest_json:\n return [ \"webapp\" ]\n\n if mimeType == audiowebm:\n return [ \"weba\" ]\n\n if mimeType == imagevndmsphoto:\n return [ \"wdp\" ]\n\n if mimeType == applicationvndmsworks:\n return [ \"wcm\", \"wdb\", \"wps\", \"xlr\" ]\n\n if mimeType == applicationvndwapwbxml:\n return [ \"wbxml\" ]\n\n if mimeType == applicationvndcriticaltoolswbs_xml:\n return [ \"wbs\" ]\n\n if mimeType == imagevndwapwbmp:\n return [ \"wbmp\" ]\n\n if mimeType == applicationxquattropro:\n return [ \"wb1\", \"wb2\", \"wb3\" ]\n\n if mimeType == audioxwav:\n return [ \"wav\" ]\n\n if mimeType == applicationwasm:\n return [ \"wasm\" ]\n\n if mimeType == applicationjavaarchive:\n return [ \"war\", \"ear\" ]\n\n if mimeType == applicationvndsunwadl_xml:\n return [ \"wadl\" ]\n\n if mimeType == applicationxwiiwad:\n return [ \"wad\" ]\n\n if mimeType == applicationvoicexml_xml:\n return [ \"vxml\" ]\n\n if mimeType == modelvndvtu:\n return [ \"vtu\" ]\n\n if mimeType == textvtt:\n return [ \"vtt\" ]\n\n if mimeType == imagevndvalvesourcetexture:\n return [ \"vtf\" ]\n\n if mimeType == applicationvndmsvisiotemplatemain_xml:\n return [ \"vstx\" ]\n\n if mimeType == applicationvndmsvisiotemplatemacroenabledmain_xml:\n return [ \"vstm\" ]\n\n if mimeType == applicationvndmsvisiostencilmain_xml:\n return [ \"vssx\" ]\n\n if mimeType == applicationvndmsvisiostencilmacroenabledmain_xml:\n return [ \"vssm\" ]\n\n if mimeType == applicationvndvsf:\n return [ \"vsf\" ]\n\n if mimeType == applicationvndmsvisiodrawingmain_xml:\n return [ \"vsdx\" ]\n\n if mimeType == applicationvndmsvisiodrawingmacroenabledmain_xml:\n return [ \"vsdm\" ]\n\n if mimeType == applicationvndvisio:\n return [ \"vsd\", \"vst\", \"vsw\", \"vss\" ]\n\n if mimeType == modelvrml:\n return [ \"vrm\", \"vrml\", \"wrl\" ]\n\n if mimeType == applicationxvhddisk:\n return [ \"vpc\" ]\n\n if mimeType == audioxvoc:\n return [ \"voc\" ]\n\n if mimeType == applicationxvmdkdisk:\n return [ \"vmdk\" ]\n\n if mimeType == videovndvivo:\n return [ \"viv\", \"vivo\" ]\n\n if mimeType == applicationvndvisionary:\n return [ \"vis\" ]\n\n if mimeType == applicationxvhdxdisk:\n return [ \"vhdx\" ]\n\n if mimeType == textxvhdl:\n return [ \"vhd\", \"vhdl\" ]\n\n if mimeType == modelvndsapvds:\n return [ \"vds\" ]\n\n if mimeType == 
applicationxvdidisk:\n return [ \"vdi\" ]\n\n if mimeType == applicationvndvcx:\n return [ \"vcx\" ]\n\n if mimeType == textcalendar:\n return [ \"vcs\", \"ics\", \"ifb\" ]\n\n if mimeType == applicationvndgroovevcard:\n return [ \"vcg\" ]\n\n if mimeType == applicationxcdlink:\n return [ \"vcd\" ]\n\n if mimeType == textvcard:\n return [ \"vcard\", \"vcf\", \"vct\", \"gcrd\" ]\n\n if mimeType == textvbscript:\n return [ \"vbs\" ]\n\n if mimeType == applicationxvirtualboxvboxextpack:\n return [ \"vbox-extpack\" ]\n\n if mimeType == applicationxvirtualboxvbox:\n return [ \"vbox\" ]\n\n if mimeType == applicationxvirtualboyrom:\n return [ \"vb\" ]\n\n if mimeType == textxvala:\n return [ \"vala\", \"vapi\" ]\n\n if mimeType == textxverilog:\n return [ \"v\" ]\n\n if mimeType == applicationvnddecezip:\n return [ \"uvz\", \"uvvz\" ]\n\n if mimeType == applicationvnddeceunspecified:\n return [ \"uvx\", \"uvvx\" ]\n\n if mimeType == videovnddecevideo:\n return [ \"uvv\", \"uvvv\" ]\n\n if mimeType == videovnduvvump4:\n return [ \"uvu\", \"uvvu\" ]\n\n if mimeType == applicationvnddecettml_xml:\n return [ \"uvt\", \"uvvt\" ]\n\n if mimeType == videovnddecesd:\n return [ \"uvs\", \"uvvs\" ]\n\n if mimeType == videovnddecepd:\n return [ \"uvp\", \"uvvp\" ]\n\n if mimeType == videovnddecemobile:\n return [ \"uvm\", \"uvvm\" ]\n\n if mimeType == imagevnddecegraphic:\n return [ \"uvi\", \"uvvi\", \"uvg\", \"uvvg\" ]\n\n if mimeType == videovnddecehd:\n return [ \"uvh\", \"uvvh\" ]\n\n if mimeType == applicationvnddecedata:\n return [ \"uvf\", \"uvvf\", \"uvd\", \"uvvd\" ]\n\n if mimeType == audiovnddeceaudio:\n return [ \"uva\", \"uvva\" ]\n\n if mimeType == textxuuencode:\n return [ \"uue\", \"uu\" ]\n\n if mimeType == applicationvnduiqtheme:\n return [ \"utz\" ]\n\n if mimeType == applicationxustar:\n return [ \"ustar\" ]\n\n if mimeType == modelvndusdz_zip:\n return [ \"usdz\" ]\n\n if mimeType == applicationxmswinurl:\n return [ \"url\" ]\n\n if mimeType == texturilist:\n return [ \"uri\", \"uris\", \"urls\" ]\n\n if mimeType == applicationvnduoml_xml:\n return [ \"uoml\", \"uo\" ]\n\n if mimeType == applicationvndunity:\n return [ \"unityweb\" ]\n\n if mimeType == applicationvndumajin:\n return [ \"umj\" ]\n\n if mimeType == applicationxglulx:\n return [ \"ulx\" ]\n\n if mimeType == audioxmod:\n return [ \"ult\", \"uni\", \"m15\", \"mtm\", \"669\", \"med\" ]\n\n if mimeType == textxuil:\n return [ \"uil\" ]\n\n if mimeType == applicationxdesigner:\n return [ \"ui\" ]\n\n if mimeType == applicationxufraw:\n return [ \"ufraw\" ]\n\n if mimeType == applicationvndufdl:\n return [ \"ufd\", \"ufdl\" ]\n\n if mimeType == applicationubjson:\n return [ \"ubj\" ]\n\n if mimeType == messageglobal:\n return [ \"u8msg\" ]\n\n if mimeType == messageglobaldispositionnotification:\n return [ \"u8mdn\" ]\n\n if mimeType == messageglobalheaders:\n return [ \"u8hdr\" ]\n\n if mimeType == messageglobaldeliverystatus:\n return [ \"u8dsn\" ]\n\n if mimeType == modelu3d:\n return [ \"u3d\" ]\n\n if mimeType == textplain:\n return [ \"txt\", \"text\", \"conf\", \"def\", \"list\", \"in\", \"ini\" ]\n\n if mimeType == applicationvndmobiustxf:\n return [ \"txf\" ]\n\n if mimeType == applicationvndgenomatixtuxedo:\n return [ \"txd\" ]\n\n if mimeType == textxtwig:\n return [ \"twig\" ]\n\n if mimeType == applicationvndsimtechmindmapper:\n return [ \"twd\", \"twds\" ]\n\n if mimeType == applicationxfontttx:\n return [ \"ttx\" ]\n\n if mimeType == applicationttml_xml:\n return [ \"ttml\" ]\n\n if mimeType == textturtle:\n 
return [ \"ttl\" ]\n\n if mimeType == fontttf:\n return [ \"ttf\" ]\n\n if mimeType == fontcollection:\n return [ \"ttc\" ]\n\n if mimeType == audioxtta:\n return [ \"tta\" ]\n\n if mimeType == texttabseparatedvalues:\n return [ \"tsv\" ]\n\n if mimeType == applicationtimestampeddata:\n return [ \"tsd\" ]\n\n if mimeType == textvndtrolltechlinguist:\n return [ \"ts\" ]\n\n if mimeType == applicationxmsterminal:\n return [ \"trm\" ]\n\n if mimeType == applicationtrig:\n return [ \"trig\" ]\n\n if mimeType == applicationvndtrueapp:\n return [ \"tra\" ]\n\n if mimeType == texttroff:\n return [ \"tr\", \"roff\" ]\n\n if mimeType == applicationvndtridtpt:\n return [ \"tpt\" ]\n\n if mimeType == applicationvndgroovetooltemplate:\n return [ \"tpl\" ]\n\n if mimeType == applicationxbittorrent:\n return [ \"torrent\" ]\n\n if mimeType == applicationtoml:\n return [ \"toml\" ]\n\n if mimeType == applicationxcdrdaotoc:\n return [ \"toc\" ]\n\n if mimeType == applicationvndmstnef:\n return [ \"tnef\", \"tnf\", \"winmaildat\" ]\n\n if mimeType == applicationvndtmobilelivetv:\n return [ \"tmo\" ]\n\n if mimeType == imagetiff:\n return [ \"tif\", \"tiff\" ]\n\n if mimeType == applicationvndmsofficetheme:\n return [ \"thmx\" ]\n\n if mimeType == applicationxwindowsthemepack:\n return [ \"themepack\" ]\n\n if mimeType == applicationxtheme:\n return [ \"theme\" ]\n\n if mimeType == imagextga:\n return [ \"tga\", \"icb\", \"tpic\", \"vda\" ]\n\n if mimeType == imagetifffx:\n return [ \"tfx\" ]\n\n if mimeType == applicationxtextfm:\n return [ \"tfm\" ]\n\n if mimeType == applicationthraud_xml:\n return [ \"tfi\" ]\n\n if mimeType == textxtexinfo:\n return [ \"texi\", \"texinfo\" ]\n\n if mimeType == textxtex:\n return [ \"tex\", \"ltx\", \"sty\", \"cls\", \"dtx\", \"ins\", \"latex\" ]\n\n if mimeType == applicationtei_xml:\n return [ \"tei\", \"teicorpus\" ]\n\n if mimeType == applicationvndsmartteacher:\n return [ \"teacher\" ]\n\n if mimeType == applicationurctargetdesc_xml:\n return [ \"td\" ]\n\n if mimeType == texttcl:\n return [ \"tcl\", \"tk\" ]\n\n if mimeType == applicationvnd3gpp2tcap:\n return [ \"tcap\" ]\n\n if mimeType == applicationxzstdcompressedtar:\n return [ \"tarzst\", \"tzst\" ]\n\n if mimeType == applicationxtarz:\n return [ \"tarz\", \"taz\" ]\n\n if mimeType == applicationxxzcompressedtar:\n return [ \"tarxz\", \"txz\" ]\n\n if mimeType == applicationxtzo:\n return [ \"tarlzo\", \"tzo\" ]\n\n if mimeType == applicationxlzmacompressedtar:\n return [ \"tarlzma\", \"tlz\" ]\n\n if mimeType == applicationxlz4compressedtar:\n return [ \"tarlz4\" ]\n\n if mimeType == applicationxlzipcompressedtar:\n return [ \"tarlz\" ]\n\n if mimeType == applicationxlrzipcompressedtar:\n return [ \"tarlrz\", \"tlrz\" ]\n\n if mimeType == applicationxcompressedtar:\n return [ \"targz\", \"tgz\" ]\n\n if mimeType == applicationxbzipcompressedtar:\n return [ \"tarbz2\", \"tarbz\", \"tbz2\", \"tbz\", \"tb2\" ]\n\n if mimeType == applicationxtar:\n return [ \"tar\", \"gtar\", \"gem\" ]\n\n if mimeType == imagevndtencenttap:\n return [ \"tap\" ]\n\n if mimeType == applicationvndtaointentmodulearchive:\n return [ \"tao\" ]\n\n if mimeType == audioxtak:\n return [ \"tak\" ]\n\n if mimeType == applicationvndmynfc:\n return [ \"taglet\" ]\n\n if mimeType == imaget38:\n return [ \"t38\" ]\n\n if mimeType == applicationxt3vmimage:\n return [ \"t3\" ]\n\n if mimeType == textxtxt2tags:\n return [ \"t2t\" ]\n\n if mimeType == textspreadsheet:\n return [ \"sylk\", \"slk\" ]\n\n if mimeType == applicationvndsunxmlwriter:\n 
return [ \"sxw\" ]\n\n if mimeType == applicationvndsunxmlmath:\n return [ \"sxm\" ]\n\n if mimeType == applicationvndsunxmlimpress:\n return [ \"sxi\" ]\n\n if mimeType == applicationvndsunxmlwriterglobal:\n return [ \"sxg\" ]\n\n if mimeType == applicationvndsunxmldraw:\n return [ \"sxd\" ]\n\n if mimeType == applicationvndsunxmlcalc:\n return [ \"sxc\" ]\n\n if mimeType == applicationswid_xml:\n return [ \"swidtag\" ]\n\n if mimeType == applicationvndaristanetworksswi:\n return [ \"swi\" ]\n\n if mimeType == applicationvndadobeflashmovie:\n return [ \"swf\", \"spl\" ]\n\n if mimeType == textxsvhdr:\n return [ \"svh\" ]\n\n if mimeType == imagesvg_xmlcompressed:\n return [ \"svgz\", \"svggz\" ]\n\n if mimeType == imagesvg_xml:\n return [ \"svg\" ]\n\n if mimeType == applicationvndsvd:\n return [ \"svd\" ]\n\n if mimeType == applicationvnddvbservice:\n return [ \"svc\" ]\n\n if mimeType == applicationxsv4crc:\n return [ \"sv4crc\" ]\n\n if mimeType == applicationxsv4cpio:\n return [ \"sv4cpio\" ]\n\n if mimeType == textxsvsrc:\n return [ \"sv\" ]\n\n if mimeType == applicationvndsuscalendar:\n return [ \"sus\", \"susp\" ]\n\n if mimeType == imagexsunraster:\n return [ \"sun\" ]\n\n if mimeType == textxmicrodvd:\n return [ \"sub\" ]\n\n if mimeType == textstylus:\n return [ \"stylus\", \"styl\" ]\n\n if mimeType == applicationvndsunxmlwritertemplate:\n return [ \"stw\" ]\n\n if mimeType == applicationvndpgformat:\n return [ \"str\" ]\n\n if mimeType == modelstep_zip:\n return [ \"stpz\" ]\n\n if mimeType == modelstepxml_zip:\n return [ \"stpxz\" ]\n\n if mimeType == modelstep_xml:\n return [ \"stpx\" ]\n\n if mimeType == audioxstm:\n return [ \"stm\" ]\n\n if mimeType == modelstl:\n return [ \"stl\" ]\n\n if mimeType == applicationhyperstudio:\n return [ \"stk\" ]\n\n if mimeType == applicationvndsunxmlimpresstemplate:\n return [ \"sti\" ]\n\n if mimeType == applicationvndwtstf:\n return [ \"stf\" ]\n\n if mimeType == applicationvndsunxmldrawtemplate:\n return [ \"std\" ]\n\n if mimeType == applicationvndsunxmlcalctemplate:\n return [ \"stc\" ]\n\n if mimeType == applicationvndsailingtrackertrack:\n return [ \"st\" ]\n\n if mimeType == applicationssml_xml:\n return [ \"ssml\" ]\n\n if mimeType == applicationvndepsonssf:\n return [ \"ssf\" ]\n\n if mimeType == applicationvndkodakdescriptor:\n return [ \"sse\" ]\n\n if mimeType == applicationssdl_xml:\n return [ \"ssdl\" ]\n\n if mimeType == textxssa:\n return [ \"ssa\", \"ass\" ]\n\n if mimeType == applicationsparqlresults_xml:\n return [ \"srx\" ]\n\n if mimeType == applicationsru_xml:\n return [ \"sru\" ]\n\n if mimeType == applicationxsubrip:\n return [ \"srt\" ]\n\n if mimeType == imagexsonysrf:\n return [ \"srf\" ]\n\n if mimeType == applicationxsourcerpm:\n return [ \"srcrpm\", \"spm\" ]\n\n if mimeType == applicationxwaissource:\n return [ \"src\" ]\n\n if mimeType == imagexsonysr2:\n return [ \"sr2\" ]\n\n if mimeType == applicationvndsquashfs:\n return [ \"sqsh\" ]\n\n if mimeType == applicationvndsqlite3:\n return [ \"sqlite3\" ]\n\n if mimeType == applicationxsqlite2:\n return [ \"sqlite2\" ]\n\n if mimeType == applicationsql:\n return [ \"sql\" ]\n\n if mimeType == applicationxapplesystemprofiler_xml:\n return [ \"spx\" ]\n\n if mimeType == applicationscvpvprequest:\n return [ \"spq\" ]\n\n if mimeType == applicationscvpvpresponse:\n return [ \"spp\" ]\n\n if mimeType == textvndin3dspot:\n return [ \"spot\" ]\n\n if mimeType == applicationvndyamahasmafphrase:\n return [ \"spf\" ]\n\n if mimeType == textxrpmspec:\n return [ 
\"spec\" ]\n\n if mimeType == textspdx:\n return [ \"spdx\" ]\n\n if mimeType == applicationxfontspeedo:\n return [ \"spd\" ]\n\n if mimeType == applicationxsharedlib:\n return [ \"so\", \"so09\" ]\n\n if mimeType == applicationxfontsnf:\n return [ \"snf\" ]\n\n if mimeType == applicationvndsnap:\n return [ \"snap\" ]\n\n if mimeType == applicationvndstepmaniapackage:\n return [ \"smzip\" ]\n\n if mimeType == videoxsmv:\n return [ \"smv\" ]\n\n if mimeType == applicationxsmsrom:\n return [ \"sms\" ]\n\n if mimeType == videovndradgamettoolssmacker:\n return [ \"smk\" ]\n\n if mimeType == applicationsmil_xml:\n return [ \"smil\", \"smi\", \"sml\", \"kino\" ]\n\n if mimeType == applicationvndstardivisionmath:\n return [ \"smf\" ]\n\n if mimeType == applicationvndstardivisionmail:\n return [ \"smd\" ]\n\n if mimeType == applicationvndstepmaniastepchart:\n return [ \"sm\" ]\n\n if mimeType == applicationvndepsonsalt:\n return [ \"slt\" ]\n\n if mimeType == applicationroutestsid_xml:\n return [ \"sls\" ]\n\n if mimeType == textslim:\n return [ \"slim\", \"slm\" ]\n\n if mimeType == applicationvndopenxmlformatsofficedocumentpresentationmlslide:\n return [ \"sldx\" ]\n\n if mimeType == applicationvndmspowerpointslidemacroenabled12:\n return [ \"sldm\" ]\n\n if mimeType == applicationpgpkeys:\n return [ \"skr\", \"pkr\", \"key\" ]\n\n if mimeType == applicationvndkoan:\n return [ \"skp\", \"skd\", \"skt\", \"skm\" ]\n\n if mimeType == imagexskencil:\n return [ \"sk\", \"sk1\" ]\n\n if mimeType == applicationsieve:\n return [ \"siv\", \"sieve\" ]\n\n if mimeType == applicationxstuffitx:\n return [ \"sitx\" ]\n\n if mimeType == applicationxstuffit:\n return [ \"sit\" ]\n\n if mimeType == xepocxsisxapp:\n return [ \"sisx\" ]\n\n if mimeType == applicationvndsymbianinstall:\n return [ \"sis\" ]\n\n if mimeType == audiosilk:\n return [ \"sil\" ]\n\n if mimeType == applicationpgpsignature:\n return [ \"sig\" ]\n\n if mimeType == audioprssid:\n return [ \"sid\", \"psid\" ]\n\n if mimeType == applicationxsiag:\n return [ \"siag\" ]\n\n if mimeType == texthtml:\n return [ \"shtml\" ]\n\n if mimeType == applicationxshorten:\n return [ \"shn\" ]\n\n if mimeType == applicationshf_xml:\n return [ \"shf\" ]\n\n if mimeType == textshex:\n return [ \"shex\" ]\n\n if mimeType == applicationxshar:\n return [ \"shar\" ]\n\n if mimeType == applicationxdiashape:\n return [ \"shape\" ]\n\n if mimeType == applicationxshellscript:\n return [ \"sh\" ]\n\n if mimeType == textsgml:\n return [ \"sgml\", \"sgm\" ]\n\n if mimeType == imagexsgi:\n return [ \"sgi\" ]\n\n if mimeType == applicationxgosgf:\n return [ \"sgf\" ]\n\n if mimeType == applicationxsg1000rom:\n return [ \"sg\" ]\n\n if mimeType == textxsfv:\n return [ \"sfv\" ]\n\n if mimeType == applicationvndspotfiresfs:\n return [ \"sfs\" ]\n\n if mimeType == applicationvndhydrostatixsofdata:\n return [ \"sfd-hdstx\" ]\n\n if mimeType == applicationvndnintendosnesrom:\n return [ \"sfc\", \"smc\" ]\n\n if mimeType == applicationsetregistrationinitiation:\n return [ \"setreg\" ]\n\n if mimeType == applicationsetpaymentinitiation:\n return [ \"setpay\" ]\n\n if mimeType == textxdbusservice:\n return [ \"service\" ]\n\n if mimeType == applicationjavaserializedobject:\n return [ \"ser\" ]\n\n if mimeType == applicationsensml_xml:\n return [ \"sensmlx\" ]\n\n if mimeType == applicationsenml_xml:\n return [ \"senmlx\" ]\n\n if mimeType == applicationvndsemf:\n return [ \"semf\" ]\n\n if mimeType == applicationvndsemd:\n return [ \"semd\" ]\n\n if mimeType == 
applicationvndsema:\n return [ \"sema\" ]\n\n if mimeType == applicationvndfdsnseed:\n return [ \"seed\", \"dataless\" ]\n\n if mimeType == applicationvndseemail:\n return [ \"see\" ]\n\n if mimeType == applicationxsea:\n return [ \"sea\" ]\n\n if mimeType == applicationvndstardivisionwriter:\n return [ \"sdw\", \"vor\", \"sgl\" ]\n\n if mimeType == applicationvndstardivisionchart:\n return [ \"sds\" ]\n\n if mimeType == applicationvndsolentsdkm_xml:\n return [ \"sdkm\", \"sdkd\" ]\n\n if mimeType == applicationvndstardivisionimpress:\n return [ \"sdd\", \"sdp\" ]\n\n if mimeType == applicationvndstardivisioncalc:\n return [ \"sdc\" ]\n\n if mimeType == applicationvndstardivisiondraw:\n return [ \"sda\" ]\n\n if mimeType == textvndcurlscurl:\n return [ \"scurl\" ]\n\n if mimeType == textxscss:\n return [ \"scss\" ]\n\n if mimeType == applicationscvpcvresponse:\n return [ \"scs\" ]\n\n if mimeType == applicationscvpcvrequest:\n return [ \"scq\" ]\n\n if mimeType == textxscons:\n return [ \"sconstruct\", \"sconscript\" ]\n\n if mimeType == applicationxgodotscene:\n return [ \"scn\", \"tscn\", \"escn\" ]\n\n if mimeType == textxscheme:\n return [ \"scm\", \"ss\" ]\n\n if mimeType == applicationxmsschedule:\n return [ \"scd\" ]\n\n if mimeType == textxscala:\n return [ \"scala\", \"sc\" ]\n\n if mimeType == applicationsbml_xml:\n return [ \"sbml\" ]\n\n if mimeType == applicationxspsssav:\n return [ \"sav\", \"zsav\" ]\n\n if mimeType == textxsass:\n return [ \"sass\" ]\n\n if mimeType == applicationxthomsonsapimage:\n return [ \"sap\" ]\n\n if mimeType == applicationxsami:\n return [ \"sami\" ]\n\n if mimeType == applicationxamipro:\n return [ \"sam\" ]\n\n if mimeType == textxsagemath:\n return [ \"sage\" ]\n\n if mimeType == applicationvndyamahasmafaudio:\n return [ \"saf\" ]\n\n if mimeType == audioxs3m:\n return [ \"s3m\" ]\n\n if mimeType == textxasm:\n return [ \"s\", \"asm\" ]\n\n if mimeType == imagexpanasonicrw2:\n return [ \"rw2\" ]\n\n if mimeType == videovndrnrealvideo:\n return [ \"rv\", \"rvx\" ]\n\n if mimeType == applicationrouteusd_xml:\n return [ \"rusd\" ]\n\n if mimeType == applicationxmakeself:\n return [ \"run\" ]\n\n if mimeType == textrichtext:\n return [ \"rtx\" ]\n\n if mimeType == applicationrtf:\n return [ \"rtf\" ]\n\n if mimeType == textvndrnrealtext:\n return [ \"rt\" ]\n\n if mimeType == textxrst:\n return [ \"rst\" ]\n\n if mimeType == applicationrss_xml:\n return [ \"rss\" ]\n\n if mimeType == applicationurcressheet_xml:\n return [ \"rsheet\" ]\n\n if mimeType == applicationrsd_xml:\n return [ \"rsd\" ]\n\n if mimeType == applicationatscrsat_xml:\n return [ \"rsat\" ]\n\n if mimeType == textrust:\n return [ \"rs\" ]\n\n if mimeType == applicationvndnokiaradiopreset:\n return [ \"rpst\" ]\n\n if mimeType == applicationvndnokiaradiopresets:\n return [ \"rpss\" ]\n\n if mimeType == applicationxrpm:\n return [ \"rpm\" ]\n\n if mimeType == applicationvndcloantorp9:\n return [ \"rp9\" ]\n\n if mimeType == imagevndrnrealpix:\n return [ \"rp\" ]\n\n if mimeType == applicationrpkiroa:\n return [ \"roa\" ]\n\n if mimeType == applicationrelaxngcompactsyntax:\n return [ \"rnc\" ]\n\n if mimeType == audioxpnrealaudioplugin:\n return [ \"rmp\" ]\n\n if mimeType == messagexgnurmail:\n return [ \"rmail\" ]\n\n if mimeType == applicationvndrnrealmedia:\n return [ \"rm\", \"rmj\", \"rmm\", \"rms\", \"rmx\", \"rmvb\" ]\n\n if mimeType == imagerle:\n return [ \"rle\" ]\n\n if mimeType == applicationresourcelistsdiff_xml:\n return [ \"rld\" ]\n\n if mimeType == 
imagevndfujixeroxedmicsrlc:\n return [ \"rlc\" ]\n\n if mimeType == applicationresourcelists_xml:\n return [ \"rl\" ]\n\n if mimeType == applicationxresearchinfosystems:\n return [ \"ris\" ]\n\n if mimeType == audiovndrip:\n return [ \"rip\" ]\n\n if mimeType == applicationreginfo_xml:\n return [ \"rif\" ]\n\n if mimeType == imagexrgb:\n return [ \"rgb\" ]\n\n if mimeType == applicationxgodotresource:\n return [ \"res\", \"tres\" ]\n\n if mimeType == applicationvndbusinessobjects:\n return [ \"rep\" ]\n\n if mimeType == applicationp2poverlay_xml:\n return [ \"relo\" ]\n\n if mimeType == textxreject:\n return [ \"rej\" ]\n\n if mimeType == textxmsregedit:\n return [ \"reg\" ]\n\n if mimeType == textxreadme:\n return [ \"readme\" ]\n\n if mimeType == applicationvnddatavisionrdz:\n return [ \"rdz\" ]\n\n if mimeType == applicationrdf_xml:\n return [ \"rdf\", \"rdfs\", \"owl\" ]\n\n if mimeType == applicationvndipunpluggedrcprofile:\n return [ \"rcprofile\" ]\n\n if mimeType == applicationxruby:\n return [ \"rb\" ]\n\n if mimeType == applicationxrawdiskimagexzcompressed:\n return [ \"rawdiskimagexz\", \"imgxz\" ]\n\n if mimeType == applicationxrawdiskimage:\n return [ \"rawdiskimage\", \"img\" ]\n\n if mimeType == imagexpanasonicrw:\n return [ \"raw\" ]\n\n if mimeType == imagexcmuraster:\n return [ \"ras\" ]\n\n if mimeType == applicationvndrar:\n return [ \"rar\" ]\n\n if mimeType == applicationrouteapd_xml:\n return [ \"rapd\" ]\n\n if mimeType == applicationraml_yaml:\n return [ \"raml\" ]\n\n if mimeType == applicationram:\n return [ \"ram\" ]\n\n if mimeType == imagexfujiraf:\n return [ \"raf\" ]\n\n if mimeType == audiovndrnrealaudio:\n return [ \"ra\", \"rax\" ]\n\n if mimeType == applicationvndquarkquarkxpress:\n return [ \"qxd\", \"qxt\", \"qwd\", \"qwt\", \"qxl\", \"qxb\" ]\n\n if mimeType == applicationxquicktimemedialink:\n return [ \"qtl\" ]\n\n if mimeType == imagexquicktime:\n return [ \"qtif\" ]\n\n if mimeType == applicationxqtiplot:\n return [ \"qti\", \"qtigz\" ]\n\n if mimeType == videoquicktime:\n return [ \"qt\", \"mov\", \"moov\", \"qtvr\" ]\n\n if mimeType == applicationsparqlquery:\n return [ \"qs\", \"rq\" ]\n\n if mimeType == applicationvndpublisharedeltatree:\n return [ \"qps\" ]\n\n if mimeType == applicationxqpress:\n return [ \"qp\" ]\n\n if mimeType == textxqml:\n return [ \"qml\", \"qmltypes\", \"qmlproject\" ]\n\n if mimeType == applicationxqw:\n return [ \"qif\" ]\n\n if mimeType == applicationvndintuqfx:\n return [ \"qfx\" ]\n\n if mimeType == applicationxqeddisk:\n return [ \"qed\" ]\n\n if mimeType == applicationxqemudisk:\n return [ \"qcow2\", \"qcow\" ]\n\n if mimeType == applicationvndintuqbo:\n return [ \"qbo\" ]\n\n if mimeType == applicationvndepsonquickanime:\n return [ \"qam\" ]\n\n if mimeType == textxpython:\n return [ \"pyx\", \"wsgi\" ]\n\n if mimeType == videovndmsplayreadymediapyv:\n return [ \"pyv\" ]\n\n if mimeType == applicationxpyspreadspreadsheet:\n return [ \"pysu\" ]\n\n if mimeType == applicationxpyspreadbzspreadsheet:\n return [ \"pys\" ]\n\n if mimeType == modelvndpythapyox:\n return [ \"pyox\" ]\n\n if mimeType == applicationxpythonbytecode:\n return [ \"pyc\", \"pyo\" ]\n\n if mimeType == audiovndmsplayreadymediapya:\n return [ \"pya\" ]\n\n if mimeType == textxpython3:\n return [ \"py\", \"py3\", \"py3x\", \"pyi\" ]\n\n if mimeType == applicationvnd3mpostitnotes:\n return [ \"pwn\" ]\n\n if mimeType == applicationxpw:\n return [ \"pw\" ]\n\n if mimeType == applicationvnd3gpppicbwvar:\n return [ \"pvb\" ]\n\n if mimeType == 
applicationvndmspublisher:\n return [ \"pub\" ]\n\n if mimeType == applicationvndpviptid1:\n return [ \"ptid\" ]\n\n if mimeType == imageprspti:\n return [ \"pti\" ]\n\n if mimeType == applicationxpocketword:\n return [ \"psw\" ]\n\n if mimeType == applicationpskc_xml:\n return [ \"pskcxml\" ]\n\n if mimeType == applicationxgzpostscript:\n return [ \"psgz\" ]\n\n if mimeType == audioxpsflib:\n return [ \"psflib\" ]\n\n if mimeType == applicationxgzfontlinuxpsf:\n return [ \"psfgz\" ]\n\n if mimeType == applicationxfontlinuxpsf:\n return [ \"psf\" ]\n\n if mimeType == imagevndadobephotoshop:\n return [ \"psd\" ]\n\n if mimeType == applicationxbzpostscript:\n return [ \"psbz2\" ]\n\n if mimeType == applicationvnd3gpppicbwsmall:\n return [ \"psb\" ]\n\n if mimeType == applicationpostscript:\n return [ \"ps\" ]\n\n if mimeType == applicationprovenance_xml:\n return [ \"provx\" ]\n\n if mimeType == applicationxgodotproject:\n return [ \"projectgodot\" ]\n\n if mimeType == applicationpicsrules:\n return [ \"prf\" ]\n\n if mimeType == applicationvndlotusfreelance:\n return [ \"pre\" ]\n\n if mimeType == applicationvndpalm:\n return [ \"pqa\", \"oprc\" ]\n\n if mimeType == applicationvndmspowerpoint:\n return [ \"ppz\", \"ppt\", \"pps\", \"pot\" ]\n\n if mimeType == applicationvndopenxmlformatsofficedocumentpresentationmlpresentation:\n return [ \"pptx\" ]\n\n if mimeType == applicationvndmspowerpointpresentationmacroenabled12:\n return [ \"pptm\" ]\n\n if mimeType == applicationvndopenxmlformatsofficedocumentpresentationmlslideshow:\n return [ \"ppsx\" ]\n\n if mimeType == applicationvndmspowerpointslideshowmacroenabled12:\n return [ \"ppsm\" ]\n\n if mimeType == imagexportablepixmap:\n return [ \"ppm\" ]\n\n if mimeType == applicationvndcupsppd:\n return [ \"ppd\" ]\n\n if mimeType == applicationvndmspowerpointaddinmacroenabled12:\n return [ \"ppam\" ]\n\n if mimeType == applicationvndopenxmlformatsofficedocumentpresentationmltemplate:\n return [ \"potx\" ]\n\n if mimeType == applicationvndmspowerpointtemplatemacroenabled12:\n return [ \"potm\" ]\n\n if mimeType == applicationvndmacportsportpkg:\n return [ \"portpkg\" ]\n\n if mimeType == applicationxspsspor:\n return [ \"por\" ]\n\n if mimeType == textxmaven_xml:\n return [ \"pomxml\", \"settingsxml\" ]\n\n if mimeType == textxgettexttranslation:\n return [ \"po\" ]\n\n if mimeType == imagexmacpaint:\n return [ \"pntg\" ]\n\n if mimeType == imagexportableanymap:\n return [ \"pnm\" ]\n\n if mimeType == imagepng:\n return [ \"png\" ]\n\n if mimeType == applicationvndctcposml:\n return [ \"pml\" ]\n\n if mimeType == audioxscpls:\n return [ \"pls\" ]\n\n if mimeType == applicationxplanperfect:\n return [ \"pln\" ]\n\n if mimeType == applicationvndpocketlearn:\n return [ \"plf\" ]\n\n if mimeType == applicationvndmobiusplc:\n return [ \"plc\" ]\n\n if mimeType == applicationvnd3gpppicbwlarge:\n return [ \"plb\" ]\n\n if mimeType == audioxiriverpla:\n return [ \"pla\" ]\n\n if mimeType == applicationxperl:\n return [ \"pl\", \"pm\", \"al\", \"perl\", \"pod\", \"t\" ]\n\n if mimeType == applicationvndapplepkpass:\n return [ \"pkpass\" ]\n\n if mimeType == applicationpkixpkipath:\n return [ \"pkipath\" ]\n\n if mimeType == applicationpkixcmp:\n return [ \"pki\" ]\n\n if mimeType == applicationxtexpk:\n return [ \"pk\" ]\n\n if mimeType == applicationxphp:\n return [ \"php\", \"php3\", \"php4\", \"php5\", \"phps\" ]\n\n if mimeType == applicationpgpencrypted:\n return [ \"pgp\", \"gpg\", \"asc\" ]\n\n if mimeType == applicationvndchesspgn:\n return [ 
\"pgn\" ]\n\n if mimeType == imagexportablegraymap:\n return [ \"pgm\" ]\n\n if mimeType == applicationfonttdpfr:\n return [ \"pfr\" ]\n\n if mimeType == applicationxfonttype1:\n return [ \"pfa\", \"pfb\", \"gsf\", \"pfm\" ]\n\n if mimeType == imagexpentaxpef:\n return [ \"pef\" ]\n\n if mimeType == applicationxxzpdf:\n return [ \"pdfxz\" ]\n\n if mimeType == applicationxlzpdf:\n return [ \"pdflz\" ]\n\n if mimeType == applicationxgzpdf:\n return [ \"pdfgz\" ]\n\n if mimeType == applicationxbzpdf:\n return [ \"pdfbz2\" ]\n\n if mimeType == applicationpdf:\n return [ \"pdf\" ]\n\n if mimeType == textxprocessing:\n return [ \"pde\" ]\n\n if mimeType == applicationxaportisdoc:\n return [ \"pdb\", \"pdc\" ]\n\n if mimeType == imagevndzbrushpcx:\n return [ \"pcx\" ]\n\n if mimeType == applicationvndcurlpcurl:\n return [ \"pcurl\" ]\n\n if mimeType == imagexpict:\n return [ \"pct\", \"pict\", \"pict1\", \"pict2\", \"pic\" ]\n\n if mimeType == applicationvndhppclxl:\n return [ \"pclxl\" ]\n\n if mimeType == applicationvndhppcl:\n return [ \"pcl\" ]\n\n if mimeType == applicationxfontpcf:\n return [ \"pcf\", \"pcfz\", \"pcfgz\" ]\n\n if mimeType == applicationxpcenginerom:\n return [ \"pce\" ]\n\n if mimeType == imagexphotocd:\n return [ \"pcd\" ]\n\n if mimeType == applicationvndtcpdumppcap:\n return [ \"pcap\", \"cap\", \"dmp\" ]\n\n if mimeType == imagexportablebitmap:\n return [ \"pbm\" ]\n\n if mimeType == applicationvndpowerbuilder6:\n return [ \"pbd\" ]\n\n if mimeType == applicationvndpawaafile:\n return [ \"paw\" ]\n\n if mimeType == imagexgimppat:\n return [ \"pat\" ]\n\n if mimeType == applicationxpar2:\n return [ \"par2\" ]\n\n if mimeType == applicationxpak:\n return [ \"pak\" ]\n\n if mimeType == applicationvndapplepages:\n return [ \"pages\" ]\n\n if mimeType == applicationxjavapack200:\n return [ \"pack\" ]\n\n if mimeType == applicationxnsproxyautoconfig:\n return [ \"pac\" ]\n\n if mimeType == applicationpkcs8encrypted:\n return [ \"p8e\" ]\n\n if mimeType == applicationpkcs8:\n return [ \"p8\" ]\n\n if mimeType == applicationpkcs7signature:\n return [ \"p7s\" ]\n\n if mimeType == applicationxpkcs7certreqresp:\n return [ \"p7r\" ]\n\n if mimeType == applicationpkcs7mime:\n return [ \"p7c\", \"p7m\" ]\n\n if mimeType == applicationxpkcs7certificates:\n return [ \"p7b\", \"spc\" ]\n\n if mimeType == applicationxpagemaker:\n return [ \"p65\", \"pm6\", \"pmd\" ]\n\n if mimeType == applicationpkcs12:\n return [ \"p12\", \"pfx\" ]\n\n if mimeType == applicationpkcs10:\n return [ \"p10\" ]\n\n if mimeType == textxpascal:\n return [ \"p\", \"pas\" ]\n\n if mimeType == applicationvndopenofficeorgextension:\n return [ \"oxt\" ]\n\n if mimeType == applicationoxps:\n return [ \"oxps\" ]\n\n if mimeType == applicationowl_xml:\n return [ \"owx\" ]\n\n if mimeType == applicationxvirtualboxovf:\n return [ \"ovf\" ]\n\n if mimeType == applicationovf:\n return [ \"ova\" ]\n\n if mimeType == applicationvndoasisopendocumenttexttemplate:\n return [ \"ott\" ]\n\n if mimeType == applicationvndoasisopendocumentspreadsheettemplate:\n return [ \"ots\" ]\n\n if mimeType == applicationvndoasisopendocumentpresentationtemplate:\n return [ \"otp\" ]\n\n if mimeType == applicationvndoasisopendocumentimagetemplate:\n return [ \"oti\" ]\n\n if mimeType == applicationvndoasisopendocumenttextweb:\n return [ \"oth\" ]\n\n if mimeType == applicationvndoasisopendocumentgraphicstemplate:\n return [ \"otg\" ]\n\n if mimeType == applicationvndoasisopendocumentformulatemplate:\n return [ \"otf\", \"odft\" ]\n\n if 
mimeType == applicationvndoasisopendocumentcharttemplate:\n return [ \"otc\" ]\n\n if mimeType == applicationvndopenstreetmapdata_xml:\n return [ \"osm\" ]\n\n if mimeType == applicationvndyamahaopenscoreformatosfpvg_xml:\n return [ \"osfpvg\" ]\n\n if mimeType == applicationvndyamahaopenscoreformat:\n return [ \"osf\" ]\n\n if mimeType == textorg:\n return [ \"org\" ]\n\n if mimeType == imagexolympusorf:\n return [ \"orf\" ]\n\n if mimeType == imageopenraster:\n return [ \"ora\" ]\n\n if mimeType == textxopml_xml:\n return [ \"opml\" ]\n\n if mimeType == applicationoebpspackage_xml:\n return [ \"opf\" ]\n\n if mimeType == textxooc:\n return [ \"ooc\" ]\n\n if mimeType == applicationonenote:\n return [ \"onetoc\", \"onetoc2\", \"onetmp\", \"onepkg\" ]\n\n if mimeType == applicationomdoc_xml:\n return [ \"omdoc\" ]\n\n if mimeType == applicationxoleo:\n return [ \"oleo\" ]\n\n if mimeType == applicationogg:\n return [ \"ogx\" ]\n\n if mimeType == videoogg:\n return [ \"ogv\" ]\n\n if mimeType == videoxogm_ogg:\n return [ \"ogm\" ]\n\n if mimeType == modelvndopengex:\n return [ \"ogex\" ]\n\n if mimeType == audioogg:\n return [ \"oga\", \"ogg\", \"opus\" ]\n\n if mimeType == applicationvndoasisopendocumenttext:\n return [ \"odt\" ]\n\n if mimeType == applicationvndoasisopendocumentspreadsheet:\n return [ \"ods\" ]\n\n if mimeType == applicationvndoasisopendocumentpresentation:\n return [ \"odp\" ]\n\n if mimeType == applicationvndoasisopendocumenttextmaster:\n return [ \"odm\" ]\n\n if mimeType == applicationvndoasisopendocumentimage:\n return [ \"odi\" ]\n\n if mimeType == applicationvndoasisopendocumentgraphics:\n return [ \"odg\" ]\n\n if mimeType == applicationvndoasisopendocumentformula:\n return [ \"odf\" ]\n\n if mimeType == applicationvndoasisopendocumentchart:\n return [ \"odc\" ]\n\n if mimeType == applicationvndoasisopendocumentdatabase:\n return [ \"odb\" ]\n\n if mimeType == applicationoda:\n return [ \"oda\" ]\n\n if mimeType == textxocl:\n return [ \"ocl\" ]\n\n if mimeType == applicationxtgif:\n return [ \"obj\" ]\n\n if mimeType == applicationvndopenbloxgame_xml:\n return [ \"obgx\" ]\n\n if mimeType == applicationxmsbinder:\n return [ \"obd\" ]\n\n if mimeType == applicationvndfujitsuoasys:\n return [ \"oas\" ]\n\n if mimeType == applicationvndfujitsuoasys3:\n return [ \"oa3\" ]\n\n if mimeType == applicationvndfujitsuoasys2:\n return [ \"oa2\" ]\n\n if mimeType == applicationxobject:\n return [ \"o\", \"mod\" ]\n\n if mimeType == applicationxnzb:\n return [ \"nzb\" ]\n\n if mimeType == applicationvndapplenumbers:\n return [ \"numbers\" ]\n\n if mimeType == applicationvndnitf:\n return [ \"ntf\", \"nitf\" ]\n\n if mimeType == applicationntriples:\n return [ \"nt\" ]\n\n if mimeType == videoxnsv:\n return [ \"nsv\" ]\n\n if mimeType == applicationvndlotusnotes:\n return [ \"nsf\" ]\n\n if mimeType == applicationxnetshowchannel:\n return [ \"nsc\" ]\n\n if mimeType == imagexnikonnrw:\n return [ \"nrw\" ]\n\n if mimeType == applicationnquads:\n return [ \"nq\" ]\n\n if mimeType == imagevndnetfpx:\n return [ \"npx\" ]\n\n if mimeType == applicationvndnoblenetweb:\n return [ \"nnw\" ]\n\n if mimeType == applicationvndnoblenetsealer:\n return [ \"nns\" ]\n\n if mimeType == applicationvndnoblenetdirectory:\n return [ \"nnd\" ]\n\n if mimeType == applicationvndenliven:\n return [ \"nml\" ]\n\n if mimeType == applicationvndneurolanguagenlu:\n return [ \"nlu\" ]\n\n if mimeType == applicationxneogeopocketrom:\n return [ \"ngp\" ]\n\n if mimeType == applicationvndnokiangagedata:\n 
return [ \"ngdat\" ]\n\n if mimeType == applicationxneogeopocketcolorrom:\n return [ \"ngc\" ]\n\n if mimeType == applicationvndnokiangagesymbianinstall:\n return [ \"n-gage\" ]\n\n if mimeType == textxnfo:\n return [ \"nfo\" ]\n\n if mimeType == applicationxnesrom:\n return [ \"nes\", \"nez\", \"unf\", \"unif\" ]\n\n if mimeType == imagexnikonnef:\n return [ \"nef\" ]\n\n if mimeType == applicationxnintendodsrom:\n return [ \"nds\" ]\n\n if mimeType == applicationxdtbncx_xml:\n return [ \"ncx\" ]\n\n if mimeType == applicationvndwolframplayer:\n return [ \"nbp\" ]\n\n if mimeType == applicationmathematica:\n return [ \"nb\", \"ma\", \"mb\" ]\n\n if mimeType == applicationxn64rom:\n return [ \"n64\", \"z64\", \"v64\" ]\n\n if mimeType == textn3:\n return [ \"n3\" ]\n\n if mimeType == applicationvndtriscapemxs:\n return [ \"mxs\" ]\n\n if mimeType == applicationxv_xml:\n return [ \"mxml\", \"xhvml\", \"xvml\", \"xvm\" ]\n\n if mimeType == audiomobilexmf:\n return [ \"mxmf\" ]\n\n if mimeType == applicationvndrecordaremusicxml:\n return [ \"mxl\" ]\n\n if mimeType == applicationmxf:\n return [ \"mxf\" ]\n\n if mimeType == applicationvndmfer:\n return [ \"mwf\" ]\n\n if mimeType == applicationvndmapboxvectortile:\n return [ \"mvt\" ]\n\n if mimeType == applicationxmsmediaview:\n return [ \"mvb\", \"m13\", \"m14\" ]\n\n if mimeType == applicationvndrecordaremusicxml_xml:\n return [ \"musicxml\" ]\n\n if mimeType == applicationmmtusd_xml:\n return [ \"musd\" ]\n\n if mimeType == applicationvndmusician:\n return [ \"mus\" ]\n\n if mimeType == textxmup:\n return [ \"mup\", \"not\" ]\n\n if mimeType == modelmtl:\n return [ \"mtl\" ]\n\n if mimeType == applicationxmsxrom:\n return [ \"msx\" ]\n\n if mimeType == applicationvndmuveestyle:\n return [ \"msty\" ]\n\n if mimeType == imagexmsod:\n return [ \"msod\" ]\n\n if mimeType == applicationvndmobiusmsl:\n return [ \"msl\" ]\n\n if mimeType == applicationxmsi:\n return [ \"msi\" ]\n\n if mimeType == modelmesh:\n return [ \"msh\", \"mesh\", \"silo\" ]\n\n if mimeType == applicationvndmsoutlook:\n return [ \"msg\" ]\n\n if mimeType == applicationvndepsonmsf:\n return [ \"msf\" ]\n\n if mimeType == applicationvndmseq:\n return [ \"mseq\" ]\n\n if mimeType == applicationvndfdsnmseed:\n return [ \"mseed\" ]\n\n if mimeType == applicationmediaservercontrol_xml:\n return [ \"mscml\" ]\n\n if mimeType == textxtroffms:\n return [ \"ms\" ]\n\n if mimeType == imagexminoltamrw:\n return [ \"mrw\" ]\n\n if mimeType == textxmrml:\n return [ \"mrml\", \"mrl\" ]\n\n if mimeType == applicationmarcxml_xml:\n return [ \"mrcx\" ]\n\n if mimeType == applicationmarc:\n return [ \"mrc\" ]\n\n if mimeType == applicationvndmobiusmqy:\n return [ \"mqy\" ]\n\n if mimeType == applicationvndibmminipay:\n return [ \"mpy\" ]\n\n if mimeType == applicationvndmsproject:\n return [ \"mpt\" ]\n\n if mimeType == applicationvndmophunapplication:\n return [ \"mpn\" ]\n\n if mimeType == applicationvndblueicemultipass:\n return [ \"mpm\" ]\n\n if mimeType == textxmpl2:\n return [ \"mpl\" ]\n\n if mimeType == applicationvndappleinstaller_xml:\n return [ \"mpkg\" ]\n\n if mimeType == applicationmediapolicydataset_xml:\n return [ \"mpf\" ]\n\n if mimeType == videompeg:\n return [ \"mpeg\", \"mpg\", \"mpe\", \"vob\", \"090909vdr\", \"m1v\", \"m2v\" ]\n\n if mimeType == applicationdash_xml:\n return [ \"mpd\" ]\n\n if mimeType == audioxmusepack:\n return [ \"mpc\", \"mpp\", \"mp\" ]\n\n if mimeType == applicationmp4:\n return [ \"mp4s\", \"m4p\" ]\n\n if mimeType == videomp4:\n return [ 
\"mp4\", \"m4v\", \"f4v\", \"lrv\", \"mp4v\", \"mpg4\" ]\n\n if mimeType == audiompeg:\n return [ \"mp3\", \"mpga\", \"mp2a\", \"m2a\", \"m3a\" ]\n\n if mimeType == audiomp2:\n return [ \"mp2\" ]\n\n if mimeType == videoxsgimovie:\n return [ \"movie\" ]\n\n if mimeType == textxmof:\n return [ \"mof\" ]\n\n if mimeType == applicationmods_xml:\n return [ \"mods\" ]\n\n if mimeType == textxmoc:\n return [ \"moc\" ]\n\n if mimeType == applicationxmobipocketebook:\n return [ \"mobi\", \"prc\" ]\n\n if mimeType == audioxmo3:\n return [ \"mo3\" ]\n\n if mimeType == applicationxmsmoney:\n return [ \"mny\" ]\n\n if mimeType == videoxmng:\n return [ \"mng\" ]\n\n if mimeType == imagevndfujixeroxedmicsmmr:\n return [ \"mmr\" ]\n\n if mimeType == applicationmathml_xml:\n return [ \"mml\", \"mathml\" ]\n\n if mimeType == applicationvndsmaf:\n return [ \"mmf\", \"smaf\" ]\n\n if mimeType == applicationvndchipnutskaraokemmd:\n return [ \"mmd\" ]\n\n if mimeType == textxobjc__src:\n return [ \"mm\" ]\n\n if mimeType == applicationvnddolbymlp:\n return [ \"mlp\" ]\n\n if mimeType == textxocaml:\n return [ \"ml\", \"mli\" ]\n\n if mimeType == videoxmatroska:\n return [ \"mkv\", \"mks\" ]\n\n if mimeType == audioxmatroska:\n return [ \"mka\" ]\n\n if mimeType == videoxmatroska3d:\n return [ \"mk3d\" ]\n\n if mimeType == videoxmjpeg:\n return [ \"mjpeg\", \"mjpg\" ]\n\n if mimeType == videomj2:\n return [ \"mj2\", \"mjp2\" ]\n\n if mimeType == audioxminipsf:\n return [ \"minipsf\" ]\n\n if mimeType == applicationxmif:\n return [ \"mif\" ]\n\n if mimeType == applicationxmie:\n return [ \"mie\" ]\n\n if mimeType == audiomidi:\n return [ \"mid\", \"midi\", \"kar\", \"rmi\" ]\n\n if mimeType == applicationxmimearchive:\n return [ \"mhtml\", \"mht\" ]\n\n if mimeType == applicationvndproteusmagazine:\n return [ \"mgz\" ]\n\n if mimeType == applicationxmagicpoint:\n return [ \"mgp\" ]\n\n if mimeType == applicationrpkimanifest:\n return [ \"mft\" ]\n\n if mimeType == applicationvndmfmp:\n return [ \"mfm\" ]\n\n if mimeType == applicationmets_xml:\n return [ \"mets\" ]\n\n if mimeType == applicationmetalink_xml:\n return [ \"metalink\" ]\n\n if mimeType == applicationmetalink4_xml:\n return [ \"meta4\" ]\n\n if mimeType == textxmeson:\n return [ \"mesonbuild\", \"mesonoptionstxt\" ]\n\n if mimeType == textxtroffme:\n return [ \"me\" ]\n\n if mimeType == imagevndmsmodi:\n return [ \"mdi\" ]\n\n if mimeType == applicationvndmsaccess:\n return [ \"mdb\" ]\n\n if mimeType == textmarkdown:\n return [ \"md\", \"mkd\", \"markdown\" ]\n\n if mimeType == textvndcurlmcurl:\n return [ \"mcurl\" ]\n\n if mimeType == applicationvndmcd:\n return [ \"mcd\" ]\n\n if mimeType == textvndsenxwarpscript:\n return [ \"mc2\" ]\n\n if mimeType == applicationvndmedcalcdata:\n return [ \"mc1\" ]\n\n if mimeType == applicationmbox:\n return [ \"mbox\" ]\n\n if mimeType == applicationvndmobiusmbk:\n return [ \"mbk\" ]\n\n if mimeType == textcachemanifest:\n return [ \"manifest\", \"appcache\" ]\n\n if mimeType == applicationxtroffman:\n return [ \"man\", \"19\" ]\n\n if mimeType == textxmakefile:\n return [ \"makefile\", \"gnumakefile\", \"mk\", \"mak\" ]\n\n if mimeType == applicationvndecowinchart:\n return [ \"mag\" ]\n\n if mimeType == applicationmmtaei_xml:\n return [ \"maei\" ]\n\n if mimeType == applicationmads_xml:\n return [ \"mads\" ]\n\n if mimeType == applicationxmarkaby:\n return [ \"mab\" ]\n\n if mimeType == applicationxthomsoncartridgememo7:\n return [ \"m7\" ]\n\n if mimeType == videoisosegment:\n return [ \"m4s\" ]\n\n if 
mimeType == audioxm4r:\n return [ \"m4r\" ]\n\n if mimeType == audioxm4b:\n return [ \"m4b\", \"f4b\" ]\n\n if mimeType == audiomp4:\n return [ \"m4a\", \"f4a\", \"mp4a\" ]\n\n if mimeType == applicationxm4:\n return [ \"m4\" ]\n\n if mimeType == audioxmpegurl:\n return [ \"m3u\", \"m3u8\", \"vlc\" ]\n\n if mimeType == videomp2t:\n return [ \"m2t\", \"m2ts\", \"mts\", \"cpi\", \"clpi\", \"mpls\", \"bdm\", \"bdmv\" ]\n\n if mimeType == applicationmp21:\n return [ \"m21\", \"mp21\" ]\n\n if mimeType == videovndmpegurl:\n return [ \"m1u\", \"m4u\", \"mxu\" ]\n\n if mimeType == textxobjcsrc:\n return [ \"m\" ]\n\n if mimeType == applicationxlzop:\n return [ \"lzo\" ]\n\n if mimeType == applicationxlzma:\n return [ \"lzma\" ]\n\n if mimeType == applicationxlz4:\n return [ \"lz4\" ]\n\n if mimeType == applicationxlzip:\n return [ \"lz\" ]\n\n if mimeType == applicationxlyx:\n return [ \"lyx\" ]\n\n if mimeType == textxlilypond:\n return [ \"ly\" ]\n\n if mimeType == imagexlws:\n return [ \"lws\" ]\n\n if mimeType == applicationvndlotuswordpro:\n return [ \"lwp\" ]\n\n if mimeType == imagexlwo:\n return [ \"lwo\", \"lwob\" ]\n\n if mimeType == audiovndlucentvoice:\n return [ \"lvp\" ]\n\n if mimeType == applicationxluabytecode:\n return [ \"luac\" ]\n\n if mimeType == textxlua:\n return [ \"lua\" ]\n\n if mimeType == applicationvndfrogansltf:\n return [ \"ltf\" ]\n\n if mimeType == applicationxlrzip:\n return [ \"lrz\" ]\n\n if mimeType == applicationvndmslrm:\n return [ \"lrm\" ]\n\n if mimeType == applicationlost_xml:\n return [ \"lostxml\" ]\n\n if mimeType == textxlog:\n return [ \"log\" ]\n\n if mimeType == audiousac:\n return [ \"loas\", \"xhe\" ]\n\n if mimeType == applicationxatarilynxrom:\n return [ \"lnx\" ]\n\n if mimeType == applicationxmsshortcut:\n return [ \"lnk\" ]\n\n if mimeType == textcoffeescript:\n return [ \"litcoffee\" ]\n\n if mimeType == applicationvndroute66link66_xml:\n return [ \"link66\" ]\n\n if mimeType == applicationxlhz:\n return [ \"lhz\" ]\n\n if mimeType == textxliteratehaskell:\n return [ \"lhs\" ]\n\n if mimeType == applicationxlha:\n return [ \"lha\", \"lzh\" ]\n\n if mimeType == applicationlgr_xml:\n return [ \"lgr\" ]\n\n if mimeType == textless:\n return [ \"less\" ]\n\n if mimeType == applicationvndhhelessonplayer:\n return [ \"les\" ]\n\n if mimeType == textxldif:\n return [ \"ldif\" ]\n\n if mimeType == applicationvndllamagraphicslifebalanceexchange_xml:\n return [ \"lbe\" ]\n\n if mimeType == applicationvndllamagraphicslifebalancedesktop:\n return [ \"lbd\" ]\n\n if mimeType == applicationvndlaslas_xml:\n return [ \"lasxml\" ]\n\n if mimeType == applicationxsharedlibraryla:\n return [ \"la\" ]\n\n if mimeType == applicationxkword:\n return [ \"kwd\", \"kwt\" ]\n\n if mimeType == applicationxkugar:\n return [ \"kud\" ]\n\n if mimeType == applicationvndkahootz:\n return [ \"ktz\", \"ktr\" ]\n\n if mimeType == imagektx2:\n return [ \"ktx2\" ]\n\n if mimeType == imagektx:\n return [ \"ktx\" ]\n\n if mimeType == textxkotlin:\n return [ \"kt\" ]\n\n if mimeType == textxkaitaistruct:\n return [ \"ksy\" ]\n\n if mimeType == applicationxkspread:\n return [ \"ksp\" ]\n\n if mimeType == applicationxkrita:\n return [ \"kra\", \"krz\" ]\n\n if mimeType == applicationvnddskeypoint:\n return [ \"kpxx\" ]\n\n if mimeType == applicationxkpresenter:\n return [ \"kpr\", \"kpt\" ]\n\n if mimeType == applicationxkpovmodeler:\n return [ \"kpm\" ]\n\n if mimeType == applicationxkontour:\n return [ \"kon\" ]\n\n if mimeType == applicationvndkinar:\n return [ \"kne\", \"knp\" 
]\n\n if mimeType == applicationvndgoogleearthkmz:\n return [ \"kmz\" ]\n\n if mimeType == applicationvndgoogleearthkml_xml:\n return [ \"kml\" ]\n\n if mimeType == applicationxkillustrator:\n return [ \"kil\" ]\n\n if mimeType == applicationvndkidspiration:\n return [ \"kia\" ]\n\n if mimeType == applicationxkformula:\n return [ \"kfo\" ]\n\n if mimeType == applicationxkexiprojectshortcut:\n return [ \"kexis\" ]\n\n if mimeType == applicationxkexiconnectiondata:\n return [ \"kexic\" ]\n\n if mimeType == applicationxkexiprojectsqlite2:\n return [ \"kexi\" ]\n\n if mimeType == imagexkodakkdc:\n return [ \"kdc\" ]\n\n if mimeType == applicationxkeepass2:\n return [ \"kdbx\" ]\n\n if mimeType == applicationxkarbon:\n return [ \"karbon\" ]\n\n if mimeType == applicationxthomsoncassette:\n return [ \"k7\" ]\n\n if mimeType == imagexkodakk25:\n return [ \"k25\" ]\n\n if mimeType == imagejxss:\n return [ \"jxss\" ]\n\n if mimeType == imagejxsi:\n return [ \"jxsi\" ]\n\n if mimeType == imagejxsc:\n return [ \"jxsc\" ]\n\n if mimeType == imagejxs:\n return [ \"jxs\" ]\n\n if mimeType == imagejxrs:\n return [ \"jxrs\" ]\n\n if mimeType == imagejxra:\n return [ \"jxra\" ]\n\n if mimeType == imagejxr:\n return [ \"jxr\" ]\n\n if mimeType == imagejxl:\n return [ \"jxl\" ]\n\n if mimeType == textjsx:\n return [ \"jsx\" ]\n\n if mimeType == applicationjsonpatch_json:\n return [ \"jsonpatch\" ]\n\n if mimeType == applicationjsonml_json:\n return [ \"jsonml\" ]\n\n if mimeType == applicationld_json:\n return [ \"jsonld\" ]\n\n if mimeType == applicationjson5:\n return [ \"json5\" ]\n\n if mimeType == applicationjson:\n return [ \"json\", \"map\" ]\n\n if mimeType == textjavascript:\n return [ \"js\", \"jsm\", \"mjs\" ]\n\n if mimeType == applicationjrd_json:\n return [ \"jrd\" ]\n\n if mimeType == applicationxjbuilderproject:\n return [ \"jpr\", \"jpx\" ]\n\n if mimeType == imagejpm:\n return [ \"jpm\", \"jpgm\" ]\n\n if mimeType == imagejph:\n return [ \"jph\" ]\n\n if mimeType == videojpeg:\n return [ \"jpgv\" ]\n\n if mimeType == imagejpeg:\n return [ \"jpg\", \"jpeg\", \"jpe\" ]\n\n if mimeType == imagejpx:\n return [ \"jpf\" ]\n\n if mimeType == imagejp2:\n return [ \"jp2\", \"jpg2\" ]\n\n if mimeType == applicationvndjoostjodaarchive:\n return [ \"joda\" ]\n\n if mimeType == applicationxjavajnlpfile:\n return [ \"jnlp\" ]\n\n if mimeType == imagexjng:\n return [ \"jng\" ]\n\n if mimeType == applicationvndhpjlyt:\n return [ \"jlt\" ]\n\n if mimeType == imagejls:\n return [ \"jls\" ]\n\n if mimeType == applicationxjavakeystore:\n return [ \"jks\", \"ks\", \"cacerts\" ]\n\n if mimeType == applicationvndjisp:\n return [ \"jisp\" ]\n\n if mimeType == imagejphc:\n return [ \"jhc\" ]\n\n if mimeType == applicationxjavajcekeystore:\n return [ \"jceks\" ]\n\n if mimeType == textxjava:\n return [ \"java\" ]\n\n if mimeType == applicationxjavaarchivediff:\n return [ \"jardiff\" ]\n\n if mimeType == applicationxjavaarchive:\n return [ \"jar\" ]\n\n if mimeType == applicationvndjam:\n return [ \"jam\" ]\n\n if mimeType == textjade:\n return [ \"jade\" ]\n\n if mimeType == textvndsunj2meappdescriptor:\n return [ \"jad\" ]\n\n if mimeType == imagexjp2codestream:\n return [ \"j2c\", \"j2k\", \"jpc\" ]\n\n if mimeType == applicationvndimmervisionivu:\n return [ \"ivu\" ]\n\n if mimeType == applicationvndimmervisionivp:\n return [ \"ivp\" ]\n\n if mimeType == applicationits_xml:\n return [ \"its\" ]\n\n if mimeType == applicationvndshanainformedformtemplate:\n return [ \"itp\" ]\n\n if mimeType == applicationxit87:\n 
return [ \"it87\" ]\n\n if mimeType == audioxit:\n return [ \"it\" ]\n\n if mimeType == applicationxcdimage:\n return [ \"iso\", \"iso9660\" ]\n\n if mimeType == applicationvndirepositorypackage_xml:\n return [ \"irp\" ]\n\n if mimeType == applicationvndibmrightsmanagement:\n return [ \"irm\" ]\n\n if mimeType == applicationxipynb_json:\n return [ \"ipynb\" ]\n\n if mimeType == textxiptables:\n return [ \"iptables\" ]\n\n if mimeType == applicationxipspatch:\n return [ \"ips\" ]\n\n if mimeType == applicationvndshanainformedpackage:\n return [ \"ipk\" ]\n\n if mimeType == applicationipfix:\n return [ \"ipfix\" ]\n\n if mimeType == applicationvndastraeasoftwareiota:\n return [ \"iota\" ]\n\n if mimeType == textxinstall:\n return [ \"install\" ]\n\n if mimeType == applicationinkml_xml:\n return [ \"ink\", \"inkml\" ]\n\n if mimeType == textximelody:\n return [ \"imy\", \"ime\" ]\n\n if mimeType == applicationvndmsims:\n return [ \"ims\" ]\n\n if mimeType == applicationvndaccpacsimplyimp:\n return [ \"imp\" ]\n\n if mimeType == applicationvndshanainformedinterchange:\n return [ \"iif\" ]\n\n if mimeType == applicationvndmicrografxigx:\n return [ \"igx\" ]\n\n if mimeType == modeliges:\n return [ \"igs\", \"iges\" ]\n\n if mimeType == applicationvndinsorsigm:\n return [ \"igm\" ]\n\n if mimeType == applicationvndigloader:\n return [ \"igl\" ]\n\n if mimeType == applicationvndshanainformedformdata:\n return [ \"ifm\" ]\n\n if mimeType == imagexilbm:\n return [ \"iff\", \"ilbm\", \"lbm\" ]\n\n if mimeType == imageief:\n return [ \"ief\" ]\n\n if mimeType == textxidl:\n return [ \"idl\" ]\n\n if mimeType == imagevndmicrosofticon:\n return [ \"ico\" ]\n\n if mimeType == imagexicns:\n return [ \"icns\" ]\n\n if mimeType == xconferencexcooltalk:\n return [ \"ice\" ]\n\n if mimeType == applicationvndiccprofile:\n return [ \"icc\", \"icm\" ]\n\n if mimeType == applicationxica:\n return [ \"ica\" ]\n\n if mimeType == applicationvndintergeo:\n return [ \"i2g\" ]\n\n if mimeType == applicationxhwt:\n return [ \"hwt\" ]\n\n if mimeType == applicationxhwp:\n return [ \"hwp\" ]\n\n if mimeType == applicationvndyamahahvscript:\n return [ \"hvs\" ]\n\n if mimeType == applicationvndyamahahvvoice:\n return [ \"hvp\" ]\n\n if mimeType == applicationvndyamahahvdic:\n return [ \"hvd\" ]\n\n if mimeType == applicationvndkenameaapp:\n return [ \"htke\" ]\n\n if mimeType == textxcomponent:\n return [ \"htc\" ]\n\n if mimeType == imagehsj2:\n return [ \"hsj2\" ]\n\n if mimeType == textxhaskell:\n return [ \"hs\" ]\n\n if mimeType == applicationmacbinhex40:\n return [ \"hqx\" ]\n\n if mimeType == applicationvndhphps:\n return [ \"hps\" ]\n\n if mimeType == applicationvndhphpid:\n return [ \"hpid\" ]\n\n if mimeType == applicationvndhphpgl:\n return [ \"hpgl\" ]\n\n if mimeType == applicationwinhlp:\n return [ \"hlp\" ]\n\n if mimeType == applicationhjson:\n return [ \"hjson\" ]\n\n if mimeType == textxc__hdr:\n return [ \"hh\", \"hp\", \"hpp\", \"h\", \"hxx\" ]\n\n if mimeType == applicationxhfefloppyimage:\n return [ \"hfe\" ]\n\n if mimeType == applicationatscheld_xml:\n return [ \"held\" ]\n\n if mimeType == imagehej2k:\n return [ \"hej2\" ]\n\n if mimeType == imageheifsequence:\n return [ \"heifs\" ]\n\n if mimeType == imageheicsequence:\n return [ \"heics\" ]\n\n if mimeType == imageheif:\n return [ \"heic\", \"heif\", \"hif\" ]\n\n if mimeType == applicationxhdf:\n return [ \"hdf\", \"hdf4\", \"h4\", \"hdf5\", \"h5\" ]\n\n if mimeType == applicationxvirtualboxhdd:\n return [ \"hdd\" ]\n\n if mimeType == 
textxhandlebarstemplate:\n return [ \"hbs\" ]\n\n if mimeType == applicationvndhbci:\n return [ \"hbci\" ]\n\n if mimeType == applicationvndhal_xml:\n return [ \"hal\" ]\n\n if mimeType == videoh264:\n return [ \"h264\" ]\n\n if mimeType == videoh263:\n return [ \"h263\" ]\n\n if mimeType == videoh261:\n return [ \"h261\" ]\n\n if mimeType == applicationgzip:\n return [ \"gz\" ]\n\n if mimeType == applicationvndgeonext:\n return [ \"gxt\" ]\n\n if mimeType == applicationgxf:\n return [ \"gxf\" ]\n\n if mimeType == textxgcodegx:\n return [ \"gx\" ]\n\n if mimeType == textxgooglevideopointer:\n return [ \"gvp\" ]\n\n if mimeType == textvndgraphviz:\n return [ \"gv\" ]\n\n if mimeType == modelvndgtw:\n return [ \"gtw\" ]\n\n if mimeType == applicationvndgroovetoolmessage:\n return [ \"gtm\" ]\n\n if mimeType == audioxgsm:\n return [ \"gsm\" ]\n\n if mimeType == applicationvndgoogleappspresentation:\n return [ \"gslides\" ]\n\n if mimeType == applicationvndgoogleappsspreadsheet:\n return [ \"gsheet\" ]\n\n if mimeType == textxgenie:\n return [ \"gs\" ]\n\n if mimeType == applicationsrgs_xml:\n return [ \"grxml\" ]\n\n if mimeType == applicationvndgrooveinjector:\n return [ \"grv\" ]\n\n if mimeType == textxgroovy:\n return [ \"groovy\", \"gvy\", \"gy\", \"gsh\" ]\n\n if mimeType == applicationxgrampsxml:\n return [ \"gramps\" ]\n\n if mimeType == applicationsrgs:\n return [ \"gram\" ]\n\n if mimeType == textxgradle:\n return [ \"gradle\" ]\n\n if mimeType == applicationxgraphite:\n return [ \"gra\" ]\n\n if mimeType == applicationvndgrafeq:\n return [ \"gqf\", \"gqs\" ]\n\n if mimeType == applicationgpx_xml:\n return [ \"gpx\" ]\n\n if mimeType == applicationvndflographit:\n return [ \"gph\" ]\n\n if mimeType == applicationxgnuplot:\n return [ \"gp\", \"gplt\", \"gnuplot\" ]\n\n if mimeType == textxgo:\n return [ \"go\" ]\n\n if mimeType == applicationxgnumeric:\n return [ \"gnumeric\" ]\n\n if mimeType == applicationxgnucash:\n return [ \"gnucash\", \"gnc\", \"xac\" ]\n\n if mimeType == applicationgnunetdirectory:\n return [ \"gnd\" ]\n\n if mimeType == applicationvndgmx:\n return [ \"gmx\" ]\n\n if mimeType == applicationxprofile:\n return [ \"gmonout\" ]\n\n if mimeType == applicationxgettexttranslation:\n return [ \"gmo\", \"mo\" ]\n\n if mimeType == applicationgml_xml:\n return [ \"gml\" ]\n\n if mimeType == modelgltf_json:\n return [ \"gltf\" ]\n\n if mimeType == modelgltfbinary:\n return [ \"glb\" ]\n\n if mimeType == applicationxglade:\n return [ \"glade\" ]\n\n if mimeType == applicationvndgrooveidentitymessage:\n return [ \"gim\" ]\n\n if mimeType == imagexgimpgih:\n return [ \"gih\" ]\n\n if mimeType == imagegif:\n return [ \"gif\" ]\n\n if mimeType == applicationvndgroovehelp:\n return [ \"ghf\" ]\n\n if mimeType == applicationvndgeogebratool:\n return [ \"ggt\" ]\n\n if mimeType == applicationvndgeogebrafile:\n return [ \"ggb\" ]\n\n if mimeType == applicationxgamegearrom:\n return [ \"gg\" ]\n\n if mimeType == applicationxtexgf:\n return [ \"gf\" ]\n\n if mimeType == applicationvndgeometryexplorer:\n return [ \"gex\", \"gre\" ]\n\n if mimeType == applicationgeo_json:\n return [ \"geojson\" ]\n\n if mimeType == applicationvnddynageo:\n return [ \"geo\" ]\n\n if mimeType == applicationxgenesisrom:\n return [ \"gen\", \"sgd\" ]\n\n if mimeType == applicationxgedcom:\n return [ \"ged\", \"gedcom\" ]\n\n if mimeType == applicationxgodotshader:\n return [ \"gdshader\" ]\n\n if mimeType == applicationvndgoogleappsdocument:\n return [ \"gdoc\" ]\n\n if mimeType == modelvndgdl:\n return [ 
\"gdl\" ]\n\n if mimeType == applicationxgdromcue:\n return [ \"gdi\" ]\n\n if mimeType == applicationxgdscript:\n return [ \"gd\" ]\n\n if mimeType == textxgcode:\n return [ \"gcode\" ]\n\n if mimeType == applicationxgcacompressed:\n return [ \"gca\" ]\n\n if mimeType == imagexgimpgbr:\n return [ \"gbr\" ]\n\n if mimeType == applicationxgameboycolorrom:\n return [ \"gbc\", \"cgb\" ]\n\n if mimeType == applicationxgbarom:\n return [ \"gba\", \"agb\" ]\n\n if mimeType == applicationxgameboyrom:\n return [ \"gb\", \"sgb\" ]\n\n if mimeType == applicationxtads:\n return [ \"gam\" ]\n\n if mimeType == applicationvndgrooveaccount:\n return [ \"gac\" ]\n\n if mimeType == applicationvndgeospace:\n return [ \"g3w\" ]\n\n if mimeType == imageg3fax:\n return [ \"g3\" ]\n\n if mimeType == applicationvndgeoplan:\n return [ \"g2w\" ]\n\n if mimeType == applicationvndfuzzysheet:\n return [ \"fzs\" ]\n\n if mimeType == applicationvndadobefxp:\n return [ \"fxp\", \"fxpl\" ]\n\n if mimeType == videoxjavafx:\n return [ \"fxm\" ]\n\n if mimeType == videovndfvt:\n return [ \"fvt\" ]\n\n if mimeType == applicationvndanserwebfundstransferinitiation:\n return [ \"fti\" ]\n\n if mimeType == applicationvndfluxtimeclip:\n return [ \"ftc\" ]\n\n if mimeType == imagevndfst:\n return [ \"fst\" ]\n\n if mimeType == applicationvndfscweblaunch:\n return [ \"fsc\" ]\n\n if mimeType == imagevndfpx:\n return [ \"fpx\" ]\n\n if mimeType == applicationvndoasisopendocumenttextflatxml:\n return [ \"fodt\" ]\n\n if mimeType == applicationvndoasisopendocumentspreadsheetflatxml:\n return [ \"fods\" ]\n\n if mimeType == applicationvndoasisopendocumentpresentationflatxml:\n return [ \"fodp\" ]\n\n if mimeType == applicationvndoasisopendocumentgraphicsflatxml:\n return [ \"fodg\" ]\n\n if mimeType == textxxslfo:\n return [ \"fo\", \"xslfo\" ]\n\n if mimeType == applicationvndfrogansfnc:\n return [ \"fnc\" ]\n\n if mimeType == applicationvndframemaker:\n return [ \"fm\", \"frame\", \"maker\", \"book\" ]\n\n if mimeType == textvndfly:\n return [ \"fly\" ]\n\n if mimeType == textvndfmiflexstor:\n return [ \"flx\" ]\n\n if mimeType == applicationxkivio:\n return [ \"flw\" ]\n\n if mimeType == videoxflv:\n return [ \"flv\" ]\n\n if mimeType == applicationvndmicrografxflo:\n return [ \"flo\" ]\n\n if mimeType == videoxflic:\n return [ \"fli\", \"flc\" ]\n\n if mimeType == applicationvndflatpakrepo:\n return [ \"flatpakrepo\" ]\n\n if mimeType == applicationvndflatpakref:\n return [ \"flatpakref\" ]\n\n if mimeType == applicationvndflatpak:\n return [ \"flatpak\", \"xdgapp\" ]\n\n if mimeType == audioflac:\n return [ \"flac\" ]\n\n if mimeType == applicationxfluid:\n return [ \"fl\" ]\n\n if mimeType == applicationfits:\n return [ \"fits\", \"fit\", \"fts\" ]\n\n if mimeType == imagexxfig:\n return [ \"fig\" ]\n\n if mimeType == imagexfreehand:\n return [ \"fh\", \"fhc\", \"fh4\", \"fh5\", \"fh7\" ]\n\n if mimeType == applicationvndfujitsuoasysgp:\n return [ \"fg5\" ]\n\n if mimeType == textxgherkin:\n return [ \"feature\" ]\n\n if mimeType == applicationvnddenovofcselayoutlink:\n return [ \"fe_launch\" ]\n\n if mimeType == applicationfdt_xml:\n return [ \"fdt\" ]\n\n if mimeType == applicationxfdsdisk:\n return [ \"fds\" ]\n\n if mimeType == applicationfdf:\n return [ \"fdf\" ]\n\n if mimeType == applicationxrawfloppydiskimage:\n return [ \"fd\", \"qd\" ]\n\n if mimeType == applicationvndisacfcs:\n return [ \"fcs\" ]\n\n if mimeType == applicationvndadobeformscentralfcdt:\n return [ \"fcdt\" ]\n\n if mimeType == imagevndfastbidsheet:\n 
return [ \"fbs\" ]\n\n if mimeType == applicationxzipcompressedfb2:\n return [ \"fb2zip\" ]\n\n if mimeType == applicationxfictionbook_xml:\n return [ \"fb2\" ]\n\n if mimeType == textxfortran:\n return [ \"f\", \"f90\", \"f95\", \"for\", \"f77\" ]\n\n if mimeType == applicationvndezpixpackage:\n return [ \"ez3\" ]\n\n if mimeType == applicationvndezpixalbum:\n return [ \"ez2\" ]\n\n if mimeType == applicationandrewinset:\n return [ \"ez\" ]\n\n if mimeType == applicationvndnovadigmext:\n return [ \"ext\" ]\n\n if mimeType == imagexexr:\n return [ \"exr\" ]\n\n if mimeType == applicationexpress:\n return [ \"exp\" ]\n\n if mimeType == applicationexi:\n return [ \"exi\" ]\n\n if mimeType == applicationxmsdosexecutable:\n return [ \"exe\" ]\n\n if mimeType == textxelixir:\n return [ \"ex\", \"exs\" ]\n\n if mimeType == applicationxenvoy:\n return [ \"evy\" ]\n\n if mimeType == applicationxeva:\n return [ \"eva\" ]\n\n if mimeType == textxsetext:\n return [ \"etx\" ]\n\n if mimeType == applicationxetheme:\n return [ \"etheme\" ]\n\n if mimeType == applicationvndepsonesf:\n return [ \"esf\" ]\n\n if mimeType == applicationvndosgisubsystem:\n return [ \"esa\" ]\n\n if mimeType == applicationvndeszigno3_xml:\n return [ \"es3\", \"et3\" ]\n\n if mimeType == applicationecmascript:\n return [ \"es\", \"ecma\" ]\n\n if mimeType == textxerlang:\n return [ \"erl\" ]\n\n if mimeType == applicationepub_zip:\n return [ \"epub\" ]\n\n if mimeType == imagexgzeps:\n return [ \"epsgz\", \"epsigz\", \"epsfgz\" ]\n\n if mimeType == imagexbzeps:\n return [ \"epsbz2\", \"epsibz2\", \"epsfbz2\" ]\n\n if mimeType == imagexeps:\n return [ \"eps\", \"epsi\", \"epsf\" ]\n\n if mimeType == applicationvndmsfontobject:\n return [ \"eot\" ]\n\n if mimeType == audiovnddigitalwinds:\n return [ \"eol\" ]\n\n if mimeType == applicationxmlexternalparsedentity:\n return [ \"ent\" ]\n\n if mimeType == applicationxmsmetafile:\n return [ \"emz\" ]\n\n if mimeType == applicationvndemusicemusic_package:\n return [ \"emp\" ]\n\n if mimeType == applicationemotionml_xml:\n return [ \"emotionml\" ]\n\n if mimeType == applicationemma_xml:\n return [ \"emma\" ]\n\n if mimeType == messagerfc822:\n return [ \"eml\", \"mime\" ]\n\n if mimeType == imageemf:\n return [ \"emf\" ]\n\n if mimeType == textxemacslisp:\n return [ \"el\" ]\n\n if mimeType == applicationvndpgosasli:\n return [ \"ei6\" ]\n\n if mimeType == applicationxegon:\n return [ \"egon\" ]\n\n if mimeType == applicationvndpicsel:\n return [ \"efif\" ]\n\n if mimeType == applicationvndnovadigmedx:\n return [ \"edx\" ]\n\n if mimeType == applicationvndnovadigmedm:\n return [ \"edm\" ]\n\n if mimeType == audiovndnueraecelp9600:\n return [ \"ecelp9600\" ]\n\n if mimeType == audiovndnueraecelp7470:\n return [ \"ecelp7470\" ]\n\n if mimeType == audiovndnueraecelp4800:\n return [ \"ecelp4800\" ]\n\n if mimeType == textxeiffel:\n return [ \"e\", \"eif\" ]\n\n if mimeType == applicationvndspotfiredxp:\n return [ \"dxp\" ]\n\n if mimeType == imagevnddxf:\n return [ \"dxf\" ]\n\n if mimeType == imagevnddwg:\n return [ \"dwg\" ]\n\n if mimeType == modelvnddwf:\n return [ \"dwf\" ]\n\n if mimeType == applicationatscdwd_xml:\n return [ \"dwd\" ]\n\n if mimeType == applicationxgzdvi:\n return [ \"dvigz\" ]\n\n if mimeType == applicationxbzdvi:\n return [ \"dvibz2\" ]\n\n if mimeType == applicationxdvi:\n return [ \"dvi\" ]\n\n if mimeType == videovnddvbfile:\n return [ \"dvb\" ]\n\n if mimeType == videodv:\n return [ \"dv\" ]\n\n if mimeType == textxdevicetreesource:\n return [ \"dtsi\" ]\n\n 
if mimeType == audiovnddtshd:\n return [ \"dtshd\" ]\n\n if mimeType == audiovnddts:\n return [ \"dts\" ]\n\n if mimeType == applicationxmldtd:\n return [ \"dtd\" ]\n\n if mimeType == textxdevicetreebinary:\n return [ \"dtb\" ]\n\n if mimeType == applicationdssc_der:\n return [ \"dssc\" ]\n\n if mimeType == textxdsl:\n return [ \"dsl\" ]\n\n if mimeType == audioxdsf:\n return [ \"dsf\" ]\n\n if mimeType == textprslinestag:\n return [ \"dsc\" ]\n\n if mimeType == imagedicomrle:\n return [ \"drle\" ]\n\n if mimeType == audiovnddra:\n return [ \"dra\" ]\n\n if mimeType == applicationvnddpgraph:\n return [ \"dpg\" ]\n\n if mimeType == applicationvndosgidp:\n return [ \"dp\" ]\n\n if mimeType == applicationvndopenxmlformatsofficedocumentwordprocessingmltemplate:\n return [ \"dotx\" ]\n\n if mimeType == applicationvndmswordtemplatemacroenabled12:\n return [ \"dotm\" ]\n\n if mimeType == applicationmswordtemplate:\n return [ \"dot\" ]\n\n if mimeType == applicationvndopenxmlformatsofficedocumentwordprocessingmldocument:\n return [ \"docx\" ]\n\n if mimeType == applicationvndmsworddocumentmacroenabled12:\n return [ \"docm\" ]\n\n if mimeType == applicationmsword:\n return [ \"doc\" ]\n\n if mimeType == imagexadobedng:\n return [ \"dng\" ]\n\n if mimeType == applicationvnddna:\n return [ \"dna\" ]\n\n if mimeType == applicationxapplediskimage:\n return [ \"dmg\" ]\n\n if mimeType == imagevnddjvu:\n return [ \"djvu\", \"djv\" ]\n\n if mimeType == messagedispositionnotification:\n return [ \"disposition-notification\" ]\n\n if mimeType == applicationvndmobiusdis:\n return [ \"dis\" ]\n\n if mimeType == applicationxdirector:\n return [ \"dir\", \"dxr\", \"cst\", \"cct\", \"cxt\", \"w3d\", \"fgd\", \"swa\" ]\n\n if mimeType == textxpatch:\n return [ \"diff\", \"patch\" ]\n\n if mimeType == applicationdicom:\n return [ \"dicomdir\", \"dcm\" ]\n\n if mimeType == textxc:\n return [ \"dic\" ]\n\n if mimeType == applicationxdiadiagram:\n return [ \"dia\" ]\n\n if mimeType == applicationxdgccompressed:\n return [ \"dgc\" ]\n\n if mimeType == audioxdff:\n return [ \"dff\" ]\n\n if mimeType == applicationvnddreamfactory:\n return [ \"dfac\" ]\n\n if mimeType == applicationxdesktop:\n return [ \"desktop\", \"kdelnk\" ]\n\n if mimeType == applicationxx509cacert:\n return [ \"der\", \"crt\", \"cert\", \"pem\" ]\n\n if mimeType == applicationvnddebianbinarypackage:\n return [ \"deb\", \"udeb\" ]\n\n if mimeType == imagexdds:\n return [ \"dds\" ]\n\n if mimeType == applicationvndsyncmldmddf_xml:\n return [ \"ddf\" ]\n\n if mimeType == applicationvndfujixeroxddd:\n return [ \"ddd\" ]\n\n if mimeType == applicationvndomadd2_xml:\n return [ \"dd2\" ]\n\n if mimeType == textvndcurldcurl:\n return [ \"dcurl\" ]\n\n if mimeType == imagexkodakdcr:\n return [ \"dcr\" ]\n\n if mimeType == textxdcl:\n return [ \"dcl\" ]\n\n if mimeType == applicationxdocbook_xml:\n return [ \"dbk\", \"docbook\" ]\n\n if mimeType == applicationxdbf:\n return [ \"dbf\" ]\n\n if mimeType == applicationdavmount_xml:\n return [ \"davmount\" ]\n\n if mimeType == textxdart:\n return [ \"dart\" ]\n\n if mimeType == applicationxdar:\n return [ \"dar\" ]\n\n if mimeType == applicationvndmobiusdaf:\n return [ \"daf\" ]\n\n if mimeType == modelvndcollada_xml:\n return [ \"dae\" ]\n\n if mimeType == textxdsrc:\n return [ \"d\", \"di\" ]\n\n if mimeType == applicationprscww:\n return [ \"cww\" ]\n\n if mimeType == applicationcwl:\n return [ \"cwl\" ]\n\n if mimeType == applicationxappleworksdocument:\n return [ \"cwk\" ]\n\n if mimeType == textvndcurl:\n 
return [ \"curl\" ]\n\n if mimeType == imagexwinbitmap:\n return [ \"cur\" ]\n\n if mimeType == applicationxcue:\n return [ \"cue\" ]\n\n if mimeType == applicationcuseeme:\n return [ \"cu\" ]\n\n if mimeType == textcsvschema:\n return [ \"csvs\" ]\n\n if mimeType == textcsv:\n return [ \"csv\" ]\n\n if mimeType == textcss:\n return [ \"css\" ]\n\n if mimeType == applicationvndcommonspace:\n return [ \"csp\" ]\n\n if mimeType == applicationxcompressediso:\n return [ \"cso\" ]\n\n if mimeType == chemicalxcsml:\n return [ \"csml\" ]\n\n if mimeType == applicationvndcitationstylesstyle_xml:\n return [ \"csl\" ]\n\n if mimeType == applicationxcsh:\n return [ \"csh\" ]\n\n if mimeType == textxcsharp:\n return [ \"cs\" ]\n\n if mimeType == applicationvndrigcryptonote:\n return [ \"cryptonote\" ]\n\n if mimeType == applicationxchromeextension:\n return [ \"crx\" ]\n\n if mimeType == imagexcanoncrw:\n return [ \"crw\" ]\n\n if mimeType == applicationpkixcrl:\n return [ \"crl\" ]\n\n if mimeType == textxcredits:\n return [ \"credits\" ]\n\n if mimeType == applicationxmscardfile:\n return [ \"crd\" ]\n\n if mimeType == imagexcanoncr3:\n return [ \"cr3\" ]\n\n if mimeType == imagexcanoncr2:\n return [ \"cr2\" ]\n\n if mimeType == textxcrystal:\n return [ \"cr\" ]\n\n if mimeType == applicationmaccompactpro:\n return [ \"cpt\" ]\n\n if mimeType == textxc__src:\n return [ \"cpp\", \"cxx\", \"cc\", \"c\" ]\n\n if mimeType == applicationcpl_xml:\n return [ \"cpl\" ]\n\n if mimeType == applicationxcpiocompressed:\n return [ \"cpiogz\" ]\n\n if mimeType == applicationxcpio:\n return [ \"cpio\" ]\n\n if mimeType == applicationxcore:\n return [ \"core\" ]\n\n if mimeType == textxcopying:\n return [ \"copying\" ]\n\n if mimeType == applicationxmsdownload:\n return [ \"com\", \"bat\" ]\n\n if mimeType == applicationvndcoffeescript:\n return [ \"coffee\" ]\n\n if mimeType == applicationvndrimcod:\n return [ \"cod\" ]\n\n if mimeType == imagexcmx:\n return [ \"cmx\" ]\n\n if mimeType == applicationvndyellowrivercustommenu:\n return [ \"cmp\" ]\n\n if mimeType == chemicalxcml:\n return [ \"cml\" ]\n\n if mimeType == chemicalxcmdf:\n return [ \"cmdf\" ]\n\n if mimeType == applicationvndcosmocaller:\n return [ \"cmc\" ]\n\n if mimeType == textxcmake:\n return [ \"cmake\", \"cmakeliststxt\" ]\n\n if mimeType == applicationxmsclip:\n return [ \"clp\" ]\n\n if mimeType == applicationvndcrickclicker:\n return [ \"clkx\" ]\n\n if mimeType == applicationvndcrickclickerwordbank:\n return [ \"clkw\" ]\n\n if mimeType == applicationvndcrickclickertemplate:\n return [ \"clkt\" ]\n\n if mimeType == applicationvndcrickclickerpalette:\n return [ \"clkp\" ]\n\n if mimeType == applicationvndcrickclickerkeyboard:\n return [ \"clkk\" ]\n\n if mimeType == applicationxjava:\n return [ \"class\" ]\n\n if mimeType == applicationvndclaymore:\n return [ \"cla\" ]\n\n if mimeType == textxopenclsrc:\n return [ \"cl\" ]\n\n if mimeType == applicationnode:\n return [ \"cjs\" ]\n\n if mimeType == applicationvndmsartgalry:\n return [ \"cil\" ]\n\n if mimeType == applicationvndanserwebcertificateissueinitiation:\n return [ \"cii\" ]\n\n if mimeType == chemicalxcif:\n return [ \"cif\" ]\n\n if mimeType == applicationxkchart:\n return [ \"chrt\" ]\n\n if mimeType == applicationvndmshtmlhelp:\n return [ \"chm\" ]\n\n if mimeType == applicationxmamechd:\n return [ \"chd\" ]\n\n if mimeType == applicationxchat:\n return [ \"chat\" ]\n\n if mimeType == textxchangelog:\n return [ \"changelog\" ]\n\n if mimeType == imagecgm:\n return [ \"cgm\" ]\n\n if 
mimeType == applicationxcfscompressed:\n return [ \"cfs\" ]\n\n if mimeType == applicationpkixcert:\n return [ \"cer\" ]\n\n if mimeType == applicationvndcinderella:\n return [ \"cdy\" ]\n\n if mimeType == applicationvndchemdraw_xml:\n return [ \"cdxml\" ]\n\n if mimeType == chemicalxcdx:\n return [ \"cdx\" ]\n\n if mimeType == applicationvndcoreldraw:\n return [ \"cdr\" ]\n\n if mimeType == applicationcdmiqueue:\n return [ \"cdmiq\" ]\n\n if mimeType == applicationcdmiobject:\n return [ \"cdmio\" ]\n\n if mimeType == applicationcdmidomain:\n return [ \"cdmid\" ]\n\n if mimeType == applicationcdmicontainer:\n return [ \"cdmic\" ]\n\n if mimeType == applicationcdmicapability:\n return [ \"cdmia\" ]\n\n if mimeType == applicationvndmediastationcdkey:\n return [ \"cdkey\" ]\n\n if mimeType == applicationxdiscjugglercdimage:\n return [ \"cdi\" ]\n\n if mimeType == applicationcdfx_xml:\n return [ \"cdfx\" ]\n\n if mimeType == applicationxnetcdf:\n return [ \"cdf\", \"nc\" ]\n\n if mimeType == applicationvndcontactcmsg:\n return [ \"cdbcmsg\" ]\n\n if mimeType == applicationccxml_xml:\n return [ \"ccxml\" ]\n\n if mimeType == applicationxcocoa:\n return [ \"cco\" ]\n\n if mimeType == applicationxccmx:\n return [ \"ccmx\" ]\n\n if mimeType == applicationvndcomicbook_zip:\n return [ \"cbz\" ]\n\n if mimeType == applicationxcbt:\n return [ \"cbt\" ]\n\n if mimeType == applicationvndcomicbookrar:\n return [ \"cbr\" ]\n\n if mimeType == textxcobol:\n return [ \"cbl\", \"cob\" ]\n\n if mimeType == applicationxcbr:\n return [ \"cba\" ]\n\n if mimeType == applicationxcb7:\n return [ \"cb7\" ]\n\n if mimeType == applicationvndmspkiseccat:\n return [ \"cat\" ]\n\n if mimeType == applicationvndcurlcar:\n return [ \"car\" ]\n\n if mimeType == audioxcaf:\n return [ \"caf\" ]\n\n if mimeType == applicationvndmscabcompressed:\n return [ \"cab\" ]\n\n if mimeType == applicationvndclonkc4group:\n return [ \"c4g\", \"c4d\", \"c4f\", \"c4p\", \"c4u\" ]\n\n if mimeType == applicationvndcluetrustcartomobileconfigpkg:\n return [ \"c11amz\" ]\n\n if mimeType == applicationvndcluetrustcartomobileconfig:\n return [ \"c11amc\" ]\n\n if mimeType == applicationxbzip:\n return [ \"bz2\", \"bz\" ]\n\n if mimeType == imageprsbtif:\n return [ \"btif\", \"btf\" ]\n\n if mimeType == modelvndvalvesourcecompiledmap:\n return [ \"bsp\" ]\n\n if mimeType == applicationxbsdiff:\n return [ \"bsdiff\" ]\n\n if mimeType == applicationxbpspatch:\n return [ \"bps\" ]\n\n if mimeType == applicationxbzip2:\n return [ \"boz\" ]\n\n if mimeType == applicationvndpreviewsystemsbox:\n return [ \"box\" ]\n\n if mimeType == imagebmp:\n return [ \"bmp\", \"dib\" ]\n\n if mimeType == applicationvndbalsamiqbmml_xml:\n return [ \"bmml\" ]\n\n if mimeType == applicationvndbmi:\n return [ \"bmi\" ]\n\n if mimeType == applicationxblender:\n return [ \"blend\", \"blender\" ]\n\n if mimeType == applicationxblorb:\n return [ \"blb\", \"blorb\" ]\n\n if mimeType == applicationoctetstream:\n return [ \"bin\", \"dms\", \"lrf\", \"mar\", \"dist\", \"distz\", \"bpk\", \"dump\", \"elc\", \"deploy\", \"dll\", \"msp\", \"msm\", \"buffer\" ]\n\n if mimeType == videovndradgamettoolsbink:\n return [ \"bik\", \"bk2\" ]\n\n if mimeType == textxbibtex:\n return [ \"bib\" ]\n\n if mimeType == applicationvndfujitsuoasysprs:\n return [ \"bh2\" ]\n\n if mimeType == applicationvndrealvncbed:\n return [ \"bed\" ]\n\n if mimeType == applicationbdoc:\n return [ \"bdoc\" ]\n\n if mimeType == applicationxfontbdf:\n return [ \"bdf\" ]\n\n if mimeType == applicationxbcpio:\n return 
[ \"bcpio\" ]\n\n if mimeType == applicationxtrash:\n return [ \"bak\", \"old\", \"sik\" ]\n\n if mimeType == imagevndpcob16:\n return [ \"b16\" ]\n\n if mimeType == applicationvndamazonmobi8ebook:\n return [ \"azw3\", \"kfx\" ]\n\n if mimeType == applicationvndamazonebook:\n return [ \"azw\" ]\n\n if mimeType == imagevndairzipacceleratorazv:\n return [ \"azv\" ]\n\n if mimeType == applicationvndairzipfilesecureazs:\n return [ \"azs\" ]\n\n if mimeType == applicationvndairzipfilesecureazf:\n return [ \"azf\" ]\n\n if mimeType == videoannodex:\n return [ \"axv\" ]\n\n if mimeType == audioannodex:\n return [ \"axa\" ]\n\n if mimeType == applicationxawk:\n return [ \"awk\" ]\n\n if mimeType == audioamrwb:\n return [ \"awb\" ]\n\n if mimeType == applicationxapplixword:\n return [ \"aw\" ]\n\n if mimeType == imageavif:\n return [ \"avif\", \"avifs\" ]\n\n if mimeType == videoxmsvideo:\n return [ \"avi\", \"avf\", \"divx\" ]\n\n if mimeType == imageavcs:\n return [ \"avcs\" ]\n\n if mimeType == imageavci:\n return [ \"avci\" ]\n\n if mimeType == textxsystemdunit:\n return [ \"automount\", \"device\", \"mount\", \"path\", \"scope\", \"slice\", \"socket\", \"swap\", \"target\", \"timer\" ]\n\n if mimeType == textxauthors:\n return [ \"authors\" ]\n\n if mimeType == audiobasic:\n return [ \"au\", \"snd\" ]\n\n if mimeType == applicationvndantixgamecomponent:\n return [ \"atx\" ]\n\n if mimeType == applicationatomsvc_xml:\n return [ \"atomsvc\" ]\n\n if mimeType == applicationatomdeleted_xml:\n return [ \"atomdeleted\" ]\n\n if mimeType == applicationatomcat_xml:\n return [ \"atomcat\" ]\n\n if mimeType == applicationatom_xml:\n return [ \"atom\" ]\n\n if mimeType == applicationvndacucorp:\n return [ \"atc\", \"acutc\" ]\n\n if mimeType == audioxmsasx:\n return [ \"asx\", \"wax\", \"wvx\", \"wmx\" ]\n\n if mimeType == imageastc:\n return [ \"astc\" ]\n\n if mimeType == applicationxasp:\n return [ \"asp\" ]\n\n if mimeType == applicationvndaccpacsimplyaso:\n return [ \"aso\" ]\n\n if mimeType == applicationvndmsasf:\n return [ \"asf\" ]\n\n if mimeType == textxcommonlisp:\n return [ \"asd\", \"fasl\", \"lisp\", \"ros\" ]\n\n if mimeType == applicationxasar:\n return [ \"asar\" ]\n\n if mimeType == applicationxapplixspreadsheet:\n return [ \"as\" ]\n\n if mimeType == imagexsonyarw:\n return [ \"arw\" ]\n\n if mimeType == applicationxarj:\n return [ \"arj\" ]\n\n if mimeType == applicationxfreearc:\n return [ \"arc\" ]\n\n if mimeType == applicationvndlotusapproach:\n return [ \"apr\" ]\n\n if mimeType == applicationxmsapplication:\n return [ \"application\" ]\n\n if mimeType == applicationxiso9660appimage:\n return [ \"appimage\" ]\n\n if mimeType == imageapng:\n return [ \"apng\" ]\n\n if mimeType == applicationvndandroidpackagearchive:\n return [ \"apk\" ]\n\n if mimeType == audioxape:\n return [ \"ape\" ]\n\n if mimeType == applicationannodex:\n return [ \"anx\" ]\n\n if mimeType == videoxanim:\n return [ \"anim19j\" ]\n\n if mimeType == applicationxnavianimation:\n return [ \"ani\" ]\n\n if mimeType == audioxamzxml:\n return [ \"amz\" ]\n\n if mimeType == audioamr:\n return [ \"amr\" ]\n\n if mimeType == applicationvndamigaami:\n return [ \"ami\" ]\n\n if mimeType == applicationxalz:\n return [ \"alz\" ]\n\n if mimeType == applicationvnddvbait:\n return [ \"ait\" ]\n\n if mimeType == applicationvndadobeairapplicationinstallerpackage_zip:\n return [ \"air\" ]\n\n if mimeType == audioxaiff:\n return [ \"aiff\", \"aif\" ]\n\n if mimeType == audioxaifc:\n return [ \"aifc\", \"aiffc\" ]\n\n if mimeType 
== applicationillustrator:\n return [ \"ai\" ]\n\n if mimeType == applicationvndaheadspace:\n return [ \"ahead\" ]\n\n if mimeType == applicationvndage:\n return [ \"age\" ]\n\n if mimeType == imagexapplixgraphics:\n return [ \"ag\" ]\n\n if mimeType == applicationvndibmmodcap:\n return [ \"afp\", \"listafp\", \"list3820\" ]\n\n if mimeType == applicationxfontafm:\n return [ \"afm\" ]\n\n if mimeType == applicationvndaudiograph:\n return [ \"aep\" ]\n\n if mimeType == audioadpcm:\n return [ \"adp\" ]\n\n if mimeType == applicationxamigadiskformat:\n return [ \"adf\" ]\n\n if mimeType == textxadasrc:\n return [ \"adb\", \"ads\" ]\n\n if mimeType == applicationvndacucobol:\n return [ \"acu\" ]\n\n if mimeType == applicationxace:\n return [ \"ace\" ]\n\n if mimeType == applicationvndamericandynamicsacc:\n return [ \"acc\" ]\n\n if mimeType == audioac3:\n return [ \"ac3\" ]\n\n if mimeType == applicationpkixattrcert:\n return [ \"ac\" ]\n\n if mimeType == applicationxabiword:\n return [ \"abw\", \"abwcrashed\", \"abwgz\", \"zabw\" ]\n\n if mimeType == audiovndaudibleaax:\n return [ \"aax\" ]\n\n if mimeType == applicationxauthorwareseg:\n return [ \"aas\" ]\n\n if mimeType == applicationxauthorwaremap:\n return [ \"aam\" ]\n\n if mimeType == audioaac:\n return [ \"aac\", \"adts\" ]\n\n if mimeType == applicationxauthorwarebin:\n return [ \"aab\", \"x32\", \"u32\", \"vox\" ]\n\n if mimeType == audioxpnaudibleaudio:\n return [ \"aa\" ]\n\n if mimeType == applicationxatari7800rom:\n return [ \"a78\" ]\n\n if mimeType == applicationxatari2600rom:\n return [ \"a26\" ]\n\n if mimeType == applicationxarchive:\n return [ \"a\", \"ar\" ]\n\n if mimeType == applicationx7zcompressed:\n return [ \"7z\", \"7z001\" ]\n\n if mimeType == applicationxt602:\n return [ \"602\" ]\n\n if mimeType == model3mf:\n return [ \"3mf\" ]\n\n if mimeType == video3gpp:\n return [ \"3gp\", \"3gpp\", \"3ga\" ]\n\n if mimeType == video3gpp2:\n return [ \"3g2\", \"3gp2\", \"3gpp2\" ]\n\n if mimeType == applicationxnintendo3dsexecutable:\n return [ \"3dsx\" ]\n\n if mimeType == applicationxnintendo3dsrom:\n return [ \"3ds\", \"cci\" ]\n\n if mimeType == textvndin3d3dml:\n return [ \"3dml\" ]\n\n if mimeType == applicationxgenesis32xrom:\n return [ \"32x\", \"mdx\" ]\n\n if mimeType == applicationvnd1000mindsdecisionmodel_xml:\n return [ \"1km\" ]\n\n if mimeType == applicationvndlotus123:\n return [ \"123\", \"wk1\", \"wk3\", \"wk4\", \"wks\" ]\n \n return []", "def get_imlist(path):\n return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg')]", "def file_types(self) -> Optional[List[str]]:\n return pulumi.get(self, \"file_types\")", "def get_type(ext):\n if ext.lower() in Asset.SUPPORTED_IMAGE_EXT['in']:\n return 'image'\n return 'file'", "def get_image_list(path: str) -> list:\n\n return list(os.path.join(path, f)\n for f in os.listdir(path)\n if f.endswith('.jpg'))", "def get_image_list(folder):\n image_list = []\n for each_file in os.listdir(folder):\n filename, ext = os.path.splitext(each_file)\n if ext == '.gif':\n image_list.append(each_file)\n return image_list", "def get_filekinds(self, dataset=None):\n return [self.filekind]", "def get_file_extensions(self):\n return [ 'img', 'qcow', 'qcow2' ]", "def list_type_in_dir(path, extension):\n path, extension = check_args(path, extension)\n files = os.listdir(path)\n file_list = [os.path.join(path, f)\n for f in fnmatch.filter(files, '*' + extension)]\n\n return file_list", "def formats():\n if PIL_ENABLED:\n return 'BMP', 'EPS', 'GIF', 'JPEG', 'MSP', 
'PCX', 'PNG', 'SVG', 'TIFF', 'XBM'\n else:\n return 'EPS', 'SVG'", "def get_img_files(pth: pathlib.Path) -> typing.List[pathlib.Path]:\n patterns = [glob.iglob(os.path.join(pth, \"*.\" + e)) for e in _img_extensions]\n de_nested = [f for f_ in patterns for f in f_]\n return de_nested", "def get_files_from_dir(dirname, type=None):\n images = [filename for filename in listdir(dirname) if isfile(join(dirname, filename))]\n\n if type is not None:\n images = [filename for filename in images if filename.endswith(type)]\n\n return images", "def getWebImgType_file(filename):\n data = open(filename,'rb').read(11)\n\n # JPG\n if data[:4] == b'\\xff\\xd8\\xff\\xe0' and data[6:11] == b'JFIF\\x00':\n return 'JPEG'\n # PNG\n if data[:6] == b'\\x89PNG\\r\\n':\n return 'PNG'\n # GIF\n if data[:3] == b'\\x47\\x49\\x46\\x38':\n return 'GIF'\n\n # Format not recognised\n return False", "def getimagelist(folder):\n imagefolder = Path(folder) \n imagelist = imagefolder.glob(\"**/*.png\") \n return list(imagelist)", "def get_sorted_img_list():\n dirPath=settings.BASE_DIR\n imgdir=\"/pttWeb/static/topicmodel\"\n fileID=glob.glob(dirPath+imgdir+\"/*.png\")\n fileID=[i.replace('/home/stream/Documents/minimum_django/pttWeb/static/','') for i in fileID]\n fileID=[Week_Image(i) for i in fileID]\n fileID.sort(key=lambda x: x.date, reverse=True)\n #translate . to / since javascript parsing date has some issue!\n fileID=[(i.filename,date_trans_z(i.date.strftime(\"%Y.%m.%d\"))) for i in fileID]\n return fileID", "def _fetch_all_images(self, path) -> List[str]:\n files_all = []\n\n for ext in self.exts:\n files_all.extend(glob.glob(join(path, ext)))\n\n return files_all", "def get_sorted_image_files(directory,priority='png'):\n \n #First get a list of file, start with priority, then all other image file types\n im_types = ['png','jpg','bmp','tif']\n im_types.remove(priority)\n \n file_list = glob(directory+'/*.'+priority)\n if not file_list:\n for im_type in im_types:\n file_list = glob(directory+'/*.'+im_type)\n if file_list:\n break\n\n #Currently assume standard mraw output filename\n sorted_list = sorted(file_list,key=lambda file_name: int(file_name.split('.')[0].split('S00')[-1][3:]))\n #print(file_list)\n #print(sorted_list)\n\n return sorted_list", "def _get_image_type_templates():\n yaml_file = os.path.join(ROOT_DIR, 'docker', 'image_types.yaml')\n all_templates = yaml_utils.read(yaml_file)\n return all_templates", "def image_media_type(name):\n return name.endswith(('.png', '.jpg', '.jpeg', '.gif', '.tiff', '.tif',\n '.svg'))", "def get_images(path, ext=\".jpg\"):\n return get_files(path, ext)", "def get_imlist(path):\n return [\n os.path.join(path, f) for f in os.listdir(path) if f.endswith('.bmp')\n ]", "def get_allowed_file_types(self):\n return self.allowed_file_types", "def parse(image_path):\n if image_path[-1] != '/': image_path += '/'\n images = sorted(os.listdir(image_path))\n if images[0] == '.directory':\n images = images[1:]\n if images[0] == '.DS_Store':\n images = images[1:]\n return images", "def listar_extension(dir_name, ext='.png'):\n\n list_ext = []\n for root, dirs, files in os.walk(directorio):\n for name in files:\n if ext in name:\n list_ext.append(name)\n for name in dirs:\n if ext in name:\n list_ext.append(name)\n\n return list_ext", "def get_lst_images(file_path):\n return [i for i in os.listdir(file_path) if i != '.DS_Store']", "def make_image_list(directory):\r\n\tonly_files = [file for file in listdir(directory) if isfile(join(directory, file))]\r\n\treturn only_files", "def 
populate_image_lists():\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_a\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n images_a.append(path.path)\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_b\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n images_b.append(path.path)", "def file_type(filepath):\n imexts = ['.png', '.bmp', '.jpg', 'jpeg']\n textexts = ['.csv', '.txt']\n if filepath.endswith('.hdf5') or filepath.endswith('.h5'):\n return 'hdf5'\n if any([filepath.endswith(ext) for ext in textexts]):\n return 'delim'\n if filepath.endswith('.grm.raw'):\n return 'grm.raw'\n if filepath.endswith('.npy'):\n return 'npy'\n if _is_bed(filepath):\n return 'bed'\n if _is_gen(filepath):\n return 'gen'\n if any([filepath.endswith(ext) for ext in imexts]):\n return 'image'\n return 'unknown'", "def read_image_files(folder_path: str, file_type='jpg') -> list:\n\n images = []\n folder_file_list = get_folder_filenames(folder_path)\n\n if file_type == 'png':\n for imgPath in folder_file_list:\n images.append(cv2.imread(f'{folder_path}/{imgPath}', cv2.IMREAD_UNCHANGED))\n else:\n for imgPath in folder_file_list:\n images.append(cv2.imread(f'{folder_path}/{imgPath}'))\n\n return images", "def filterImages(files, cfg):\r\n regex = \"\\.(\" + \"|\".join(cfg.image_formats) + \")$\"\r\n #filter(lambda s: re.match(regex, s), files)\r\n return [s for s in files if re.findall(regex, s)]", "def find_all_images_in_folder(path_to_folder):\n import os, os.path\n \n imgs = [] \n valid_images = [\".jpg\",\".gif\",\".png\",\".jpeg\"]\n for f in os.listdir(path_to_folder):\n pre,ext = os.path.splitext(f) \n if(ext.lower() in valid_images) and not (pre.endswith(\"thumbnail\")):\n #imgs.append( [os.path.join(path_to_folder,pre),ext] )\n imgs.append( [pre ,ext] )\n return imgs", "def is_filetype(img_path, formats=[\"jpg\", \"png\", \"gif\", \"pgm\", \"tif\", \"ppm\"]):\n # formats = [\"jpg\", \"png\", \"gif\", \"pgm\"]\n end = img_path[-3:]\n return os.path.isfile(img_path) and (end in formats)", "def list_a_file_type(path, extension):\n path, extension = check_args(path, extension)\n file_list = [os.path.join(dirpath, f)\n for dirpath, dirnames, files in os.walk(path)\n for f in fnmatch.filter(files, '*' + extension)]\n\n return file_list", "def list_all_image(path, valid_exts=VALID_IMAGE_EXTS):\n for filename in os.listdir(path):\n bname, ext = os.path.splitext(filename)\n if ext.lower() not in VALID_IMAGE_EXTS:\n continue\n filepath = os.path.join(path, filename)\n yield strutils.decode(filepath)", "def get_files():\n\n img_dir = '../ADE20K_2016_07_26/full_data/images/validation/'\n sem_dir = '../ADE20K_2016_07_26/full_data/annotations/validation/'\n ins_dir = '../ADE20K_2016_07_26/full_data/annotations_instance/validation/'\n\n img_files = os.listdir(img_dir)\n sem_files = os.listdir(sem_dir)\n ins_files = os.listdir(ins_dir)\n \n img_files = [ os.path.join(img_dir,item) for item in img_files ]\n sem_files = [ os.path.join(sem_dir,item) for item in sem_files ]\n ins_files = [ os.path.join(ins_dir,item) for item in ins_files ]\n \n img_files.sort()\n sem_files.sort()\n ins_files.sort()\n \n return img_files, sem_files, ins_files", "def get_img_files(images, db):\n img_dir = db.source\n if img_dir == None:\n raise ValueError('Cannot locate file without a base path. 
This method looks for it at \\\n db.source, which is not set. This should be set by the loader during DB construction!')\n img_dir = path.join(img_dir, 'img') \n locs = db.get_img_locs(images)\n titles = db.get_location_titles()\n returnval = []\n for image in images:\n loc = locs[image]\n if loc is None:\n raise ValueError('The image %s could not be found' % image)\n returnval.append(path.join(img_dir, titles[loc], str(image) + '.jpg'))\n return returnval", "def get_image_bases(image_root: str) -> list:\n return list(sorted(os.listdir(image_root), key=lambda x: tuple(\n int(x.split('.')[0].split('-')[i]) for i in range(1, len(x.split('-'))))))", "def get_filepaths(directory, filetype):\r\n\tfilePathslist = []\r\n\tfor root, directories, files in os.walk(directory):\r\n\t\tfor filename in files:\r\n\t\t\t# Join the two strings in order to form the full filepath.\r\n\t\t\tfilepath = os.path.join(root, filename)\r\n\t\t\t# include only the .jpg file extensions except their hidden/shadow files\r\n\t\t\tif filepath.endswith(filetype) and not filename.startswith('.'):\r\n\t\t\t\tfilePathslist.append(filepath) # Add it to the list.\r\n\treturn filePathslist # Self-explanatory.\r", "def return_images(directory):\r\n allfiles = os.listdir(directory)\r\n image_list = [im for im in allfiles if '.jpg' in str(im)]\r\n image_list = [directory + im for im in image_list]\r\n return image_list", "def display_file_types():\n\n print 'Available file types. Each line contains the file type and the list of extensions by those the file type is determined. To include FOOBAR file type to search use --FOOBAR, to exlude use --noFOOBAR. You can include and exclude a number of file types.'\n for ftype, extensions in TYPES().iteritems():\n print '%s: %s' % (ftype, ', '.join(extensions))", "def get_images(path):\n\n # Cast path to absolute path\n absolute = abspath(path)\n\n img_lis = [] # Holds images in a folder\n file_lis = get_files(absolute)\n\n # Now get the images within file list\n img_lis = [f for f in file_lis if is_filetype(f)]\n\n return img_lis", "def get_imlist2(path):\n return [\n os.path.join(path, f) for f in os.listdir(path) if f.endswith('.ppm')\n ]", "def _ReadImageList(list_path):\n # with tf.gfile.GFile(list_path, 'r') as f:\n # image_paths = f.readlines()\n # image_paths = [entry.rstrip() for entry in image_paths]\n # return image_paths\n image_paths=[]\n for dir, subdir, files in os.walk(list_path):\n for file in files:\n image_paths.append(os.path.join(dir, file))\n return sorted(image_paths)", "def get_mimetype_format(photo_data):\n mime_type = photo_data.mimetype\n image_format = mime_type.split('/')[1]\n return mime_type, image_format", "def image_format(img_f):\n # First use the below explicit extensions to identify image file prospects\n ext = {\n \"JPG\": \"jpeg\",\n \"JPEG\": \"jpeg\",\n \"GIF\": \"gif\",\n \"TGA\": \"tga\",\n \"IFF\": \"iff\",\n \"PPM\": \"ppm\",\n \"PNG\": \"png\",\n \"SYS\": \"sys\",\n \"TIFF\": \"tiff\",\n \"TIF\": \"tiff\",\n \"EXR\": \"exr\",\n \"HDR\": \"hdr\",\n }.get(os.path.splitext(img_f)[-1].upper(), \"\")\n # Then, use imghdr to really identify the filetype as it can be different\n if not ext:\n # maybe add a check for if path exists here?\n print(\" WARNING: texture image has no extension\") # too verbose\n\n ext = what(img_f) # imghdr is a python lib to identify image file types\n return ext", "def _parse_file_path(path, ftype='all'):\n\n # Make sure we have a proper path to the images to use\n if path is None:\n path = os.path.join(os.getcwd(), 
'df_utils/beps_data_gen_images')\n else:\n path = os.path.abspath(path)\n\n # Get all files in directory\n file_list = os.listdir(path)\n\n # If no file type specified, return full list\n if ftype == 'all':\n return file_list\n\n # Remove files of type other than the request ftype from the list\n new_file_list = []\n for this_thing in file_list:\n # Make sure it's really a file\n if not os.path.isfile(os.path.join(path, this_thing)):\n continue\n\n split = os.path.splitext(this_thing)\n ext = split[1]\n if ext == ftype:\n new_file_list.append(os.path.join(path, this_thing))\n\n return new_file_list", "def getList(self):\n labelMap = {}\n imageMap = {}\n key = []\n index = 0\n\n for root, dirs, files in os.walk(self.path_data):\n for file in files:\n # If .png or .jpg file found then\n if file.endswith(tuple(config.imageFormat)):\n key.append(index)\n labelMap[index] = preprocessing.getLabel(file)\n imageMap[index] = os.path.join(root, file)\n\n index += 1\n\n else:\n continue\n\n return key, imageMap, labelMap", "def CollectImageFilenames(self):\n # Match all image extensions but not the filenmae of the of beamer pdf\n regex_img = re.compile(\n r'^(?!{}).*\\.(jpg|png|pdf)'.format(self._filename.replace('.tex', '')))\n # regex_img = re.compile(r'^(?!test)'.format(self._filename.replace('.tex', '')))\n files = [f for f in os.listdir(os.getcwd())\n if regex_img.search(f)]\n return files", "def filename_type(filename):\n import re\n\n nii_re = re.compile(\".+(nii.gz)$|.+(nii)$\")\n npy_re = re.compile(\".+(npy)$|.+(npz)$\")\n\n\n if len(nii_re.findall(filename)):\n return 'nii'\n elif len(npy_re.findall(filename)):\n return 'npy'\n return None", "def read_files(self):\n files = []\n # if this is test folder then there are no labels\n if 'test' in self.list_path:\n for item in self.img_list:\n image_path = item\n name = os.path.splitext(os.path.basename(image_path[0]))[0]\n files.append({\n \"img\": image_path[0],\n \"name\": name,\n })\n else:\n for item in self.img_list:\n image_path, label_path = item\n name = os.path.splitext(os.path.basename(label_path))[0]\n files.append({\n \"img\": image_path,\n \"label\": label_path,\n \"name\": name,\n \"weight\": 1\n })\n return files", "def get_img_list(self):\n if self.list_flag == \"train\":\n self.img_list_all = [line.rstrip('\\n') for line in open(os.path.join(self.dataset_dir, 'Image_sets/car_imagenet_' + self.list_flag + '.txt'))]\n print(\"Number of Train image: %d.\" % len(self.img_list_all))\n\n elif self.list_flag == \"val\":\n self.img_list_all = [line.rstrip('\\n') for line in open(os.path.join(self.dataset_dir, 'Image_sets/car_imagenet_' + self.list_flag + '.txt'))]\n print(\"Number of val image: %d.\" % len(self.img_list_all))\n\n return self.img_list_all", "def getImages(path):\n files = list()\n\n for f in listdir(path):\n file = join(path, f)\n if isfile(file):\n files.append(getImage(file))\n\n return files", "def SupportedFiletypes( self ):\n return ['plaintex', 'tex']", "def SupportedFiletypes( self ):\n return ['plaintex', 'tex']", "def dir_ftype(directory,extension):\n\textension = extension.replace(\".\",\"\") # make sure theres no \".\"\n\tfnames = directory+os.sep+\"*\"+\".\"+extension\n\tfnames = glob.glob(fnames)\n\tfnames = np.sort(fnames) # order files from 0 to last\n\treturn fnames", "def get_files_from_of_type(path: str, ext: str) -> List[str]:\n files = []\n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, \"*.\" + str(ext)):\n files.append(os.path.join(root, filename))\n if 
not files:\n logging.error(\"No language files found in folder: \" + str(os.sep.join([convert_vars.BASE_PATH, \"source\"])))\n logging.debug(f\" --- found {len(files)} files of type {ext}. Showing first few:\\n* \" + str(\"\\n* \".join(files[:3])))\n return files", "def _find_image_ext(path):\n path = os.path.splitext(path)[0]\n for ext in _KNOWN_IMG_EXTS:\n this_path = '%s.%s' % (path, ext)\n if os.path.isfile(this_path):\n break\n else:\n ext = 'png'\n return ('%s.%s' % (path, ext), ext)", "def get_files(imagedir, ext='jpg|jpeg|bmp|png'):\n rex = re.compile(r'^.*\\.({})$'.format(ext), re.I)\n return [os.path.join(imagedir,base) for base in os.listdir(imagedir)\n if rex.match(base)]", "def _get_images(image_path):\n logger.debug(\"Getting images: '%s'\", image_path)\n if not os.path.isdir(image_path):\n logger.debug(\"Folder does not exist\")\n return None\n files = [os.path.join(image_path, f)\n for f in os.listdir(image_path) if f.lower().endswith((\".png\", \".jpg\"))]\n logger.debug(\"Image files: %s\", files)\n return files", "def get_dir_and_file_list(path):\r\n dList = os.listdir(path)\r\n dirList = []\r\n fileList = []\r\n\r\n for item in dList:\r\n \r\n if os.path.isdir(os.path.join(path, item)):\r\n dirList.append(item)\r\n elif os.path.isfile(os.path.join(path, item)):\r\n if any(image_type in item.lower() for image_type in image_types):\r\n preview = image_preview(os.path.join(path, item))\r\n fileList.append((item, preview))\r\n else:\r\n fileList.append((item, None))\r\n\r\n return dirList, fileList", "def get_file_list(folder):\n\tfilelist = []\n\tfor file in os.listdir(folder):\n\t\tif file.endswith('.png'):\n\t\t\tfilelist.append(file)\n\treturn filelist", "def get_image_list(pattern='alpes_%d.jpg', start=0):\n image_list = []\n k = start\n while path.exists(pattern % k):\n image_list.append(pattern % k)\n k += 1\n return image_list", "def get_ext(path):\n if os.path.exists(path):\n return imghdr.what(path)\n else:\n print(\"fichier n'existe pas\")", "def create_file_list(params):\n data_dir = params.get('data_dir', '')\n params['file_list'] = \".tmp.txt\"\n imgtype_list = {'jpg', 'bmp', 'png', 'jpeg', 'rgb', 'tif', 'tiff'}\n with open(params['file_list'], \"w\") as fout:\n tmp_file_list = os.listdir(data_dir)\n for file_name in tmp_file_list:\n file_path = os.path.join(data_dir, file_name)\n if imghdr.what(file_path) not in imgtype_list:\n continue\n fout.write(file_name + \" 0\" + \"\\n\")", "def get_images_of_folder(folder):\n\n Settings.dev_print(\"getting images of folder: {}\".format(folder.get_title()))\n if not folder: return []\n imgs = []\n files = []\n valid_images = [\".jpg\",\".gif\",\".png\",\".tga\",\".jpeg\"]\n for f in os.listdir(folder.get_path()):\n ext = os.path.splitext(f)[1]\n if ext.lower() not in valid_images:\n continue\n file = File()\n setattr(file, \"path\", os.path.join(folder.get_path(),f))\n files.append(file)\n Settings.maybe_print(\"image path: {}\".format(os.path.join(folder.get_path(),f)))\n return files", "def image_names(self):\n pass", "def find_images(folder_path, extensions=('.png', '.jpg', '.tif')):\n image_full_paths = []\n for filename in os.listdir(folder_path):\n basename, extension = os.path.splitext(filename)\n if extension.lower() in extensions:\n image_full_paths.append(os.path.join(folder_path, filename))\n return image_full_paths", "def environmentImages(dirPath):\n images = []\n for f in os.listdir(dirPath):\n if os.path.isfile(os.path.join(dirPath, f)):\n name, ext = os.path.splitext(f)\n if ext.lower().replace(\".\", 
\"\") in [\"hdr\", \"exr\", \"rad\", \"tif\", \"tiff\"]:\n images.append(f)\n return sorted(images)", "def test_extensions(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n need_result = []\n for i in range(1, 4):\n need_result.append(os.path.join(dummy_folder, 'memes', 'meme monty python', 'meme{}.jpg'.format(i)))\n need_result.append(os.path.join(dummy_folder, 'memes', 'meme1.jpg'))\n need_result.append(os.path.join(dummy_folder, 'memes', 'meme2.png'))\n need_result.append(os.path.join(dummy_folder, 'memes', 'meme4.jpg'))\n need_result.append(os.path.join(dummy_folder, 'memes', 'meme4.png'))\n\n for i in ['antigravity.png',\n 'egg.png',\n 'holy_grenade.png',\n 'spam.jpg',\n ]:\n need_result.append(os.path.join(dummy_folder, i))\n\n result = listdir(dummy_folder, full_path=True, only_files=True, walk=True, extensions=['jpg', 'png'])\n self.assertEqual(sorted(result), sorted(need_result))\n result = listdir(dummy_folder, full_path=True, only_files=True, walk=True, extensions=['.jpg', '.png'])\n self.assertEqual(sorted(result), sorted(need_result))\n result = listdir(dummy_folder, full_path=True, only_files=True, walk=True, extensions=['.JPG', 'png'])\n self.assertEqual(sorted(result), sorted(need_result))\n result = listdir(dummy_folder, full_path=True, only_files=True, walk=True, extensions=('.JPG', 'png'))\n self.assertEqual(sorted(result), sorted(need_result))\n result = listdir(dummy_folder, full_path=True, only_files=True, walk=True, extensions={'.JPG', 'png'})\n self.assertEqual(sorted(result), sorted(need_result))", "def get_files(metadata_dir, images_dir, image_format, metadata_format):\n all_metadata_files = [x for x in set(os.listdir(metadata_dir)) if x.endswith(metadata_format)]\n all_image_files = [x for x in set(os.listdir(images_dir)) if x.endswith(image_format)]\n images_and_metadata = {}\n for metadata, image in itertools.product(all_metadata_files, all_image_files):\n if image.split('.')[0] in metadata:\n images_and_metadata[metadata] = image\n return images_and_metadata", "def imageparts(msg):\n # Don't want a set here because we want to be able to process them in\n # order.\n return filter(lambda part:\n part.get_content_type().startswith('image/'),\n msg.walk())", "def get_file_path_list(indir):\n\n assert os.path.exists(indir), 'indir is not exits.'\n\n img_file_list = os.listdir(indir)\n img_file_list = sorted(img_file_list,\n key=lambda k: int(re.match(r'(\\d+)', k).group()))\n img_list = []\n for i, img in enumerate(img_file_list):\n if '.png' in img:\n path_ = os.path.join(indir, img)\n img_list.append(path_)\n return img_list", "def ComputeFileTypes(self):\n for rel_path, file_data in self._files.iteritems():\n if 'ftype' in file_data:\n continue\n ftype = self._file_type_decoder.GetType(rel_path)\n if ftype:\n file_data['ftype'] = ftype", "def check_and_image_shape(item: ValueType, shape: List) -> List:\n if len(item.shape) > 0:\n item = str(item[0])\n if item.endswith(('.jpg', '.jpeg', '.png')):\n import cv2\n im = cv2.imread(item)\n if im is not None:\n return list(im.shape)\n return shape", "def iterate_path(path):\n fl_lst = []\n for fn in os.listdir(path):\n if fn.endswith('.jpg') or fn.endswith('.png'):\n fname, ext = os.path.splitext(fn)\n tn = fname + '.txt'\n fl_lst.append([fn, tn])\n return fl_lst", "def generate_image_info(path):\n file_types = ['*.png', '*.jpg', '*.gif']\n for file_type in file_types:\n for img_path in glob.glob(path + file_type):\n img = Image.open(img_path)\n img_name = img_path.split('/')[-1].split('.')[0]\n with 
open(path + 'resolution.txt', 'a') as file:\n file.write(img_name + ' ' + str(img.size[0]) +\n ' ' + str(img.size[1]) + '\\n')", "def get_folder_files(folder, types):\n files_grabed = []\n for file_type in types:\n files_grabed.extend(glob.glob(os.path.join(folder, file_type)))\n return files_grabed", "def get_lists_in_dir(dir_path):\n image_list = []\n\n for filename in glob.glob(dir_path + '/*.jpg'):\n image_list.append(filename)\n return image_list", "def list_pictures(directory, ext='JPEG'):\n return [os.path.join(root, f)\n for root, _, files in os.walk(directory) for f in files\n if re.match(r'([\\w]+\\.(?:' + ext + '))', f)]", "def getMimeTypes(self): #$NON-NLS-1$\r", "def list_images():\n return json_response(list_manifests())", "def getFilePaths():\n \n image_dir = r'/hpc/wfok007/mpi_heart/Training Set'\n mask_paths = []\n image_paths = []\n for root, dirs, files in os.walk(image_dir, topdown=False):\n for name in files:\n if name == 'laendo.nrrd':\n mask_paths.append(os.path.join(root, name))\n elif name == 'lgemri.nrrd':\n image_paths.append(os.path.join(root, name))\n else:\n print ('%s is unknown' %name)\n return mask_paths, image_paths", "def get_filekinds(self, dataset):\n if isinstance(dataset, str):\n from crds import data_file\n instrument = data_file.getval(dataset, self.instrument_key)\n elif isinstance(dataset, dict):\n instrument = self.get_instrument(dataset)\n else:\n raise ValueError(\"Dataset should be a filename or header dictionary.\")\n return self.get_imap(instrument).get_filekinds(dataset)", "def list_image_names(write_folder, user_name, image_size):\n image_dir = f'{write_folder}/{user_name}/{image_size}'\n # print('image_dir',image_dir)\n return os.listdir(image_dir)", "def readPlayerImageFiles(self):\n currentPath = os.path.dirname(os.path.abspath(__file__))\n listOfFileNames=[]\n for i in os.listdir(currentPath):\n if re.match(\"player\\_\\d+\",i): #i.endswith(\".gif\")\n listOfFileNames.append(currentPath+'/'+i)\n return listOfFileNames", "def list_images(basePath, validExts=(\".jpg\", \".jpeg\", \".png\", \".bmp\", \".tif\", \".tiff\"), contains=None):\n return _list_files(basePath, validExts, contains=contains)", "def mime_type():\r\n return tuple(linecache.getline(\r\n os.path.join(os.path.abspath(os.path.dirname(__file__)), 'mimes.csv'),\r\n _random.randrange(0, 647)\r\n ).strip(\"\\n\").split(','))", "def get_ids(voc_path):\n ids = []\n print(\"voc\")\n\n files_images = glob.iglob(os.path.join(voc_path, \"*.JPEG\"))\n for x in files_images:\n name = os.path.splitext(os.path.basename(x))[0]\n ids.append(name)\n print(\"names: \", ids)\n return ids" ]
[ "0.7476918", "0.7081896", "0.6956454", "0.6846246", "0.6845208", "0.6776346", "0.6740786", "0.6740108", "0.66788965", "0.6673548", "0.6597972", "0.6581447", "0.6568904", "0.65615165", "0.6554064", "0.6504134", "0.64887166", "0.6465466", "0.6461444", "0.643755", "0.64279115", "0.6425177", "0.6411071", "0.6381234", "0.63690466", "0.6357423", "0.635532", "0.6347791", "0.63123095", "0.6305261", "0.63030183", "0.6299417", "0.6295173", "0.62800014", "0.62755334", "0.6268474", "0.6266489", "0.62304026", "0.61858624", "0.61821634", "0.61762965", "0.6176132", "0.61649334", "0.6163874", "0.61637765", "0.6163738", "0.6160622", "0.6137445", "0.6133519", "0.6130927", "0.61244845", "0.61191666", "0.6114957", "0.6101424", "0.6080767", "0.6077138", "0.60743576", "0.6071121", "0.6068722", "0.6054654", "0.60464066", "0.6037429", "0.60276854", "0.6023827", "0.6021126", "0.60162085", "0.60162085", "0.60136306", "0.599758", "0.59906036", "0.59683484", "0.5954011", "0.59517455", "0.591842", "0.5900357", "0.58979493", "0.58944756", "0.5891454", "0.58855253", "0.58842033", "0.5881178", "0.5874591", "0.58606005", "0.5859132", "0.5857943", "0.58515906", "0.5850978", "0.58452344", "0.5843755", "0.5840215", "0.5834313", "0.583395", "0.5829295", "0.58248943", "0.5824682", "0.5817901", "0.58058643", "0.5803705", "0.57997847", "0.5798071", "0.5788932" ]
0.0
-1
open a jpg file, or merge several jpg files and then open the merged image
def execute_file(self, event=None): file_list = self.get_path_list() print(file_list) if not file_list: return # merge image # fix a memory-leak bug: previously opened images were not cleared, so the image shown on the second open was still the previous one try: self.photos.destroy() except: pass self.photos.imgs = file_list merged_photo = self.photos.merge_photos() # show image try: window.destroy() except: import traceback traceback.print_exc() window.build_img_canvas() window.show_img_in_canvas(merged_photo)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_frame(path,number):\n num=str(number).zfill(3) #Zero filling\n name = glob.glob(path+\"/*\"+num+\"*\")\n if len(name)==0:\n name = glob.glob(path+\"/\"+str(number)+\".png\")\n if len(name)>1:\n print \"too many matches \",len(name),\" found\"\n name = name[0]\n img = Image.open(name)\n img = np.asarray(img)\n img.setflags(write=1)\n return img", "def load_jpgs(path, size=(224, 224)):\n fnames = os.listdir(path)\n imgs = []\n i = 0\n if i<1500:\n for f in fnames:\n f= path + '/'+f\n if (os.path.isfile(f) and os.path.getsize(f) > 0):\n if not re.match('.+(jpg|jpeg|JPEG|JPG)', f):\n continue\n try:\n #image = Image.open(os.path.join(path, f))\n image = Image.open(f)\n except OSError:\n continue # ignore corrupt files\n data = list(image.getdata())\n im = Image.new(image.mode, image.size)\n im.putdata(data)\n if im.mode != 'RGB':\n im = im.convert('RGB')\n im = crop_center_or_reshape(im, size)\n img = 2 * (np.asarray(im) / 255) - 1\n #img= np.asarray(im)\n imgs.append(img)\n i= i+1\n\n return np.array(imgs)", "def main():\n me = SimpleImage(\"images/me.JPG\")\n dinosaur = SimpleImage(\"images/dinosaur.jpg\")\n\n dinosaur.make_as_big_as(me)\n combine = magic(me, dinosaur)\n combine.show()", "def test_merge_images(self):\n test_folder = base_path +'/test_data/merging_tests/single_merge/'\n # the files are: render1.png and background.jpg\n\n background = Image.open(test_folder+\"background.jpg\")\n foreground = Image.open(test_folder+\"render1.png\")\n output, bbox = mi.merge_images(foreground, background)\n self.assertEqual((300,300),output.size)\n self.assertEqual('JPEG',output.format)", "def open_pngs_in_dir(out_dir):\n pngs = glob.glob(os.path.join(out_dir, '*png'))\n operating_system = platform.system()\n if 'Windows' in operating_system:\n os.system(\"start \" + \" \".join(pngs))\n elif 'Darwin' in operating_system:\n os.system('open ' + \" \".join(pngs))", "def im_open(path):\n\n try:\n assert os.path.isdir(path)\n #get file list in directory - glob includes full path\n files = sorted(glob.glob('{}{}*'.format(path,os.sep)), key=sort_key) \n #load the collection\n raw_stack = io.imread_collection(files)\n #turn the collection into a np array and remove extraneous OCT portion from 1025:1083 on x axis. 
(z,y,x)\n #if .bmp files are open (from pv-oct), the slicing will not affect them, the x-axis is only 540 pixels.\n stack = io.collection.concatenate_images(raw_stack)[:,:,0:1024]\n \n return stack\n\n except AssertionError:\n sys.exit(\"A non-directory object was given to the __open__ function\")", "def _open_images(training_filenames, path):\n imagePaths=[os.path.join(path,f) for f in training_filenames]\n faces=[]\n for i, imagePath in enumerate(imagePaths):\n faceImg=Image.open(imagePath).convert('L')\n faceNp=np.array(faceImg,'uint8')\n faces.append(faceNp)\n return faces", "def _open_img(self, img_name):\n try:\n img = Image.open(img_name)\n photo = ImageTk.PhotoImage(img)\n return photo\n except IOError:\n Debug.printi(\"Unable to find image \" + img_name, Debug.Level.ERROR)", "def openFile(self):\r\n from SXM import FileIO,Data\r\n fname = str(QFileDialog.getOpenFileName(self.widget,self.tr(\"Open File\"), \\\r\n \".\",FileIO.getFilterString(types=(Data.Image,))))\r\n if len(fname) > 0:\r\n root, ext = os.path.splitext(fname)\r\n self.statusBar().showMessage(self.tr(\"Loading data: %1\").arg(fname),2000)\r\n image = FileIO.fromFile(fname)\r\n image.load()\r\n imwin = ImageWindow(self,image)\r\n self.Images.append(imwin)\r\n self.updateImageList()\r\n imwin.windowModality = False\r\n imwin.show()", "def load_jpg_series(files: List[str]):\n sort_files_by_name(files)\n volume = _load_volume_from_jpg(files)\n return files, volume", "def JPGtoMatrix(path,w,h):\n listing = os.listdir(path)\n listing.sort()\n count = 0\n docFiles = []\n for infile in listing:\n count = count + 1\n docFiles.append(infile)\n matrix = np.zeros((w*h,count))\n for i in range(len(listing)):\n matrix[:,i]=JPGtoArray(join(path,listing[i]))\n return matrix,listing", "def test_single_merge(self):\n test_folder = base_path +'/test_data/merging_tests/single_test/'\n # the files are: render1.png and background.jpg\n output_file = os.path.join(test_folder, \"output1.jpg\")\n if(os.path.isfile(output_file)):\n os.unlink(output_file)\n\n mi.add_background(test_folder+\"render1.png\", test_folder+\"background.jpg\", output_file)\n self.assertTrue(os.path.isfile(output_file))\n output = Image.open(output_file)\n self.assertEqual((300,300),output.size)\n self.assertEqual('JPEG',output.format)", "def openFile(path_name):\n if os.path.isdir(path_name):\n reader = sitk.ImageSeriesReader()\n dicom_names = reader.GetGDCMSeriesFileNames(path_name)\n reader.SetFileNames(dicom_names)\n image_object = reader.Execute()\n \n elif os.path.isfile(path_name):\n image_object = sitk.ReadImage(path_name)\n\n else:\n print(\"Path name wrong.\")\n return None\n\n return image_object", "def open_images_in(directory):\n\n files = [\n filename\n for filename in os.listdir(directory)\n if \"_\" in filename and not filename.startswith(\"joined\")\n ]\n tiles = []\n if len(files) > 0:\n i = 0\n for file in files:\n pos = get_image_column_row(file)\n im = Image.open(os.path.join(directory, file))\n\n position_xy = [0, 0]\n count = 0\n for a, b in zip(pos, im.size):\n position_xy[count] = a * b\n count = count + 1\n tiles.append(\n Tile(\n image=im,\n position=pos,\n number=i + 1,\n coords=position_xy,\n filename=file,\n )\n )\n i = i + 1\n return tiles", "def main():\n fg = SimpleImage('image_contest/me.jpg')\n bg = SimpleImage('image_contest/house.png')\n bg.make_as_big_as(fg)\n combined_img = combine(bg, fg)\n combined_img.show()", "def fileCmd(self):\n filename = askopenfilename() \n self.cnvImgOrig.displayImage(filename)\n 
self.cnvImgTest.displayImage(filename)", "def openFiles(self, prog):\n prog = Utilities.normabspath(prog)\n # Open up the new files.\n self.openSourceFile(prog)", "def combineImages(path=None, imgfiles=None, cols=3, size=300):\n\n font = ImageFont.truetype(\"Arial.ttf\", 15)\n x=size\n w=20\n i=0; j=0\n if imgfiles == None:\n imgfiles = findFiles(path, 'png')\n width = cols*(x+w)\n height = int(math.ceil(float(len(imgfiles))/cols)*x)\n new_im = Image.new('RGBA', (width, height), 'white')\n for f in imgfiles:\n name = os.path.basename(f).split('.')[0]\n if not os.path.exists(f):\n continue\n im = Image.open(f)\n im.thumbnail((x,x))\n new_im.paste(im, (i*x+w,j*x+w))\n draw = ImageDraw.Draw(new_im)\n draw.text((i*x+w,j*x+w), name, (0,0,0), font=font)\n i+=1\n if i>=cols:\n i=0; j+=1\n #new_im.show()\n path = os.path.split(imgfiles[0])[0]\n new_im.save(os.path.join(path,\"summary.png\"))\n return", "def open_out_dir():\n\n pngs = glob.glob(os.path.join(out_dir, '*png'))\n operating_system = platform.system()\n if 'Windows' in operating_system:\n os.system(\"start \" + \" \".join(pngs))\n elif 'Darwin' in operating_system:\n os.system('open ' + \" \".join(pngs))", "def convert_files_to_jpeg(joblist, inputpath, tmp_jpeg_folder, poppler_path=None):\n image_list = []\n threadlist = []\n for letter in joblist:\n threadlist.append((inputpath, tmp_jpeg_folder, joblist[letter][0], joblist[letter][-1], letter, poppler_path,))\n\n with concurrent.futures.ProcessPoolExecutor() as executor:\n for _, rv in zip(joblist, executor.map(pdf_to_jpeg, threadlist)):\n for path in rv:\n image_list.append(path)\n\n _, __, tmp_free = shutil.disk_usage(tmp_jpeg_folder)\n if (tmp_free/1000000) < 100:\n return False\n\n image_list.sort()\n return image_list", "def view_media(self, obj):\n for handle in self.selected_handles():\n ref_obj = self.dbstate.db.get_object_from_handle(handle)\n mpath = media_path_full(self.dbstate.db, ref_obj.get_path())\n open_file_with_default_application(mpath)", "def merge_chips(images_files, *, win_bounds):\n datasets = [rasterio.open(p) for p in images_files]\n img, _ = rasterio.merge.merge(datasets, bounds=win_bounds, method=mean_merge_method)\n for ds in datasets:\n ds.close()\n return img", "def add_image(self, f_name,file,new_id):\r\n folder=tempfile.mktemp()\r\n os.mkdir(folder)\r\n datei=open(folder+'/'+f_name,'w+')\r\n datei.write(file.read())\r\n datei.close()\r\n val='' \r\n liste_ext=liste_val\r\n if(self.toolbox.hasProperty('eigene_formate')):\r\n self_val=self.toolbox.getProperty('eigene_formate').split(',')\r\n liste_ext=[]\r\n for x in self_val:\r\n liste_ext.append('_'+x+'.jpeg')\r\n for extension in liste_ext:\r\n #cmd='/usr/bin/convert '+folder+'/'+f_name+' -resize '+extension[1:-4]+'x'+extension[1:-4]+' '+folder+'/'+new_id+extension\r\n cmd='/usr/bin/convert '+folder+'/'+f_name+' -resize '+extension[1:-4]+' '+folder+'/'+new_id+extension\r\n order=os.popen(cmd).read()\r\n kurz_name='_'+str(f_name.split('.')[0])\r\n kurz_name=kurz_name.replace(' ','_')\r\n val=val+self.manage_addImage(id=new_id+kurz_name+extension,file=open(folder+'/'+new_id+extension),title=f_name, precondition='', content_type='',REQUEST=None)+' ' \r\n os.remove(folder+'/'+new_id+extension)\r\n os.remove(folder+'/'+f_name)\r\n os.rmdir(folder)\r\n txt=\"Datei Hochgeladen!<br>\"\r\n #my_root=self.toolbox\r\n #txt+=my_root.id+\"<br>\"\r\n #if(my_root.hasProperty('eigene_formate')):\r\n # txt+=my_root.getProperty('eigene_formate')+\"<br>\"\r\n return txt", "def createAllImageFiles(poly, name) :\n \n for i 
in range(len(poly.getPaths())):\n fileName = name + \"_\" + str(i) + \".dot\"\n imgName = name + \"_\" + str(i) + \".jpg\"\n \n Command = \"neato -Tjpeg \" + fileName + \" -o \" + imgName\n run(Command, shell=True)", "def TextureFiles():\n import shutil\n\n # first convert the .psd files to .png\n\n FbmDir = glo.outputFolder + '.fbm'\n\n for d1, d2, filenames in os.walk(FbmDir):\n for filename in filenames:\n \"\"\"filename: vitrin_diffuse.psd\n \"\"\"\n # print \"TextureFiles():\", filename\n if filename[-4:].upper() == '.PSD':\n #print \" -- FbmDir:\" , FbmDir\n #print \" -- in the if clause with filename:\" , filename\n #print \" -- glo.outputFolder\" , glo.outputFolder\n # FbmDir = '../fbx/simplelifeembedmedia.fbm'\n # filename = 'shelves_light.PSD'\n PsdToPngConverter(FbmDir, filename)\n\n # Move only the .png file to the ../png/ directory\n filename = filename[:-4] + '.png'\n src = os.path.join(FbmDir, filename)\n elif filename[0] != '.':\n src = os.path.join(FbmDir, filename)\n pass\n\n shutil.copy(src, glo.outputFolder)\n print os.path.join(glo.outputFolder, filename), \"\\n\"\n sys.stdout.flush()\n # for d1, d2, files in os.walk(glo.outputFolder):\n # if not filename in files:\n # #print \"moving: \", files, filename, not filename in files\n # shutil.copy(src, glo.outputFolder)\n # print os.path.join(glo.outputFolder, filename), \"\\n\"\n # else:\n # print \"%s/%s already exists. File not moved\" % (glo.outputFolder,filename)", "def multiopen(files):\n tempfile = write_html(files)\n\n open_in_browser(tempfile)", "def build_jpeg_preview(self, file_path, cache_path, page_id: int, extension='.jpg', size=(256,256)):\n\n # try:\n # os.mkdir(cache_path.format(d_id=document_id)+'/')\n # except OSError:\n # pass\n\n\n with open(file_path, 'rb') as odt:\n\n file_name = self.get_file_hash(file_path)\n if os.path.exists(\n '{path}{file_name}.pdf'.format(\n path=cache_path,\n file_name=file_name\n )):\n result = open(\n '{path}.pdf'.format(\n path=cache_path + file_name,\n ), 'rb')\n\n else:\n if os.path.exists(cache_path + file_name + '_flag'):\n time.sleep(2)\n self.build_pdf_preview(\n file_path=file_path,\n cache_path=cache_path,\n extension=extension\n )\n else:\n result = file_converter.office_to_pdf(odt, cache_path, file_name)\n\n input_pdf = PdfFileReader(result)\n output_pdf = PdfFileWriter()\n output_pdf.addPage(input_pdf.getPage(int(page_id)))\n output_stream = BytesIO()\n output_pdf.write(output_stream)\n output_stream.seek(0, 0)\n result2 = file_converter.pdf_to_jpeg(output_stream, size)\n\n\n\n file_name = self.get_file_hash(file_path, size)\n\n with open(\n '{path}{file_name}_{page_id}_{extension}'.format(\n file_name=file_name,\n path=cache_path,\n page_id=page_id,\n extension=extension\n ),\n 'wb') \\\n as jpeg:\n buffer = result2.read(1024)\n while buffer:\n jpeg.write(buffer)\n buffer = result2.read(1024)", "def image_process(image_info):\n path = os.path.join(cfg.IMAGESET, image_info.get(\"index\") + \".jpg\")\n if not os.path.exists(path):\n raise IOError(\"please check your file is not exists: \" + path)\n def load_image(path):\n image = Image.open(path)\n return image\n return load_image(path)", "def main():\n base_dir = '/home/sjimenez/imagenes_prueba'\n out_dir = '/home/sjimenez/easy_analysis'\n for _, _, files in os.walk(base_dir, topdown=False):\n for f in files:\n print('--------- {} ---------'.format(f))\n act_dir = osp.join(base_dir, f)\n act_im = cv2.imread(act_dir)\n if act_im is not None:\n get_image_stats(act_im, out_dir, f)\n else:\n print('Not able to open 
the image')", "def append_component_images(pldm_fw_up_pkg, image_files):\n for image in image_files:\n with open(image, \"rb\") as file:\n for line in file:\n pldm_fw_up_pkg.write(line)", "def open_image(path, w, h, antialias=True) -> ImageTk.PhotoImage:\n image = Image.open(path)\n aliasing = Image.ANTIALIAS if antialias else Image.NEAREST\n return ImageTk.PhotoImage(image.resize((w, h), aliasing))", "def show_files(file_locations):\n for file_loc in file_locations:\n show_image(file_loc)", "def handle_image(name):\n from_path = args.from_dir + name\n to_path = args.to_dir + name\n\n if width != args.width:\n subprocess.call('jpegtran -rotate 90 -grayscale ' + from_path + ' > ' \\\n + to_path, shell=True)\n else:\n subprocess.call('jpegtran -grayscale ' + from_path + ' > ' + to_path,\\\n shell=True)", "def open_image(self):\n self.orig_image = Image.open(self.filename)\n if self.in_rgb:\n self.orig_image = self.orig_image.convert(\"RGB\")\n if self.min_filter:\n self.orig_image.filter(ImageFilter.MinFilter(self.min_filter))", "def jpg(self, id, **params):\n if 'async' in params:\n params.pop('async')\n self.request('/encoded_video/' + str(id) + '/thumbnails', 'POST', body=urllib.urlencode(params))\n return True\n\n if len(params) > 0:\n params = '?' + urllib.urlencode(params)\n else:\n params = ''\n\n return self.request('/encoded_video/' + str(id) + '.jpg' + params)", "def open_image(name):\n img_name = 'input/' + name + '.png'\n return cv2.imread(img_name, cv2.IMREAD_UNCHANGED)", "def _openImage(self, fname):\n image = cv2.imread(fname,0)\n\n if(image != None):\n return image\n else:\n raise IOError, \"Image file can not be opened\"", "def imageFileProcessor(path):\n # Show all files in RawCapturedPicture\n # ..., and get the completed path files\n img_paths = []\n for ea in o_tl.showAllFiles(path):\n img_paths.append(os.path.join(path, ea))\n\n # Empty face list\n faces = []\n # Empty ID list\n IDs = []\n\n # Looping through all the image paths and loading the IDs and the faces\n for each_path in img_paths:\n # Loading the image and converting it to gray scale\n pil_img = Image.open(each_path).convert('L')\n # Converting the PIL image into numpy array\n image_numpy = np.array(pil_img, 'uint8')\n # Getting the Id from the image\n Id = int(os.path.split(each_path)[-1].split(\"_\")[1])\n # Extract the face from the training image sample\n faces.append(image_numpy)\n IDs.append(Id)\n return faces, IDs", "def open_image(image_path, mode=\"RGB\"):\n print(\"Opening image file in '%s'.\" % image_path)\n return Image.open(image_path).convert(mode)", "def compress_image(filename,k):", "def main():\n\n in_file = ('/home/desi2/candidatesp9/asteroids_decals_dr2.fits')\n out_dir = os.path.join(os.environ.get('HOME'), 'asteroid_cutouts/')\n\n cand_info = fits_table(in_file)\n # Pre-select asteroids in the ra, dec box you know they exist.\n ramin = 107\n ramax = 130\n decmin = 16\n decmax = 30\n these = np.where((cand_info.ra0>ramin)*(cand_info.ra0<ramax)*\n (cand_info.dec0>decmin)*(cand_info.dec0<decmax))[0]\n #pdb.set_trace() # Runs Python Debugger on code up to this line. 
\n cand_info = cand_info[these]\n\n urls = []\n jpgfiles = []\n for ii in range(100):\n print('Working on candidate {}'.format(ii))\n ra = cand_info.ra0[ii]\n dec = cand_info.dec0[ii]\n \n jpgurl = 'http://legacysurvey.org/viewer/jpeg-cutout-decals-dr2?ra={:.6f}&dec={:.6f}&pixscale=0.262&size=200'.format(ra, dec)\n \n jpgfile = 'obj-{:03d}.jpg'.format(ii)\n jpgfile = os.path.join(out_dir, jpgfile)\n grab = 'wget --continue -O {:s} \"{:s}\"' .format(jpgfile, jpgurl)\n print(grab)\n os.system(grab)\n #pdb.set_trace() # Runs Python Debugger on code up to this line. \n if os.stat(jpgfile).st_size < 18000: # Remove partial or empty images\n # The cut on filesize takes care of most of the bad images but\n # leaves some behind. If the restriction is any larger,\n # it can remove some valid files.\n os.remove(jpgfile)\n else:\n print(jpgurl)\n jpgfiles.append(jpgfile)\n urls.append(jpgurl)\n # for HTML file. What should the URL be?\n #print('<html>')\n #print('<head> Planet Nine Candidates </head>')\n #print('<body>')\n #for thisurl, thisjpg in zip(urls, jpgfiles):\n # print('<div class=\"image\">')\n # print('<a href=\"{}\"><img src=\"{:s}\"></a>'.format(thisurl, thisjpg))\n # print('<div class=\"caption\"> Image of {:s} </div>' .format(thisjpg))\n # print('</div>')\n #print('</body></html>')", "def addOpenFile():\n model_file = \"mobile_graph.pb\"\n label_file = \"mobile_labels.txt\"\n graph = load_graph(model_file)\n \n \n \n filename = filedialog.askopenfilename(initialdir=\"/\",title=\"Select File\",filetypes=[(\"JPEG Files\",\".jpeg .jpg\")])\n print(\"Selected file: %s\" % filename)\n image = ImageTk.PhotoImage(Image.open(filename))\n canvas.create_image(50,50,anchor=tk.NW,image=image)\n \n imgfile = filename\n \n #recognize(filename)\n \n #line ni paling penting untuk pass parameter model file dengan label file\n detectGate(graph,label_file,filename)", "def read_images_from_single_face_profile(face_profile, face_profile_name_index, dim = (120, 120)):\n index = 0\n \n print(face_profile)\n for the_file in os.listdir(face_profile):\n file_path = os.path.join(face_profile, the_file)\n \n print(file_path)\n if file_path.endswith(\".png\") or file_path.endswith(\".jpg\") or file_path.endswith(\".jpeg\") or file_path.endswith(\".pgm\"):\n \n img = cv2.imread(file_path, 0)\n # print(img.shape)\n # cv2.waitKey(0)\n img = cv2.resize(img, (120, 160))\n cv2.imwrite(file_path, img)\n \n index += 1\n\n if index == 0 : \n shutil.rmtree(face_profile)\n logging.error(\"\\nThere exists face profiles without images\")", "def _open(args):\n directory = args.directory\n if directory is None:\n directory = os.getcwd()\n\n files = []\n [files.extend(glob(os.path.join(directory, infile)))\n for infile in args.infiles]\n return _open_files(files, args.open_mode)", "def genOpen(filename, mode):\n (name, ext) = os.path.splitext(filename)\n if ext == \".gz\":\n return gzip.open(filename, mode)\n else:\n return open(filename, mode)", "def genOpen(filename, mode):\n (name, ext) = os.path.splitext(filename)\n if ext == \".gz\":\n return gzip.open(filename, mode)\n else:\n return open(filename, mode)", "def run_merge(*src, argv=None):\n tmpdir = tempfile.mkdtemp()\n inputs = []\n for i, drv in enumerate(src):\n if type(drv) != str:\n tmppath = os.path.join(tmpdir, \"input_%s.tif\" % i)\n drv.write(tmppath)\n inputs.append(tmppath)\n else:\n inputs.append(src)\n\n if \"-o\" in argv:\n outpath = argv[argv.index(\"-o\") + 1]\n else:\n outpath = os.path.join(tempfile.gettempdir(), \"%s.tif\" % 
next(tempfile._get_candidate_names()))\n logger.debug(\"Writing to file %s\" % outpath)\n argv += [\"-o\", outpath]\n argv = gdal.GeneralCmdLineProcessor(argv)\n options = argv + inputs\n assert run_external_app(\"gdal_merge.py\", options) == 0, \"Error running gdal_merge\"\n remove_directory(tmpdir)\n return gdal.Open(outpath)", "def JPGtoPNGConverter(source, dest):\n files = os.listdir(f\"./{source}\")\n if not os.path.exists(f\"./{dest}\"):os.makedirs(f\"./{dest}\")\n\n for file in files:\n if os.path.splitext(file)[-1] == \".jpg\":\n img = Image.open(f\"./{source}/{file}\")\n clean_text = os.path.splitext(file)[0]\n img.save(f\"./{dest}/{clean_text}.png\",\"png\")\n else:\n print(f\"Your filename: {file} is not in .JPG format !!\")\n return \"All files converted successfully :) \"", "def onclick_open_image(self):\n filename = select_file(\n \"Select Image\",\n \"../\",\n \"Image Files (*.jpeg *.jpg *.png *.gif *.bmg)\")\n if filename:\n param_name = select_file(\n \"Select Parameter\", \"../\", \"Parameter Files (*.json)\")\n if param_name:\n self.moildev = Moildev(param_name)\n self.image = read_image(filename)\n self.h, self.w = self.image.shape[:2]\n self.show_to_window()", "def open_nd2(self):\n\n filename = filedialog.askopenfilename(initialdir=\".\", title=\"Select file...\",\n filetypes=((\"nd2 files\", \"*.nd2\"), (\"tif files\", \"*.tif\"), (\"All files\", \"*.*\")))\n if filename:\n self.parameters['basename'] = os.path.basename(filename)\n self.parameters['directory'] = os.path.dirname(filename)\n if filename[-4:] == \".tif\":\n print(\"Tif file selected\")\n self.parameters['filename'] = os.path.join(self.parameters['directory'],\"*.tif\")\n self.parameters['filetype'] = \"tif\"\n\n # get pixel size\n im = Image.open(filename) # open file with Pillow to extract metadata from it\n self.parameters['pixel_microns'] = 1./im.info['resolution'][0] # saved in tiff info by imagej\n im.close()\n else: # selected nd2 file\n self.parameters['filename'] = filename\n self.parameters['filetype'] = \"nd2\"\n self.widgets['lblHelp']['text'] = f\"You selected {filename}\"\n self.process_nd2()", "def create_ela_files(originals='./data/CASIA1_originals', originals_ela='./data/CASIA1_originals_ela',\n fakes='./data/CASIA1_fakes', fakes_ela='./data/CASIA1_fakes_ela'):\n for i, item in enumerate(os.listdir(originals)):\n image = ela(os.path.join(originals, item))\n cv2.imwrite(os.path.join(originals_ela, item), image)\n \n for i, item in enumerate(os.listdir(fakes)):\n image = ela(os.path.join(fakes, item))\n cv2.imwrite(os.path.join(fakes_ela, item), image)", "def join_files():\n files = [ent_1.get(), ent_2.get()]\n out_writer = PyPDF2.PdfFileWriter()\n for file in files:\n pdf_file = open(file, 'rb')\n file_reader = PyPDF2.PdfFileReader(pdf_file)\n for page in range(file_reader.numPages):\n pageObj = file_reader.getPage(page)\n out_writer.addPage(pageObj)\n\n output_file_name = result_entry.get()\n output_file = open(output_file_name, 'wb')\n out_writer.write(output_file)\n output_file.close()\n pdf_file.close()\n opener = \"open\" if sys.platform == \"darwin\" else \"xdg-open\"\n subprocess.call([opener, output_file_name])\n clear_labels()", "def take_door_photo():\n\n # based on lukec's code in VHS.pm\n config = yaml.load(file('/etc/vhs.yaml'))\n short_hash = hashlib.sha256(str(datetime.datetime.now())).hexdigest()[0:6]\n pic_base = config.get('picture_base')\n if pic_base:\n filename = os.path.join(pic_base, '%s.jpeg' % short_hash)\n os.system('streamer -c /dev/video0 -b 16 -o %s >/dev/null 
2>&1' % filename)\n short_file = os.path.splitext(filename)[0] + '.jpg'\n os.rename(filename, short_file)\n pic_uri_base = config.get('picture_uri_base') \n if pic_uri_base and os.path.exists(short_file):\n pic_uri = '%s/%s' % (pic_uri_base, os.path.basename(short_file))\n return (pic_uri, short_file)\n\n return None", "def build_jpeg_preview(self, file_path, cache_path, page_id: int, extension='.jpeg', size=(256,256)):\n # try:\n # os.mkdir(cache_path.format(d_id=document_id))\n # except OSError:\n # pass\n\n file_name = self.get_file_hash(file_path, size)\n with open(file_path, 'rb') as img:\n result = file_converter.image_to_jpeg_wand(img, size)\n with open('{path}{extension}'.format(\n path=cache_path + file_name,\n extension=extension\n ), 'wb') as jpeg:\n buffer = result.read(1024)\n while buffer:\n jpeg.write(buffer)\n buffer = result.read(1024)", "def import_jail(self, jail, compression_algo=None, path=None):\n # Path can be an absolute path pointing straight to the exported jail\n # or it can the directory where the exported jail lives\n # TODO: We should introduce parsers for this\n image_dir = path or os.path.join(self.iocroot, 'images')\n if not os.path.exists(image_dir):\n iocage_lib.ioc_common.logit(\n {\n 'level': 'EXCEPTION',\n 'message': f'{image_dir} does not exist.'\n }\n )\n elif os.path.isfile(image_dir):\n image_dir, filename = image_dir.rsplit('/', 1)\n else:\n if not compression_algo:\n extension_regex = r'zip|tar\\.xz'\n else:\n extension_regex = r'zip' if \\\n compression_algo == 'zip' else r'tar.xz'\n regex = re.compile(rf'{jail}.*(?:{extension_regex})')\n matches = [\n f for f in os.listdir(image_dir) if regex.match(f)\n ]\n\n if len(matches) > 1:\n msg = f\"Multiple images found for {jail}:\"\n\n for j in sorted(matches):\n msg += f\"\\n {j}\"\n\n msg += '\\nPlease explicitly select image or define ' \\\n 'compression algorithm to use'\n\n iocage_lib.ioc_common.logit(\n {\n \"level\": \"EXCEPTION\",\n \"message\": msg\n },\n _callback=self.callback,\n silent=self.silent)\n elif len(matches) < 1:\n iocage_lib.ioc_common.logit(\n {\n \"level\": \"EXCEPTION\",\n \"message\": f\"{jail} not found!\"\n },\n _callback=self.callback,\n silent=self.silent)\n else:\n filename = matches[0]\n\n if filename.rsplit('.', 1)[-1] == 'zip':\n compression_algo = extension = 'zip'\n else:\n compression_algo = 'lzma'\n extension = 'tar.xz'\n\n image_target = f\"{image_dir}/{filename}\"\n uuid, date = filename[:-len(f'.{extension}')].rsplit('_', 1)\n\n if compression_algo == 'zip':\n reader = {\n 'func': zipfile.ZipFile, 'params': ['r'], 'iter': 'namelist'\n }\n else:\n reader = {\n 'func': tarfile.open, 'params': ['r:xz'], 'iter': 'getmembers'\n }\n\n with reader['func'](image_target, *reader['params']) as f:\n for member in getattr(f, reader['iter'])():\n if compression_algo != 'zip':\n name = member.name\n else:\n name = member\n\n z_dataset_type = name.split(f'{date}_', 1)[-1]\n z_dataset_type = z_dataset_type.split(f'{uuid}_', 1)[-1]\n if z_dataset_type == date:\n # This is the parent dataset\n z_dataset_type = uuid\n else:\n z_dataset_type = \\\n f'{uuid}/{z_dataset_type.replace(\"_\", \"/\")}'.rstrip(\n '/'\n )\n\n iocage_lib.ioc_common.logit(\n {\n 'level': 'INFO',\n 'message': f'Importing dataset: {z_dataset_type}'\n },\n self.callback,\n silent=self.silent\n )\n\n recv = su.Popen(\n [\n 'zfs', 'recv', '-F', os.path.join(\n self.pool, 'iocage/jails', z_dataset_type\n )\n ], stdin=su.PIPE\n )\n if compression_algo == 'zip':\n data = f.open(name).read()\n else:\n data = 
f.extractfile(member).read()\n\n recv.stdin.write(data)\n recv.communicate()\n\n # Cleanup our mess.\n try:\n target = f\"{self.pool}/iocage/jails/{uuid}@ioc-export-{date}\"\n\n iocage_lib.ioc_common.checkoutput(\n [\"zfs\", \"destroy\", \"-r\", target], stderr=su.STDOUT)\n except su.CalledProcessError as err:\n msg = err.output.decode('utf-8').rstrip()\n iocage_lib.ioc_common.logit(\n {\n \"level\": \"EXCEPTION\",\n \"message\": msg\n },\n _callback=self.callback,\n silent=self.silent)\n\n # Templates become jails again once imported, let's make that reality.\n cache.reset()\n jail_json = iocage_lib.ioc_json.IOCJson(\n f'{self.iocroot}/jails/{uuid}', silent=True\n )\n if jail_json.json_get_value('type') == 'template':\n jail_json.json_set_value('type=jail')\n jail_json.json_set_value('template=0', _import=True)\n\n msg = f\"\\nImported: {uuid}\"\n iocage_lib.ioc_common.logit(\n {\n \"level\": \"INFO\",\n \"message\": msg\n },\n self.callback,\n silent=self.silent)", "def preprocess_images(file_path, new_file_path):\n if not os.path.isdir(new_file_path):\n os.mkdir(new_file_path)\n i = 0\n for dir in listdir(file_path):\n j = 0\n for image_path in listdir(file_path + '/' + dir):\n image = open_image(image_path)\n cv2.imwrite(file_path + '/' + image_path + '/' str(i) + '/' +str(i) + '.jpg', image)\n j += 1\n i += 1", "def combine_pictures(images):\n widths, heights = zip(*(i.size for i in images))\n\n total_width = sum(widths)\n max_height = max(heights)\n\n new_im = Image.new('RGB', (total_width, max_height))\n\n x_offset = 0\n for im in images:\n new_im.paste(im, (x_offset, 0))\n x_offset += im.size[0]\n\n new_im.save('test.jpg')\n\n return True", "def buildImages(files, targets, type):\n images = []\n for file in files:\n targets.append(file)\n with open(file, \"rb\") as f:\n if type == \"Byte\":\n images.append(bytePlot(list(f.read())))\n elif type == \"Markov\":\n images.append(markovPlot(list(f.read())))\n elif type == \"Hilbert\":\n images.append(hilbertPlot(list(f.read())))\n smp.imsave(\"{}.png\".format(file), images[-1])\n return images, targets", "def open_file(self):\n filepath = askopenfilename(filetypes=[(\"Image Files\", (\"*.jpg\", \"*.png\")), (\"All Files\", \"*.*\")])\n if not filepath:\n return\n return filepath", "def download_images_jpg(self):\n self.show_as_waiting(True)\n self.download_images('JPEG')\n self.show_as_waiting(False)", "def run_image_editor( self ):\n\n # XXX: hardcoded program name and image size.\n subprocess.Popen( [\"gimp\", \"-adfs\", self.record[\"filename\"]] )", "def test_format_files(self):\n shutil.copytree(\"testimages/\", \"testimages_to_format/\")\n os.chdir(\"testimages_to_format\")\n self.vimiv.quit()\n self.init_test([\"arch_001.jpg\"])\n self.vimiv[\"fileextras\"].format_files(\"formatted_\")\n files = [fil for fil in os.listdir() if \"formatted_\" in fil]\n files = sorted(files)\n expected_files = [\"formatted_001.jpg\", \"formatted_002\",\n \"formatted_003.bmp\", \"formatted_004.svg\",\n \"formatted_005.tiff\", \"formatted_006.png\"]\n self.assertEqual(files, expected_files)\n os.chdir(\"..\")\n # Should not work without a path\n self.vimiv.paths = []\n self.vimiv[\"fileextras\"].format_files(\"formatted_\")\n self.check_statusbar(\"INFO: No files in path\")\n # Should not work in library\n self.vimiv[\"library\"].focus(True)\n self.vimiv[\"fileextras\"].format_files(\"formatted_\")\n self.check_statusbar(\"INFO: Format only works on opened image files\")", "def gifsicle(fname1, /, *, chunksize = 1048576, debug = False, timeout = 
60.0):\n\n # Import standard modules ...\n import os\n import shutil\n import subprocess\n import tempfile\n\n # Import sub-functions ...\n from ..sha512 import sha512\n\n # Check that \"gifsicle\" is installed ...\n if shutil.which(\"gifsicle\") is None:\n raise Exception(\"\\\"gifsicle\\\" is not installed\") from None\n\n # Check that the image exists ...\n if not os.path.exists(fname1):\n raise Exception(f\"\\\"{fname1}\\\" does not exist\") from None\n\n # Create temporary directory ...\n with tempfile.TemporaryDirectory(prefix = \"gifsicle.\") as tname:\n # Create temporary name ...\n fname2 = f\"{tname}/image.gif\"\n\n # Optimise GIF ...\n subprocess.run(\n [\n \"gifsicle\",\n \"--unoptimize\",\n \"--optimize=3\",\n \"--output\", fname2,\n fname1\n ],\n check = True,\n encoding = \"utf-8\",\n stderr = subprocess.DEVNULL,\n stdout = subprocess.DEVNULL,\n timeout = timeout,\n )\n\n # Find the two sizes and don't replace the original if the new one is\n # larger, or equal ...\n if os.path.getsize(fname2) >= os.path.getsize(fname1):\n if debug:\n print(f\"INFO: Skipping because \\\"{fname2}\\\" is larger than, or equal to, \\\"{fname1}\\\"\")\n return\n\n # Find the two hashes and don't replace the original if the new one is\n # the same ...\n if sha512(fname1, chunksize = chunksize) == sha512(fname2, chunksize = chunksize):\n if debug:\n print(f\"INFO: Skipping because \\\"{fname2}\\\" is the same as \\\"{fname1}\\\"\")\n return\n\n # Replace the original ...\n shutil.move(fname2, fname1)", "def batch_export_ortho():\r\n global path_to_project\r\n \r\n for path in path_to_project:\r\n export_filename = os.path.basename(path['ProjectPath']).replace('.psz','.tif')\r\n export_path = os.path.join(export_folder,export_filename)\r\n try:\r\n project = PhotoScan.app.document\r\n project.open(path['ProjectPath'])\r\n \r\n dx, dy = mosaic.get_resolution(path['Flight_id'], path['Field'], path['Camera'])\r\n \r\n if dx is not None and dy is not None:\r\n status = project.activeChunk.exportOrthophoto(\r\n export_path, format=\"tif\", color_correction=False, blending='average', dx=dx, dy=dy,\r\n projection=project.activeChunk.projection)\r\n else:\r\n status = project.activeChunk.exportOrthophoto(export_path, format=\"tif\", color_correction=False, blending='average',projection=project.activeChunk.projection)\r\n except Exception as e:\r\n print(e)\r\n if status is True:\r\n print(\"Perfect\")\r\n app = PhotoScan.Application()\r\n app.quit()", "def open_(filename, *args):\n\n if (filename[-3:] == '.gz'):\n return gzip.open(filename, *args)\n try:\n return open(filename, *args)\n except OSError:\n return gzip.open(filename + \".gz\", *args)", "def open(self, img_name, size=\"default\"):\n print(\"Openning %s\" % img_name)\n self.img_original = Image.open(img_name, mode='r')\n self.img_name = img_name\n\n if size == \"default\":\n size = self.img_original.size[0]\n\n self.img_debut = self.img_resize(size)\n return self.img_debut", "def read_images(path, sz=None, cr=None):\n c = 0\n X,y = [], []\n for dirname, dirnames, filenames in os.walk(path):\n for subdirname in dirnames:\n subject_path = os.path.join(dirname, subdirname)\n for filename in os.listdir(subject_path):\n\n if filename.endswith('.jpg'):\n try:\n im = cv2.imread(os.path.join(subject_path, filename), cv2.IMREAD_GRAYSCALE)\n #print os.path.join(subject_path, filename)\n # crop the image on the face\n if (cr is not None):\n rect, img = detect(im)\n if len(rect) == 0:\n return [None,None]\n im = img[rect[0][1]:rect[0][3], 
rect[0][0]:rect[0][2]]\n \n #im = Image.fromarray(img)\n # resize to given size (if given)\n if (sz is not None):\n #print im, sz\n im = cv2.resize(im, sz)\n cv2.imwrite('../data_pictures/prova'+str(c)+'.jpg',im)\n X.append(np.asarray(im, dtype=np.uint8))\n y.append(c)\n except IOError, (errno, strerror):\n print \"I/O error({0}): {1}\".format(errno, strerror)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise\n\n\n c = c+1\n return [X,y]", "def load_images(files, open_fn=None):\n if open_fn is None:\n import cv2\n open_fn = cv2.imread\n images = list()\n for _file in files:\n images.append(np.asarray(open_fn(_file)))\n return images", "def join_file(first_file):\n first_file_no_numbers = first_file[:-3] # Remove 001 from file name\n output_file_name = first_file[:-4] # Remove .001 from file name\n file_number = 1 # Create counter starting at 1\n\n with open(output_file_name, 'wb') as output_file: # Output file loop\n while True: # For ever loop\n try:\n # Open file by pasting 3digit number as extension\n with open(first_file_no_numbers + ('%03d' % file_number), 'rb') as current_input:\n # Read the whole file and write it to output file. (Maybe dangerous if file size > memory)\n output_file.write(current_input.read())\n # Go on to the next file\n file_number += 1\n except FileNotFoundError:\n # End loop when no more 3digit extension files are found\n break", "def __main__() :\n try :\n poly = Polyhedre(sys.argv[1])\n \n name = sys.argv[2]\n \n createAllFiles(poly, name)\n\n createAllImageFiles(poly, name)\n \n except FileNotFoundError :\n print(\"Use an existing file\")", "def read_images(folder):\n distinct_frames = DistinctFrames()\n\n for file in sorted(sorted(os.listdir(folder)),\n key=len): # sorting files on basis of 1) length and 2) numerical order\n '''\n Sorting is done 2 times because\n if files in the folder are\n 1. image100.pkl\n 2. image22.pkl\n 3. 
image21.pkl\n firstly sort them to image100.pkl,image21.pkl,image22.pkl then according to length to image21.pkl,image22.pkl,image100.pkl\n '''\n try:\n img_obj = load_from_memory(file, folder)\n time_stamp = img_obj.get_time()\n distinct_frames.add_img_obj(img_obj)\n print(\"Reading image ..\" + str(time_stamp) + \" from \" + folder) # for debug purpose\n except:\n # exception will occur for files like .DS_Store and jpg directory\n continue\n\n if distinct_frames.no_of_frames() != 0:\n distinct_frames.calculate_time()\n\n return distinct_frames", "def download_images(src_dir, dest_dir):\n # +++your code here+++\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n res=utility(src_dir)\n k=0\n f=file(dest_dir+\"/\"+\"index.html\", 'w')\n f.write(\"<html><body>\")\n for i in res:\n local_name='image'+str(k)\n print \"downloading image%d\" %(k)\n urllib.urlretrieve(i, os.path.join(dest_dir, local_name))\n f.write(\"<img src=\"+'\"'+os.path.join(dest_dir, local_name)+'\"'+\">\")\n k+=1\n f.write(\"</body></html>\")\n f.close()\n cmd=\"xdg-open\"+\" \"+'\"'+dest_dir+\"/\"+\"index.html\"+'\"'\n (status, output)=commands.getstatusoutput(cmd)\n sys.exit(1)", "def open(*args, **kwargs):\n return MultiFileTileSource(*args, **kwargs)", "def process_files(self):\n for filename in self.temp_directory.iterdir():\n im = Image.open(str(filename))\n scaled = im.resize((640, 480))\n scaled.save(str(filename))", "def iread(filename, *args, verbose=True, **kwargs):\n\n # determine if file is valid:\n # assert isinstance(filename, str), 'filename must be a string'\n\n\n # TODO read options for image\n # opt = {\n # 'uint8': False,\n # 'single': False,\n # 'double': False,\n # 'grey': False,\n # 'grey_709': False,\n # 'gamma': 'sRGB',\n # 'reduce': 1.0,\n # 'roi': None\n # }\n\n if isinstance(filename, str) and (filename.startswith(\"http://\") or filename.startswith(\"https://\")):\n # reading from a URL\n\n resp = urllib.request.urlopen(filename)\n array = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n image = cv.imdecode(array, -1)\n print(image.shape)\n return (image, filename)\n\n elif isinstance(filename, (str, Path)):\n # reading from a file\n\n path = Path(filename).expanduser()\n\n if any([c in \"?*\" for c in str(path)]):\n # contains wildcard characters, glob it\n # recurse and return a list\n # https://stackoverflow.com/questions/51108256/how-to-take-a-pathname-string-with-wildcards-and-resolve-the-glob-with-pathlib\n \n parts = path.parts[1:] if path.is_absolute() else path.parts\n p = Path(path.root).glob(str(Path(\"\").joinpath(*parts)))\n pathlist = list(p)\n\n if len(pathlist) == 0 and not path.is_absolute():\n # look in the toolbox image folder\n path = Path(__file__).parent / \"images\" / path\n parts = path.parts[1:] if path.is_absolute() else path.parts\n p = Path(path.root).glob(str(Path(\"\").joinpath(*parts)))\n pathlist = list(p)\n \n if len(pathlist) == 0:\n raise ValueError(\"can't expand wildcard\")\n\n imlist = []\n pathlist.sort()\n for p in pathlist:\n imlist.append(iread(p, **kwargs))\n return imlist\n\n else:\n # read single file\n\n if not path.exists():\n if path.is_absolute():\n raise ValueError(f\"file {filename} does not exist\")\n # file doesn't exist\n # see if it matches the supplied images\n path = Path(__file__).parent / \"images\" / path\n\n if not path.exists():\n raise ValueError(f\"file {filename} does not exist, and not found in supplied images\")\n\n # read the image\n # TODO not sure the following will work on Windows\n im = cv.imread(path.as_posix(), 
**kwargs) # default read-in as BGR\n\n if im is None:\n # TODO check ValueError\n raise ValueError(f\"Could not read {filename}\")\n\n return (im, str(path))\n\n elif islistof(filename, (str, Path)):\n # list of filenames or URLs\n # assume none of these are wildcards, TODO should check\n out = []\n for file in filename:\n out.append(iread(file, *args))\n return out\n else:\n raise ValueError(filename, 'invalid filename')", "def download_pics(pics_links):\n\n for link in range(len(pics_links)):\n r = requests.get(pics_links[link][0])\n with open(os.path.join(\"tmp\", f\"{link}.jpg\"), \"wb\") as dl:\n dl.write(r.content)", "def gray_pic(dossierE, dossierS):\r\n files = os.listdir(dossierE)\r\n for f in files:\r\n if f.endswith('.jpg'):\r\n print(\"1er if (pour .jpg)\")\r\n try:\r\n print(f)\r\n img = cv2.imread(f\"{dossierE}/{f}\")\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n cv2.imwrite(f\"{dossierS}/{f}\", gray)\r\n logger.log(f'gray_pic={f}')\r\n except NameError as e:\r\n print(f\"image inexistante, erreur : {e}\")\r\n logger.log(f\"image inexistante, erreur : {e}\")\r\n elif f.endswith('.jpeg'):\r\n print(\"elif (pour .jpeg)\")\r\n try:\r\n print(f)\r\n img = cv2.imread(f\"{dossierE}/{f}\")\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n cv2.imwrite(f\"{dossierS}/{f}\", gray)\r\n logger.log(f'gray_pic={f}')\r\n except NameError as e:\r\n print(f\"image inexistante, erreur : {e}\")\r\n logger.log(f\"image inexistante, erreur : {e}\")\r\n elif f.endswith('.png'):\r\n print(\"2ème elif (pour .png)\")\r\n try:\r\n print(f)\r\n img = cv2.imread(f\"{dossierE}/{f}\")\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n cv2.imwrite(f\"{dossierS}/{f}\", gray)\r\n logger.log(f'gray_pic={f}')\r\n except NameError as e:\r\n print(f\"image inexistante, erreur : {e}\")\r\n logger.log(f\"image inexistante, erreur : {e}\")\r\n else:\r\n print(\"else Erreur : Le fichier que vous essayez d'ouvrir n'est pas une image.\")\r\n logger.log(\"else Erreur : Le fichier que vous essayez d'ouvrir n'est pas une image.\")\r\n print(f)", "def _open_files(inputs, mode):\n assert isinstance(inputs, list)\n\n local_open = pf.open\n return [local_open(ffile, mode=mode) for ffile in inputs]", "def make_gif():\n if MIGRATION:\n import imageio\n for n, JPG_DIR in enumerate(JPG_DIRS):\n images, image_file_names = [], []\n for file_name in os.listdir(JPG_DIR):\n if file_name.endswith('.jpg'):\n image_file_names.append(file_name) \n sorted_files = sorted(image_file_names, key=lambda y: int(y.split('_')[1]))\n for i in range(len(sorted_files)): \n file_path = os.path.join(JPG_DIR, sorted_files[i])\n images.append(imageio.imread(file_path))\n imageio.mimsave(FNAME.rsplit('.', 1)[0] + '_migration' + str(n) + '.gif', images, 'GIF', loop=1, fps=FPS)", "def loadImages(files, targets):\n images = []\n for file in files:\n targets.append(file)\n images.append(snd.imread(file))\n return images, targets", "def gtiff(location):\n\n for src_asc in os.listdir(location):\n if \".asc\" in src_asc:\n dest_gtif = src_asc[:-4] + '.gtiff'\n command = \"gdal_translate -of GTiff -ot Float32 \" + location +'\\\\' \\\n + src_asc + ' ' + location +'\\\\' + dest_gtif\n os.system(command)", "def open_file(self, event=None):\n file = fd.askopenfile(title=\"Choose file to open\",\n filetypes=[(\"Python(default)\", \"*.py\"), (\"Text\", \"*.txt\"),\n (\"Java\", \"*.java\"), (\"JavaScript\", \"*.js\"),\n (\"HTML\", \"*.html\"), (\"CSS\", \"*.css\"),\n (\"All files\", \"*.*\")])\n if file is None:\n return\n else:\n if imghdr.what(\n 
file.name): # if file is image return image type otherwise return None if file is not an image type\n from project_explorer import ProjectExplorer\n ProjectExplorer().open_image(file.name)\n else:\n self.add_tab(file=file.name, open_file=1)\n from syntax_highlight import Highlighting\n Highlighting().highlight2()", "def jpg_jump(file_map):\r\n match_list = []\r\n jpg_header = re.compile(b'(?s)(\\xff\\xd8\\xff\\xe0|\\xff\\xd8\\xff\\xe1)')\r\n sof = re.compile(b'(?s)(\\xff\\xc0|\\xff\\xc2)')\r\n sos = re.compile(b'(?s)\\xff\\xda')\r\n jpg_footer = re.compile(b'(?s)\\xff\\xd9')\r\n for match in jpg_header.finditer(file_map):\r\n end_header = match.end()\r\n end_footer = jpg_footer.search(file_map, end_header).end()\r\n start_sof = sof.search(file_map, end_header, end_footer).start()\r\n end_sos_pointer = sos.search(file_map, start_sof, end_footer).end()\r\n number_colors_components = int.from_bytes((file_map[end_sos_pointer+2:end_sos_pointer+3]), byteorder='little')\r\n start_sos_data = end_sos_pointer + 3 + (number_colors_components * 2)\r\n pattern_start_spot = start_sos_data + 5\r\n data = file_map[pattern_start_spot:end_footer]\r\n jump_size = pattern_id(data)\r\n prefix = file_map[start_sof:pattern_start_spot].hex()\r\n unique_bytes = file_map[pattern_start_spot + jump_size: pattern_start_spot + jump_size + 84].hex()\r\n if jump_size == 0:\r\n match_list.append(prefix + unique_bytes)\r\n else:\r\n jump = \" [ {} ] \".format(jump_size)\r\n match_list.append(prefix + jump + unique_bytes)\r\n return match_list", "def merge_images(filenames, outfile, vgap=20):\n images = [Image.open(filename) for filename in filenames]\n\n widths = [image.size[0] for image in images]\n heights = [image.size[1] for image in images]\n\n result_width = max(widths)\n result_height = sum(heights) + len(images) * vgap\n\n result = Image.new('RGB', (result_width, result_height), (255, 255, 255))\n y = 0\n for image in images:\n result.paste(im=image, box=(0, y))\n y += image.size[1] + vgap\n\n\n result.save(outfile)", "def get_file(file_info):\n if session_vars.filepath == file_info['filepath']:\n img_file = session_vars.img_file\n else:\n print('loading', file_info['filepath'])\n if file_info['ext']=='fits':\n print('Detected fits image type')\n pyfits = import_fits()\n img_file = pyfits.open(file_info['filepath'])\n else:\n try:\n from PIL import Image\n except ImportError:\n raise ToyzJobError(\n \"You must have PIL (Python Imaging Library) installed to \"\n \"open files of this type\"\n )\n img_file = Image.open(file_info['filepath'])\n session_vars.filepath = file_info['filepath']\n session_vars.img_file = img_file\n return img_file", "def _launch_file_b(self):\n types = [\n (\"JPG\", \"*.jpg\"),\n (\"Bitmap\", \"*.bmp\"),\n (\"PNG\", \"*.png\"),\n (\"GIF\", \"*.gif\"),\n (\"All files\", \"*\")]\n dialog = tkFileDialog.Open(self, filetypes = types)\n self._file_path = dialog.show()\n\n self._file_name = self._scrub_name(self._file_path)\n self._move_img()\n return self._file_name", "def main(argv):\n args = parse_command_line(argv)\n return convert_chunks_to_jpeg(args.raw_chunks_dir,\n jpeg_quality=args.jpeg_quality,\n slicing_plane=args.slicing_plane) or 0", "def getpreview(bandfiles):\n blue, green, red = scaleandfill(bandfiles)\n for each in [blue, green, red]:\n if not os.path.isfile(each):\n raise FileNotFoundError(\n \"Remote files do not exist or could not be accessed\"\n )\n # unpack the bands in to the three colors red, blue and green\n stackcommand = \"gdal_merge.py -o rgb.tif -separate -co \\\n 
PHOTOMETRIC=RGB -co COMPRESS=DEFLATE {red} {green} {blue} \\\n --config AWS_REQUEST_PAYER requester\"\n\n subprocess.call(\n stackcommand.format(red=red, green=green, blue=blue), shell=True\n ) # this creates a stacked file 'rgb.tif'\n\n if not os.path.isfile(\"rgb.tif\"):\n raise FileNotFoundError(\"Band stacking has failed\")\n\n stretchmin, stretchmax = getstretchlimits(\"rgb.tif\")\n tcicommand = \"gdal_translate -scale {stretchmin} {stretchmax} 0 255 \\\n -exponent 1 -ot Byte -of JPEG rgb.tif tci.jpg\"\n subprocess.call(\n tcicommand.format(stretchmin=stretchmin, stretchmax=stretchmax), shell=True\n ) # this color stretches the image and writes to jpeg\n\n cleanupcommand = \"rm *tif *TIF *xml\"\n subprocess.call(cleanupcommand, shell=True)\n\n timestring = time.strftime(\"%H_%M_%S\", time.localtime())\n s3filename = \"tcilatest\" + timestring + \".jpg\"\n s3 = boto3.client(\"s3\")\n s3.upload_file(\"tci.jpg\", \"testpushkarbucket\", s3filename)\n\n return s3filename", "def load_images(self,im_paths,imlist,im_index):\n\n\t\timlist_arr = []\n\t\tj = 0\n\t\tfor im_path in im_paths:\n\t\t\tim = None\n\n\t\t\ttry:\n\t\t\t\tim = Image.open(im_path)\n\t\t\t\t#im = imread(im_path)\n\t\t\t\t#print im.shape\n\t\t\texcept Exception, e:\n\t\t\t\tprint e\n\t\t\t\n\t\t\tif im != None:\n\t\t\t\ttry:\n\t\t\t\t\tim_aux = np.array(im,dtype=theano.config.floatX)\n\t\t\t\t\tim_converted = True\n\t\t\t\texcept TypeError, e:\n\t\t\t\t\tim_converted = False\n\t\t\t\t\tprint e\n\t\t\t\t\n\t\t\t\tif im_converted == True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif im_aux.shape[2] == 4:\n\t\t\t\t\t\t\tbackground = Image.new(\"RGB\", im.size, (255, 255, 255))\n\t\t\t\t\t\t\tbackground.paste(im, mask=im.split()[3]) # 3 is the alpha channel\n\t\t\t\t\t\t\tim = background\n\t\t\t\t\t\t\tim_aux = np.array(background,dtype=theano.config.floatX)\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tprint e\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\n\t\t\t\t\t\tif im_aux.shape[2] == 3:\n\t\t\t\t\t\t\tbn_parsed = os.path.basename(im_path).split(\"_\")\n\t\t\t\t\t\t\tim_id = int(bn_parsed[0])\n\t\t\t\t\t\t\t#print im_id\n\t\t\t\t\t\t\t#Ignore potential duplicates\n\t\t\t\t\t\t\t#if im_id not in self.im_index:\n\t\t\t\t\t\t\tif im_id not in im_index:\n\t\t\t\t\t\t\t\tim_aux = self.scale_and_crop_img(im)\n\t\t\t\t\t\t\t\t# This is for multiprocessing\n\t\t\t\t\t\t\t\tim_index.append(im_id)\n\t\t\t\t\t\t\t\timlist.append(np.asarray(im_aux))\n\n\t\t\t\t\t\t\t\t# Uncomment this if you are not using multiprocessing\n\t\t\t\t\t\t\t\t# self.im_index.append(im_id)\n\t\t\t\t\t\t\t\t# self.imlist.append(np.asarray(im_aux))\n\t\t\t\t\t\t\t\t#self.imlist.append(im_aux)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint \"invalid image: {} size:{}\".format(im.filename, im_aux.shape)\n\t\t\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t#raise e\n\t\t\t\t\t\tprint e\n\t\n\t\t\t# if self.verbose:\n\t\t\t# \tsys.stdout.write(\"\\r Process: {0}/{1}\".format(j, len(im_paths)))\n\t\t\t# \tsys.stdout.flush()\n\n\t\t\tj += 1", "def import_photos(site):\n image_bank = site['institucional']['fotos']\n image_names = ['plenario-camara.jpg', 'plenario-senado.jpg', 'congresso-nacional.jpg']\n # look inside \"static\" folder and import some files\n path = os.path.dirname(os.path.abspath(__file__)) + '/browser/static/'\n logger.info(u'Importando imagens')\n for name in image_names:\n with open(path + name) as f:\n image = StringIO(f.read())\n img_name = name.split('.')[0]\n title = img_name.replace('-', ' ').title()\n api.content.create(\n image_bank,\n type = 'Image',\n id = name,\n title = 
title,\n description = u'Foto de demonstração no tamanho 3x2. (esta imagem é um conteúdo de exemplo e pode ser removida)',\n image = image,\n creators = CREATORS,\n )\n logger.debug(u' {0} importada'.format(name))", "def test_pil_file_deinterlace(self):\n path = PILBackend().deinterlace(self.IMG_225x225)\n from PIL import Image\n with Image.open(path) as img:\n self.assertFalse('progression' in img.info)", "def save_images(images, save_dir, image_type):\n for image in images:\n raw_img = urllib2.urlopen(image).read()\n count = len([i for i in os.listdir(save_dir) if image_type in i]) + 1\n f = open(save_dir + '/' + image_type + '_' + str(count), 'wb')\n f.write(raw_img)\n f.close()", "def open_file(file_name):\n pass", "def _open_files(path, filenames, barcode, queue):\n if not exists(path):\n mkdir(path)\n\n handles = []\n\n for filename in filenames:\n base, ext = basename(filename).split('.', True)\n handles.append(\n Handle('{}/{}_{}.{}'.format(path, base, barcode, ext), queue,\n f_open=_type_handler[ext.split('.')[-1]]))\n\n return handles", "def jpegrescan(ext_args, root=None):\n args = copy.copy(_JPEGRESCAN_ARGS)\n if Settings.jpegrescan_multithread:\n args += ['-t']\n if Settings.destroy_metadata:\n args += ['-s']\n args += [ext_args.old_filename, ext_args.new_filename]\n if test:\n print(\"jpegrescan: \", args)\n return 0\n\n extern.run_ext(args, root)\n return _JPEG_FORMAT", "def copy_images(gallery_path, image_list):\n for image_link in image_list:", "def show_file(file_location):\n img = Image.open(file_location)\n img.show()", "def downloadMinio(url_list,list_d):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n name = \"-\".join(parser_arguments().classes)\n name = name.lower()\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n\n\n if r.status_code == 200:\n r.raw.decode_content = True\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n metadata = list_d[i]\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n path = os.getcwd()+'/'+filename # image path\n minioClient.fput_object(name,filename,path,'image/jpg',metadata)\n os.remove(filename)\n print(filename,'have been successfuly uploaded')\n print('Done!')", "def read_images_jpg(folder, hessian_threshold: int = 2500):\n distinct_frames = DistinctFrames()\n detector = cv2.xfeatures2d_SURF.create(hessian_threshold)\n\n for file in sorted(sorted(os.listdir(folder)), key=len): # sorting files on basis of\n # 1) length and 2) numerical order\n \"\"\"\n Sorting is done 2 times because\n if files in the folder are\n 1. image100.jpg\n 2. image22.jpg\n 3. 
image21.jpg\n firstly sort them to image100.jpg,image21.jpg,image22.jpg then according to length to \n image21.jpg,image22.jpg,image100.jpg\n \"\"\"\n try:\n grey = cv2.imread(folder + \"/\" + file, 0)\n time_stamp = int(file.replace('image', '').replace('.jpg', ''), 10)\n keypoints, descriptors = detector.detectAndCompute(grey, None)\n img_obj = ImgObj(len(keypoints), descriptors, time_stamp, serialize_keypoints(keypoints))\n distinct_frames.add_img_obj(img_obj)\n print(\"Reading image ..\" + str(time_stamp) + \" from \" + folder) # for dev phase\n except:\n continue\n\n return distinct_frames", "def gen_opener(filenames):\n for filename in filenames:\n if str(filename).endswith('.gz'):\n f = gzip.open(filename, 'rt')\n elif str(filename).endswith('.bz2'):\n f = bz2.open(filename, 'rt')\n else:\n f = open(filename, 'rt')\n yield f\n f.close()" ]
[ "0.58855605", "0.5591409", "0.55525833", "0.5439224", "0.5414354", "0.5400621", "0.5384213", "0.53814346", "0.5351015", "0.5304343", "0.52945495", "0.5280849", "0.52760863", "0.52742296", "0.527252", "0.5259538", "0.5245993", "0.52299297", "0.5225909", "0.52070266", "0.51910776", "0.5189418", "0.51831883", "0.5182659", "0.5180952", "0.51739705", "0.51703113", "0.51187783", "0.51177955", "0.50918704", "0.5091103", "0.50903493", "0.5072777", "0.50705147", "0.5049475", "0.5044932", "0.50445896", "0.5032239", "0.5022566", "0.50217044", "0.4984293", "0.49747145", "0.49584407", "0.49548596", "0.49494082", "0.49494082", "0.49439976", "0.4934215", "0.49332893", "0.49207762", "0.49087632", "0.49081457", "0.48998404", "0.48974076", "0.48960698", "0.48935375", "0.48923162", "0.4890961", "0.4889606", "0.48860195", "0.48850352", "0.4874324", "0.4872825", "0.48674032", "0.48621264", "0.48610675", "0.486009", "0.4855507", "0.4854011", "0.4848597", "0.484801", "0.48452827", "0.48434728", "0.48429677", "0.48399127", "0.48320776", "0.48240432", "0.4822219", "0.48216963", "0.4819123", "0.48177943", "0.48138186", "0.48035985", "0.47977883", "0.47977382", "0.4797018", "0.4795999", "0.47926998", "0.479002", "0.47866985", "0.47814414", "0.47781196", "0.4774485", "0.47710246", "0.4762509", "0.47585782", "0.4757836", "0.47572497", "0.47564676", "0.47550076" ]
0.60279435
0
Scroll canvas horizontally and redraw the image
def __scroll_x(self, *args, **kwargs): self.canvas_image.xview(*args) # scroll horizontally self.__show_image() # redraw the image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __scroll_x(self, *args, **kwargs):\n self.canvas.xview(*args) # scroll horizontally\n self.__show_image() # redraw the image", "def continuous_scroll(self, context):\n\n self.drawing.redraw_canvas(self.dy)\n \n return True", "def refresh(self):\n\n # Delete old image (if needed) \n if self.canvas_image_id:\n self.canvas.delete(self.canvas_image_id)\n if debug > 5:\n print \"refresh: New image (x\", self.zoom, \") \", (self.xint, self.yint), (self.canvas[\"width\"], self.canvas[\"height\"]), [self.zoom * s for s in self.isize]\n\n scaled_isize = [self.xint[1] - self.xint[0],\n self.yint[1] - self.yint[0]]\n\n # Create the image for the canvas\n self.image = self.generator_func(self.zoom, self.xint, self.yint)\n self.canvas_image_id = self.canvas.create_image(0, 0, anchor=N+W,\n image=self.image)\n\n # Figure out where scroll bars should be and put them there.\n if self.xint[0] == 0 and int(self.isize[0] * self.zoom) == self.xint[1]:\n self.hscroll.grid_remove()\n else:\n self.hscroll.grid()\n self.hscroll.set(mapped_number(self.xint[0],\n (0, self.isize[0] * self.zoom -1),\n (0, 1)),\n mapped_number(self.xint[1] -1,\n (0, self.isize[0] * self.zoom -1),\n (0, 1)))\n if self.yint[0] == 0 and int(self.isize[1] * self.zoom) == self.yint[1]:\n self.vscroll.grid_remove()\n else:\n self.vscroll.grid()\n self.vscroll.set(mapped_number(self.yint[0],\n (0, self.isize[1] * self.zoom -1),\n (0, 1)),\n mapped_number(self.yint[1] -1,\n (0, self.isize[1] * self.zoom -1),\n (0, 1)))", "def scrollDown_x(self):\r\n if self.x_stack<self.img.shape[2]-1:\r\n self.x_stack+=1\r\n \r\n self.pixmap5=self.drawPixmap(\"yz\")\r\n self.lbl5.setPixmap(self.pixmap5)\r\n self.pixmap6= self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)\r\n self.x_stack_lbl.setText(str(self.x_stack+1) + '/' + str(self.img.shape[2]))", "def redraw_canvas(self, dy):\n self.scroll = dy/20\n \n if self.scroll > 0:\n \n if self.min_cairo < -20:\n self.min_cairo = 0 \n self.min_text += 1 \n self.max_text += 1\n \n #When bottom of document is reached stop scrolling\n if self.max_text > self.line_count + 2:\n self.min_cairo = 0\n self.min_text = self.line_count - 50\n self.max_text = self.line_count\n self.scroll = 0\n \n elif self.scroll < 0:\n if self.min_cairo > 0:\n self.min_cairo = -20\n self.min_text -= 1\n self.max_text -=1\n\n #Do not scroll up if already at top of document\n if self.min_text < 0:\n self.min_cairo = 20\n self.min_text = 0\n self.max_text = 50\n self.scroll = 0\n \n #Do the scrolling\n self.min_cairo -= self.scroll\n \n self.max_cairo = self.min_cairo\n self.invalidate_canvas()", "def scrollUp_x(self):\r\n if self.x_stack>0:\r\n self.x_stack-=1\r\n \r\n self.pixmap5=self.drawPixmap(\"yz\")\r\n self.lbl5.setPixmap(self.pixmap5) \r\n self.pixmap6= self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)\r\n self.x_stack_lbl.setText(str(self.x_stack+1) + '/' + str(self.img.shape[2]))", "def scrollDown(self):\r\n\r\n if self.z_stack<self.img.shape[0]-1:\r\n self.z_stack+=1\r\n \r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaled(500,500)\r\n self.pixmap= self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2= self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def __scroll_y(self, *args, **kwargs):\n self.canvas_image.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def scroll(self, delta_x, delta_y):\n if delta_x < 
0:\n shift_x = 0\n xend = self.width + delta_x\n dt_x = 1\n else:\n shift_x = self.width - 1\n xend = delta_x - 1\n dt_x = -1\n if delta_y < 0:\n y = 0\n yend = self.height + delta_y\n dt_y = 1\n else:\n y = self.height - 1\n yend = delta_y - 1\n dt_y = -1\n while y != yend:\n x = shift_x\n while x != xend:\n self.format.set_pixel(\n self, x, y, self.format.get_pixel(self, x - delta_x, y - delta_y)\n )\n x += dt_x\n y += dt_y", "def __scroll_y(self, *args, **kwargs):\n self.canvas.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def draw(self, thing):\n thing.draw(self, Point([2,2]), flip=1)\n\n # configure the scroll region\n bbox = Canvas.bbox(self.canvas, ALL)\n self.canvas.configure(scrollregion=bbox)", "def __show_image(self):\n box_image = self.canvas_image.coords(self.container) # get image area\n box_canvas = (self.canvas_image.canvasx(0), # get visible area of the canvas\n self.canvas_image.canvasy(0),\n self.canvas_image.canvasx(self.canvas_image.winfo_width()),\n self.canvas_image.canvasy(self.canvas_image.winfo_height()))\n self.box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly\n \n # Get scroll region box\n box_scroll = [min(self.box_img_int[0], box_canvas[0]), min(self.box_img_int[1], box_canvas[1]),\n max(self.box_img_int[2], box_canvas[2]), max(self.box_img_int[3], box_canvas[3])]\n # Horizontal part of the image is in the visible area\n if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:\n box_scroll[0] = self.box_img_int[0]\n box_scroll[2] = self.box_img_int[2]\n # Vertical part of the image is in the visible area\n if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:\n box_scroll[1] = self.box_img_int[1]\n box_scroll[3] = self.box_img_int[3]\n # Convert scroll region to tuple and to integer\n self.canvas_image.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region\n x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile\n y1 = max(box_canvas[1] - box_image[1], 0)\n x2 = min(box_canvas[2], box_image[2]) - box_image[0]\n y2 = min(box_canvas[3], box_image[3]) - box_image[1]\n if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area\n image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid\n (int(x1 / self.__scale), int(y1 / self.__scale),\n int(x2 / self.__scale), int(y2 / self.__scale)))\n #\n imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))\n self.imageid = self.canvas_image.create_image(max(box_canvas[0], self.box_img_int[0]),\n max(box_canvas[1], self.box_img_int[1]),\n anchor='nw', image=imagetk)\n self.canvas_image.lower(self.imageid) # set image into background\n self.canvas_image.imagetk = imagetk # keep an extra reference to prevent garbage-collection", "def __show_image(self):\n box_image = self.canvas.coords(self.container) # get image area\n box_canvas = (self.canvas.canvasx(0), # get visible area of the canvas\n self.canvas.canvasy(0),\n self.canvas.canvasx(self.canvas.winfo_width()),\n self.canvas.canvasy(self.canvas.winfo_height()))\n box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly\n # Get scroll region box\n box_scroll = [min(box_img_int[0], box_canvas[0]), min(box_img_int[1], box_canvas[1]),\n max(box_img_int[2], box_canvas[2]), max(box_img_int[3], box_canvas[3])]\n # Horizontal part of the image is in the visible area\n if box_scroll[0] == box_canvas[0] and 
box_scroll[2] == box_canvas[2]:\n box_scroll[0] = box_img_int[0]\n box_scroll[2] = box_img_int[2]\n # Vertical part of the image is in the visible area\n if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:\n box_scroll[1] = box_img_int[1]\n box_scroll[3] = box_img_int[3]\n # Convert scroll region to tuple and to integer\n self.canvas.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region\n x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile\n y1 = max(box_canvas[1] - box_image[1], 0)\n x2 = min(box_canvas[2], box_image[2]) - box_image[0]\n y2 = min(box_canvas[3], box_image[3]) - box_image[1]\n if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area\n if self.__huge and self.__curr_img < 0: # show huge image\n h = int((y2 - y1) / self.imscale) # height of the tile band\n self.__tile[1][3] = h # set the tile band height\n self.__tile[2] = self.__offset + self.imwidth * int(y1 / self.imscale) * 3\n self.__image.close()\n self.__image = Image.open(self.path) # reopen / reset image\n self.__image.size = (self.imwidth, h) # set size of the tile band\n self.__image.tile = [self.__tile]\n image = self.__image.crop((int(x1 / self.imscale), 0, int(x2 / self.imscale), h))\n else: # show normal image\n image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid\n (int(x1 / self.__scale), int(y1 / self.__scale),\n int(x2 / self.__scale), int(y2 / self.__scale)))\n #\n imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))\n imageid = self.canvas.create_image(max(box_canvas[0], box_img_int[0]),\n max(box_canvas[1], box_img_int[1]),\n anchor='nw', image=imagetk)\n self.canvas.lower(imageid) # set image into background\n if self.last_image_id != 0:\n self.canvas.delete(self.last_image_id)\n\n self.last_image_id = imageid\n self.canvas.imagetk = imagetk # keep an extra reference to prevent garbage-collection", "def adjustScrolls(self):\n cwidth = self._canvas.winfo_width()\n cheight = self._canvas.winfo_height()\n self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)\n self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)\n if cwidth < self.canvwidth or cheight < self.canvheight:\n self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,\n column=0, rowspan=1, columnspan=1, sticky='news')\n self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,\n column=1, rowspan=1, columnspan=1, sticky='news')\n else:\n self.hscroll.grid_forget()\n self.vscroll.grid_forget()", "def scrollDown_y(self):\r\n if self.y_stack<self.img.shape[1]-1:\r\n self.y_stack+=1\r\n self.pixmap3=self.drawPixmap(\"xz\") \r\n self.lbl3.setPixmap(self.pixmap3)\r\n self.pixmap4= self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n self.y_stack_lbl.setText(str(self.y_stack+1) + '/' + str(self.img.shape[1]))", "def draw(self, canvas):\n canvas.delete(\"all\")\n width = canvas.winfo_reqwidth()\n height = canvas.winfo_reqheight()\n\n image = ImageTk.PhotoImage(self.image())\n canvas.create_image(width/2, height/2, image=image)\n canvas.img = image", "def scrollUp(self):\r\n if self.z_stack>0:\r\n self.z_stack-=1\r\n self.pixmap=self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def draw_canvas(self):\n\n self.canvas = Canvas(self)\n self.scrollbar = ttk.Scrollbar(self, orient= 
VERTICAL,\n command=self.canvas.yview) \n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n \n # make sure to add scrollbar before adding the canvas\n self.scrollbar.pack(side=RIGHT, fill=Y)\n self.canvas.pack(side=TOP, fill=BOTH, expand=1, padx=20, pady=20)\n \n # adding a frame to hold all the widgets, ttk Frame doesn't support\n # background config option \n self.frame = Frame(self.canvas) \n self.canvas.create_window(0,0,window=self.frame, anchor='nw')", "def draw(self, img):\n self._erase_last_line(self.img)\n\n idxs = np.argwhere(img[:, self._pos] == 0)\n self.prev_y = (idxs.min(), idxs.max())\n\n cv.line(img, (self._pos, 0), (self._pos, self.h), (0, 0, 0), 1)", "def __reconfig__(self, event):\r\n x, y = event.width//2, event.height//2\r\n self.canvas.config(scrollregion=(-x, -y, x, y))", "def linux_zoomer_plus(self, event):\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def mover_scroll(self, x, y):\n self.scrollx += x\n self.scrolly += y", "def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")", "def scrollUp_y(self):\r\n if self.y_stack>0:\r\n self.y_stack-=1\r\n \r\n self.pixmap3=self.drawPixmap(\"xz\")\r\n self.lbl3.setPixmap(self.pixmap3)\r\n self.pixmap4= self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n self.y_stack_lbl.setText(str(self.y_stack+1) + '/' + str(self.img.shape[1]))", "def move_horizontal(self):\r\n if self.movement == \"horizontal\" and self.flag_move:\r\n self.move_ball()\r\n self.canvas.after(50, self.move_horizontal)", "def _on_mousewheel(event):\n if event.num == 4 or event.delta > 0:\n canvas.yview_scroll(-1, \"units\" )\n elif event.num == 5 or event.delta < 0:\n canvas.yview_scroll(1, \"units\" )", "def horizontal_scroll(self, image, padding=True):\n\n image_list = list()\n height = image.size[1]\n\n # Scroll into the blank image.\n if padding:\n for y in range(1,17):\n section = image.crop((0, 0, 8, y))\n print section.width,section.height\n display_section = self.create_blank_image()\n display_section.paste(section, (0, 16 - y, 8, 16))\n image_list.append(display_section)\n return image_list\n\n #Scroll across the input image.\n for y in range(16, height + 1):\n section = image.crop((0, y - 16, 8, y))\n display_section = self.create_blank_image()\n display_section.paste(section, (0, 0, 8, 16))\n image_list.append(display_section)\n\n #Scroll out, leaving the blank image.\n if padding:\n for y in range(height - 15, height + 1):\n section = image.crop((0, y, 8, height))\n display_section = self.create_blank_image()\n display_section.paste(section, (0, 0, 8, 15 - (y - (height - 15))))\n image_list.append(display_section)\n\n #Return the list of images created\n return image_list", "def __wheel(self, event):\n x = self.canvas_image.canvasx(event.x) # get coordinates of the event on the canvas\n y = self.canvas_image.canvasy(event.y)\n if self.outside(x, y): return # zoom only inside image area\n scale = 1.0\n # Respond to Linux (event.num) or Windows (event.delta) wheel event\n if event.num == 5 or event.delta == -120: # scroll down, smaller\n if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels\n self.imscale /= self.__delta\n scale /= self.__delta\n if event.num == 4 or event.delta == 120: # scroll up, bigger\n i = min(self.canvas_image.winfo_width(), self.canvas_image.winfo_height()) >> 1\n if i < self.imscale: return # 1 pixel is bigger than the visible area\n self.imscale *= 
self.__delta\n scale *= self.__delta\n # Take appropriate image from the pyramid\n k = self.imscale * self.__ratio # temporary coefficient\n self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)\n self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))\n #\n self.canvas_image.scale('all', x, y, scale, scale) # rescale all objects\n # Redraw some figures before showing image on the screen\n self.redraw_figures() # method for child classes\n self.__show_image()", "def updatescroll(self):\n if self.node:\n #self.update_idletasks() # Required, else dimension of content may not have been computed ?\n forgetit, forgetit, x1, forgetit = self.bbox(ALL)\n self.sizetree = self.node.sizetree() + (self.winfo_height() / self.nodeheight) - 1\n self.configure(scrollregion = (0, 0, x1, self.sizetree * self.nodeheight))", "def scroll(self):\n x_position = self._player.get_position()[0]\n half_screen = self._master.winfo_width() / 2\n world_size = self._world.get_pixel_size()[0] - half_screen\n\n # Left side\n if x_position <= half_screen:\n self._view.set_offset((0, 0))\n\n # Between left and right sides\n elif half_screen <= x_position <= world_size:\n self._view.set_offset((half_screen - x_position, 0))\n\n # Right side\n elif x_position >= world_size:\n self._view.set_offset((half_screen - world_size, 0))", "def move(self):\n if self.x_pos < const.screenwidth:\n self.x_pos += 1\n self.x_pos = self.x_pos\n\n self.draw()\n return", "def __wheel(self, event):\n x = self.canvas.canvasx(event.x) # get coordinates of the event on the canvas\n y = self.canvas.canvasy(event.y)\n if self.outside(x, y): return # zoom only inside image area\n scale = 1.0\n # Respond to Linux (event.num) or Windows (event.delta) wheel event\n if event.num == 5 or event.delta == -120: # scroll down, smaller\n if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels\n self.imscale /= self.__delta\n scale /= self.__delta\n if event.num == 4 or event.delta == 120: # scroll up, bigger\n i = min(self.canvas.winfo_width(), self.canvas.winfo_height()) >> 1\n if i < self.imscale: return # 1 pixel is bigger than the visible area\n self.imscale *= self.__delta\n scale *= self.__delta\n # Take appropriate image from the pyramid\n k = self.imscale * self.__ratio # temporary coefficient\n self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)\n self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))\n #\n self.canvas.scale('all', x, y, scale, scale) # rescale all objects\n # Redraw some figures before showing image on the screen\n self.redraw_figures() # method for child classes\n self.__show_image()", "def reDraw(self):\n self.canvasIGetDrawnOn.delete(self.spriteOnCanvas)\n self.spriteImage = ImageTk.PhotoImage(self.spriteImageFile.rotate(self.faceHeading, expand=True))\n self.spriteOnCanvas=self.canvasIGetDrawnOn.create_image(self.xPos,self.yPos,image=self.spriteImage)", "def move_start(self, event):\n self.canvas.scan_mark(event.x, event.y)", "def down():\n global y, canvas # y é modificado\n canvas.create_line(x, y, x, y + 10)\n y += 10", "def redraw(self):\n offset = self.actual_row * self.row_size\n x = 5\n y = 5\n ind = 0\n self.scene.clear()\n for _ in range(self.column_size):\n for _ in range(self.row_size):\n if ind+offset < len(self.cards):\n self.draw_card(x, y, ind+offset)\n x += 90\n ind += 1\n x = 5\n y += 120", "def scroll_window(self):\r\n window = tkinter.Frame(self.root)\r\n scroller = 
tkinter.Scrollbar(self.root, orient=\"vertical\",\r\n command=self.canvas.yview)\r\n self.canvas.configure(yscrollcommand=scroller.set)\r\n\r\n scroller.pack(side=\"right\", fill=\"y\")\r\n self.canvas.pack(side=\"left\", fill=\"both\", expand=True)\r\n self.canvas.create_window((4, 4), window=window, anchor=\"nw\",\r\n tags=\"self.window\")\r\n return window", "def redraw(self):\n self.vispy_viewer.canvas.update()", "def _drawOnCanvas(self):\n self.canvas=np.ones(self.canvas.shape,dtype=np.uint8)*255\n for key in self.elements:\n graphElement=self.elements[key]\n graphElement.draw(self.canvas)\n self.sync=True", "def update_pos(self):\n self.imgx=self.pathX[min(self.x,len(self.pathX)-1)]\\\n [min(self.y,len(self.pathX[self.x])-1)]\n self.imgy=self.pathY[min(self.x,len(self.pathY)-1)]\\\n [min(self.y,len(self.pathY[self.x])-1)]", "def refresh(self):\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.canvas.draw()", "def redraw(self) -> None:\n self.canvas.draw_idle()\n self.Refresh()", "def NextFrame(self, event):\n buffer = self.GetDataBuffer()\n if buffer is not None:\n # Update bitmap widget with new image frame:\n self.bitmap.CopyFromBuffer(buffer)\n # Refresh panel to draw image into bitmap:\n self.Refresh()\n pass", "def _run(self):\n self._is_running = False\n self.start()\n self._pos += 1\n self.draw(self.img)", "def update(self):\n\n self.x += self.dx\n self.y += self.dy\n\n # draw image\n if self.visible:\n self.scene.screen.blit(self.image, (self.x, self.y))\n self.check_bounds()\n self.check_collisions()\n self.check_keys()", "def _on_scroll(self, event):\n self._zoom(event.step, draw=True)", "def xview_scroll(self, number, what):\n self.tk.call(self._w, 'xview', 'scroll', number, what)", "def on_mousewheel(self, event):\r\n self.container_widgets[\"order_canvas\"].yview_scroll(-1 * int(event.delta / 120), \"units\")\r\n # TODO FIX SCROLLING\r", "def draw_image(self):\n self.PDF.saveState()\n self.PDF.scale(1, -1)\n # self.PDF.drawImage(\n # LOGO, 490, -78, width=80, preserveAspectRatio=True, mask=\"auto\"\n # )\n self.PDF.restoreState()", "def windows_zoomer(self, event):\n if event.delta > 0:\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n elif event.delta < 0:\n self.canvas.scale(\"all\", event.x, event.y, 0.9, 0.9)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def on_canvas_resize(self, event) -> None:\r\n\r\n self.painter.adjust_to_canvas()\r\n self.painter.draw_board()", "def __window_scroll(self, x, y):\n pass", "def scroll(self, direction):\n\n self.counter += direction # Counter of 'up' and 'down'\n do_redraw = self.counter == self.content_size - self.h\n\n if self.size > 0:\n self.count += direction\n pos = self.pos\n if math.fabs(self.count) == math.floor(self.content_size / self.h):\n pos += direction\n self.count = 0\n\n pos = max(0, pos) # Top limit\n pos = min(pos, self.h - self.size) # Bottom limit\n do_redraw = pos != self.pos # Redraw if pos has changed\n self.pos = pos\n\n if do_redraw:\n self._create()", "def draw_lines(self):\n # draw x lines\n y = self.step_y\n while y <= self.height:\n x = 0\n while x <= self.width:\n self.canvas.create_line(x, y, x+3.5, y)\n self.canvas.update()\n x += 3.5\n y += self.step_y\n \n # draw y lines\n x = self.step_x\n while x <= self.width:\n y = 0\n while y <= self.height:\n self.canvas.create_line(x, y, x, y+3.5)\n self.canvas.update()\n y += 3.5\n x += self.step_x\n \n self.is_operating = False", "def draw(self):\n self.write_image()\n self.update()", "def on_configure(self, event):\n 
self.testCanvas.configure(scrollregion=self.testCanvas.bbox('all'))\n self.testCanvas.yview_moveto(1)", "def refresh_canvas(self):\n self.canvas.delete('all')\n self.draw_handler(self)\n self.canvas.after(CANVAS[\"REFRESH_TIME\"], self.refresh_canvas)", "def update(self, *args):\n\n # change picture every 100 milliseconds\n now = pygame.time.get_ticks()\n if now - self.last_update > 100:\n self.index = self.index ^ 1\n self.image = self.images[self.index]\n self.last_update = now\n prom = self.rect.center\n self.rect = self.image.get_rect()\n self.rect.center = prom\n\n self.rect.x -= args[0]", "def draw(self):\n\n super().draw()\n \n self.dim = self.getdim()\n start_x, start_y, = self.x(), self.y()\n\n for y in range(self.r):\n for x in range(self.c):\n x_pos, y_pos = start_x + (self.dim * x), start_y + (self.dim * y)\n self.tiles[y][x].resize(x_pos, y_pos, self.dim, self.dim)", "def on_draw_over_image(self):", "def start_refresh(self, widget, context):\n\n self.source_id = gobject.timeout_add(38, self.continuous_scroll, context)", "def redraw(self):\n self.vispy_widget.canvas.update()", "def update_canvas_display_image_from_full_image(self):\n\n full_image_rect = (0, 0, self.image_reader.full_image_ny, self.image_reader.full_image_nx)\n self.update_canvas_display_image_from_full_image_rect(full_image_rect)", "def update(self):\n if (self.j + self.step >= self.image.shape[0]) and (self.i + self.step >= self.image.shape[1]):\n self.no_more_crops = True\n elif self.i + self.step >= self.image.shape[1]:\n self.i = 0\n self.j += self.step\n else:\n self.i += self.step", "def scrollUp(self):\n if self.__firstShownLine > 0:\n self.__firstShownLine -= 1\n self.__refreshContent()\n else:\n curses.beep()", "def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels", "def onscroll(self, event):\n if self.out_graph is False:\n self.zoom += 10*event.step\n\n if self.zoom >= self.axe_X/2/self.FOV_img*self.FOV_img_Y:\n self.zoom = self.axe_X/2/self.FOV_img*self.FOV_img_Y\n\n if self.zoom <= 0:\n self.zoom = 0\n\n self.draw()", "def up():\n global y, canvas # y é modificado\n canvas.create_line(x, y, x, y - 10)\n y -= 10", "def start_scroll():\n send_command(0x2F)", "def StartDraw(self):\r\n self.zoom = self.test.viewZoom\r\n self.center = self.test.viewCenter\r\n self.offset = self.test.viewOffset\r\n self.screenSize = self.test.screenSize", "def update_current_image(self):\n\n rect = (0, 0, self.variables.canvas_width, self.variables.canvas_height)\n if self.variables.canvas_image_object is not None:\n self.variables.canvas_image_object.update_canvas_display_image_from_canvas_rect(rect)\n self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)\n self.update()", "def draw_image_on_canvas(self, force_generation=False):\n\n self.canvas_vertex = (self.canvas.canvasx(0), self.canvas.canvasy(0))\n box_coords = (self.canvas_vertex[0], self.canvas_vertex[1],\n self.canvas_vertex[0] + self.frame.width, self.canvas_vertex[1] + self.frame.height)\n\n # some weird bug with canvas being 0 when scrolling back to origin\n if box_coords[0] == -1:\n box_coords = (box_coords[0] + 1, box_coords[1], box_coords[2] + 1, box_coords[3])\n\n if box_coords[1] == -1:\n box_coords = (box_coords[0], box_coords[1] + 1, box_coords[2], box_coords[3] + 1)\n\n self.box_coords = box_coords\n\n image, self.top_left = self.get_image(box_coords, force_generation=force_generation)\n\n if image is not None:\n self.canvas.delete(\"all\")\n\n # this 
ownership is necessary, or the image does not show up on the canvas\n self.image = ImageTk.PhotoImage(image=image)\n\n self.image_on_canvas = self.canvas.create_image(\n self.top_left[0], self.top_left[1], image=self.image, anchor=\"nw\")", "def update_image(self, data):\n self.image_data.append(data)\n if len(self.image_data) > self.image_height:\n self.image_data = self.image_data[1:]\n\n self.image_render += 1\n\n # A 200 pixel tall image squashed into the render view does not\n # appear unpleasantely jumpy when scrolled by 5\n if self.image_render % 5 != 0 and self.image_render != 1:\n return\n\n img_data = range(len(self.image_data))\n\n position = 0\n while position < len(img_data):\n img_data[position] = self.image_data[position]\n position += 1\n\n new_data = numpy.array(img_data).astype(float)\n\n mid = self.main_image_dialog\n mid.image.set_data(new_data)\n\n # If you do autoscale here, it tends to jump around in appearing\n # to stretch to the window and be in 'normal' size\n mid.get_plot().replot()", "def scroll(*args):", "def draw(self, offset: IntegerPosition2D, canvas: Canvas) -> None:\n canvas_position: IntegerPosition2D\n if not self.buffer:\n canvas_position = IntegerPosition2D(0, 0)\n else:\n row: int = self.buffer.get_row(self.index)\n column: int = self.buffer.get_column(self.index)\n canvas_position = IntegerPosition2D(column, row)\n canvas_position += offset\n\n if not 0 <= canvas_position.x < canvas.size.width:\n return\n\n if not 0 <= canvas_position.y < canvas.size.height:\n return\n\n canvas.invert(canvas_position)", "def draw(self):\n self.figure.canvas.draw_idle()", "def draw(self):\n for section in self.sections:\n canvas_reset(self.canvas)\n section.draw(self.canvas)", "def display(self, canvas, x, y, width, height):\n pass", "def draw(self, x, y):\r\n for w in self.widgets:\r\n if w.visible:\r\n w.draw()\r\n self.pointer.position(x + self.p_dx, y + self.p_dy, 0.5)\r\n self.pointer.draw()", "def __init__(self, myCanvas, color, paddleW, paddleH, yAxisPos):\n self.canvas = myCanvas\n self.id = myCanvas.create_rectangle(0, 0, paddleW, paddleH, fill=color)\n\n # Getting height and width of current window\n self.canvas_width = self.canvas.winfo_width()\n self.canvas_height = self.canvas.winfo_height()\n\n # Horizontal Scroll\n self.x = 0\n\n # Centering from width and setting height as per yAxisPos\n self.canvas.move(self.id,\n (self.canvas_width//2) - paddleW // 2,\n ((int(self.canvas_height * yAxisPos)) - (paddleH//2)))\n\n # Binding Arrow Keys\n self.canvas.bind_all('<KeyPress-Left>', self.turn_left)\n self.canvas.bind_all('<KeyPress-Right>', self.turn_right)", "def draw(self):\n self.screen.blit(self.image, (self.x_pos1, self.y_pos))\n self.screen.blit(self.image, (self.x_pos2, self.y_pos))", "def scroll(self, axis, value):\n\n\t\tself._interface.scroll(axis, value)", "def update_drawer_img(self):\n self.drawer = aggdraw.Draw(self.img)\n self.drawer.settransform(self.coordspace_transform)", "def __init__(self,master,**kw):\n Frame.__init__(self,master,**kw)\n \n self.canvas=Canvas(self,scrollregion=(0,0,500,500))#,width=300,height=300,scrollregion=(0,0,500,500))\n self.internal_frame=Frame(self.canvas)\n self.hbar=Scrollbar(self,orient=HORIZONTAL)\n self.vbar=Scrollbar(self,orient=VERTICAL)\n\n interior_id=self.canvas.create_window((0,0),window=self.internal_frame,anchor=\"nw\")\n\n \n self.hbar.pack(side=BOTTOM,fill=X)\n self.hbar.config(command=self.canvas.xview)\n \n \n self.vbar.pack(side=RIGHT,fill=Y)\n self.vbar.config(command=self.canvas.yview)\n \n## 
self.canvas.config(width=300,height=300)\n self.canvas.config(xscrollcommand=self.hbar.set, yscrollcommand=self.vbar.set)\n self.canvas.bind_all(\"<MouseWheel>\",lambda x:self.on_mouse_wheel(x,self.canvas))\n self.canvas.pack(side=LEFT,expand=True,fill=BOTH)\n\n def _configure_interior(event):\n \"\"\"\n Figures out how big the interior frame needs to be\n \"\"\"\n # update the scrollbars to match the size of the inner frame\n size = (self.internal_frame.winfo_reqwidth(), self.internal_frame.winfo_reqheight())\n self.canvas.config(scrollregion=\"0 0 %s %s\" % size)\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n # update the canvas's width to fit the inner frame\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the canvas's width to fit the inner frame\n self.canvas.config(height=self.internal_frame.winfo_reqheight())\n self.internal_frame.bind('<Configure>', _configure_interior)\n\n def _configure_canvas(event):\n \"\"\"\n Figures out how bid the interior canvas needs to be\n \"\"\"\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n## print \"frame\",self.internal_frame.winfo_reqwidth()\n## print \"canvas\",self.canvas.winfo_width()\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(height=self.internal_frame.winfo_reqheight())\n self.canvas.bind('<Configure>', _configure_canvas)", "def updateScrollArea(self):\n iconx = []\n icony = []\n if len(self.icons) > 0:\n for item in self.icons:\n iconx.append(item.x())\n icony.append(item.y())\n self.setMinimumWidth(max(iconx)+75)\n self.setMinimumHeight(max(icony)+75)", "def __move_to(self, event):\n self.canvas_image.scan_dragto(event.x, event.y, gain=1)\n self.to_coord = (event.x, event.y)\n self.__show_image() # zoom tile and show it on the canvas", "def reset(self, canvwidth=None, canvheight=None, bg = None):\n if canvwidth:\n self.canvwidth = canvwidth\n if canvheight:\n self.canvheight = canvheight\n if bg:\n self.bg = bg\n self._canvas.config(bg=bg,\n scrollregion=(-self.canvwidth//2, -self.canvheight//2,\n self.canvwidth//2, self.canvheight//2))\n self._canvas.xview_moveto(0.5*(self.canvwidth - self.width + 30) /\n self.canvwidth)\n self._canvas.yview_moveto(0.5*(self.canvheight- self.height + 30) /\n self.canvheight)\n self.adjustScrolls()", "def init_canvas_frame(self, max_width=4000, max_height=4000):\n self.frames[\"canvas\"] = Frame(\n master=self.window, width=400, height=400)\n self.canvas = Canvas(\n master=self.frames[\"canvas\"],\n scrollregion=(0, 0, max_width, max_height),\n bg=\"white\")\n h_scrl_bar = Scrollbar(self.frames[\"canvas\"], orient=HORIZONTAL)\n h_scrl_bar.pack(side=BOTTOM, fill=X)\n h_scrl_bar.config(command=self.canvas.xview)\n v_scrl_bar = Scrollbar(self.frames[\"canvas\"], orient=VERTICAL)\n v_scrl_bar.pack(side=RIGHT, fill=Y)\n v_scrl_bar.config(command=self.canvas.yview)\n self.canvas.config(\n xscrollcommand=h_scrl_bar.set,\n yscrollcommand=v_scrl_bar.set)\n self.canvas.pack(side=LEFT, expand=True, fill=BOTH)\n self.frames[\"canvas\"].pack(\n anchor=\"nw\", side=LEFT, expand=True, fill=BOTH)\n\n 
self.canvas.bind(\"<ButtonPress-1>\", self.move_start)\n self.canvas.bind(\"<B1-Motion>\", self.move_move)\n self.canvas.bind(\"<Button-4>\", self.linux_zoomer_plus)\n self.canvas.bind(\"<Button-5>\", self.linux_zoomer_minus)\n # windows scroll\n self.canvas.bind(\"<MouseWheel>\", self.windows_zoomer)", "def __move_from(self, event):\n self.canvas_image.scan_mark(event.x, event.y)\n self.from_coord = (event.x, event.y)", "def __window_scrollByLines(self, lines):\n pass", "def draw(self):\r\n pygame.draw.rect(self.screen, self.background_color, self.bounds)\r\n line_window = self.lines[self.scroll_window_top:self.scroll_window_bottom]\r\n for idx,line in enumerate(line_window):\r\n text = self.font.render(line, True, self.foreground_color)\r\n x,y = self._get_x_y_from_pos(self.position[0], self.position[1]+idx)\r\n self.screen.blit(text,(x,y))\r\n \r\n if self.cursor_visible and self.scroll_window_bottom == len(self.lines):\r\n x,y = self._get_x_y_from_pos(len(line_window[-1]), len(line_window))\r\n cursor_rect = pygame.Rect(x,y,\r\n self.text_width,self.text_height)\r\n pygame.draw.rect(self.screen, self.foreground_color, cursor_rect)", "def scrollContentsBy(self, x, y):\n\n if not (self.panning or self.__zooming or self.isSettingCtrPt):\n # move the current center point if the user manually scrolls\n ptBeforeScale = self.mapToScene(self.centerPoint.toPoint())\n QGraphicsView.scrollContentsBy(self, x, y)\n ptAfterScale = self.mapToScene(self.centerPoint.toPoint())\n offset = ptBeforeScale - ptAfterScale\n self.centerPoint = self.centerPoint - offset\n else:\n # we're already adjusting the center point when we zoom so just\n # pass this on to the parent method\n QGraphicsView.scrollContentsBy(self, x, y)\n\n # we have to manually update the viewport if we have HUD items so\n # they draw properly.\n if self.hudItems:\n self.viewport().update()", "def yview_scroll(self, number, what):\n self.tk.call(self._w, 'yview', 'scroll', number, what)", "def draw(self, canvas, yloc):\n \n for card in self.hand:\n card.draw(canvas, (xloc+(self.hand.index(card)*CARD_SIZE[0]), yloc))", "def draw(self):\r\n scalex,scaley = self.getScale()\r\n try:\r\n self.clear()\r\n # Draw Graph Background\r\n self.drawLayout()\r\n if self.app.data == None:# If no data, break\r\n return\r\n # How much each pixel represents\r\n if scalex[1]-scalex[0] == 0:\r\n return\r\n step = (scalex[1]-scalex[0])/self.w# Draw lines at pixel level resolution\r\n self.fitYScale()\r\n sens_index = [0]# If one sensor displayed in this data player\r\n if len(self.sensor_ids) == 2:# If two sensors displayed in this data player\r\n sens_index = [1,0]# Draw order blue then red to make blue line on top\r\n for s in sens_index:\r\n i = scalex[0]\r\n x = 0\r\n trackcol = self.app.getSensorCol(self.sensors[self.sensor_ids[s]])\r\n while i < scalex[1]:\r\n i += step# i Is data\r\n x += 1# x is iteration/pixel-coordinate\r\n if i<0:# Skip data for t<0\r\n continue\r\n try:\r\n # Data retrieved from xml\r\n y = float(self.app.data[int(i)][self.sensor_ids[s]].text)\r\n y2 = float(self.app.data[int(i+step)][self.sensor_ids[s]].text)\r\n # Normalize into range 0 to 1 and multiply by height\r\n y = ((y-scaley[0])/(scaley[1]-scaley[0])) * self.h\r\n y2 = ((y2-scaley[0])/(scaley[1]-scaley[0])) * self.h\r\n except IndexError:# Missing data is skipped\r\n continue\r\n self.c.create_line(x,-y+self.h,x+1,-y2+self.h,fill=trackcol,width=1)\r\n self.drawScrubber()\r\n self.drawPeekScrubber()\r\n self.c.update()\r\n except tk.TclError:# If canvas destroyed, cancel 
draw operation\r\n return", "def Haut():\r\n X1, Y1, X2, Y2 = canvas.coords(boule)\r\n canvas.coords(boule,X1,Y1-20,X2,Y2-20)", "def redraw(self):\r\n self.c.update()", "def move_mouse(kf_x, m, img): \n exponent = 1.6\n x, y, x_vel, y_vel = (int(kf_x[0]), int(kf_x[1]), kf_x[2], kf_x[3])\n mx, my = m.position()\n win_height, win_width, channel = img.shape\n x_screen, y_screen = m.screen_size()\n min_x, max_x = 0, x_screen\n min_y, max_y = 0, y_screen \n\n #Calculations\n speed = np.sqrt(x_vel**2 + y_vel**2) \n power = math.pow(speed, exponent) \n ratio = speed / power\n theta = math.atan2(y_vel, x_vel) \n x_comp = power * math.cos(theta) \n y_comp = power * math.sin(theta) \n xf, yf = mx + x_comp, my + y_comp\n\n if xf < min_x: \n xf = min_x\n elif xf > max_x: \n xf = max_x\n elif yf < min_y: \n yf = min_y\n elif yf > max_y: \n yf = max_y\n m.move(xf, yf)\n return speed", "def move_by_pixels(self, dx, dy):\n wx, wy, w, h = self._raw_graph_window_dim()\n dwx = dx / w * (self.wsx2 - self.wsx1)\n dwy = dy / h * (self.wsy2 - self.wsy1)\n self.wsx1 -= dwx\n self.wsy1 -= dwy\n self.wsx2 -= dwx\n self.wsy2 -= dwy\n # moving the graph left releases anchoring\n if dx > 0:\n self.anchored = False\n self._hold_bounds()", "def configure_canvas(self):\r\n self.window.update_idletasks() # this updates window size\r\n\r\n border = 10\r\n self.canvas.config(\r\n width=self.window.winfo_reqwidth() + border,\r\n height=min(350, self.window.winfo_reqheight() + border,))\r\n self.canvas.configure(scrollregion=(\r\n 0, 0,\r\n self.window.winfo_reqwidth() + border,\r\n self.window.winfo_reqheight() + border))" ]
[ "0.7781856", "0.6695677", "0.6625122", "0.66034824", "0.64015204", "0.6336621", "0.62105596", "0.6134658", "0.6117314", "0.6087952", "0.60482293", "0.5974955", "0.59536266", "0.59532565", "0.5856979", "0.58364534", "0.58310264", "0.58037436", "0.5784162", "0.5746283", "0.5735475", "0.5700967", "0.56167144", "0.5609998", "0.5547529", "0.5539105", "0.55353874", "0.55134064", "0.55090874", "0.546265", "0.5455088", "0.54523665", "0.5438367", "0.5437902", "0.54273814", "0.5424633", "0.54157275", "0.53921103", "0.5391242", "0.53804076", "0.53777146", "0.53774667", "0.5365688", "0.53563654", "0.5354774", "0.5352245", "0.53451586", "0.5342894", "0.53387433", "0.5323614", "0.5299421", "0.5289036", "0.5276426", "0.5274366", "0.526362", "0.5261721", "0.525675", "0.5251821", "0.52491933", "0.52479905", "0.5240074", "0.52373433", "0.52292144", "0.5203698", "0.5184949", "0.51841944", "0.51810443", "0.51774096", "0.51755494", "0.516248", "0.5161708", "0.5155244", "0.5147369", "0.5147251", "0.5135243", "0.51308584", "0.5117956", "0.50946915", "0.508551", "0.50853556", "0.50840557", "0.5080142", "0.507947", "0.5077271", "0.5074023", "0.50669414", "0.50606793", "0.505639", "0.50543684", "0.5048108", "0.5045752", "0.50311285", "0.5029449", "0.5027353", "0.5022981", "0.5020851", "0.5020458", "0.501469", "0.5012018", "0.50020546" ]
0.78580177
0
Scroll canvas vertically and redraw the image
def __scroll_y(self, *args, **kwargs): self.canvas_image.yview(*args) # scroll vertically self.__show_image() # redraw the image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __scroll_y(self, *args, **kwargs):\n self.canvas.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def refresh(self):\n\n # Delete old image (if needed) \n if self.canvas_image_id:\n self.canvas.delete(self.canvas_image_id)\n if debug > 5:\n print \"refresh: New image (x\", self.zoom, \") \", (self.xint, self.yint), (self.canvas[\"width\"], self.canvas[\"height\"]), [self.zoom * s for s in self.isize]\n\n scaled_isize = [self.xint[1] - self.xint[0],\n self.yint[1] - self.yint[0]]\n\n # Create the image for the canvas\n self.image = self.generator_func(self.zoom, self.xint, self.yint)\n self.canvas_image_id = self.canvas.create_image(0, 0, anchor=N+W,\n image=self.image)\n\n # Figure out where scroll bars should be and put them there.\n if self.xint[0] == 0 and int(self.isize[0] * self.zoom) == self.xint[1]:\n self.hscroll.grid_remove()\n else:\n self.hscroll.grid()\n self.hscroll.set(mapped_number(self.xint[0],\n (0, self.isize[0] * self.zoom -1),\n (0, 1)),\n mapped_number(self.xint[1] -1,\n (0, self.isize[0] * self.zoom -1),\n (0, 1)))\n if self.yint[0] == 0 and int(self.isize[1] * self.zoom) == self.yint[1]:\n self.vscroll.grid_remove()\n else:\n self.vscroll.grid()\n self.vscroll.set(mapped_number(self.yint[0],\n (0, self.isize[1] * self.zoom -1),\n (0, 1)),\n mapped_number(self.yint[1] -1,\n (0, self.isize[1] * self.zoom -1),\n (0, 1)))", "def redraw_canvas(self, dy):\n self.scroll = dy/20\n \n if self.scroll > 0:\n \n if self.min_cairo < -20:\n self.min_cairo = 0 \n self.min_text += 1 \n self.max_text += 1\n \n #When bottom of document is reached stop scrolling\n if self.max_text > self.line_count + 2:\n self.min_cairo = 0\n self.min_text = self.line_count - 50\n self.max_text = self.line_count\n self.scroll = 0\n \n elif self.scroll < 0:\n if self.min_cairo > 0:\n self.min_cairo = -20\n self.min_text -= 1\n self.max_text -=1\n\n #Do not scroll up if already at top of document\n if self.min_text < 0:\n self.min_cairo = 20\n self.min_text = 0\n self.max_text = 50\n self.scroll = 0\n \n #Do the scrolling\n self.min_cairo -= self.scroll\n \n self.max_cairo = self.min_cairo\n self.invalidate_canvas()", "def scrollDown_y(self):\r\n if self.y_stack<self.img.shape[1]-1:\r\n self.y_stack+=1\r\n self.pixmap3=self.drawPixmap(\"xz\") \r\n self.lbl3.setPixmap(self.pixmap3)\r\n self.pixmap4= self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n self.y_stack_lbl.setText(str(self.y_stack+1) + '/' + str(self.img.shape[1]))", "def scrollDown(self):\r\n\r\n if self.z_stack<self.img.shape[0]-1:\r\n self.z_stack+=1\r\n \r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaled(500,500)\r\n self.pixmap= self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2= self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def continuous_scroll(self, context):\n\n self.drawing.redraw_canvas(self.dy)\n \n return True", "def scrollUp_y(self):\r\n if self.y_stack>0:\r\n self.y_stack-=1\r\n \r\n self.pixmap3=self.drawPixmap(\"xz\")\r\n self.lbl3.setPixmap(self.pixmap3)\r\n self.pixmap4= self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n self.y_stack_lbl.setText(str(self.y_stack+1) + '/' + str(self.img.shape[1]))", "def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")", "def draw(self, thing):\n thing.draw(self, Point([2,2]), flip=1)\n\n # configure the scroll 
region\n bbox = Canvas.bbox(self.canvas, ALL)\n self.canvas.configure(scrollregion=bbox)", "def __show_image(self):\n box_image = self.canvas_image.coords(self.container) # get image area\n box_canvas = (self.canvas_image.canvasx(0), # get visible area of the canvas\n self.canvas_image.canvasy(0),\n self.canvas_image.canvasx(self.canvas_image.winfo_width()),\n self.canvas_image.canvasy(self.canvas_image.winfo_height()))\n self.box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly\n \n # Get scroll region box\n box_scroll = [min(self.box_img_int[0], box_canvas[0]), min(self.box_img_int[1], box_canvas[1]),\n max(self.box_img_int[2], box_canvas[2]), max(self.box_img_int[3], box_canvas[3])]\n # Horizontal part of the image is in the visible area\n if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:\n box_scroll[0] = self.box_img_int[0]\n box_scroll[2] = self.box_img_int[2]\n # Vertical part of the image is in the visible area\n if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:\n box_scroll[1] = self.box_img_int[1]\n box_scroll[3] = self.box_img_int[3]\n # Convert scroll region to tuple and to integer\n self.canvas_image.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region\n x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile\n y1 = max(box_canvas[1] - box_image[1], 0)\n x2 = min(box_canvas[2], box_image[2]) - box_image[0]\n y2 = min(box_canvas[3], box_image[3]) - box_image[1]\n if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area\n image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid\n (int(x1 / self.__scale), int(y1 / self.__scale),\n int(x2 / self.__scale), int(y2 / self.__scale)))\n #\n imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))\n self.imageid = self.canvas_image.create_image(max(box_canvas[0], self.box_img_int[0]),\n max(box_canvas[1], self.box_img_int[1]),\n anchor='nw', image=imagetk)\n self.canvas_image.lower(self.imageid) # set image into background\n self.canvas_image.imagetk = imagetk # keep an extra reference to prevent garbage-collection", "def __show_image(self):\n box_image = self.canvas.coords(self.container) # get image area\n box_canvas = (self.canvas.canvasx(0), # get visible area of the canvas\n self.canvas.canvasy(0),\n self.canvas.canvasx(self.canvas.winfo_width()),\n self.canvas.canvasy(self.canvas.winfo_height()))\n box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly\n # Get scroll region box\n box_scroll = [min(box_img_int[0], box_canvas[0]), min(box_img_int[1], box_canvas[1]),\n max(box_img_int[2], box_canvas[2]), max(box_img_int[3], box_canvas[3])]\n # Horizontal part of the image is in the visible area\n if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:\n box_scroll[0] = box_img_int[0]\n box_scroll[2] = box_img_int[2]\n # Vertical part of the image is in the visible area\n if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:\n box_scroll[1] = box_img_int[1]\n box_scroll[3] = box_img_int[3]\n # Convert scroll region to tuple and to integer\n self.canvas.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region\n x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile\n y1 = max(box_canvas[1] - box_image[1], 0)\n x2 = min(box_canvas[2], box_image[2]) - box_image[0]\n y2 = min(box_canvas[3], box_image[3]) - 
box_image[1]\n if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area\n if self.__huge and self.__curr_img < 0: # show huge image\n h = int((y2 - y1) / self.imscale) # height of the tile band\n self.__tile[1][3] = h # set the tile band height\n self.__tile[2] = self.__offset + self.imwidth * int(y1 / self.imscale) * 3\n self.__image.close()\n self.__image = Image.open(self.path) # reopen / reset image\n self.__image.size = (self.imwidth, h) # set size of the tile band\n self.__image.tile = [self.__tile]\n image = self.__image.crop((int(x1 / self.imscale), 0, int(x2 / self.imscale), h))\n else: # show normal image\n image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid\n (int(x1 / self.__scale), int(y1 / self.__scale),\n int(x2 / self.__scale), int(y2 / self.__scale)))\n #\n imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))\n imageid = self.canvas.create_image(max(box_canvas[0], box_img_int[0]),\n max(box_canvas[1], box_img_int[1]),\n anchor='nw', image=imagetk)\n self.canvas.lower(imageid) # set image into background\n if self.last_image_id != 0:\n self.canvas.delete(self.last_image_id)\n\n self.last_image_id = imageid\n self.canvas.imagetk = imagetk # keep an extra reference to prevent garbage-collection", "def draw_canvas(self):\n\n self.canvas = Canvas(self)\n self.scrollbar = ttk.Scrollbar(self, orient= VERTICAL,\n command=self.canvas.yview) \n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n \n # make sure to add scrollbar before adding the canvas\n self.scrollbar.pack(side=RIGHT, fill=Y)\n self.canvas.pack(side=TOP, fill=BOTH, expand=1, padx=20, pady=20)\n \n # adding a frame to hold all the widgets, ttk Frame doesn't support\n # background config option \n self.frame = Frame(self.canvas) \n self.canvas.create_window(0,0,window=self.frame, anchor='nw')", "def adjustScrolls(self):\n cwidth = self._canvas.winfo_width()\n cheight = self._canvas.winfo_height()\n self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)\n self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)\n if cwidth < self.canvwidth or cheight < self.canvheight:\n self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,\n column=0, rowspan=1, columnspan=1, sticky='news')\n self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,\n column=1, rowspan=1, columnspan=1, sticky='news')\n else:\n self.hscroll.grid_forget()\n self.vscroll.grid_forget()", "def __reconfig__(self, event):\r\n x, y = event.width//2, event.height//2\r\n self.canvas.config(scrollregion=(-x, -y, x, y))", "def yview_scroll(self, number, what):\n self.tk.call(self._w, 'yview', 'scroll', number, what)", "def on_configure(self, event):\n self.testCanvas.configure(scrollregion=self.testCanvas.bbox('all'))\n self.testCanvas.yview_moveto(1)", "def _on_mousewheel(event):\n if event.num == 4 or event.delta > 0:\n canvas.yview_scroll(-1, \"units\" )\n elif event.num == 5 or event.delta < 0:\n canvas.yview_scroll(1, \"units\" )", "def __scroll_x(self, *args, **kwargs):\n self.canvas_image.xview(*args) # scroll horizontally\n self.__show_image() # redraw the image", "def scrollUp(self):\r\n if self.z_stack>0:\r\n self.z_stack-=1\r\n self.pixmap=self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def scrollDown_x(self):\r\n if self.x_stack<self.img.shape[2]-1:\r\n 
self.x_stack+=1\r\n \r\n self.pixmap5=self.drawPixmap(\"yz\")\r\n self.lbl5.setPixmap(self.pixmap5)\r\n self.pixmap6= self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)\r\n self.x_stack_lbl.setText(str(self.x_stack+1) + '/' + str(self.img.shape[2]))", "def draw(self, img):\n self._erase_last_line(self.img)\n\n idxs = np.argwhere(img[:, self._pos] == 0)\n self.prev_y = (idxs.min(), idxs.max())\n\n cv.line(img, (self._pos, 0), (self._pos, self.h), (0, 0, 0), 1)", "def Configure_YScroll( self ):\r\n Label(self.frame_scroll).pack( side = TOP )\r\n self.yscroll = Scrollbar( self.frame_scroll )\r\n self.yscroll.config( command = self.Vertical_Scroll )\r\n self.canvas_one.config( yscrollcommand = self.Double_Expand )\r\n self.canvas_two.config( yscrollcommand = self.Double_Expand )", "def redraw(self):\n self.vispy_viewer.canvas.update()", "def __scroll_x(self, *args, **kwargs):\n self.canvas.xview(*args) # scroll horizontally\n self.__show_image() # redraw the image", "def updatescroll(self):\n if self.node:\n #self.update_idletasks() # Required, else dimension of content may not have been computed ?\n forgetit, forgetit, x1, forgetit = self.bbox(ALL)\n self.sizetree = self.node.sizetree() + (self.winfo_height() / self.nodeheight) - 1\n self.configure(scrollregion = (0, 0, x1, self.sizetree * self.nodeheight))", "def scroll(self, delta_x, delta_y):\n if delta_x < 0:\n shift_x = 0\n xend = self.width + delta_x\n dt_x = 1\n else:\n shift_x = self.width - 1\n xend = delta_x - 1\n dt_x = -1\n if delta_y < 0:\n y = 0\n yend = self.height + delta_y\n dt_y = 1\n else:\n y = self.height - 1\n yend = delta_y - 1\n dt_y = -1\n while y != yend:\n x = shift_x\n while x != xend:\n self.format.set_pixel(\n self, x, y, self.format.get_pixel(self, x - delta_x, y - delta_y)\n )\n x += dt_x\n y += dt_y", "def on_mousewheel(self, event):\r\n self.container_widgets[\"order_canvas\"].yview_scroll(-1 * int(event.delta / 120), \"units\")\r\n # TODO FIX SCROLLING\r", "def down():\n global y, canvas # y é modificado\n canvas.create_line(x, y, x, y + 10)\n y += 10", "def redraw(self):\n self.vispy_widget.canvas.update()", "def scroll_window(self):\r\n window = tkinter.Frame(self.root)\r\n scroller = tkinter.Scrollbar(self.root, orient=\"vertical\",\r\n command=self.canvas.yview)\r\n self.canvas.configure(yscrollcommand=scroller.set)\r\n\r\n scroller.pack(side=\"right\", fill=\"y\")\r\n self.canvas.pack(side=\"left\", fill=\"both\", expand=True)\r\n self.canvas.create_window((4, 4), window=window, anchor=\"nw\",\r\n tags=\"self.window\")\r\n return window", "def draw(self, canvas):\n canvas.delete(\"all\")\n width = canvas.winfo_reqwidth()\n height = canvas.winfo_reqheight()\n\n image = ImageTk.PhotoImage(self.image())\n canvas.create_image(width/2, height/2, image=image)\n canvas.img = image", "def __wheel(self, event):\n x = self.canvas_image.canvasx(event.x) # get coordinates of the event on the canvas\n y = self.canvas_image.canvasy(event.y)\n if self.outside(x, y): return # zoom only inside image area\n scale = 1.0\n # Respond to Linux (event.num) or Windows (event.delta) wheel event\n if event.num == 5 or event.delta == -120: # scroll down, smaller\n if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels\n self.imscale /= self.__delta\n scale /= self.__delta\n if event.num == 4 or event.delta == 120: # scroll up, bigger\n i = min(self.canvas_image.winfo_width(), self.canvas_image.winfo_height()) >> 1\n if i < self.imscale: return # 1 pixel is bigger than the visible area\n 
self.imscale *= self.__delta\n scale *= self.__delta\n # Take appropriate image from the pyramid\n k = self.imscale * self.__ratio # temporary coefficient\n self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)\n self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))\n #\n self.canvas_image.scale('all', x, y, scale, scale) # rescale all objects\n # Redraw some figures before showing image on the screen\n self.redraw_figures() # method for child classes\n self.__show_image()", "def scrollUp_x(self):\r\n if self.x_stack>0:\r\n self.x_stack-=1\r\n \r\n self.pixmap5=self.drawPixmap(\"yz\")\r\n self.lbl5.setPixmap(self.pixmap5) \r\n self.pixmap6= self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)\r\n self.x_stack_lbl.setText(str(self.x_stack+1) + '/' + str(self.img.shape[2]))", "def update_scrollbar(self):\n self.testCanvas.bind('<Configure>', self.on_configure)\n self.testFrame.bind('<Configure>', self.on_configure)", "def scroll(self, direction):\n\n self.counter += direction # Counter of 'up' and 'down'\n do_redraw = self.counter == self.content_size - self.h\n\n if self.size > 0:\n self.count += direction\n pos = self.pos\n if math.fabs(self.count) == math.floor(self.content_size / self.h):\n pos += direction\n self.count = 0\n\n pos = max(0, pos) # Top limit\n pos = min(pos, self.h - self.size) # Bottom limit\n do_redraw = pos != self.pos # Redraw if pos has changed\n self.pos = pos\n\n if do_redraw:\n self._create()", "def __wheel(self, event):\n x = self.canvas.canvasx(event.x) # get coordinates of the event on the canvas\n y = self.canvas.canvasy(event.y)\n if self.outside(x, y): return # zoom only inside image area\n scale = 1.0\n # Respond to Linux (event.num) or Windows (event.delta) wheel event\n if event.num == 5 or event.delta == -120: # scroll down, smaller\n if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels\n self.imscale /= self.__delta\n scale /= self.__delta\n if event.num == 4 or event.delta == 120: # scroll up, bigger\n i = min(self.canvas.winfo_width(), self.canvas.winfo_height()) >> 1\n if i < self.imscale: return # 1 pixel is bigger than the visible area\n self.imscale *= self.__delta\n scale *= self.__delta\n # Take appropriate image from the pyramid\n k = self.imscale * self.__ratio # temporary coefficient\n self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)\n self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))\n #\n self.canvas.scale('all', x, y, scale, scale) # rescale all objects\n # Redraw some figures before showing image on the screen\n self.redraw_figures() # method for child classes\n self.__show_image()", "def onscroll(self, event):\n if self.out_graph is False:\n self.zoom += 10*event.step\n\n if self.zoom >= self.axe_X/2/self.FOV_img*self.FOV_img_Y:\n self.zoom = self.axe_X/2/self.FOV_img*self.FOV_img_Y\n\n if self.zoom <= 0:\n self.zoom = 0\n\n self.draw()", "def vertical_scroll(self, image, padding=True):\n\n image_list = list()\n width = image.size[0]\n # Scroll into the blank image.\n if padding:\n for x in range(8):\n section = image.crop((0, 0, x, 16))\n display_section = self.create_blank_image()\n display_section.paste(section, (8 - x, 0, 8, 16))\n image_list.append(display_section)\n\n #Scroll across the input image.\n for x in range(8, width + 1):\n section = image.crop((x - 8, 0, x, 16))\n display_section = self.create_blank_image()\n display_section.paste(section, (0, 0, 8, 16))\n 
image_list.append(display_section)\n\n #Scroll out, leaving the blank image.\n if padding:\n for x in range(width - 7, width + 1):\n section = image.crop((x, 0, width, 16))\n display_section = self.create_blank_image()\n display_section.paste(section, (0, 0, 7 - (x - (width - 7)), 16))\n image_list.append(display_section)\n\n #Return the list of images created\n return image_list", "def _on_scroll(self, event):\n self._zoom(event.step, draw=True)", "def redraw(self) -> None:\n self.canvas.draw_idle()\n self.Refresh()", "def update_current_image(self):\n\n rect = (0, 0, self.variables.canvas_width, self.variables.canvas_height)\n if self.variables.canvas_image_object is not None:\n self.variables.canvas_image_object.update_canvas_display_image_from_canvas_rect(rect)\n self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)\n self.update()", "def up():\n global y, canvas # y é modificado\n canvas.create_line(x, y, x, y - 10)\n y -= 10", "def update_canvas_display_image_from_full_image_rect(self, full_image_rect):\n\n self.set_decimation_from_full_image_rect(full_image_rect)\n decimated_image_data = self.get_decimated_image_data_in_full_image_rect(full_image_rect, self.decimation_factor)\n self.update_canvas_display_from_numpy_array(decimated_image_data)\n self.canvas_full_image_upper_left_yx = (full_image_rect[0], full_image_rect[1])", "def __window_scroll(self, x, y):\n pass", "def off_configure(self, event):\n self.testCanvas.configure(scrollregion=self.testCanvas.bbox('all'))\n self.testCanvas.yview_moveto(0)", "def on_draw_over_image(self):", "def NextFrame(self, event):\n buffer = self.GetDataBuffer()\n if buffer is not None:\n # Update bitmap widget with new image frame:\n self.bitmap.CopyFromBuffer(buffer)\n # Refresh panel to draw image into bitmap:\n self.Refresh()\n pass", "def scrollY(self,yrel):\n # get the display size\n dispw, disph = c_int(), c_int()\n SDL_GetRendererOutputSize(self.rend,dispw,disph)\n\n # scroll vertically\n self.scroll += yrel\n\n # limit scrolling\n if self.scroll <= 0:\n self.scroll = 0\n if self.scroll+disph.value >= (len(self.itemList.items)+1)*150+178:\n self.scroll = (len(self.itemList.items)+1)*150+178-disph.value", "def update_image(self, data):\n self.image_data.append(data)\n if len(self.image_data) > self.image_height:\n self.image_data = self.image_data[1:]\n\n self.image_render += 1\n\n # A 200 pixel tall image squashed into the render view does not\n # appear unpleasantely jumpy when scrolled by 5\n if self.image_render % 5 != 0 and self.image_render != 1:\n return\n\n img_data = range(len(self.image_data))\n\n position = 0\n while position < len(img_data):\n img_data[position] = self.image_data[position]\n position += 1\n\n new_data = numpy.array(img_data).astype(float)\n\n mid = self.main_image_dialog\n mid.image.set_data(new_data)\n\n # If you do autoscale here, it tends to jump around in appearing\n # to stretch to the window and be in 'normal' size\n mid.get_plot().replot()", "def init_canvas_frame(self, max_width=4000, max_height=4000):\n self.frames[\"canvas\"] = Frame(\n master=self.window, width=400, height=400)\n self.canvas = Canvas(\n master=self.frames[\"canvas\"],\n scrollregion=(0, 0, max_width, max_height),\n bg=\"white\")\n h_scrl_bar = Scrollbar(self.frames[\"canvas\"], orient=HORIZONTAL)\n h_scrl_bar.pack(side=BOTTOM, fill=X)\n h_scrl_bar.config(command=self.canvas.xview)\n v_scrl_bar = Scrollbar(self.frames[\"canvas\"], orient=VERTICAL)\n v_scrl_bar.pack(side=RIGHT, fill=Y)\n 
v_scrl_bar.config(command=self.canvas.yview)\n self.canvas.config(\n xscrollcommand=h_scrl_bar.set,\n yscrollcommand=v_scrl_bar.set)\n self.canvas.pack(side=LEFT, expand=True, fill=BOTH)\n self.frames[\"canvas\"].pack(\n anchor=\"nw\", side=LEFT, expand=True, fill=BOTH)\n\n self.canvas.bind(\"<ButtonPress-1>\", self.move_start)\n self.canvas.bind(\"<B1-Motion>\", self.move_move)\n self.canvas.bind(\"<Button-4>\", self.linux_zoomer_plus)\n self.canvas.bind(\"<Button-5>\", self.linux_zoomer_minus)\n # windows scroll\n self.canvas.bind(\"<MouseWheel>\", self.windows_zoomer)", "def scrollDown(self):\n if self.__firstShownLine < len(self.__data) - 1:\n self.__firstShownLine += 1\n self.__refreshContent()\n self.__printRow(self.__firstShownLine + self.height - 2)\n else:\n curses.beep()", "def draw(self):\n self.write_image()\n self.update()", "def update_canvas_display_image_from_full_image(self):\n\n full_image_rect = (0, 0, self.image_reader.full_image_ny, self.image_reader.full_image_nx)\n self.update_canvas_display_image_from_full_image_rect(full_image_rect)", "def __init__(self,master,**kw):\n Frame.__init__(self,master,**kw)\n \n self.canvas=Canvas(self,scrollregion=(0,0,500,500))#,width=300,height=300,scrollregion=(0,0,500,500))\n self.internal_frame=Frame(self.canvas)\n self.hbar=Scrollbar(self,orient=HORIZONTAL)\n self.vbar=Scrollbar(self,orient=VERTICAL)\n\n interior_id=self.canvas.create_window((0,0),window=self.internal_frame,anchor=\"nw\")\n\n \n self.hbar.pack(side=BOTTOM,fill=X)\n self.hbar.config(command=self.canvas.xview)\n \n \n self.vbar.pack(side=RIGHT,fill=Y)\n self.vbar.config(command=self.canvas.yview)\n \n## self.canvas.config(width=300,height=300)\n self.canvas.config(xscrollcommand=self.hbar.set, yscrollcommand=self.vbar.set)\n self.canvas.bind_all(\"<MouseWheel>\",lambda x:self.on_mouse_wheel(x,self.canvas))\n self.canvas.pack(side=LEFT,expand=True,fill=BOTH)\n\n def _configure_interior(event):\n \"\"\"\n Figures out how big the interior frame needs to be\n \"\"\"\n # update the scrollbars to match the size of the inner frame\n size = (self.internal_frame.winfo_reqwidth(), self.internal_frame.winfo_reqheight())\n self.canvas.config(scrollregion=\"0 0 %s %s\" % size)\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n # update the canvas's width to fit the inner frame\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the canvas's width to fit the inner frame\n self.canvas.config(height=self.internal_frame.winfo_reqheight())\n self.internal_frame.bind('<Configure>', _configure_interior)\n\n def _configure_canvas(event):\n \"\"\"\n Figures out how bid the interior canvas needs to be\n \"\"\"\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n## print \"frame\",self.internal_frame.winfo_reqwidth()\n## print \"canvas\",self.canvas.winfo_width()\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(height=self.internal_frame.winfo_reqheight())\n self.canvas.bind('<Configure>', _configure_canvas)", "def refresh_HDV(self):\n self.canvas.draw()\n 
self.dicom_navigation.parent.dicom_right_window.top_info.canvas_HDV.draw()", "def refresh_canvas(self):\n self.canvas.delete('all')\n self.draw_handler(self)\n self.canvas.after(CANVAS[\"REFRESH_TIME\"], self.refresh_canvas)", "def on_canvas_resize(self, event) -> None:\r\n\r\n self.painter.adjust_to_canvas()\r\n self.painter.draw_board()", "def mover_scroll(self, x, y):\n self.scrollx += x\n self.scrolly += y", "def update_scroll_region(self):\n self.configure(scrollregion=(-self._radius - self.circ_pad,\n -self._radius - self.circ_pad,\n self._radius + self.circ_pad,\n self._radius + self.circ_pad))", "def scrollUp(self):\n if self.__firstShownLine > 0:\n self.__firstShownLine -= 1\n self.__refreshContent()\n else:\n curses.beep()", "def redraw(self):\r\n self.c.update()", "def move_vertical(self):\r\n if self.movement == \"vertical\" and self.flag_move:\r\n self.move_ball()\r\n self.canvas.after(50, self.move_vertical)", "def refresh(self):\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.canvas.draw()", "def scroll(*args):", "def update(self):\n if (self.j + self.step >= self.image.shape[0]) and (self.i + self.step >= self.image.shape[1]):\n self.no_more_crops = True\n elif self.i + self.step >= self.image.shape[1]:\n self.i = 0\n self.j += self.step\n else:\n self.i += self.step", "def mouse_wheel(self, event):\n\n if event.num == 5 or event.delta == -120:\n event.widget.yview_scroll(1, UNITS)\n self.tablerowheader.yview_scroll(1, UNITS)\n if event.num == 4 or event.delta == 120:\n if self.canvasy(0) < 0:\n return\n event.widget.yview_scroll(-1, UNITS)\n self.tablerowheader.yview_scroll(-1, UNITS)\n self.redrawVisible()\n return", "def set_scrollregion(self, event=None):\n self.canvas.configure(scrollregion=self.canvas.bbox('all'))", "def reDraw(self):\n self.canvasIGetDrawnOn.delete(self.spriteOnCanvas)\n self.spriteImage = ImageTk.PhotoImage(self.spriteImageFile.rotate(self.faceHeading, expand=True))\n self.spriteOnCanvas=self.canvasIGetDrawnOn.create_image(self.xPos,self.yPos,image=self.spriteImage)", "def update(self):\n self.line.set_ydata(self._get_y_data())\n self.figure.canvas.draw()", "def redraw(self):\n offset = self.actual_row * self.row_size\n x = 5\n y = 5\n ind = 0\n self.scene.clear()\n for _ in range(self.column_size):\n for _ in range(self.row_size):\n if ind+offset < len(self.cards):\n self.draw_card(x, y, ind+offset)\n x += 90\n ind += 1\n x = 5\n y += 120", "def on_scroll(self, win, _deltax, deltay):\n self.zoom(deltay, glfw.get_window_size(win)[1])", "def on_scroll(self, win, _deltax, deltay):\n self.zoom(deltay, glfw.get_window_size(win)[1])", "def _update_scroll(self, path):\n global layout\n\n #Removes the widgets in the scroll layout, if there is any.\n scroll.remove_widget(layout)\n #Loads the new updated layout, and updates the showphotos layout.\n layout = self._showphotos(path)\n scroll.add_widget(layout)\n layout.do_layout()", "def draw_image(self):\n self.PDF.saveState()\n self.PDF.scale(1, -1)\n # self.PDF.drawImage(\n # LOGO, 490, -78, width=80, preserveAspectRatio=True, mask=\"auto\"\n # )\n self.PDF.restoreState()", "def draw(self, offset: IntegerPosition2D, canvas: Canvas) -> None:\n canvas_position: IntegerPosition2D\n if not self.buffer:\n canvas_position = IntegerPosition2D(0, 0)\n else:\n row: int = self.buffer.get_row(self.index)\n column: int = self.buffer.get_column(self.index)\n canvas_position = IntegerPosition2D(column, row)\n canvas_position += offset\n\n if not 0 <= canvas_position.x < canvas.size.width:\n return\n\n if not 0 <= 
canvas_position.y < canvas.size.height:\n return\n\n canvas.invert(canvas_position)", "def update(self):\n\n self.x += self.dx\n self.y += self.dy\n\n # draw image\n if self.visible:\n self.scene.screen.blit(self.image, (self.x, self.y))\n self.check_bounds()\n self.check_collisions()\n self.check_keys()", "def scroll(self, axis, value):\n\n\t\tself._interface.scroll(axis, value)", "def _on_scrollbar(self, *args) -> None:\r\n for textbox in self.textboxes:\r\n textbox.yview(*args)", "def _drawOnCanvas(self):\n self.canvas=np.ones(self.canvas.shape,dtype=np.uint8)*255\n for key in self.elements:\n graphElement=self.elements[key]\n graphElement.draw(self.canvas)\n self.sync=True", "def update_image(self, path=None):\n if path:\n self.image_path.current = path\n\n if self.image_path.current == self.image_canvas.image_path:\n self.image_canvas.fit_in_view()\n else:\n self.image_canvas.draw_image(self.image_path.current)", "def _on_scroll(self, event):", "def AppWinFrame(master):\n # auto scrolls\n vscrollbar = Scrollbar(master)\n vscrollbar.pack(side=RIGHT, fill=Y)\n canvas = Canvas(master, yscrollcommand=vscrollbar.set, bg='#999999')\n frame = Frame(canvas, borderwidth=2, relief=RIDGE, bg=\"#BFB8FE\")\n vscrollbar.config(command=canvas.yview)\n frame.pack(side=LEFT, fill=BOTH)\n #frame.update_idletasks()\n #canvas.config(scrollregion=canvas.bbox(\"all\"))\n #frame = Frame(master)\n return canvas", "def update_drawer_img(self):\n self.drawer = aggdraw.Draw(self.img)\n self.drawer.settransform(self.coordspace_transform)", "def draw(self):\n self.figure.canvas.draw_idle()", "def page_down(self):\n self.set_initial_offset(self.initial_offset + self.my_surface.get_height())", "def linux_zoomer_plus(self, event):\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def draw(self):\r\n scalex,scaley = self.getScale()\r\n try:\r\n self.clear()\r\n # Draw Graph Background\r\n self.drawLayout()\r\n if self.app.data == None:# If no data, break\r\n return\r\n # How much each pixel represents\r\n if scalex[1]-scalex[0] == 0:\r\n return\r\n step = (scalex[1]-scalex[0])/self.w# Draw lines at pixel level resolution\r\n self.fitYScale()\r\n sens_index = [0]# If one sensor displayed in this data player\r\n if len(self.sensor_ids) == 2:# If two sensors displayed in this data player\r\n sens_index = [1,0]# Draw order blue then red to make blue line on top\r\n for s in sens_index:\r\n i = scalex[0]\r\n x = 0\r\n trackcol = self.app.getSensorCol(self.sensors[self.sensor_ids[s]])\r\n while i < scalex[1]:\r\n i += step# i Is data\r\n x += 1# x is iteration/pixel-coordinate\r\n if i<0:# Skip data for t<0\r\n continue\r\n try:\r\n # Data retrieved from xml\r\n y = float(self.app.data[int(i)][self.sensor_ids[s]].text)\r\n y2 = float(self.app.data[int(i+step)][self.sensor_ids[s]].text)\r\n # Normalize into range 0 to 1 and multiply by height\r\n y = ((y-scaley[0])/(scaley[1]-scaley[0])) * self.h\r\n y2 = ((y2-scaley[0])/(scaley[1]-scaley[0])) * self.h\r\n except IndexError:# Missing data is skipped\r\n continue\r\n self.c.create_line(x,-y+self.h,x+1,-y2+self.h,fill=trackcol,width=1)\r\n self.drawScrubber()\r\n self.drawPeekScrubber()\r\n self.c.update()\r\n except tk.TclError:# If canvas destroyed, cancel draw operation\r\n return", "def update(self):\r\n # Update the decimal position of the kame.\r\n self.y -= self.speed_factor\r\n # Update the rect position.\r\n self.rect.y = self.y", "def scroll(self):\n x_position = 
self._player.get_position()[0]\n half_screen = self._master.winfo_width() / 2\n world_size = self._world.get_pixel_size()[0] - half_screen\n\n # Left side\n if x_position <= half_screen:\n self._view.set_offset((0, 0))\n\n # Between left and right sides\n elif half_screen <= x_position <= world_size:\n self._view.set_offset((half_screen - x_position, 0))\n\n # Right side\n elif x_position >= world_size:\n self._view.set_offset((half_screen - world_size, 0))", "def Haut():\r\n X1, Y1, X2, Y2 = canvas.coords(boule)\r\n canvas.coords(boule,X1,Y1-20,X2,Y2-20)", "def configure_canvas(self):\r\n self.window.update_idletasks() # this updates window size\r\n\r\n border = 10\r\n self.canvas.config(\r\n width=self.window.winfo_reqwidth() + border,\r\n height=min(350, self.window.winfo_reqheight() + border,))\r\n self.canvas.configure(scrollregion=(\r\n 0, 0,\r\n self.window.winfo_reqwidth() + border,\r\n self.window.winfo_reqheight() + border))", "def draw(self, surface, camera_scroll):\n sprites = self.sprites()\n surface_blit = surface.blit\n for spr in sprites:\n self.spritedict[spr] = surface_blit(\n spr.image, spr.rect.move(adjust_scroll(camera_scroll))\n )\n self.lostsprites = []", "def draw(self, surface, camera_scroll):\n sprites = self.sprites()\n surface_blit = surface.blit\n for spr in sprites:\n self.spritedict[spr] = surface_blit(\n spr.image, spr.rect.move(adjust_scroll(camera_scroll))\n )\n self.lostsprites = []", "def reset(self, canvwidth=None, canvheight=None, bg = None):\n if canvwidth:\n self.canvwidth = canvwidth\n if canvheight:\n self.canvheight = canvheight\n if bg:\n self.bg = bg\n self._canvas.config(bg=bg,\n scrollregion=(-self.canvwidth//2, -self.canvheight//2,\n self.canvwidth//2, self.canvheight//2))\n self._canvas.xview_moveto(0.5*(self.canvwidth - self.width + 30) /\n self.canvwidth)\n self._canvas.yview_moveto(0.5*(self.canvheight- self.height + 30) /\n self.canvheight)\n self.adjustScrolls()", "def on_scroll(self, event):\n if event.button == 'up':\n self.generations += 4000\n elif event.button == 'down':\n if self.generations >= 4000:\n self.generations -= 4000\n self.redraw()", "def draw(self, canvas, yloc):\n \n for card in self.hand:\n card.draw(canvas, (xloc+(self.hand.index(card)*CARD_SIZE[0]), yloc))", "def xview_scroll(self, number, what):\n self.tk.call(self._w, 'xview', 'scroll', number, what)", "def update_img(self):\n self.img = np.array(self.image)", "def _render_vertical(self, gc, lx, ly, rx, ry, mx, my):\n mx = lx + (rx - lx) / 2.\n with gc:\n gc.set_line_width(20)\n gc.set_stroke_color(self._get_border_color())\n tee_v(gc, lx, ly, rx, mx, my)\n\n gc.set_line_width(10)\n self.set_fill_color(gc)\n tee_v(gc, lx, ly, rx, mx, my)", "def update_pos(self):\n self.imgx=self.pathX[min(self.x,len(self.pathX)-1)]\\\n [min(self.y,len(self.pathX[self.x])-1)]\n self.imgy=self.pathY[min(self.x,len(self.pathY)-1)]\\\n [min(self.y,len(self.pathY[self.x])-1)]" ]
[ "0.78763294", "0.7029265", "0.6990234", "0.6822299", "0.67670774", "0.6654772", "0.6486098", "0.63472086", "0.6317045", "0.6261508", "0.62313896", "0.61648124", "0.6163939", "0.613647", "0.61271584", "0.61243415", "0.61053514", "0.6080344", "0.60629725", "0.606112", "0.6051997", "0.60451305", "0.6028399", "0.60230815", "0.60148615", "0.59800255", "0.59372365", "0.59371126", "0.5928361", "0.57631946", "0.5761915", "0.57358223", "0.5717023", "0.5713142", "0.5699793", "0.5664008", "0.56615216", "0.5655327", "0.5638193", "0.5627065", "0.561417", "0.5605", "0.55891997", "0.5569291", "0.55553913", "0.55263686", "0.55055994", "0.548964", "0.5487394", "0.54843044", "0.5473981", "0.54728335", "0.54655", "0.5457268", "0.5451602", "0.54503417", "0.5404737", "0.54007524", "0.53971153", "0.53788155", "0.5369551", "0.5363697", "0.5344875", "0.53385556", "0.5335633", "0.5331243", "0.5319795", "0.5318367", "0.5311108", "0.53100646", "0.5304356", "0.5304356", "0.5303856", "0.5301284", "0.52968246", "0.5294079", "0.5285127", "0.52769387", "0.5268388", "0.5265641", "0.5234583", "0.5222971", "0.5211473", "0.5210639", "0.5181468", "0.51659054", "0.51611626", "0.515372", "0.51535386", "0.51436776", "0.5143216", "0.51310533", "0.51310533", "0.51306945", "0.5118303", "0.50907016", "0.50903654", "0.5090026", "0.50842565", "0.507867" ]
0.79207057
0
Remember previous coordinates for scrolling with the mouse
def __move_from(self, event):
    self.canvas_image.scan_mark(event.x, event.y)
    self.from_coord = (event.x, event.y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store_current_position_as_previous(self):\n pos = self.get_current_position()\n self.previous_xloc = pos[0]\n self.previous_yloc = pos[1]\n self.previous_zloc = pos[2]\n return pos", "def mousePosition(self):", "def new_previous_position(self, pos):\n self.previous_xloc = pos[0]\n self.previous_yloc = pos[1]\n self.previous_zloc = pos[2]\n return True", "def OnMouseMotion(self, evt):\n if evt.Dragging() and evt.LeftIsDown():\n self.lastx, self.lasty = self.x, self.y\n self.x, self.y = evt.GetPosition()\n self.Refresh(False)", "def mousePositionRaw(self):", "def mousePositionRaw(self):", "def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')", "def _motion(self, event):\n if self.current:\n # modify the current line by changing the end coordinates\n # to be the current mouse position\n coords = event.widget.coords(self.current)\n coords[2] = event.x\n coords[3] = event.y\n\n event.widget.coords(self.current, *coords)", "def paddle_reset_position(self, mouse):\n if (0 + self.paddle.width / 2) <= mouse.x <= (self.window.width - self.paddle.width / 2):\n self.paddle_x = mouse.x - self.paddle.width / 2\n self.window.add(self.paddle, self.paddle_x, self.paddle_y)", "def OnMouseDown(self, evt):\n self.CaptureMouse()\n self.x, self.y = self.lastx, self.lasty = evt.GetPosition()", "def move_back(self):\r\n self.center_x, self.center_y = self.save_pos", "def reset(self):\n self.cur_pos = self._get_current_pos_in_1d()\n\n return self.cur_pos", "def move_previous():\n self.variables.table.set_joystick(False)\n self.variables.table.set_axis(\n [True, True, True]\n ) # so all axis can be adressed\n errorcode = self.variables.table.move_to(\n [self.previous_xloc, self.previous_yloc, self.previous_zloc],\n True,\n self.variables.default_values_dict[\"settings\"][\"height_movement\"],\n )\n # if errorcode:\n # self.variables.message_to_main.put(errorcode)\n self.variables.table.set_axis([True, True, False]) # so z axis is off again\n self.variables.table.set_joystick(True)", "def follow(self):\n\t\tpos = pygame.mouse.get_pos()\n\t\tself.x = pos[0]\n\t\tself.y = pos[1]\n\t\tself.draw()", "def on_dragg(self, event):\n if str(event.lastevent.button) == \"MouseButton.LEFT\":\n mX = event.xdata\n mY = event.ydata\n if mX and mY:\n if self.current_point is not None:\n self.x[self.current_point] = mX\n self.y[self.current_point] = mY\n self.redraw()", "def previous_line():\r\n set_point(point().previous_line())", "def __window_scroll(self, x, y):\n pass", "def onMove(self, event):\n\t\tif (event.xdata != None and event.ydata != None and event.xdata != self.xdata and event.ydata != self.ydata):\n\n\t\t\tself.xdata = event.xdata\n\t\t\tself.ydata = event.ydata\n\n\t\t\tfor loop in range(4):\n\t\t\t\tself.stokesFig.canvas.restore_region(self.background[loop])\n\t\t\t\tself.obsStokes[loop].set_ydata(self.stokes[loop][event.ydata, event.xdata, :])\n\t\t\t\tself.axStokes[loop].draw_artist(self.obsStokes[loop])\n\t\t\t\tself.axStokes[loop].draw_artist(self.axStokes[loop].get_yaxis())\n\t\t\t\tself.stokesFig.canvas.blit(self.axStokes[loop].bbox.expanded(1.4, 1.1))", "def previousRange(self):\r\n if (self.selectedmap > 0):\r\n self.pickMap(self.selectedmap-1)", "def odom_update(self, data):\n self, curr_pos = (data.pose.pose.position.x, data.pose.pose.position.y)", "def on_mouse_move(self, event):\n self.mouse = [event.xdata, event.ydata]\n\n # Update pan view on mouse move\n if self.panning is True:\n for a in self.pan_axes:\n a.drag_pan(1, event.key, event.x, 
event.y)\n\n # Async re-draw (redraws only on thread idle state, uses timer on backend)\n self.canvas.draw_idle()\n\n ##### Temporary place-holder for cached update #####\n self.update_screen_request.emit([0, 0, 0, 0, 0])", "def grab_current_point(self):\n self.open_gripper(80)\n time.sleep(2.5)\n self.execute_action((0, 0, -10), self.GRAB_ORIENTATION)\n self.open_gripper(-30)\n time.sleep(2.5)\n self.execute_action((0, 0, 10), self.GRAB_ORIENTATION)\n time.sleep(2.5)\n self.initial_position = np.array(self.get_current_cartesian_position().position)\n print self.initial_position", "def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)", "def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)", "def __reset_crosshair(self):\n self.lhor.set_ydata(self.y_coord)\n self.lver.set_xdata(self.x_coord)", "def on_mouse_move(self, event: PointEvent):\n self.x = event.x\n self.y = event.y\n self.handle_mouse(self.x, self.y)", "def reset_pos(self):\n\n return self.pos(1, 1)", "def apply_changes(self):\n self.x = self.buff_x\n self.y = self.buff_y\n self.buff_x = None\n self.buff_y = None", "def set_position(self, updated):\n self.buff_x = updated[0]\n self.buff_y = updated[1]", "def mouseMoveEvent(self, event):\n if self.view_state.tracking == TrackingMode.FREE and event.buttons() == QtCore.Qt.LeftButton:\n # Calculate the change in mouse position.\n new_mouse_pos = np.array([event.x(), event.y()])\n mouse_delta = new_mouse_pos - self.view_state.mouse\n\n # Add this to the view centre.\n self.view_state.centre = self.view_state.centre - mouse_delta * (1 / self.view_state.scale)\n self.view_state.mouse = new_mouse_pos", "def click_action(event, ax):\n global newcoords, oldcoords, count\n\n if count % 2 == 0:\n newcoords.append((event.xdata, event.ydata))\n print('NEW', event.xdata, event.ydata)\n else:\n oldcoords.append((event.xdata, event.ydata))\n print('OLD', event.xdata, event.ydata)\n # update count\n count += 1", "def update_pos(self):\n self.imgx=self.pathX[min(self.x,len(self.pathX)-1)]\\\n [min(self.y,len(self.pathX[self.x])-1)]\n self.imgy=self.pathY[min(self.x,len(self.pathY)-1)]\\\n [min(self.y,len(self.pathY[self.x])-1)]", "def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")", "def odom_update(self, data):\n self.curr_pos = (data.pose.pose.position.x, data.pose.pose.position.y)", "def stop(self):\n self.change_x = 0\n self.change_y = 0", "def drag(self, event):\n self.yview('scroll', self.ypos - event.y, 'units')\n self.xview('scroll', self.xpos - event.x, 'units')\n self.ypos = event.y\n self.xpos = event.x", "def onRelease(event):\r\n global initPos\r\n initPos = None # Reset the position ready for next click\r", "def mouse_position(self):\r\n # TODO: add: Now deprecated in favor of pi3d.events\r\n if self.mouse:\r\n return self.mouse.position()\r\n elif self.tkwin:\r\n return self.tkwin.winfo_pointerxy()\r\n else:\r\n return -1, 
-1", "def getPreviousDataPoints(self):\n return self.dataPoints", "def move_previous_position(self, lifting=800, **kwargs):\n\n return self.move_to(\n [self.previous_xloc, self.previous_yloc, self.previous_zloc],\n True,\n lifting,\n **kwargs\n )", "def _on_scroll(self, event):", "def _prev(self, _):\n self.notebook.SetSelection(self.idx-1)", "def normal_mouse_move(self, event):\n plot = self.component\n if plot is not None:\n if isinstance(plot, BaseXYPlot):\n ndx = plot.map_index((event.x, event.y), index_only = True)\n x = plot.index.get_data()[ndx]\n y = plot.value.get_data()[ndx]\n print self.format % (x,y)\n else:\n print \"dataprinter: don't know how to handle plots of type\",\n print plot.__class__.__name__\n return", "def reset(self):\n self.t = 0.0\n self.last_t = None\n self.current_y = np.copy(self.start_y)\n self.current_yd = np.copy(self.start_yd)", "def mover_scroll(self, x, y):\n self.scrollx += x\n self.scrolly += y", "def __reconfig__(self, event):\r\n x, y = event.width//2, event.height//2\r\n self.canvas.config(scrollregion=(-x, -y, x, y))", "def handle_motion(self, x, y):\n if self.pressed_flag:\n self.last_point = (x, y)\n\n # trigger canvas to redraw itself\n self.redraw()", "def on_release(self, event):\n self.current_point = None", "def getMouse(self):\n self.mouseX = None\n self.mouseY = None\n while self.mouseX == None or self.mouseY == None:\n #self.update()\n _tkCall(self.update)\n if self.isClosed(): raise GraphicsError, \"getMouse in closed window\"\n time.sleep(.1) # give up thread\n x,y = self.toWorld(self.mouseX, self.mouseY)\n self.mouseX = None\n self.mouseY = None\n return Point(x,y)", "def set_new_location(self, xPos, yPos):", "def ev_MOUSEMOTION(self, event):", "def _updateOnMouseState(self, state):\n x = state.X.abs\n y = state.Y.abs\n \n mscale = self.mouse_icon.getScale() \n \n if (x + mscale[0] + self.mouse_offset) > render_engine.Window.width:\n x = x - mscale[0] - 10\n else:\n x += self.mouse_offset\n \n if (y + mscale[1] + self.mouse_offset) > render_engine.Window.height:\n y = y - mscale[1] - 10\n else:\n y += self.mouse_offset\n \n self.mouse_icon.setPosition((x, y))", "def on_mouse_move(self, event):\n\n # self.view = 1 * np.eye(4, dtype=np.float32)\n # self.model = 1 * np.eye(4, dtype=np.float32)\n\n # self.translate -= event.delta[1]\n # self.translate = max(-1, self.translate)\n # print(event.delta[1])\n # print(self.translate)\n # self.view = translate((0, 0, -self.translate))\n # self.game_program['u_view'] = self.view\n # self.game_program['u_size'] = 5 / self.translate\n # self.view = (0.1*self.translate*np.eye(4, dtype=np.float32)) + self.view\n # self.model = (0.1*self.translate*np.eye(4, dtype=np.float32)) + self.model\n # print(self.view)\n\n # self.game_program['u_model'] = self.model\n # self.game_program['u_view'] = self.view\n\n x, y = event.pos\n #print(x, y)\n self.x_offset, self.y_offset = x - self.last_x, - (y - self.last_y)\n self.last_x, self.last_y = x, y\n self.x_offset *= self.sensitivity\n self.y_offset *= self.sensitivity\n\n self.yaw, self.pitch = self.yaw - self.x_offset, self.pitch + self.y_offset\n self.rot_y(self.yaw * np.pi / 180)\n self.rot_x(self.pitch * np.pi / 180)\n\n self.view = np.dot(self.rot_mat_y, self.rot_mat_x)\n self.game_program['u_view'] = self.view\n\n self.update()", "def callback_handle_left_mouse_motion(self, event):\n\n # TODO: update this for the case where there is no current shape id\n vector_object = self.get_vector_object(self.variables.current_shape_id)\n if self.variables.active_tool 
== TOOLS.PAN_TOOL:\n x_dist = event.x - self.variables.tmp_anchor_point[0]\n y_dist = event.y - self.variables.tmp_anchor_point[1]\n self.move(self.variables.image_id, x_dist, y_dist)\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.TRANSLATE_SHAPE_TOOL:\n x_dist = event.x - self.variables.tmp_anchor_point[0]\n y_dist = event.y - self.variables.tmp_anchor_point[1]\n t_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n new_coords = numpy.asarray(t_coords) + x_dist\n new_coords_y = numpy.asarray(t_coords) + y_dist\n new_coords[1::2] = new_coords_y[1::2]\n if vector_object.image_drag_limits:\n canvas_limits = self.image_coords_to_canvas_coords(vector_object.image_drag_limits)\n x_vertices = new_coords[0::2]\n y_vertices = new_coords[1::2]\n within_x_limits = True\n within_y_limits = True\n for x_vertex in x_vertices:\n if canvas_limits[2] < x_vertex or x_vertex < canvas_limits[0]:\n within_x_limits = False\n for y_vertex in y_vertices:\n if y_vertex < canvas_limits[1] or y_vertex > canvas_limits[3]:\n within_y_limits = False\n if not within_x_limits:\n new_coords[0::2] = t_coords[0::2]\n if not within_y_limits:\n new_coords[1::2] = t_coords[1::2]\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id,\n new_coords,\n update_pixel_coords=True)\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.EDIT_SHAPE_COORDS_TOOL:\n previous_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n coord_x_index = self.variables.tmp_closest_coord_index*2\n coord_y_index = coord_x_index + 1\n new_coords = list(previous_coords)\n new_coords[coord_x_index] = event.x\n new_coords[coord_y_index] = event.y\n if vector_object.image_drag_limits:\n drag_x_lim_1, drag_y_lim_1, drag_x_lim_2, drag_y_lim_2 = \\\n self.image_coords_to_canvas_coords(vector_object.image_drag_limits)\n if new_coords[coord_x_index] < drag_x_lim_1:\n new_coords[coord_x_index] = drag_x_lim_1\n if new_coords[coord_x_index] > drag_x_lim_2:\n new_coords[coord_x_index] = drag_x_lim_2\n if new_coords[coord_y_index] < drag_y_lim_1:\n new_coords[coord_y_index] = drag_y_lim_1\n if new_coords[coord_y_index] > drag_y_lim_2:\n new_coords[coord_y_index] = drag_y_lim_2\n\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, tuple(new_coords))\n elif self.variables.active_tool == TOOLS.ZOOM_IN_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.ZOOM_OUT_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.SELECT_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_ELLIPSE_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_POINT_BY_CLICKING:\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (event.x, event.y))", "def remember_pos(self, directory, position):\n self.dir_pos[directory] = position", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if 
glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def history_buffer_pos_changed():\n # Only when this buffer has the focus.\n if buffer_mapping.focus_stack[-1] == HISTORY_BUFFER:\n line_no = history_buffer.document.cursor_position_row\n\n if line_no in history_mapping.selected_lines:\n default_lineno = sorted(history_mapping.selected_lines).index(line_no) + \\\n history_mapping.result_line_offset\n\n default_buffer.cursor_position = \\\n default_buffer.document.translate_row_col_to_index(default_lineno, 0)", "def currentstate_callback(self, odom):\n self.CurrentPosition = np.array([odom.pose.pose.position.x, odom.pose.pose.position.y, odom.pose.pose.position.z])\n self.CurrentVelocity = np.array([odom.twist.twist.linear.x, odom.twist.twist.linear.y, odom.twist.twist.linear.z])", "def reset_position(self):\n self.rect.left, self.rect.top = self.start_pos", "def undo(self):\n if self.history:\n xy0, xy1, data_size = self.history.pop()\n x0, y0 = xy0\n x1, y1 = xy1\n self._used[y1][x1] -= data_size\n self._used[y0][x0] = data_size\n if self.goal == xy1:\n self.goal = xy0", "def remember(self, grid_x, grid_y, obj):\n self.memory[grid_x][grid_y] = obj", "def update(self):\n self.x = games.mouse.x\n #self.y = games.mouse.y\n self.check_collide()", "def get_mouse_pos(new_x_coord, new_y_coord):\n\n x_change = 0\n y_change = 0\n \n # if the joystick returned to its default position (0,0), stop mouse movement\n if not (new_x_coord == 0 and new_y_coord == 0):\n if new_x_coord == 0:\n x_change = 0\n else:\n x_change = new_x_coord\n\n if new_y_coord == 0:\n y_change = 0\n else:\n y_change = -new_y_coord\n \n return (int(x_change), int(y_change))", "def __window_scrollTo(self, x, y):\n pass", "def mouseMoveEvent(self, e):\n if e.pos().y() == self.offset:\n return\n adder = (self.offset - e.y())\n self.deltacount += adder\n #adder *= self.accelerator\n adder *= (abs(adder) * 0.01)\n #self._state[0] = max(self._min[0], min(self._max[0], self._state[0] + adder))\n QtGui.qApp.emit( QtCore.SIGNAL(\"deltaChanged\"), self, adder)\n #self._param.update()\n QtGui.QCursor.setPos(self.origo)", "def updateFirstPoint(self):\n x, y = self.machine.plot.dataToPixel(*self._firstPos, check=False)\n\n offset = self.machine.getDragThreshold()\n points = [(x - offset, y - offset),\n (x - offset, y + offset),\n (x + offset, y + offset),\n (x + offset, y - offset)]\n points = [self.machine.plot.pixelToData(xpix, ypix, check=False)\n for xpix, ypix in points]\n self.machine.setSelectionArea(points, fill=None,\n color=self.machine.color,\n name='first_point')", "def _update_coords(self):\n x, y = self._head_coord.x, self._head_coord.y\n dx, dy = 0, 0\n if self._rotation == Rotation.HORIZONTAL:\n dx = 1\n elif self._rotation == Rotation.VERTICAL:\n dy = 1\n else:\n raise RuntimeError(\"unknown rotation parameter\")\n self._coords = frozenset(Coordinate(x + dx * i, y + dy * i) for i in range(self._size))", "def on_mousewheel(self, event):\r\n self.container_widgets[\"order_canvas\"].yview_scroll(-1 * int(event.delta / 120), \"units\")\r\n # TODO FIX SCROLLING\r", "def mouseReleaseEvent(self, ev):\n super(PlotObject, self).mouseReleaseEvent(ev)\n if self._downpos == ev.pos():\n x 
= ev.pos().x()\n y = ev.pos().y()\n if ev.button() == 2 :\n self.mPosition()\n elif ev.button() == 1:\n x = x - self.width() / 2\n y = y - self.height() / 2\n #self.pan(-x, -y, 0, relative=True)\n print(self.opts['center'])\n print(x,y)\n self._prev_zoom_pos = None\n self._prev_pan_pos = None", "def figure_mouse_press(self, event):\n \n # Add/remove an additional point?\n if event.dblclick:\n\n if event.button == 1:\n # Add a point.\n points = np.vstack([\n self.ax_order.collections[0].get_offsets(),\n [event.xdata, event.ydata]\n ])\n # TODO: set size by their weight?\n self.ax_order.collections[0].set_offsets(points)\n\n else:\n # Are we within <tolerance of a point?\n points = self.ax_order.collections[0].get_offsets()\n\n # Need to scale x-distance to convert to pixels.\n idx = self.current_order.dispersion.searchsorted(event.xdata)\n xscale = np.nanmean(\n np.diff(self.current_order.dispersion[idx-5:idx+5]))\n\n \"\"\"\n bbox = self.ax_order.get_window_extent().transformed(\n self.norm_plot.dpi_scale_trans.inverted())\n width = bbox.width * self.norm_plot.dpi\n height = bbox.height * self.norm_plot.dpi\n print(width, height)\n \"\"\"\n # TODO: Fix this distance thing.\n\n distance = np.sqrt(\n ((points[:, 0] - event.xdata)/xscale)**2 \\\n + (points[:, 1] - event.ydata)**2)\n \n if distance.size > 0:\n\n index = np.argmin(distance)\n if distance[index] < PIXEL_PICKER_TOLERANCE:\n # Remove that point.\n keep = np.ones(points.shape[0], dtype=bool)\n keep[index] = False\n self.ax_order.collections[0].set_offsets(points[keep])\n\n else:\n print(\"Closest point {} px away\".format(distance[index]))\n\n # Update the cache.\n idx = self.current_order_index\n N = points.shape[0]\n # TODO: adhere to the knot weights\n self._cache[\"input\"][\"additional_points\"] \\\n = np.hstack((points, 100 * np.ones(N).reshape((N, 1))))\n self.fit_continuum(clobber=True)\n self.draw_continuum(refresh=True)\n\n return None\n \n if event.button != 1: return None\n # Single click.\n # Set up/update the excluded region.\n xmin, xmax, ymin, ymax = (event.xdata, np.nan, -1e8, +1e8)\n try:\n self._exclude_selected_region\n except AttributeError:\n self._exclude_selected_region = self.ax_order.axvspan(**{\n \"xmin\": xmin,\n \"xmax\": xmax,\n \"ymin\": ymin,\n \"ymax\": ymax,\n \"facecolor\": \"r\",\n \"edgecolor\": \"none\",\n \"alpha\": 0.25,\n \"zorder\": -1\n })\n\n else:\n self._exclude_selected_region.set_xy([\n [xmin, ymin],\n [xmin, ymax],\n [xmax, ymax],\n [xmax, ymin],\n [xmin, ymin]\n ])\n\n # Set the signal and the time.\n self._exclude_selected_region_signal = (\n time(),\n self.norm_plot.mpl_connect(\n \"motion_notify_event\", self.update_exclude_selected_region)\n )\n return None", "def reset(self):\n self.x_prev = np.zeros_like(self.mu)", "def pos_updated(self,next_pos):\n #if (int(self.oldx) == int(self.x) and int(self.oldy) == int(self.y)):\n if (int(next_pos[0]) == int(self.x) and int(next_pos[1]) == int(self.y)):\n return False\n else:\n return True", "def default_buffer_pos_changed():\n # Only when this buffer has the focus.\n if buffer_mapping.focus_stack[-1] == DEFAULT_BUFFER:\n try:\n line_no = default_buffer.document.cursor_position_row - \\\n history_mapping.result_line_offset\n\n if line_no < 0: # When the cursor is above the inserted region.\n raise IndexError\n\n history_lineno = sorted(history_mapping.selected_lines)[line_no]\n except IndexError:\n pass\n else:\n history_buffer.cursor_position = \\\n history_buffer.document.translate_row_col_to_index(history_lineno, 0)", "def 
move_start(event):\n nonlocal x, y\n x = event.x \n y = event.y\n window['cursor'] = utils.CURSORS['move_item']", "def on_position_change(self) -> None:\n pass", "def mouse_position_event(self, x: int, y: int):\n pass", "def update(self):\n self.pos_x -=1", "def reset(self):\n self._x = 0\n self._y = 0", "def _move_cursors_to_pos(self):\n for axis in range(3):\n x, y = self._vox[list(self._xy_idx[axis])]\n self._images['cursor_v'][axis].set_xdata([x, x])\n self._images['cursor_h'][axis].set_ydata([y, y])\n self._zoom(0) # doesn't actually zoom just resets view to center\n self._update_images(draw=True)\n self._update_moved()", "def calc_positions(self) :\n\t\tx, y = self.x0, self.y0\n\n\t\twhile self.is_visible(x, y) :\n\t\t\tx = 0.5 * self.gx * self.t**2 + self.vx0 * self.t + self.x0\n\t\t\ty = 0.5 * self.gy * self.t**2 + self.vy0 * self.t + self.y0\n\t\t\t\n\t\t\tself.t += self.dt\n\t\t\tself.pos_x.append(x)\n\t\t\tself.pos_y.append(y)", "def reset_position(self):\n self.set_position(copy.deepcopy(self.ab_pos))", "def reset_coords(self):\n library.MagickResetImagePage(self.wand, None)", "def clear(self):\n self._x_prev = None\n self._y_prev = None", "def _on_mousewheel(event):\n if event.num == 4 or event.delta > 0:\n canvas.yview_scroll(-1, \"units\" )\n elif event.num == 5 or event.delta < 0:\n canvas.yview_scroll(1, \"units\" )", "def _update_coords(self, change=None):\n if self.node_id:\n x, y = self.layout[self.node_id]\n self.coords = (x - self.dist, x + self.dist, y - self.dist, y + self.dist)", "def ghost_points(self):\n return self.central.loffset, self.central.roffset", "def current_desired_pos(event):\n #get the current desired position from /desired_pos (formerly /pos_for_IK)\n #f_state.desired_pos = deepcopy(event.data)\n #switch to force feedback state when command is received from /desired_pos\n f_state.desired_pos[0] = event.data[0]\n f_state.desired_pos[1] = event.data[1]\n f_state.desired_pos[2] = event.data[2]", "def GetPosition(self):\n ...", "def move_to_position1(self):", "def restore_last_undo_point(self):\n self.unload()", "def _onmove(self, event):", "def follow_mouse(self, mouse):\n half_width = self.width() / 2\n self.left = mouse.get_x() - half_width\n self.right = mouse.get_x() + half_width", "def update_location(self):\n if self.simulation:\n return (self.y, self.x)\n else:\n raise NotImplementedError\n\n self.y = new_y\n self.x = new_x\n\n return (new_y, new_x)", "def updateposition(self, event):\n\n self.log('Updating position', lvl=verbose)\n self.current_position = event.vessel.geojson['coordinates']", "def update(self):\n pos = pygame.mouse.get_pos()\n self.rect.midtop = pos\n if self.punching:\n self.rect.move_ip(5, 10) # move fist position in place", "def update(self):\r\n self.x = games.mouse.x\r\n self.y = games.mouse.y\r\n self.check_collide()", "def reset(self):\n self.xview_moveto(0)\n self.yview_moveto(0)\n self.zoomMap(1, 0, 0)", "def change_pos(self, direction):\n if direction == Direction.UP:\n self._y_pos -= 1\n elif direction == Direction.DOWN:\n self._y_pos += 1\n elif direction == Direction.LEFT:\n self._x_pos -= 1\n elif direction == Direction.RIGHT:\n self._x_pos += 1\n self._coordinates = self.coordinates()", "def getMouse(self):\n self.update() # flush any prior clicks\n self.mouseX = None\n self.mouseY = None\n while self.mouseX == None or self.mouseY == None:\n self.update()\n if self.isClosed(): raise GraphicsError(\"getMouse in closed window\")\n time.sleep(.1) # give up thread\n x,y = self.mouseX, self.mouseY\n self.mouseX = 
None\n self.mouseY = None\n return Point(x,y)", "def mouse_move(self, obj, event):\n last_pos = self.iren.GetLastEventPosition()\n next_pos = self.iren.GetEventPosition()\n last_disp_coords = np.asarray([last_pos[0], last_pos[1], 0])\n next_disp_coords = np.asarray([next_pos[0], next_pos[1], 0])\n last_world_coords = self.display_to_world(last_disp_coords)\n next_world_coords = self.display_to_world(next_disp_coords)\n world_direction = (last_world_coords - next_world_coords)[0]\n\n if world_direction > 0:\n direction = 'forwards'\n elif world_direction < 0:\n direction = 'backwards'\n else:\n direction = 'none'\n\n if self.cone_dir == 'start':\n if direction == 'backwards':\n self.start_base_x += .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n isvalid = self.gaps.set_dragged_start(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.start_base_x -= .5\n return\n\n elif direction == 'forwards':\n if self.start_base_x > 0:\n self.start_base_x -= .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n self.gaps.set_dragged_start(ind)\n self.ren_win.Render()\n\n if self.cone_dir == 'end':\n if direction == 'backwards':\n if self.end_base_x > 0:\n self.end_base_x -= .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n self.gaps.set_dragged_end(ind)\n self.ren_win.Render()\n\n elif direction == 'forwards':\n self.end_base_x += .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n isvalid = self.gaps.set_dragged_end(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.end_base_x -= .5\n return" ]
[ "0.68040586", "0.66060555", "0.65336144", "0.647154", "0.61659133", "0.61659133", "0.61084694", "0.60922366", "0.60622627", "0.60567003", "0.5945194", "0.59199005", "0.5912067", "0.5889155", "0.58295685", "0.5828022", "0.5808109", "0.57255214", "0.5721227", "0.5708124", "0.57071894", "0.5696546", "0.5690835", "0.5690835", "0.56868744", "0.567172", "0.5661678", "0.5655979", "0.5649716", "0.5638312", "0.5624212", "0.55987144", "0.5595688", "0.5590619", "0.5564057", "0.55547553", "0.5552436", "0.55507004", "0.5529923", "0.5500046", "0.5498026", "0.5494181", "0.5494116", "0.5483656", "0.54804647", "0.5469848", "0.54693", "0.54681516", "0.54622203", "0.54579645", "0.54408276", "0.5440206", "0.5439931", "0.5438056", "0.5434759", "0.5433757", "0.5433757", "0.54313093", "0.5425335", "0.54183537", "0.5413376", "0.54132", "0.54101634", "0.5404141", "0.5400514", "0.53895056", "0.5366954", "0.5362232", "0.5355329", "0.53462124", "0.53411716", "0.5341061", "0.53369254", "0.5331762", "0.53309655", "0.5327976", "0.532725", "0.5326113", "0.53219557", "0.5319572", "0.5318291", "0.53161484", "0.5313101", "0.53129303", "0.53120536", "0.5307425", "0.5292512", "0.5284289", "0.5281508", "0.5275114", "0.5273992", "0.5269741", "0.5267926", "0.5267819", "0.5266811", "0.5249281", "0.5247539", "0.5244938", "0.5242751", "0.52415216", "0.52369773" ]
0.0
-1
Drag (move) canvas to the new position
def __move_to(self, event):
    self.canvas_image.scan_dragto(event.x, event.y, gain=1)
    self.to_coord = (event.x, event.y)
    self.__show_image()  # zoom tile and show it on the canvas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_move(self, event):\n self.canvas.scan_dragto(event.x, event.y, gain=1)", "def drag(self,x,y):\n self.x=x\n self.y=y", "def drag(self, event):\n self.yview('scroll', self.ypos - event.y, 'units')\n self.xview('scroll', self.xpos - event.x, 'units')\n self.ypos = event.y\n self.xpos = event.x", "def motion(self, event):\n dx = event.x - self.dragx\n dy = event.y - self.dragy\n\n self.dragx = event.x\n self.dragy = event.y\n\n self.canvas.move(self.tags, dx, dy)\n self.diag.update_arrows()", "def event_drag_line(self, event):\n\n if self.variables.current_shape_id:\n self.show_shape(self.variables.current_shape_id)\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n if self.get_vector_object(self.variables.current_shape_id).image_drag_limits:\n drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits\n canvas_lims = self.image_coords_to_canvas_coords(drag_lims)\n if event_x_pos < canvas_lims[0]:\n event_x_pos = canvas_lims[0]\n elif event_x_pos > canvas_lims[2]:\n event_x_pos = canvas_lims[2]\n if event_y_pos < canvas_lims[1]:\n event_y_pos = canvas_lims[1]\n elif event_y_pos > canvas_lims[3]:\n event_y_pos = canvas_lims[3]\n self.modify_existing_shape_using_canvas_coords(\n self.variables.current_shape_id,\n (self.variables.current_shape_canvas_anchor_point_xy[0],\n self.variables.current_shape_canvas_anchor_point_xy[1],\n event_x_pos, event_y_pos))", "def move(self, dx, dy):\n \n self._move(dx,dy)\n canvas = self.canvas\n if canvas and not canvas.isClosed():\n trans = canvas.trans\n if trans:\n x = dx/ trans.xscale \n y = -dy / trans.yscale\n else:\n x = dx\n y = dy\n #self.canvas.move(self.id, x, y)\n _tkExec(self.canvas.move, self.id, x, y)\n if canvas.autoflush:\n #_root.update()\n _tkCall(_root.update)", "def update():\n global dragon, x, y, position, angle_left, angle_right, size, new\n x, y, position, angle_left, angle_right, new = modify_pos(x, y, position,\n angle_left,\n angle_right,\n size, new)\n dragon.setData(x, y) # update plot", "def mouse_move(self, pos):\n if (self.setup_type == \"position\"):\n x, y = pos\n self.canvas.move(x, y)", "def on_dragg(self, event):\n if str(event.lastevent.button) == \"MouseButton.LEFT\":\n mX = event.xdata\n mY = event.ydata\n if mX and mY:\n if self.current_point is not None:\n self.x[self.current_point] = mX\n self.y[self.current_point] = mY\n self.redraw()", "def drag(self, event):\n if event.button:\n try:\n x_loc, y_loc = self.appWindow.spec_cv.mouse(event)\n print(x_loc, y_loc)\n trackNo, updated_track =\\\n self.model.updateTrackDrag(x_loc, y_loc,\\\n self.locked_track, self.x_high)\n self.appWindow.spec_cv.updateTrack(trackNo, updated_track)\n self.appWindow.spec_cv.redrawTracks()\n except TypeError:\n pass", "def drag_motion(self, widget, context, x, y, t):\n \n if self.mouse_click_point:\n self.dy = y - self.mouse_click_point\n else:\n self.mouse_click_point = y", "def onMove(event):\r\n global initPos\r\n global rect\r\n if initPos is None:\r\n return# If you haven't clicked recently, we ignore the event\r\n\r\n if event.inaxes == None:\r\n return# ignore movement outside the axes\r\n\r\n x = initPos[2]\r\n y = initPos[3]\r\n dx = event.xdata - initPos[2]\r\n dy = event.ydata - initPos[3]\r\n # This code does the actual move of the rectangle\r\n rect.set_x(initPos[0] + dx)\r\n rect.set_y(initPos[1] + dy)\r\n\r\n rect.figure.canvas.draw()", "def __move_from(self, event):\n self.canvas_image.scan_mark(event.x, event.y)\n self.from_coord = (event.x, event.y)", "def 
drag(self, event):\n\t\tif len(self.coord_list) > 0:\n\t\t\tself.canvas.create_line(event.x, event.y, \n\t\t\t\tself.coord_list[-1][0], self.coord_list[-1][1])\n\n\t\tself.coord_list.append([event.x, event.y])\n\n\t\tpoly_list = check_contained(self.coord_list) - self.drawn_list\n\t\tfor polygon in poly_list:\t\t\t# will accidently draw this multilple times oops \n\t\t\t#self.canvas.create_polygon( self.coord_list[polygon[0]:polygon[1]], fill='black')\n\t\t\tself.drawn_list.add(polygon)", "def move(self, x_change, y_change):\n self.rect.x += x_change\n self.rect.y += y_change", "def do_move(self, dx, dy):\n self.rect.move_ip(dx, dy)", "def move_start(self, event):\n self.canvas.scan_mark(event.x, event.y)", "def move(self, dx, dy):\r\n self.corner.x += dx\r\n self.corner.y += dy", "def move(self, dx, dy):\n self.corner.x += dx\n self.corner.y += dy", "def move(self, dx, dy):\n self.corner.x += dx\n self.corner.y += dy", "def move(self, dx, dy):\n self.corner.x += dx\n self.corner.y += dy", "def move(self):\r\n min_x = self.__screen.SCREEN_MIN_X\r\n min_y = self.__screen.SCREEN_MIN_Y\r\n delta_x = self.__screen.SCREEN_MAX_X - min_x\r\n delta_y = self.__screen.SCREEN_MAX_Y - min_y\r\n\r\n # new location formula according to pdf.\r\n new_x = (self.__x_speed + self.__x - min_x) % delta_x + min_x\r\n new_y = (self.__y_speed + self.__y - min_y) % delta_y + min_y\r\n self.__x, self.__y = new_x, new_y", "def mouseDragged(self, point, delta):\n pass", "def move(self):\n \n self.position = self.explore()", "def on_mouse_move(self, event):\n if event.is_dragging and event.buttons[0] == 1:\n x0, y0 = event.last_event.pos[0], event.last_event.pos[1]\n x1, y1 = event.pos[0], event.pos[1]\n X0, Y0, Z0 = self.pixel_to_coords(float(x0), float(y0))\n X1, Y1, Z1 = self.pixel_to_coords(float(x1), float(y1))\n self.translate_center(X1 - X0, Y1 - Y0, Z1 - Z0)", "def event_click_line(self, event):\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n if self.get_vector_object(self.variables.current_shape_id).image_drag_limits:\n drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits\n canvas_lims = self.image_coords_to_canvas_coords(drag_lims)\n if event_x_pos < canvas_lims[0]:\n event_x_pos = canvas_lims[0]\n elif event_x_pos > canvas_lims[2]:\n event_x_pos = canvas_lims[2]\n if event_y_pos < canvas_lims[1]:\n event_y_pos = canvas_lims[1]\n elif event_y_pos > canvas_lims[3]:\n event_y_pos = canvas_lims[3]\n if self.variables.actively_drawing_shape:\n old_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n new_coords = tuple(list(old_coords) + [event_x_pos, event_y_pos])\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n else:\n new_coords = (event_x_pos, event_y_pos, event_x_pos + 1, event_y_pos + 1)\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n self.variables.actively_drawing_shape = True", "def down(self, event):\n self.dragx = event.x\n self.dragy = event.y\n self.canvas.bind(\"<B1-Motion>\", self.motion)\n self.canvas.bind(\"<ButtonRelease-1>\", self.up)\n return True", "def move(self, x, y):\r\n if self.brush_on:\r\n for lx, ly in line(self.pos_x, self.pos_y, x, y):\r\n self.set(lx, ly)\r\n\r\n self.pos_x = x\r\n self.pos_y = y", "def move(self, pos):\n self.widget.move(*pos)", "def move_start(event):\n nonlocal x, y\n x = event.x \n y = event.y\n window['cursor'] = utils.CURSORS['move_item']", "def button_press_cb(self, darea, event):\n 
x, y = event.x, event.y\n self.draw_pointer(self.cr, x, y)\n self.queue_draw()\n self.oldx, self.oldy = x, y\n rel_x, rel_y = self.absolute_to_relative(x, y)\n self.emit('dnd-value', rel_x, rel_y)\n self.emit('start-dnd')\n return True", "def drag_coordinates(self, client, *args):\r\n x_coord1, y_coord1, x_coord2, y_coord2, sec = args\r\n client.dragCoordinates2(x_coord1, y_coord1, x_coord2, y_coord2, sec)", "def _animation_move_tick(self, diff_x, diff_y):\r\n self.root.after(self._MOVE_ANIMATION_DELAY, self.canvas.move(self.item, diff_x, diff_y))\r\n self.canvas.update()", "def update_position(self, canvas):\n if self.x <= 0:\n if self.direction == \"SW\":\n self.direction = \"SE\"\n if self.direction == \"W\":\n self.direction = \"E\"\n if self.direction == \"NW\":\n self.direction = \"NE\"\n if self.x >= canvas.width:\n if self.direction == \"SE\":\n self.direction = \"SW\"\n if self.direction == \"E\":\n self.direction = \"W\"\n if self.direction == \"NE\":\n self.direction = \"NW\"\n if self.y <= 0:\n if self.direction == \"NW\":\n self.direction = \"SW\"\n if self.direction == \"N\":\n self.direction = \"S\"\n if self.direction == \"NE\":\n self.direction = \"SE\"\n if self.y >= canvas.height:\n if self.direction == \"SW\":\n self.direction = \"NW\"\n if self.direction == \"S\":\n self.direction = \"N\"\n if self.direction == \"SE\":\n self.direction = \"NE\"\n if self.direction == \"N\":\n self.y -= 1\n if self.direction == \"NE\":\n self.y -= 1\n self.x += 1\n if self.direction == \"E\":\n self.x += 1\n if self.direction == \"SE\":\n self.x += 1\n self.y += 1\n if self.direction == \"S\":\n self.y += 1\n if self.direction == \"SW\":\n self.x -= 1\n self.y += 1\n if self.direction == \"W\":\n self.x -= 1\n if self.direction == \"NW\":\n self.y -= 1\n self.x -= 1", "def move(self, xmove, ymove):\n # convert units to pixels\n xmove = units.parse_dist(xmove,\n ppi=self.ppi,\n default_unit=\"px\",\n canvassize=[self.width,self.height])\n ymove = units.parse_dist(ymove,\n ppi=self.ppi,\n default_unit=\"px\",\n canvassize=[self.width,self.height])\n # paste self on blank at offset pixel coords\n self.drawer.flush()\n blank = PIL.Image.new(self.img.mode, self.img.size, None)\n blank.paste(self.img, (xmove, ymove))\n self.img = blank\n # similarly move the drawing transform\n # by converting pixels to coord distances\n xmove,ymove = self.pixel2coord_dist(xmove, ymove)\n orig = affine.Affine(*self.coordspace_transform)\n moved = orig * affine.Affine.translate(xmove,ymove)\n self.drawer = aggdraw.Draw(self.img)\n self.drawer.settransform(moved.coefficients)\n # remember the new coordinate extents and affine matrix\n self.coordspace_transform = moved.coefficients\n # offset bbox\n x1,y1,x2,y2 = 0,0,self.width,self.height\n x1,y1 = self.pixel2coord(x1, y1)\n x2,y2 = self.pixel2coord(x2, y2)\n self.coordspace_bbox = [x1,y1,x2,y2]\n return self", "def mouse_move(self, obj, event):\n last_pos = self.iren.GetLastEventPosition()\n next_pos = self.iren.GetEventPosition()\n last_disp_coords = np.asarray([last_pos[0], last_pos[1], 0])\n next_disp_coords = np.asarray([next_pos[0], next_pos[1], 0])\n last_world_coords = self.display_to_world(last_disp_coords)\n next_world_coords = self.display_to_world(next_disp_coords)\n world_direction = (last_world_coords - next_world_coords)[0]\n\n if world_direction > 0:\n direction = 'forwards'\n elif world_direction < 0:\n direction = 'backwards'\n else:\n direction = 'none'\n\n if self.cone_dir == 'start':\n if direction == 'backwards':\n self.start_base_x += 
.5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n isvalid = self.gaps.set_dragged_start(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.start_base_x -= .5\n return\n\n elif direction == 'forwards':\n if self.start_base_x > 0:\n self.start_base_x -= .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n self.gaps.set_dragged_start(ind)\n self.ren_win.Render()\n\n if self.cone_dir == 'end':\n if direction == 'backwards':\n if self.end_base_x > 0:\n self.end_base_x -= .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n self.gaps.set_dragged_end(ind)\n self.ren_win.Render()\n\n elif direction == 'forwards':\n self.end_base_x += .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n isvalid = self.gaps.set_dragged_end(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.end_base_x -= .5\n return", "def on_mouse_move(self, event):\n self.mouse = [event.xdata, event.ydata]\n\n # Update pan view on mouse move\n if self.panning is True:\n for a in self.pan_axes:\n a.drag_pan(1, event.key, event.x, event.y)\n\n # Async re-draw (redraws only on thread idle state, uses timer on backend)\n self.canvas.draw_idle()\n\n ##### Temporary place-holder for cached update #####\n self.update_screen_request.emit([0, 0, 0, 0, 0])", "def move(self,x,y):\n self.pos.x = x\n self.pos.y = y", "def moveDown(self,event):\n oldCoords=[self.xPos,self.yPos]\n \n self.yPos= self.yPos+1 #modify the coordiantes\n \n deltaCoords=[self.xPos-oldCoords[0],self.yPos-oldCoords[1]]\n self.canvasIGetDrawnOn.move(self.sprite,*deltaCoords)", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def button_release_cb(self, darea, event):\n self.oldx, self.oldy = event.x, event.y\n self.draw_pointer(self.cr, None, None)\n self.queue_draw()\n self.oldx, self.oldy = None, None\n self.emit('end-dnd')\n return True", "def _move(self):\n self.pos += self.direction # add direction vector\n self.direction += self.gravity # add gravity to direction\n self.direction = self.direction.elementwise() * self.drag # apply drag to direction", "def up():\n global y, canvas # y é modificado\n canvas.create_line(x, y, x, y - 10)\n y -= 10", "def move (self):\n\t\tself.x += self.direction[0]\n\t\tself.y += self.direction[1]\n\t\tself.row = (self.y - 15) / 30\n\t\tself.col = (self.x - 15) / 30\n\t\tself.rowcol = (self.row,self.col)\n\t\tself.draw()", "def _onmove(self, event):", "def move(self, dx, dy):\n self.x += dx\n self.y += dy", "def move(self, x, y):\r\n self.rect_thumb.move_ip(x,y)", "def move(self):\n \n self.position = self.wander()", "def move(event):\r\n\t\tif event.char == \"a\":\r\n\t\t\tcanvas.move(z[a], -10, 0)\r\n\t\telif event.char == \"d\":\r\n\t\t\tcanvas.move(z[a], 10, 0)\r\n\t\telif event.char == \"w\":\r\n\t\t\tcanvas.move(z[a], 0, -10)\r\n\t\telif event.char == \"s\":\r\n\t\t\tcanvas.move(z[a], 0, 10)", "def move(self):\n if self.x_pos < 
const.screenwidth:\n self.x_pos += 1\n self.x_pos = self.x_pos\n\n self.draw()\n return", "def drag(self, x, y, btn):\n if self._doPan:\n return self._pan.drag(x, y, btn)\n else:\n return super(PanAndSelect, self).drag(x, y, btn)", "def move_draught_begin(event):\n global red_draughts, white_draughts\n global board_array\n global click_offset\n global old_point\n draught = board.find_withtag(CURRENT)[0]\n click_offset = [event.x-board.coords(draught)[0],event.y-board.coords(draught)[1]] #How far off the click is from the coordinates of the draught it's moving\n bottom = (event.y-click_offset[1] >= board_height//2)\n point_left_edges = [board_divisions*i for i in xrange(0,15) if i != 7]\n if bottom == False:\n if(event.x-click_offset[0] == 7*board_divisions): #If on the white bar\n old_point = 25\n else:\n old_point = 12+point_left_edges.index(min(point_left_edges, key=lambda x:abs(x-event.x+click_offset[0])))\n else:\n if(event.x-click_offset[0] == 7*board_divisions): #If on the red bar\n old_point = 0\n else:\n old_point = 13-point_left_edges.index(min(point_left_edges, key=lambda x:abs(x-event.x+click_offset[0])))", "def update(self):\n self.rect.x += self.change_x\n self.rect.y += self.change_y", "def update(self):\n self.rect.x += self.change_x\n self.rect.y += self.change_y", "def moveCursor(self):\n\n\t\tself._before = self.rect.center\n\t\tself.rect.center = self._pos", "def move(self) -> None:\n\n new_x = self.getX() + self.speed[0]\n new_y = self.getY() + self.speed[1]\n self.setX(new_x)\n self.setY(new_y)", "def callback_handle_left_mouse_motion(self, event):\n\n # TODO: update this for the case where there is no current shape id\n vector_object = self.get_vector_object(self.variables.current_shape_id)\n if self.variables.active_tool == TOOLS.PAN_TOOL:\n x_dist = event.x - self.variables.tmp_anchor_point[0]\n y_dist = event.y - self.variables.tmp_anchor_point[1]\n self.move(self.variables.image_id, x_dist, y_dist)\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.TRANSLATE_SHAPE_TOOL:\n x_dist = event.x - self.variables.tmp_anchor_point[0]\n y_dist = event.y - self.variables.tmp_anchor_point[1]\n t_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n new_coords = numpy.asarray(t_coords) + x_dist\n new_coords_y = numpy.asarray(t_coords) + y_dist\n new_coords[1::2] = new_coords_y[1::2]\n if vector_object.image_drag_limits:\n canvas_limits = self.image_coords_to_canvas_coords(vector_object.image_drag_limits)\n x_vertices = new_coords[0::2]\n y_vertices = new_coords[1::2]\n within_x_limits = True\n within_y_limits = True\n for x_vertex in x_vertices:\n if canvas_limits[2] < x_vertex or x_vertex < canvas_limits[0]:\n within_x_limits = False\n for y_vertex in y_vertices:\n if y_vertex < canvas_limits[1] or y_vertex > canvas_limits[3]:\n within_y_limits = False\n if not within_x_limits:\n new_coords[0::2] = t_coords[0::2]\n if not within_y_limits:\n new_coords[1::2] = t_coords[1::2]\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id,\n new_coords,\n update_pixel_coords=True)\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.EDIT_SHAPE_COORDS_TOOL:\n previous_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n coord_x_index = self.variables.tmp_closest_coord_index*2\n coord_y_index = coord_x_index + 1\n new_coords = list(previous_coords)\n new_coords[coord_x_index] = event.x\n new_coords[coord_y_index] = event.y\n if 
vector_object.image_drag_limits:\n drag_x_lim_1, drag_y_lim_1, drag_x_lim_2, drag_y_lim_2 = \\\n self.image_coords_to_canvas_coords(vector_object.image_drag_limits)\n if new_coords[coord_x_index] < drag_x_lim_1:\n new_coords[coord_x_index] = drag_x_lim_1\n if new_coords[coord_x_index] > drag_x_lim_2:\n new_coords[coord_x_index] = drag_x_lim_2\n if new_coords[coord_y_index] < drag_y_lim_1:\n new_coords[coord_y_index] = drag_y_lim_1\n if new_coords[coord_y_index] > drag_y_lim_2:\n new_coords[coord_y_index] = drag_y_lim_2\n\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, tuple(new_coords))\n elif self.variables.active_tool == TOOLS.ZOOM_IN_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.ZOOM_OUT_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.SELECT_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_ELLIPSE_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_POINT_BY_CLICKING:\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (event.x, event.y))", "def mouse_release(self,event):\n global drag_sq\n if drag_sq != -1:\n# dst_sq = (event.y // sq_size) * 8+ (event.x // sq_size)\n dst_sq = self.coord_to_sq((event.x, event.y))\n \n m = Move(drag_sq, dst_sq)\n m.set_from_user() # this is input from user (not file)\n \n if not self.on_move_piece(m):\n # Withdraw the piece to original spot\n obj = self.piece_objs[drag_sq]\n \n self.canvas.coords(obj, \n self.sq_to_coord(drag_sq))\n# ((drag_sq%8)*sq_size, (drag_sq//8)*sq_size))\n drag_sq = -1\n return", "def handle_motion(self, x, y):\n if self.pressed_flag:\n self.last_point = (x, y)\n\n # trigger canvas to redraw itself\n self.redraw()", "def move(self, center):\n\t\t#print \"made it\"\n\t\tself.rect = self.rect.move(center)", "def down():\n global y, canvas # y é modificado\n canvas.create_line(x, y, x, y + 10)\n y += 10", "def move_draught(event):\n global red_turn\n if(red_turn == False):\n return\n draught = board.find_withtag(CURRENT)[0]\n board.coords(draught,event.x-click_offset[0],event.y-click_offset[1],event.x-click_offset[0]+board_divisions,event.y-click_offset[1]+board_divisions)", "def _on_move(self, event):\n\n if not self.button_pressed:\n return\n\n if self.M is None:\n return\n\n x, y = event.xdata, event.ydata\n # In case the mouse is out of bounds.\n if x == None:\n return\n\n dx, dy = x - self.sx, y - self.sy\n x0, x1 = self.get_xlim()\n y0, y1 = self.get_ylim()\n w = (x1-x0)\n h = (y1-y0)\n self.sx, self.sy = x, y\n\n # Rotation\n if self.button_pressed in self._rotate_btn:\n # rotate viewing point\n # get the x and y pixel coords\n if dx == 0 and dy == 0:\n return\n self.elev = art3d.norm_angle(self.elev - (dy/h)*180)\n self.azim = art3d.norm_angle(self.azim - (dx/w)*180)\n self.get_proj()\n self.figure.canvas.draw()\n\n# elif self.button_pressed == 2:\n # pan view\n # project xv,yv,zv -> xw,yw,zw\n # pan\n# pass\n\n # Zoom\n elif self.button_pressed in self._zoom_btn:\n # zoom view\n # hmmm..this needs some help from clipping....\n minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()\n df = 1-((h - dy)/h)\n dx = (maxx-minx)*df\n dy = 
(maxy-miny)*df\n dz = (maxz-minz)*df\n self.set_xlim3d(minx - dx, maxx + dx)\n self.set_ylim3d(miny - dy, maxy + dy)\n self.set_zlim3d(minz - dz, maxz + dz)\n self.get_proj()\n self.figure.canvas.draw()", "def move(self, x, y):\n\n\t\tself._window.move(x, y)", "def mouse_move_callback(self, event):\n # TODO drag and drop figuriek\n print(\"moving at \", event.x + self.offset_x, event.y + self.offset_y)", "def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)", "def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)", "def on_mouse_move(self, event: PointEvent):\n self.x = event.x\n self.y = event.y\n self.handle_mouse(self.x, self.y)", "def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')", "def update(self):\n self.syncSpriteCoordinates()\n self.moveBasedOnCurrentMomentum()\n #self.decelerate()\n self.checkCanvasBoundsAndWrap()", "def _move(self, dx, dy):\n pass # must override in subclass", "def update_position(self, canvas):\n pts = []\n for P in self.get_points():\n if P[0] <= 0:\n if self.direction == \"SW\":\n self.direction = \"SE\"\n if self.direction == \"W\":\n self.direction = \"E\"\n if self.direction == \"NW\":\n self.direction = \"NE\"\n if P[0] >= canvas.width:\n if self.direction == \"SE\":\n self.direction = \"SW\"\n if self.direction == \"E\":\n self.direction = \"W\"\n if self.direction == \"NE\":\n self.direction = \"NW\"\n if P[1] <= 0:\n if self.direction == \"NW\":\n self.direction = \"SW\"\n if self.direction == \"N\":\n self.direction = \"S\"\n if self.direction == \"NE\":\n self.direction = \"SE\"\n if P[1] >= canvas.height:\n if self.direction == \"SW\":\n self.direction = \"NW\"\n if self.direction == \"S\":\n self.direction = \"N\"\n if self.direction == \"SE\":\n self.direction = \"NE\"\n for P in self.get_points():\n if self.direction == \"N\":\n pts.append((P[0],P[1]-1)) \n# P.y -= 1\n if self.direction == \"NE\":\n pts.append((P[0]+1,P[1]-1))\n# P.y -= 1\n# P.x += 1\n if self.direction == \"E\":\n pts.append((P[0]+1,P[1]))\n# P.x += 1\n if self.direction == \"SE\":\n pts.append((P[0]+1,P[1]+1))\n# P.x += 1\n# P.y += 1\n if self.direction == \"S\":\n pts.append((P[0],P[1]+1))\n# P.y += 1\n if self.direction == \"SW\":\n pts.append((P[0]-1,P[1]+1))\n# P.x -= 1\n# P.y += 1\n if self.direction == \"W\":\n pts.append((P[0]-1,P[1]))\n# P.x -= 1\n if self.direction == \"NW\":\n pts.append((P[0]-1,P[1]-1))\n# P.y -= 1\n# P.x -= 1 \n self.set_points(pts)", "def drag(self, (x0, y0), (x1, y1), duration, steps=1, orientation=-1):\n if orientation == -1:\n orientation = self.getOrientation()\n (x0, y0) = self.__transformPointByOrientation((x0, y0), orientation, self.getOrientation())\n (x1, y1) = self.__transformPointByOrientation((x1, y1), orientation, self.getOrientation())\n\n version = self.device.get_sdk_version()\n if version <= 15:\n self.logger.error(\"drag: API <= 15 not supported (version=%d)\" % 
version)\n elif version <= 17:\n self.shell(\"input swipe %d %d %d %d\" % (x0, y0, x1, y1))\n else:\n self.shell(\"input touchscreen swipe %d %d %d %d %d\" % (x0, y0, x1, y1, duration))", "def onmove(self, event):\n if self.ignore(event):\n return\n if not self.canvas.widgetlock.available(self):\n return\n if event.inaxes != self.ax:\n self.linev.set_visible(False)\n self.lineh.set_visible(False)\n\n if self.needclear:\n self.canvas.draw()\n self.needclear = False\n return\n self.needclear = True\n\n self.linev.set_xdata((event.xdata, event.xdata))\n self.linev.set_visible(self.visible and self.vertOn)\n\n self.lineh.set_ydata((event.ydata, event.ydata))\n self.lineh.set_visible(self.visible and self.horizOn)\n\n if self.visible and (self.vertOn or self.horizOn):\n self._update()", "def cambiovelocidad(self,x,y):\n self.change_x += x\n self.change_y += y", "def _onmove(self, event):\n eventpress = self._eventpress\n # The calculations are done for rotation at zero: we apply inverse\n # transformation to events except when we rotate and move\n state = self._state\n rotate = ('rotate' in state and\n self._active_handle in self._corner_order)\n move = self._active_handle == 'C'\n resize = self._active_handle and not move\n\n if resize:\n inv_tr = self._get_rotation_transform().inverted()\n event.xdata, event.ydata = inv_tr.transform(\n [event.xdata, event.ydata])\n eventpress.xdata, eventpress.ydata = inv_tr.transform(\n [eventpress.xdata, eventpress.ydata]\n )\n\n dx = event.xdata - eventpress.xdata\n dy = event.ydata - eventpress.ydata\n # refmax is used when moving the corner handle with the square state\n # and is the maximum between refx and refy\n refmax = None\n if self._use_data_coordinates:\n refx, refy = dx, dy\n else:\n # Get dx/dy in display coordinates\n refx = event.x - eventpress.x\n refy = event.y - eventpress.y\n\n x0, x1, y0, y1 = self._extents_on_press\n # rotate an existing shape\n if rotate:\n # calculate angle abc\n a = np.array([eventpress.xdata, eventpress.ydata])\n b = np.array(self.center)\n c = np.array([event.xdata, event.ydata])\n angle = (np.arctan2(c[1]-b[1], c[0]-b[0]) -\n np.arctan2(a[1]-b[1], a[0]-b[0]))\n self.rotation = np.rad2deg(self._rotation_on_press + angle)\n\n elif resize:\n size_on_press = [x1 - x0, y1 - y0]\n center = [x0 + size_on_press[0] / 2, y0 + size_on_press[1] / 2]\n\n # Keeping the center fixed\n if 'center' in state:\n # hh, hw are half-height and half-width\n if 'square' in state:\n # when using a corner, find which reference to use\n if self._active_handle in self._corner_order:\n refmax = max(refx, refy, key=abs)\n if self._active_handle in ['E', 'W'] or refmax == refx:\n hw = event.xdata - center[0]\n hh = hw / self._aspect_ratio_correction\n else:\n hh = event.ydata - center[1]\n hw = hh * self._aspect_ratio_correction\n else:\n hw = size_on_press[0] / 2\n hh = size_on_press[1] / 2\n # cancel changes in perpendicular direction\n if self._active_handle in ['E', 'W'] + self._corner_order:\n hw = abs(event.xdata - center[0])\n if self._active_handle in ['N', 'S'] + self._corner_order:\n hh = abs(event.ydata - center[1])\n\n x0, x1, y0, y1 = (center[0] - hw, center[0] + hw,\n center[1] - hh, center[1] + hh)\n\n else:\n # change sign of relative changes to simplify calculation\n # Switch variables so that x1 and/or y1 are updated on move\n if 'W' in self._active_handle:\n x0 = x1\n if 'S' in self._active_handle:\n y0 = y1\n if self._active_handle in ['E', 'W'] + self._corner_order:\n x1 = event.xdata\n if self._active_handle in ['N', 'S'] 
+ self._corner_order:\n y1 = event.ydata\n if 'square' in state:\n # when using a corner, find which reference to use\n if self._active_handle in self._corner_order:\n refmax = max(refx, refy, key=abs)\n if self._active_handle in ['E', 'W'] or refmax == refx:\n sign = np.sign(event.ydata - y0)\n y1 = y0 + sign * abs(x1 - x0) / \\\n self._aspect_ratio_correction\n else:\n sign = np.sign(event.xdata - x0)\n x1 = x0 + sign * abs(y1 - y0) * \\\n self._aspect_ratio_correction\n\n elif move:\n x0, x1, y0, y1 = self._extents_on_press\n dx = event.xdata - eventpress.xdata\n dy = event.ydata - eventpress.ydata\n x0 += dx\n x1 += dx\n y0 += dy\n y1 += dy\n\n else:\n # Create a new shape\n self._rotation = 0\n # Don't create a new rectangle if there is already one when\n # ignore_event_outside=True\n if ((self.ignore_event_outside and self._selection_completed) or\n not self._allow_creation):\n return\n center = [eventpress.xdata, eventpress.ydata]\n dx = (event.xdata - center[0]) / 2.\n dy = (event.ydata - center[1]) / 2.\n\n # square shape\n if 'square' in state:\n refmax = max(refx, refy, key=abs)\n if refmax == refx:\n dy = np.sign(dy) * abs(dx) / self._aspect_ratio_correction\n else:\n dx = np.sign(dx) * abs(dy) * self._aspect_ratio_correction\n\n # from center\n if 'center' in state:\n dx *= 2\n dy *= 2\n\n # from corner\n else:\n center[0] += dx\n center[1] += dy\n\n x0, x1, y0, y1 = (center[0] - dx, center[0] + dx,\n center[1] - dy, center[1] + dy)\n\n self.extents = x0, x1, y0, y1", "def move(self,dt):\n self.x_pos += self.x_vel*dt\n self.y_pos += self.y_vel*dt", "def __move__(self):\n v = self.velocity\n p = self.position\n p += v\n self.rect.x = round(p.x)\n self.rect.y = round(p.y)", "def _onmove(self, event):\n\n # self._prev are deprecated but we still need to maintain it\n self._prev = self._get_data(event)\n\n v = event.xdata if self.direction == 'horizontal' else event.ydata\n if self.direction == 'horizontal':\n vpress = self._eventpress.xdata\n else:\n vpress = self._eventpress.ydata\n\n # move existing span\n # When \"dragging from anywhere\", `self._active_handle` is set to 'C'\n # (match notation used in the RectangleSelector)\n if self._active_handle == 'C' and self._extents_on_press is not None:\n vmin, vmax = self._extents_on_press\n dv = v - vpress\n vmin += dv\n vmax += dv\n\n # resize an existing shape\n elif self._active_handle and self._active_handle != 'C':\n vmin, vmax = self._extents_on_press\n if self._active_handle == 'min':\n vmin = v\n else:\n vmax = v\n # new shape\n else:\n # Don't create a new span if there is already one when\n # ignore_event_outside=True\n if self.ignore_event_outside and self._selection_completed:\n return\n vmin, vmax = vpress, v\n if vmin > vmax:\n vmin, vmax = vmax, vmin\n\n self.extents = vmin, vmax\n\n if self.onmove_callback is not None:\n self.onmove_callback(vmin, vmax)\n\n return False", "def start_pointer_drag(self, drag: Drag, serial: int) -> None:\n lib.wlr_seat_start_pointer_drag(self._ptr, drag._ptr, serial)", "def move(self):\n self.pos += self.vel\n self.rect.center = self.pos", "def SetPoint(self, pt):\r\n \r\n self._pointDrag = pt", "def updatePos(self):\n self.timeDriving +=1\n self.pos[0] += self.vx\n self.pos[1] += self.vy", "def move(self, delta):\n ideal_frame_time = 60 # FPS\n displacement_factor = delta / ideal_frame_time\n \n #self.x += self.vx * displacement_factor\n #self.y += self.vy * displacement_factor\n \n # If we do not round our floats, pygame will floor it for us (bad)\n self.rect.center = (round(self.x), 
round(self.y))\n \n self.rot = (self.rot + self.vrot) % 360\n self.rot_center()", "def onMove(self, event):\n\n # get current mouse position\n (x, y) = event.GetPositionTuple()\n\n self.handleMousePositionCallback((x, y))\n\n if event.Dragging() and event.LeftIsDown():\n # are we doing box select?\n if self.is_box_select:\n # set select box point 2 at mouse position\n (self.sbox_w, self.sbox_h) = (x - self.sbox_1_x,\n y - self.sbox_1_y)\n elif not self.last_drag_x is None:\n # no, just a map drag\n self.was_dragging = True\n dx = self.last_drag_x - x\n dy = self.last_drag_y - y\n\n # move the map in the view\n self.view_offset_x += dx\n self.view_offset_y += dy\n\n # limit drag at edges of map\n if self.map_width > self.view_width:\n # if map > view, don't allow edge to show background\n if self.view_offset_x < 0:\n self.view_offset_x = 0\n elif self.view_offset_x > self.max_x_offset:\n self.view_offset_x = self.max_x_offset\n else:\n # else map < view, centre X\n self.view_offset_x = (self.map_width - self.view_width)/2\n\n if self.map_height > self.view_height:\n # if map > view, don't allow edge to show background\n if self.view_offset_y < 0:\n self.view_offset_y = 0\n elif self.view_offset_y > self.max_y_offset:\n self.view_offset_y = self.max_y_offset\n else:\n # else map < view, centre Y\n self.view_offset_y = (self.map_height - self.view_height)/2\n\n # adjust remembered X,Y\n self.last_drag_x = x\n self.last_drag_y = y\n\n self.recalc_view_lonlat_limits()\n\n # redraw client area\n self.drawTilesLayers()", "def move(self):\n self.center_x += self._vx\n self.center_y += self._vy", "def ClickAndDrag(self, delta_x=0, delta_y=0):\n self._EnsureHIDValueInRange(delta_x)\n self._EnsureHIDValueInRange(delta_y)\n self._PressLeftButton()\n self.Move(delta_x, delta_y)\n self._ReleaseAllButtons()", "def move(self):\n self.pos += self.direc\n self.nearest_node = self.pixel_to_node()", "def move(self, p):\r\n self.position.setvalue(p)", "def move(self, rel_pos):\n self.pos = (self.pos[0] + rel_pos[0] * GRID, self.pos[1] + rel_pos[1] * GRID)", "def move(self, dt):\n dt = dt", "def move(self):\n pass", "def moveTo(self, x, y):\n\n\t\tif x < 0:\n\t\t\tself.x = 0\n\t\telif x > self.maxX:\n\t\t\tself.x = self.maxX\n\t\telse:\n\t\t\tself.x = x\n\n\t\tif y < 0:\n\t\t\tself.y = 0\n\t\telif y > self.maxY:\n\t\t\tself.y = self.maxY\n\t\telse:\n\t\t\tself.y = y \n\n #print self.x, self.y\n\t\tautopy.mouse.move(self.x,self.y)", "def onMoveDown(self):\n self.mainGrid.moveDown()", "def update_pos(self):\n self.imgx=self.pathX[min(self.x,len(self.pathX)-1)]\\\n [min(self.y,len(self.pathX[self.x])-1)]\n self.imgy=self.pathY[min(self.x,len(self.pathY)-1)]\\\n [min(self.y,len(self.pathY[self.x])-1)]", "def _update(self, event):\n if self.ignore(event) or event.button != 1:\n return\n\n if event.name == \"button_press_event\" and event.inaxes == self.ax:\n self.drag_active = True\n event.canvas.grab_mouse(self.ax)\n\n if not self.drag_active:\n return\n\n elif (event.name == \"button_release_event\") or (\n event.name == \"button_press_event\" and event.inaxes != self.ax\n ):\n self.drag_active = False\n event.canvas.release_mouse(self.ax)\n self._active_handle = None\n return\n\n # determine which handle was grabbed\n if self.orientation == \"vertical\":\n handle_index = np.argmin(\n np.abs([h.get_ydata()[0] - event.ydata for h in self._handles])\n )\n else:\n handle_index = np.argmin(\n np.abs([h.get_xdata()[0] - event.xdata for h in self._handles])\n )\n handle = self._handles[handle_index]\n\n # these checks 
ensure smooth behavior if the handles swap which one\n # has a higher value. i.e. if one is dragged over and past the other.\n if handle is not self._active_handle:\n self._active_handle = handle\n\n if self.orientation == \"vertical\":\n self._update_val_from_pos(event.ydata)\n else:\n self._update_val_from_pos(event.xdata)", "def onMouseMove(self,mouseEvent):\n\t\tself.canvas.drawEdgeTo(mouseEvent.x,mouseEvent.y)", "def move(self, x, y):\n self.x+=x\n self.y+=y", "def _ondrag(self, item, fun, num=1, add=None):\n if fun is None:\n self.cv.tag_unbind(item, \"<Button%s-Motion>\" % num)\n else:\n def eventfun(event):\n try:\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n except Exception:\n pass\n self.cv.tag_bind(item, \"<Button%s-Motion>\" % num, eventfun, add)" ]
[ "0.7642598", "0.7116813", "0.6957533", "0.6895421", "0.6839819", "0.6795433", "0.6787461", "0.66360825", "0.6598902", "0.65643734", "0.65615577", "0.6521887", "0.64005405", "0.6353929", "0.63048756", "0.62917954", "0.627049", "0.6246554", "0.6198148", "0.6198148", "0.6198148", "0.61967653", "0.615824", "0.61532515", "0.61450773", "0.6142107", "0.61293596", "0.6112488", "0.610505", "0.610482", "0.610392", "0.6087674", "0.60844785", "0.6082753", "0.6081412", "0.6073924", "0.6073852", "0.6069526", "0.6063965", "0.6058388", "0.6058388", "0.60433674", "0.603998", "0.603739", "0.6017113", "0.60016084", "0.5984851", "0.59843355", "0.59842336", "0.59774697", "0.5962611", "0.59412456", "0.59151745", "0.5913717", "0.5913717", "0.5909649", "0.5898944", "0.58854985", "0.5879318", "0.58716136", "0.58620113", "0.58374643", "0.5800824", "0.57977486", "0.57745236", "0.57725906", "0.5761809", "0.5761809", "0.5753016", "0.5752877", "0.5752319", "0.57505447", "0.57397866", "0.5735336", "0.572122", "0.5716934", "0.5664959", "0.56648153", "0.5662142", "0.5658328", "0.56558883", "0.56505805", "0.5648451", "0.56456476", "0.5639952", "0.5639814", "0.56384677", "0.5629344", "0.5628202", "0.5627501", "0.5625527", "0.56222254", "0.5617074", "0.5613682", "0.5605804", "0.559876", "0.5593447", "0.55862904", "0.5585747", "0.5580673" ]
0.67894125
6
Checks if the point (x,y) is outside the image area
def outside(self, x, y):
    bbox = self.canvas_image.coords(self.container)  # get image area
    if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:
        return False  # point (x,y) is inside the image area
    else:
        return True  # point (x,y) is outside the image area
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def outside(self, x, y):\n bbox = self.canvas.coords(self.container) # get image area\n if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:\n return False # point (x,y) is inside the image area\n else:\n return True # point (x,y) is outside the image area", "def point_in_map(self, x, y):\r\n return 0 <= x < self.width and 0 <= y < self.height and (x,y) not in self.walls", "def isOutside(self, point):\n return 1-self.isInside(point)", "def is_out_of_bounds(img_height: int, img_width: int, x: float, y: float, patch_size: int) -> bool:\n patch_half_size_floored = patch_size // 2\n x_low = x - patch_half_size_floored\n x_high = x + patch_half_size_floored\n y_low = y - patch_half_size_floored\n y_high = y + patch_half_size_floored\n\n return x_low < 0 or x_high > img_width or y_low < 0 or y_high > img_height", "def isOutsideBorder(self):\n if (self.posX < -self.myGalaxy.worldWidth or self.posX > self.myGalaxy.worldWidth or\n self.posY < -self.myGalaxy.worldHeight or self.posY > self.myGalaxy.worldHeight):\n return 1\n return 0", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def isInsideImage(x, y, nx, ny, imageNx, imageNy):\r\n return ( ((x+nx) < imageNx) and ((y+ny) < imageNy) )", "def is_point_within(self, x, y):\n return abs(x - self._x_position) <= self._x_length / 2 and abs(y - self._y_position) <= self._y_length / 2", "def valid_coordinates(self, x, y):\n return ((x >= 0) and (x < self.width) and\n (y >= 0) and (y < self.height))", "def in_bounds(self, x, y):\n return x >= 0 and x < 8 and y >= 0 and y < 8", "def isInside(point_x, point_y, area_left, area_top, area_width, area_height):\n return (area_left <= point_x < area_left + area_width) and (area_top <= point_y < area_top + area_height)", "def contains_point(self, x, y): \r\n n = len(self.points)\r\n inside = False\r\n \r\n x1, y1 = self.points[0]\r\n for i in range(n + 1):\r\n x2, y2 = self.points[i % n]\r\n if y > min(y1, y2):\r\n if y <= max(y1, y2):\r\n if x <= max(x1, x2):\r\n if y1 != y2:\r\n xinters = (y - y1) * (x2 - x1) / (y2 - y1) + x1\r\n if x1 == x2 or x <= xinters:\r\n inside = not inside\r\n x1, y1 = x2, y2\r\n \r\n return inside", "def __isPointOnArea(self, point, area):\r\n\r\n pointX, pointY = point\r\n areaX,areaY,areaWidth,areaHeight = area\r\n\r\n if (pointX >= areaX and pointX <= areaX+areaWidth) and (pointY >= areaY and pointY <= areaY+areaHeight):\r\n return True\r\n else:\r\n return False", "def is_inside(self, x: int, y: int) -> bool:\n pass", "def test_point_within_dimensions_border():\n point = np.array([100, 20])\n image_dimensions = np.array([100, 100])\n assert not point_within_dimensions(point, image_dimensions)", "def insideArea(point, area):\n x=point.real\n y=point.imag\n n = len(area)\n inside = False\n p1x = area[0].real\n p1y = area[0].imag\n for i in range(1, n + 1):\n p2x = area[i % n].real\n p2y = area[i % n].imag\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside", "def _inside(self, x, y):\n wx, wy, w, h = self._raw_graph_window_dim()\n if wx <= x < wx + w and wy <= y < wy + h:\n return True\n return False", "def filter_point(x, y, xlower, xupper, ylower, yupper):\n ignore = False\n if (x < xlower or x > xupper or y < ylower or y > yupper):\n ignore = True\n return ignore", "def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon", "def in_area(self, x, y):\n raise 
NotImplementedError", "def check_pos(self, x, y):\n if x >= WINDOWWIDTH or y >= WINDOWHEIGHT or x <=0 or y <= 0:\n return True", "def out_of_bounds(self):\n return not 0 <= self.nodes[0].x < WIDTH * SCALE or not 0 <= self.nodes[0].y < HEIGHT * SCALE", "def is_point_in_box(x, y, bbox):\n if x < 200 and y < 200:\n return True\n return False", "def isInternal(self, aPoint):\n if (aPoint.x >= self.pMin.x and aPoint.x <= self.pMax.x) \\\n and (aPoint.y >= self.pMin.y and aPoint.y <= self.pMax.y):\n return True\n else:\n return False", "def test_inside_image(self):\r\n sample_img = Image(np.zeros((100, 200, 3), dtype=np.uint8))\r\n\r\n # test 4 corners\r\n top_left = Point(y=0, x=0)\r\n bottom_left = Point(y=99, x=0)\r\n top_right = Point(y=0, x=199)\r\n bottom_right = Point(y=99, x=199)\r\n\r\n assert top_left.inside(sample_img)\r\n assert bottom_left.inside(sample_img)\r\n assert top_right.inside(sample_img)\r\n assert bottom_right.inside(sample_img)\r\n\r\n # test out-side point\r\n pt1 = Point(y=-1, x=50)\r\n pt2 = Point(y=100, x=50)\r\n pt3 = Point(y=50, x=-1)\r\n pt4 = Point(y=50, x=200)\r\n\r\n assert not pt1.inside(sample_img)\r\n assert not pt2.inside(sample_img)\r\n assert not pt3.inside(sample_img)\r\n assert not pt4.inside(sample_img)", "def validate_in(self, xcoord, ycoord):\r\n x = int(xcoord/(self.tr.bd.TILE_WIDTH + self.tr.bd.LINE_WIDTH))\r\n y = int(ycoord/(self.tr.bd.TILE_WIDTH + self.tr.bd.LINE_WIDTH))\r\n if not self.tr.turn_tracker and self.tr.bd.disks[x][y].halo_tag:\r\n return True, x, y\r\n else:\r\n return False, x, y", "def obstacle_prone_area(self,image):\r\n\r\n start_x=int(self.start[0])\r\n start_y=int(self.start[1])\r\n goal_x=int(self.goal[0])\r\n goal_y=int(self.goal[1])\r\n print(goal_x,goal_y)\r\n if (image[int(self.maximum_size-goal_x),int(goal_y),0]==0) or ((image[int(self.maximum_size-start_x),int(start_y),0]==0)):\r\n #print(1)\r\n return False\r\n else:\r\n #print(2)\r\n return True", "def check_coord_in_range(self, x, y):\n return 0 <= x < self.cols and 0 <= y < self.lines", "def is_inside(self, mX, mY, point):\n return (math.sqrt((point[0] - mX) * (point[0] - mX)\n + (point[1] - mY) * (point[1] - mY)) <= 2)", "def in_display(self, point):\n x, y = point\n if x < 0 or x > self.width or \\\n y < 0 or y > self.height:\n return False\n return True", "def contains_point(self, x, y):\r\n if self.m == None:\r\n if abs(x - self.start[0]) > 0.6:\r\n return False\r\n else:\r\n if (y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1]):\r\n return True\r\n else:\r\n return False\r\n else: \r\n y0 = int(self.m * x + self.n)\r\n if abs(y - y0) > 0.6: \r\n return False \r\n else: \r\n if ((x >= self.start[0] and x <= self.end[0]) or \\\r\n (x <= self.start[0] and x >= self.end[0])) and \\\r\n ((y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1])): \r\n return True\r\n else:\r\n return False", "def is_off_grid(self, xmin, ymin, xmax, ymax):\n if xmin < 0:\n print('x-coordinate: {0} below minimum of 0.'.format(xmin))\n return True\n if ymin < 0:\n print('y-coordinate: {0} below minimum of 0.'.format(ymin))\n return True\n if xmax >= self.width:\n print('x-coordinate: {0} above maximum of {1}.'.format(\n xmax, self.width - 1))\n return True\n if ymax >= self.height:\n print('y-coordinate: {0} above maximum of {1}.'.format(\n ymax, self.height - 1))\n return True\n return False", "def in_range(x, y):\n if (x < 0 or x > width or y < 0 or y > length):\n return False\n else:\n return True", "def 
contains_point(self, point):\n\t\tthreshold = 0.6\n\t\tx = point[0]\n\t\ty = point[1]\n\t\tif (x >= (self.xmin - threshold) and x <= (self.xmax + threshold) and\n\t\t\ty >= (self.ymin - threshold) and y <= (self.ymax + threshold)):\n\t\t return True\n\t\treturn False", "def in_box(x, y):\n if self.zoom_box and x <= self.zoom_box_max_x and \\\n x >= self.zoom_box_min_x and y >= self.zoom_box_min_y and \\\n y <= self.zoom_box_max_y:\n return True\n else:\n return False", "def clicked(self, x_pos, y_pos):\n img = self.tower_imgs\n if self.x - img.get_width() // 2 + self.width >= x_pos >= self.x - img.get_width() // 2:\n if self.y + self.height - img.get_height() // 2 >= y_pos >= self.y - img.get_height() // 2:\n return True\n return False", "def check_inside(self, pos):\n x,y = pos\n return x >= self.posx and x <= self.posx + self.sizex and y >= self.posy and y <= self.posy + self.sizey", "def inBounds(self, px, py):\n return px >= 0 and py >= 0 and px < self.w and py < self.h", "def inside( self, point ):\n for i in range( 0, len(point) ):\n if math.fabs( self.center[i] - point[i] ) > self.dimLens[i]/2.0:\n return False;\n return True;", "def valid_pixel_coordinates(u, v, IMAGE_HEIGHT, IMAGE_WIDTH):\n return (u >= 0 and v >= 0 and v < IMAGE_HEIGHT and u < IMAGE_WIDTH)", "def inside_rectangle(self, x, y):\n if (self.pos.x - self.width < x < self.pos.x + self.width and\n self.pos.y - self.height < y < self.pos.y + self.height):\n return True", "def _assert_valid(self, y: int, x: int) -> None:\n if not (0 <= y < self.size[0] and 0 <= x < self.size[1]):\n raise ValueError('Coordinates out of image boundary, {}'.format(self.size))", "def inside_limits(self, point):\n if not self.regions:\n # Use rectangle check\n lat, lon = point.latitude, point.longitude\n if (lon > self.limits[0] and lat > self.limits[1] and\n lon < self.limits[2] and lat < self.limits[3]):\n return True\n else:\n return False\n else:\n # Check inside all possible regions\n p = Point((point.longitude, point.latitude))\n print(p, point)\n # import IPython; IPython.embed()\n for name, poly in self.regions.items():\n # if poly.contains(p):\n if p.intersects(poly):\n return name\n return False", "def in_image(self, *args, **kwargs):\n kwargs['with_bounding_box'] = True\n kwargs['fill_value'] = np.nan\n\n coords = self.invert(*args, **kwargs)\n\n result = np.isfinite(coords)\n if self.input_frame.naxes > 1:\n result = np.all(result, axis=0)\n\n if self.bounding_box is None or not np.any(result):\n return result\n\n if self.input_frame.naxes == 1:\n x1, x2 = self.bounding_box\n\n if len(np.shape(args[0])) > 0:\n result[result] = (coords[result] >= x1) & (coords[result] <= x2)\n elif result:\n result = (coords >= x1) and (coords <= x2)\n\n else:\n if len(np.shape(args[0])) > 0:\n for c, (x1, x2) in zip(coords, self.bounding_box):\n result[result] = (c[result] >= x1) & (c[result] <= x2)\n\n elif result:\n result = all([(c >= x1) and (c <= x2) for c, (x1, x2) in zip(coords, self.bounding_box)])\n\n return result", "def is_in_field(self, x, y):\n return (self.origin_x <= x < self.width) and (self.origin_y <= y < self.height)", "def in_bounds(self, point):\n # Sanity checks\n # Check that point has same number of dimensions as graph\n if not len(point) == len(self.dimensions):\n raise Exception(\"Point has \" + str(len(point)) + \" dimensions, Coordination Space has \" + \\\n str(len(self.dimensions)) + \" dimensions.\")\n\n for i, coordinate in enumerate(point):\n if coordinate > self.dimensions[i] or coordinate < 0:\n return False\n\n 
return True", "def valid(point):\n index = offset(point)\n if tiles[index] == 0:\n return False\n\n index = offset(point + 19)\n\n if tiles[index] == 0:\n return False\n\n return point.x % 20 == 0 or point.y % 20 == 0", "def remove_outside_points(points, world_cam, cam_img, image_shape):\n pts_cam = DataProcessing.world2cam(points[:, :3], world_cam)\n pts_img, depth = DataProcessing.cam2img(pts_cam, cam_img)\n\n val_flag_1 = np.logical_and(pts_img[:, 0] >= 0,\n pts_img[:, 0] < image_shape[1])\n val_flag_2 = np.logical_and(pts_img[:, 1] >= 0,\n pts_img[:, 1] < image_shape[0])\n val_flag_merge = np.logical_and(val_flag_1, val_flag_2)\n valid = np.logical_and(val_flag_merge, depth >= 0)\n\n return points[valid]", "def inside(self, x, on_boundary):\n return bool((near(x[0], xmin) or near(x[1], ymin)) and \\\n (not ((near(x[0], xmin) and near(x[1], ymax)) \\\n or (near(x[0], xmax) and near(x[1], ymin)))) \\\n and on_boundary)", "def detects_outside_grid(self):\r\n ii = self.rec_track['i']\r\n outside = sum(np.isnan(ii))\r\n\r\n return outside", "def within(self, x, y):\n return x >= self.top_x and x <= self.bottom_x and y >= self.bottom_y and y <= self.top_y", "def is_inside(self, coordinates: tuple) -> bool:\n if len(coordinates) != 2:\n raise IndexError(\"Coordinates consist of x and y\")\n x, y = coordinates\n if (self.MIN_X <= x <= self.MAX_X) and (self.MIN_Y <= y <= self.MAX_Y):\n return True\n else:\n return False", "def detect_in_bounds(self):\n creature_x, creature_y = self.creature.current_location\n if creature_x < 0 or creature_x >= self.world_width\\\n or creature_y < 0 or creature_y >= self.world_height:\n print('The creature is out of bounds!')\n return False\n return True", "def test_point_not_in_room(rectangle, big_area):\n new_room = Room(rectangle, 0, 1, 1, big_area, 'bathroom')\n point = (0, 15, 15)\n assert new_room.contains_point(point[0], point[1], point[2]) is False", "def out_of_bounds(self):\n return self.rect.right <= 0", "def check_extent(self):\n if self.lower_left.x > self.upper_right.x:\n dlx = self.lower_left.x\n self.lower_left.x = self.upper_right.x\n self.upper_right.y = dlx\n\n if self.lower_left.y > self.upper_right.y:\n dly = self.lower_left.y\n self.lower_left.y = self.upper_right.y\n self.upper_right.y = dly", "def wall_check(x: int, y: int, state: bool) -> bool:\r\n if state:\r\n if x == 0 or x == shape-1 or y == 0 or y == shape-1:\r\n return True\r\n else:\r\n if x < 0 or x >= shape or y < 0 or y >= shape:\r\n return True\r\n return False", "def click(self, X, Y):\n img = self.tower_imgs\n if self.x - img.get_width() // 2 + self.width >= X >= self.x - img.get_width() // 2:\n if self.y + self.height - img.get_height() // 2 >= Y >= self.y - img.get_height() // 2:\n return True\n return False", "def hasSpaceAround(self,x,y):\n global gamemap\n c = 0\n for x2 in xrange(-2,2):\n for y2 in xrange(-2,2):\n if self.near(x, y,x + x2,y + y2):\n if not gamemap[x + x2][y + y2].type[0]:\n c += 1\n if c >= 8:\n return False\n else:\n return True", "def _in_box(self, point, extent):\n return ((point[0] >= extent[0]) and\n (point[0] <= extent[1]) and\n (point[1] >= extent[2]) and\n (point[1] <= extent[3]))", "def boundary_check(limits : tuple, coords : tuple) -> bool:\n xl,xh,yl,yh = limits\n x,y = coords\n bound_x = xl <= x and x < xh\n bound_y = yl <= y and y < yh\n return bound_x and bound_y", "def isValidPos(self, x, y, wallList, grid):\n if (x, y) not in wallList:\n return x > 0 and x < grid.width and y > 0 and y < grid.height", "def 
test_point_with_zero_value_is_good():\n point = np.array([0, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def in_sight(x1, y1, x2, y2, area):\n # skip itself\n if x1 == x2 and y1 == y2:\n return False\n\n # go to the coordinate\n x_d, y_d = x2 - x1, y2 - y1\n multiple = gcd(x_d, y_d)\n\n x_step, y_step = x_d // multiple, y_d // multiple\n\n x1 += x_step\n y1 += y_step\n\n # jump to x2, y2 until we hit something\n while x1 != x2 or y1 != y2:\n if area[y1][x1] == \"#\":\n return False\n\n x1 += x_step\n y1 += y_step\n\n # if we didn't hit something, the position is valid!\n return True", "def isOnCanvas(self, x, y):\n return 0 <= x < self.width and 0 <= y < self.height", "def limit_pixel( self, point ):\n\t\tif point[0] and not( 0 <= self.y_offset+point[0] < self._raw_width ):\n\t\t\treturn True # Limit Pixel Drawing\n\t\tif point[1] and not( 0 <= self.y_offset+point[1] < self._raw_width ):\n\t\t\treturn True # Limit Pixel Drawing\n\t\treturn False", "def _isPoint(self):\n return (self.width == 0 and self.height == 1) or (self.height == 0 and self.width == 1)", "def _isPoint(self):\n return (self.width == 0 and self.height == 1) or (self.height == 0 and self.width == 1)", "def coordinates_within_board(n: int, x: int, y: int) -> bool:\n\n return x < n and y < n and x >= 0 and y >= 0", "def test_point_within_dimensions_true():\n point = np.array([10, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def check_crop(dim, x1, y1, x2, y2):\n return (\n x1 >= 0 and y1 >= 0 and x2 >= 0 and y2 >= 0 and x1 <= dim[0] and\n y1 <= dim[1] and x2 <= dim[0] and y2 <= dim[1] and x2 > x1 and y2 > y1)", "def is_position_allowed(new_x, new_y):\n\n return min_x <= new_x <= max_x and min_y <= new_y <= max_y", "def is_node_in_threat_zone(self, y, x):\n y_condition = self.top_left_y <= y < self.top_left_y + self.height\n x_condition = self.top_left_x <= x < self.top_left_x + self.width\n return y_condition and x_condition", "def inside_polygon(self, x, y, points):\n n = len(points)\n inside = False\n p1x, p1y = points[0]\n for i in range(1, n + 1):\n p2x, p2y = points[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y - p1y) * (p2x - p1x) / \\\n (p2y - p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside", "def occupied(self, (xIndex, yIndex)):\n return xIndex < 0 or yIndex < 0 or \\\n xIndex >= self.xN or yIndex >= self.yN or \\\n self.grid[xIndex][yIndex]", "def out_of_bounds(self):\n return self.rect.right <= 0 or self.rect.left >= self.screen_rect.width", "def is_dead(self, img):\n\n crop_height = 20\n crop_width = 20\n threshold = 70\n pixels_percentage = 0.10\n\n pixels_required = (img.shape[1] - 2 * crop_width) * crop_height * pixels_percentage\n\n crop = img[-crop_height:, crop_width:-crop_width]\n\n r = crop[:, :, 0] < threshold\n g = crop[:, :, 1] < threshold\n b = crop[:, :, 2] < threshold\n\n pixels = (r & g & b).sum()\n\n # print(\"Pixels: {}, Required: {}\".format(pixels, pixels_required))\n\n return pixels < pixels_required", "def isInside(self, point):\n # we rotate back the point to the frame parallel to the axis of the ellipse\n rotatedPoint = self.rotatePoint(point)\n # we check if each point is inside the associated liquid drop\n return ((rotatedPoint[:, :, 0]/self.axisA[:, None])**2 + (rotatedPoint[:, :, 1]/self.axisB[:, None])**2 < 1)", "def inBounds(self,pos):\n 
return ((pos.x<WIDTH) & (pos.x>=0) & (pos.y<HEIGHT) & (pos.y>=0))", "def contains(self, point):\n return 0 <= point.x <= 1 \\\n and 0 <= point.y <= 1 \\\n and 0 <= point.z <= 1", "def inrange(cc, point):\n return point.row in range(cc.top, cc.bottom+1) and point.col in range(cc.left, cc.right+1)", "def check_contained(self,x,y):\n if self.active:\n self.reset()\n #if in horizontal bounds\n if x > self.left and x < self.right:\n slope = 1/sqrt(3)\n #use to set create verticle bounds\n if x - self.center_x <= 0:\n slope *= -1\n\n ################\n x_rel = x - self.center_x #bounds depends on x location of the mouse \n bottom_bound = self.bottom - (x_rel*slope)\n top_bound = self.top - (x_rel*-slope)\n ################\n\n if y >= top_bound and y <= bottom_bound:\n if Ctrl_Vars.Left_MouseDown:\n self.press() # if all conditions are met use functionality", "def parse_area(x,y):\r\n # if (x,y) in gone :\r\n # return False\r\n # print(\"!\",end=\"\")\r\n # made useless thanks to the loop's conditions\r\n gone.add((x,y))\r\n if (x,y) in pos_turtle.values() :\r\n return True\r\n else :\r\n for (i,j) in [(x-UNIT,y), (x+UNIT,y), (x,y-UNIT), (x,y+UNIT)] :\r\n if (i,j) in pos_tracker or (i,j) in gone or abs(i)>=RAY or abs(j)>=RAY :\r\n continue\r\n if parse_area(i,j) :\r\n return True\r\n return False", "def is_wall(self, x, y):\r\n\r\n return self.get_bool(x, y, 'wall')", "def isoutside(coords, shape):\n # Label external pores for trimming below\n if len(shape) == 1: # Spherical\n # Find external points\n r = np.sqrt(np.sum(coords**2, axis=1))\n Ps = r > shape[0]\n elif len(shape) == 2: # Cylindrical\n # Find external pores outside radius\n r = np.sqrt(np.sum(coords[:, [0, 1]]**2, axis=1))\n Ps = r > shape[0]\n # Find external pores above and below cylinder\n if shape[1] > 0:\n Ps = Ps + (coords[:, 2] > shape[1])\n Ps = Ps + (coords[:, 2] < 0)\n else:\n pass\n elif len(shape) == 3: # Rectilinear\n shape = np.array(shape, dtype=float)\n try:\n lo_lim = shape[:, 0]\n hi_lim = shape[:, 1]\n except IndexError:\n lo_lim = np.array([0, 0, 0])\n hi_lim = shape\n Ps1 = np.any(coords > hi_lim, axis=1)\n Ps2 = np.any(coords < lo_lim, axis=1)\n Ps = Ps1 + Ps2\n return Ps", "def in_rectangle(x, y):\n return ((self.min_x <= x <= self.max_x) and\n (self.min_y <= y <= self.max_y))", "def lefton(hedge, point):\r\n\r\n return area2(hedge, point) >= 0", "def is_valid(box, img):\n valid_width = box['top_left_x'] > 0 and box['bottom_right_x'] < img.shape[1]\n valid_height = box['top_left_y'] > 0 and box['bottom_right_y'] < img.shape[0]\n return valid_width and valid_height", "def is_valid_room(self, x, y):\r\n return 0 <= x < self.__nx and 0 <= y < self.__ny", "def no_neighbour(x: int, y: int) -> bool:\r\n if not wall_check(x, y-1, False):\r\n if example[x, y-1] == 0:\r\n return False\r\n if not wall_check(x, y+1, False):\r\n if example[x, y+1] == 0:\r\n return False\r\n if not wall_check(x+1, y, False):\r\n if example[x+1, y] == 0:\r\n return False\r\n if not wall_check(x-1, y, False):\r\n if example[x-1, y] == 0:\r\n return False\r\n return True", "def isPositionValid(self, x, y):\n if x >= self._width:\n return False\n if y >= self._height:\n return False\n if x < 0:\n return False\n if y < 0:\n return False\n return not (x, y) in self._invalidPositions", "def in_ellipse(x,y,a,b):\n return ellipse(x,y,a,b) <= 1", "def inside(point, rectangle):\n\n ll = rectangle.getP1() # assume p1 is ll (lower left)\n ur = rectangle.getP2() # assume p2 is ur (upper right)\n\n return ll.getX() < point.getX() < ur.getX() and 
ll.getY() < point.getY() < ur.getY()", "def inside(i,j,im,h=H): #X\n return i-h >=0 and j-h >=0 and i+h+1<=im.shape[0] and j+h+1<=im.shape[1]", "def get(self, x, y):\n return not self.__image.getpixel((x, y))", "def is_occupied(self, p):\r\n return 0 <= p[0] < self.width and 0 <= p[1] < self.height and self.grid[p[1]][p[0]] == '#'", "def __validatePoint(self, point):\n # print point\n if point[0] > self.scn_width:\n raise ValueError('X coordinate: %d out of range.' % point[0])\n if point[1] > self.scn_height:\n raise ValueError('Y coordinate: %d out of range.' % point[1])\n return point", "def IsMouseWellOutsideWindow(self):\r\n \r\n screen_rect = self.GetScreenRect() \r\n screen_rect.Inflate(50, 50)\r\n \r\n return not screen_rect.Contains(wx.GetMousePosition())", "def isInside(x1, y1, x2, y2, x3, y3, x, y):\n # Calculate area of triangle ABC\n A = area (x1, y1, x2, y2, x3, y3)\n \n # Calculate area of triangle PBC\n A1 = area (x, y, x2, y2, x3, y3)\n \n # Calculate area of triangle PAC\n A2 = area (x1, y1, x, y, x3, y3)\n \n # Calculate area of triangle PAB\n A3 = area (x1, y1, x2, y2, x, y)\n \n # Check if sum of A1, A2 and A3\n # is same as A\n if(A == A1 + A2 + A3):\n return True\n else:\n return False", "def is_point_in(self, point):\n return (self.upperleft[0] <= point[0] <= self.upperright[0] and self.upperleft[1] <= point[1] <= self.bottomleft[1])" ]
[ "0.87402356", "0.7378075", "0.7371467", "0.7322463", "0.7271476", "0.71669763", "0.7123005", "0.70656526", "0.7022389", "0.7014238", "0.696292", "0.6954585", "0.69515514", "0.69209486", "0.6896473", "0.68796045", "0.6856969", "0.68375915", "0.67989755", "0.6798606", "0.67816234", "0.6774851", "0.67660224", "0.66965127", "0.66963464", "0.66877055", "0.668064", "0.6679758", "0.6669126", "0.664453", "0.6615116", "0.6604384", "0.6582943", "0.6579689", "0.6578297", "0.6571445", "0.6546099", "0.6535256", "0.6529479", "0.6523384", "0.65218556", "0.6508571", "0.6491724", "0.64842397", "0.64634407", "0.64543194", "0.6453566", "0.6426069", "0.64212716", "0.6415442", "0.64109427", "0.6392675", "0.6388497", "0.6376996", "0.63739663", "0.636504", "0.6354457", "0.6354348", "0.63432235", "0.6341127", "0.6340445", "0.6307196", "0.6305892", "0.62970966", "0.6296934", "0.6296008", "0.628023", "0.628023", "0.6274577", "0.6269325", "0.6257013", "0.6250913", "0.6249572", "0.6239489", "0.6232575", "0.62110794", "0.62059695", "0.6198584", "0.61950374", "0.61909354", "0.61810637", "0.6174886", "0.616458", "0.6133154", "0.61245984", "0.61202693", "0.6119263", "0.6116482", "0.6115049", "0.6111867", "0.6107598", "0.61054164", "0.6088242", "0.60846215", "0.6081302", "0.6053452", "0.60515857", "0.60507417", "0.60402167", "0.60374147" ]
0.8744287
0
Zoom with mouse wheel
def __wheel(self, event):
    x = self.canvas_image.canvasx(event.x)  # get coordinates of the event on the canvas
    y = self.canvas_image.canvasy(event.y)
    if self.outside(x, y): return  # zoom only inside image area
    scale = 1.0
    # Respond to Linux (event.num) or Windows (event.delta) wheel event
    if event.num == 5 or event.delta == -120:  # scroll down, smaller
        if round(self.__min_side * self.imscale) < 30: return  # image is less than 30 pixels
        self.imscale /= self.__delta
        scale /= self.__delta
    if event.num == 4 or event.delta == 120:  # scroll up, bigger
        i = min(self.canvas_image.winfo_width(), self.canvas_image.winfo_height()) >> 1
        if i < self.imscale: return  # 1 pixel is bigger than the visible area
        self.imscale *= self.__delta
        scale *= self.__delta
    # Take appropriate image from the pyramid
    k = self.imscale * self.__ratio  # temporary coefficient
    self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)
    self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))
    #
    self.canvas_image.scale('all', x, y, scale, scale)  # rescale all objects
    # Redraw some figures before showing image on the screen
    self.redraw_figures()  # method for child classes
    self.__show_image()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_mouse_wheel(self, event):\n delta = event.delta[1]\n if delta > 0: # Zoom in\n factor = 0.9\n elif delta < 0: # Zoom out\n factor = 1 / 0.9\n for _ in range(int(abs(delta))):\n self.zoom(factor, event.pos)", "def set_zooming_wheel(self):\n # Zooming: wheel\n self.set('Wheel', 'Zoom',\n param_getter=lambda p: (\n p[\"wheel\"]*.002, \n p[\"mouse_position\"][0],\n p[\"wheel\"]*.002, \n p[\"mouse_position\"][1]))", "def callback_mouse_zoom(self, event):\n\n if self.variables.zoom_on_wheel:\n delta = event.delta\n single_delta = 120\n\n # handle case where platform is linux:\n if platform.system() == \"Linux\":\n delta = single_delta\n if event.num == 5:\n delta = delta*-1\n\n zoom_in_box_half_width = int(self.variables.canvas_width / self.variables.mouse_wheel_zoom_percent_per_event / 2)\n zoom_out_box_half_width = int(self.variables.canvas_width * self.variables.mouse_wheel_zoom_percent_per_event / 2)\n zoom_in_box_half_height = int(self.variables.canvas_height / self.variables.mouse_wheel_zoom_percent_per_event / 2)\n zoom_out_box_half_height = int(self.variables.canvas_height * self.variables.mouse_wheel_zoom_percent_per_event / 2)\n\n x = event.x\n y = event.y\n\n after_zoom_x_offset = (self.variables.canvas_width/2 - x)/self.variables.mouse_wheel_zoom_percent_per_event\n after_zoom_y_offset = (self.variables.canvas_height/2 - y)/self.variables.mouse_wheel_zoom_percent_per_event\n\n x_offset_point = x + after_zoom_x_offset\n y_offset_point = y + after_zoom_y_offset\n\n zoom_in_box = (x_offset_point - zoom_in_box_half_width,\n y_offset_point - zoom_in_box_half_height,\n x_offset_point + zoom_in_box_half_width,\n y_offset_point + zoom_in_box_half_height)\n\n zoom_out_box = (x_offset_point - zoom_out_box_half_width,\n y_offset_point - zoom_out_box_half_height,\n x_offset_point + zoom_out_box_half_width,\n y_offset_point + zoom_out_box_half_height)\n\n if self.variables.the_canvas_is_currently_zooming:\n pass\n else:\n if delta > 0:\n self.zoom_to_selection(zoom_in_box, self.variables.animate_zoom)\n else:\n self.zoom_to_selection(zoom_out_box, self.variables.animate_zoom)\n else:\n pass", "def wheelEvent(self, event: QWheelEvent):\n # zoom only when CTRL key pressed\n if (event.modifiers() & Qt.ControlModifier) == Qt.ControlModifier:\n steps = event.angleDelta().y() / 15 / 8\n\n if steps == 0:\n event.ignore()\n return\n\n # scale factor 1.25\n sc = pow(1.25, steps)\n self.scale(sc, sc)\n self.centerOn(self.mapToScene(event.pos()))\n event.accept()\n #  act normally on scrollbar\n else:\n # transmit event to parent class wheelevent\n super(QGraphicsView, self).wheelEvent(event)", "def windows_zoomer(self, event):\n if event.delta > 0:\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n elif event.delta < 0:\n self.canvas.scale(\"all\", event.x, event.y, 0.9, 0.9)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def linux_zoomer_plus(self, event):\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def wheelEvent(self, event: QtGui.QWheelEvent) -> None:\n scaleFactor = 1 + (event.angleDelta().y() / 600)\n # Limit zoom to a reasonable range.\n if scaleFactor > 1 and self._scaleFactor > 10:\n return\n elif scaleFactor < 1 and self._scaleFactor < .8:\n return\n self.scale(scaleFactor, scaleFactor)\n self._scaleFactor = self._scaleFactor * scaleFactor # Keep track of current scaling factor", "def wheelEvent(self,event):\n factor = 1.41 ** (-event.delta()/240.0)\n self.scale(factor,factor)", 
"def onWheel(self, event):\r\n ax = event.inaxes\r\n step = event.step\r\n\r\n\r\n if ax != None:\r\n # Event occurred inside a plotting area\r\n lo,hi = ax.get_xlim()\r\n lo,hi = _rescale(lo,hi,step,pt=event.xdata)\r\n ax.set_xlim((lo,hi))\r\n\r\n lo,hi = ax.get_ylim()\r\n lo,hi = _rescale(lo,hi,step,pt=event.ydata)\r\n ax.set_ylim((lo,hi))\r\n else:\r\n # Check if zoom happens in the axes\r\n xdata,ydata = None,None\r\n x,y = event.x,event.y\r\n for ax in self.axes:\r\n insidex,_ = ax.xaxis.contains(event)\r\n if insidex:\r\n xdata,_ = ax.transAxes.inverse_xy_tup((x,y))\r\n #print \"xaxis\",x,\"->\",xdata\r\n insidey,_ = ax.yaxis.contains(event)\r\n if insidey:\r\n _,ydata = ax.transAxes.inverse_xy_tup((x,y))\r\n #print \"yaxis\",y,\"->\",ydata\r\n if xdata is not None:\r\n lo,hi = ax.get_xlim()\r\n lo,hi = _rescale(lo,hi,step,bal=xdata)\r\n ax.set_xlim((lo,hi))\r\n if ydata is not None:\r\n lo,hi = ax.get_ylim()\r\n lo,hi = _rescale(lo,hi,step,bal=ydata)\r\n ax.set_ylim((lo,hi))\r\n \r\n self.canvas.draw_idle()", "def wheelEvent(self, ev):\n\n # Check if we're in auto Zoom mode\n if self.__zooming:\n # we're zooming\n if (ev.angleDelta().y() > 0):\n self.zoom(ev.pos(), 1)\n else:\n self.zoom(ev.pos(), -1)\n\n else:\n # not zooming - pass wheel event on\n self.mouseWheel.emit(self, ev)", "def set_zooming_mouse(self):\n # Zooming: right button mouse\n self.set('RightClickMove', 'Zoom',\n param_getter=lambda p: (p[\"mouse_position_diff\"][0]*2.5,\n p[\"mouse_press_position\"][0],\n p[\"mouse_position_diff\"][1]*2.5,\n p[\"mouse_press_position\"][1]))", "def zoom(self, factor, mouse_coords=None):\n if mouse_coords is not None: # Record the position of the mouse\n x, y = float(mouse_coords[0]), float(mouse_coords[1])\n x0, y0, z0 = self.pixel_to_coords(x, y)\n\n self.scale *= factor\n self.scale = max(min(self.scale, self.max_scale), self.min_scale)\n self.program[\"scale\"] = self.scale\n\n # Translate so the mouse point is stationary\n if mouse_coords is not None:\n x1, y1, z1 = self.pixel_to_coords(x, y)\n self.translate_center(x1 - x0, y1 - y0, z1 - z0)", "def __wheel(self, event):\n x = self.canvas.canvasx(event.x) # get coordinates of the event on the canvas\n y = self.canvas.canvasy(event.y)\n if self.outside(x, y): return # zoom only inside image area\n scale = 1.0\n # Respond to Linux (event.num) or Windows (event.delta) wheel event\n if event.num == 5 or event.delta == -120: # scroll down, smaller\n if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels\n self.imscale /= self.__delta\n scale /= self.__delta\n if event.num == 4 or event.delta == 120: # scroll up, bigger\n i = min(self.canvas.winfo_width(), self.canvas.winfo_height()) >> 1\n if i < self.imscale: return # 1 pixel is bigger than the visible area\n self.imscale *= self.__delta\n scale *= self.__delta\n # Take appropriate image from the pyramid\n k = self.imscale * self.__ratio # temporary coefficient\n self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)\n self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))\n #\n self.canvas.scale('all', x, y, scale, scale) # rescale all objects\n # Redraw some figures before showing image on the screen\n self.redraw_figures() # method for child classes\n self.__show_image()", "def set_zoombox_mouse(self):\n # Zooming: zoombox (drag and drop)\n self.set('MiddleClickMove', 'ZoomBox',\n param_getter=lambda p: (p[\"mouse_press_position\"][0],\n p[\"mouse_press_position\"][1],\n p[\"mouse_position\"][0],\n 
p[\"mouse_position\"][1]))", "def _on_scroll(self, event):\n self._zoom(event.step, draw=True)", "def on_zoom_change(self, event) -> None:\r\n\r\n zoom_level = int(self.zoom_scale.get())\r\n self.painter.zoom = zoom_level\r\n self.painter.draw_board()", "def on_scroll(self, win, _deltax, deltay):\n self.zoom(deltay, glfw.get_window_size(win)[1])", "def on_scroll(self, win, _deltax, deltay):\n self.zoom(deltay, glfw.get_window_size(win)[1])", "def increment_zoom(self):\n if self._diving:\n self.mpl_mandelbrot.increment_zoom_anchored(self._zoom_frac_per_frame)", "def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")", "def wheel(ticks):\n m = PyMouse()\n m.scroll(ticks)", "def enableZoomIn(self):\n self.zoomInID = self.canvas.mpl_connect('button_press_event', self.onZoomIn)\n self.master.config(cursor = \"cross\")", "def ev_mousewheel(self, event: MouseWheel) -> None:", "def on_mouse_press(self, event):\n self.on_mouse_wheel(event)", "def wheelEvent(self, event):\n degrees = event.angleDelta().y() / 8\n steps = degrees / 15\n self.view_state.scale *= 1.5 ** steps", "def mouseMoveEvent(self, ev):\n shift = ev.modifiers() & QtCore.Qt.ShiftModifier\n ctrl = ev.modifiers() & QtCore.Qt.ControlModifier\n if shift:\n y = ev.pos().y()\n if not hasattr(self, '_prev_zoom_pos') or not self._prev_zoom_pos:\n self._prev_zoom_pos = y\n return\n dy = y - self._prev_zoom_pos\n def delta():\n return -dy * 5\n ev.delta = delta\n self._prev_zoom_pos = y\n self.wheelEvent(ev)\n elif ctrl:\n pos = ev.pos().x(), ev.pos().y()\n if not hasattr(self, '_prev_pan_pos') or not self._prev_pan_pos:\n self._prev_pan_pos = pos\n return\n dx = pos[0] - self._prev_pan_pos[0]\n dy = pos[1] - self._prev_pan_pos[1]\n self.pan(dx, dy, 0, relative=True)\n self._prev_pan_pos = pos\n else:\n super(PlotObject, self).mouseMoveEvent(ev)", "def linux_zoomer_minus(self, event):\n self.canvas.scale(\"all\", event.x, event.y, 0.9, 0.9)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def zoom(self, amount):\n pass", "def enableZoomOut(self):\n self.zoomOutID = self.canvas.mpl_connect('button_press_event', self.onZoomOut)\n self.master.config(cursor = \"cross\")", "def do_scroll_event(self, event):\n\t\tif event.state & gtk.gdk.CONTROL_MASK:\n\t\t\tif event.direction == gtk.gdk.SCROLL_UP:\n\t\t\t\tself.zoom *= 1.1\n\t\t\telif event.direction == gtk.gdk.SCROLL_DOWN:\n\t\t\t\tself.zoom /= 1.1", "def onZoomIn(self, event):\n try:\n print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %\n ('double' if event.dblclick else 'single', event.button,\n event.x, event.y, event.xdata, event.ydata))\n except:\n return\n\n\n self.plotter.zoomIn(event)", "def on_mouse_wheel(self, e): # pragma: no cover\n super(TraceView, self).on_mouse_wheel(e)\n if e.modifiers == ('Alt',):\n start, end = self._interval\n delay = e.delta * (end - start) * .1\n self.shift(-delay)", "def set_zoombox_keyboard(self):\n # Idem but with CTRL + left button mouse \n self.set('LeftClickMove', 'ZoomBox',\n key_modifier='Control',\n param_getter=lambda p: (p[\"mouse_press_position\"][0],\n p[\"mouse_press_position\"][1],\n p[\"mouse_position\"][0],\n p[\"mouse_position\"][1]))", "def zoom(self,event):\r\n if self.measurements == None:\r\n return\r\n if self.controlLock.locked():\r\n return\r\n self.controlLock.acquire()\r\n if len(self.dataPlayers) > 0:\r\n dp = self.dataPlayers[0]\r\n dp.zoom(event.delta*2/120)\r\n scalex,scaley = dp.getScale()\r\n # Do functions together\r\n for dp in self.dataPlayers:\r\n 
dp.setScaleX(scalex[0],scalex[1])\r\n for dp in self.dataPlayers:\r\n dp.draw()\r\n for dp in self.dataPlayers:# Update canvas together\r\n dp.redraw()\r\n self.controlLock.release()", "def __zoomIn(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").zoomIn()\n else:\n aw = self.activeWindow()\n if aw:\n aw.zoomIn()\n self.sbZoom.setValue(aw.getZoom())", "def _zoom(self, sign=1, draw=False):\n delta = _ZOOM_STEP_SIZE * sign\n for axis, fig in enumerate(self._figs):\n xmid = self._images['cursor_v'][axis].get_xdata()[0]\n ymid = self._images['cursor_h'][axis].get_ydata()[0]\n xmin, xmax = fig.axes[0].get_xlim()\n ymin, ymax = fig.axes[0].get_ylim()\n xwidth = (xmax - xmin) / 2 - delta\n ywidth = (ymax - ymin) / 2 - delta\n if xwidth <= 0 or ywidth <= 0:\n return\n fig.axes[0].set_xlim(xmid - xwidth, xmid + xwidth)\n fig.axes[0].set_ylim(ymid - ywidth, ymid + ywidth)\n if draw:\n self._figs[axis].canvas.draw()", "def action_zoom_in(self):\n if self.cur_zoom < len(self.zoom_levels) - 1:\n self.cur_zoom += 1\n self.zoom_widget.setValue(self.cur_zoom)\n self.apply_zoom()", "def zoom(self, scaleChange):\n\t\tself.scaleFactor += scaleChange\n\t\t\n\t\t# don't allow smaller then 10%\n\t\tif self.scaleFactor < 0.1:\n\t\t\tself.scaleFactor = 0.1\n\t\t\n\t\tif scaleChange > 0:\n\t\t\tself.setTitle(\"Zoom +\")\n\t\telse:\n\t\t\tself.setTitle(\"Zoom -\")\n\t\t\n\t\tself.display()\n\t\tself.autoResize()\n\t\tgobject.timeout_add(10, self.display)", "def zoom(self, zoom):\n\n self.container['zoom'] = zoom", "def __zoom(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n aw = e5App().getObject(\"Shell\")\n else:\n aw = self.activeWindow()\n if aw:\n from QScintilla.ZoomDialog import ZoomDialog\n dlg = ZoomDialog(aw.getZoom(), self.ui, None, True)\n if dlg.exec_() == QDialog.Accepted:\n value = dlg.getZoomSize()\n self.__zoomTo(value)", "def on_mouse_wheel(self, event):\n self.translate -= event.delta[1]\n self.game_program['u_view'] = self.view\n\n self.yaw, self.pitch = 0, 0\n\n self.rot_y(self.yaw * np.pi / 180)\n self.rot_x(self.pitch * np.pi / 180)\n\n self.view = np.dot(self.rot_mat_y, self.rot_mat_x)\n self.game_program['u_view'] = self.view\n\n self.update()", "def setZoomOnWheelEnabled(self, enabled: bool):\n if enabled != self.__zoomOnWheel:\n self.__zoomOnWheel = enabled\n self.sigChanged.emit()", "def mouse_wheelEvent(self, e):\n if self.image is not None:\n modifiers = QtWidgets.QApplication.keyboardModifiers()\n if modifiers == QtCore.Qt.ControlModifier:\n wheel_counter = e.angleDelta()\n if wheel_counter.y() / 120 == -1:\n if self.width_result_image == 1000:\n pass\n else:\n self.width_result_image -= 100\n\n if wheel_counter.y() / 120 == 1:\n if self.width_result_image == 4000:\n pass\n else:\n self.width_result_image += 100\n self.show_to_window()", "def update_zoom_plot(self):\n self.plot_zoom.setXRange(*self.linear_region.getRegion(), padding=0)", "def set_zoombox_keyboard(self):\n self.set('MiddleClickMove', 'ZoomBox',\n # key_modifier='Shift',\n param_getter=lambda p: (p[\"mouse_press_position\"][0],\n p[\"mouse_press_position\"][1],\n p[\"mouse_position\"][0],\n p[\"mouse_position\"][1]))", "def set_zoombox_keyboard(self):\n self.set('MiddleClickMove', 'ZoomBox',\n # key_modifier='Shift',\n param_getter=lambda p: (p[\"mouse_press_position\"][0],\n p[\"mouse_press_position\"][1],\n p[\"mouse_position\"][0],\n p[\"mouse_position\"][1]))", "def onscroll(self, event):\n if self.out_graph is False:\n self.zoom += 
10*event.step\n\n if self.zoom >= self.axe_X/2/self.FOV_img*self.FOV_img_Y:\n self.zoom = self.axe_X/2/self.FOV_img*self.FOV_img_Y\n\n if self.zoom <= 0:\n self.zoom = 0\n\n self.draw()", "def zoom_factory(self, ax, base_scale=2.):\n def zoom_fun(event):\n # Get the current x and y limits\n cur_xlim = ax.get_xlim()\n cur_ylim = ax.get_ylim()\n cur_xrange = (cur_xlim[1] - cur_xlim[0])*.5\n cur_yrange = (cur_ylim[1] - cur_ylim[0])*.5\n # Get event location\n xdata = event.xdata\n ydata = event.ydata\n if event.button == 'up':\n # Deal with zoom in\n scale_factor = 1/base_scale\n elif event.button == 'down':\n # Deal with zoom out\n scale_factor = base_scale\n else:\n # Deal with something that should never happen\n scale_factor = 1\n print event.button\n # Set new limits - improved from original\n ax.set_xlim([xdata - (xdata - cur_xlim[0])*scale_factor,\n xdata + (cur_xlim[1] - xdata)*scale_factor])\n ax.set_ylim([ydata - (ydata - cur_ylim[0])*scale_factor,\n ydata + (cur_ylim[1] - ydata)*scale_factor])\n # Force redraw\n self.canvas.Refresh()\n\n # Get the figure of interest\n fig = ax.get_figure()\n # Attach the call back\n fig.canvas.mpl_connect('scroll_event', zoom_fun)\n\n # Return the function\n return zoom_fun", "def zoom(self, step):\n current_zoom = self.scale_factor\n self.scale_factor = current_zoom + step / 100 * current_zoom\n # self.scale_factor = max(min(self.scale_factor + step, 1000000), 5)", "def setZoom(self, zoom):\r\n self._viewZoom = zoom", "def set_zooming_keyboard(self):\n # Zooming: ALT + key arrows\n self.set('KeyPress', 'Zoom',\n key='Left', key_modifier='Control', \n param_getter=lambda p: (-.25, 0, 0, 0))\n self.set('KeyPress', 'Zoom',\n key='Right', key_modifier='Control', \n param_getter=lambda p: (.25, 0, 0, 0))\n self.set('KeyPress', 'Zoom',\n key='Up', key_modifier='Control', \n param_getter=lambda p: (0, 0, .25, 0))\n self.set('KeyPress', 'Zoom',\n key='Down', key_modifier='Control', \n param_getter=lambda p: (0, 0, -.25, 0))", "def evt_zoom_released(self):\n # record home XY limit if it is never zoomed\n if self._isZoomed is False:\n self._homeXYLimit = list(self.getXLimit())\n self._homeXYLimit.extend(list(self.getYLimit()))\n # END-IF\n\n # set the state of being zoomed\n self._isZoomed = True\n\n return", "def ev_mousewheel(self, event: tcod.event.MouseWheel) -> T | None:", "def on_mouse_drag(x, y, dx, dy, buttons, modifiers):\n geo = self.geometry\n height = self.window.height - 2 * geo.vertical_margin - 3 -\\\n geo.status_bar_height\n start = geo.horizontal_margin + geo.graph_start_x\n end = self.window.width - 2 * geo.horizontal_margin - \\\n geo.scroll_bar_width - geo.menu_width - 3\n # If the box already exists, update it.\n if self.zoom_box:\n if x < start:\n x = start\n elif x > end:\n x = end\n self.zoom_box.begin_update()\n self.zoom_box.resize(x - self.zoom_start - 2, height - 2)\n self.zoom_box.end_update()\n self.zoom_frame.begin_update()\n self.zoom_frame.resize(x - self.zoom_start, height)\n self.zoom_frame.end_update()\n self.zoom_box_min_x = self.zoom_start\n self.zoom_box_max_x = self.zoom_start + x - self.zoom_start\n self.zoom_box_max_y = self.window.height - geo.vertical_margin\\\n - 3\n self.zoom_box_min_y = self.zoom_box_max_y - height\n if self.zoom_box_min_x > self.zoom_box_max_x:\n self.zoom_box_min_x, self.zoom_box_max_x = \\\n self.zoom_box_max_x, self.zoom_box_min_x\n # Otherwise create a new box.\n else:\n self.zoom_start = x\n self.zoom_box = glydget.Rectangle(x + 1, self.window.height -\\\n self.geometry.vertical_margin - 
5, 1,\n height - 2, [255,255,255,155,255,255,255,100,\n 255,255,255,200,255,255,255,120])\n self.zoom_frame = glydget.Rectangle(x, self.window.height -\\\n self.geometry.vertical_margin - 3, 1,\n height, (0,0,0,200), filled = False)\n self.zoom_box.build(batch = self.batch, group = self.groups[-1])\n self.zoom_frame.build(batch = self.batch, group = self.groups[-1])\n self.zoom_box_min_x = x\n self.zoom_box_max_x = x+1\n # Push the handlers for the box.\n self.zoom_handlers = \\\n self.win.window.push_handlers(on_mouse_motion, on_mouse_press)", "def zoom_to(self):\n # Will seek user feedback. QGIS will\n # Pan to first layer loaded", "def zoom(self, factor):\n adj = self.canvas.get_hadjustment()\n oldCenter = adj.value + adj.page_size // 2\n\n self.scale *= factor\n self.resizer.rescale()\n self.resize(self.timeExtent * self.scale, self.height)\n for f in self.resizeCallbacks:\n f()\n\n adj.value = oldCenter * factor - adj.page_size // 2", "def zoom(self, factor):\n self._transform(\n [\n [factor, 0, 0],\n [0, factor, 0],\n [0, 0, factor],\n ])", "def _on_mousewheel(event):\n if event.num == 4 or event.delta > 0:\n canvas.yview_scroll(-1, \"units\" )\n elif event.num == 5 or event.delta < 0:\n canvas.yview_scroll(1, \"units\" )", "def clickAutoscale(self, event):\n self.axes.autoscale_view()", "def action_set_zoom(self, value):\n if value >= 0 and value < len(self.zoom_levels) and value != self.cur_zoom:\n self.cur_zoom = value\n self.apply_zoom()", "def do_zoom_view(self, dx, dy, lock_aspect_ratio=False):\n # See guiqwt/events.py where dx and dy are defined like this:\n # dx = (pos.x(), self.last.x(), self.start.x(), rct.width())\n # dy = (pos.y(), self.last.y(), self.start.y(), rct.height())\n # where:\n # * self.last is the mouse position seen during last event\n # * self.start is the first mouse position (here, this is the\n # coordinate of the point which is at the center of the zoomed area)\n # * rct is the plot rect contents\n # * pos is the current mouse cursor position\n auto = self.autoReplot()\n self.setAutoReplot(False)\n dx = (-1,) + dx # adding direction to tuple dx\n dy = (1,) + dy # adding direction to tuple dy\n if lock_aspect_ratio:\n direction, x1, x0, start, width = dx\n F = 1 + 3 * direction * float(x1 - x0) / width\n axes_to_update = self.get_axes_to_update(dx, dy)\n\n axis_ids_horizontal = (self.get_axis_id(\"bottom\"), self.get_axis_id(\"top\"))\n axis_ids_vertical = (self.get_axis_id(\"left\"), self.get_axis_id(\"right\"))\n\n for (direction, x1, x0, start, width), axis_id in axes_to_update:\n lbound, hbound = self.get_axis_limits(axis_id)\n if not lock_aspect_ratio:\n F = 1 + 3 * direction * float(x1 - x0) / width\n if F * (hbound - lbound) == 0:\n continue\n if self.get_axis_scale(axis_id) == 'lin':\n orig = self.invTransform(axis_id, start)\n vmin = orig - F * (orig - lbound)\n vmax = orig + F * (hbound - orig)\n else: # log scale\n i_lbound = self.transform(axis_id, lbound)\n i_hbound = self.transform(axis_id, hbound)\n imin = start - F * (start - i_lbound)\n imax = start + F * (i_hbound - start)\n vmin = self.invTransform(axis_id, imin)\n vmax = self.invTransform(axis_id, imax)\n\n # patch for not \"zooming out\"\n if axis_id in axis_ids_horizontal:\n vmin = max(vmin, self.peakmap_range[0])\n vmax = min(vmax, self.peakmap_range[1])\n elif axis_id in axis_ids_vertical:\n vmin = max(vmin, self.peakmap_range[2])\n vmax = min(vmax, self.peakmap_range[3])\n\n self.set_axis_limits(axis_id, vmin, vmax)\n\n self.setAutoReplot(auto)\n # the signal MUST be emitted after 
replot, otherwise\n # we receiver won't see the new bounds (don't know why?)\n self.replot()\n self.emit(SIG_PLOT_AXIS_CHANGED, self)", "def zoomMap(self, scale, x=0, y=0):\n if self.zoomed:\n self.delete(self.zoomed)\n self.zoomed = self.im.zoom(scale, scale)\n zoomed_id = self.create_image(x, y, image=self.zoomed, anchor=NW)\n self.delete(self.original)\n self.scale = scale", "def zoom(x, zoom_range=(0.9, 1.1), flags=None, border_mode='constant'):\n zoom_matrix = affine_zoom_matrix(zoom_range=zoom_range)\n h, w = x.shape[0], x.shape[1]\n transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)\n x = affine_transform_cv2(x, transform_matrix, flags=flags, border_mode=border_mode)\n return x", "def change_zoom(self, b):\n\n x_mid = int(self.ff[0].info['xres'] / 2)\n y_mid = int(self.ff[0].info['yres'] / 2)\n\n x = x_mid - self.x_crop_slider.value\n\n if self.y_crop.value is True:\n y = y_mid - self.y_crop_slider.value\n else:\n y = y_mid - self.x_crop_slider.value\n\n x0 = x_mid - x\n x1 = x_mid + x\n y0 = y_mid - y\n y1 = y_mid + y\n\n self.x_range = [x0, x1]\n self.y_range = [y0, y1]\n\n self.ax.set_xlim([x0, x1])\n self.ax.set_ylim([y0, y1])", "def action_zoom_out(self):\n if self.cur_zoom > 0:\n self.cur_zoom -= 1\n self.zoom_widget.setValue(self.cur_zoom)\n self.apply_zoom()", "def zoom(self, parameter):\n dx, px, dy, py = parameter\n if self.parent.constrain_ratio:\n if (dx >= 0) and (dy >= 0):\n dx, dy = (max(dx, dy),) * 2\n elif (dx <= 0) and (dy <= 0):\n dx, dy = (min(dx, dy),) * 2\n else:\n dx = dy = 0\n self.sx *= np.exp(dx)\n self.sy *= np.exp(dy)\n \n # constrain scaling\n if self.constrain_navigation:\n self.sx = np.clip(self.sx, self.sxmin, self.sxmax)\n self.sy = np.clip(self.sy, self.symin, self.symax)\n \n self.tx += -px * (1./self.sxl - 1./self.sx)\n self.ty += -py * (1./self.syl - 1./self.sy)\n self.sxl = self.sx\n self.syl = self.sy", "def plot_zoom(ax, xlims):\n\n xmin, xmax, ymin, ymax = get_y_lims(ax, xlims)\n ax.set_xlim(xmin, xmax)\n ax.set_ylim(ymin, ymax)\n\n return ax", "def __zoomTo(self, value):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n aw = e5App().getObject(\"Shell\")\n else:\n aw = self.activeWindow()\n if aw:\n aw.zoomTo(value)\n self.sbZoom.setValue(aw.getZoom())", "def zoom(self, dr):\n d = self.getDistance()\n vn = self.getViewNormal()\n vn *= dr*d\n GL.glTranslate(vn[0], vn[1], vn[2])", "def apply_zoom(self):\n self.maparea.setTransform(self.zoom_levels[self.cur_zoom][1])\n self.scene.draw_visible_area()", "def onZoomOut(self, event):\n self.plotter.zoomOut(event)", "def __zoomOut(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").zoomOut()\n else:\n aw = self.activeWindow()\n if aw:\n aw.zoomOut()\n self.sbZoom.setValue(aw.getZoom())", "def update_zoom_region(self):\n self.linear_region.setRegion(self.plot_zoom.getViewBox().viewRange()[0])", "def isZoomOnWheelEnabled(self) -> bool:\n return self.__zoomOnWheel", "def process_zoom(self, status):\n log.debug(\"Zoom tool clicked %s\", status)\n if status == \"True\":\n self.auto_scale = False", "def zoom(self, zoomIn):\n zoomFactor = 0.05\n maxZoomIn = 2\n maxZoomOut = 0.1\n if zoomIn:\n s = self.viewNP.getScale()\n if s.getX()-zoomFactor < maxZoomIn and s.getY()-zoomFactor < maxZoomIn and s.getZ()-zoomFactor < maxZoomIn:\n self.viewNP.setScale(s.getX()+zoomFactor,s.getY()+zoomFactor,s.getZ()+zoomFactor)\n else:\n s = self.viewNP.getScale()\n if s.getX()-zoomFactor > maxZoomOut and s.getY()-zoomFactor > maxZoomOut and 
s.getZ()-zoomFactor > maxZoomOut:\n self.viewNP.setScale(s.getX()-zoomFactor,s.getY()-zoomFactor,s.getZ()-zoomFactor)\n self.nodeMgr.updateConnections()", "def zoom(self, locator, percent=\"200%\", steps=1):\r\n driver = self._current_application()\r\n element = self._element_find(locator, True, True)\r\n driver.zoom(element=element, percent=percent, steps=steps)", "def zoom(self, dr):\n d = self.getDistance()\n vn = self.getViewNormal()\n vn *= dr*d\n GL.glTranslatef(vn[0], vn[1], vn[2])", "def zoom(self, delta):\n zoom_v = 1./20\n zoom_cap = self.d + zoom_v*delta < (self.max_side_lengths)*4\n if self.d + zoom_v * delta > 0 and zoom_cap:\n self.d += zoom_v * delta", "def zoom(self, point, zoomLevel):\n\n # get the point before the scale\n ptBeforeScale = self.mapToScene(point)\n\n # set the zoom level\n zoomScalar = self.__getZoomScaler(zoomLevel)\n if (zoomScalar == 0):\n return\n\n # scale the scene\n QGraphicsView.scale(self, zoomScalar, zoomScalar)\n\n # get the point after the scale\n ptAfterScale = self.mapToScene(point)\n\n # calculate the offset and update\n offset = ptAfterScale - ptBeforeScale\n newCenter = self.centerPoint - offset\n self.setCenterPoint(newCenter)", "def _handleClick(self, event):\n\n\t\t(x_min, x_max, y_min, y_max) = [i for i in self.extent]\n\t\tif event.xdata != None and event.ydata != None:\n\t\t\t(click_x, click_y) = (event.xdata, event.ydata)\n\t\t\tnewWidth = (x_max-x_min)/self.zoom\n\t\t\tnewHeight = (y_max-y_min)/self.zoom\n\n\t\t\t# update self.extent to the new zoomed in extent\n\t\t\tself.extent = [click_x-newWidth/2, click_x+newWidth/2, click_y-newHeight/2, click_y+newHeight/2]\n\t\t\tself.plot()", "def zoom(self,factor):\r\n # Set x axis range\r\n scalex = self.getScale()[0]\r\n range_ = scalex[1] - scalex[0]\r\n scalex[0] += range_*factor/10\r\n scalex[1] -= range_*factor/10\r\n # Centre range around scrubber\r\n range_ = scalex[1] - scalex[0]\r\n if range_ < self.w//2 and factor > 0:\r\n range_ = self.w//2\r\n if self.measurements is not None and range_ > self.measurements*2 and factor < 0:\r\n range_ = self.measurements*2\r\n scalex[0] = self.progress*self.samplerate - range_/2\r\n scalex[1] = self.progress*self.samplerate + range_/2\r\n self.setScaleX(scalex[0],scalex[1]) \r\n self.draw()\r\n self.update(self.app.videoPlayer.startTimestamp)", "def handle_scrollwheel(self, event):\n delta_x, delta_y, delta_z = self._get_deltas(event)\n if delta_x:\n self.events.append(\n self.emulate_wheel(delta_x, 'x', self.timeval))\n if delta_y:\n self.events.append(\n self.emulate_wheel(delta_y, 'y', self.timeval))\n if delta_z:\n self.events.append(\n self.emulate_wheel(delta_z, 'z', self.timeval))", "def on_mousewheel(self, event):\r\n self.container_widgets[\"order_canvas\"].yview_scroll(-1 * int(event.delta / 120), \"units\")\r\n # TODO FIX SCROLLING\r", "def set_zoom_on_edit(self, should_zoom):\n self._should_zoom = should_zoom", "def release_zoom(self, event):\n self.canvas_zoom_released.emit()\n\n NavigationToolbar2.release_zoom(self, event)\n\n return", "def zoom(self) -> float:\n return self._zoom", "def zoom(self):\n return self.container['zoom']", "def wheelEvent(self, event):\r\n\t\t\r\n\t\t# If a spritesheet has not been loaded, do nothing\r\n\t\tif self.animation_data and self.animation_data.active_frame is not None:\r\n\t\t\t\r\n\t\t\tif QtWidgets.QApplication.keyboardModifiers() == Qt.ControlModifier:\r\n\t\t\t\t\r\n\t\t\t\t# Next / Previous frame\r\n\t\t\t\tif event.angleDelta().y() > 0:\r\n\t\t\t\t\tself.current_index += 
1\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.current_index -= 1\r\n\r\n\t\t\t\tself.update()\r\n\t\t\t\r\n\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t# Zoom in / Out of the spritesheet\r\n\t\t\t\tif event.angleDelta().y() > 0:\r\n\t\t\t\t\tself.set_scale(min(round(self.scale + self.scale_inc, 1), self.scale_max))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.set_scale(max(round(self.scale - self.scale_inc, 1), self.scale_min))", "def mouse_wheel(self, event):\n\n if event.num == 5 or event.delta == -120:\n event.widget.yview_scroll(1, UNITS)\n self.tablerowheader.yview_scroll(1, UNITS)\n if event.num == 4 or event.delta == 120:\n if self.canvasy(0) < 0:\n return\n event.widget.yview_scroll(-1, UNITS)\n self.tablerowheader.yview_scroll(-1, UNITS)\n self.redrawVisible()\n return", "def create_zoom_widgets(master: Widget) -> None:\r\n\r\n zoom_label = Label(master, text='Zoom:', font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n zoom_label.grid(row=1, column=0, sticky=W, padx=self.WIDGET_PAD,\r\n pady=(0,self.WIDGET_PAD*2))\r\n\r\n self.zoom_scale = Scale(\r\n master, from_=0, to=len(self.CELL_SIZES)-1, resolution=1, orient=HORIZONTAL,\r\n bg=self.MAIN_BG, font=self.FONT_SMALL, command=self.on_zoom_change)\r\n self.zoom_scale.set(self.INITIAL_ZOOM)\r\n self.zoom_scale.grid(row=1, column=1 ,sticky=W+E, padx=(0,self.WIDGET_PAD),\r\n pady=(0,self.WIDGET_PAD*2))", "def can_zoom(self):\n return False", "def _zoomCamera(self, sizeChange):\n self.camSize -= sizeChange", "def zoom(self, factor, center=None):\n\n xmin, xmax = self.axes.get_xlim()\n ymin, ymax = self.axes.get_ylim()\n width = xmax - xmin\n height = ymax - ymin\n\n if center is None or center == [None, None]:\n center = [(xmin + xmax) / 2.0, (ymin + ymax) / 2.0]\n\n # For keeping the point at the pointer location\n relx = (xmax - center[0]) / width\n rely = (ymax - center[1]) / height\n\n new_width = width / factor\n new_height = height / factor\n\n xmin = center[0] - new_width * (1 - relx)\n xmax = center[0] + new_width * relx\n ymin = center[1] - new_height * (1 - rely)\n ymax = center[1] + new_height * rely\n\n # Adjust axes\n for ax in self.figure.get_axes():\n ax.set_xlim((xmin, xmax))\n ax.set_ylim((ymin, ymax))\n\n # Async re-draw\n self.canvas.draw_idle()\n\n ##### Temporary place-holder for cached update #####\n self.update_screen_request.emit([0, 0, 0, 0, 0])", "def DoZoom(self, mode):\n id_type = mode\n zoomlevel = self.GetZoom()\n if id_type == ed_glob.ID_ZOOM_OUT:\n if zoomlevel > -9:\n self.ZoomOut()\n elif id_type == ed_glob.ID_ZOOM_IN:\n if zoomlevel < 19:\n self.ZoomIn()\n else:\n self.SetZoom(0)\n return self.GetZoom()", "def zoom(cls, img, zoom):\n w, h = img.size\n x = h / 2\n y = w / 2\n zoom2 = zoom * 2\n img = img.crop((x - w / zoom2, y - h / zoom2,\n x + w / zoom2, y + h / zoom2))\n return img.resize((w, h), Image.LANCZOS)", "def _zoom(self, x0, y0, x1, y1):\n # Store current zoom state in stack\n self.plot.getLimitsHistory().push()\n\n extents = self._getAxesExtent(x0, y0, x1, y1)\n self.plot.setLimits(\n extents.xmin,\n extents.xmax,\n extents.ymin,\n extents.ymax,\n extents.y2min,\n extents.y2max,\n )", "def wheel_click(coords=(0, 0)):\n _perform_click_input(button='middle', coords=coords)", "def zoom(self):\n return self['zoom']", "def zoom(self, xmin, xmax, xlen, ymin, ymax, ylen):\n self.xmax = xmax\n self.xmin = xmin\n self.xmax = xmax\n self.xlen = xlen\n self.ymin = ymin\n self.ymax = ymax\n self.ylen = ylen\n self.refresh()" ]
[ "0.8572107", "0.8045329", "0.80039686", "0.7584533", "0.7354916", "0.7326063", "0.7284798", "0.72583723", "0.72249573", "0.7202118", "0.72010577", "0.719273", "0.71766204", "0.705019", "0.70209515", "0.6953993", "0.6904493", "0.6904493", "0.68883395", "0.68644863", "0.68514544", "0.68481845", "0.68270445", "0.6822933", "0.68106157", "0.67803377", "0.67378676", "0.6735482", "0.6686846", "0.66690665", "0.66548556", "0.65847987", "0.6564047", "0.6538414", "0.65058947", "0.6503453", "0.647978", "0.6476846", "0.645619", "0.64561486", "0.6455139", "0.6444411", "0.6420983", "0.64181304", "0.640324", "0.640324", "0.6361051", "0.63050145", "0.6293518", "0.62538123", "0.6243507", "0.62285143", "0.6227762", "0.62128025", "0.6206351", "0.61715937", "0.61425024", "0.612732", "0.6121979", "0.61174434", "0.6108504", "0.60866064", "0.6069964", "0.60571676", "0.6041417", "0.6036059", "0.6003119", "0.5994796", "0.5988801", "0.59801257", "0.5975632", "0.5972009", "0.596618", "0.5939813", "0.5933822", "0.59315145", "0.59163934", "0.58770347", "0.58568466", "0.58565766", "0.5839015", "0.582895", "0.58157367", "0.58105093", "0.58083326", "0.5798313", "0.5787013", "0.5781404", "0.5767712", "0.5765602", "0.5753562", "0.57503325", "0.57495284", "0.5729534", "0.5718274", "0.5711918", "0.5700588", "0.56731224", "0.56556594", "0.56518644" ]
0.7094766
13
Scrolling with the keyboard. Independent from the language of the keyboard, CapsLock, +, etc.
def __keystroke(self, event): if event.state - self.__previous_state == 4: # means that the Control key is pressed pass # do nothing if Control key is pressed else: self.__previous_state = event.state # remember the last keystroke state # Up, Down, Left, Right keystrokes if event.keycode in [68, 39, 102]: # scroll right, keys 'd' or 'Right' self.__scroll_x('scroll', 1, 'unit', event=event) elif event.keycode in [65, 37, 100]: # scroll left, keys 'a' or 'Left' self.__scroll_x('scroll', -1, 'unit', event=event) elif event.keycode in [87, 38, 104]: # scroll up, keys 'w' or 'Up' self.__scroll_y('scroll', -1, 'unit', event=event) elif event.keycode in [83, 40, 98]: # scroll down, keys 's' or 'Down' self.__scroll_y('scroll', 1, 'unit', event=event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_key(self, event):\n if event.key() == QtCore.Qt.Key_Up:\n self.model.channel_Scroll_Up('page')\n elif event.key() == QtCore.Qt.Key_PageUp:\n self.model.channel_Scroll_Up('page')\n elif event.key() == QtCore.Qt.Key_Down:\n self.model.channel_Scroll_Down('page')\n elif event.key() == QtCore.Qt.Key_PageDown:\n self.model.channel_Scroll_Down('page')\n elif event.key() == QtCore.Qt.Key_Left:\n self.model.time_scroll(scroll=-1 / 3)\n elif event.key() == QtCore.Qt.Key_Right:\n self.model.time_scroll(scroll=1 / 3)\n event.accept()", "def __window_scrollTo(self, x, y):\n pass", "def keyboard_mode(self, bot, update):\n bot.send_message(update.message.chat_id,\n 'Режим \"keyboard\" активирован.',\n reply_markup=self.markup)", "def __window_scrollBy(self, xDelta, yDelta):\n pass", "def scroll(*args):", "def start_scroll():\n send_command(0x2F)", "def autoscroll(self):\n self.displaymode |= self.LCD_ENTRYSHIFTINCREMENT\n self.write_lcd(self.LCD_DATA_E1, self.LCD_ENTRYMODESET | self.displaymode)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_ENTRYMODESET | self.displaymode)", "def __window_scroll(self, x, y):\n pass", "def scroll(self, direction):\n # Handle the specific keys\n if direction == \"h\": # Behave like ranger\n self.remember_pos(os.getcwd(),\n self.vimiv.get_pos(force_widget=\"lib\"))\n self.move_up()\n elif direction == \"l\":\n self.file_select(self.treeview, self.treeview.get_cursor()[0],\n None, False)\n else:\n # Scroll the tree checking for a user step\n if self.vimiv.keyhandler.num_str:\n step = int(self.vimiv.keyhandler.num_str)\n else:\n step = 1\n if direction == \"j\":\n new_pos = self.vimiv.get_pos(force_widget=\"lib\") + step\n if new_pos >= len(self.file_liststore):\n new_pos = len(self.file_liststore) - 1\n else:\n new_pos = self.vimiv.get_pos(force_widget=\"lib\") - step\n if new_pos < 0:\n new_pos = 0\n self.move_pos(True, new_pos)\n return True # Deactivates default bindings (here for Arrows)", "def _on_key_press(self, event):", "def perform_keyboard_actions(self):\n self.handle_keyboard_input()\n self.grid.next_frame()", "def window_scroll(self, width=None, height=None):\n if width is None:\n width = \"0\"\n if height is None:\n height = \"0\"\n js = \"window.scrollTo({w},{h});\".format(w=width, h=height)\n self.run_script(js)", "def ev_KEYDOWN(self, event):", "def bindKeys(self):\r\n self.c.bind(\"<Button-1>\",self.seek)\r\n self.c.bind(\"<MouseWheel>\",self.app.zoom)\r\n self.c.bind(\"<Button-3>\",self.peek)", "def process_key(self, key):\n\t\tif(self.index/SCROLL_CONSTANT >= len(self.text)):\n\t\t\tif(key == UP):\n\t\t\t\tself.select_index = max(0, self.select_index - 1)\n\t\t\telif(key == DOWN):\n\t\t\t\tself.select_index = min(len(self.choice_data_list) - 1, self.select_index + 1)", "def Scroll(self, steps):\n self._EnsureHIDValueInRange(steps)\n self._kit.MouseScroll(steps)\n time.sleep(self.send_delay)", "def scroll_buffer_to_prompt(self) -> None:\n # Get current window size\n info = self.get_win32_screen_buffer_info()\n sr = info.srWindow\n cursor_pos = info.dwCursorPosition\n\n result = SMALL_RECT()\n\n # Scroll to the left.\n result.Left = 0\n result.Right = sr.Right - sr.Left\n\n # Scroll vertical\n win_height = sr.Bottom - sr.Top\n if 0 < sr.Bottom - cursor_pos.Y < win_height - 1:\n # no vertical scroll if cursor already on the screen\n result.Bottom = sr.Bottom\n else:\n result.Bottom = max(win_height, cursor_pos.Y)\n result.Top = result.Bottom - win_height\n\n # Scroll API\n self._winapi(\n windll.kernel32.SetConsoleWindowInfo, self.hconsole, True, 
byref(result)\n )", "def keyboard_on_key_down(\n self,\n window: Keyboard,\n keycode: Tuple[int, str],\n text: str,\n modifiers: ObservableList\n ):\n if keycode[0] in KEYS['s'] and 'ctrl' in modifiers:\n popup = SavePopup(\n self, size_hint=(.5, .5),\n pos_hint={\n 'center_x': .5, 'center_y': .5\n }\n )\n popup.open()\n\n elif keycode[0] in KEYS['enter']:\n self.focus = True\n text = list(self.text)\n text.insert(self.cursor_index(), '\\n')\n self.text = ''.join(text)\n Clock.schedule_once(self._focus)\n\n elif keycode[0] in KEYS['esc']:\n self.root.manager.current = 'browser'\n\n elif keycode[0] in KEYS['del', 'backspace']:\n self.cancel_selection()\n\n elif keycode[0] in KEYS['='] and 'ctrl' in modifiers:\n self.font_size += 1\n\n elif keycode[0] in KEYS['-'] and 'ctrl' in modifiers:\n if self.font_size > 0:\n self.font_size -= 1\n\n return super(EditorIO, self).keyboard_on_key_down(\n window, keycode, text, modifiers\n )", "def autoscroll( self, value=True ):\n\t\tif value:\n\t\t\t# This will 'right justify' text from the cursor\n\t\t\tself._displaymode |= LCD_ENTRYSHIFTINCREMENT\n\t\telse:\n\t\t\t# This will 'left justify' text from the cursor\n\t\t\tself._displaymode &= (0xFF ^ LCD_ENTRYSHIFTINCREMENT)\n\t\tself.command( LCD_ENTRYMODESET | self._displaymode )", "def handle_continuous_keys(self):\n shift = pygame.K_LSHIFT in self.held\n ctrl = pygame.K_LCTRL in self.held\n factor = 3 if shift else 1/3 if ctrl else 1\n for key in self.held:\n if not self.followmode:\n # if self.held_delay[key] == 0:\n if key in (pygame.K_w, pygame.K_UP): # up\n # self.canvas.move_offset(0, 5 * factor)\n self.canvas.move_focus(0, 5 * factor)\n elif key in (pygame.K_s, pygame.K_DOWN): # down\n # self.canvas.move_offset(0, -5 * factor)\n self.canvas.move_focus(0, -5 * factor)\n elif key in (pygame.K_d, pygame.K_RIGHT): # right\n # self.canvas.move_offset(-5 * factor, 0)\n self.canvas.move_focus(5 * factor, 0)\n elif key in (pygame.K_a, pygame.K_LEFT): # left\n # self.canvas.move_offset(5 * factor, 0)\n self.canvas.move_focus(-5 * factor, 0)\n if key in (pygame.K_e, pygame.K_KP_PLUS):\n self.canvas.zoom(2 * factor)\n elif key in (pygame.K_q, pygame.K_KP_MINUS):\n self.canvas.zoom(-2 * factor)\n for key in self.held:\n self.held_delay[key] = (self.held_delay[key] + 1) % 5", "def OnKeyDown(self, event):\r\n\r\n key = event.GetKeyCode()\r\n nb = self.GetParent()\r\n\r\n if key == wx.WXK_LEFT:\r\n nb.AdvanceSelection(False)\r\n self.SetFocus()\r\n\r\n elif key == wx.WXK_RIGHT:\r\n nb.AdvanceSelection(True)\r\n self.SetFocus()\r\n\r\n elif key == wx.WXK_HOME:\r\n newPage = 0\r\n nb.SetSelection(newPage)\r\n self.SetFocus()\r\n \r\n elif key == wx.WXK_END:\r\n newPage = nb.GetPageCount() - 1\r\n nb.SetSelection(newPage)\r\n self.SetFocus()\r\n \r\n elif key == wx.WXK_TAB:\r\n if not event.ControlDown():\r\n flags = 0\r\n if not event.ShiftDown(): flags |= wx.NavigationKeyEvent.IsForward\r\n if event.CmdDown(): flags |= wx.NavigationKeyEvent.WinChange\r\n self.Navigate(flags)\r\n else:\r\n\r\n if not nb or not isinstance(nb, AuiNotebook):\r\n event.Skip()\r\n return\r\n\r\n bForward = bWindowChange = 0\r\n if not event.ShiftDown(): bForward |= wx.NavigationKeyEvent.IsForward\r\n if event.CmdDown(): bWindowChange |= wx.NavigationKeyEvent.WinChange\r\n \r\n keyEvent = wx.NavigationKeyEvent()\r\n keyEvent.SetDirection(bForward)\r\n keyEvent.SetWindowChange(bWindowChange)\r\n keyEvent.SetFromTab(True)\r\n keyEvent.SetEventObject(nb)\r\n\r\n if not nb.GetEventHandler().ProcessEvent(keyEvent):\r\n \r\n # Not processed? 
Do an explicit tab into the page.\r\n win = self.GetWindowFromIdx(self.GetActivePage())\r\n if win:\r\n win.SetFocus()\r\n\r\n self.SetFocus()\r\n \r\n return\r\n\r\n else:\r\n event.Skip()", "def mouse_scroll(self, x, y, scroll_x, scroll_y):\n # Check if in the menu.\n if x > self.menu_start:\n # Scroll the menu.\n self.menu.scrollMenu(scroll_y)\n # Otherwise scroll the waveforms\n else:\n self.waveform_offset += 4 * scroll_y\n if self.waveform_offset > 0:\n self.waveform_offset = 0\n # Avoid going too far down.\n max_view = self.max_viewable + self.win.geometry.time_scale\n if self.current_view_span - self.waveform_offset > max_view:\n if self.current_view_span > max_view:\n self.waveform_offset = 0\n else:\n self.waveform_offset = -((10 + max_view) - \\\n self.current_view_span)\n # Update the scroll_bar.\n self.scroll_bar.changePosition()", "def __keystroke(self, event):\n if event.state - self.__previous_state == 4: # means that the Control key is pressed\n pass # do nothing if Control key is pressed\n else:\n if event.char in [' ', 'f']:\n return self.parent_class.finish_polygons_key()\n self.__previous_state = event.state # remember the last keystroke state\n # Up, Down, Left, Right keystrokes\n if event.keycode in [68, 39, 102]: # scroll right: keys 'D', 'Right' or 'Numpad-6'\n self.__scroll_x('scroll', 1, 'unit', event=event)\n elif event.keycode in [65, 37, 100]: # scroll left: keys 'A', 'Left' or 'Numpad-4'\n self.__scroll_x('scroll', -1, 'unit', event=event)\n elif event.keycode in [87, 38, 104]: # scroll up: keys 'W', 'Up' or 'Numpad-8'\n self.__scroll_y('scroll', -1, 'unit', event=event)\n elif event.keycode in [83, 40, 98]: # scroll down: keys 'S', 'Down' or 'Numpad-2'\n self.__scroll_y('scroll', 1, 'unit', event=event)", "def XPSetKeyboardFocus(inWidget):\n pass", "def menu_keyboard_shortcuts(self, event=None):\n self.link('http://pythonide.stani.be/manual/html/manual12.html')", "def keypress(self, size, key):\n pos = self.get_focus()[1]\n _ll = len(self.body)\n if (pos <= 0 and key == 'up') or (pos >= _ll-1 and key == 'down'):\n return\n else:\n return super(ClosedListBox, self).keypress(size, key)", "def on_key_press(self, event):\n\n if event.text == '+' or event.text == '=':\n self.zoom(0.9)\n elif event.text == '-':\n self.zoom(1/0.9)\n elif event.text == 'l':\n self.translate_center(0.0, 0.0, -0.01)\n elif event.text == 'L':\n self.translate_center(0.0, 0.0, 0.01)", "def scroll_display( self, direction=LCD_MOVELEFT ):\n\t\tassert direction in (LCD_MOVELEFT,LCD_MOVERIGHT), \"Invalid direction %s value\" % direction\n\t\tself.command(LCD_CURSORSHIFT | LCD_DISPLAYMOVE | direction)", "def scroll_text(self, text, color=defcolor, scrolldelay=0.25):\n # Line height including spacing.\n lineh = self.font['height'] + 1\n # Calculate characters we can have per line.\n chars_per_line = self.x // (self.font['width'] + 1)\n # Split the text into its lines.\n linelist = striptomatrix.listto2d(text, chars_per_line)\n # Add an empty line at the end of the list.\n linelist.append(\" \" * chars_per_line)\n # Initialize the vertical offset.\n voffset = 0\n # Reset the sentinel\n self.sentinel = False\n self.killedevent.clear()\n while self.sentinel == False:\n self.clear()\n \n # Uncomment to understand how this works.\n #print('{0} voffset={1}'.format(linelist, voffset))\n\n # Insert the lines, taking into account the vertical offset. 
\n for l in range(len(linelist)):\n self.insert_line(linelist[l], 0, (l*lineh)-voffset, color) \n self.show()\n time.sleep(scrolldelay)\n # Upate the offset value.\n # After we scrolled 1 char worth, \n if voffset == self.font['height']:\n voffset = 0\n # Place the last item to the beginnig of the list.\n linelist.append(linelist[0])\n del linelist[0]\n else:\n voffset += 1\n self.killedevent.set()", "def yview_scroll(self, number, what):\n self.tk.call(self._w, 'yview', 'scroll', number, what)", "def on_mousewheel(self, event):\r\n self.container_widgets[\"order_canvas\"].yview_scroll(-1 * int(event.delta / 120), \"units\")\r\n # TODO FIX SCROLLING\r", "def on_down_key(self, event) -> None:\r\n\r\n self.move_view(0, 1)", "def scroll(self, direction):\n # next cursor position after scrolling\n next_line = self.line + direction\n\n # Up direction scroll overflow\n # current cursor position is 0, but top position is greater than 0\n if (direction == self.UP) and (self.top > 0 and self.line == 0):\n self.top += direction\n \n # Down direction scroll overflow\n # next cursor position touch the max lines, but absolute position of max lines could not touch the bottom\n elif (direction == self.DOWN) and (next_line == self.max_lines -1) and (self.top + self.max_lines < self.bottom):\n self.top += direction\n \n # Scroll up\n # current cursor position or top position is greater than 0\n elif (direction == self.UP) and (self.top > 0 or self.line > 0):\n self.line = next_line\n \n # Scroll down\n # next cursor position is above max lines, and absolute position of next cursor could not touch the bottom\n elif (direction == self.DOWN) and (next_line < self.max_lines) and (self.top + next_line < self.bottom):\n self.line = next_line", "def ScrollMessage(text, color, repeat):\n text_area.text = text\n text_area.color = color\n\n # Start the message just off the side of the glasses\n x = display.width\n text_area.x = x\n\n # Determine the width of the message to scroll\n width = text_area.bounding_box[2]\n\n for _ in range(repeat):\n while x != -width:\n x = x - 1\n text_area.x = x\n\n # Update the switch and if it has been pressed abort scrolling this message\n switch.update()\n if not switch.value:\n return\n\n time.sleep(0.025) # adjust to change scrolling speed\n x = display.width", "def scroll_page(self):\n scroll_down = self.driver.find_element_by_tag_name(\"html\")\n scroll_down.send_keys(Keys.END)\n sleep(TestData.DELAY)\n scroll_down.send_keys(Keys.CONTROL + Keys.HOME)\n sleep(TestData.DELAY)\n return True", "def cmdKeyboard(self, dev):\n # Slaap één-tiende van een seconde om te voorkomen dat de toetsaanslag <enter>\n # wordt gepakt als het wordt uitgevoerd in een terminal\n time.sleep(0.1)\n\n self.hitsKeyboards[dev] = False\n f = open(self.inputPath + dev, 'rb')\n f.flush()\n while self.live:\n # Lees de toetsaanslag --> Pak de 42ste byte\n self.hitsKeyboards[dev] = f.read(144)[42]\n time.sleep(0.1)", "def key_press(self):\n self.screen.nodelay(True)\n return self.screen.getch()", "def ev_keydown(self, event: KeyDown) -> None:", "def scroll_to(self):\n\n if self:\n pass", "def scroll_page_down(event):\n w = _current_window_for_event(event)\n b = event.cli.current_buffer\n\n if w and w.render_info:\n # Scroll down one page.\n line_index = max(w.render_info.last_visible_line(), w.vertical_scroll + 1)\n w.vertical_scroll = line_index\n\n b.cursor_position = b.document.translate_row_col_to_index(line_index, 0)\n b.cursor_position += b.document.get_start_of_line_position(after_whitespace=True)", 
"def enable_keypress(self):\r\n # Add a space after the prompt -- we'll start collecting text after this point\r\n self.lines[-1] += ' '\r\n # Setup cursor\r\n self.cursor_visible = True\r\n self.cursor_min_length = sum([len(l) for l in self.lines])\r\n self.cursor_absolute_position = self.cursor_min_length\r\n self.entered_text_buffer = ''", "def _on_mousewheel(event):\n if event.num == 4 or event.delta > 0:\n canvas.yview_scroll(-1, \"units\" )\n elif event.num == 5 or event.delta < 0:\n canvas.yview_scroll(1, \"units\" )", "def play(self):\n self.accept(\"wheel_up\", self.scrollindex, [-1] )\n self.accept(\"wheel_down\", self.scrollindex, [1] )\n self.accept(\"arrow_up\", self.scrollindex, [-1] )\n self.accept(\"arrow_down\", self.scrollindex, [1] )\n self.accept(\"enter\", self._click)\n if callable(self.data['exit']): self.accept(\"escape\", self.data['exit'])\n for item in self.canvas[\"items\"]: item['state']=DGG.NORMAL", "def keyboard(self, *args):\n return _ida_hexrays.Hexrays_Hooks_keyboard(self, *args)", "def on_key_press(self, key, modifiers):\n KeyboardController.lastKey = key;\n KeyboardController.keys.add(key);\n if key == arcade.key.ESCAPE:\n # User hits f. Flip between full and not full screen.\n self.set_fullscreen(not self.fullscreen)\n\n # Get the window coordinates. Match viewport to window coordinates\n # so there is a one-to-one mapping.\n width, height = self.get_size()\n self.set_viewport(0, width, 0, height)", "def handle_movement_keydown(self, key):\n try:\n log.debug(f'pressed: {key}')\n if key == pygame.K_LEFT:\n self.walk_left()\n elif key == pygame.K_RIGHT:\n self.walk_right()\n elif key == pygame.K_DOWN:\n pass\n elif key == pygame.K_UP:\n pass\n elif key == pygame.K_SPACE:\n self.jump()\n self.keys_down[key] = True\n except AttributeError:\n log.info(\"you didn't pass a keyboard event!!\")", "def do_auto_scroll( self, auto = True ):\n print( \"do_auto_scroll fix !!\" )", "def __window_scrollByLines(self, lines):\n pass", "def scroll_page(self, where: str, direction: ScrollEnum):\n\n element = self.find_element_by_xpath(where)\n if element:\n if direction == ScrollEnum.UP:\n element.send_keys(Keys.HOME)\n elif direction == ScrollEnum.DOWN:\n element.send_keys(Keys.END)", "def trigger_scroller_event(self):\n evt = wx.PyCommandEvent(wx.EVT_TEXT.typeId,self.page_scroller.GetId())\n self.GetEventHandler().ProcessEvent(evt)", "def keyboard_on_key_down(self, window, keycode, text, modifiers):\n if super(SelectableLayout, self).keyboard_on_key_down(\n window,\n keycode,\n text,\n modifiers,\n ):\n return True\n if self.select_with_key_down(window, keycode, text, modifiers):\n return True\n return False", "def onKeyPress(self):\n ch = read(fd, 4)\n if ch == '\\033': # escape\n self.pause()\n elif '\\033' in ch:\n return\n elif '\\t' in ch: # tab\n return\n elif len(self.user_input) >= 80: # too long\n self.user_input[:80]\n return\n elif ch == '\\r': # return\n if self.user_input == \"\":\n return\n command = command_list.match(self.user_input)\n if not command:\n pass\n elif command.group(1):\n self._save(0)\n elif command.group(2):\n self._save()\n elif command.group(3):\n self._save(command.group(4))\n link = self.links.match(self.user_input.lower())\n if link:\n self.reset(link.group(0))\n self.user_input = \"\"\n self.locked += 1\n print '\\033[0m'\n print_loc(' '*80, self.y+5, self.x+2)\n #print_loc(' '*80, self.y+6, 0)\n self.locked -= 1\n elif ch == '\\x7f': # backspace\n if self.user_input == \"\":\n return\n self.user_input = self.user_input[:-1]\n elif ch 
== ' ': # space\n if self.user_input == \"\":\n return\n elif self.user_input[-1] == ' ':\n return\n self.user_input += ' '\n else: # all else\n self.user_input += ch\n self.locked += 1\n # Highlight valid user input\n if self.links.match(self.user_input.lower()):\n print '\\033[0;96;4m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n elif command_list.match(self.user_input):\n print '\\033[0;1;92m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n else:\n print '\\033[0m'\n # Display new user input line\n print_loc(self.user_input+'\\033[0;7m \\033[0m ', self.y + 5, self.x)\n self.locked -= 1", "def _on_scrollbar(self, *args) -> None:\r\n for textbox in self.textboxes:\r\n textbox.yview(*args)", "def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")", "def run_autofocus(self):\n raise NotImplementedError", "def set_zoombox_keyboard(self):\n # Idem but with CTRL + left button mouse \n self.set('LeftClickMove', 'ZoomBox',\n key_modifier='Control',\n param_getter=lambda p: (p[\"mouse_press_position\"][0],\n p[\"mouse_press_position\"][1],\n p[\"mouse_position\"][0],\n p[\"mouse_position\"][1]))", "def focus_password(self, **kws):\r\n self.password_box.focus()", "def idle_loop(self):\n sleep(0.1)\n self.scroll()", "def _on_textscroll(self, *args) -> None:\r\n self.scrollbar.set(*args)\r\n self._on_scrollbar('moveto', args[0])", "def on_key_press(self, key_pressed: int, _: int) -> None:\n if key_pressed in (key.UP, key.W):\n if self.physics_engine.can_jump():\n self.change_y = self.jump_speed\n elif key_pressed in (key.LEFT, key.A):\n self.change_x = -self.movement_speed\n self.direction = Direction.LEFT\n self.last_faced_dir = \"left\"\n self.texture = self.textures[Direction.LEFT.value]\n elif key_pressed in (key.RIGHT, key.D):\n self.change_x = self.movement_speed\n self.direction = Direction.RIGHT\n self.last_faced_dir = \"right\"\n self.texture = self.textures[Direction.RIGHT.value]", "def activate_keyboard_bindings():\n turtle.Screen().listen()\n turtle.Screen().onkey(exit, \"e\")\n turtle.Screen().onkey(exit, \"n\")", "def _on_keyboard(self, instance, key, scancode, codepoint, modifiers, *args):\r\n # print(\"Keyboard pressed! 
{}, {}, {}, {}\".format(key, scancode, codepoint, modifiers))\r\n if codepoint == 's' and 'ctrl' in modifiers:\r\n toast('Search by Name, Ingredient, or Tag', 3)\r\n self.search_focus = True", "def on_keydown(self, keys, game) -> None:\n pass", "def Keyboard(self, key):\r\n\t\tif key == Keys.K_u:\r\n\t\t\tself.kp+=self.dp\r\n\t\t\tself.q[0].SetCoefs(self.q[0].configer,self.kp,self.ki,self.kd)\r\n\t\tif key == Keys.K_j:\r\n\t\t\tself.kp-=self.dp\r\n\t\t\tif self.kp<0:\r\n\t\t\t\tself.kp=0.0\r\n\t\t\tself.q[0].SetCoefs(self.q[0].configer,self.kp,self.ki,self.kd)\r\n\t\tif key == Keys.K_i:\r\n\t\t\tself.ki+=self.di\r\n\t\t\tself.q[0].SetCoefs(self.q[0].configer,self.kp,self.ki,self.kd)\r\n\t\tif key == Keys.K_k:\r\n\t\t\tself.ki-=self.di\r\n\t\t\tif self.ki<0:\r\n\t\t\t\tself.ki=0.0\r\n\t\t\tself.q[0].SetCoefs(self.q[0].configer,self.kp,self.ki,self.kd)\r\n\t\tif key == Keys.K_o:\r\n\t\t\tself.kd+=self.dd\r\n\t\t\tself.q[0].SetCoefs(self.q[0].configer,self.kp,self.ki,self.kd)\r\n\t\tif key == Keys.K_l:\r\n\t\t\tself.kd-=self.dd\r\n\t\t\tif self.kd<0:\r\n\t\t\t\tself.kd=0.0\r\n\t\t\tself.q[0].SetCoefs(self.q[0].configer,self.kp,self.ki,self.kd)\r\n\t\t\r\n\t\tif key == Keys.K_f:\r\n\t\t\tself.center+=1\r\n\t\t\tif self.center>2:\r\n\t\t\t\tself.center = 0\r\n\t\t\r\n\t\tif key == Keys.K_t:\r\n\t\t\tself.cut=1-self.cut\r\n\t\t\tself.q[0].SetCut(self.cut)\t\t\t\r\n\t\t\t\r\n\t\tif key == Keys.K_r:\r\n\t\t\tpass\t\r\n\t\t\r\n\t\tif key == Keys.K_s:\r\n\t\t\tself.q[0].saveConf()", "def test_keyboard_characters(self):\n pass", "def XPLoseKeyboardFocus(inWidget):\n pass", "def lockKeyboard():\r\n while True:\r\n # locks the keyboard for 5s\r\n # command: xinput float keyborad_ID\r\n os.system('xinput float 8')\r\n time.sleep(5)\r\n\r\n # Enable the keyboard fo 10s\r\n # command: xinput float keyborad_ID slave_keyboard_number\r\n os.system('xinput reattach 8 3')\r\n time.sleep(10)", "def key_G(buf, input_line, cur, count):\n if count > 0:\n # This is necessary to prevent weird scroll jumps.\n weechat.command(\"\", \"/window scroll_top\")\n weechat.command(\"\", \"/window scroll %s\" % (count - 1))\n else:\n weechat.command(\"\", \"/window scroll_bottom\")", "def on_key_press(self, key):\n if key == 'esc':\n self.backtrack()\n elif key in ['f1', '?']:\n self.open(HelpPane(self._get_current_pane()))", "def scroll(self):\n x_position = self._player.get_position()[0]\n half_screen = self._master.winfo_width() / 2\n world_size = self._world.get_pixel_size()[0] - half_screen\n\n # Left side\n if x_position <= half_screen:\n self._view.set_offset((0, 0))\n\n # Between left and right sides\n elif half_screen <= x_position <= world_size:\n self._view.set_offset((half_screen - x_position, 0))\n\n # Right side\n elif x_position >= world_size:\n self._view.set_offset((half_screen - world_size, 0))", "def handle_keypress(self,key):\r\n if len(key) == 0:\r\n return True\r\n \r\n # Whenever we start typing scroll to the bottom\r\n self.scroll_bottom() \r\n\r\n if self.cursor_visible:\r\n if ord(key) == BACKSPACE_KEY: # If backspace and we aren't at start point of buffer, remove one char\r\n if self.cursor_absolute_position > self.cursor_min_length:\r\n self.lines[-1] = self.lines[-1][0:-1]\r\n self.entered_text_buffer = self.entered_text_buffer[0:-1]\r\n self.cursor_absolute_position-=1\r\n elif ord(key) == RETURN_KEY:\r\n # Hit return, count it as an entered command and return\r\n return False \r\n else:\r\n self.print_text(key)\r\n self.cursor_absolute_position+=1\r\n self.entered_text_buffer += key\r\n\r\n 
return True", "def cycle_text(self, **kwargs):\n self.scroll(**kwargs) # Temporary, will be replaced", "def scroll_down(self):\r\n self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\r\n sleep(self.wait)", "def getFocus(*args):", "def getFocus(*args):", "def getFocus(*args):", "def getFocus(*args):", "def move_debug(self, environment):\n\n ch2 = sys.stdin.read(1)\n\n if ch2 == \"w\":\n # the up arrow key was pressed\n print(\"up key pressed\")\n\n elif ch2 == \"s\":\n # the down arrow key was pressed\n print(\"down key pressed\")\n\n elif ch2 == \"a\":\n # the left arrow key was pressed\n print(\"left key pressed\")\n\n elif ch2 == \"d\":\n # the right arrow key was pressed\n print(\"right key pressed\")", "def run_autofocus_stig(self):\n raise NotImplementedError", "def _scroll(self, ui_content: UIContent, width: int, height: int) -> None:\n if self.wrap_lines():\n func = self._scroll_when_linewrapping\n else:\n func = self._scroll_without_linewrapping\n\n func(ui_content, width, height)", "def _on_key_release(self, event):", "def keyboard(key, x, y):\n\n # Handle ESC key.\n if key == b'\\033':\t\n\t# \"\\033\" is the Escape key\n sys.exit(1)\n \n if key == b',' and selected_face:\n move_face('LEFT')\n\n if key == b'.' and selected_face:\n move_face('RIGHT')", "def onMnemoToMain(self):\n self.second_main_text.SetFocus()", "def runKeyboard(self):\n for dev in self.keyboards:\n threading.Thread(target=partial(self.cmdKeyboard, dev)).start()", "def keydown(key):\r\n \r\n global paddle1_vel, paddle2_vel\r\n \r\n if key == simplegui.KEY_MAP[\"w\"]:\r\n paddle1_vel = -PAD_SPEED\r\n elif key == simplegui.KEY_MAP[\"s\"]:\r\n paddle1_vel = PAD_SPEED\r\n else:\r\n paddle1_vel = 0\r\n \r\n if key == simplegui.KEY_MAP[\"up\"]:\r\n paddle2_vel = -PAD_SPEED\r\n elif key == simplegui.KEY_MAP[\"down\"]:\r\n paddle2_vel = PAD_SPEED\r\n else:\r\n paddle2_vel = 0", "def run(self):\n global key\n getch = _GetchUnix()\n key = getch()\n while key != \"e\":\n key = getch()\n #time.sleep(0.1)", "def open_keyboard(self, instance):\n self.popup.open()", "def move_focus(self, pos_x, pos_y):\n factor = self.offset.x * -0.005 / self.scale\n pos_x *= factor\n pos_y *= factor\n self.focus += (pos_x, pos_y)", "def autoscroll(self):\n return self.getY() == float(1.0)\n #return self.autoscroll", "def focus(self):\n raise NotImplementedError", "def bs_input(self):\n from lib.keyboard import getch, Key\n start_value = self.size * 4 # Half full \n self.value = start_value\n n = start_value\n ch = \"\"\n while ch != '\\r':\n ch = getch()\n if ch == Key.UP_ARROW:\n self.value = start_value\n n = start_value\n elif ch == Key.LEFT_ARROW:\n n /= 2\n self.value -= n\n elif ch == Key.RIGHT_ARROW:\n n /= 2\n self.value += n", "def ev_KEYUP(self, event):", "def setFocus(*args):", "def setFocus(*args):", "def setFocus(*args):", "def setFocus(*args):", "def scroll_to(self):\n self.driver.execute_script(\"arguments[0].scrollIntoView(true);\", self._element)", "def scroll(self, dir):\n try:\n self.scrool = dir\n except:\n raise ReferenceError", "def set_panning_keyboard(self):\n # Panning: keyboard arrows\n self.set('KeyPress', 'Pan',\n key='Left',\n param_getter=lambda p: (.24, 0))\n self.set('KeyPress', 'Pan',\n key='Right',\n param_getter=lambda p: (-.24, 0))\n self.set('KeyPress', 'Pan',\n key='Up',\n param_getter=lambda p: (0, -.24))\n self.set('KeyPress', 'Pan',\n key='Down',\n param_getter=lambda p: (0, .24))", "def __scroll_element_into_view__(self, element):\n y = element.location['y']\n 
self.driver.execute_script('window.scrollTo(0, {0})'.format(y))" ]
[ "0.6526636", "0.6466856", "0.61477494", "0.60642177", "0.60475975", "0.6046673", "0.5949585", "0.59238994", "0.59151155", "0.5871811", "0.5852923", "0.57747257", "0.57678497", "0.5751938", "0.5697251", "0.5689333", "0.5646427", "0.5634948", "0.5631655", "0.56251585", "0.5620744", "0.55984664", "0.55908936", "0.55804795", "0.5578338", "0.55741936", "0.5553027", "0.554875", "0.55479693", "0.5546229", "0.55211663", "0.55149496", "0.55141795", "0.55067056", "0.5505664", "0.5496129", "0.5487298", "0.5473952", "0.5470166", "0.546087", "0.5444433", "0.54324603", "0.5431113", "0.542884", "0.5427816", "0.5426263", "0.5418144", "0.5406203", "0.54009384", "0.5392513", "0.53917617", "0.5384699", "0.5383114", "0.5378622", "0.537541", "0.53735477", "0.53731614", "0.5360165", "0.5353457", "0.53533363", "0.53415143", "0.53370124", "0.53362983", "0.53028756", "0.53026295", "0.5300825", "0.5298837", "0.52986497", "0.529293", "0.5290014", "0.52779925", "0.52626026", "0.5260553", "0.5257026", "0.5257026", "0.5257026", "0.5257026", "0.5251775", "0.52509284", "0.5248818", "0.5233923", "0.5232709", "0.52326524", "0.5228399", "0.5219997", "0.52153265", "0.5212711", "0.5211468", "0.5209517", "0.52081406", "0.52019936", "0.5193294", "0.5192851", "0.5192851", "0.5192851", "0.5192851", "0.51921135", "0.51842886", "0.51840705", "0.51818377" ]
0.563615
17
Dummy function to redraw figures in the children classes
def redraw_figures(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redraw(event):\n if np.size(plt.get_figlabels()):\n #Need to check if figure is closed or not and only then do the following\n #operations. Else, the following operations will create a new figure\n ax.clear()\n drawRectangle(ax)\n fig.canvas.draw()\n else:\n pass", "def redraw(self):\n raise NotImplementedError()", "def update_figure(self):\n\n self.draw()", "def redraw(self, **kwargs):\n #src_dict = self.data_sources\n #self.remove_sources(src_dict.keys())\n self.renderers = {}\n #self.renderers = {}\n self.figure = self.draw_figure(**kwargs)\n #self.add_sources(src_dict)\n # todo does the old figure linger on?\n self.render_sources(self.data_sources)\n self.bk_pane.object = self.figure", "def redraw(self):\n dummy_figure = plt.figure()\n new_manager = dummy_figure.canvas.manager\n new_manager.canvas.figure = self.figure\n self.figure.set_canvas(new_manager.canvas)\n plt.show(block=False)", "def plot_refresh():\n figure.canvas.draw()", "def on_draw(self):\n # draw everything", "def refresh_self(self) -> None:\n self._logger.debug(\"running\")\n try:\n self.figure.canvas.draw()\n except Exception as e:\n self._logger.exception(\"issue with drawing canvas.\")\n self._logger.debug(\"done\")", "def _redraw_graph(self) -> None:\n self._clear_drawing()\n self.draw_graph()", "def _redraw_graph(self) -> None:\n self._clear_drawing()\n self.draw_graph(graph=self.graph, axes=self.subplot)\n self.draw_graph(graph=self.graph2, axes=self.subplot2)\n self.draw_mappings(self.mapping)", "def redraw_viz():\n\tglobal g_last_draw\n\tif (rospy.Time.now().to_sec() > (refresh_rate + g_last_draw)):\n\t\tg_last_draw = rospy.Time.now().to_sec()\n\t\t# redraw imu box\n\t\tdoDraw()", "def paint(self):\r\n pass", "def update_plot():\n pass", "def repaint(self):\n pass", "def redraw(self):\n self._create()", "def draw(self, force=False):\n for child in self.children.values():\n child.draw(force)", "def _draw_handler(self, bpy_dummy_self, bpy_dummy_context):\r\n self._drawRays()", "def draw(self):\n for obj in self.objects:\n obj.draw()", "def on_draw(self):\n self.clear()\n self.manager.draw()", "def setDrawing(self):\n self.graph_drawing=[]", "def draw(self):", "def refresh_svg_canvas(self):\n if self.ui.tabWidget.currentIndex() == 0:\n self.ui.svg_canvas.build_schematic()\n self.ui.svg_canvas.viewport().update()\n elif self.ui.tabWidget.currentIndex() in (1,2):\n self.ui.svg_canvas.build_pcb()\n self.ui.svg_canvas.viewport().update()\n else:\n raise Exception(\"Unknown view to draw\")", "def plot_finalize():\n global figure\n global axes\n\n plot_refresh()\n plt.ioff()\n plt.show()\n\n figure, axes = None, None", "def setup_draw(self):\n pass", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def redraw_avatar(cls):", "def redraw(self):\n x2, y2 = [[] for i in range(len(self.x))], \\\n [[] for i in range(len(self.x))]\n game_point = [random.randint(1, 100),\n random.randint(1, 100)]\n for i in range(self.generations):\n x2, y2, game_point = self.move(x2, y2, game_point)\n 
for i in range(10): # Czyszczenie starych wykresow\n self.plots[i].set_xdata([])\n self.plots[i].set_ydata([])\n self.plots2[i].set_xdata([])\n self.plots2[i].set_ydata([])\n for i in range(len(self.x)): # Nowe dane wykresow\n self.plots[i].set_xdata(self.x[i])\n self.plots[i].set_ydata(self.y[i])\n self.plots2[i].set_xdata(x2[i])\n self.plots2[i].set_ydata(y2[i])\n self.fig.canvas.draw_idle()", "def redraw(self, x, y, title, xlabel) :\n # clear the old image (axes.hold is deprecated)\n self.fig.clf()\n self.fig.subplots_adjust(bottom=0.2)\n self.axes = self.fig.add_subplot(111)\n self.axes.clear()\n if re.search('mm',xlabel):\n self.axes.set_xlim([0,10])\n elif re.search('in',xlabel):\n self.axes.set_xlim([0,0.4])\n self.axes.set_ylim([0,1])\n self.axes.plot(x, y)\n self.axes.set_title(title)\n self.axes.set_xlabel(xlabel)\n self.axes.set_ylabel('Notch Sensitivity, q') \n self.axes.grid()\n self.draw()", "def draw(self):\n\t\tpass", "def draw(self):\n self.figure.canvas.draw_idle()", "def _redraw(self, render_as_done: \"bool\" = False) -> \"None\":\n if not self.drawn:\n cast(\"Application\", super())._redraw(render_as_done=True)\n self.drawn = True", "def force_draw(self):\n import matplotlib.pyplot as plt\n\n plt.show()", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def redraw(self, state: EngineeringState) -> None:\n pass", "def figure(self):\n if self._figure is None:\n\n self._figure, ax = plt.subplots(nrows=1, dpi=self._dpi)\n if self._verbose:\n print(f\" Figure dpi set to {self._dpi}\")\n\n self._figure.set_size_inches(self._size)\n if self._verbose:\n print(\" Figure size set to \" + str(self._size) + \" inches.\")\n\n for model in self._models:\n xs, ys, _ = zip(*model._nodes)\n\n for face in model._elements:\n xf = tuple(xs[k - 1] for k in face) # 1-base index to 0-base index\n yf = tuple(ys[k - 1] for k in face)\n # plt.fill(\n # xf,\n # yf,\n # linestyle=\"dotted\",\n # edgecolor=\"magenta\",\n # alpha=0.5,\n # facecolor=\"gray\",\n # )\n plt.fill(\n xf,\n yf,\n alpha=model._alpha,\n edgecolor=model._edgecolor,\n facecolor=model._facecolor,\n linestyle=model._linestyle,\n linewidth=model._linewidth,\n )\n\n if self._xticks:\n ax.set_xticks(self._xticks)\n\n if self._yticks:\n ax.set_yticks(self._yticks)\n\n if self._xlim:\n ax.set_xlim(self._xlim)\n\n if self._ylim:\n ax.set_ylim(self._ylim)\n\n if self._xlabel:\n ax.set_xlabel(self._xlabel)\n\n if self._ylabel:\n ax.set_ylabel(self._ylabel)\n\n # set frame on or off based on the Bool \"frame\" in .json input\n ax.set_frame_on(b=self._frame)\n if len(self._tick_params) > 0:\n ax.tick_params(**self._tick_params)\n\n if self._display:\n plt.show()\n\n if self._serialize:\n self.serialize(self._folder, self._file)\n\n plt.close(\"all\")\n self._figure = None", "def draw(self):\n raise NotImplementedError", "def draw(self):\n raise NotImplementedError", "def draw(self):\n raise NotImplementedError", "def _update_ax(self):\n raise NotImplementedError(\"Implement _update_ax(self) in subclass\")", "def drawImplementation(self, *args):\n return _osgAnimation.RigGeometry_drawImplementation(self, *args)", "def draw(self):\n self.figure.show()\n self.figure.canvas.draw()", "def pre_draw(self):", "def _draw_plot(self, *args, **kw):\n # Simple compatibility with new-style rendering loop\n return self._draw_component(*args, **kw)", "def update_figure(self):\n # if number of kinetics in model did not change\n # update just last lines\n if self.N_lines - 1 == 
len(self.model.spectra.keys()) * 2:\n self.dataplot.lines[-1].remove()\n self.dataplot.lines[-1].remove()\n self.draw_figure_first()\n # delete all and redraw\n else:\n n = int((self.N_lines - 1) / 2)\n for _ in range(n):\n self.dataplot.lines[-1].remove()\n self.dataplot.lines[-1].remove()\n self.draw_figure_total()\n\n self.dataplot.relim()\n\n self.dataplot.autoscale_view(True, True, True)\n\n self.draw()", "def visual_attr_changed(self):\n if self.component:\n self.component.invalidate_draw()\n self.component.request_redraw()\n else:\n self.invalidate_draw()\n self.request_redraw()", "def drawall(self):\r\n for x in self.objectlist:\r\n if x.model:\r\n x.model.draw()", "def _relief_refresh(self, *_args):\n if self._relief_graphic_instructions.length():\n self.canvas.after.remove(self._relief_graphic_instructions)\n self._relief_graphic_instructions.clear()\n\n add = self._relief_graphic_instructions.add\n pos_size = self.x, self.y, self.width, self.height\n if self.relief_ellipse_inner_colors and self.relief_ellipse_inner_lines:\n self._relief_ellipse_inner_refresh(add, *self.relief_ellipse_inner_colors, *pos_size)\n if self.relief_ellipse_outer_colors and self.relief_ellipse_outer_lines:\n self._relief_ellipse_outer_refresh(add, *self.relief_ellipse_outer_colors, *pos_size)\n if self.relief_square_inner_colors and self.relief_square_inner_lines:\n self._relief_square_inner_refresh(add, *self.relief_square_inner_colors, *pos_size)\n if self.relief_square_outer_colors and self.relief_square_outer_lines:\n self._relief_square_outer_refresh(add, *self.relief_square_outer_colors, *pos_size)\n\n if self._relief_graphic_instructions.length():\n self.canvas.after.add(self._relief_graphic_instructions)", "def redraw(self):\r\n self.c.update()", "def __del__(self):\n pyplot.clf()", "def _DoUpdateRedraw( self, hilite = True ):\n pass", "def _UpdatePlotImpl( self ):\n if self.ax is not None:\n self.axline = None\n self.cursorLine = \\\n self.cursorLine2 = None\n\n# self.ax.clear()\n# if hasattr( self, 'ax2' ) and self.ax2 is not None:\n# self.ax2.clear()\n self.fig.clear()\n self._InitAxes()\n\n#\t\t-- Scale fonts\n#\t\t--\n wd, ht = self.GetClientSize()\n label_font_size = 14\n tick_font_size = 12\n self.titleFontSize = 16\n if 'wxMac' not in wx.PlatformInfo and wd < 800:\n\tdecr = (800 - wd) / 50.0\n\tlabel_font_size -= decr\n\ttick_font_size -= decr\n\tself.titleFontSize -= decr\n\n# self.ax.grid(\n# True, 'both', 'both',\n#\t color = '#c8c8c8', linestyle = ':', linewidth = 1\n#\t )\n self._DoUpdatePlot( wd, ht )\n self._DoUpdateRedraw()\n self.canvas.draw()\n #end if", "def plot(self):\n raise Exception(\"pure virtual function\")", "def refresh(self):\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.canvas.draw()", "def draw(self):\n\n for item in self.vis:\n item.undraw()\n self.render()\n for item in self.vis:\n item.draw(self.win)\n self.drawn = True", "def all():\n adjust_spines()\n draggable_legend()\n plt.gcf().canvas.mpl_connect('close_event', handle_close)", "def plot(self):\n attr = self.Graph[\"root\"]\n if (self.type == 0 or self.type == 1):\n self.subplot_1(attr, 0)\n else:\n self.subplot_2(attr, 0)", "def redrawSegs(self):\n for seg in self.segs:\n self.drawSeg(seg)", "def on_draw_overlay(self):", "def __exit__(self, *args):\n # Do the last (and perhaps only) call's plotting\n self._doPlots()\n self._isSubplot = False\n self.opts.goGlobal()\n if not self.usingAgg:\n self.fig.canvas.mpl_connect('resize_event', self.subplots_adjust)", "def draw(self, ax):\n\n self.prepare()\n 
# Call to matplotlib method on ax here\n pass", "def updateDraw(self):\r\n self.delConns()\r\n self.delTags()\r\n self.drawConns()\r\n self.drawTags()", "def redraw(self):\n\n # First remove all items from group.\n for child in self.childItems():\n self.removeFromGroup(child)\n\n # It converts the SVG vector information to QItems.\n svg = self.generateSVG()\n\n item = True\n while item:\n # Goes through each SVG item and depending on the type,\n # extracts different attributes from it and creates the\n # QItem.\n item = svg[svg.find('<')+1 : svg.find('>')]\n if item == '':\n break\n svg = svg[svg.find('>')+1:]\n\n name = item.split(' ')[0]\n\n if name == 'line':\n QItem = self.canvas.scene.addLine(\n QtCore.QLineF(float(self.getSVGItemAttrValue(item, 'x1')),\n float(self.getSVGItemAttrValue(item, 'y1')),\n float(self.getSVGItemAttrValue(item, 'x2')),\n float(self.getSVGItemAttrValue(item, 'y2')))\n )\n\n elif name == 'rect':\n pass\n\n try:\n color = self.getSVGItemAttrValue(item, 'stroke')\n except IndexError:\n color = '#000000'\n QItem.setPen(QtGui.QColor(color))\n\n # Add the QItem to ourself so it is a part of the group.\n self.addToGroup(QItem)\n self.top()", "def draw(self,children):\n self.clip = [(0,0,gui._width+100, gui._height+100)]\n\n self.drawer.setClip(0,0,gui._width+100, gui._height+100)\n \n self.drawer.begin()\n z = 0\n for child in reversed(children):\n z += 1\n self.drawChild(0,0,z,child)\n \n self.drawer.end()", "def redraw(self):\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n for shape in self.shapes:\n shape.redraw()\n glFlush()\n self.SwapBuffers()", "def redraw(self):\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n for shape in self.shapes:\n shape.redraw()\n glFlush()\n self.SwapBuffers()", "def draw(self, renderer=None, *args, **kwargs):\n # NOTE: This mimics matplotlib API, which calls identical\n # post-processing steps in both draw() and get_tightbbox()\n self._hide_labels()\n self._altx_overrides()\n self._alty_overrides()\n self._dualx_overrides()\n self._dualy_overrides()\n self._datex_rotate()\n if self._inset_parent is not None and self._inset_zoom:\n self.indicate_inset_zoom()\n super().draw(renderer, *args, **kwargs)", "def draw_objects(self, view_manager):\n raise NotImplementedError(\"draw_objects can not be called directly from recoBase3D\")", "def _doPlots(self):\n ax = self.sp.ax\n if ax: ax.helper.doPlots()\n # Setting calls now use new local options\n self.opts.newLocal()", "def redraw(self):\n self.vispy_viewer.canvas.update()", "def draw():\n recursion_depth = self.get_recursion_depth()\n base_length = self.get_base_length()\n self.parent_class.classes[\"fractal\"].set_base_length(base_length)\n is_curved = self.vars[\"round_corners\"].get()\n fill_color = self.vars[\"fill_color\"].get()\n self.parent_class.classes[\"fractal\"].draw_fractal(\n recursion_depth, is_curved, fill_color)", "def redrawAll(screen, data):\n data.groups.terrain.draw(screen)\n data.groups.walls.draw(screen)\n data.groups.player.draw(screen)\n data.groups.projectiles.draw(screen)\n data.groups.spawners.draw(screen)\n data.groups.monsters.draw(screen)\n data.groups.items.draw(screen)\n data.groups.ui.draw(screen)\n data.groups.damagedWalls.draw(screen)\n if data.screenUI != None:\n screen.blit(data.screenUI, (0, 0))", "def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()", "def paint(self):\r\n self.canvas.delete(tkinter.ALL)\r\n 
self.visit(self.tree.root)", "def on_draw(self):\n self.clear()\n self.gamestatemanager.peek().on_draw(self.get_size())", "def on_draw(self):\n\t\tself.render()", "def on_draw(self):\n\t\tself.render()", "def __init__(self, subplot_class, *args, **kwargs):\n import pylab\n self.fig = pylab.figure(*args, **kwargs)\n self.subplot_class = subplot_class", "def redraw(self) -> None:\n self.canvas.draw_idle()\n self.Refresh()", "def on_draw_event(self, widget, ctx):\n # the _need_redraw flag doesnt work. it sometimes prevents\n # the rendering and leaving the canvas blank\n #if self._need_redraw:\n self._renderer.set_context(ctx)\n allocation = self.get_allocation()\n x, y, w, h = allocation.x, allocation.y, allocation.width, allocation.height\n self._render_figure(w, h)\n #self._need_redraw = False\n\n return False # finish event propagation?", "def draw(self, force = False):\n\t\tpass", "def __draw(self):\n plt.rcParams.update(self.settings.rcParams)\n\n self.fig = plt.figure()\n self.ax = self.fig.add_axes(self.axes_rect)\n\n xs = np.arange(1, self.xmax+1)\n ys = [np.arange(0, self.ymax) for i in range(self.xmax)]\n\n self.ax.plot(xs, ys)\n\n self.__draw_xaxis()\n self.__draw_yaxis()\n\n self.__draw_annotations()\n self.__draw_eras()\n self.__draw_era_spans()\n self.__draw_watermark()\n self.__draw_title()\n self.__draw_image()\n self.__draw_max_age()\n\n self.ax.set_aspect('equal', share=True)", "def draw(self):\n draw(self.graph)", "def init_figure(self): \r\n # add the big circle to represent the container\r\n BigCirc = plt.Circle((0,0), self.__ContainerRad, ec = 'b', fill = False, ls = 'solid')\r\n ax.add_artist(BigCirc)\r\n # initialise the axis to be animated and add it to the plot\r\n self.__text0 = ax.text(-9.9,9,\"f={:4d}\".format(0,fontsize=12))\r\n patches = [self.__text0]\r\n # add the patches for the balls to the plot\r\n for b in self.__ballList:\r\n pch = b.get_patch()\r\n ax.add_patch(pch)\r\n patches.append(pch)\r\n return patches", "def redraw(self):\n self.vispy_widget.canvas.update()", "def draw_nodes(self):\n pass", "def blit(self):\n # self.ax1.draw_artist(self.lines[:2])\n # self.ax2.draw_artist(self.lines[2:4])\n self.ax1.autoscale()\n self.ax2.autoscale()\n self.ax1.redraw_in_frame()\n self.ax2.redraw_in_frame()\n self.fig.canvas.blit(self.fig.bbox)", "def expose_graphics_methods():\n pass", "def clear_figure(self):\n self.figure.clf()", "def draw():" ]
[ "0.69585615", "0.68990314", "0.6801519", "0.67597973", "0.6633574", "0.64148325", "0.6317707", "0.6250047", "0.6198424", "0.61798114", "0.61377364", "0.6077331", "0.60669315", "0.60655534", "0.60549927", "0.6053331", "0.60529304", "0.6050833", "0.604263", "0.6037915", "0.60376084", "0.601923", "0.5978028", "0.5971856", "0.5968721", "0.5968721", "0.5968721", "0.5968721", "0.5968721", "0.5968721", "0.5968721", "0.5968721", "0.596863", "0.5953592", "0.5953262", "0.59430563", "0.5937118", "0.59353614", "0.593398", "0.58811307", "0.58811307", "0.58811307", "0.58811307", "0.58706194", "0.5863757", "0.58558184", "0.58558184", "0.58558184", "0.585158", "0.5843077", "0.58371073", "0.5835735", "0.58248615", "0.58240455", "0.5822931", "0.5814384", "0.5814158", "0.57659435", "0.5760105", "0.5751765", "0.574847", "0.5743847", "0.5738553", "0.5736953", "0.57252055", "0.5712683", "0.57067746", "0.5704837", "0.5686596", "0.5662345", "0.5650035", "0.56458026", "0.5636127", "0.562764", "0.562764", "0.5624733", "0.5623539", "0.5616232", "0.5603051", "0.5602962", "0.56010544", "0.5595723", "0.55907416", "0.558998", "0.5589466", "0.5589466", "0.55871654", "0.55805296", "0.5575066", "0.5574802", "0.55651647", "0.55608606", "0.5556784", "0.55550814", "0.55512774", "0.5549119", "0.5538003", "0.5529706", "0.55217683" ]
0.78338176
1
Crop rectangle from the image and return it
def crop(self, bbox): return self.__pyramid[0].crop(bbox)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crop(self, image):\n\t\treturn image.copy()[self.ymin:self.ymax,self.xmin:self.xmax]", "def crop_bounding_box(im, x, y, w, h):\n return im[y:y+h, x:x+w]", "def doCrop(image, x, y, w, h):\n\tcrop_height = int((config.FACE_HEIGHT / float(config.FACE_WIDTH)) * w)\n\tmidy = y + h/2\n\ty1 = max(0, midy-crop_height/2)\n\ty2 = min(image.shape[0]-1, midy+crop_height/2)\n\treturn image[y1:y2, x:x+w]", "def crop_image(self):\n\n image_data = Image.open(self.img_path)\n return image_data.crop(self.data_type)", "def crop(img, x, y, w, h):\n check_type(img)\n return img.crop((x, y, x + w, y + h))", "def crop_image(self, image, top_left, bottom_right):\n # Crop image to include ROI using slicing. The first entry is the length and the second entry is the height.\n image_cropped = image[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]\n \n # Save the cropped image\n cv2.imwrite(os.path.join(self.output_dir, f\"{self.input_image}_cropped.jpg\"), image_cropped)\n \n return image_cropped", "def crop_image(image):\r\n return image[40:-20, :]", "def crop_img(image, bound):\n scale = 1.01 # 1%\n return image.crop((bound.vertices[0].x // scale, bound.vertices[0].y // scale,\n int(bound.vertices[2].x * scale), int(bound.vertices[2].y) * scale))", "def crop(self):\n\n return self.image.crop(self.x, self.y, self.width(), self.height(), centered = True)", "def crop(self, *args, **kwargs):\n return _image.image_crop(self, *args, **kwargs)", "def crop(image, dimX, dimY):\n # TODO\n return image", "def crop(img: 'np.ndarray', x: int, y: int, width: int, height: int) -> 'np.ndarray':\n return img[y:y+height, x:x+width]", "def crop_face(image,face_rect):\n (x1,y1,x2,y2) = face_rect\n w = abs(x2-x1)\n h = abs(y2-y1)\n return image[y1:y1 + h, x1:x1 + w]", "def get_crop(\n self,\n x: NumberType,\n y: NumberType,\n width: NumberType,\n height: NumberType\n ) -> 'pygame.Surface':\n assert 0 <= x < self.get_width(), \\\n 'X position must be between 0 and the image width'\n assert 0 <= y < self.get_height(), \\\n 'Y position must be between 0 and the image width'\n assert 0 < width <= self.get_width(), \\\n 'Width must be greater than zero and less than the image width'\n assert 0 < height <= self.get_height(), \\\n 'Height must be greater than zero and less than the image height'\n assert (x + width) <= self.get_width(), \\\n 'Crop box cannot exceed image width'\n assert (y + height) <= self.get_height(), \\\n 'Crop box cannot exceed image height'\n rect = pygame.Rect(0, 0, 0, 0)\n rect.x = x\n rect.y = y\n rect.width = width\n rect.height = height\n return self.get_crop_rect(rect)", "def crop(img, top, left, height, width):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. 
Got {}'.format(type(img)))\n\n return img[top:top + height, left:left + width]", "def crop(img, boundaries):\n minx, miny, maxx, maxy = boundaries\n return img[miny:maxy, minx:maxx]", "def __call__(self, img):\n image_width, image_height = img.size\n image_short = min(image_width, image_height)\n\n crop_size = float(self.imgsize) / (self.imgsize + 32) * image_short\n\n crop_height, crop_width = crop_size, crop_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))", "def __call__(self, img):\n image_width, image_height = img.size\n image_short = min(image_width, image_height)\n\n crop_size = float(self.imgsize) / (self.imgsize + 32) * image_short\n\n crop_height, crop_width = crop_size, crop_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))", "def crop(img, i, j, h, w):\n return img.crop((j, i, j + w, i + h))", "def crop(img, i, j, h, w):\n return img.crop((j, i, j + w, i + h))", "def crop_image(input_image, output_image, start_x, start_y, width, height):\n box = (start_x, start_y, start_x + width, start_y + height)\n output_img = img.crop(box)\n output_img.save(output_image +\".png\")", "def cropbox(row):\n if row['Type'] == 'Rectangle':\n cropbox = [row['X'], row['Y'], row['X'] + row['Width'], \n row['Y'] + row['Height']]\n else:\n # damnit I should set up a logger\n print('WARNING: The annotation \"%s\" (index %d) is not a \\\n rectangle!' %(row['Image'], row['Index']))\n cropbox = None\n return cropbox", "def crop(img):\n new_shape = min(img.shape[0], img.shape[1])\n \n return img[0:new_shape, 0:new_shape, ...]", "def crop(img, top, left, width, height):\n if not all([isinstance(x, int) for x in (top, left, width, height)]):\n raise ValueError(\"params should be integer!\")\n if (width > img.shape[0] or height > img.shape[1]):\n raise ValueError(\"the output imgage size should be small than input image!!!\")\n\n if len(img.shape) == 2:\n img_height, img_width = img.shape\n else:\n img_height, img_width, _ = img.shape\n right = img_width - (left + width)\n bottom = img_height - (top + height)\n if len(img.shape) == 2:\n img_croped = util.crop(img,((top,bottom),(left,right)))\n else:\n img_croped = util.crop(img,((top,bottom),(left,right),(0,0)))\n\n return img_croped", "def crop_frame(frame, crop_region):\n tl_x = crop_region['top_left_x'] \n tl_y = crop_region['top_left_y']\n br_x = crop_region['bottom_right_x']\n br_y = crop_region['bottom_right_y']\n return frame[tl_y:br_y, tl_x:br_x]", "def Crop_Image(img, mask, x, y, width, height):\n img = img[y:y+height, x:x+width,:]\n mask = mask[y:y+height, x:x+width,:]\n return img, mask", "def crop_img(img, shape=(100, 100)):\r\n width, height = img.shape\r\n\r\n cx, cy = width / 2, height / 2\r\n sx, sy = cx - shape[0] / 2, cy - shape[1] / 2\r\n ex, ey = cx + shape[0] / 2, cy + shape[1] / 2\r\n\r\n return img[int(sx): int(ex), int(sy): int(ey)]", "def crop(image, x_low=0.3, x_up=0.7, y_low=0.3, y_up=0.7):\n\n x_l, x_h = image['x'].max() * x_low, image['x'].max() * x_up\n y_l, y_h = image['y'].max() * y_low, image['y'].max() * y_up\n image = image[(image.x > x_l) & (image.x < x_h)]\n image = image[(image.y > y_l) & (image.y < y_h)]\n return image", "def crop_img(img: np.ndarray, box: Tuple[float, float, float, float]) -> 
Optional[np.ndarray]:\n \n h, w, c = img.shape\n \n y0 = max(int(round(h * box[0])), 0)\n x0 = max(int(round(w * box[1])), 0)\n y1 = min(int(round(h * box[2])), h)\n x1 = min(int(round(w * box[3])), w)\n \n if y0 >= y1 or x0 >= x1:\n return None\n \n return img[y0:y1, x0:x1, :]", "def crop_bounding_box_from_image(bounding_box, image_path, with_margin=True):\n original_image = Image.open(image_path)\n margin_length = 0\n margin_height = 0\n if with_margin:\n # Extend bounding box length and height by 20%\n margin_length = 0.1 * (int(bounding_box[3]) - int(bounding_box[2]))\n margin_height = 0.1 * (int(bounding_box[5]) - int(bounding_box[4]))\n cropped_image = original_image.crop((int(bounding_box[2]) - margin_length, int(bounding_box[4]) - margin_height,\n int(bounding_box[3]) + margin_length, int(bounding_box[5]) + margin_height))\n return cropped_image", "def basic_crop(data):\n return data['crop'];", "def crop_image(img):\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n _, threshed = cv.threshold(gray, 240, 255, cv.THRESH_BINARY_INV)\n\n kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (11, 11))\n morphed = cv.morphologyEx(threshed, cv.MORPH_CLOSE, kernel)\n\n cnts = cv.findContours(morphed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)[-2]\n cnt = sorted(cnts, key=cv.contourArea)[-1]\n\n x, y, w, h = cv.boundingRect(cnt)\n dst = img[y:y+h, x:x+w]\n cv.imwrite(\"001.png\", dst)\n return dst", "def crop_object_from_image(saving_folder,root_folder_path,root_folder_name,row_info):\n class_name=row_info['class']\n file_id=row_info['file_id']\n img_type=row_info['type']\n xmin=row_info['x_min']\n xmax=row_info['x_max']\n ymin=row_info['y_min']\n ymax=row_info['y_max']\n\n\n origin_img_path=os.path.join(root_folder_path,root_folder_name,img_type,file_id+\".png\")\n crop_img_path=os.path.join(saving_folder,file_id+\"_\"+class_name+\".png\")\n\n origin_img=cv2.imread(origin_img_path)\n crop_img=origin_img[ymin:ymax-1,xmin:xmax-1]\n\n # If width or height only contain 1 pixel, do not crop.\n if xmax-xmin<=2 or ymax-ymin<=2:\n print(\"Only one pixel, pass!\")\n return 0\n # print(origin_img.shape)\n # print(xmin,xmax,ymin,ymax)\n # print(crop_img.shape)\n # print(crop_img_path)\n cv2.imwrite(crop_img_path,crop_img)", "def crop(image: Image, bounding_box: List[int], margin: float) -> Image:\n\n if margin < 0:\n raise ValueError(\"the margin must be a value between 0 and 1\")\n if margin > 1:\n raise ValueError(\n \"the margin must be a value between 0 and 1 - this is a change from the existing API\")\n\n img_height = image.shape[0]\n img_width = image.shape[1]\n x_0, y_0, x_1, y_1 = bounding_box[:4]\n margin_height = (y_1 - y_0) * margin / 2\n margin_width = (x_1 - x_0) * margin / 2\n x_0 = int(np.maximum(x_0 - margin_width, 0))\n y_0 = int(np.maximum(y_0 - margin_height, 0))\n x_1 = int(np.minimum(x_1 + margin_width, img_width))\n y_1 = int(np.minimum(y_1 + margin_height, img_height))\n return image[y_0:y_1, x_0:x_1, :], (x_0, y_0, x_1, y_1)", "def __crop(img, pos, size):\n ow, oh = img.size\n x1, y1 = pos\n tw = th = size\n if (ow > tw or oh > th):\n return img.crop((x1, y1, x1 + tw, y1 + th))\n return img", "def crop(self,crop_vector = [None, None, None, None]):\n xmin,xmax,ymin,ymax = crop_vector\n \n xmin = self._obj.x.min() if xmin is None else xmin\n xmax = self._obj.x.max() if xmax is None else xmax\n ymin = self._obj.y.min() if ymin is None else ymin\n ymax = self._obj.y.max() if ymax is None else ymax \n \n self._obj = self._obj.sel(x=slice(xmin, xmax),y=slice(ymin,ymax))\n\n return 
self._obj", "def crop_image(self, img):\n img.crop_image(self._center, 1.1 * self._radius)", "def faceCrop(im,x,y,w,h,m):\r\n sizeX, sizeY = im.size\r\n new_x, new_y = max(0,x-m*w), max(0,y-m*h)\r\n new_w = w + 2*m*w if sizeX > (new_x + w + 2*m*w) else sizeX - new_x\r\n new_h = h + 2*m*h if sizeY > (new_y + h + 2*m*h) else sizeY - new_y\r\n new_x,new_y,new_w,new_h = int(new_x),int(new_y),int(new_w),int(new_h)\r\n return im.crop((new_x,new_y,new_x+new_w,new_y+new_h))", "def _crop(image, offset_height, offset_width, crop_height, crop_width):\n original_shape = tf.shape(image)\n\n rank_assertion = tf.Assert(\n tf.equal(tf.rank(image), 3),\n ['Rank of image must be equal to 3.'])\n with tf.control_dependencies([rank_assertion]):\n cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])\n\n size_assertion = tf.Assert(\n tf.logical_and(\n tf.greater_equal(original_shape[0], crop_height),\n tf.greater_equal(original_shape[1], crop_width)),\n ['Crop size greater than the image size.'])\n\n offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))\n\n # Use tf.strided_slice instead of crop_to_bounding box as it accepts tensors\n # to define the crop size.\n with tf.control_dependencies([size_assertion]):\n image = tf.strided_slice(image, offsets, offsets + cropped_shape,\n strides=tf.ones_like(offsets))\n return tf.reshape(image, cropped_shape)", "def trim_image(image):\n bbox = image.getbbox()\n return image.crop(bbox)", "def crop_to_square(self, image):\n orig_height, orig_width, orig_channels = image.shape\n if orig_height > orig_width:\n return image[:orig_width, ...]\n elif orig_height < orig_width:\n return image[:, :orig_height, ...]\n return image", "def crop(self, bbox):\n if self.__huge: # image is huge and not totally in RAM\n band = bbox[3] - bbox[1] # width of the tile band\n self.__tile[1][3] = band # set the tile height\n self.__tile[2] = self.__offset + self.imwidth * bbox[1] * 3 # set offset of the band\n self.__image.close()\n self.__image = Image.open(self.path) # reopen / reset image\n self.__image.size = (self.imwidth, band) # set size of the tile band\n self.__image.tile = [self.__tile]\n return self.__image.crop((bbox[0], 0, bbox[2], band))\n else: # image is totally in RAM\n return self.__pyramid[0].crop(bbox)", "def crop(self, im, window):\n # Crop window from the image.\n crop = im[window[0]:window[2], window[1]:window[3]]\n\n if self.context_pad:\n box = window.copy()\n crop_size = self.net.blobs[self.net.inputs[0]].width # assumes square\n scale = crop_size / (1. 
* crop_size - self.context_pad * 2)\n # Crop a box + surrounding context.\n half_h = (box[2] - box[0] + 1) / 2.\n half_w = (box[3] - box[1] + 1) / 2.\n center = (box[0] + half_h, box[1] + half_w)\n scaled_dims = scale * np.array((-half_h, -half_w, half_h, half_w))\n box = np.round(np.tile(center, 2) + scaled_dims)\n full_h = box[2] - box[0] + 1\n full_w = box[3] - box[1] + 1\n scale_h = crop_size / full_h\n scale_w = crop_size / full_w\n pad_y = round(max(0, -box[0]) * scale_h) # amount out-of-bounds\n pad_x = round(max(0, -box[1]) * scale_w)\n\n # Clip box to image dimensions.\n im_h, im_w = im.shape[:2]\n box = np.clip(box, 0., [im_h, im_w, im_h, im_w])\n clip_h = box[2] - box[0] + 1\n clip_w = box[3] - box[1] + 1\n assert(clip_h > 0 and clip_w > 0)\n crop_h = round(clip_h * scale_h)\n crop_w = round(clip_w * scale_w)\n if pad_y + crop_h > crop_size:\n crop_h = crop_size - pad_y\n if pad_x + crop_w > crop_size:\n crop_w = crop_size - pad_x\n\n # collect with context padding and place in input\n # with mean padding\n context_crop = im[box[0]:box[2], box[1]:box[3]]\n context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w))\n crop = np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean\n crop[pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w)] = context_crop\n #\n return crop", "def crop_table(image):\n x = 760\n y = 300\n w = 600\n h = 640\n\n crop_img = image[y:y + h, x:x + w]\n # cv2.imshow(\"cropped\", crop_img)\n # cv2.waitKey()\n\n return crop_img, x, y", "def _crop_image(image: Image, w: int, h: int, area_to_crop: Tuple[int]) -> Image:\n expected_w = 480\n expected_h = 360\n if w != expected_w:\n print(\"Warning, image width:\", w, \"differs from expected width:\", expected_w)\n if h != expected_h:\n print(\"Warning, image height:\", h, \"differs from expected height:\", expected_h)\n\n cropped_image = image.crop(area_to_crop)\n w_cropped, h_cropped = cropped_image.size\n return cropped_image.resize((w_cropped, h_cropped))", "def test_crop_by_bbox(self):\n with Image.open(self.subject) as im:\n image = im.convert(\"RGB\")\n\n cropped = image_helper.crop_by_bbox(image, BoundingBox(0,0,15,15))\n\n self.assertEqual(cropped.size, (15, 15))", "def cropPt(img, start, end):\n\tx0, y0 = start\n\tx1, y1 = end\n\treturn crop(img, x0, y0, x1, y1)", "def get_crop_rect(self, rect: 'pygame.Rect') -> 'pygame.Surface':\n return self._surface.subsurface(rect)", "def crop_image(image: np.ndarray) -> np.ndarray:\n # convert image to grayscale and apply blur to reduce noise\n image_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n blurred = cv2.GaussianBlur(image_gray, (3, 3), 0)\n\n # global threshold using Otsu\n # Note: Although unpacking like this results in one of the variables to be unused and makes\n # PyTA heavily depressed, this is standard OpenCV notation.\n # For reference, you may check docs.opencv.org/master/d7/d4d/tutorial_py_thresholding.html\n\n ret1, thresh1 = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n\n # invert image color and find contours\n ret2, thresh2 = cv2.threshold(thresh1, 150, 255, cv2.THRESH_BINARY_INV)\n contours, hierarchy = cv2.findContours(thresh2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # create list of tuples with the contour itself and its arc length\n # then sort by arc length and take the two longest\n cont_len = [(cont, cv2.arcLength(cont, True)) for cont in contours]\n cont_len.sort(key=lambda x: -x[1])\n longest_2 = cont_len[0:2]\n rects = [cv2.boundingRect(tup[0]) for tup in longest_2]\n\n # take the smallest 
coordinates for the top left corner of rect\n # and largest for the bottom right corner\n min_x0, min_y0, max_x0, max_y0 = rects[0][0], rects[0][1], \\\n rects[0][0] + rects[0][2], \\\n rects[0][1] + rects[0][3]\n min_x1, min_y1, max_x1, max_y1 = rects[1][0], rects[1][1], \\\n rects[1][0] + rects[1][2], \\\n rects[1][1] + rects[1][3]\n min_x = min(min_x0, min_x1) + 1\n min_y = min(min_y0, min_y1) + 1\n max_x = max(max_x0, max_x1) - 1\n max_y = max(max_y0, max_y1) - 1\n\n cropped_img = image[min_y: max_y, min_x: max_x]\n return cropped_img", "def crop(\n self,\n x: NumberType,\n y: NumberType,\n width: NumberType,\n height: NumberType\n ) -> 'BaseImage':\n self._surface = self.get_crop(x, y, width, height)\n return self", "def _generate_crop(self):\n if self.box_drawn == True:\n if (self.cd_pic_num != -1) & (self.cd_crop_num == 1):\n self.communicator.generate_crop(picture_num=self.cd_pic_num, \\\n xa=self.xa, ya=self.ya, xb=self.xb, yb=self.yb)\n else:\n print \"ERROR: can only generate a new crop from a thumbnail\"\n else:\n print \"ERROR: please select an area to generate a crop from\"", "def __crop_img(img, cx, cy, max_axis, padding=0):\n\n new_height = max_axis\n new_width = max_axis\n\n cy -= new_height // 2\n cx -= new_width // 2\n\n if (cy + new_height) > img.shape[0]:\n shift = (cy + new_height) - img.shape[0]\n cy -= shift\n\n if (cx + new_width) > img.shape[1]:\n shift = (cx + new_width) - img.shape[1]\n cx -= shift\n\n cy = max(0., cy)\n cx = max(0., cx)\n\n cx = padding if cx == 0 else cx\n cy = padding if cy == 0 else cy\n\n cropped_img = img[cy - padding:cy + new_height + padding, cx - padding:cx + new_width + padding, :]\n\n return cropped_img", "def crop_image(sensor_path, box, expand=0):\n #Read data and mask\n try: \n src = rasterio.open(sensor_path)\n left, bottom, right, top = box.bounds\n window=rasterio.windows.from_bounds(left-expand, bottom-expand, right+expand, top+expand, transform=src.transform)\n masked_image = src.read(window=window)\n src.close()\n except Exception as e:\n raise ValueError(\"sensor path: {} failed at reading crop window {} with error {}\".format(sensor_path, box.bounds,e))\n \n #Roll depth to channel last\n masked_image = np.rollaxis(masked_image, 0, 3)\n \n #Skip empty frames\n if masked_image.size ==0:\n raise ValueError(\"Empty frame crop for box {} in sensor path {}\".format(box, sensor_path))\n \n return masked_image", "def crop(self, left=0, top=0, right=None, bottom=None,\n width=None, height=None, reset_coords=True):\n if not (right is None or width is None):\n raise TypeError('parameters right and width are exclusive each '\n 'other; use one at a time')\n elif not (bottom is None or height is None):\n raise TypeError('parameters bottom and height are exclusive each '\n 'other; use one at a time')\n def abs_(n, m, null=None):\n if n is None:\n return m if null is None else null\n elif not isinstance(n, numbers.Integral):\n raise TypeError('expected integer, not ' + repr(n))\n elif n > m:\n raise ValueError(repr(n) + ' > ' + repr(m))\n return m + n if n < 0 else n\n left = abs_(left, self.width, 0)\n top = abs_(top, self.height, 0)\n if width is None:\n right = abs_(right, self.width)\n width = right - left\n if height is None:\n bottom = abs_(bottom, self.height)\n height = bottom - top\n if width < 1:\n raise ValueError('image width cannot be zero')\n elif height < 1:\n raise ValueError('image width cannot be zero')\n elif left == top == 0 and width == self.width and height == self.height:\n return\n library.MagickCropImage(self.wand, 
width, height, left, top)\n self.raise_exception()\n if reset_coords:\n self.reset_coords()", "def crop (*args, **kwargs):\n return compute('crop', inputs=list(args), args=kwargs)", "def crop_rect(self, rect: 'pygame.Rect') -> 'BaseImage':\n self._surface = self.get_crop_rect(rect)\n return self", "def cropCnt(img, cnt):\n\tpts = extremePoints(cnt)\n\troi = crop(img, pts[\"L\"][0], pts[\"T\"][1], pts[\"R\"][0], pts[\"B\"][0])\n\treturn roi", "def crop(img, top, left, height, width, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n if _is_channel_first(data_format):\n return img[:, top : top + height, left : left + width]\n else:\n return img[top : top + height, left : left + width, :]", "def rgb_image_bounding_box(image_full_path, boundingBox, convert_bgr=False, autocrop=False):\n imgraw = cv2.imread(image_full_path, 1)\n if len(boundingBox) > 0:\n imgraw = imgraw[boundingBox[1]:boundingBox[3], boundingBox[0]:boundingBox[2], :]\n\n if autocrop:\n imgshape = imgraw.shape\n mindim = np.argmin([imgshape[0], imgshape[1]])\n cropdim = mindim\n boundingBox = [0, 0, imgshape[1], imgshape[0]]\n xtra = np.abs(imgshape[0] - imgshape[1])\n boundingBox[cropdim] = xtra // 2\n boundingBox[cropdim + 2] -= xtra // 2\n imgcrop = imgraw[boundingBox[1]:boundingBox[3], boundingBox[0]:boundingBox[2], :]\n else:\n imgcrop = imgraw\n\n if convert_bgr:\n imgcrop = cv2.cvtColor(imgcrop, cv2.COLOR_BGR2RGB)\n return imgcrop", "def crop_frame(frame):\n (h,w,c) = frame.shape\n return frame[int(h/2):h, 0:w]", "def imcrop(image, crop_range):\n return image[crop_range[0][0]:crop_range[0][1],\n crop_range[1][0]:crop_range[1][1], ...]", "def crop(img, side=\"left\"):\n\n if side is None:\n return img\n\n assert side in [\"left\", \"right\"], \"not a valid side\"\n\n # we take only 55% of the frame either left or right side\n width_img = img.shape[1]\n box_width = int(width_img*0.55)\n\n if side == 'left':\n img = img[:, :box_width]\n else:\n box_width = width_img - box_width\n img = img[:, box_width:width_img]\n\n return img", "def crop(self, coords):\n pass", "def crop_image(sensor_path, box, expand=0):\n #Read data and mask\n src = rasterio.open(sensor_path)\n left, bottom, right, top = box.bounds\n window=rasterio.windows.from_bounds(left-expand, bottom-expand, right+expand, top+expand, transform=src.transform)\n try:\n masked_image = src.read(window=window)\n except Exception as e:\n raise ValueError(\"sensor path: {} failed at reading window {} with error {}\".format(sensor_path, box.bounds,e))\n \n #Roll depth to channel last\n masked_image = np.rollaxis(masked_image, 0, 3)\n \n #Skip empty frames\n if masked_image.size ==0:\n raise ValueError(\"Empty frame crop for box {} in sensor path {}\".format(box, sensor_path))\n \n return masked_image", "def get_cropped_image(normal_path, segment_path):\n normal_img = cv2.imread(normal_path)\n segment_img = cv2.imread(segment_path)\n\n cropped_path = get_masked_image(normal_img, segment_img)\n\n return cropped_path", "def crop(self, rect):\n maybe_cropped_area = self.to_bbox().crop(rect)\n if len(maybe_cropped_area) == 0:\n return []\n else:\n [cropped_area] = maybe_cropped_area\n cropped_origin = PointLocation(row=cropped_area.top, col=cropped_area.left)\n cropped_area_in_data = cropped_area.translate(drow=-self._origin.row, dcol=-self.origin.col)\n return [MultichannelBitmap(data=cropped_area_in_data.get_cropped_numpy_slice(self._data),\n origin=cropped_origin,)]", "def crop_image_to_shape(image, x, y, shape):\n return crop_image(image, x, y, shape[0], 
shape[1])", "def cropCnt(img, cnt):\n\td = extremePoints(cnt)\n\troi = cropPt(img, (d[\"L\"][0], d[\"T\"][1]), (d[\"R\"][0], d[\"B\"][1]))\n\treturn roi", "def pil_crop_image(img, x1, y1, x2, y2, load=True):\n # type: (PImage.Image, int, int, int, int, bool) -> PImage.Image\n y_size = img.height\n x_size = img.width\n\n # Sanity check\n if (x1 > x_size or\n x2 > x_size or\n x1 < 0 or\n x2 < 0 or\n y1 > y_size or\n y2 > y_size or\n y1 < 0 or\n y2 < 0):\n raise ValueError('Invalid crop parameters for image shape: {}, ({}, {}), ({}, {})'.format(img.size, x1, y1, x2, y2))\n\n cropped_img = img.crop(box=(x1, y1, x2, y2))\n\n if load:\n cropped_img.load()\n\n return cropped_img", "def crop_to_square(image):\n\n if image is None:\n return None\n w, h = (image.shape[1], image.shape[0])\n w = float(w)\n h = float(h)\n\n # only crop images automatically if the aspect ratio is not bigger than 2 or not smaller than 0.5\n aspectRatio = w / h\n if aspectRatio > 3 or aspectRatio < 0.3:\n return None\n if aspectRatio == 1.0:\n return image\n \n # the shortest edge is the edge of our new square. b is the other edge\n a = min(w, h)\n b = max(w, h)\n\n # get cropping position\n x = (b - a) / 2.0\n\n # depending which side is longer we have to adjust the points\n # Heigth is longer\n if h > w:\n upperLeft = (0, x) \n else:\n upperLeft = (x, 0)\n cropW = cropH = a \n return crop_image(image, upperLeft[0], upperLeft[1], cropW, cropH)", "def crop_image(im, roi, roi_shape):\r\n roi = [int(v) for v in roi]\r\n cut_roi = limit_roi(roi, im.shape[0], im.shape[1])\r\n\r\n if len(im.shape) == 3:\r\n im_roi = im[cut_roi[1]:cut_roi[3]+1, cut_roi[0]:cut_roi[2]+1, :]\r\n else:\r\n im_roi = im[cut_roi[1]:cut_roi[3]+1, cut_roi[0]:cut_roi[2]+1]\r\n im_roi = cv2.copyMakeBorder(im_roi,\r\n cut_roi[1] - roi[1], roi[3] - cut_roi[3],\r\n cut_roi[0] - roi[0], roi[2] - cut_roi[2],\r\n cv2.BORDER_CONSTANT)\r\n roi_shape = (roi_shape[-1], roi_shape[-2])\r\n\r\n im_roi = cv2.resize(im_roi, roi_shape, interpolation=cv2.INTER_LINEAR)\r\n \r\n im_roi = im_roi.astype(np.float32)\r\n return im_roi", "def toCrop(curr_width, curr_height, bounds_width, bounds_height):\n\n\t# Return\n\tlRet\t= [0,0]\n\n\t# Convert current width/height to floats\n\tcurr_width\t\t= float(curr_width)\n\tcurr_height\t\t= float(curr_height)\n\n\t# If either width or height is smaller than the boundary box, resize up\n\tif curr_width < bounds_width or curr_height < bounds_height:\n\n\t\t# Which is the side that needs to grow more?\n\t\tif (bounds_width / curr_width) > (bounds_height / curr_height):\n\t\t\tlRet[0]\t= bounds_width\n\t\t\tlRet[1]\t= int(round(bounds_width * (curr_height / curr_width)))\n\t\telse:\n\t\t\tlRet[1]\t= bounds_height\n\t\t\tlRet[0]\t= int(round(bounds_height * (curr_width / curr_height)))\n\n\t# Else if the image is already larger than the boundary, resize down\n\telse:\n\n\t\t# Which is the side that needs to shrink less?\n\t\tif (curr_width / bounds_width) > (curr_height / bounds_height):\n\t\t\tlRet[1]\t= bounds_height\n\t\t\tlRet[0]\t= int(round(bounds_height * (curr_width / curr_height)))\n\t\telse:\n\t\t\tlRet[0]\t= bounds_width\n\t\t\tlRet[1]\t= int(round(bounds_width * (curr_height / curr_width)))\n\n\t# Return the new dimensions\n\treturn lRet", "def crop(img, size, point=(0, 0)):\n y, x = point\n w, h = size\n hf, wf, _ = img.shape\n\n if not isinstance(x, int):\n y = min(int(wf * y), wf)\n x = min(int(hf * x), hf)\n\n if not isinstance(w, int):\n w = int(wf * w)\n h = int(hf * h)\n\n x2 = min(x + h, hf) - 1\n y2 = min(y + w, wf) - 
1\n log.debug(\"w = %d, x2=%d, %s\" % (w, x2, img.shape))\n img2 = img[x:x2, y:y2, :].copy()\n return img2", "def crop_outlined_image(frame: imageType) -> Opt[imageType]:\n largest_contour = get_largest_contour(frame)\n if largest_contour is not None:\n mask = np.zeros(frame.shape, dtype=np.uint8)\n cv2.drawContours(mask, [largest_contour], -1, color=255, thickness=-1) # color = opacity?\n\n # compute its bounding box of pill, then extract the ROI, and apply the mask\n h: int\n w: int\n x: int\n y: int\n (x, y, w, h) = cv2.boundingRect(largest_contour)\n imageROI = cast(imageType, frame[y:y + h, x:x + w])\n maskROI = mask[y:y + h, x:x + w]\n imageROI = cv2.bitwise_and(imageROI, imageROI, mask=maskROI)\n # skew = get_image_skew(frame)\n # if skew > 0: # , need to rotateanticlockwise\n # imageROI = imutils.rotate_bound(imageROI, -skew)\n return imageROI\n else:\n return None", "def get_crop(self):\n if self.cropping_method == self.CROP_NONE:\n self.autocrop()\n return '{h}% {v}%'.format(h=self.from_left, v=self.from_top)", "def crop_array(array: np.array, rect: Rect, bbox: Rect) -> np.array:\n width = bbox.right - bbox.left\n height = bbox.bottom - bbox.top\n\n left = clamp(bbox.left - rect.left, minimum=0, maximum=width)\n right = clamp(bbox.right - rect.left, minimum=0, maximum=width)\n top = clamp(bbox.top - rect.top, minimum=0, maximum=height)\n bottom = clamp(bbox.bottom - rect.top, minimum=0, maximum=height)\n\n if array.ndim == 2:\n return array[top:bottom, left:right]\n else:\n return array[top:bottom, left:right, :]", "def _central_crop(image, crop_size):\r\n shape = tf.shape(input=image)\r\n height, width = shape[0], shape[1]\r\n\r\n amount_to_be_cropped_h = (height - crop_size[0])\r\n crop_top = amount_to_be_cropped_h // 2\r\n amount_to_be_cropped_w = (width - crop_size[1])\r\n crop_left = amount_to_be_cropped_w // 2\r\n return tf.slice(\r\n image, [crop_top, crop_left, 0], [crop_size[0], crop_size[1], -1])", "def __randomCrop(self, img):\n limit = self.PROCESSING_DIM - self.INPUT_DIM\n # pick 2 random integers less than this limit as the origin of the cropped image\n x_start = np.random.randint(limit)\n y_start = np.random.randint(limit)\n return img.crop((x_start, y_start, x_start + self.INPUT_DIM, y_start + self.INPUT_DIM))", "def crop_frame(frame):\n return frame[9:195,:]", "def crop(X,size_crop=_size_crop):\n b = size_crop//2\n shape = tf.shape(X)\n cx= shape[0]//2\n cy= shape[1]//2\n return X[cx-b:cx+b,cy-b:cy+b,...]", "def crop(self, xdiv, ydiv, img, bBoxes=None):\n xstride = img.shape[1] // xdiv\n ystride = img.shape[0] // ydiv\n\n widthLimits = np.zeros((xdiv+1,), dtype=np.int32)\n heightLimits = np.zeros((ydiv+1), dtype=np.int32)\n croppedImages = [[] for _ in range(xdiv*ydiv)]\n croppedBoxes = [[] for _ in range(xdiv*ydiv)]\n index = 0\n for x in range(0, img.shape[1]+1, xstride):\n widthLimits[index] = x\n index += 1\n index = 0\n for y in range(0, img.shape[0]+1, ystride):\n heightLimits[index] = y\n index+=1\n index = 0\n for i in range(len(widthLimits)-1):\n for j in range(len(heightLimits)-1):\n croppedImages[index] = img[heightLimits[j]:heightLimits[j+1], widthLimits[i]:widthLimits[i+1]]\n index += 1\n if bBoxes:\n for box in bBoxes:\n index = 0\n for i in range(len(widthLimits)-1):\n for j in range(len(heightLimits)-1):\n if box[0] >= widthLimits[i] and box[2] < widthLimits[i+1] \\\n and box[1] >= heightLimits[j] and box[3] < heightLimits[j+1]:\n box[0] -= widthLimits[i]\n box[2] -= widthLimits[i]\n box[1] -= heightLimits[j]\n box[3] -= heightLimits[j]\n 
croppedBoxes[index].append(box)\n index += 1\n return croppedImages, croppedBoxes", "def get_rect(self):\n return self.pic.get_rect().move(self.pos)", "def crop(img, i, j, h, w):\n if not _is_numpy_image(img):\n raise TypeError('img should be nparray Image. Got {}'.format(type(img)))\n\n return img[i:i+h, j:j+w]", "def crop_face(image):\n gray_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n face_roi_list = face_detector.detectMultiScale(gray_image, scale_factor, min_neighbors)\n \n if len(face_roi_list) > 0:\n (x,y,w,h) = face_roi_list[0]\n return gray_image[y:y+h,x:x+w]\n else:\n return None", "def observation(self, frame):\r\n if self.grayscale:\r\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\r\n frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)\r\n crop_frame = frame[self.crop:, :]\r\n if self.grayscale:\r\n crop_frame = crop_frame[:,:, None]\r\n return crop_frame", "def crop_to_face(self):\n original = self.images['original']\n original.generate_borders(sigma=self.sigma)\n contours, hierarchy = cv2.findContours(\n original.borders,\n cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n hierarchy = hierarchy[0] # For some reason, there's an extra, useless dimension.\n\n polygons = list()\n for contour, h in zip(contours, hierarchy):\n polygon, is_square = self._squareness(contour)\n polygons += [(polygon, h, is_square)]\n\n squares = list()\n discarded = list()\n for p, h, is_square in polygons:\n if is_square:\n squares += [(p, h)]\n else:\n discarded += [(p, h)]\n\n faces = list()\n for s, h in squares:\n # If no parents but has children.\n if h[3] == -1 and h[2] != -1:\n faces += [(s, h)]\n\n face = None\n if len(faces) == 1:\n face = faces[0]\n rectangle = cv2.boundingRect(face[0])\n cropped = original.crop_image(rectangle)\n else:\n cropped = original\n\n self.candidates['faces'] = faces\n self.candidates['squares'] = squares\n self.candidates['discarded'] = discarded\n self.images['cropped'] = cropped", "def crop(self, padding, random=True):\n self.get_roi(padding=padding, random=random)\n self.bgr = self.camera_model.crop_resize_image(self.bgr)\n self.depth = self.camera_model.crop_resize_image(\n self.depth, interpolation=Image.NEAREST)", "def _crop_data(self, results, crop_size, allow_negative_crop):\n assert crop_size[0] > 0 and crop_size[1] > 0\n for key in results.get('img_fields', ['img']):\n img = results[key]\n margin_h = max(img.shape[0] - crop_size[0], 0)\n margin_w = max(img.shape[1] - crop_size[1], 0)\n offset_h = np.random.randint(0, margin_h + 1)\n offset_w = np.random.randint(0, margin_w + 1)\n crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]\n crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]\n\n # crop the image\n img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]\n img_shape = img.shape\n results[key] = img\n results['img_shape'] = img_shape\n\n # crop bboxes accordingly and clip to the image boundary\n for key in results.get('bbox_fields', []):\n # e.g. 
gt_bboxes and gt_bboxes_ignore\n bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],\n dtype=np.float32)\n bboxes = results[key] - bbox_offset\n if self.bbox_clip_border:\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (\n bboxes[:, 3] > bboxes[:, 1])\n # If the crop does not contain any gt-bbox area and\n # allow_negative_crop is False, skip this image.\n if (key == 'gt_bboxes' and not valid_inds.any()\n and not allow_negative_crop):\n return None\n results[key] = bboxes[valid_inds, :]\n # label fields. e.g. gt_labels and gt_labels_ignore\n label_key = self.bbox2label.get(key)\n if label_key in results:\n results[label_key] = results[label_key][valid_inds]\n\n # mask fields, e.g. gt_masks and gt_masks_ignore\n mask_key = self.bbox2mask.get(key)\n if mask_key in results:\n results[mask_key] = results[mask_key][\n valid_inds.nonzero()[0]].crop(\n np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))\n if self.recompute_bbox:\n results[key] = results[mask_key].get_bboxes()\n\n # crop semantic seg\n for key in results.get('seg_fields', []):\n results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]\n\n return results", "def _crop(self, fieldname, scale, box):\n croputils = IImageCroppingUtils(self.context)\n data = croputils.get_image_data(fieldname)\n\n original_file = StringIO(data)\n image = PIL.Image.open(original_file)\n image_format = image.format or self.DEFAULT_FORMAT\n\n cropped_image = image.crop(box)\n cropped_image_file = StringIO()\n cropped_image.save(cropped_image_file, image_format, quality=100)\n cropped_image_file.seek(0)\n\n croputils.save_cropped(fieldname, scale, cropped_image_file)\n\n # store crop information in annotations\n self._store(fieldname, scale, box)\n\n # Purge caches if needed\n notify(Purge(self.context))", "def crop(src_path, out_path, bbox_geometry, bbox_crs):\n\n # validate area of interest\n\n\n # load imagery\n satdata = rasterio.open(src_path)\n\n # grab crs\n crs = satdata.meta['crs']\n crs = str(crs).split(':')[-1]\n\n # check crs\n if(crs != bbox_crs):\n raise Exception(f'Imagery & bounding box crs mismatch ({crs}, {bbox_crs})')\n\n # apply mask with crop=True to crop the resulting raster to the AOI's bounding box\n clipped, transform = mask(satdata, aoi, crop=True)\n\n # Using a copy of the metadata from our original raster dataset, we can write a new geoTIFF\n # containing the new, clipped raster data:\n meta = satdata.meta.copy()\n\n # update metadata with new, clipped mosaic's boundaries\n meta.update(\n {\n \"transform\": transform,\n \"height\":clipped.shape[1],\n \"width\":clipped.shape[2]\n }\n )\n\n # write the clipped-and-cropped dataset to a new GeoTIFF\n with rasterio.open(out_path, 'w', **meta) as dst:\n dst.write(clipped)", "def test_crop(self):\r\n u = Uploader()\r\n size = (100, 100)\r\n im = Image.new('RGB', size)\r\n folder = tempfile.mkdtemp()\r\n u.upload_folder = folder\r\n im.save(os.path.join(folder, 'image.png'))\r\n coordinates = (0, 0, 50, 50)\r\n file = FileStorage(filename=os.path.join(folder, 'image.png'))\r\n with patch('pybossa.uploader.Image', return_value=True):\r\n err_msg = \"It should crop the image\"\r\n assert u.crop(file, coordinates) is True, err_msg\r\n\r\n with patch('pybossa.uploader.Image.open', side_effect=IOError):\r\n err_msg = \"It should return false\"\r\n assert u.crop(file, coordinates) is False, err_msg", "def crop_video(self, video=None):\r\n\r\n if video is 
None:\r\n video = self.video_buffer\r\n self.get_roi(video=video, window_name='Darw the ROI on any frame in the video and click Esc')\r\n roi = self.roi\r\n return video[:, roi['y1']: roi['y2'], roi['x1']: roi['x2'], :]", "def crop_image(grayscale_image, raft_center, width):\n top_row = int(raft_center[1] - width / 2)\n # note that y corresponds to rows, and is directed from top to bottom in scikit-image\n bottom_row = int(raft_center[1] + width / 2)\n\n left_column = int(raft_center[0] - width / 2)\n right_column = int(raft_center[0] + width / 2)\n\n raft_image = grayscale_image[top_row:bottom_row, left_column:right_column]\n return raft_image", "def crop(self, bounds, destructive=False, allow_empty=False):\n bounds = Bounds(*bounds)\n oob = points_out_of_bounds(self, bounds)\n # Deal with empty pointclouds\n if oob.all():\n if allow_empty:\n return type(self)(None)\n else:\n raise simulocloud.exceptions.EmptyPointCloud(\n \"No points in crop bounds:\\n{}\".format(bounds))\n \n cropped = type(self)(self._arr[:, ~oob])\n if destructive:\n self.__init__(self._arr[:, oob])\n return cropped", "def crop(im, config):\n\n src_width, src_height = im.size\n\n src_ratio = float(src_width) / float(src_height)\n dst_ratio = float(config['width']) / float(config['height'])\n\n if dst_ratio < src_ratio:\n crop_height = src_height\n crop_width = crop_height * dst_ratio\n x_offset = int(float(src_width - crop_width) / 2)\n y_offset = 0\n else:\n crop_width = src_width\n crop_height = crop_width / dst_ratio\n x_offset = 0\n y_offset = int(float(src_height - crop_height) / 3)\n\n im = im.crop((\n x_offset,\n y_offset,\n x_offset + int(crop_width),\n y_offset + int(crop_height),\n ))\n\n return im.resize(\n (config['width'], config['height']),\n ANTIALIAS,\n )", "def crop_image(image):\n delta = .05\n rand_top_ratio = random.uniform(default_top_ratio - delta,\n default_top_ratio + delta)\n rand_bot_ratio = random.uniform(default_bot_tatio - delta,\n default_bot_tatio + delta)\n image = preprocess(image, top_ratio=rand_top_ratio, bot_ratio=rand_bot_ratio)\n\n return image", "def np_crop_image(np_img, x1, y1, x2, y2):\n # type: (np.ndarray, int, int, int, int) -> np.ndarray\n y_size = np_img.shape[0]\n x_size = np_img.shape[1]\n\n # Sanity check\n if (x1 > x_size or\n x2 > x_size or\n x1 < 0 or\n x2 < 0 or\n y1 > y_size or\n y2 > y_size or\n y1 < 0 or\n y2 < 0):\n raise ValueError('Invalid crop parameters for image shape: {}, ({}, {}), ({}, {})'.format(np_img.shape, x1, y1, x2, y2))\n\n return np_img[y1:y2, x1:x2]", "def crop_image(image_to_crop, year):\r\n\timg = Image.open(image_to_crop)\r\n\t#The dimensions of just the US in the image\r\n\timg = img.crop((80, 240, 800, 615))\r\n\r\n\tfile_destination = \"images/cropped_images/\" + str(year) + \".png\"\r\n\r\n\timage_file = open(file_destination, 'wb')\r\n\timg.save(image_file, 'png')\r\n\timage_file.close()", "def create_cropped_data(image_array: np.ndarray, crop_size: tuple, crop_center: tuple, crop_only: bool = True):\n if not crop_only:\n # check parameters\n if not isinstance(image_array, np.ndarray) or len(image_array.shape) != 2:\n raise ValueError('image_array is not a 2D numpy array')\n elif len(crop_size) != 2 or len(crop_center) != 2:\n raise ValueError('crop size or crop center tuples have invalid amount of values')\n elif crop_size[0] % 2 == 0 or crop_size[1] % 2 == 0:\n raise ValueError('crop size contains an even number')\n # check rectangle position\n min_x = crop_center[0] - crop_size[0] // 2\n max_x = crop_center[0] + crop_size[0] // 2\n 
min_y = crop_center[1] - crop_size[1] // 2\n max_y = crop_center[1] + crop_size[1] // 2\n if not crop_only:\n crop_margin = 20\n if not (crop_margin <= min_x and max_x < image_array.shape[0] - crop_margin and\n crop_margin <= min_y and max_y < image_array.shape[1] - crop_margin):\n raise ValueError('the crop rectangle is too close to the edges')\n if crop_only:\n # create crop array\n crop_array = np.zeros_like(image_array)\n crop_array[min_x:max_x + 1, min_y:max_y + 1] = 1\n return crop_array\n else:\n # target_array = crop region in image_array\n target_array = np.copy(image_array[min_x:max_x + 1, min_y:max_y + 1])\n # set image_array values in crop region to 0 (in-place)\n image_array[min_x:max_x + 1, min_y:max_y + 1] = 0\n return image_array, target_array", "def crop_face(self, image):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n faces = self.faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30)\n )\n #\n # # Draw a rectangle around the faces\n # for (x, y, w, h) in faces:\n # cv2.rectangle(gray, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n # Display the resulting frame\n if len(faces) == 0:\n raise FaceNotFoundError()\n x, y, w, h = faces[0] # gets the first face\n cropped = image[y:y + h, x:x + w, :]\n return cropped" ]
[ "0.7942811", "0.79130894", "0.7909533", "0.7762608", "0.77604425", "0.768507", "0.7674636", "0.7651629", "0.7633949", "0.76148933", "0.7608375", "0.75981236", "0.75507385", "0.74944735", "0.74684227", "0.7441258", "0.7434214", "0.7434214", "0.741522", "0.741522", "0.73019385", "0.729235", "0.729026", "0.72589904", "0.7108505", "0.7106228", "0.7093503", "0.70915776", "0.7084093", "0.70631385", "0.705811", "0.7049178", "0.7037221", "0.7027466", "0.70232105", "0.70183396", "0.6997076", "0.6992722", "0.69600177", "0.6952822", "0.6937316", "0.6933644", "0.69308805", "0.68517184", "0.68512267", "0.68339723", "0.6833131", "0.6806742", "0.6797106", "0.67893904", "0.67844343", "0.6760016", "0.67367035", "0.6733293", "0.6727452", "0.66983074", "0.6687343", "0.66731817", "0.66487455", "0.6642036", "0.66245514", "0.6616507", "0.660355", "0.6595053", "0.65923697", "0.6589674", "0.6576747", "0.6573775", "0.6568553", "0.6560811", "0.6556409", "0.6536079", "0.6533554", "0.6520925", "0.6509301", "0.649717", "0.6494048", "0.6487342", "0.64800024", "0.6476008", "0.64714783", "0.64709985", "0.6462439", "0.6451893", "0.6449018", "0.6394173", "0.6390516", "0.63745207", "0.636728", "0.63598055", "0.63432115", "0.6341935", "0.63336694", "0.6329196", "0.6322816", "0.6313143", "0.6282383", "0.627937", "0.6268556", "0.62519956" ]
0.7051762
31
Show image on the Canvas. Implements correct image zoom almost like in Google Maps
def __show_image(self): box_image = self.canvas_image.coords(self.container) # get image area box_canvas = (self.canvas_image.canvasx(0), # get visible area of the canvas self.canvas_image.canvasy(0), self.canvas_image.canvasx(self.canvas_image.winfo_width()), self.canvas_image.canvasy(self.canvas_image.winfo_height())) self.box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly # Get scroll region box box_scroll = [min(self.box_img_int[0], box_canvas[0]), min(self.box_img_int[1], box_canvas[1]), max(self.box_img_int[2], box_canvas[2]), max(self.box_img_int[3], box_canvas[3])] # Horizontal part of the image is in the visible area if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]: box_scroll[0] = self.box_img_int[0] box_scroll[2] = self.box_img_int[2] # Vertical part of the image is in the visible area if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]: box_scroll[1] = self.box_img_int[1] box_scroll[3] = self.box_img_int[3] # Convert scroll region to tuple and to integer self.canvas_image.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile y1 = max(box_canvas[1] - box_image[1], 0) x2 = min(box_canvas[2], box_image[2]) - box_image[0] y2 = min(box_canvas[3], box_image[3]) - box_image[1] if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid (int(x1 / self.__scale), int(y1 / self.__scale), int(x2 / self.__scale), int(y2 / self.__scale))) # imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter)) self.imageid = self.canvas_image.create_image(max(box_canvas[0], self.box_img_int[0]), max(box_canvas[1], self.box_img_int[1]), anchor='nw', image=imagetk) self.canvas_image.lower(self.imageid) # set image into background self.canvas_image.imagetk = imagetk # keep an extra reference to prevent garbage-collection
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __draw_image(self):\n if self.image_name is not None:\n img = mpimg.imread(self.image_name)\n extent = (0.5, self.xmax+0.5, -0.5, self.ymax-0.5)\n self.ax.imshow(img, extent=extent, origin='lower',\n alpha=self.image_alpha)", "def showImage(self,image):\n if isinstance(image,QtGui.QImage):\n filename = None\n else:\n filename = str(image)\n image = QtGui.QImage(filename)\n if image.isNull():\n raise ValueError,\"Cannot load image file %s\" % filename\n #print(\"Size %sx%s\" % (image.width(),image.height()))\n self.setPixmap(QtGui.QPixmap.fromImage(image))\n self.filename = filename\n self.image = image \n self.zoom = 1.0", "def show_to_window(self):\n if self.normal_mode:\n self.show_image.show_original_image(\n self.image, self.width_original_image)\n self.show_image.show_result_image(\n self.image, self.width_result_image, self.angle)\n\n else:\n if self.panorama_mode:\n image = draw_polygon(\n self.image.copy(),\n self.mapX_pano,\n self.mapY_pano)\n mapX = np.load(\n './plugins/Thread_inspection/view_image/maps_pano/mapX.npy')\n mapY = np.load(\n './plugins/Thread_inspection/view_image/maps_pano/mapY.npy')\n rho = self.panorama.rho\n\n self.result_image = cv2.remap(\n self.image,\n mapX,\n mapY,\n cv2.INTER_CUBIC)\n self.result_image = self.result_image[round(\n rho + round(self.moildev.getRhoFromAlpha(30))):self.h, 0:self.w]\n # print(self.width_result_image)\n else:\n image = draw_polygon(self.image.copy(), self.mapX, self.mapY)\n self.result_image = cv2.remap(\n self.image,\n self.mapX,\n self.mapY,\n cv2.INTER_CUBIC)\n self.show_image.show_original_image(\n image, self.width_original_image)\n self.show_image.show_result_image(\n self.result_image, self.width_result_image, self.angle)", "def draw_image(self):\n self.PDF.saveState()\n self.PDF.scale(1, -1)\n # self.PDF.drawImage(\n # LOGO, 490, -78, width=80, preserveAspectRatio=True, mask=\"auto\"\n # )\n self.PDF.restoreState()", "def zoom(cls, img, zoom):\n w, h = img.size\n x = h / 2\n y = w / 2\n zoom2 = zoom * 2\n img = img.crop((x - w / zoom2, y - h / zoom2,\n x + w / zoom2, y + h / zoom2))\n return img.resize((w, h), Image.LANCZOS)", "def __show_image(self):\n box_image = self.canvas.coords(self.container) # get image area\n box_canvas = (self.canvas.canvasx(0), # get visible area of the canvas\n self.canvas.canvasy(0),\n self.canvas.canvasx(self.canvas.winfo_width()),\n self.canvas.canvasy(self.canvas.winfo_height()))\n box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly\n # Get scroll region box\n box_scroll = [min(box_img_int[0], box_canvas[0]), min(box_img_int[1], box_canvas[1]),\n max(box_img_int[2], box_canvas[2]), max(box_img_int[3], box_canvas[3])]\n # Horizontal part of the image is in the visible area\n if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:\n box_scroll[0] = box_img_int[0]\n box_scroll[2] = box_img_int[2]\n # Vertical part of the image is in the visible area\n if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:\n box_scroll[1] = box_img_int[1]\n box_scroll[3] = box_img_int[3]\n # Convert scroll region to tuple and to integer\n self.canvas.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region\n x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile\n y1 = max(box_canvas[1] - box_image[1], 0)\n x2 = min(box_canvas[2], box_image[2]) - box_image[0]\n y2 = min(box_canvas[3], box_image[3]) - box_image[1]\n if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the 
visible area\n if self.__huge and self.__curr_img < 0: # show huge image\n h = int((y2 - y1) / self.imscale) # height of the tile band\n self.__tile[1][3] = h # set the tile band height\n self.__tile[2] = self.__offset + self.imwidth * int(y1 / self.imscale) * 3\n self.__image.close()\n self.__image = Image.open(self.path) # reopen / reset image\n self.__image.size = (self.imwidth, h) # set size of the tile band\n self.__image.tile = [self.__tile]\n image = self.__image.crop((int(x1 / self.imscale), 0, int(x2 / self.imscale), h))\n else: # show normal image\n image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid\n (int(x1 / self.__scale), int(y1 / self.__scale),\n int(x2 / self.__scale), int(y2 / self.__scale)))\n #\n imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))\n imageid = self.canvas.create_image(max(box_canvas[0], box_img_int[0]),\n max(box_canvas[1], box_img_int[1]),\n anchor='nw', image=imagetk)\n self.canvas.lower(imageid) # set image into background\n if self.last_image_id != 0:\n self.canvas.delete(self.last_image_id)\n\n self.last_image_id = imageid\n self.canvas.imagetk = imagetk # keep an extra reference to prevent garbage-collection", "def showImage(self, image):\n \n self.image = img", "def on_image(self, image):", "def paint(self, event):\r\n width, height = self.imageView.Size\r\n dimension = min(width, height)\r\n\r\n if dimension < self.image.dimension:\r\n resizeQuality = wx.IMAGE_QUALITY_BICUBIC\r\n elif dimension < self.image.dimension * 2:\r\n resizeQuality = wx.IMAGE_QUALITY_BILINEAR\r\n else:\r\n resizeQuality = wx.IMAGE_QUALITY_NORMAL\r\n\r\n image = self.image.image().Scale(dimension, dimension, resizeQuality)\r\n\r\n self.imageView.Refresh()\r\n\r\n dc = wx.AutoBufferedPaintDC(self.imageView)\r\n dc.Clear()\r\n dc.DrawBitmap(wx.Bitmap(image),\r\n (width - dimension) // 2,\r\n (height - dimension) // 2)", "def draw(self, canvas):\n canvas.delete(\"all\")\n width = canvas.winfo_reqwidth()\n height = canvas.winfo_reqheight()\n\n image = ImageTk.PhotoImage(self.image())\n canvas.create_image(width/2, height/2, image=image)\n canvas.img = image", "def view(self):\n window = tk.Tk()\n label = tk.Label(window)\n label.pack()\n img = self.get_tkimage()\n label[\"image\"] = label.img = img\n window.mainloop()", "def show_image(self, name, img, loc=(0,0), anchor='nw',\n scale=1, background=False, interpolation=cv2.INTER_LINEAR):\n if name in self._images:\n self._canvas.delete(self._images[name][0])\n self._images[name] = [img, None, -1] # image (original size), img_tk (shown), item_id\n img = cv2.resize(img, (int(round(img.shape[1]*scale)),\n int(round(img.shape[0]*scale))), interpolation=interpolation)\n self._images[name][1] = ImageTk.PhotoImage(image=Image.fromarray(img))\n self._images[name][2] = self._canvas.create_image(loc[0], loc[1],\n anchor=anchor, image=self._images[name][1])\n if background:\n self._width = img.shape[1]\n self._height = img.shape[0]\n self._canvas.config(width=self._width,\n height=self._height)\n return self._images[name][1].width(), self._images[name][1].height()", "def show_image(img, figsize=(10, 10)):\n plt.figure(figsize=figsize)\n plt.imshow(img)\n plt.show()", "def zoom_to_full_image_selection(self, image_rect, animate=False):\n zoomed_image_height = image_rect[2] - image_rect[0]\n zoomed_image_width = image_rect[3] - image_rect[1]\n\n canvas_height_width_ratio = self.variables.canvas_height / self.variables.canvas_width\n zoomed_image_height_width_ratio = 
zoomed_image_height / zoomed_image_width\n\n new_image_width = zoomed_image_height / canvas_height_width_ratio\n new_image_height = zoomed_image_width * canvas_height_width_ratio\n\n if zoomed_image_height_width_ratio > canvas_height_width_ratio:\n image_zoom_point_center = (image_rect[3] + image_rect[1]) / 2\n image_rect[1] = image_zoom_point_center - new_image_width / 2\n image_rect[3] = image_zoom_point_center + new_image_width / 2\n else:\n image_zoom_point_center = (image_rect[2] + image_rect[0]) / 2\n image_rect[0] = image_zoom_point_center - new_image_height / 2\n image_rect[2] = image_zoom_point_center + new_image_height / 2\n\n # keep the rect within the image bounds\n image_y_ul = max(image_rect[0], 0)\n image_x_ul = max(image_rect[1], 0)\n image_y_br = min(image_rect[2], self.variables.canvas_image_object.image_reader.full_image_ny)\n image_x_br = min(image_rect[3], self.variables.canvas_image_object.image_reader.full_image_nx)\n\n # re-adjust if we ran off one of the edges\n if image_x_ul == 0:\n image_rect[3] = new_image_width\n if image_x_br == self.variables.canvas_image_object.image_reader.full_image_nx:\n image_rect[1] = self.variables.canvas_image_object.image_reader.full_image_nx - new_image_width\n if image_y_ul == 0:\n image_rect[2] = new_image_height\n if image_y_br == self.variables.canvas_image_object.image_reader.full_image_ny:\n image_rect[0] = self.variables.canvas_image_object.image_reader.full_image_ny - new_image_height\n\n # keep the rect within the image bounds\n image_y_ul = max(image_rect[0], 0)\n image_x_ul = max(image_rect[1], 0)\n image_y_br = min(image_rect[2], self.variables.canvas_image_object.image_reader.full_image_ny)\n image_x_br = min(image_rect[3], self.variables.canvas_image_object.image_reader.full_image_nx)\n\n new_canvas_rect = self.variables.canvas_image_object.full_image_yx_to_canvas_coords(\n (image_y_ul, image_x_ul, image_y_br, image_x_br))\n new_canvas_rect = (\n int(new_canvas_rect[0]), int(new_canvas_rect[1]), int(new_canvas_rect[2]), int(new_canvas_rect[3]))\n\n background_image = self.variables.canvas_image_object.display_image\n self.variables.canvas_image_object.update_canvas_display_image_from_canvas_rect(new_canvas_rect)\n if self.variables.rescale_image_to_fit_canvas:\n new_image = PIL.Image.fromarray(self.variables.canvas_image_object.display_image)\n else:\n new_image = PIL.Image.fromarray(self.variables.canvas_image_object.canvas_decimated_image)\n if animate is True:\n # create frame sequence\n n_animations = self.variables.n_zoom_animations\n background_image = background_image / 2\n background_image = numpy.asarray(background_image, dtype=numpy.uint8)\n canvas_x1, canvas_y1, canvas_x2, canvas_y2 = new_canvas_rect\n display_x_ul = min(canvas_x1, canvas_x2)\n display_x_br = max(canvas_x1, canvas_x2)\n display_y_ul = min(canvas_y1, canvas_y2)\n display_y_br = max(canvas_y1, canvas_y2)\n x_diff = new_image.width - (display_x_br - display_x_ul)\n y_diff = new_image.height - (display_y_br - display_y_ul)\n pil_background_image = PIL.Image.fromarray(background_image)\n frame_sequence = []\n for i in range(n_animations):\n new_x_ul = int(display_x_ul * (1 - i / (n_animations - 1)))\n new_y_ul = int(display_y_ul * (1 - i / (n_animations - 1)))\n new_size_x = int((display_x_br - display_x_ul) + x_diff * (i / (n_animations - 1)))\n new_size_y = int((display_y_br - display_y_ul) + y_diff * (i / (n_animations - 1)))\n resized_zoom_image = new_image.resize((new_size_x, new_size_y))\n animation_image = pil_background_image.copy()\n 
animation_image.paste(resized_zoom_image, (new_x_ul, new_y_ul))\n frame_sequence.append(animation_image)\n fps = n_animations / self.variables.animation_time_in_seconds\n self.animate_with_pil_frame_sequence(frame_sequence, frames_per_second=fps)\n if self.variables.rescale_image_to_fit_canvas:\n self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)\n else:\n self.set_image_from_numpy_array(self.variables.canvas_image_object.canvas_decimated_image)\n self.update()\n self.redraw_all_shapes()\n self.variables.the_canvas_is_currently_zooming = False", "def refresh(self):\n\n # Delete old image (if needed) \n if self.canvas_image_id:\n self.canvas.delete(self.canvas_image_id)\n if debug > 5:\n print \"refresh: New image (x\", self.zoom, \") \", (self.xint, self.yint), (self.canvas[\"width\"], self.canvas[\"height\"]), [self.zoom * s for s in self.isize]\n\n scaled_isize = [self.xint[1] - self.xint[0],\n self.yint[1] - self.yint[0]]\n\n # Create the image for the canvas\n self.image = self.generator_func(self.zoom, self.xint, self.yint)\n self.canvas_image_id = self.canvas.create_image(0, 0, anchor=N+W,\n image=self.image)\n\n # Figure out where scroll bars should be and put them there.\n if self.xint[0] == 0 and int(self.isize[0] * self.zoom) == self.xint[1]:\n self.hscroll.grid_remove()\n else:\n self.hscroll.grid()\n self.hscroll.set(mapped_number(self.xint[0],\n (0, self.isize[0] * self.zoom -1),\n (0, 1)),\n mapped_number(self.xint[1] -1,\n (0, self.isize[0] * self.zoom -1),\n (0, 1)))\n if self.yint[0] == 0 and int(self.isize[1] * self.zoom) == self.yint[1]:\n self.vscroll.grid_remove()\n else:\n self.vscroll.grid()\n self.vscroll.set(mapped_number(self.yint[0],\n (0, self.isize[1] * self.zoom -1),\n (0, 1)),\n mapped_number(self.yint[1] -1,\n (0, self.isize[1] * self.zoom -1),\n (0, 1)))", "def draw_image(self):\n \n pixel_array = self.imageprepare(self.image_path)\n newArr = self.reshape_pixel_array(pixel_array)\n plt.imshow(newArr, interpolation='nearest')\n plt.savefig('MNIST_IMAGE.png')#save MNIST image\n plt.show()#Show / plot that image", "def disp(self,zoomc):\n\n MAG = 10 # magnification for Zoom display\n nbox = self.nbox # pixel size of cursor box\n nzbox = nbox * MAG # pixel size of Zoom display\n zoom = np.ndarray((nzbox,nzbox)) # Create 2D array for Zoom image\n for ixz in range(nzbox):\n ix = ixz // MAG # x-coord in full image\n for iyz in range(nzbox):\n iy = iyz // MAG # y-coord in full image\n zoom[iyz,ixz] = self.img[iy,ix]\n zmin = np.min(zoom)\n zmax = np.max(zoom)\n # Within Zoom, draw line around central pixel\n i1 = self.mbox * MAG\n i2 = i1 + MAG-1\n val = 255 # value used for edge outline\n for i in range(MAG):\n zoom[i1+i,i1] = val\n zoom[i1+i,i2] = val\n zoom[i1,i1+i] = val\n zoom[i2,i1+i] = val\n val = 255 - val\n plt.figure(WINDOW_ZOOM,figsize=(3,3))\n if zoomc: plt.title(\"Zoom Display (enhanced)\")\n else: plt.title(\"Zoom Display\")\n if self.label in ( 'T9', 'T10', 'TS'): # Temperature image\n tnorm, tcmap = tem_colours()\n if zoomc: \n plt.imshow(zoom, origin='lower', cmap = tcmap, \n norm=plt.Normalize(zmin,zmax) )\n else: \n plt.imshow(zoom, origin='lower', cmap = tcmap, norm=tnorm )\n else: # Pixel count image\n if zoomc: \n plt.imshow(zoom, origin='lower', vmin=zmin, vmax=zmax, cmap='gray')\n else:\n plt.imshow(zoom, origin='lower', cmap='gray')\n plt.axis('off')\n plt.tight_layout()", "def updateViewer(self):\n if not self.hasImage():\n return\n if len(self.zoomStack):\n self.fitInView(self.zoomStack[-1], 
self.aspectRatioMode) # Show zoomed rect.\n else:\n self.fitInView(self.sceneRect(), self.aspectRatioMode) # Show entire image.", "def show(self):\n\n self.image.show()", "def show_image(image):\r\n plt.imshow(image, cmap='gray')\r\n plt.show()", "def display(self):\n image_qt = ImageQt.ImageQt(self.view_state.get_image())\n self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(image_qt))\n self.imageLabel.adjustSize()", "def display(self):\n display(self.image)", "def show_img(self):\n if self.image is not None:\n cv2.imshow(self.image_window, self.image)\n cv2.waitKey(1)\n else:\n rospy.loginfo(\"No image to show yet\")", "def Crop(self, path):\n global img, crop\n self.window_created = True\n self.path = path\n self.crop = tk.Toplevel(None)\n self.crop.protocol(\"WM_DELETE_WINDOW\", self.CloseCropWindow)\n \n# tk.Label(crop, text=self.path).grid()\n self.crop.title(\"Crop Window\")\n #load image specified in path var\n img = Image.open(self.path)\n img = ImageTk.PhotoImage(img)\n #print img.height()\n #create canvas to show image\n global crop_canvas \n crop_canvas = tk.Canvas(master=self.crop, bg='#000',\n width=img.width(), height=img.height())\n \n crop_canvas.bind('<Button-1>', self.Btn1Pressed)\n crop_canvas.bind('<ButtonRelease-1>', self.Btn1Released)\n crop_canvas.bind('<B1-Motion>', self.Btn1Motion)\n \n \n crop_canvas.create_image(0,0,anchor=tk.NW, image=img)\n crop_canvas.image = img #keep image reference\n crop_canvas.grid(sticky=tk.NW)\n self.crop.focus_set()\n \n #btns for zoom functionality\n \"\"\"\n zoom_in = tk.Button(master=self.crop_canvas,text='+', anchor=tk.NE,\n command=self.ZoomIn)\n zoom_out = tk.Button(master=self.crop_canvas,text='-',anchor=tk.NE, \n command=self.ZoomOut)\n \"\"\"\n #zoom_in.place(x=img.width()-14,y=0)\n #zoom_out.place(x=img.width()-14,y=30)", "def show(image):\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show_image(self):\n cv2.imshow(self.config.DISPLAY_NAME, self.image)", "def show(image):\n fig = pyplot.figure()\n axis = fig.add_subplot(1, 1, 1)\n imgplot = axis.imshow(image)\n imgplot.set_interpolation('nearest')\n axis.xaxis.set_ticks_position('top')\n axis.yaxis.set_ticks_position('left')\n pyplot.show()", "def plot(self,id=1,dpi=150):\n fig = plt.figure(id)\n ax1 = fig.add_subplot(111)\n ax1.imshow(self.image,interpolation='nearest',extent=[self.xmin,self.xmax,\n self.ymin,self.ymax], origin='lower')\n #plt.savefig('.png',dpi=dpi)\n plt.draw()", "def resizeEvent(self, event):\n self.image_canvas.fit_in_view()", "def show_image(im, rescale=False) :\r\n \r\n plt.figure()\r\n im = im.copy()\r\n im.resize(*LFW_IMAGESIZE)\r\n if rescale :\r\n plt.imshow(im.astype(float), cmap=plt.cm.get_cmap(\"gray\"))\r\n else :\r\n plt.imshow(im.astype(float), cmap=plt.cm.get_cmap(\"gray\"), vmin=0, vmax=255)\r\n plt.axis('off')\r\n plt.show()", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n 
ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def showimage(image):\n mplt.figure()\n mplt.imshow(image)\n mplt.show()", "def update_canvas_display_image_from_full_image(self):\n\n full_image_rect = (0, 0, self.image_reader.full_image_ny, self.image_reader.full_image_nx)\n self.update_canvas_display_image_from_full_image_rect(full_image_rect)", "def on_draw_over_image(self):", "def showResized(name, image, scale):\n image = resizeImage(image, scale)\n cv.ShowImage(name, image)", "def update_image(self, path=None):\n if path:\n self.image_path.current = path\n\n if self.image_path.current == self.image_canvas.image_path:\n self.image_canvas.fit_in_view()\n else:\n self.image_canvas.draw_image(self.image_path.current)", "def display(self, image):\n raise NotImplementedError()", "def __wheel(self, event):\n x = self.canvas_image.canvasx(event.x) # get coordinates of the event on the canvas\n y = self.canvas_image.canvasy(event.y)\n if self.outside(x, y): return # zoom only inside image area\n scale = 1.0\n # Respond to Linux (event.num) or Windows (event.delta) wheel event\n if event.num == 5 or event.delta == -120: # scroll down, smaller\n if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels\n self.imscale /= self.__delta\n scale /= self.__delta\n if event.num == 4 or event.delta == 120: # scroll up, bigger\n i = min(self.canvas_image.winfo_width(), self.canvas_image.winfo_height()) >> 1\n if i < self.imscale: return # 1 pixel is bigger than the visible area\n self.imscale *= self.__delta\n scale *= self.__delta\n # Take appropriate image from the pyramid\n k = self.imscale * self.__ratio # temporary coefficient\n self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)\n self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))\n #\n self.canvas_image.scale('all', x, y, scale, scale) # rescale all objects\n # Redraw some figures before showing image on the screen\n self.redraw_figures() # method for child classes\n self.__show_image()", "def visualizeImg(img):\n plt.figure(figsize=(10,4))\n plt.imshow(img)\n plt.show()", "def show(self, exec_rasterize = False):\n\n if (exec_rasterize):\n self.rasterize()\n\n Image.fromarray(self._image).show()", "def show_env(self, img):\n plt.figure(1)\n plt.subplot(111)\n plt.imshow(img, interpolation=\"nearest\")\n plt.show()", "def update(self):\n cv2.imshow(self.window_name, self.map.get_crop())", "def __wheel(self, event):\n x = self.canvas.canvasx(event.x) # get coordinates of the event on the canvas\n y = self.canvas.canvasy(event.y)\n if self.outside(x, y): return # zoom only inside image area\n scale = 1.0\n # Respond to Linux (event.num) or Windows (event.delta) wheel event\n if event.num == 5 or event.delta == -120: # scroll down, smaller\n if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels\n self.imscale /= self.__delta\n scale /= self.__delta\n if event.num == 4 or event.delta == 120: # scroll up, bigger\n i = min(self.canvas.winfo_width(), self.canvas.winfo_height()) >> 1\n if i < self.imscale: return # 1 pixel is bigger than the visible area\n self.imscale *= self.__delta\n scale 
*= self.__delta\n # Take appropriate image from the pyramid\n k = self.imscale * self.__ratio # temporary coefficient\n self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)\n self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))\n #\n self.canvas.scale('all', x, y, scale, scale) # rescale all objects\n # Redraw some figures before showing image on the screen\n self.redraw_figures() # method for child classes\n self.__show_image()", "def show_map_window(image):\n cv2.imshow(_WINDOW_NAME, image)", "def update_image(self, cv_img):\n\t\tqt_img = self.ImageEdits(cv_img)\n\t\tself.camera.setPixmap(qt_img)", "def show_image(dataset, domain, image_class, image_name):\n\timage_file = io.imread(os.path.join(\"data\", dataset, domain, \"images\", image_class, image_name))\n\tplt.imshow(image_file)\n\tplt.pause(0.001)\n\tplt.figure()", "def on_zoom_change(self, event) -> None:\r\n\r\n zoom_level = int(self.zoom_scale.get())\r\n self.painter.zoom = zoom_level\r\n self.painter.draw_board()", "def onclick_open_image(self):\n filename = select_file(\n \"Select Image\",\n \"../\",\n \"Image Files (*.jpeg *.jpg *.png *.gif *.bmg)\")\n if filename:\n param_name = select_file(\n \"Select Parameter\", \"../\", \"Parameter Files (*.json)\")\n if param_name:\n self.moildev = Moildev(param_name)\n self.image = read_image(filename)\n self.h, self.w = self.image.shape[:2]\n self.show_to_window()", "def _render_static_image_annotation(self):\n cv2.rectangle(self._image,\n (0,0), (640, 40),\n (0, 0, 0),\n -1)\n \n cv2.putText(self._image,self._current_mode, (40, 25),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, 255, 2)\n\n cv2.putText(self._image, time.asctime(), (400, 460),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, 255, 2)", "def show_image(self, idx):\n image, target = self.__getitem__(self, idx)\n im_h, im_w, _ = image.size()\n labels_num = target['labels']\n rescale = torch.tensor([[im_w, im_h, im_w, im_h]])\n bboxs = target['boxes'] * rescale\n img = image.permute(1, 2, 0).numpy()\n for i, bboxe in enumerate(bboxs):\n x, y, xm, ym = bboxe\n label = class_name[int(labels_num[i])]\n plot_one_box((int(x), int(y), int(xm), int(ym)), img, label=label, line_thickness=3)\n cv2.imshow('image', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def display_image(self, img, img_pos):\n image = tk.Label(self.top, image=img)\n image.grid(row=img_pos[0], column=img_pos[1],\n columnspan=img_pos[2], rowspan=img_pos[3])", "def draw_img(self, i, j, k):\n if k < len(self.images):\n img = self.images[k]\n r = self.get_rect(i, j)\n self.screen.blit(img, r)", "def show_image(path):\n img = mpimg.imread(path)\n imgplot = plt.imshow(img)\n plt.show()\n plt.close()", "def show(self):\n if self.video:\n self.video.write(self.img)\n cv2.imshow('Simpy', self.img)\n cv2.waitKey(1000 // self.fps)", "def OnPaint(self, event):\n bmp = self._bitmap\n if bmp is None:\n return\n\n bmp_width, bmp_height = bmp.GetWidth(), bmp.GetHeight()\n if bmp_width == 0 or bmp_height == 0:\n return\n\n evt_x = 0\n evt_y = 0\n evt_width, evt_height = self.GetSize().asTuple()\n\n if not self._scaled_contents:\n # If the image isn't scaled, it is centered if possible.\n # Otherwise, it's painted at the origin and clipped.\n paint_x = max(0, int((evt_width / 2. - bmp_width / 2.) + evt_x))\n paint_y = max(0, int((evt_height / 2. - bmp_height / 2.) 
+ evt_y))\n paint_width = bmp_width\n paint_height = bmp_height\n else:\n # If the image *is* scaled, it's scaled size depends on the \n # size of the paint area as well as the other scaling flags.\n if self._preserve_aspect_ratio:\n bmp_ratio = float(bmp_width) / bmp_height\n evt_ratio = float(evt_width) / evt_height\n if evt_ratio >= bmp_ratio:\n if self._allow_upscaling:\n paint_height = evt_height\n else:\n paint_height = min(bmp_height, evt_height)\n paint_width = int(paint_height * bmp_ratio)\n else:\n if self._allow_upscaling:\n paint_width = evt_width\n else:\n paint_width = min(bmp_width, evt_width)\n paint_height = int(paint_width / bmp_ratio)\n else:\n if self._allow_upscaling:\n paint_height = evt_height\n paint_width = evt_width\n else:\n paint_height = min(bmp_height, evt_height)\n paint_width = min(bmp_width, evt_width)\n # In all cases of scaling, we know that the scaled image is\n # no larger than the paint area, and can thus be centered.\n paint_x = int((evt_width / 2. - paint_width / 2.) + evt_x)\n paint_y = int((evt_height / 2. - paint_height / 2.) + evt_y)\n\n # Scale the bitmap if needed, using a faster method if the\n # image is currently being resized\n if paint_width != bmp_width or paint_height != bmp_height:\n img = bmp.ConvertToImage()\n if self._resizing:\n quality = wx.IMAGE_QUALITY_NORMAL\n else:\n quality = wx.IMAGE_QUALITY_HIGH\n img.Rescale(paint_width, paint_height, quality)\n bmp = wx.BitmapFromImage(img)\n\n # Finally, draw the bitmap into the computed location\n dc = wx.PaintDC(self)\n dc.DrawBitmap(bmp, paint_x, paint_y)", "def generatePreview(self):\n self.saveParameters()\n image=self.simulation.generatePreview()\n # convert pil image to a tkinter image\n self.photo = ImageTk.PhotoImage(image)\n\n # display image\n self.preview.create_image(0, 0, anchor='nw', image=self.photo)", "def show_shot(path_to_images, name_image):\n crrt_image = misc.imread(\"./{}/{}\".format(path_to_images, name_image))\n\n plt.imshow(crrt_image)\n\n plt.draw()\n plt.pause(0.5)", "def apply_zoom(self):\n self.maparea.setTransform(self.zoom_levels[self.cur_zoom][1])\n self.scene.draw_visible_area()", "def get_tkimage(self):\n self.drawer.flush()\n return PIL.ImageTk.PhotoImage(self.img)", "def updateViewer(self):\n if not self.hasImage():\n return\n if self.zoom<0:\n self.fitInView(self.sceneRect(), self.aspectRatioMode)\n self.zoom=self.size().width()/self.scene.width()\n else:\n self.setTransform(QTransform().scale(self.zoom, self.zoom))", "def _display_img(self):\n if self._file_path is None:\n Debug.printi(\"No picture has been loaded to preview\", Debug.Level.ERROR)\n return\n photo = self._open_img(self._file_path)\n ImageViewDialog(self._parent, self._file_name, photo)", "def show_image(f, x, y):\n window_surface.blit(f, (x, y))", "def showImage(self, img):\n cv2.namedWindow(self.NAME_WINDOW,cv2.WINDOW_NORMAL)\n cv2.resizeWindow(self.NAME_WINDOW, 300, 700)\n cv2.imshow(self.NAME_WINDOW , img)\n cv2.waitKey(0)", "def update_image(self, surface):\n self.ui_widget.update_image(surface=surface)", "def draw_image_on_canvas(self, force_generation=False):\n\n self.canvas_vertex = (self.canvas.canvasx(0), self.canvas.canvasy(0))\n box_coords = (self.canvas_vertex[0], self.canvas_vertex[1],\n self.canvas_vertex[0] + self.frame.width, self.canvas_vertex[1] + self.frame.height)\n\n # some weird bug with canvas being 0 when scrolling back to origin\n if box_coords[0] == -1:\n box_coords = (box_coords[0] + 1, box_coords[1], box_coords[2] + 1, box_coords[3])\n\n if box_coords[1] == 
-1:\n box_coords = (box_coords[0], box_coords[1] + 1, box_coords[2], box_coords[3] + 1)\n\n self.box_coords = box_coords\n\n image, self.top_left = self.get_image(box_coords, force_generation=force_generation)\n\n if image is not None:\n self.canvas.delete(\"all\")\n\n # this ownership is necessary, or the image does not show up on the canvas\n self.image = ImageTk.PhotoImage(image=image)\n\n self.image_on_canvas = self.canvas.create_image(\n self.top_left[0], self.top_left[1], image=self.image, anchor=\"nw\")", "def small_image(self):\n pass", "def zoom_augmentation():\n # Get the width and the height of the zoomed version\n x_len, y_len = np.random.randint(250, 350, size=2)\n # Get left upper ,right and lower bound of the pixels in the original image\n left = np.random.randint(x_size-x_len)\n upper = np.random.randint(y_size-y_len)\n right, lower = left + x_len, upper+y_len\n # Crops the box and resizes it to the original image size\n box = (left, upper, right, lower)\n return lambda image: image.transform(image.size, Image.EXTENT, box)", "def display_image(self, window_name, image):\n cv2.namedWindow(window_name)\n cv2.imshow(window_name, image)\n cv2.waitKey(0)", "def map_image(res):\n # constants\n MAP_URL = \"https://maps.googleapis.com/maps/api/staticmap\"\n SIZE = \"400x400\"\n\n polygon_path = mh.get_polygon_path(res)\n origin = mh.get_latlon(mh.get_origin(res))\n destination = mh.get_latlon(mh.get_destination(res))\n params = {\n \"size\": SIZE,\n \"path\": f\"enc:{polygon_path}\",\n \"markers\": [f\"color:red|label:X|{destination}\", f\"size:small|color:blue|{origin}\"],\n \"key\": key\n }\n img_resp = requests.get(url=MAP_URL, params=params)\n return img_resp.url", "def draw_image(self, image, src_coor, src_size, dest_coor, dest_size, angle = 0):\n img = Image_process.update(image, src_coor, src_size, dest_size, angle)\n self.canvas.create_image(dest_coor, image=img)", "def getimage(self):", "def imshow(img):\n imadd(img)\n plt.ion()\n plt.show()", "def show_image(self, idx, **kwargs):\n img, labels = self.__getitem__(idx)\n img = img.numpy()\n img = np.squeeze(img, axis=0)\n ax = plt.imshow(img, **kwargs)\n return ax", "def display(self):\n\t\tself.imgDisplay.set_from_pixbuf(self.getVisible())\n\t\tgc.collect()", "def show_image(file_location):\n img = Image.open(file_location)\n img.show()", "def show(self, image_dir_root=None):\n self.get_image(image_dir_root=image_dir_root).show()", "def exportImg(self):\n if self.superSampling:\n print(\"Exporting with size adjusted\")\n self.img = self.img.resize((int(self.width/2),int(self.height/2)),Image.NEAREST)\n self.img.save(self.fileName,\"PNG\")", "def linux_zoomer_plus(self, event):\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def update_image(self):\n self.image = Image.fromarray(self.img)", "def update_current_image(self):\n\n rect = (0, 0, self.variables.canvas_width, self.variables.canvas_height)\n if self.variables.canvas_image_object is not None:\n self.variables.canvas_image_object.update_canvas_display_image_from_canvas_rect(rect)\n self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)\n self.update()", "def showImage(self, filePath): \n size = 244, 244 \n try:\n guiobjects.generateImageSize(filePath, [244, 244], IMG_UPLOAD)\n except:\n return \n imgPath = IMG_UPLOAD\n img = ocempgui.draw.Image.load_image(imgPath)\n self.imgOptionsTab.picture = img\n self.generateMask(\"imgUpload.png\")", "def zoomMap(self, scale, 
x=0, y=0):\n if self.zoomed:\n self.delete(self.zoomed)\n self.zoomed = self.im.zoom(scale, scale)\n zoomed_id = self.create_image(x, y, image=self.zoomed, anchor=NW)\n self.delete(self.original)\n self.scale = scale", "def set_img(self, img):\n self.img = img", "def show_image(self, image_set='train', index=None, interactive_mode=True):\n if interactive_mode:\n plt.ion()\n else:\n plt.ioff()\n\n if image_set == 'train':\n target = self.train_dataset\n else:\n target = self.test_dataset\n\n if index is None:\n index = randint(0, len(target['data']))\n\n plt.figure(num=self.LABELS[target['labels'][index]])\n plt.imshow(target['data'][index])\n plt.show()", "def fit_image(self, img, width, height):\n if img.get_height()/height > img.get_width()/width:\n # scale is determined by width\n w = width\n h = int(math.ceil(img.get_height() * (w/img.get_width())))\n else:\n # scale is determined by height\n h = height\n w = int(math.ceil(img.get_width() * (h/img.get_height())))\n img = pygame.transform.smoothscale(img, (w,h))\n rect = img.get_rect()\n rect = rect.move((width-w)//2, (height-h)//2)\n img2 = pygame.Surface((width, height))\n img2.blit(img, rect)\n return img2", "def draw(self):\r\n self.screen.blit(self.image, self.image.get_rect())", "def __init__(self, parent, top, lmap):\n Canvas.__init__(self, parent, width=512, height=512)\n # Bind drag and drop events to canvas and pack it in mapcontainer\n self.bind('<ButtonPress-1>', self.grab)\n self.bind('<ButtonRelease-1>', self.drop)\n self.bind('<B1-Motion>', self.drag)\n self.pack(side='left', fill=BOTH, expand=1)\n\n self.xpos = 0 # X coord of mouse grab event\n self.ypos = 0 # Y coord of mouse grab event\n self.scale = 1 # Current zoom level\n self.im = None # Ref to original image, on which zoom is based\n self.original = None # image id, as first added to canvas\n self.zoomed = None # image id, as zoomed on canvas\n\n self.lmap = lmap\n self.drawMap(lmap)", "def show(self) -> None:\n cv.imshow(str(self.__class__), self.output_image)", "def setImage(self, img):\n self.img.setPixmap(QtGui.QPixmap(img))", "def display(self):\n nrow = 2\n ncol = len(self.views) + 1\n rows = [(self.views[0].original, len(self.views)),\n (self.views[0].image, len(self.views) + 1)]\n fig, axes = plt.subplots(nrows=nrow, ncols=ncol,\n figsize=self._figsize(rows),\n squeeze=True)\n originals = [(v.position.id, v.original) for v in self.views] + [\n ('combined', np.median(np.stack([v.original for v in self.views]), axis=0))]\n warped = [(v.position.id, v.image) for v in self.views] + [\n ('combined', self.image)]\n for ax, (title, img) in zip(axes.ravel(), originals + warped):\n ax.imshow(img)\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=title)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def setImage(self, image=None, autoLevels=None, **kargs):\n profile = debug.Profiler()\n\n gotNewData = False\n if image is None:\n if self.image is None:\n return\n else:\n old_xp = self._xp\n cp = getCupy()\n self._xp = cp.get_array_module(image) if cp else numpy\n gotNewData = True\n processingSubstrateChanged = old_xp != self._xp\n if processingSubstrateChanged:\n self._processingBuffer = None\n shapeChanged = (processingSubstrateChanged or self.image is None or image.shape != self.image.shape)\n image = image.view()\n if self.image is None or image.dtype != self.image.dtype:\n self._effectiveLut = None\n self.image = image\n 
self._imageHasNans = None\n if self.image.shape[0] > 2**15-1 or self.image.shape[1] > 2**15-1:\n if 'autoDownsample' not in kargs:\n kargs['autoDownsample'] = True\n if shapeChanged:\n self.prepareGeometryChange()\n self.informViewBoundsChanged()\n\n profile()\n\n if autoLevels is None:\n if 'levels' in kargs:\n autoLevels = False\n else:\n autoLevels = True\n if autoLevels:\n level_samples = kargs.pop('levelSamples', 2**16) \n mn, mx = self.quickMinMax( targetSize=level_samples )\n # mn and mx can still be NaN if the data is all-NaN\n if mn == mx or self._xp.isnan(mn) or self._xp.isnan(mx):\n mn = 0\n mx = 255\n kargs['levels'] = [mn,mx]\n\n profile()\n\n self.setOpts(update=False, **kargs)\n\n profile()\n\n self._renderRequired = True\n self.update()\n\n profile()\n\n if gotNewData:\n self.sigImageChanged.emit()\n if self._defferedLevels is not None:\n levels = self._defferedLevels\n self._defferedLevels = None\n self.setLevels((levels))", "def draw(self, canvas):\n canvas.draw_polygon([self._top_left_, self._top_right_, self._bot_right_, self._bot_left_],\n 3, \"red\")\n # draw_image(image, center_source, width_height_source, center_dest, width_height_dest, rotation=0)\n # print(\"self._tilemap_coord[0]\", self._tilemap_coord[0])\n # print(\"self._tilemap_coord[1]\", self._tilemap_coord[1])\n canvas.draw_image(\n # image\n PLATFORM_TILEMAP,\n # center_source\n [(self._tilemap_coord[0] + 0.5) * shooter_global_variables.TILE_DIM,\n (self._tilemap_coord[1] + 0.5) * shooter_global_variables.TILE_DIM],\n # width_height_source\n [shooter_global_variables.TILE_DIM, shooter_global_variables.TILE_DIM],\n # center_dest\n self._pos_,\n # width_height_dest\n PLATFORM_INFO.get_size())\n canvas.draw_text(str(round(self._pos_[1] / TILE_DIM - 1)) + \", \"\n + str(round(self._pos_[0] / TILE_DIM - 1)),\n [self._top_left_[0] + TILE_DIM / 3, self._pos_[1]], 20, \"white\")\n # draw tilemap here", "def display_image(mat):\n\timg = Image.fromarray(mat)\n\timg.show()", "def plot(self):\n self.fig = plt.figure('black hole')\n self.fig.clf() #clear the graph to avoir superposing data from the same set (can be deactivated if need to superpose)\n self.ax = plt.subplot()\n\n if self.img2 is not None:\n self.ax.imshow(self.img2)\n else:\n print(\"No black hole deformation in the memory, displayed the original image instead.\")\n self.ax.imshow(self.img_debut)\n\n self.fig.canvas.set_window_title('Black hole')\n self.ax.set_title(\"scrool to zoom in or out \\nright click to add an offset in the background \\nleft click to refresh image \\n close the option windows to stop the program\")\n self.fig.canvas.mpl_connect('scroll_event', self.onscroll)\n self.fig.canvas.mpl_connect('button_press_event', self.onclick)\n self.fig.canvas.mpl_connect('axes_leave_event', self.disconnect)\n self.fig.canvas.mpl_connect('axes_enter_event', self.connect)\n\n self.draw()", "def plt_show_image(image):\r\n plt.imshow(image)\r\n plt.axis('off')\r\n plt.axis('image')\r\n plt.tight_layout(pad=0)", "def windows_zoomer(self, event):\n if event.delta > 0:\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n elif event.delta < 0:\n self.canvas.scale(\"all\", event.x, event.y, 0.9, 0.9)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def display_image(self, image):\n self._frame.clear()\n self._frame.setMinimumWidth(500)\n self._frame.setMinimumHeight(300)\n self._frame.setAlignment(Qt.AlignCenter)\n\n if image is not None:\n if image.is_valid():\n pixmap = image.to_qt_pixmap(self._frame.size())\n 
self._frame.setPixmap(pixmap)\n else:\n self._frame.setText(\"Image Not Found\")", "def load_image_i(self, img_tk):\n\n self.p2_label_img.configure(image=img_tk)\n self.p2_label_img.image = img_tk" ]
[ "0.6751016", "0.6738683", "0.66990304", "0.66660595", "0.65815693", "0.64749587", "0.6393063", "0.63925314", "0.63365763", "0.6326308", "0.62597394", "0.6224787", "0.62157637", "0.6211517", "0.6196586", "0.61757445", "0.6165809", "0.61133194", "0.6083976", "0.6078011", "0.6077065", "0.60603374", "0.6057575", "0.60385984", "0.60185057", "0.60122335", "0.6000883", "0.5989634", "0.5981993", "0.5981524", "0.5980602", "0.5980602", "0.5980602", "0.59630555", "0.5960431", "0.5958253", "0.5940977", "0.5925327", "0.59098023", "0.58988994", "0.58926374", "0.5892252", "0.58801174", "0.58584696", "0.58346653", "0.5828659", "0.5823317", "0.5819262", "0.58164245", "0.58135027", "0.58124834", "0.5810925", "0.57993275", "0.5797246", "0.57841295", "0.5780185", "0.5775285", "0.5768441", "0.5760534", "0.57438296", "0.57431895", "0.5742709", "0.5741216", "0.5737286", "0.5719228", "0.57092065", "0.57039285", "0.5700602", "0.5691197", "0.5675637", "0.5667148", "0.56615263", "0.5649039", "0.56463057", "0.564427", "0.56328714", "0.5632268", "0.5631682", "0.5623023", "0.562142", "0.56151515", "0.5611467", "0.5609725", "0.56050557", "0.56038475", "0.5603264", "0.56006694", "0.5596596", "0.5581039", "0.5580408", "0.5567411", "0.5565472", "0.55609083", "0.555553", "0.55527836", "0.55364543", "0.5532497", "0.5531262", "0.5518279", "0.55110484" ]
0.636585
8
Provides consistent naming to statistic descriptors
def stat_by_group(stat: str, group: str) -> str: return f'{stat} by {group}'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def DescriptiveName(self):\r\n\t\treturn self._get_attribute('descriptiveName')", "def name(self):\n return 'data_extraction_for_' + '_'.join(self.names).lower()", "def DescriptiveName(self):\n return self._get_attribute('descriptiveName')", "def name(self):\n return '{} {} {}'.format(self.var_period, self.var_type,\n self.var_detail)", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def name(self):\n name = self.function_name\n\n # Feature type is based on additional data that used\n # for example if insight is for Healthsites Facilities\n # than feature type is Healthsites Facilities\n\n if self.feature_type:\n name = '%s for %s' % (name, self.feature_type)\n return name", "def get_suffstat_names():\n params = ['sum_x', 'sum_x_squared']\n return params", "def _get_name(self):\n return '%s: %s-%s' % (\n self.fcs_number,\n self.parameter_type,\n self.parameter_value_type)", "def name(self):\n\t\t# This is necessary for ColumnLists that are used\n\t\t# for CondDescs as well. Ideally, we'd do this on an\n\t\t# InputKeys basis and yield their names (because that's what\n\t\t# formal counts on), but it's probably not worth the effort.\n\t\treturn \"+\".join([f.name for f in self.inputKeys])", "def name(self):", "def name(self):", "def name(self):", "def name(self):", "def desc(self):\n kpi_name = 'id:{idx}, resource: {rsc}, group: {grp}, metric: {mtr}'\\\n .format(idx=self.idx,\n rsc=self.resource,\n grp=self.group,\n mtr=self.metric)\n return kpi_name", "def name(self):\n return self.data[\"attributes\"][\"stats\"][\"name\"]", "def get_name():", "def _metric_name(self, suffix):\r\n return '{}.{}'.format(self.METRIC_NAME, suffix)", "def name(self):\r\n pass", "def get_name():\n return \"SVMd+\"", "def get_name(self):", "def get_name(self):", "def get_name():\n return \"SVMd+ - simplified approach\"", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def getName(obj):", "def getName(self):", "def name(self):\n pass", "def names(self):\n labels = [\n \"$X_{%i}$\" % i if d.name is None else d.name\n for i, d in enumerate(self.dimensions)\n ]\n return labels", "def name(self) -> str:\n return f\"{self._obj_name} count\"", "def name(self):\n ...", "def get_name(self):\n return \"{0}: \".format(self.__class__.__name__)", "def name(self):\n return f\"{self._tc_object.name} {SENSOR_TYPES[self.type][0]}\"", "def name(self) -> str: # pragma: no cover", "def name(self):\n raise NotImplementedError # pragma: no cover", "def name(self):\n base_str = 'd{}{}_'.format(self.derivative_count if\n self.derivative_count > 1 else '', self.expr)\n for var, count in self.variable_count:\n base_str += 'd{}{}'.format(var, count if count > 1 else '')\n return base_str", "def get_data_name(data_func, data_type, npoints, y_error_sigma, x_error_sigma):\n data_name = '{}_{}'.format(data_func.__name__, data_type)\n if data_func.__name__ != 'get_image':\n data_name += 'funcs'\n data_name += '_{}pts_{}ye'.format(npoints, y_error_sigma)\n if x_error_sigma is not None:\n data_name += '_{}xe'.format(x_error_sigma)\n return data_name.replace('.', '_')", "def get_highly_correlated_feature_names(self):", "def target_naming(ty,target):\n de = ty.description(target)\n de = de[0].upper() + de[1:] + \".\"\n return de", "def get_label(name):\n lower = name.lower()\n vals = lower.split('_')\n if 'ho' in vals:\n name = 'Independent Estimate'\n elif 'alldata' in vals:\n name = 'Extra-Data 
Estimate'\n elif 'ris' in vals[0]:\n name = 'RIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n elif 'is' in vals[0]:\n name = 'OIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n if 'dr' in vals:\n name += ' DR'\n if 'wdr' in vals:\n name += ' WDR'\n return name", "def _predefined_statistics() -> str:\n return \"\\n\".join(\n f\"[{i}] {name}\"\n for i, (name, _) in enumerate(PermutationStatistic._STATISTICS)\n )", "def get_descriptive_name(self):\n return f\"{self.year} {self.make} {self.model}\".title()", "def __str__(self):\n return '{} - {} : mean = {}, std = {}'.format(self.name, self.type, self.mean, self.std)", "def get_name(self):\n pass", "def get_name(self):\n pass", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def display_name(self):", "def __name__(self):\n return '_'.join([function.__name__ for function in self.functions])", "def get_name(self):\r\n raise NotImplementedError", "def _generate_expanded_column_names(self):\n\n names = []\n # Get names of the descriptors\n des_names = [column for column in self.descriptor_dataframe][1:]\n\n # Generate expanded descriptor names for each compound\n for i in range(self.total_compounds):\n for des_name in des_names:\n name = 'compund_{}_{}'.format(i, des_name)\n names.append(name)\n\n return names", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def sample_name_colname(self):\n return SAMPLE_NAME_ATTR \\\n if SAMPLE_NAME_ATTR == self.st_index else self.st_index", "def getName(self, index) -> Str:\n ...", "def name(self):\n return f\"{self._name} {SENSOR_TYPES[self.sensor][0]}\"", "def get_describe_name(self):\n long_name = str(self.year)+ ' ' + self.make.title()+ ' ' +self.model.title()\n return long_name", "def map_stat_name(self, generic_name):\n pass", "def name(self):\n return '{:.2f}_{:d}_{:s}_{:d}'.format(self.A, self.Z,\n self.species, self.C)", "def describe(self) -> str:\n return self.__class__.__name__", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def tname(self) -> str:", "def name(self):\n return f\"{self._name}_{self._sensor}\"", "def _get_histname(self, plot, var, frame):\n return '_'.join([plot, var, frame])", "def name(self) -> str:\n ...", "def name(self) -> str:\n ...", "def getName(self):\n l = []\n for wt in self.weights:\n l.append(chr( int( 97 + (sum(map(sum,wt)) * 10) % 26 ) ))\n for bs in self.bias:\n #print(\"BS: \"+str(bs[0]))\n l.append(chr( int( 97 + (sum(bs) * 10) % 26 ) ))\n l[0] = chr(ord(l[0]) - 32)\n self.name = ''.join(l)\n return self.name", "def get_display_names(self, exp):\n alias = str(exp.id)\n column_display_names = [\n field.di_display_name if field.di_display_name else field.name\n for field in exp.measurementmodel._meta.get_fields() \n if getattr(field, 'di_show', False)\n ]\n return tuple('%s_%s' % (name, alias) for name in 
column_display_names)", "def name(self) -> str:\n if self._name is None:\n return 'AutoML Metric'\n else:\n return self._name", "def _getMetadataName(self):\n pass", "def name(self):\n return f\"{self._name.replace('_', ' ')}\".title()", "def memberName(self, p_int): # real signature unknown; restored from __doc__\n return \"\"", "def df_sample_names(self):\n return self.abundance_mat_mult(True)", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name" ]
[ "0.667977", "0.6442585", "0.6435272", "0.6424433", "0.6370413", "0.6370413", "0.6370413", "0.6370413", "0.6370413", "0.6370413", "0.6352662", "0.63082206", "0.62384665", "0.6236644", "0.62084746", "0.62084746", "0.62084746", "0.62084746", "0.61990166", "0.6192866", "0.6161828", "0.6138022", "0.6130863", "0.6130674", "0.6105846", "0.6105846", "0.61020064", "0.6098286", "0.6098286", "0.6098286", "0.6098286", "0.6098286", "0.6085306", "0.6084199", "0.6063693", "0.60579175", "0.6053813", "0.60402733", "0.60300505", "0.6010699", "0.6008598", "0.60056293", "0.60032934", "0.5999951", "0.5984953", "0.5976563", "0.5976008", "0.59627354", "0.59474814", "0.59453374", "0.5936843", "0.5936843", "0.5930375", "0.5930375", "0.5930375", "0.5930375", "0.5930375", "0.5930375", "0.5930375", "0.5930375", "0.5930375", "0.5930375", "0.5920061", "0.59062", "0.5905883", "0.5901334", "0.58970577", "0.58970577", "0.58970577", "0.58970577", "0.5892389", "0.58776706", "0.58753294", "0.58649516", "0.5860413", "0.5853828", "0.5852558", "0.58387876", "0.58321154", "0.58321154", "0.58321154", "0.58321154", "0.5830191", "0.58263725", "0.58245164", "0.58177984", "0.58177984", "0.58126867", "0.58118993", "0.58091736", "0.58020407", "0.57999694", "0.5797302", "0.57931745", "0.57885784", "0.57885784", "0.57885784", "0.57885784", "0.57885784", "0.57885784", "0.57885784" ]
0.0
-1
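As an aside before the next record: the retrieved document above is a one-line naming helper, and the short self-contained sketch below shows how it might be exercised to produce consistently formatted descriptor labels. The statistic and group names used here are illustrative assumptions, not values taken from the dataset.

def stat_by_group(stat: str, group: str) -> str:
    # Build a consistently formatted descriptor such as 'mean by age'.
    return f'{stat} by {group}'

# Hypothetical usage: derive descriptor labels for a small grid of statistics and groups.
labels = [stat_by_group(stat, group)
          for stat in ('mean', 'median')
          for group in ('age', 'region')]
print(labels)  # ['mean by age', 'mean by region', 'median by age', 'median by region']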
Repeat the retrieval of the metrics of a metrics context until at least one of the specified metric group names has data. Returns the MetricGroupValues object for the metric group that has data.
def wait_for_metrics(metric_context, metric_groups): retries = 0 got_data = False while not got_data: mr_str = metric_context.get_metrics() mr = zhmcclient.MetricsResponse(metric_context, mr_str) for mg_values in mr.metric_group_values: if mg_values.name in metric_groups: got_data = True if DEBUG_METRICS_RESPONSE: print("Debug: MetricsResponse:") print(mr_str) break if not got_data: if retries > GET_METRICS_MAX_RETRIES: return None time.sleep(GET_METRICS_RETRY_TIME) # avoid hot spin loop retries += 1 return mg_values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def result_group(group_id, failures=False, wait=0, count=None, cached=Conf.CACHED):\n if cached:\n return result_group_cached(group_id, failures, wait, count)\n start = time.time()\n if count:\n while 1:\n if count_group(group_id) == count or wait and (time.time() - start) * 1000 >= wait >= 0:\n break\n tile.sleep(0.01)\n while 1:\n r = None#Task.get_result_group(group_id, failures)\n if r:\n return r\n if (time.time() - start) * 1000 >= wait >= 0:\n break\n time.sleep(0.01)", "def metrics_group():", "def result_group_cached(group_id, failures=False, wait=0, count=None, broker=None):\n if not broker:\n broker = get_broker()\n start = time.time()\n if count:\n while 1:\n if count_group(group_id) == count or wait and (time.time() - start) * 1000 >= wait >= 0:\n break\n tile.sleep(0.01)\n while 1:\n group_list = broker.cache.get('{}:{}:keys'.format(broker.list_key, group_id))\n if group_list:\n result_list = []\n for task_key in group_list:\n task = signing.SignedPackage.loads(broker.cache.get(task_key))\n if task['success'] or failures:\n result_list.append(task['result'])\n return result_list\n if (time.time() - start) * 1000 >= wait >= 0:\n break\n time.sleep(0.01)", "def _compute_group_stats():\n group_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # TODO: move this to property of evaluation group or add dedicated data model.\n # GOAL: should be configurable from within the Django admin backend.\n #\n # MINIMAL: move to local_settings.py?\n #\n # The following dictionary defines the number of HITs each group should\n # have completed during the WMT16 evaluation campaign.\n \n for group in groups:\n _name = group.name\n \n _group_stats = HIT.compute_status_for_group(group)\n _total = _group_stats[0]\n \n if _total > 0 and not _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = 0\n elif _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = GROUP_HIT_REQUIREMENTS[_name]\n _delta = _total - _required\n _data = (_total, _required, _delta)\n \n if _data[0] > 0:\n group_stats.append((_name, _data))\n \n # Sort by number of remaining HITs.\n group_stats.sort(key=lambda x: x[1][2])\n \n # Add totals at the bottom.\n global_total = sum([x[1][0] for x in group_stats])\n global_required = sum([x[1][1] for x in group_stats])\n global_delta = global_total - global_required\n global_data = (global_total, global_required, global_delta)\n group_stats.append((\"Totals\", global_data))\n \n return group_stats", "def get_group_values(self, group_id:int, group_name:str) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT id, name FROM {table_groups} WHERE id={group_id};\").fetchone()\n if not value_list:\n return False\n group_used_id, group_used_name = value_list\n if group_used_name != group_name:\n self.cursor.execute(f\"UPDATE {table_groups} SET name={group_name} WHERE id={group_used_id};\")\n self.connection.commit()\n return True\n except Exception as e:\n msg = f\"We faced problems with checking of the group prensence. 
Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def _read_group_fill_results(self, cr, uid, domain, groupby,\n remaining_groupbys, aggregated_fields,\n count_field, read_group_result,\n read_group_order=None, context=None):\n if groupby == 'week_number':\n WEEK_DICT = dict(self.WEEKS)\n for result in read_group_result:\n week = result['week_number']\n result['week_number'] = (week, WEEK_DICT.get(week))\n return super(calendar_event, self)._read_group_fill_results(\n cr, uid, domain, groupby, remaining_groupbys, aggregated_fields,\n count_field, read_group_result, read_group_order, context\n )", "def metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricArgs']]]]:\n return pulumi.get(self, \"metrics\")", "def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))", "def print_metric_groups(cmd_ctx, client, metric_groups, resource_filter):\n\n if not isinstance(metric_groups, (list, tuple)):\n metric_groups = [metric_groups]\n\n properties = {\n 'anticipated-frequency-seconds': MIN_ANTICIPATED_FREQUENCY,\n 'metric-groups': metric_groups,\n }\n mc = client.metrics_contexts.create(properties)\n mg_values = wait_for_metrics(mc, metric_groups)\n filtered_object_values = list() # of MetricObjectValues\n\n if not mg_values:\n\n mg_name = metric_groups[0] # just pick any\n res_class = zhmcclient._metrics._resource_class_from_group(mg_name)\n mg_def = zhmcclient.MetricGroupDefinition(\n name=mg_name, resource_class=res_class, metric_definitions=[])\n\n else:\n\n mg_def = mc.metric_group_definitions[mg_values.name]\n\n filter_cpc = None\n filter_partition = None\n filter_lpar = None\n filter_adapter = None\n filter_nic = None\n for r_class, r_name in resource_filter:\n if r_class == 'cpc' and r_name:\n filter_cpc = client.cpcs.find(name=r_name)\n elif r_class == 'partition' and r_name:\n assert filter_cpc\n filter_partition = filter_cpc.partitions.find(name=r_name)\n elif r_class == 'logical-partition' and r_name:\n assert filter_cpc\n filter_lpar = filter_cpc.lpars.find(name=r_name)\n elif r_class == 'adapter' and r_name:\n assert filter_cpc\n filter_adapter = filter_cpc.adapters.find(name=r_name)\n elif r_class == 'nic' and r_name:\n assert filter_partition\n filter_nic = filter_partition.nics.find(name=r_name)\n\n resource_class = mg_def.resource_class\n\n for ov in mg_values.object_values:\n included = False\n if resource_class == 'cpc':\n if not filter_cpc:\n included = True\n elif ov.resource_uri == filter_cpc.uri:\n included = True\n elif 
resource_class == 'partition':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_partition:\n included = True\n elif ov.resource_uri == filter_partition.uri:\n included = True\n elif resource_class == 'logical-partition':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_lpar:\n included = True\n elif ov.resource_uri == filter_lpar.uri:\n included = True\n elif resource_class == 'adapter':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_adapter:\n included = True\n elif ov.resource_uri == filter_adapter.uri:\n included = True\n elif resource_class == 'nic':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.partition.manager.cpc.uri == \\\n filter_cpc.uri:\n if not filter_partition:\n included = True\n elif ov.resource.manager.partition.uri == \\\n filter_partition.uri:\n if not filter_nic:\n included = True\n elif ov.resource_uri == filter_nic.uri:\n included = True\n else:\n raise ValueError(\n \"Invalid resource class: {}\".format(resource_class))\n\n if included:\n filtered_object_values.append(ov)\n\n resource_classes = [f[0] for f in resource_filter]\n\n cmd_ctx.spinner.stop()\n print_object_values(filtered_object_values, mg_def, resource_classes,\n cmd_ctx.output_format, cmd_ctx.transpose)\n\n mc.delete()", "def iter(self):\n if self.setting_group is None:\n raise Exception(\"No Dictionary to read values from\")\n try:\n return self.setting_group[next(self.setting_group_iter)]\n except StopIteration:\n # make sure we understand the run is over\n self.setting_group = None\n self.setting_group_iter = None", "def _get_group_data(self, group_name):\n if self.plotter.plot_hues is None:\n data = self._get_group_data_without_hue(group_name)\n else:\n data = self._get_group_data_with_hue(group_name)\n\n group_data = remove_null(data)\n\n return group_data", "def _poll_group(self, group_type, server, obj, name):\n\n # change collection behavior based on the type of group we're dealing\n # with\n if group_type == 'datacenter':\n # find each cluster in the datacenter\n find_children = server.get_clusters\n poll_child = self.poll_cluster\n child_type = 'cluster'\n elif group_type == 'cluster':\n # find each host in the datacenter or cluster\n find_children = server.get_clusters\n find_children = server.get_hosts\n poll_child = self.poll_host\n child_type = 'host'\n\n self.log.debug('start querying %s: %s' % (group_type, name))\n children = find_children(obj)\n self.log.debug('finish querying %s: %s' % (group_type, name))\n\n # initialize some metrics\n cpu_total = cpu_usage = cpu_percent = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n child_stats = {}\n\n # iterate over each child node in this object group\n for child_obj, child_name in children.items():\n stats = poll_child(server, child_obj, child_name)\n child_stats[child_name] = stats\n\n # aggregate data from each child to the top level\n cpu_total += stats['cpu_total']\n cpu_usage += stats['cpu_usage']\n\n mem_total += stats['mem_total']\n mem_usage += stats['mem_usage']\n\n vms_total += stats['vms_total']\n vms_running += stats['vms_running']\n vms_stopped += stats['vms_stopped']\n\n # recalculate percentages\n if cpu_total > 0:\n cpu_percent = cpu_usage / float(cpu_total) * 100\n\n if mem_total > 0:\n mem_percent = mem_usage / float(mem_total) * 100\n\n # return the current metrics for this group\n 
group_stats = {\n 'cpu_total': cpu_total,\n 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent,\n 'mem_total': mem_total,\n 'mem_usage': mem_usage,\n 'mem_percent': mem_percent,\n 'vms_total': vms_total,\n 'vms_running': vms_running,\n 'vms_stopped': vms_stopped,\n child_type: child_stats,\n }\n\n return group_stats", "def get(self):\n status = ErrorCode.SUCCESS\n try:\n res = []\n cid = self.get_argument('cid', None)\n if not (cid is None):\n res = QueryHelper.get_groups_by_cid(cid, self.db)\n self.write_ret(status,\n dict_=DotDict(res=res))\n except Exception as e:\n logging.exception(\"[UWEB] Get groups failed. Exception: %s\",\n e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)", "def generate_metrics_data(metricsquery: List, resultsquery: Dict, deltaminutes: int = 5, Region_name: str = None) -> Dict:\r\n cloudwatch=client('cloudwatch', region_name=Region_name) \r\n paginator = cloudwatch.get_paginator('get_metric_data')\r\n metricsgroup=grouper(metricsquery)\r\n resultsquery['ApiCalls']=0 \r\n for mqs in metricsgroup:\r\n for response in paginator.paginate(MetricDataQueries=mqs, StartTime=datetime.now()-timedelta(minutes=deltaminutes),EndTime=datetime.now()):\r\n for results in response['MetricDataResults']:\r\n resultsquery[results['Id']].append({'results':results})\r\n resultsquery['ApiCalls']+=1\r\n return resultsquery", "def collect_metrics(grouped_samples, projroot, tgtdir, ext, grouping=\"sample\"):\n metrics = []\n for item_id, itemlist in grouped_samples.items():\n item = itemlist[0]\n # FIXME: tgtdir should be docroot!\n pfx = os.path.relpath(itemlist[0].prefix(grouping), os.path.dirname(tgtdir))\n mfile = glob.glob(pfx + \".*\" + ext)\n if mfile:\n metrics.append((item_id, mfile[0]))\n return PicardMetricsCollection(metrics)", "def test_result_group_can_index_into_metrics(\n self, index: int, metric_name: str, result_group: ResultGroup\n ):\n assert result_group.metrics[index].name == metric_name", "def _get_group_example_data(self, data_group_id: str) -> Dict[\n str, dict\n ]:\n return {\n e['example_id']: self._get_example_data(e['example_id'])\n for e in self.tasks['data_groups'][data_group_id]\n }", "def _evaluate(dataset: dict, name: str, metrics=None):\n if metrics is None:\n metrics = ['Accuracy', 'AUROC', 'AUPRC', 'Precision', 'Recall', 'F1', 'F2']\n measures = [dataset[metric] for metric in metrics]\n measures.insert(0, name)\n return measures", "def _retrieve(self):\n all_groups_settings = []\n iam_groups_settings = []\n\n model_manager = self.service_config.model_manager\n scoped_session, data_access = model_manager.get(self.model_name)\n with scoped_session as session:\n for settings in data_access.scanner_fetch_groups_settings(session,\n True):\n email = settings[0].split('group/')[1]\n iam_groups_settings.append(groups_settings.GroupsSettings\n .from_json(email, settings[1]))\n for settings in data_access.scanner_fetch_groups_settings(session,\n False):\n email = settings[0].split('group/')[1]\n all_groups_settings.append(groups_settings.GroupsSettings\n .from_json(email, settings[1]))\n\n return all_groups_settings, iam_groups_settings", "def get_stats_by_adgroup(\n self, account_id, adgroup_ids=None, batch=False,\n start_time=None, end_time=None):\n args = {}\n if adgroup_ids is not None:\n args['adgroup_ids'] = json.dumps(adgroup_ids)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = 'act_%s/adgroupstats' % account_id\n return 
self.make_request(path, 'GET', args, batch=batch)", "def group_get_members(self,groupname):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_group_get_members_query+\" ORDER BY $username_field$\",{'groupname':groupname,'username_field':self.sql_username_field,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: group_get_members: %s\" % (query,))\n\n cursor.execute(query)\n desc=[i[0] for i in cursor.description]\n for row in cursor:\n dictrow=dict(zip(desc,row))\n yield dictrow[self.sql_username_field]", "def get(self, group) -> Optional[OrderedDict]:\n return self._queue.get(group)", "def get_values(self, names):\n r = []\n for n in names:\n if n in self.raw_metrics:\n r.append(self.raw_metrics[n])\n else:\n return None\n return r", "def _parse_varname(self) -> (str, int, dict):\n\n metr_groups = list(globals.metric_groups.keys())\n for g in metr_groups:\n templ_d = globals.var_name_ds_sep[g]\n pattern = '{}{}'.format(globals.var_name_metric_sep[g],\n templ_d if templ_d is not None else '')\n parts = parse(pattern, self.varname)\n\n if parts is not None and parts['metric'] in globals.metric_groups[g]:\n return parts['metric'], g, parts.named\n\n return None, None, None", "def _get_endpoint_group(self, group_name):\n params = {\n \"name\": group_name\n }\n\n response, err_msg = self.api_call(\"GET\", ENDPOINT_GROUP_URI, params)\n if not err_msg:\n result = response.json()\n if result.get(\"nextPage\"):\n response_next = self.get_next_page(result.get(\"nextPage\"))\n\n return response, err_msg", "def get_metrics(metric_groups):\n return sorted(m for g in metric_groups for m in INSTANCE_METRIC_GROUP_MAP[g])", "def get_metric_filter(\n log_group_name,\n filter_name_prefix,\n metric_name,\n metric_namespace,\n):\n paginator = CLIENT.get_paginator(\"describe_metric_filters\")\n response_iterator = paginator.paginate(\n logGroupName=log_group_name,\n filterNamePrefix=filter_name_prefix,\n )\n metric_filters_response = [\n metric_filter\n for response in response_iterator\n for metric_filter in response.get(\"metricFilters\", [])\n ]\n LOGGER.debug(\"metric filters response: %s\", metric_filters_response)\n if not metric_filters_response:\n raise ValueError(\n \"failed to find existing metric filter with \"\n f\"logGroupName: [{log_group_name}], \"\n f\"filterNamePrefix: [{filter_name_prefix}]\"\n )\n # Get the fist metric filter with a matching transformation with the same\n # metricNameSpace and metricName\n # NOTE: There is a chance that there are multiple metric filters since the\n # describe_metric_filters uses a name prefix\n for m_f in metric_filters_response:\n metric_filters = [\n m_f\n for m_t in m_f[\"metricTransformations\"]\n if m_t[\"metricName\"] == metric_name and m_t[\"metricNamespace\"] == metric_namespace\n ]\n if metric_filters:\n break\n\n if not metric_filters:\n raise ValueError(\n \"failed to find existing metric filter with \"\n f\"logGroupName: [{log_group_name}], \"\n f\"filterNamePrefix: [{filter_name_prefix}], \"\n f\"metricName: [{metric_name}], \"\n f\"metricNamespace: [{metric_namespace}]\"\n )\n\n metric_filter_properties = [\n \"filterName\",\n \"filterPattern\",\n \"logGroupName\",\n \"metricTransformations\",\n ]\n # only return the properties that are needed for the put_metric_filter call\n return {k: v for k, v in metric_filters[0].items() if k in metric_filter_properties}", "def get(self):\n 
FetchGroupActionObjects.__init__(self)\n kwargs = self.parser.parse_args()\n query = self.notifications_db.construct_lucene_complex_query([\n ('target_role', {'value': self.role}),\n ('targets', {'value': self.username, 'join_operator': 'OR'}),\n ('group', {'value': kwargs['group'], 'join_operator': 'AND'})])\n notifications = self.notifications_db.full_text_search('search', query)\n action_objects = []\n object_type = ''\n for notif in notifications:\n action_objects += notif['action_objects']\n if object_type == '' and notif['object_type'] != '':\n object_type = notif['object_type']\n if object_type == '' or len(action_objects) < 1:\n return {}\n action_objects_results = self.fetch_action_objects(action_objects, object_type, kwargs['page'], kwargs['limit'])\n self.logger.info(\"Fetched group action objects for group %s\" % kwargs['group'])\n return action_objects_results", "def get_feed_group_data(\n self,\n feed: str,\n group: str,\n since: Optional[datetime.datetime] = None,\n next_token: str = None,\n ) -> GroupData:\n try:\n listing_json, record = self._get_feed_group_data()\n if record.content_type != \"application/x-tar\":\n raise UnexpectedMIMEType(record.content_type)\n return GroupData(\n data=record.content,\n next_token=None,\n since=since,\n record_count=1,\n response_metadata={\n \"checksum\": listing_json.get(\"checksum\"),\n \"built\": listing_json.get(\"built\"),\n \"version\": listing_json.get(\"version\"),\n },\n )\n except (HTTPStatusException, json.JSONDecodeError, UnicodeDecodeError) as e:\n logger.debug(\"Error executing grype DB data download: %s\", e)\n raise e", "def get_group_members(self, group_key):\n try:\n paged_results = self.repository.members.list(group_key)\n result = api_helpers.flatten_list_results(paged_results, 'members')\n LOGGER.debug('Getting all the members for group_key = %s,'\n ' result = %s', group_key, result)\n return result\n except (errors.HttpError, HttpLib2Error) as e:\n raise api_errors.ApiExecutionError(group_key, e)", "def getDriverData(self, metricSet):\n\n driverNiceName = metricSet[0]['source']['driver']['name']\n if 'driverCounter' not in self.drivers[driverNiceName]:\n self.drivers[driverNiceName]['driverCounter'] = self.drivers[\n driverNiceName]['driverInterval']\n else:\n self.drivers[driverNiceName]['driverCounter'] += self.drivers[\n driverNiceName]['driverInterval']\n for metric in metricSet:\n count = self.drivers[driverNiceName]['driverCounter']\n metricInterval = int(metric['interval'])\n if count % metricInterval == 0:\n metricId = metric['id']\n value = self.drivers[driverNiceName]['driver'].getData(metric)\n dt = datetime.utcnow()\n self.queue.enqueue(\n self.sender.send_metric, metricId, value, dt)", "def get_metrics(\n self,\n sample_size,\n namespaced=False,\n mp_type=MetricsProviderType.STATIC,\n unreachable=False,\n ):\n namespace = NAMESPACE if namespaced else None\n return self.get_metrics_in_namespace(\n namespace, sample_size, mp_type, unreachable\n )", "def get_metrics_for(pkg_name):\n\n # Convert a \"package.name\" to \"package_name\" because Prometheus doesn't\n # let us use . 
in metric names\n return Metrics(pkg_name.replace(\".\", \"_\"))", "def get_data(self, run_id, metric_ids=None):\n now = datetime.datetime.now()\n end = datetime.datetime(now.year, now.month, now.day)\n start = end - datetime.timedelta(days=4*365)\n test_measures = self.repo.get_measurements(run_id=run_id,\n start_date=start,\n end_date=end,\n metric_ids=metric_ids)\n return test_measures", "def update_metrics(self, topic_group_list):\n for topic_group in topic_group_list:\n self.gauge.labels(\n topic_group.name, topic_group.group).set(topic_group.lag)\n BaseMetricsAdapter.update_topic_group_lag(\n topic_group.name, topic_group.group, topic_group.lag)", "def test_api_v1_groups_names_get(self):\n pass", "def get_metric(self, property_id, metric, year=2015, month=1, day=1, metric_name=\"METRIC VALUE\"):\n today = datetime.datetime.now()\n data = []\n dates = [(y, m) for y in range(year,today.year) for m in range(1,13)]\n dates.extend([(today.year, m) for m in range(month, today.month + 1)])\n for year_month in dates:\n url = \"{0}/property/{1}/metrics?year={2}&month={3}&measurementSystem=EPA\".format(self.domain, property_id, year_month[0], year_month[1])\n self.logger.debug(\"Pulling data from {0}\".format(url))\n if(year_month[1] < 10):\n date = \"{0}-0{1}\".format(year_month[0], year_month[1])\n else:\n date = \"{0}-{1}\".format(year_month[0], year_month[1])\n response = self.session.get(url, headers={\"PM-Metrics\":metric})\n if response.status_code != requests.codes.ok:\n return response.raise_for_status()\n root = Et.fromstring(response.text)\n for element in root.findall(\"metric\"):\n d = {\"PM ID\":property_id, \"K\":date, metric_name:element.find(\"value\").text}\n data.append(d)\n return data", "def get_metric_data(config, metric_list, metric_grouping, start_time, end_time, collected_data_map):\n\n def format_data_entry(json_data_entry):\n metric_name = json_data_entry.get('metric')\n host_name = json_data_entry.get('tags', {}).get('host') or 'unknownHost'\n dps = json_data_entry.get('dps', {})\n metric_value = None\n header_field = normalize_key(metric_name) + \"[\" + host_name + \"]:\" + str(\n get_grouping_id(metric_name, metric_grouping))\n mtime = 0\n for stime, val in dps.items():\n if int(stime) > mtime:\n metric_value = val\n mtime = int(stime)\n\n epoch = mtime * 1000\n\n if epoch in collected_data_map:\n timestamp_value_map = collected_data_map[epoch]\n else:\n timestamp_value_map = {}\n\n timestamp_value_map[header_field] = str(metric_value)\n collected_data_map[epoch] = timestamp_value_map\n\n json_data = {\n \"token\": config['OPENTSDB_TOKEN'],\n \"start\": start_time,\n \"end\": end_time,\n \"queries\": map(lambda m: {\n \"aggregator\": \"avg\",\n \"downsample\": \"1m-avg\",\n \"metric\": m.encode('ascii')\n }, metric_list)\n }\n\n url = config[\"OPENTSDB_URL\"] + \"/api/query\"\n response = requests.post(url, data=json.dumps(json_data))\n if response.status_code == 200:\n rawdata_list = response.json()\n logger.debug(\"Get metric data from opentsdb: \" + str(len(rawdata_list)))\n\n # format metric and save to collected_data_map\n map(lambda d: format_data_entry(d), rawdata_list)", "def test_result_group_proxies_correctly_to_the_first_metric(\n self, result_group: ResultGroup\n ):\n assert result_group.metrics.name == \"accuracy\"", "def test_wait_for_dispatched_metrics(self):\n worker_helper = WorkerHelper()\n d = worker_helper.wait_for_dispatched_metrics()\n self.assertEqual(self.successResultOf(d), [])\n\n self._add_to_dispatched_metrics(worker_helper.broker, 
MetricMessage())\n msg = MetricMessage()\n msg.append('fake metric 1')\n msg.append('fake metric 2')\n self._add_to_dispatched_metrics(worker_helper.broker, msg)\n worker_helper.kick_delivery()\n d = worker_helper.wait_for_dispatched_metrics()\n self.assertNoResult(d)\n yield worker_helper.broker.wait_delivery()\n self.assertEqual(\n self.successResultOf(d), [[], ['fake metric 1', 'fake metric 2']])", "def get(self, host_name, metric_name, service_description=None):\n query = self._build_metric_query(\n host_name,\n metric_name,\n service_description=service_description,\n limit=1)\n\n influx_client = self.request.influxdb_client\n response = influx_client.query(query)\n\n metrics = []\n for item in response[None]:\n metric_dict = self._metric_dict_from_influx_item(item, metric_name)\n metric = m.Metric(**metric_dict)\n metrics.append(metric)\n\n if metric_name:\n metrics = metrics[0] or ''\n return metrics", "def ListGroupStats(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def getGroupData(service, groupName, attList):\n # import IPython ; IPython.embed() ; exit(); \n groupsDataList = service.contactGroups().list().execute()[\"contactGroups\"]\n for group in groupsDataList:\n if group[\"name\"] == groupName:\n groupData = []\n for att in attList:\n groupData.append(group[att])\n return groupData", "def eval_group(self, group):\n\n return [self.eval(coords) for coords in group]", "def eval_group(self, group):\n\n return [self.eval(coords) for coords in group]", "def get_group(self, group_name):\n\n return self._group[group_name]", "def customer_group_get(group_id=None):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n query = \"\"\"\n SELECT \n `group_id`,\n `group_name`,\n `description`,\n `timestamp`,\n `created_by`,\n `creation_time`,\n `is_deleted`,\n `updated_by`,\n `role_id`,\n `is_default`,\n `is_customer`,\n `company_name`,\n `company_address`,\n `company_telephone`,\n `company_fax`,\n `company_website`,\n `company_sales_contact`,\n `company_purchase_contact`,\n `company_business`,\n `company_business_type`,\n `company_sales_email`,\n `company_purchase_email`,\n `company_reg_number`,\n `company_vat_number` \n FROM `groups` \n WHERE `is_customer` = 1\n \"\"\"\n\n if group_id:\n query += \"\"\"\n AND `group_id` = \\\"%s\\\"\n \"\"\" % (group_id)\n\n group_details = None\n cursor = db.cursor()\n\n if cursor.execute(query) != 0:\n group_details = cursor.fetchall()\n\n cursor.close()\n db.close()\n\n return group_details", "def _ListGroupDevices(self, group):\n for run_target in six.itervalues(group.run_targets):\n for d in six.itervalues(run_target.devices):\n yield d", "def get_accumulated_data(self, topic, start_ts, end_ts, units):\n return self.manager.get_accumulated_data(topic, start_ts, end_ts, units)", "def getGroup(group: int, name=\"\") -> list:\n groups = mongo.db.groups.find({'id':group},{'_id':0})\n userID_list = []\n user_list = []\n for entry in groups:\n if entry[\"id\"] == group:\n userID_list = userID_list + entry[\"members\"]\n if len(userID_list) != 0:\n for entry in userID_list:\n x = fetchUser(userId=entry)\n user_list = user_list + x\n return user_list", "def fetch_metric(self, metric, start, end, tags={}, aggregator=\"sum\",\n downsample=None, ms_resolution=True):\n query = \"{aggregator}:{downsample}{metric}{{{tags}}}\".format(\n aggregator=aggregator,\n downsample=downsample + \"-avg:\" if downsample else \"\",\n 
metric=metric,\n tags=','.join(\"%s=%s\" % (k, v) for k, v in tags.items())\n )\n params = {\n 'ms': ms_resolution,\n 'start': '{0:.3f}'.format(start.timestamp()),\n 'end': '{0:.3f}'.format(end.timestamp()),\n 'm': query\n }\n response = self.__request(\"/query\", params)\n\n if response.status_code == 200:\n try:\n return response.json()[0]['dps']\n except IndexError:\n # empty data set\n return {}\n\n raise QueryError(response.json())", "def walk(self):\n for group in self.all_groups.values():\n yield from group.calculations", "def data(self, *args, **kwargs):\n\n data = self.cached(NR_CACHE_NAME)\n if not data:\n raise core.InvalidState(\"No grouping loaded\")\n\n mapping = self.mapping(data['release'], data['groups'])\n data['groups'] = self.transform(data['groups'], mapping)\n self.cache(NR_CACHE_NAME, data)\n return None", "def getdatafromfile(self,rootelement):\n for metricgroupid in rootelement.findall(\"metricGroup\"):\n # print(etree.tostring(metricgroupid,pretty_print=True))\n groupid = metricgroupid.get(\"id\")\n metric = metricgroupid.find(\"metric\")\n metric_params = {i:j for i , j in metric.items()}\n parameter_parmas = {i:j for i , j in metric.find(\"parameter\").items() }\n # pmfile_data[groupid] = (metric_params,parameter_parmas)\n\n yield (groupid,metric_params,parameter_parmas)", "def load_consumer_metadata_for_group(self, group):\n group = _coerce_consumer_group(group)\n log.debug(\"%r: load_consumer_metadata_for_group: %r\", self, group)\n\n # If we are already loading the metadata for this group, then\n # just return the outstanding deferred\n if group in self.coordinator_fetches:\n return self.coordinator_fetches[group]\n\n # No outstanding request, create a new one\n requestId = self._next_id()\n request = KafkaCodec.encode_consumermetadata_request(\n self._clientIdBytes, requestId, group)\n\n # Callbacks for the request deferred...\n def _handleConsumerMetadataResponse(response, group):\n # Clear the outstanding fetch\n self.coordinator_fetches.pop(group, None)\n # Decode the response (returns ConsumerMetadataResponse)\n c_m_resp = KafkaCodec.decode_consumermetadata_response(response)\n log.debug(\"%r: c_m_resp: %r\", self, c_m_resp)\n if c_m_resp.error:\n # Raise the appropriate error\n resp_err = kafka_errors.get(\n c_m_resp.error, UnknownError)(c_m_resp)\n raise resp_err\n\n self.consumer_group_to_brokers[group] = \\\n BrokerMetadata(c_m_resp.node_id, c_m_resp.host,\n c_m_resp.port)\n return True\n\n def _handleConsumerMetadataErr(err, group):\n # Clear the outstanding fetch\n self.coordinator_fetches.pop(group, None)\n log.error(\"Failed to retrieve consumer metadata \"\n \"for group: %s Error:%r\", group, err)\n # Clear any stored value for the group's coordinator\n self.reset_consumer_group_metadata(group)\n raise ConsumerCoordinatorNotAvailableError(\n \"Coordinator for group: %s not available\" % (group))\n\n # Send the request, add the handlers\n d = self._send_broker_unaware_request(requestId, request)\n # Save the deferred under the fetches for this group\n self.coordinator_fetches[group] = d\n d.addCallback(_handleConsumerMetadataResponse, group)\n d.addErrback(_handleConsumerMetadataErr, group)\n return d", "def get_cloudwatch_log_groups(global_vars):\n resp_data = {'status': False, 'log_groups':[], 'error_message': ''}\n client = boto3.client('logs')\n try:\n # Lets get all the logs\n resp = client.describe_log_groups( limit = 50 )\n resp_data['log_groups'].extend( resp.get('logGroups') )\n # Check if the results are paginated\n if 
resp.get('nextToken'):\n while True:\n resp = client.describe_log_groups( nextToken = resp.get('nextToken'), limit = 50 )\n resp_data['log_groups'].extend( resp.get('logGroups') )\n # Check & Break, if the results are no longer paginated\n if not resp.get('nextToken'):\n break\n resp_data['status'] = True\n except Exception as e:\n resp_data['error_message'] = str(e)\n return resp_data", "def check_group_exists(self, group_name):\n for grp in self.get_list_groups():\n if grp[\"name\"] == group_name:\n return grp[\"id\"], grp[\"members\"]\n\n return None", "def get_groups(self, group_name):\r\n assert group_name in self.groups.keys(), group_name\r\n try:\r\n group_list = self.groups[group_name]\r\n except KeyError:\r\n raise GroupKeyError()\r\n return group_list", "def get(self, request, group, key):\n # XXX(dcramer): kill sentry prefix for internal reserved tags\n if TagKey.is_reserved_key(key):\n lookup_key = 'sentry:{0}'.format(key)\n else:\n lookup_key = key\n\n try:\n tag_key = TagKey.objects.get(\n project=group.project_id,\n key=lookup_key,\n status=TagKeyStatus.VISIBLE,\n )\n except TagKey.DoesNotExist:\n raise ResourceDoesNotExist\n\n try:\n group_tag_key = GroupTagKey.objects.get(\n group=group,\n key=lookup_key,\n )\n except GroupTagKey.DoesNotExist:\n raise ResourceDoesNotExist\n\n total_values = GroupTagValue.get_value_count(group.id, lookup_key)\n\n top_values = GroupTagValue.get_top_values(group.id, lookup_key, limit=9)\n\n data = {\n 'id': str(tag_key.id),\n 'key': key,\n 'name': tag_key.get_label(),\n 'uniqueValues': group_tag_key.values_seen,\n 'totalValues': total_values,\n 'topValues': serialize(top_values, request.user),\n }\n\n return Response(data)", "def load_group(self, group_name):\n self.sorted = False\n self.grouped = False\n self.nwb_path_list = dict()\n self.labels = []\n for path_list in {group_name: self.all_groups.get(group_name)}.values():\n for path in path_list:\n io = NWBHDF5IO(path, 'r')\n nwb_file = io.read()\n self.labels.append(nwb_file.identifier)\n self.nwb_path_list.update({nwb_file.identifier: path})\n self.musketeers_widget.session_widget.populate(self.labels)\n self.musketeers_widget.session_widget.update_text_filter()\n self.groupMenu.setEnabled(True)\n self.sortMenu.setEnabled(True)", "def test_get_scaling_group_info(self):\n def view_manifest(with_policies, with_webhooks, get_deleting):\n self.assertEqual(with_policies, False)\n self.assertEqual(with_webhooks, False)\n self.assertEqual(get_deleting, True)\n return succeed(manifest)\n\n manifest = {}\n self.group.view_manifest.side_effect = view_manifest\n info = self.perform_with_group(\n Effect(GetScalingGroupInfo(tenant_id='00', group_id='g1')),\n (self.log, '00', 'g1'), self.group)\n self.assertEqual(info, (self.group, manifest))", "def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):\n if groupby and groupby[0] == \"state\":\n # Default result structure\n # states = self._get_state_list(cr, uid, context=context)\n states = [('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')]\n read_group_all_states = [{\n '__context': {'group_by': groupby[1:]},\n '__domain': domain + [('state', '=', state_value)],\n 'state': state_value,\n 'state_count': 0,\n } for state_value, state_name in states]\n # Get standard results\n read_group_res = super(MassMailing, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)\n # Update standard results with default results\n result = 
[]\n for state_value, state_name in states:\n res = filter(lambda x: x['state'] == state_value, read_group_res)\n if not res:\n res = filter(lambda x: x['state'] == state_value, read_group_all_states)\n res[0]['state'] = [state_value, state_name]\n result.append(res[0])\n return result\n else:\n return super(MassMailing, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)", "def customer_group_get_all():\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n query = \"\"\"\n SELECT \n `group_id`, \n `group_name`, \n `description`, \n `timestamp`, \n `created_by`, \n `creation_time`, \n `is_deleted`, \n `updated_by`, \n `role_id`, \n `is_default`, \n `is_customer` \n FROM `groups` \n WHERE `is_customer` = 1\n \"\"\"\n user_group_details = None\n cursor = db.cursor()\n if cursor.execute(query) != 0:\n user_group_details = cursor.fetchall()\n cursor.close()\n db.close()\n return user_group_details", "def _fetch_gauge_metrics_and_clear(self):\n with self._gauge_rlock:\n gauge_metrics = self._gauge_metrics\n self._gauge_metrics = defaultdict(int)\n\n return gauge_metrics", "def get_data(self, topic, end_ts=six.MAXSIZE):\n queue = self._get_queue(topic)\n self.logger.debug(\"TopicManager starting queue %s size is: %i\" %(topic, len(queue)))\n collector = CollectData(self.wind_fields)\n while queue:\n if queue[0]['data']['dateTime'] > end_ts:\n self.logger.debug(\"TopicManager leaving queue: %s size: %i content: %s\" %(topic, len(queue), queue[0]))\n break\n payload = queue.popleft()\n wind_data = payload['wind_data']\n if wind_data:\n self.logger.debug(\"TopicManager processing wind data.\")\n temp_data = payload['data']\n data = collector.add_data(temp_data)\n else:\n data = payload['data']\n if data:\n self.logger.debug(\"TopicManager retrieved queue %s %s: %s\"\n %(topic, weeutil.weeutil.timestamp_to_string(data['dateTime']), to_sorted_string(data)))\n yield data\n\n data = collector.get_data()\n if data:\n self.logger.debug(\"TopicManager retrieved wind queue final %s %s: %s\"\n %(topic, weeutil.weeutil.timestamp_to_string(data['dateTime']), to_sorted_string(data)))\n yield data", "def _cache_get(self, metric_name):\n try:\n with self._lock:\n metric = self.__cache.get(metric_name, False)\n except KeyError:\n # When metrics expire, we still get a KeyError.\n metric = False\n if metric is False:\n return None, False\n else:\n return metric, True", "def test_get_device_groups1(self):\n pass", "def test_api_v1_groups_get(self):\n pass", "def get_metrics_in_namespace(\n self, sample_size, namespace, mp_type, unreachable=False\n ):\n metrics = list(self.metrics.get(mp_type, {}).get(namespace, {}))\n desired_metrics = [\n m for m in metrics if unreachable == self._is_unreachable(m, mp_type)\n ]\n if len(desired_metrics) < sample_size:\n unreachable_str = \"unreachable\" if unreachable else \"reachable\"\n msg = (\n f\"There are not enough {unreachable_str} metrics. 
Needed \"\n f\"{sample_size}, found {len(desired_metrics)}: {desired_metrics}.\"\n )\n raise ValueError(msg)\n return random.sample(desired_metrics, sample_size)", "def getGroupDataSlidingFactor(self, groupName):\n return self.getGroupSetting(groupName, self._dataSlidingFactorToken, 0.1)", "def metrics(self, project, callback=None):\n\n self.client.select(self.selected_db)\n\n metrics = {}\n\n for metric_name in (yield gen.Task(self.get_metrics_list, project)):\n if metric_name not in metrics.keys():\n metrics[metric_name] = {}\n\n for filter_name in (yield gen.Task(self.get_filters, project, metric_name)):\n metrics[metric_name][filter_name] = (yield gen.Task(self.get_filter_values,\n project, metric_name, filter_name))\n\n if callback:\n callback(metrics)", "def retry_get_gauge_metrics(self, target_metrics, retries=3):\n return self._retry_get_metrics(self._gauge_varz_endpoint, target_metrics, retries)", "def get_group_group_members(self, group_id):\n try:\n group_id = self.quote(group_id)\n return self.g.get('groups/%s/groups/' % group_id)\n except HTTPError as e:\n return self._manage_errors(e)", "def fetch_metrics():\n try:\n s = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)\n log_verbose('Connected to Redis at %s:%s' % (REDIS_HOST, REDIS_PORT))\n except socket.error, e:\n collectd.error('redis_metrics plugin: Error connecting to %s:%d - %r'\n % (REDIS_HOST, REDIS_PORT, e))\n return None\n log_verbose('Retrieving data')\n data = s.hgetall(METRICS_HASH)\n log_verbose('Recieved data: %s' % data)\n\n return data", "def count_group(group_id, failures=False, cached=Conf.CACHED):\n if cached:\n return count_group_cached(group_id, failures)\n return None#Task.get_group_count(group_id, failures)", "async def parse(self, key) -> List[dict]:\n data = await self._get_data()\n output = []\n for group in data:\n for series in group.get(key, []):\n output.append(series)\n return output", "def _getMetrics(self):\n metric = None\n if self.metrics is not None:\n metric = self.metrics(self._currentRecordIndex+1)\n elif self.metricValue is not None:\n metric = self.metricValue\n else:\n raise RuntimeError('No metrics or metric value specified for dummy model')\n\n return {self._optimizeKeyPattern:metric}", "def get_val_iterator(self) -> Iterable[Batch]:\n if self._val_name not in self._datasets:\n raise ValueError(\"Val data not provided.\")\n return self.get_iterator(self._val_name)", "def test_get_scaling_group_info_log_context(self):\n manifest = {}\n\n def view_manifest(with_policies, with_webhooks, get_deleting):\n return manifest\n self.group.view_manifest.side_effect = view_manifest\n eff = Effect(GetScalingGroupInfo(tenant_id='00', group_id='g1'))\n expected_lookup = (matches(IsBoundWith(base_log=True, effectful=True)),\n '00', 'g1')\n result = self.perform_with_group(\n eff, expected_lookup, self.group,\n fallback_dispatcher=get_log_dispatcher(self.log,\n {'effectful': True}))\n self.assertEqual(result, (self.group, manifest))", "def test_020_query_groups(self):\n\n testflow.step(\"Querying for groups\")\n assert self.query_cli.run(\n what='group'\n )[0], \"Failed to search for groups\"", "def test_list_entries_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.list_entries_groups(group_id, topic_id, ids=None)", "def get_metric_samples(metricDict, metricNames):\n assert isinstance(metricDict, dict)\n assert isinstance(metricNames, str) or isinstance(metricNames, list)\n\n retDict = {}\n if isinstance(metricNames, str):\n 
retDict[metricNames] = metricDict[metricNames]\n return retDict\n\n # metricNames must be a list\n for metricName in metricNames:\n metricName = metricName.strip()\n try:\n retDict[metricName] = metricDict[metricName]\n except KeyError:\n print(\"Metric \" + metricName + \" does not exist - skipping\")\n pass\n\n return retDict", "def return_ga_data(start_date, end_date, view_id, metrics, dimensions, group_by=[], dimensionFilterClauses=[], segments=[]):\n\n start_date = datetime.strptime(start_date, '%Y-%m-%d').date()\n end_date = datetime.strptime(end_date, '%Y-%m-%d').date()\n final_list = []\n for date in rrule(freq=DAILY, dtstart=start_date, until=end_date):\n date = str(date.date())\n\n # for paginated results\n page_token = '0' # for initial iteration this just needs to be anything except None\n while page_token != None:\n\n # GA API limit of 100 requests per 100 seconds\n time.sleep(1)\n\n # for tracking progress in logs\n print(\"pageToken is:\" + page_token + \" : \" + date)\n\n iresponse = get_report(date, date, view_id, metrics, dimensions, dimensionFilterClauses, segments, pageToken=page_token)\n\n # make sure there are results else quit\n if 'rowCount' not in iresponse['reports'][0]['data']:\n pass\n\n i_df = convert_response_to_df(iresponse)\n final_list.append(i_df)\n page_token = iresponse['reports'][0].get('nextPageToken') # update the pageToken\n\n final_df = pd.concat(final_list)\n\n if len(group_by) != 0 and final_df.shape[0] > 0:\n final_df = final_df.groupby(group_by).sum().reset_index()\n return final_df", "def get_group_users(groupname):\n return jsonify(admin.get_group_users(current_app.scoped_session(), groupname))", "def GetGroupMembers(self, group):\n return []", "def stats(self) -> Sequence['outputs.GetSystemGroupsGroupStatResult']:\n return pulumi.get(self, \"stats\")", "def test_groups_get(self):\n pass", "def test_groups_get(self):\n pass", "def get_metric_key_samples(metricDict, metricNames, keyVal=\"means\"):\n assert keyVal in [\"mins\", \"maxs\", \"means\", \"vars\", \"sums\"]\n\n retDict = get_metric_samples(metricDict, metricNames)\n for key in retDict:\n retDict[key] = retDict[key][keyVal]\n\n return retDict", "def _cache_get(self, metric_name):\n pass", "def count_group_cached(group_id, failures=False, broker=None):\n if not broker:\n broker = get_broker()\n group_list = broker.cache.get('{}:{}:keys'.format(broker.list_key, group_id))\n if group_list:\n if not failures:\n return len(failures)\n failure_count = 0\n for task_key in group_list:\n task = signing.SignedPackage.loads(broker.cache.get(task_key))\n if not task['success']:\n failure_count += 1\n return failure_count", "def get(ctx):\n user, project_name, _group = get_project_group_or_local(ctx.obj.get('project'),\n ctx.obj.get('group'))\n try:\n response = PolyaxonClient().experiment_group.get_experiment_group(\n user, project_name, _group)\n cache.cache(config_manager=GroupManager, response=response)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get experiment group `{}`.'.format(_group))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n get_group_details(response)", "def get_next_batch(self):\n\n metrics = {}\n for struct in self.metrics.values():\n metrics = {**metrics, **struct.get_next_batch()}\n\n return metrics", "def get_accumulated_data(self, topic, start_time, end_time, units):\n ignore_start_time = self._get_value('ignore_start_time', topic)\n ignore_end_time = 
self._get_value('ignore_end_time', topic)\n adjust_start_time = self._get_value('adjust_start_time', topic)\n adjust_end_time = self._get_value('adjust_end_time', topic)\n\n if ignore_start_time:\n self.logger.debug(\"Service ignoring start time.\")\n start_ts = self.peek_datetime(topic) - adjust_start_time\n else:\n start_ts = start_time - adjust_start_time\n\n if ignore_end_time:\n self.logger.debug(\"Service ignoring end time.\")\n end_ts = self.peek_last_datetime(topic) + adjust_end_time\n else:\n end_ts = end_time + adjust_end_time\n\n self.logger.debug(\"Service processing interval: %f %f\" %(start_ts, end_ts))\n accumulator = weewx.accum.Accum(weeutil.weeutil.TimeSpan(start_ts, end_ts))\n\n for data in self.get_data(topic, end_ts):\n if data:\n try:\n self.logger.debug(\"Service data to accumulate: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(data['dateTime']), to_sorted_string(data)))\n accumulator.addRecord(data)\n except weewx.accum.OutOfSpan:\n self.logger.info(\"Service ignoring record outside of interval %f %f %f %s\"\n %(start_ts, end_ts, data['dateTime'], (to_sorted_string(data))))\n else:\n break\n\n target_data = {}\n if not accumulator.isEmpty:\n aggregate_data = accumulator.getRecord()\n self.logger.debug(\"Service data prior to conversion is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(aggregate_data['dateTime']), to_sorted_string(aggregate_data)))\n target_data = weewx.units.to_std_system(aggregate_data, units)\n self.logger.debug(\"Service data after to conversion is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(target_data['dateTime']), to_sorted_string(target_data)))\n else:\n self.logger.debug(\"Dervice queue was empty\")\n\n # Force dateTime to packet's datetime so that the packet datetime is not updated to the MQTT datetime\n if ignore_end_time:\n target_data['dateTime'] = end_time\n\n return target_data", "def data_group():\n ...", "def _get_from_namespace(self, namespace, group_name):\n names = [(group_name, self.dest)]\n current_name = (group_name, self.name)\n\n for opt in self.deprecated_opts:\n dname, dgroup = opt.name, opt.group\n if dname or dgroup:\n names.append((dgroup if dgroup else group_name,\n dname if dname else self.dest))\n\n value, loc = namespace._get_value(\n names, multi=self.multi,\n positional=self.positional, current_name=current_name)\n # The previous line will raise a KeyError if no value is set in the\n # config file, so we'll only log deprecations for set options.\n if self.deprecated_for_removal and not self._logged_deprecation:\n self._logged_deprecation = True\n pretty_group = group_name or 'DEFAULT'\n if self.deprecated_reason:\n pretty_reason = ' ({})'.format(self.deprecated_reason)\n else:\n pretty_reason = ''\n format_str = ('Option \"%(option)s\" from group \"%(group)s\" is '\n 'deprecated for removal%(reason)s. 
Its value may '\n 'be silently ignored in the future.')\n format_dict = {'option': self.dest,\n 'group': pretty_group,\n 'reason': pretty_reason}\n _report_deprecation(format_str, format_dict)\n return (value, loc)", "def get_metric_info(self):\n metric_data_object = self.client.get_metric_data(\n MetricDataQueries=[\n {\n \"Id\": \"cdbdata_invocations\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"Invocations\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n },\n {\n \"Id\": \"cdbdata_errors\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"Errors\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n },\n {\n \"Id\": \"cdbdata_throttles\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"Throttles\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n },\n {\n \"Id\": \"cdbdata_concurrentexec\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"ConcurrentExecutions\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n }\n ],\n StartTime=self.start_timestamp,\n EndTime=self.end_timestamp,\n ScanBy='TimestampDescending'\n )\n\n metric_data_points = metric_data_object[DataPointsCollector.RESPONSE_KEY]\n\n return metric_data_points", "def getGroupDataWeight(self, groupName):\n return self.getGroupSetting(groupName, self._dataWeightToken, 1.0)", "def build_device_groups_report(**kwargs):\n # All report functions support kwargs to support a unified interface,\n # even if they don't use them.\n _ = kwargs\n jss_connection = JSSConnection.get()\n group_list = jss_connection.MobileDeviceGroup()\n if not group_list:\n return Report(\"MobileDeviceGroup\", [], \"Mobile Device Group Report\",\n {})\n\n all_mobile_device_groups = [(group.id, group.name) for group in group_list]\n full_groups = group_list.retrieve_all()\n\n all_configs = (\n jss_connection.MobileDeviceConfigurationProfile().retrieve_all(\n subset=[\"general\", \"scope\"]))\n all_provisioning_profiles = (\n jss_connection.MobileDeviceProvisioningProfile().retrieve_all(\n subset=[\"general\", \"scope\"]))\n all_apps = (\n jss_connection.MobileDeviceApplication().retrieve_all(\n subset=[\"general\", \"scope\"]))\n all_ebooks = (\n jss_connection.EBook().retrieve_all(subset=[\"general\", \"scope\"]))\n xpath = \"scope/mobile_device_groups/mobile_device_group\"\n exclusion_xpath = (\n \"scope/exclusions/mobile_device_groups/mobile_device_group\")\n\n # Build results for groups which aren't scoped.\n report = build_group_report(\n [(all_configs, xpath), (all_configs, exclusion_xpath),\n (all_provisioning_profiles, xpath),\n (all_provisioning_profiles, exclusion_xpath),\n (all_apps, xpath), (all_apps, exclusion_xpath),\n (all_ebooks, xpath), (all_ebooks, exclusion_xpath)],\n all_mobile_device_groups, full_groups)\n report.heading = \"Mobile Device Group Usage Report\"\n report.get_result_by_name(\"Used\").description = (\n \"All groups which participate in scoping. 
Mobile device groups are \"\n \"considered to be in-use if they are designated in the scope or the \"\n \"exclusions of a configuration profile, provisioning profile, app, \"\n \"or ebook. This report includes all groups which are nested inside \"\n \"of smart groups using the 'member_of' criterion.\")\n report.get_result_by_name(\"Unused\").description = (\n \"All groups which do not participate in scoping. Mobile device groups \"\n \"are considered to be in-use if they are designated in the scope or \"\n \"the exclusions of a configuration profile, provisioning profile, \"\n \"app, or ebook. This report includes all groups which are nested \"\n \"inside of smart groups using the 'member_of' criterion.\")\n\n return report", "def get(self, key, group='default', default=None):\n if not self.fp:\n raise Exception(\"Please invoke method setup first!\")\n if group not in self.__cache:\n self._reload_group(group)\n try:\n return self.__cache[group][key]\n except KeyError as e:\n if self.fp.has_option(group, key):\n self.__cache[group][key] = self.fp.get(group, key)\n else:\n self.__cache[group][key] = default\n return self.__cache[group][key]" ]
[ "0.55873525", "0.55362415", "0.54371434", "0.51211834", "0.50748", "0.50633067", "0.50494426", "0.50236344", "0.5014017", "0.49106795", "0.4888227", "0.48790106", "0.48743725", "0.4858078", "0.48575234", "0.48438308", "0.48435786", "0.4804029", "0.47981688", "0.47835502", "0.47701344", "0.47551236", "0.4739878", "0.47212353", "0.46829647", "0.46789664", "0.46770507", "0.46716234", "0.4664505", "0.46313092", "0.46261567", "0.4609294", "0.46087503", "0.4601392", "0.4600142", "0.4564801", "0.45624426", "0.45205724", "0.45175916", "0.45159805", "0.45095176", "0.45062393", "0.45052132", "0.4492809", "0.4492809", "0.44834772", "0.4476534", "0.447006", "0.44660676", "0.44647038", "0.44630393", "0.44624165", "0.44615138", "0.44522828", "0.4437519", "0.44354475", "0.4431046", "0.44303286", "0.4427043", "0.44252607", "0.44216675", "0.44194016", "0.44095817", "0.44080812", "0.4406963", "0.44061634", "0.440501", "0.44028732", "0.440284", "0.4402699", "0.43924218", "0.43850374", "0.43779218", "0.43766707", "0.43721625", "0.43656486", "0.43570963", "0.43526953", "0.43518132", "0.43382707", "0.43367082", "0.4334853", "0.43288055", "0.43148834", "0.4314729", "0.43126458", "0.43081105", "0.43081105", "0.4307116", "0.4303019", "0.42973864", "0.42935747", "0.42918926", "0.42899916", "0.42877734", "0.42852288", "0.42817798", "0.4281512", "0.42755133", "0.4271751" ]
0.72703934
0
Print a metric group for a list of resources in the desired output format.
def print_object_values( object_values_list, metric_group_definition, resource_classes, output_format, transposed): if output_format in TABLE_FORMATS: if output_format == 'table': output_format = 'psql' print_object_values_as_table( object_values_list, metric_group_definition, resource_classes, output_format, transposed) elif output_format == 'json': print_object_values_as_json( object_values_list, metric_group_definition, resource_classes) else: raise InvalidOutputFormatError(output_format)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_metric_groups(cmd_ctx, client, metric_groups, resource_filter):\n\n if not isinstance(metric_groups, (list, tuple)):\n metric_groups = [metric_groups]\n\n properties = {\n 'anticipated-frequency-seconds': MIN_ANTICIPATED_FREQUENCY,\n 'metric-groups': metric_groups,\n }\n mc = client.metrics_contexts.create(properties)\n mg_values = wait_for_metrics(mc, metric_groups)\n filtered_object_values = list() # of MetricObjectValues\n\n if not mg_values:\n\n mg_name = metric_groups[0] # just pick any\n res_class = zhmcclient._metrics._resource_class_from_group(mg_name)\n mg_def = zhmcclient.MetricGroupDefinition(\n name=mg_name, resource_class=res_class, metric_definitions=[])\n\n else:\n\n mg_def = mc.metric_group_definitions[mg_values.name]\n\n filter_cpc = None\n filter_partition = None\n filter_lpar = None\n filter_adapter = None\n filter_nic = None\n for r_class, r_name in resource_filter:\n if r_class == 'cpc' and r_name:\n filter_cpc = client.cpcs.find(name=r_name)\n elif r_class == 'partition' and r_name:\n assert filter_cpc\n filter_partition = filter_cpc.partitions.find(name=r_name)\n elif r_class == 'logical-partition' and r_name:\n assert filter_cpc\n filter_lpar = filter_cpc.lpars.find(name=r_name)\n elif r_class == 'adapter' and r_name:\n assert filter_cpc\n filter_adapter = filter_cpc.adapters.find(name=r_name)\n elif r_class == 'nic' and r_name:\n assert filter_partition\n filter_nic = filter_partition.nics.find(name=r_name)\n\n resource_class = mg_def.resource_class\n\n for ov in mg_values.object_values:\n included = False\n if resource_class == 'cpc':\n if not filter_cpc:\n included = True\n elif ov.resource_uri == filter_cpc.uri:\n included = True\n elif resource_class == 'partition':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_partition:\n included = True\n elif ov.resource_uri == filter_partition.uri:\n included = True\n elif resource_class == 'logical-partition':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_lpar:\n included = True\n elif ov.resource_uri == filter_lpar.uri:\n included = True\n elif resource_class == 'adapter':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_adapter:\n included = True\n elif ov.resource_uri == filter_adapter.uri:\n included = True\n elif resource_class == 'nic':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.partition.manager.cpc.uri == \\\n filter_cpc.uri:\n if not filter_partition:\n included = True\n elif ov.resource.manager.partition.uri == \\\n filter_partition.uri:\n if not filter_nic:\n included = True\n elif ov.resource_uri == filter_nic.uri:\n included = True\n else:\n raise ValueError(\n \"Invalid resource class: {}\".format(resource_class))\n\n if included:\n filtered_object_values.append(ov)\n\n resource_classes = [f[0] for f in resource_filter]\n\n cmd_ctx.spinner.stop()\n print_object_values(filtered_object_values, mg_def, resource_classes,\n cmd_ctx.output_format, cmd_ctx.transpose)\n\n mc.delete()", "def metrics_group():", "def display_metric(metrics_to_print, results, num_refs, args):\n for metric, result in zip(metrics_to_print, results):\n if metric == 'bleu':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = bleu_signature(args, num_refs)\n print(result.format(args.width).replace('BLEU', 'BLEU+' + version_str))\n\n elif metric == 'chrf':\n if 
args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = chrf_signature(args, num_refs)\n print('chrF{0:d}+{1} = {2:.{3}f}'.format(args.chrf_beta, version_str, result.score, args.width))", "def list_groups(self):\n\n for counter, label in enumerate(self.exp_labels_list):\n print('Key {}: {} \\n'.format(str(counter), label))", "def output_metrics(self):\n print('')\n for key in sorted(self.metrics):\n print('{}:'.format(key), end='')\n for k, v in self.metrics[key].items():\n if type(v[-1]) is list:\n print('\\t' + k + ': ' + ''.join('{:5.3f} '.format(vs) for vs in v[-1]), end='')\n else:\n print('\\t{}: {:5.3f}'.format(k, v[-1]), end='')\n print('\\n', end='')", "def print_metrics(self):\n output = \"\"\n metrics = self.get_all_metrics()\n for k, v in metrics.items():\n # Print the help line\n output += \"\\n# HELP {name} {help}\\n\".format(name=v['name'],\n help=v['help'])\n # and the type line\n output += \"# TYPE {name} {type}\\n\".format(name=v['name'],\n type=v['type'])\n for sample in v['values']:\n labels = json.loads(sample, object_pairs_hook=OrderedDict)\n if v['type'] == 'histogram' and labels.get('le') == '_sum':\n labels.pop('le', None)\n mname = '{name}_sum'.format(name=v['name'])\n elif v['type'] == 'histogram' and labels.get('le') == '+Inf':\n labels.pop('le', None)\n mname = '{name}_count'.format(name=v['name'])\n elif v['type'] == 'histogram':\n mname = '{name}_bucket'.format(name=v['name'])\n else:\n mname = v['name']\n output += \"{name}{labels} {value}\\n\".format(name=mname,\n labels=self.format_labels(labels),\n value=self.format_value(v['values'][sample]))\n return output", "def print_list(self):\r\n print(\"Displaying each metric:\")\r\n print(\"======\")\r\n for metric in self.metrics:\r\n metric.whoami()\r\n print(\"======\")\r\n print(self.metrics)\r\n print(\"END\")\r\n print()", "def breakdown_resources(self):\n print('Resources breakdown:')\n headers = ['Faction', 'Power', 'Leech', 'Coins', 'Ore', 'Knowledge', 'QIC', 'Power Tokens']\n rows = []\n for faction, stats in self.faction_stats.items():\n rows.append([\n faction,\n stats.power,\n stats.leech,\n stats.coins,\n stats.ore,\n stats.knowledge,\n stats.qic,\n stats.pt,\n ])\n print(tabulate(rows, headers=headers))", "def print_item(group):\n print(\"\\tName: {}\".format(group.name))\n print(\"\\tId: {}\".format(group.id))\n print(\"\\tLocation: {}\".format(group.location))\n print(\"\\tTags: {}\".format(group.tags))\n if hasattr(group, 'properties'):\n print_properties(group.properties)", "def gather_groups_memory(output_mem):\n groups = get_memory_cgroups()\n p_table = prettytable.PrettyTable(\n ['Group',\n 'Resident Set Size (MiB)'\n ], caching=False)\n p_table.align = 'l'\n p_table.align['Resident Set Size (MiB)'] = 'r'\n\n # Get overall memory summary per group\n total_rss = 0.0\n for group in groups:\n for line in output_mem.split(\"\\n\"):\n if group + \"/memory.stat\" in line:\n total_rss += float(line.split()[1])\n rss_mem = mem_to_mebibytes(line.split()[1])\n MEMORY['cgroups'][group] = rss_mem\n p_table.add_row(\n [group,\n rss_mem or '-',\n ])\n break\n\n # Add overall rss memory\n MEMORY['cgroups']['total_rss'] = mem_to_mebibytes(total_rss)\n p_table.add_row(\n [\"Total cgroup-rss\",\n MEMORY['cgroups']['total_rss'] or '-',\n ])\n return p_table", "def print_item(group):\n print(\"\\tName: {}\".format(group.name))\n print(\"\\tId: {}\".format(group.id))\n if hasattr(group, 'location'):\n print(\"\\tLocation: {}\".format(group.location))\n 
print_properties(getattr(group, 'properties', None))", "def pprint(metrics: dict, fmt: str = \"katib\"):\n if fmt == \"katib\":\n # Format print to default Katib::StdOut to reduce need for additional user config\n # https://www.kubeflow.org/docs/components/katib/experiment/#metrics-collector\n print(\"epoch {}:\".format(metrics[\"epoch\"]))\n for k, v in metrics.items():\n if k == \"epoch\":\n continue\n print(\"{}={}\".format(k, v))\n print()\n else:\n print(metrics)", "def print_summary(metrics_list, labels_list):\n for metric, name in zip(metrics_list, labels_list):\n print('*' * 108)\n print(name)\n mean_inc_acc = []\n for i in range(metric.shape[0]):\n print('\\t', end='')\n for j in range(metric.shape[1]):\n print('{:5.2f}% '.format(100 * metric[i, j]), end='')\n if np.trace(metric) == 0.0:\n if i > 0:\n avg = 100 * metric[i, :i].mean()\n mean_inc_acc += [avg]\n print('\\tAvg.:{:5.2f}% '.format(avg), end='')\n else:\n avg = 100 * metric[i, :i + 1].mean()\n mean_inc_acc += [avg]\n print('\\tAvg.:{:5.2f}% '.format(avg), end='')\n print()\n print()\n\n # Computing AIA across all incremental states (thus excluding the first non-incremental state)\n print('\\tMean Incremental Acc.: {:5.2f}%'.format(np.mean(mean_inc_acc[1:])))\n print('*' * 108)", "def print_item(group):\n print(\"\\tName: {}\".format(group.name))\n print(\"\\tId: {}\".format(group.id))\n print(\"\\tLocation: {}\".format(group.location))\n print(\"\\tTags: {}\".format(group.tags))\n if hasattr(group, 'status'):\n print(\"\\tStatus: {}\".format(group.status))\n if hasattr(group, 'state'): # Site\n print(\"\\tStatus: {}\".format(group.state))\n if hasattr(group, 'properties'):\n print_properties(group.properties)\n print(\"\\n\\n\")", "def __show_all_metrics(self):\n for obj in self.metrics_list:\n self.__print_metrics_info(obj.get_name())\n print()", "def resource_group(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group\")", "def resource_group(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group\")", "def print_object_values_as_table(\n object_values_list, metric_group_definition, resource_classes,\n table_format, transposed):\n\n if object_values_list:\n metric_definitions = metric_group_definition.metric_definitions\n sorted_metric_names = [md.name for md in\n sorted(metric_definitions.values(),\n key=lambda md: md.index)]\n\n table = list()\n headers = list()\n for i, ov in enumerate(object_values_list):\n\n row = list()\n\n # Add resource names up to the CPC\n res = ov.resource\n while res:\n if i == 0:\n name_prop = res.manager.class_name + '-name'\n headers.insert(0, name_prop)\n row.insert(0, res.name)\n res = res.manager.parent # CPC has None as parent\n\n # Add the metric values\n for name in sorted_metric_names:\n if i == 0:\n m_def = metric_definitions[name]\n header_str = name\n if m_def.unit:\n header_str += u\" [{}]\".format(m_def.unit)\n headers.append(header_str)\n value = ov.metrics[name]\n row.append(value)\n\n table.append(row)\n\n # Sort the table by the resource name columns\n n_sort_cols = len(resource_classes)\n table = sorted(table, key=lambda row: row[0:n_sort_cols])\n\n if transposed:\n table.insert(0, headers)\n table = [list(col) for col in zip(*table)]\n headers = []\n\n if not table:\n click.echo(\"No {} resources with metrics data for metric group {}.\".\n format(metric_group_definition.resource_class,\n metric_group_definition.name))\n else:\n click.echo(tabulate(table, headers, tablefmt=table_format))", "def print_metrics(summary_metrics: Dict, title: 
str = 'Metrics'):\n\n print('=' * (len(title) + 1))\n print(title + ':')\n\n print('DeepCP metrics:{:.4f}(rot-rmse) | {:.4f}(rot-mae) | {:.4g}(trans-rmse) | {:.4g}(trans-mae)'.\n format(summary_metrics['r_rmse'], summary_metrics['r_mae'],\n summary_metrics['t_rmse'], summary_metrics['t_mae'],\n ))\n print('Rotation error {:.4f}(deg, mean) | {:.4f}(deg, rmse)'.\n format(summary_metrics['err_r_deg_mean'],\n summary_metrics['err_r_deg_rmse']))\n print('Translation error {:.4g}(mean) | {:.4g}(rmse)'.\n format(summary_metrics['err_t_mean'],\n summary_metrics['err_t_rmse']))\n print('RPM Chamfer error: {:.7f}(mean-sq)'.\n format(summary_metrics['chamfer_dist']))\n print('Source error: {:.7f}(mean-sq)'.\n format(summary_metrics['pcab_dist']))\n print('Clip Chamfer error: {:.7f}(mean-sq)'.\n format(summary_metrics['clip_chamfer_dist']))", "def print_group_summary(self, groups, group_names=None, detailed=False, tablefmt='jira'):\n output = self.generate_group_summary_table(groups, group_names)\n\n \"\"\"Group patches table\"\"\"\n group_patches = output['group_patches']\n headers = [\"Group Patches\",] + group_patches.columns.tolist()\n print()\n print(tabulate(group_patches, headers=headers, tablefmt=tablefmt))\n print()\n\n \"\"\"Group slides table\"\"\"\n group_slides = output['group_slides']\n headers = [\"Group Slides\",] + group_slides.columns.tolist()\n print()\n print(tabulate(group_slides, headers=headers, tablefmt=tablefmt))\n print()\n\n \"\"\"Group patients table\"\"\"\n group_patients = output['group_patients']\n headers = [\"Group Patients\",] + group_patients.columns.tolist()\n print()\n print(tabulate(group_patients, headers=headers, tablefmt=tablefmt))\n print()\n\n if detailed:\n print(\"Patient Patches\")\n for group_name, tally in output['patient_patches'].items():\n headers = [group_name,] + tally.columns.tolist()\n print()\n print(tabulate(tally, headers=headers, tablefmt=tablefmt))\n print()\n \n print(\"Patient Slides\")\n for group_name, tally in output['patient_slides'].items():\n headers = [group_name,] + tally.columns.tolist()\n print()\n print(tabulate(tally, headers=headers, tablefmt=tablefmt))\n print()\n \n print(\"Slide Patches\")\n for group_name, tally in output['slide_patches'].items():\n headers = [group_name,] + tally.columns.tolist()\n print()\n print(tabulate(tally, headers=headers, tablefmt=tablefmt))\n print()\n return output", "def print_metric_dict(self, metric_dict):\n print(\"\".join([\" {}: {:4f},\".format(k, v) for k, v in metric_dict.items()]))", "def print_all():\n\n for i, context in enumerate(CONTEXT_GROUP):\n print('Group #{0:d}'.format(i))\n\n charmap = context[\"charmap\"]\n assert charmap is None or isinstance(charmap, dict)\n\n for j, item in enumerate(StringGeneratorPascalStyle(context)):\n text = process_dakuten(get_text(item[1], charmap, None))\n print('{index:04X}:{address:06X}:{data}'.format(\n index=j,\n address=item[0],\n data=text))", "def cute_output(insights_request):\n json_report = insights_request.get_insights()\n\n if not json_report:\n print('Error ocurred, unable to print!!!')\n else:\n for groups in json_report:\n print('GROUP: ' + groups['display_name'])\n for systems in groups['systems']:\n print('\\n\\t\\t Host name: ' + systems['hostname'])\n print('\\n\\t\\t Product: ' + systems['product'])\n print('\\n\\t\\t Type: ' + systems['type'])\n print('\\n\\t\\t Registered at Insights: ' + systems['created_at'])\n print('\\n\\t\\t Last checked at Insights: ' + systems['last_check_in'] + '\\n\\n')", "def 
print_object_values_as_json(\n object_values_list, metric_group_definition, resource_classes):\n\n if object_values_list:\n metric_definitions = metric_group_definition.metric_definitions\n sorted_metric_names = [md.name for md in\n sorted(metric_definitions.values(),\n key=lambda md: md.index)]\n\n json_obj = list()\n for i, ov in enumerate(object_values_list):\n\n resource_obj = OrderedDict()\n\n # Add resource names up to the CPC\n res = ov.resource\n while res:\n name_prop = res.manager.class_name + '-name'\n resource_obj[name_prop] = res.name\n res = res.manager.parent # CPC has None as parent\n\n # Add the metric values\n for name in sorted_metric_names:\n m_def = metric_definitions[name]\n value = ov.metrics[name]\n resource_obj[name] = OrderedDict(value=value, unit=m_def.unit)\n\n json_obj.append(resource_obj)\n\n json_str = json.dumps(json_obj)\n click.echo(json_str)", "def printUsersInGroup(group) -> None:\n click.echo(tabulate(listUsersInDict(group), headers=\"keys\", tablefmt=\"grid\"))", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Metric Definition ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-info:\", self.info, sep='')", "def format_to_string(groups: List[List]) -> str:\n\tgroups_str = \"\"\n\tcount = 1\n\tfor group in groups:\n\t\tgroups_str += f\"Group {count}: {group} \\n\"\n\t\tcount += 1\n\treturn groups_str", "def test_get_resource_group_list(self):\n pass", "def format_metrics(metrics, split):\n result = format_partial_metrics(metrics, split)\n result += '\\n'\n result += format_partial_metrics(metrics, split, extra='_r')\n return result", "def list_groups(args):\n\n for group in get_groups(args):\n print(group)", "def groups(self):\n groups_text = '\\n'\n for group in self.exercise_numbers:\n txt = ' %s:\\t' % group[0]\n for exercise in group[1:]:\n if isinstance(exercise, int):\n txt += '%d. ' % exercise\n else:\n txt += '\\n\\t%s\\n\\t' % exercise\n groups_text += txt + '\\n'\n return groups_text", "def print_scores(result_collector):\n # print(\"\\n# Metric: Cohen's kappa\")\n # result_collector.set_metric(['k_cohen', 'k'])\n # result_collector.print_all_results()\n print(\"\\n# Metric: Macro avg. 
F1\")\n result_collector.set_metric([\"macro_avg\", \"fscore\"])\n # result_collector.print_all_results()\n result_collector.print_result_for_level(\"cc\")\n result_collector.print_result_for_level(\"ro\", print_header=False)\n result_collector.print_result_for_level(\"fu\", print_header=False)\n result_collector.print_result_for_level(\"at\", print_header=False)\n\n # print(\"\\nMetric: Positive attachment F1\")\n # result_collector.set_metric(['classwise', '1', 'fscore'])\n # result_collector.print_result_for_level('at')\n print(\"\\n# Metric: Labelled attachment score\")\n result_collector.set_metric([\"accuracy\"])\n result_collector.print_result_for_level(\"lat\")", "def _print_summary(data, metric):\n\n print(u'Cortical thickness {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data[:, 0].mean(), data[:, 0].std(ddof=1),\n data[:, 0].min(), data[:, 0].max()))\n print('Other modalities {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data[:, 1:].mean(), data[:, 1:].std(ddof=1),\n data[:, 1:].min(), data[:, 1:].max()))\n print('Overall {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data.mean(), data.std(ddof=1),\n data.min(), data.max()))", "def print_metrics(result):\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '------------------------------------------------')\n logging.log(LOG_LEVEL_OUTPUT_INFO, ' KEY METRICS: ')\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '------------------------------------------------')\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* pages_count: %d',\n get_counter_metric(result, 'pages_count'))\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_count: %d',\n get_counter_metric(result, 'revisions_count'))\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* very_long_page_histories_count: %d',\n get_counter_metric(result, 'very_long_page_histories_count'))\n revisions_per_page_distr = get_distributions_metric(\n result, 'revisions_per_page_distr')\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_per_page_distr.mean: %d',\n revisions_per_page_distr.mean)\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_per_page_distr.sum: %d',\n revisions_per_page_distr.sum)\n cumulative_page_rev_size_distr = get_distributions_metric(\n result, 'cumulative_page_rev_size_distr')\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '* cumulative_page_rev_size_distr.mean: %d',\n cumulative_page_rev_size_distr.mean)\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* cumulative_page_rev_size_distr.sum: %d',\n cumulative_page_rev_size_distr.sum)", "def test_get_resource_group_by_moid(self):\n pass", "def collect_resources_list(namespace, output_dir, k8s_cli, mode):\n selector = \"\"\n if mode == MODE_RESTRICTED:\n selector = '--selector=\"{}\"'.format(OPERATOR_LABEL)\n collect_helper(output_dir,\n cmd=\"{} get all -o wide -n {} {}\".format(k8s_cli, namespace, selector),\n file_name=\"resources_list\",\n resource_name=\"resources list\",\n namespace=namespace)", "def metrics(_):\r\n collector = BuildsCollector()\r\n build_metrics, headers = collector.get_metrics_table()\r\n print(tabulate(build_metrics, headers=headers))", "def print_metric(self):\r\n print(f'\\n\\n{self.sort} metric of size {self.n}')\r\n print(f'algorithm: {self.algo}')\r\n print(f'number of comparisons: {self.comps}')\r\n print(f'number of exchanges: {self.exs}')\r\n print(f'regression equation for comparisons: {self.comp_eq}')\r\n print(f'regression equation for exchanges: {self.ex_eq}')\r\n print(f'presorted data: {self.predata}')\r\n print(f'postsorted data: {self.postdata}')", "def collect_api_resources_description(namespace, 
output_dir, k8s_cli, api_resources, selector=\"\",\n collect_empty_files=False):\n logger.info(\"Namespace '%s': Collecting API resources description\", namespace)\n resources_out = OrderedDict()\n message = f\"Namespace '{namespace}': no resources of type %s is found\" if not selector else \\\n f\"Namespace '{namespace}': no {extract_label(selector)} labeled resources of type %s is found\"\n\n message = f\"{message}, skip collecting empty log file\"\n for resource in api_resources:\n if resource == \"Namespace\":\n output = describe_resource(namespace, resource, k8s_cli, resource_name=namespace)\n elif resource in NON_LABELED_RESOURCES:\n output = describe_resource(namespace, resource, k8s_cli)\n else:\n output = describe_resource(namespace, resource, k8s_cli, selector)\n if output:\n if check_empty_desc_file(output) and not collect_empty_files:\n logger.info(message, resource)\n else:\n resources_out[resource] = output\n logger.info(\"Namespace: '%s' + Collected %s\", namespace, resource)\n if selector:\n # collect PV resource\n collect_persistent_volume_description(namespace, k8s_cli, resources_out, KUBCTL_DESCRIBE_RETRIES)\n for entry, out in resources_out.items():\n with open(os.path.join(output_dir,\n \"{}.txt\".format(entry)), \"w+\", encoding='UTF-8') as file_handle:\n file_handle.write(out)", "def report(drink, resources):\n print(f\" Coffee: {resources['coffee']}gm\")\n print(f\" Water: {resources['water']}ml\")\n print(f\" Milk: {resources['water']}ml\")", "def display_results(self, metrics):\n\n for k, v in self.cv_results_.items():\n# sorted_results = sort_results(v)\n print(f'Results for {k} metric:')\n print()\n print(v.sort_values(by=['Metric mean'], ascending=False))\n print()", "def display(self):\n print \"\\n\\n***********************\\n\"\n print \"Info about group %s, name=%s, path=%s\" % (self.sdef['id'], \n self.name, self.path)\n print \"sdef=\"\n pp.pprint(self.sdef)\n print \"expanded_def=\"\n pp.pprint (self.expanded_def)\n print \"includes=\"\n pp.pprint (self.includes)\n print \"parent_attributes=\"\n pp.pprint (self.parent_attributes)\n print \"attributes=\"\n pp.pprint (self.attributes)\n print \"mstats=\"\n pp.pprint (self.mstats)", "def __repr__(self):\n return str(self.group)", "def capacitygroup_show(cmd_ctx, cpc, capacitygroup):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_show(cmd_ctx, cpc, capacitygroup))", "def printOptions():\n\n # For each group, create a group option\n print(\"default\")", "def display(global_step, step, scaler_sum_list,\r\n name_list, collection,\r\n summary_val=None, summary_writer=None):\r\n print('[step: {}]'.format(global_step), end='')\r\n for val, name in zip(scaler_sum_list, name_list):\r\n print(' {}: {:.4f}'.format(name, val * 1. / step), end='')\r\n print('')\r\n if summary_writer is not None:\r\n s = tf.Summary()\r\n for val, name in zip(scaler_sum_list, name_list):\r\n s.value.add(tag='{}/{}'.format(collection, name),\r\n simple_value=val * 1. 
/ step)\r\n summary_writer.add_summary(s, global_step)\r\n if summary_val is not None:\r\n summary_writer.add_summary(summary_val, global_step)", "def print_group_message(group, contact, message):\n print(f\"{group}: {contact}: {message}\")", "def display_collection_by_title_table(resource_list):\n \n # Obtain sorted resource_list\n resource_list = sort_collection_by_title(resource_list)\n \n # Display type\n print(\"\\nBOOKS:\")\n print(\"======\")\n \n # Display column names\n print(\"{:7s} {:30s} {:20s} {:11s} {:9s} {:5s} {:8s} {:14s}\"\\\n \" {:9s} {:18s} {:20s}\"\n .format(\"UID\", \"Title\", \"Creator\", \"Genre\", \"Language\", \"Year\", \n \"Country\", \"Publisher\", \"City\", \"Category\", \n \"Keywords\"))\n \n # Display book resources\n for resource in resource_list:\n \n if resource.resource_type == \"book\":\n\n print(\"{:<7d} {:30s} {:20s} {:11s} {:9s} {:<5d} {:8s} {:14s} \"\\\n \"{:9s} {:18s} {:20s}\"\n .format(resource.get_uid(), resource.title[:29], \n resource.creator.get_full_name(), resource.genre[:10], \n resource.language[:8], resource.year, \n resource.country, resource.publisher[:13], \n resource.city, resource.category,\n resource.get_keyword_string()))\n\n # Display type\n print(\"\\nMOVIES:\")\n print(\"=======\")\n \n # Display column names\n print(\"{:7s} {:30s} {:20s} {:11s} {:9s} {:5s} {:8s} {:7s} {:35s} {:20s}\"\n .format(\"UID\", \"Title\", \"Creator\", \"Genre\", \"Language\", \"Year\", \n \"Country\", \"Rating\", \"Writers\", \"Keywords\"))\n \n # Display movie resources\n for resource in resource_list:\n \n if resource.resource_type == \"movie\":\n \n print(\"{:<7d} {:30s} {:20s} {:11s} {:9s} {:<5d} {:8s} {:7s} \"\\\n \"{:35s} {:20s}\"\n .format(resource.get_uid(), resource.title, \n resource.creator.get_full_name(), \n resource.genre, resource.language[:8], resource.year, \n resource.country, resource.rating, \n resource.get_names_string(resource.writers)[:35], \n resource.get_keyword_string()))", "def writeGroup(fil, elems):\n fil.write(' ELEMENT GROUP 2.2.30\\n')\n fil.write('GROUP:%11d ELEMENTS:%11d MATERIAL:%11d NFLAGS:%11d\\n' % (1,shape(elems)[0],2,1))\n fil.write('%32s\\n' %'fluid')\n fil.write('%8d\\n' %0)\n n = shape(elems)[0]/10\n for i in range(n):\n fil.write('%8d%8d%8d%8d%8d%8d%8d%8d%8d%8d\\n' %(10*i+1,10*i+2,10*i+3,10*i+4,10*i+5,10*i+6,10*i+7,10*i+8,10*i+9,10*i+10))\n for j in range(shape(elems)[0]-10*n):\n fil.write('%8d' %(10*n+j+1))\n fil.write('\\n')\n fil.write('ENDOFSECTION\\n')", "def print_status(metrics, step, metric_names=[]):\n printf = {'train': '', 'valid': ''}\n values = {'train': [], 'valid': []} \n\n for name in metric_names:\n for mode in ['train', 'valid']:\n printf[mode] += '- %s : %s ' % (name, '%0.4f')\n values[mode].append(metrics[mode][name])\n\n printf = '%s | TRAIN %s | VALID %s' % ('%07i', printf['train'], printf['valid'])\n values = [step] + values['train'] + values['valid']\n\n print(printf % tuple(values), end='\\r')", "def PrintResource(resource):\n print resource.resource_id.text, resource.GetResourceType()", "def collect_resources(namespace, output_dir, api_resources, k8s_cli_input=\"\", selector=\"\"):\n set_file_logger(output_dir)\n k8s_cli = detect_k8s_cli(k8s_cli_input)\n ns_output_dir = os.path.join(output_dir, namespace)\n make_dir(ns_output_dir)\n collect_api_resources(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_api_resources_description(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_pods_logs(namespace, ns_output_dir, k8s_cli, 
logs_from_all_pods=True)", "def resource_group(self) -> str:\n return pulumi.get(self, \"resource_group\")", "def _format_help_console(self):\n formatter = self._get_formatter()\n formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups)\n formatter.add_text(self.description)\n for action_group in self._sorted_groups():\n title = ' '.join(x[0].upper() + x[1:] for x in action_group.title.split())\n formatter.start_section(title)\n formatter.add_text(action_group.description)\n formatter.add_arguments(sorted(action_group._group_actions, key=attrgetter('option_strings')))\n formatter.end_section()\n formatter.add_text(self.epilog)\n return formatter.format_help()", "def mysummary(self):\n return self.sprintf(\"IGMPv3 Group Record %IGMPv3gr.type% %IGMPv3gr.maddr%\")", "def generate_report(problem: Problem, metrics: Dict[SolverFactory, Dict[str, Metric]]):\n timestamp = datetime.datetime.now().strftime(\"%m-%d_%H-%M-%S\")\n problem_path = os.path.join(get_rootdir(), \"reports\", str(problem))\n\n m_groups = defaultdict(list)\n\n for sf, ms_dict in metrics.items():\n for key, m in ms_dict.items():\n m.discard_warmup(0.15)\n m_groups[key].append(m)\n\n for key, ms in m_groups.items():\n plot_group(\n ms,\n f\"{problem_path}@{timestamp}\",\n name=key,\n stdev_factor=0.1,\n smoothen=False,\n )", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Physical Resource ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-info:\", self.info, sep='')\n print(indent, \"|-IP address:\", self.IP_address, sep='')\n print(indent, \"|-MAC address:\", self.MAC_address, sep='')", "def collect_metrics(grouped_samples, projroot, tgtdir, ext, grouping=\"sample\"):\n metrics = []\n for item_id, itemlist in grouped_samples.items():\n item = itemlist[0]\n # FIXME: tgtdir should be docroot!\n pfx = os.path.relpath(itemlist[0].prefix(grouping), os.path.dirname(tgtdir))\n mfile = glob.glob(pfx + \".*\" + ext)\n if mfile:\n metrics.append((item_id, mfile[0]))\n return PicardMetricsCollection(metrics)", "def __str__(self):\n columns = list(self.metrics.keys())\n columns.sort()\n out = '%s\\n' % ','.join(columns)\n values = [str(self.metrics[c]) for c in columns]\n out += '%s\\n' % ','.join(values)\n return out", "def print_stats():\n if spritegroup_stats[0] > 0:\n generic.print_info(\"Concurrent spritegroups: {}/{} ({})\".format(spritegroup_stats[0], total_action2_ids, str(spritegroup_stats[1])))\n if a2register_stats[0] > 0:\n generic.print_info(\"Concurrent Action2 registers: {}/{} ({})\".format(a2register_stats[0], total_tmp_locations, str(a2register_stats[1])))", "def _emit_group_build_lines(self, basename: str) -> list[str]:\n del basename # Unused.\n out: list[str] = []\n if not self.targets:\n return out\n all_dsts = set()\n for target in self.targets:\n all_dsts.add(target.dst)\n out.append(\n \"# Add this section's targets to the overall resources target.\\n\"\n 'resources: \\\\\\n '\n + ' \\\\\\n '.join(\n dst.replace(' ', '\\\\ ') for dst in sorted(all_dsts)\n )\n + '\\n'\n )\n return out", "def get_metrics(metric_groups):\n return sorted(m for g in metric_groups for m in INSTANCE_METRIC_GROUP_MAP[g])", "def show_group(self, _, group):\n items = []\n for id in self.execution_manager.get_jobs(group):\n job = self.execution_manager.get(id)\n if job.retries > 0:\n items.append((\"{}\".format(job), self.show_job_details, id, ('retried job', 'retried job select')))\n else:\n 
items.append((\"{}\".format(job), self.show_job_details, id))\n\n menu_key = \"Jobs {}\".format(group)\n self.menu_structure[menu_key] = (\"jobs\", items)\n self.show_menu(None, menu_key)", "def record_metrics_header(metric_list, output_file_name):\n with open(output_file_name, 'w') as file:\n # writting each metric on the header\n file.write(\",\".join(metric_list)+\"\\n\")", "def generate_latest(registry=Registry):\n\n def sample_line(line, metric_type):\n if line.labels:\n labelstr = '{{{0}}}'.format(','.join(\n ['{0}=\"{1}\"'.format(\n k, v.replace('\\\\', r'\\\\').replace('\\n', r'\\n').replace('\"', r'\\\"'))\n for k, v in sorted(line.labels.items())]))\n else:\n labelstr = ''\n timestamp = ''\n if line.timestamp is not None:\n # Convert to milliseconds.\n timestamp = ' {0:d}'.format(int(float(line.timestamp) * 1000))\n name = line.name\n if metric_type == 'counter' and name.endswith('_total'):\n name = name[:-6]\n return '{0}{1} {2}{3}\\n'.format(\n name, labelstr, int(line.value), timestamp)\n\n output = []\n for metric in registry.collect():\n try:\n mname = metric.name\n mtype = metric.type\n # Munging from OpenMetrics into Prometheus format.\n if mtype == 'counter':\n mname = mname\n elif mtype == 'info':\n mname = mname + '_info'\n mtype = 'gauge'\n elif mtype == 'stateset':\n mtype = 'gauge'\n elif mtype == 'gaugehistogram':\n # A gauge histogram is really a gauge,\n # but this captures the structure better.\n mtype = 'histogram'\n elif mtype == 'unknown':\n mtype = 'untyped'\n help_str = '# HELP {0} {1}\\n'.format(mname, metric.documentation.replace('\\\\', r'\\\\').replace('\\n', r'\\n'))\n if 'Multiprocess' not in help_str:\n continue\n output.append('# HELP {0} {1}\\n'.format(\n mname, metric.documentation.replace('\\\\', r'\\\\').replace('\\n', r'\\n')))\n output.append('# TYPE {0} {1}\\n'.format(mname, mtype))\n\n for s in metric.samples:\n for suffix in ['_created', '_gsum', '_gcount']:\n if s.name == metric.name + suffix:\n break\n else:\n line = sample_line(s, mtype)\n if not line:\n continue\n output.append(line)\n except Exception as exception:\n exception.args = (exception.args or ('',)) + (metric,)\n raise\n\n return ''.join(output).encode('utf-8')", "def __repr__(self, prefixes=None):\n if prefixes is None:\n prefixes = []\n prefix = \"groups=%s\" % repr(self._groups)\n return super(TreeClassifier, self).__repr__([prefix] + prefixes)", "def print_group(self, k, n):\n self.votation_k = k\n self.votation_n = n\n print_group(k, n, self.data.votation_clusters)", "def get_report(dataset):\n\n dataset = dataset.round(2)\n print('Overall results (mean): ')\n display(dataset[['classifier', 'preprocessor', 'f1', 'precision', 'recall']].groupby(['preprocessor', 'classifier'])\n .mean().round(2))\n print('Overall results (max): ')\n display(dataset[['classifier', 'preprocessor', 'f1', 'precision', 'recall']].groupby(['preprocessor', 'classifier'])\n .max().round(2))\n print('Grouped by Preprocessor (mean):')\n display(dataset[['preprocessor', 'f1', 'precision', 'recall']].groupby('preprocessor').mean().round(2))\n print('Grouped by Classifier (mean):')\n display(dataset[['classifier', 'f1', 'precision', 'recall']].groupby('classifier').mean().round(2))\n\n preprocessors = dataset['preprocessor'].unique()\n metrics = ['f1', 'precision', 'recall']\n\n # For each metric, display top 10 rounds.\n for m in metrics:\n print(f'Top 10 by {m}:')\n display(dataset.sort_values(m, ascending=False).head(10).round(2))\n\n for p in preprocessors:\n for m in metrics:\n d = 
dataset[dataset['preprocessor'] == p]\n for c in dataset['classifier'].unique():\n plt.plot(d[d['classifier'] == c]['prior'].unique(), d[d['classifier'] == c].groupby('prior').mean()[m],\n label=str(c))\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)\n plt.title(m + ' - ' + str(p))\n plt.show()", "def get_resources_output(name: Optional[pulumi.Input[Optional[str]]] = None,\n required_tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,\n resource_group_name: Optional[pulumi.Input[Optional[str]]] = None,\n type: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetResourcesResult]:\n ...", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def _print_row_list(self, resource_wrappers):\n object_count = total_bytes = 0\n for i, resource_wrapper in enumerate(resource_wrappers):\n resource_wrapper_string = str(resource_wrapper)\n if not resource_wrapper_string:\n continue\n if i == 0 and resource_wrapper and resource_wrapper_string[0] == '\\n':\n # First print should not begin with a line break, which can happen\n # for headers.\n print(resource_wrapper_string[1:])\n else:\n print(resource_wrapper_string)\n\n if isinstance(resource_wrapper.resource,\n resource_reference.ObjectResource):\n # For printing long listing data summary.\n object_count += 1\n total_bytes += resource_wrapper.resource.size or 0\n\n if (self._display_detail in (DisplayDetail.LONG, DisplayDetail.FULL) and\n not self._only_display_buckets):\n # Long listing needs summary line.\n print('TOTAL: {} objects, {} bytes ({})'.format(\n object_count, int(total_bytes),\n shim_format_util.get_human_readable_byte_value(\n total_bytes, self._use_gsutil_style)))", "def print_labels(self,labels):\n\t\tfor key in labels:\n\t\t\tprint key, ':\\t', labels[key]", "def printing():\r\n document.add_heading('Printing Service details', 1)\r\n\r\n printing_metrics = ['customproperties',\r\n 'workingSetSizeHiPct',\r\n 'logVerbosityAuditActivity',\r\n 'logVerbosityService',\r\n 'hostname',\r\n 'tags']\r\n\r\n printnodes = get_qlik_sense.get_printing()\r\n num_of_nodes = len(printnodes)\r\n num_of_print_metrics = len(printing_metrics)\r\n table = document.add_table(rows=num_of_print_metrics+1, cols=num_of_nodes+1)\r\n table.style = 'Grid 
Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'Metric'\r\n for item in range(0, num_of_nodes):\r\n row.cells[item+1].text = printnodes[item][6]\r\n for item in range(num_of_print_metrics):\r\n row = table.rows[item+1]\r\n row.cells[0].text = str(printing_metrics[item])\r\n for printnode in range(num_of_nodes):\r\n row.cells[printnode+1].text = str(printnodes[printnode][item])\r\n\r\n document.add_page_break()", "def get_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n group = client.get_group(group_id)\n\n group_readable, group_outputs = parse_outputs(group)\n human_readable = tableToMarkdown(name=\"Groups:\", t=group_readable,\n headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',\n 'Security Enabled', 'Visibility'],\n removeNull=True)\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(obj.ID === {group_id})': group_outputs}\n return human_readable, entry_context, group", "def printInfo(matrix):\n\n print(\"Groups:\")\n for group in matrix.matrix.group_labels:\n print(\"\\t{0}\".format(group))\n\n print(\"Samples:\")\n for sample in matrix.matrix.sample_labels:\n print(\"\\t{0}\".format(sample))", "def make_describe_attrs(self):\n if self.all_groups:\n self.result.append((NEWLINE, \"\\n\"))\n self.result.append((INDENT, \"\"))\n\n for group in self.all_groups:\n if group.name:\n self.result.extend(self.tokens.make_describe_attr(group.kls_name))", "def _create_flattened_csv_from_resources(mapping):\n data = [{\n \"resource_id\": resource[\"resource_id\"],\n \"resource_type\": resource[\"resource_type\"],\n \"resource_name\": resource.get(\"resource_name\", \"\"),\n \"sg_id\": security_group[\"sg_id\"],\n \"sg_name\": security_group[\"sg_name\"]}\n for resource in mapping\n for security_group in resource[\"sg_attached\"]]\n header = [\"resource_id\", \"resource_type\", \"resource_name\", \"sg_id\", \"sg_name\"]\n print _generate_csv(data, header)[:-1]", "def log_group_arn(self) -> str:\n ...", "def get_formatter(self, group):\n return getattr(self, \"format_\" + group + \"_standings\")", "def collect_api_resources(namespace, output_dir, k8s_cli, api_resources, selector=\"\", collect_empty_files=False):\n logger.info(\"Namespace '%s': Collecting API resources\", namespace)\n resources_out = OrderedDict()\n message = f\"Namespace '{namespace}': no resources of type %s is found\" if not selector else \\\n f\"Namespace '{namespace}': no {extract_label(selector)} labeled resources of type %s is found\"\n\n message = f\"{message}, skip collecting empty log file\"\n for resource in api_resources:\n if resource == \"Namespace\":\n output = run_get_resource_yaml(namespace, resource, k8s_cli, resource_name=namespace)\n elif resource in NON_LABELED_RESOURCES:\n output = run_get_resource_yaml(namespace, resource, k8s_cli)\n else:\n output = run_get_resource_yaml(namespace, resource, k8s_cli, selector)\n if output:\n if check_empty_yaml_file(output) and not collect_empty_files:\n logger.info(message, resource)\n else:\n resources_out[resource] = output\n logger.info(\"Namespace '%s': + Collected %s\", namespace, resource)\n if selector:\n # collect PV resource\n collect_persistent_volume(namespace, k8s_cli, resources_out, \"get\", KUBCTL_GET_YAML_RETRIES)\n for entry, out in resources_out.items():\n with open(os.path.join(output_dir,\n \"{}.yaml\".format(entry)), \"w+\", encoding='UTF-8') as file_handle:\n file_handle.write(out)", "def print_spec(self, spec):\n if spec == 'summary':\n print 
\"{}\\n{}\\n{}\\n{}\\n{}\".format(self.avgtime, self.avgcpu, self.avgram, self.maxram, self.maxcpu)\n else:\n print \"{}\".format(getattr(self, spec))", "def report(self):\n print(f\"Water: {self.resources['water']}ml\")\n print(f\"Milk: {self.resources['milk']}ml\")\n print(f\"Coffee: {self.resources['coffee']}g\")", "def group_describe(self, group):\n mapped = self.map_vects(datanorm)\n mappednp= np.array(mapped)\n \n groups= mappednp[:,0]\n data['Group'] = pd.Series(groups, index=data.index)\n print(data[data['Group']==group].describe())", "def PrintPortSetSummary(pset, space = 0):\n out_str = \"\"\n show_kmsg_summary = False\n if config['verbosity'] > vHUMAN :\n show_kmsg_summary = True\n\n local_name = FindEntryName(pset, space)\n setid = 0\n if pset.ips_object.io_bits & 0x80000000:\n setid = pset.ips_messages.data.pset.setq.wqset_id\n out_str += \"{0: #019x} {1: #019x} {2: <7s} {3: #011x} {4: <4s} {5: >6d} {6: #019x} \".format(\n unsigned(pset), addressof(pset.ips_messages), ' '*7,\n local_name, \"ASet\",\n pset.ips_object.io_references,\n local_name)\n\n else:\n out_str += \"{0: #019x} {1: #019x} {2: <7s} {3: #011x} {4: <4s} {5: >6d} {6: #019x} \".format(\n unsigned(pset), addressof(pset.ips_messages), ' '*7,\n local_name, \"DSet\",\n pset.ips_object.io_references,\n local_name)\n print out_str\n\n if setid != 0 and space != 0:\n PrintPortSetMembers(space, setid, show_kmsg_summary)\n\n return", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def print_option_set(option_set, leader):\n for option in option_set:\n labels = \",\".join(option['labels'])\n option_set = leader + labels + \" \"*(20-len(labels)) + \"- \" + option['description']\n print(option_set)", "def _print_aggregate_results(\n task: Task, task_results: Dict[Task, List[List[Dict[str, Any]]]]\n) -> None:\n aggregate_task_result = aggregate_nvs_results(task_results[task])\n print(\"\")\n print(f\"Aggregate results for task={task}:\")\n pretty_print_nvs_metrics(aggregate_task_result)\n print(\"\")", "def display_count_grouped_by_genre():\n dict_of_genre = reports.count_grouped_by_genre(filename)\n print(\"Game grouped by genre:\")\n for genre, value in dict_of_genre.items():\n print(\"{}: {}\".format(genre, value))\n print()", "def list_namespaced_group(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/groups'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n 
select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1GroupList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response" ]
[ "0.751741", "0.63145715", "0.5910002", "0.59023666", "0.59002185", "0.5885719", "0.5866339", "0.5861136", "0.58524024", "0.5828134", "0.57840455", "0.5753333", "0.57369936", "0.57281035", "0.57204556", "0.56444025", "0.56444025", "0.557295", "0.5570822", "0.5553722", "0.5543906", "0.55372334", "0.5524341", "0.5516915", "0.5503684", "0.5485859", "0.54187703", "0.5413898", "0.5385572", "0.5369951", "0.5321004", "0.5294975", "0.5265492", "0.522939", "0.52230847", "0.52131426", "0.521242", "0.5200982", "0.51993823", "0.51870066", "0.5181296", "0.5178932", "0.5174772", "0.5170145", "0.51575077", "0.5152209", "0.51505125", "0.51310545", "0.5119774", "0.51110727", "0.51100916", "0.5073047", "0.50659937", "0.5043972", "0.5028275", "0.5027792", "0.5020704", "0.5017757", "0.5009204", "0.5006134", "0.49810097", "0.498086", "0.49784806", "0.497726", "0.49743003", "0.49723667", "0.49703738", "0.49660864", "0.49651265", "0.49558285", "0.49558285", "0.49558285", "0.49558285", "0.49558285", "0.49558285", "0.49558285", "0.49558285", "0.49558285", "0.49558285", "0.49558285", "0.49558285", "0.49520272", "0.49489787", "0.49256662", "0.49237573", "0.49228567", "0.49150953", "0.49118325", "0.49108964", "0.49050713", "0.49039754", "0.48999172", "0.48901537", "0.48875326", "0.48857656", "0.4884135", "0.4875791", "0.4873056", "0.48729855", "0.48719922" ]
0.48897582
93
Print a list of object values in a tabular output format.
def print_object_values_as_table(
        object_values_list, metric_group_definition, resource_classes,
        table_format, transposed):
    if object_values_list:
        metric_definitions = metric_group_definition.metric_definitions
        sorted_metric_names = [md.name for md in
                               sorted(metric_definitions.values(),
                                      key=lambda md: md.index)]
        table = list()
        headers = list()
        for i, ov in enumerate(object_values_list):
            row = list()
            # Add resource names up to the CPC
            res = ov.resource
            while res:
                if i == 0:
                    name_prop = res.manager.class_name + '-name'
                    headers.insert(0, name_prop)
                row.insert(0, res.name)
                res = res.manager.parent  # CPC has None as parent
            # Add the metric values
            for name in sorted_metric_names:
                if i == 0:
                    m_def = metric_definitions[name]
                    header_str = name
                    if m_def.unit:
                        header_str += u" [{}]".format(m_def.unit)
                    headers.append(header_str)
                value = ov.metrics[name]
                row.append(value)
            table.append(row)
        # Sort the table by the resource name columns
        n_sort_cols = len(resource_classes)
        table = sorted(table, key=lambda row: row[0:n_sort_cols])
        if transposed:
            table.insert(0, headers)
            table = [list(col) for col in zip(*table)]
            headers = []
        if not table:
            click.echo("No {} resources with metrics data for metric group {}.".
                       format(metric_group_definition.resource_class,
                              metric_group_definition.name))
        else:
            click.echo(tabulate(table, headers, tablefmt=table_format))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table_print_object(self, obj_name, header, values_list):\n def terminal_size():\n import fcntl, termios, struct\n h, w, hp, wp = struct.unpack('HHHH',\n fcntl.ioctl(0, termios.TIOCGWINSZ,\n struct.pack('HHHH', 0, 0, 0, 0)))\n return h, w\n\n labels_value = header\n rows_list = values_list\n\n height, width_value = terminal_size()\n if labels_value:\n width_value = (width_value / len(labels_value)) + 5\n var1 = indent([labels_value]+rows_list, has_header=True, separate_rows=True,\n prefix=' ', postfix=' ', header_char= '-', delimit=' ',\n wrapfunc=lambda x: wrap_onspace_strict(x, width_value))\n\n return var1\n elif rows_list:\n width_value = (width_value / len(rows_list[0])) + 5\n var1 = indent(rows_list, has_header=False, separate_rows=True,\n prefix=' ', postfix=' ', header_char= '-', delimit=' ',\n wrapfunc=lambda x: wrap_onspace_strict(x, width_value))\n return var1\n else:\n print('No Data To Display for %s' % obj_name)\n return 0", "def print_tabulated_output(array_obj, headers):\n print()\n print(tabulate(array_obj, headers=headers))\n print()", "def print_table(listx):\r\n\tfor lists in listx:\r\n\t\tfor i in lists:\r\n\t\t\tprint str(i) , '\\t',\r\n\t\tprint()", "def print_table(self, items, fields):\r\n formats = []\r\n borders = []\r\n for f in fields:\r\n length = max(len(f),\r\n max([len(self.string(getattr(i, f))) for i in items]))\r\n justify = '>' if isinstance(getattr(\r\n items[0], f), int) or f == 'size' or f == 'reward' else '<'\r\n formats.append('{:' + justify + self.string(length + 2) + '}')\r\n borders.append('-' * length + ' ')\r\n row_format = u''.join(formats)\r\n headers = [f + ' ' for f in fields]\r\n print(row_format.format(*headers))\r\n print(row_format.format(*borders))\r\n for i in items:\r\n i_fields = [self.string(getattr(i, f)) + ' ' for f in fields]\r\n try:\r\n print(row_format.format(*i_fields))\r\n except UnicodeEncodeError:\r\n print(row_format.format(*i_fields).encode('utf-8'))", "def print_object_values(\n object_values_list, metric_group_definition, resource_classes,\n output_format, transposed):\n if output_format in TABLE_FORMATS:\n if output_format == 'table':\n output_format = 'psql'\n print_object_values_as_table(\n object_values_list, metric_group_definition, resource_classes,\n output_format, transposed)\n elif output_format == 'json':\n print_object_values_as_json(\n object_values_list, metric_group_definition, resource_classes)\n else:\n raise InvalidOutputFormatError(output_format)", "def print_individuals(self):\n pt = PrettyTable()\n pt.field_names = ['ID', 'Name', 'Gender', 'Birthday', 'Age', 'Alive', 'Death', 'Child', 'Spouse']\n for i in self.individuals.values():\n pt.add_row(i.get_values())\n print(pt)", "def print_list(things_to_print, prefix=\"\\t\", stream=sys.stdout):\n for item in things_to_print:\n print(f\"{prefix}{item}\", file=stream)", "def basic_print(lista):\n for item in lista:\n print(\"{} \\t\\t {}\".format(item[0], item[1]))", "def tabout(things, file=sys.stdout):\n print(\"\\t\".join([str(x) for x in things]), file=file)\n file.flush()", "def print_list(objs, fields, formatters=None, sortby_index=0,\n mixed_case_fields=None, field_labels=None,\n normalize_field_names=False,\n table_label=None, print_header=True, print_border=True,\n print_row_border=False,\n out=sys.stdout):\n formatters = formatters or {}\n mixed_case_fields = mixed_case_fields or []\n field_labels = field_labels or fields\n if len(field_labels) != len(fields):\n raise ValueError(\"Field labels list %(labels)s has different number of\"\n 
\" elements than fields list %(fields)s\"\n % {\"labels\": field_labels, \"fields\": fields})\n\n kwargs = {}\n if sortby_index is not None:\n kwargs = {\"sortby\": field_labels[sortby_index]}\n\n if print_border and print_row_border:\n headers_horizontal_char = \"=\"\n kwargs[\"hrules\"] = prettytable.ALL\n else:\n headers_horizontal_char = \"-\"\n pt = prettytable.PrettyTable(field_labels)\n pt.align = \"l\"\n\n for o in objs:\n row = []\n for field in fields:\n if field in formatters:\n row.append(formatters[field](o))\n else:\n field_name = field\n\n if normalize_field_names:\n if field_name not in mixed_case_fields:\n field_name = field_name.lower()\n field_name = field_name.replace(\" \", \"_\").replace(\"-\", \"_\")\n\n if isinstance(o, dict):\n data = o.get(field_name, \"\")\n else:\n data = getattr(o, field_name, \"\")\n row.append(data)\n pt.add_row(row)\n\n if not print_border or not print_header:\n pt.set_style(prettytable.PLAIN_COLUMNS)\n pt.left_padding_width = 0\n pt.right_padding_width = 1\n\n table_body = pt.get_string(header=print_header,\n border=print_border,\n **kwargs) + \"\\n\"\n if print_border and print_row_border:\n table_body = table_body.split(\"\\n\", 3)\n table_body[2] = table_body[2].replace(\"-\", headers_horizontal_char)\n table_body = \"\\n\".join(table_body)\n\n table_header = \"\"\n\n if table_label:\n table_width = table_body.index(\"\\n\")\n table_header = make_table_header(\n table_label, table_width, horizontal_char=headers_horizontal_char)\n table_header += \"\\n\"\n\n if table_header:\n out.write(encodeutils.safe_encode(table_header).decode())\n out.write(encodeutils.safe_encode(table_body).decode())", "def show_table(self, keys=None, sort_keys_function=None):\n rows = []\n output_keys = keys or self.keys\n\n for item in self.__get_items(sort_keys_function):\n row = []\n for output_key in output_keys:\n row.append(getattr(item, self.mapping[output_key]))\n rows.append(row)\n print(tabulate(rows, output_keys))", "def print_Products_List(list_of_product_objects):\r\n print(\"\"\"\r\n ********************************************\r\n Your current product data is:\r\n --------------------------------------------\r\n \"\"\")\r\n try:\r\n for row in list_of_product_objects:\r\n print(\"\\t\\t\" + row[0] + \", $\" + row[1])\r\n except IOError as e:\r\n raise Exception(\"Problem with print statement\" + str(e))\r\n print(\"\"\"\r\n ********************************************\r\n \"\"\")", "def print_table(table):\n for row in table:\n print(row)", "def print_table(table):\n for row in table:\n print(row)", "def print_table(table):\n for row in table:\n print(row)", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def display_table(dict_list=None, user_config_data=None):\r\n if user_config_data is not None:\r\n # print(tabulate.tabulate(user_config_data, headers=['Variable', 'Value'], tablefmt=\"grid\"))\r\n print(tabulate.tabulate(user_config_data, tablefmt=\"grid\"))\r\n return\r\n\r\n header = [\"idx\"] + list(dict_list[0].keys())\r\n rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)]\r\n print(tabulate.tabulate(rows, header, tablefmt=\"grid\"))", "def display_table(dict_list=None, user_config_data=None):\r\n if user_config_data is not None:\r\n # print(tabulate.tabulate(user_config_data, headers=['Variable', 'Value'], tablefmt=\"grid\"))\r\n print(tabulate.tabulate(user_config_data, tablefmt=\"grid\"))\r\n return\r\n\r\n header = [\"idx\"] + list(dict_list[0].keys())\r\n rows = [[idx + 1] + list(x.values()) for 
idx, x in enumerate(dict_list)]\r\n print(tabulate.tabulate(rows, header, tablefmt=\"grid\"))", "def pretty_print_table(data, list_of_dicts):\n # ensure that each dict has the same set of keys\n keys = None\n for d in list_of_dicts:\n if keys is None:\n keys = d.keys()\n else:\n if d.keys() != keys:\n print(\"Error! not all dicts have the same keys!\")\n return\n header = \"\\t\" + \"\\t\".join(['{:11.10s}'] * len(data))\n header = header.format(*data)\n rows = []\n for k in keys:\n r = k + \"\\t\"\n for d in list_of_dicts:\n if type(d[k]) is float:\n r += '{:.9f}'.format(d[k]) + \"\\t\"\n else:\n r += '{:10.9s}'.format(str(d[k])) + \"\\t\"\n rows.append(r)\n print(header)\n for row in rows:\n print(row)", "def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % (self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))", "def print_table(hdrs, flag=False, data=[],fmt='psql'):\n\tres = cur.fetchall()\n\tif flag:\n\t\tres = data\n\tprint(tabulate(res, headers=hdrs, tablefmt=fmt))", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n tab = Texttable()\n tab.add_rows([[\"Parameter\", \"Value\"]])\n tab.add_rows([[k.replace(\"_\", \" \").capitalize(), args[k]] for k in keys])\n print(tab.draw())", "def print_table(table, fieldnames):\n print(\"{:<19}\".format(fieldnames[0]), end='')\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(field), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(row[field]), end='')\n print(\"\", end='\\n')", "def print_table(table, fieldnames):\n print(\"{:<19}\".format(fieldnames[0]), end='')\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(field), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(row[field]), end='')\n print(\"\", end='\\n')", "def print(*objects, sep=None, end=None):\n if sep is None:\n sep = ' '\n if end is None:\n end = '\\n'\n array = map(str, objects)\n __PyOutputHelper__.print(sep.join(array)+end)", "def print_table(table):\n for i in range(len(table)):\n print \"Row \", i, \"\\t\",\n for j in range(len(table[i])):\n print table[i][j],\n print \"\\n\"", "def print_results(list_object1, list_object2):\n STUDENT_COLUMN = 16\n GENERAL_COLUMN = 14\n\n print()\n print(\"{:>{}}\".format(\"Student ID\",STUDENT_COLUMN),end=\"\")\n\n for i in range(len(list_object1)):\n print(\"{:>{}}\".format(list_object1[i][0],GENERAL_COLUMN),end=\"\")\n \n print(\"{:>{}}\".format(\"Course grade\",GENERAL_COLUMN))\n\n for tuple_element in list_object2:\n\n print(\"{:>{}}\".format(tuple_element[0],STUDENT_COLUMN),end=\"\")\n\n for i, value in enumerate(tuple_element[1]):\n print(\"{:>{}}\".format(value,GENERAL_COLUMN),end=\"\")\n \n print(\"{:>{}}\".format(round(tuple_element[-1],2),GENERAL_COLUMN))", "def print_table(table):\n rest = table[1:]\n fmt = \"%-28s %-9s %-16s %s\"\n for row in rest:\n print(fmt % tuple(row))", "def print_objects(self, objs, fields, cls=None, 
reverse=False):\n if cls is not None:\n objs = [cls(**o) for o in objs]\n rows = []\n for obj in objs:\n row = []\n if isinstance(fields, dict):\n for field, transform in fields.items():\n # Default to identity for transform\n if transform is None:\n transform = lambda f: f\n\n if isinstance(obj, dict):\n if field in obj:\n val = transform(obj[field])\n else:\n val = '-'\n else:\n try:\n val = getattr(obj, field)\n except AttributeError:\n val = '-'\n row.append(val)\n else:\n for field in fields:\n try:\n row.append(getattr(obj, field))\n except:\n row.append('-')\n rows.append(row)\n\n # Don't print anything if there is nothing to print\n if not rows:\n return\n\n if reverse:\n rows = reversed(rows)\n print(tabulate(rows, headers=fields))", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\",\" \").capitalize(), args[k]] for k in keys])\n print(t.draw())", "def print(self):\n tiles = list(map(list, zip(*self.tiles))) # transposed\n print('tiles = [')\n for row in tiles:\n print('\\t' + repr(row))\n print(']')\n print('props = [')\n for prop in self.props:\n print('\\t' + repr(prop))\n print(']')", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\", \" \").capitalize(), args[k]] for k in keys])\n print(t.draw())", "def print_seq(self):\n names, values = [], []\n for each in self.minions:\n names.append(each.name)\n values.append(f'{each.atk}/{each.dfs}')\n t = PrettyTable()\n t.add_row(names)\n t.add_row(values)\n print(t)", "def _print_table(stats):\n max_key_len = max([len(key) for key in stats])\n width_right = 15\n width_left = max(width_right, max_key_len)\n divider = '+-' + '-' * width_left + '-+-' + '-' * width_right + '-+'\n\n def get_format_char(value):\n if isinstance(value, int):\n return 'd'\n elif isinstance(value, float):\n return '.4f'\n else:\n return 's'\n\n print(divider)\n for name, value in stats.items():\n left_format = f':>{width_left}s'\n right_format = f':<{width_right}{get_format_char(value)}'\n line_format = f'| {{{left_format}}} | {{{right_format}}} |'\n line = line_format.format(name, value)\n print(line)\n print(divider)", "def print_table(source, count=False):\n table_value = []\n table_header = []\n for source_key, source_value in source.items():\n for item in source_value:\n table_value.append([v for v in item.values()])\n table_header.append([k for k in item.keys()])\n if not count:\n print(tabulate(table_value,\n headers=table_header[0],\n tablefmt='orgtbl'))\n else:\n print(tabulate([[len(source_value)]],\n headers=[source_key],\n tablefmt='orgtbl'))", "def print_table(data):\n for key in sorted(data):\n print \"%s: %s\" % (key.rjust(16), data[key])", "def print_table(headers, rows):\n try:\n if headers:\n print('\\n')\n print(tabulate.tabulate(\n rows, headers=headers,\n tablefmt=\"plain\", numalign=\"left\"\n ))\n print('\\n')\n except Exception as e:\n print(e.message)", "def print_table(self, table):\n raise NotImplementedError('print_table method not defined!')", "def print_movie_table(self):\n self = self\n headers = [\"Votes\", \"Rank\", \"Year\", \"Title\"]\n self.handler.header(headers)\n\n for movie in self.movie_list:\n self.handler.row([str(movie.get_votes()), str(movie.get_rank()),\n str(movie.get_year()), str(movie.get_title())])\n\n self.handler.footer()", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = 
Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\",\" \").capitalize(),args[k]] for k in keys])\n print(t.draw())", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\",\" \").capitalize(),args[k]] for k in keys])\n print(t.draw())", "def print_all(cls):\n [print('{0} = \"{1}\"'.format(k, v)) for (k, v) in cls.all()]", "def print_table():\n for key in _op_table.keys():\n print(key)\n for sub_key in _op_table[key]:\n print('\\t--' + sub_key)", "def print_table(rows, labels=None):\n if labels is None:\n labels = ROW_LABELS\n\n output_table = prettytable.PrettyTable()\n output_table.field_names = labels\n output_table.align = 'l'\n output_table.vrules = prettytable.prettytable.ALL\n output_table.hrules = prettytable.prettytable.HEADER\n\n for row in rows:\n row = [x.strip() for x in row]\n output_table.add_row(row)\n\n print output_table\n print ''", "def print_table(rows, header=['Operation', 'OPS']):\n if len(rows) == 0:\n return\n col_max = [max([len(str(val[i])) for val in rows]) + 3 for i in range(len(rows[0]))]\n row_format = ''.join([\"{:<\" + str(length) + \"}\" for length in col_max])\n\n if len(header) > 0:\n print(row_format.format(*header))\n print(row_format.format(*['-' * (val - 2) for val in col_max]))\n\n for row in rows:\n print(row_format.format(*row))\n print(row_format.format(*['-' * (val - 3) for val in col_max]))", "def write_table(*lists):\n print(\"<table>\")\n for columns in zip(*lists):\n print(\"<tr>\")\n for val in columns:\n print(\"<td>{}</td>\".format(val))\n print(\"</tr>\")\n print(\"</table>\")", "def printValues(self):\n headers = self.getHeaders()\n values = self.getValues()\n for i in range(len(values)):\n print(f\"[{headers[i]}] Temperature: {values[i]:0.3f} C\")", "def __print_work_table(table):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % ('Act', 'Pred', 'Block', 'Dummy', 'Succ', 'start', 'end')\n for k, col in sorted(table.items()):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % tuple(\n [str(k)] + [list(col[0])] + [str(col[i]) for i in range(1, len(col))])", "def pretty_print(self):\n for dtr in self.dtrs:\n dtr.pretty_print(indent=2)", "def print_tables(self):\n print \"------------------\\nTables\\n------------------\"\n cnt = 0\n for x in self.show_tables():\n cnt += 1\n print (\"{0}.) 
{1}\".format(cnt, x[0]))", "def print_table(table):\n # transpose the table:\n table = map(list, zip(*table))\n # get the column width:\n col_width = [max(len(str(x)) for x in col) for col in zip(*table)]\n # print it to screen:\n print\n for line in table:\n print \"| \" + \" | \".join(\"{:{}}\".format(x, col_width[i]) for i, x in enumerate(line)) + \" |\"\n print", "def print_table(table, title_list):\n table.insert(0, title_list)\n for row_index, row in enumerate(table):\n for col_index, col in enumerate(row):\n if (type(col) == float) or (type(col) == int):\n table[row_index][col_index] = str(\"{0:,.2f}\".format(col))\n widths = [max(map(len, col)) for col in zip(*table)]\n sum_of_widths = sum(widths) + len(table[0]) * 3 - 1\n for row in table:\n print(\"-\" * sum_of_widths)\n print(\"|\" + \" \".join((val.ljust(width) + \"|\" for val, width in zip(row, widths))))\n print(\"-\" * sum_of_widths)", "def print_table(table, title_list):\n table.insert(0, title_list)\n for row_index, row in enumerate(table):\n for col_index, col in enumerate(row):\n if (type(col) == float) or (type(col) == int):\n table[row_index][col_index] = str(\"{0:,.2f}\".format(col))\n widths = [max(map(len, col)) for col in zip(*table)]\n sum_of_widths = sum(widths) + len(table[0]) * 3 - 1\n for row in table:\n print(\"-\" * sum_of_widths)\n print(\"|\" + \" \".join((val.ljust(width) + \"|\" for val, width in zip(row, widths))))\n print(\"-\" * sum_of_widths)", "def print_list(self):\r\n pass", "def dump(self):\r\n for (name, value) in self.__table__.items():\r\n print (name)\r\n print (value)", "def print_table(table):\n print(\"City \", end='')\n for month in MONTHS:\n print(\"{:>6}\".format(month), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for month in MONTHS:\n print(\"{:>6}\".format(row[month]), end='')\n print(\"\", end='\\n')", "def print_objects(self):\n print(\"Spaces: {}\".format([s.name for s in self.spaces]))\n print(\"Characters: {}\".format([c.name for c in self.characters]))\n print(\"Items: {}\".format([i.name for i in self.items]))", "def table_print(csv_header, csv_body, csv_footer):\n s = '\\t'.join(csv_header)\n print(s)\n\n for r in csv_body:\n s = '\\t'.join(map(str, r))\n print(s)\n\n s = '\\t'.join(map(str, csv_footer))\n print(s)", "def print_output_tables(cls,\n wfns=None, file=None,\n print_intensities=True,\n print_energies=True,\n print_energy_corrections=True,\n print_transition_moments=True,\n operators=None,\n logger=None, sep_char=\"=\", sep_len=100):\n\n if logger is None:\n logger = wfns.logger\n if logger is not None:\n def print_block(label, *args, **kwargs):\n with logger.block(tag=label):\n logger.log_print(\" \".join(\"{}\".format(x) for x in args), **kwargs)\n else:\n if file is None:\n file = sys.stdout\n\n def print_label(label, file=file, **opts):\n lablen = len(label) + 2\n split_l = int(np.floor((sep_len - lablen) / 2))\n split_r = int(np.ceil((sep_len - lablen) / 2))\n print(sep_char * split_l, label, sep_char * split_r, **opts, file=file)\n\n def print_footer(label=None, file=file, **opts):\n print(sep_char * sep_len, **opts, file=file)\n\n def print_block(label, *args, file=file, **kwargs):\n print_label(label, file=file, **kwargs)\n print(*args, file=file, **kwargs)\n print_footer(file=file, **kwargs)\n\n if print_energy_corrections:\n print_block(\"Energy Corrections\", wfns.format_energy_corrections_table())\n if print_energies:\n if 
wfns.degenerate_transformation is not None:\n print_block(\"Deperturbed Energies\",\n wfns.format_deperturbed_energies_table()\n )\n print_block(\n \"Degenerate Energies\",\n wfns.format_energies_table()\n )\n else:\n print_block(\"States Energies\",\n wfns.format_energies_table()\n )\n\n if print_intensities:\n ints = wfns.intensities # to make sure they're computed before printing starts\n if print_transition_moments:\n if wfns.degenerate_transformation is not None:\n for a, m in zip([\"X\", \"Y\", \"Z\"], wfns.format_deperturbed_dipole_contribs_tables()):\n print_block(\"{} Deperturbed Dipole Contributions\".format(a), m)\n\n print_block(\"Deperturbed IR Data\",\n wfns.format_deperturbed_intensities_table()\n )\n\n for a, m in zip([\"X\", \"Y\", \"Z\"], wfns.format_dipole_contribs_tables()):\n print_block(\"{} Dipole Contributions\".format(a), m)\n print_block(\"IR Data\", wfns.format_intensities_table())\n\n if operators is not None:\n print_block(\"Operator Data\", wfns.format_operator_table(operators))", "def tabular_formatted_printing(data_list):\n n = len(data_list)\n max = 0\n for i in range(0,n):\n if int(len(data_list[i][0])) > max:\n max = len(data_list[i][0])\n for i in range(0,n):\n if int(len(data_list[i][0])) < max:\n space = max - len(data_list[i][0])\n else:\n space = 0\n print(data_list[i][0]+space*' '+' : '+str(data_list[i][1]))\n return", "def print_table(seqids, data, outputfile, separator='\\t'):\n\n tags = data.keys()\n with open(outputfile, 'w') as out:\n out.write(separator.join([\"#Sequence ID\"] + list(tags)) + \"\\n\")\n for s in seqids:\n out.write(s)\n for t in tags:\n out.write(\"{}{}\".format(separator, data[t].get(s, \"\")))\n out.write(\"\\n\")", "def pretty_print(results: List[Tuple[str, torch.Tensor]]):\n for item in results:\n print(\"...[%.2f] - %s\" % (item[1], item[0]))", "def print_table(emojis):\n if len(emojis) > 0:\n table = []\n for i in emojis:\n table.append([i.get('id'), i.get('title'), i.get('emoji')])\n print(tabulate(table, headers=[\"ID\", \"Title\", \"Emoji\"]))\n else:\n print(\"¯\\_(ツ)_/¯ Nothing to see here...\")", "def formatall(obj):\n result = \"\"\n if isinstance(obj, list):\n# i = 0\n for obj in obj:\n #printf(\">>> [%d] >>> \", i)\n result += format(obj)\n result += \"\\n\"\n# i += 1\n return result\n if isinstance(obj, dict):\n for key, value in obj.items():\n result += \"%-15s : \" % key\n result += format(value)\n result += \"\\n\"\n return result\n return format(obj)", "def print_sorted_table_by_value(table):\n\td_view = [ (v,k) for k,v in table.iteritems() ]\n\td_view.sort(reverse=True) # natively sort tuples by first element\n\tfor v,k in d_view:\n\t\tprint \"%d: %s\" % (v,k)", "def uprint(self, *objects, sep=' ', end='\\n'):\n if self._utf8_safe is None:\n from sys import stdout\n self._utf8_safe = (stdout.encoding.lower() == 'utf-8')\n if self._utf8_safe:\n encoded = objects\n else:\n encoded = [str(obj).encode('utf8') for obj in objects]\n print(*encoded, sep=sep, end=end)", "def print_table(response, title):\n print title + ':'\n\n if 'rows' not in response:\n print 'Empty response'\n return\n\n rows = response['rows']\n row_format = '{:<20}' + '{:>20}' * 4\n print row_format.format('Keys', 'Clicks', 'Impressions', 'CTR', 'Position')\n for row in rows:\n keys = ''\n # Keys are returned only if one or more dimensions are requested.\n if 'keys' in row:\n keys = u','.join(row['keys']).encode('utf-8')\n print row_format.format(\n keys, row['clicks'], row['impressions'], row['ctr'], row['position'])", "def 
print_contents(self):\n print self.values", "def print(listing: typing.Iterable[typing.Any]) -> None:\n listing = tuple(str(i) for i in listing)\n if not listing:\n return\n width = max(len(i) for i in listing) + 2\n count = min(shutil.get_terminal_size().columns // width, len(listing))\n for row in itertools.zip_longest(*(listing[i::count] for i in range(count)), fillvalue=''):\n print(*(f'{c:<{width}}' for c in row), sep='')", "def print_pretty(self, data):\n length = max(map(lambda x: len(x), data.keys()))\n print '+-------------------------------------+'\n print '| Company Name | Year | Month | Value |'\n print '+-------------------------------------+'\n for key, value in data.items():\n print '| %s | %s | %s | %s |' % (key, \\\n value['year'], value['month'], value['value'])\n print '+-------------------------------------+'", "def display(self):\n print(\n f'\\t\\t {self.name.upper()} {self.potency[0]}{self.potency[1]}\\t\\t'\n f' {self.dose_qty[0]} {self.dose_qty[1]} {self.dose[0]} {self.dose[1].upper()}')", "def printPairList(values, lab1, lab2, precision, offset=16):\n\tprint(lab1.ljust(offset, \" \") + lab2)\n\tfor (v1, v2) in values:\n\t\tsv1 = toStr(v1, precision).ljust(offset, \" \")\n\t\tsv2 = toStr(v2, precision)\n\t\tprint(sv1 + sv2)", "def print_queue(queue):\n print(tabulate.tabulate(queue,headers=['Time','Priority','Action','Argument','kwargs'],\n floatfmt=(\".12f\")))", "def print_table(table):\n for row in table:\n # Header column left justified\n print(\"{:<19}\".format(row[0]), end='')\n # Remaining columns right justified\n for col in row[1:]:\n print(\"{:>4}\".format(col), end='')\n print(\"\", end='\\n')", "def dibujar_tablero(tablero):\n for fila in tablero:\n print(\"|\", \"|\".join(fila), \"|\", sep=\"\")\n print(\"\")", "def show(self, lst=None):\n\n def f(v):\n if np.size(v) == 1:\n return str(v)\n elif np.size(v) > 3:\n return str(np.shape(v))\n elif np.ndim(v) > 1:\n return str(np.shape(v))\n else:\n return str(v)\n\n def buffer(l, m, n=25):\n end = len(l) - 1\n buffered = []\n for i in range(m):\n if i > end:\n buffered.append(\"\".ljust(n))\n else:\n buffered.append(l[i].ljust(n))\n return buffered\n\n lst = self if lst is None else lst\n out = [IND.ljust(7) + INDEP.ljust(60) + DEP.ljust(60)]\n for row in lst:\n ind = [str(row[IND])]\n dep = [k + \": \" + f(v) for k, v in row[DEP].items()]\n indep = [k + \": \" + f(v) for k, v in row[INDEP].items()]\n m = max(len(dep), len(indep), 1)\n ind = buffer(ind, m, 7)\n dep = buffer(dep, m, 60)\n indep = buffer(indep, m, 60)\n for a, b, c in zip(ind, indep, dep):\n out.append(a + b + c)\n out.append(\"\")\n return \"\\n\".join(out)", "def pretty_print(output: list):\n for movie in output:\n for item in movie.items():\n print(item[0]+\":\", item[1])\n print()", "def show_table():\n\n title_list = ('ID', 'Platform', 'Producer', 'Year', 'Elements')\n \n return table, title_list", "def pretty_print(self, query_obj):\n assert type(query_obj) == dict\n result = \"\"\n if type(query_obj) == dict and 'message' in query_obj:\n result += query_obj[\"message\"] + \"\\n\"\n if 'data' in query_obj and 'column_labels' in query_obj:\n \"\"\" Pretty-print data table \"\"\"\n pt = prettytable.PrettyTable()\n columns = query_obj['column_labels']\n pt.field_names = columns\n\n # Adjust value width - for now preserve 2 decimal places.\n for row_idx, row_values in enumerate(query_obj['data']):\n if type(row_values) == tuple:\n row_values = list(row_values)\n for col_idx, col_value in enumerate(row_values):\n if type(col_value) == 
float:\n # Right-align numeric columns.\n if row_idx == 0:\n pt.align[columns[col_idx]] = 'r'\n row_values[col_idx] = \"% .2f\" % col_value\n pt.add_row(row_values)\n result += str(pt)\n\n elif 'list' in query_obj:\n \"\"\" Pretty-print lists \"\"\"\n result += str(query_obj['list'])\n elif 'column_names' in query_obj:\n \"\"\" Pretty-print cctypes \"\"\"\n colnames = query_obj['column_names']\n zmatrix = query_obj['matrix']\n pt = prettytable.PrettyTable(hrules=prettytable.ALL, vrules=prettytable.ALL,\n header=False)\n pt.add_row([''] + list(colnames))\n for row, colname in zip(zmatrix, list(colnames)):\n pt.add_row([colname] + list(row))\n result += str(pt)\n elif 'column_labels' in query_obj:\n \"\"\" Pretty-print column list.\"\"\"\n pt = prettytable.PrettyTable()\n pt.field_names = ['column']\n for column in query_obj['column_labels']:\n pt.add_row([column])\n result += str(pt)\n elif 'row_lists' in query_obj:\n \"\"\" Pretty-print multiple row lists, which are just names and row sizes. \"\"\"\n pt = prettytable.PrettyTable()\n pt.field_names = ('Row List Name', 'Row Count')\n\n def get_row_list_sorting_key(x):\n \"\"\" To be used as the key function in a sort. Puts cc_2 ahead of cc_10, e.g. \"\"\"\n name, count = x\n if '_' not in name:\n return name\n s = name.split('_')\n end = s[-1]\n start = '_'.join(s[:-1])\n if utils.is_int(end):\n return (start, int(end))\n return name\n\n for name, count in sorted(query_obj['row_lists'], key=get_row_list_sorting_key):\n pt.add_row((name, count))\n result += str(pt)\n elif 'column_lists' in query_obj:\n \"\"\" Pretty-print multiple column lists. \"\"\"\n print\n clists = query_obj['column_lists']\n for name, clist in clists:\n print(\"%s:\" % name)\n pt = prettytable.PrettyTable()\n pt.field_names = clist\n print(pt)\n elif 'models' in query_obj:\n \"\"\" Pretty-print model info. 
\"\"\"\n pt = prettytable.PrettyTable()\n pt.field_names = ('model_id', 'iterations')\n for (id, iterations) in query_obj['models']:\n pt.add_row((id, iterations))\n result += str(pt)\n\n if len(result) >= 1 and result[-1] == '\\n':\n result = result[:-1]\n return result", "def show_table(table):\n # id: string\n # Unique and random generated (at least 2 special char()expect: ';'),\n # 2 number, 2 lower and 2 upper case letter)\n # title: string\n # manufacturer: string\n # price: number (dollars)\n # in_stock: number\n title_list = [\"ID\", \"Title\", \"Manufacturer\",\n \"Price\", \"Number in stock\"]\n ui.print_table(table, title_list)", "def print_obs(self,obs):\n print(obs)", "def print_list(self):\n self.print_avec_separateur(\" \")", "def print_table(self) -> None:\n if (self.probability_links == None):\n print(\"+--------+\")\n print(f\"| P({self.key:1s}) |\")\n print(\"+--------+\")\n print(f\"| {self.probability_values[0]:0.04f} |\")\n print(\"+--------+\")\n else:\n arg_len = 2 + len(' '.join(self.probability_links.keys()))\n param_len = 2 + \\\n max(6, len(\"P(A|)\" + \",\".join(self.probability_links.keys())))\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")\n print(\n f\"| {' '.join(self.probability_links.keys())} | P({self.key}|{','.join(self.probability_links.keys())}) |\")\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")\n for i in range(2**len(self.probability_links.keys())):\n # Gives us a string binary value to make truth table off of\n bool_key = f\"{i:0{len(self.probability_links.keys())}b}\"\n print(\n f\"| {' '.join(['T' if bool_key[j] == '0' else 'F' for j in range(len(self.probability_links.keys()))])} | {f'{self.probability_values[i]:0.04f}':<{param_len-1}s}|\")\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")", "def pretty_print(self):\n pt = PrettyTable()\n for i in self.files_summary:\n pt.field_names = [\"File Name\", \"Classes\", \"Functions\", \"Lines\", \"Characters\"]\n pt.add_row(list([i, self.files_summary[i][\"class\"], self.files_summary[i][\"function\"], self.files_summary[i][\"line\"], self.files_summary[i][\"char\"]]))\n print(pt) #Using a Print statement here because i tried to return self.pt and it didnt give me anything but the print works", "def print_list(self, items):\n\t\tstrtype = unicode if self.encoding else bytes\n\t\titems = map(strtype, items)\n\t\twidth = self.get_width()\n\t\tlines = []\n\t\tsep = strtype(' ')\n\t\tfor item in items:\n\t\t\tif lines:\n\t\t\t\tnew = lines[-1] + sep + item\n\t\t\t\tif len(new) <= width:\n\t\t\t\t\tlines[-1] = new\n\t\t\t\t\tcontinue\n\t\t\tlines.append(item)\n\t\tself.write(strtype('\\n').join(lines))", "def __str__(self):\n txt = ''\n if self.PrintHeader:\n txt = \" |\" + \"|\".join(sorted(self.rows[0].keys())).expandtabs() + \"|\"\n txt += \"\\n\"\n txt += \"|-\"\n for r in self.rows:\n txt += \"\\n|\"\n txt += \"|\".join([str(uround(r[key] , 2) if isinstance(r[key], (int, long, float, complex , Variable,AffineScalarFunc )) else r[key]) for key in sorted(self.rows[0].keys())]) + \"|\"\n txt += \"\\n|-\"\n if self.PrintSum:\n txt += \"\\n\"\n sumRow = self.GetSumRow()\n txt += \"| |\" + \"|\".join( [str(uround(sumRow[key] , 2) if isinstance(sumRow[key], (int, long, float, complex , Variable ,AffineScalarFunc )) else sumRow[key]) for key in sorted(self.rows[0].keys())[1:]] ) + \"|\"\n\n return txt", "def print_csv(self, items, fields):\r\n writer = csv.writer(sys.stdout)\r\n writer.writerow(fields)\r\n for i in items:\r\n i_fields = [self.string(getattr(i, f)) for f in fields]\r\n writer.writerow(i_fields)", 
"def print_dict(obj, fields=None, formatters=None, mixed_case_fields=False,\n normalize_field_names=False, property_label=\"Property\",\n value_label=\"Value\", table_label=None, print_header=True,\n print_border=True, wrap=0, out=sys.stdout):\n formatters = formatters or {}\n mixed_case_fields = mixed_case_fields or []\n if not fields:\n if isinstance(obj, dict):\n fields = sorted(obj.keys())\n else:\n fields = [name for name in dir(obj)\n if (not name.startswith(\"_\")\n and not callable(getattr(obj, name)))]\n\n pt = prettytable.PrettyTable([property_label, value_label], caching=False)\n pt.align = \"l\"\n for field_name in fields:\n if field_name in formatters:\n data = formatters[field_name](obj)\n else:\n field = field_name\n if normalize_field_names:\n if field not in mixed_case_fields:\n field = field_name.lower()\n field = field.replace(\" \", \"_\").replace(\"-\", \"_\")\n\n if isinstance(obj, dict):\n data = obj.get(field, \"\")\n else:\n data = getattr(obj, field, \"\")\n\n # convert dict to str to check length\n if isinstance(data, (dict, list)):\n data = json.dumps(data)\n if wrap > 0:\n data = textwrap.fill(str(data), wrap)\n # if value has a newline, add in multiple rows\n # e.g. fault with stacktrace\n if (data and isinstance(data, str)\n and (r\"\\n\" in data or \"\\r\" in data)):\n # \"\\r\" would break the table, so remove it.\n if \"\\r\" in data:\n data = data.replace(\"\\r\", \"\")\n lines = data.strip().split(r\"\\n\")\n col1 = field_name\n for line in lines:\n pt.add_row([col1, line])\n col1 = \"\"\n else:\n if data is None:\n data = \"-\"\n pt.add_row([field_name, data])\n\n table_body = pt.get_string(header=print_header,\n border=print_border) + \"\\n\"\n\n table_header = \"\"\n\n if table_label:\n table_width = table_body.index(\"\\n\")\n table_header = make_table_header(table_label, table_width)\n table_header += \"\\n\"\n\n if table_header:\n out.write(encodeutils.safe_encode(table_header).decode())\n out.write(encodeutils.safe_encode(table_body).decode())", "def print_list(l):\n print('[' + ', '.join([x.__str__() for x in l]) + ']')", "def display(self):\n for value, prob in self.items():\n print(value, prob)", "def display_table(a, m):\n # Initialize string\n result = ''\n result += '{'\n\n # Add all polynomials to the string, given they are already a string\n for i in a:\n for j in i[:-1]:\n result += display_poly(j, m)\n result += ', '\n\n # Add the last one here to prevent unneeded comma\n result += display_poly(i[-1], m)\n result += '; '\n\n # Remove final semicolon and close the brace\n result = result[:-2]\n result += '}'\n\n return result", "def print_object(dict_to_print, *, name='', uppercase=False):\n string = '' if name == '' else name.ljust(10)\n for key, value in dict_to_print.items():\n string += f'{key.upper() if uppercase else key}: {\"\" if value < 0 else \" \"}{float(value):.4}'.ljust(\n len(key) + 10)\n\n print(string)", "def pretty_print(self, query_obj):\n assert type(query_obj) == dict\n result = \"\"\n if type(query_obj) == dict and 'message' in query_obj:\n result += query_obj[\"message\"] + \"\\n\"\n if 'data' in query_obj and 'columns' in query_obj:\n \"\"\" Pretty-print data table \"\"\"\n pt = prettytable.PrettyTable()\n pt.field_names = query_obj['columns']\n for row in query_obj['data']:\n pt.add_row(row)\n result += str(pt)\n elif 'list' in query_obj:\n \"\"\" Pretty-print lists \"\"\"\n result += str(query_obj['list'])\n elif 'column_names' in query_obj:\n \"\"\" Pretty-print cctypes \"\"\"\n colnames = 
query_obj['column_names']\n zmatrix = query_obj['matrix']\n pt = prettytable.PrettyTable(hrules=prettytable.ALL, vrules=prettytable.ALL,\n header=False)\n pt.add_row([''] + list(colnames))\n for row, colname in zip(zmatrix, list(colnames)):\n pt.add_row([colname] + list(row))\n result += str(pt)\n elif 'columns' in query_obj:\n \"\"\" Pretty-print column list.\"\"\"\n pt = prettytable.PrettyTable()\n pt.field_names = ['column']\n for column in query_obj['columns']:\n pt.add_row([column])\n result += str(pt)\n elif 'row_lists' in query_obj:\n \"\"\" Pretty-print multiple row lists, which are just names and row sizes. \"\"\"\n pt = prettytable.PrettyTable()\n pt.field_names = ('Row List Name', 'Row Count')\n\n def get_row_list_sorting_key(x):\n \"\"\" To be used as the key function in a sort. Puts cc_2 ahead of cc_10, e.g. \"\"\"\n name, count = x\n if '_' not in name:\n return name\n s = name.split('_')\n end = s[-1]\n start = '_'.join(s[:-1])\n if is_int(end):\n return (start, int(end))\n return name\n\n for name, count in sorted(query_obj['row_lists'], key=get_row_list_sorting_key):\n pt.add_row((name, count))\n result += str(pt)\n elif 'column_lists' in query_obj:\n \"\"\" Pretty-print multiple column lists. \"\"\"\n print\n clists = query_obj['column_lists']\n for name, clist in clists:\n print(\"%s:\" % name)\n pt = prettytable.PrettyTable()\n pt.field_names = clist\n print(pt)\n elif 'models' in query_obj:\n \"\"\" Pretty-print model info. \"\"\"\n pt = prettytable.PrettyTable()\n pt.field_names = ('model_id', 'iterations')\n for (id, iterations) in query_obj['models']:\n pt.add_row((id, iterations))\n result += str(pt)\n\n if len(result) >= 1 and result[-1] == '\\n':\n result = result[:-1]\n return result", "def output_table(results, output, keys=None, sort_key=None):\n\n if output not in constants.TABLE_OUTPUT_FORMAT:\n raise ValueError(\"Output format must be{}, \"\n \"got {}\".format(constants.TABLE_OUTPUT_FORMAT,\n output))\n if output == 'print':\n if len(results) == 0:\n print 'No output!'\n return\n\n headers = [keys[k] for k in keys.keys()] if keys else results[0].keys()\n table = PrettyTable(headers)\n for line in results:\n table.add_row([line[k] if k in line else '' for k in (keys.keys() if keys else headers)])\n\n if sort_key:\n table.sortby = keys[sort_key] if keys else sort_key\n\n print table\n\n if output == 'csv':\n csvwriter = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)\n keys = results[0].keys()\n csvwriter.writerow(keys)\n for row in results:\n csvwriter.writerow([row[k] for k in keys])\n\n if output == 'json':\n print json.dumps(results)", "def print_out(self, *lst):\n self.print2file(self.stdout, True, True, *lst)", "def print_labels(self,labels):\n\t\tfor key in labels:\n\t\t\tprint key, ':\\t', labels[key]", "def pretty_print(iterable):\n for elem in iterable:\n print(elem)", "def __str__(self):\n tabuleiro = prettytable.PrettyTable(header=False)\n for linha in self.tabuleiro:\n tabuleiro.add_row(linha)\n return str(tabuleiro)", "def printData (data):\n print(str(len(data)) + '\\t' + str(data))", "def tabular_print(files_dict: dict):\r\n # create a list of file extensions\r\n file_extensions = []\r\n for filename in files_dict.keys():\r\n for file_ext in files_dict[filename].keys():\r\n # print(\"debug:::\", file_ext)\r\n file_extensions.append(file_ext)\r\n break\r\n # go through all the files and print them in a table with the file extension as the top row\r\n sep_line_len = 40 + 10 * len(file_extensions) # separator line length = max_filename_len [35] + 
10*number of ext\r\n # print the first row\r\n print(\"filename\".ljust(40), end='')\r\n for ext in file_extensions:\r\n print(\"|\" + ext.center(9), end='')\r\n print()\r\n print(''.center(sep_line_len, '='))\r\n # print the rest of the files\r\n for filename, ext_dict in files_dict.items():\r\n print(filename.ljust(40), end='')\r\n for ext in ext_dict.keys():\r\n if ext_dict[ext]:\r\n print(\"|\" + \"V\".center(9), end='')\r\n else:\r\n print(\"|\" + \" \".center(9), end='')\r\n print()\r\n print(''.center(sep_line_len, '-'))" ]
[ "0.7586605", "0.75449604", "0.72875947", "0.71443856", "0.7099981", "0.6986745", "0.6952594", "0.6931348", "0.6849189", "0.68258506", "0.6799973", "0.67922515", "0.67875236", "0.67875236", "0.6745301", "0.67065424", "0.66971654", "0.6696674", "0.6684991", "0.66635174", "0.6648342", "0.66050047", "0.65965694", "0.65965694", "0.6585275", "0.65821147", "0.65607625", "0.6543437", "0.65370286", "0.6533434", "0.65275145", "0.65220594", "0.6517411", "0.649677", "0.64941216", "0.648565", "0.64798987", "0.6478175", "0.6465516", "0.64639586", "0.64639586", "0.6461594", "0.64504296", "0.64403266", "0.6428659", "0.6392328", "0.6391762", "0.63758785", "0.6375355", "0.6370695", "0.6361564", "0.63143736", "0.63143736", "0.6298176", "0.62598884", "0.62535", "0.62523204", "0.62301654", "0.62233675", "0.62059695", "0.61852366", "0.61818844", "0.61793745", "0.6175997", "0.61651903", "0.6165072", "0.6160514", "0.6121623", "0.61178416", "0.6117297", "0.61066484", "0.60947394", "0.6079206", "0.60674196", "0.60667354", "0.6063231", "0.6061996", "0.60549366", "0.604617", "0.6036374", "0.60308987", "0.60293674", "0.6026664", "0.6026499", "0.6019464", "0.60145605", "0.6012071", "0.601056", "0.5999123", "0.5980475", "0.5971654", "0.596746", "0.59590375", "0.5956999", "0.5947089", "0.59286696", "0.59279484", "0.5920989", "0.5913141", "0.59120005" ]
0.7025772
5
Print a list of object values in JSON output format.
def print_object_values_as_json(
        object_values_list, metric_group_definition, resource_classes):
    if object_values_list:
        metric_definitions = metric_group_definition.metric_definitions
        sorted_metric_names = [md.name for md in
                               sorted(metric_definitions.values(),
                                      key=lambda md: md.index)]

    json_obj = list()
    for i, ov in enumerate(object_values_list):

        resource_obj = OrderedDict()

        # Add resource names up to the CPC
        res = ov.resource
        while res:
            name_prop = res.manager.class_name + '-name'
            resource_obj[name_prop] = res.name
            res = res.manager.parent  # CPC has None as parent

        # Add the metric values
        for name in sorted_metric_names:
            m_def = metric_definitions[name]
            value = ov.metrics[name]
            resource_obj[name] = OrderedDict(value=value, unit=m_def.unit)

        json_obj.append(resource_obj)

    json_str = json.dumps(json_obj)
    click.echo(json_str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def jprint(obj):\n #\n text = json.dumps(obj, sort_keys=True, indent=4)\n print(text)", "def print_json(obj):\n print(json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': ')))", "def print_json(obj):\n print(json.dumps(obj, indent=2))", "def pprint(self,obj):\n return(json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': ')))", "def pprint(obj):\n return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))", "def pprint(obj):\n return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))", "def cli(obj):\n for k, v in obj.items():\n if isinstance(v, list):\n v = ', '.join(v)\n click.echo(f'{k:20}: {v}')", "def pretty_print(data):\n print json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))", "def display_json(self, results, verbose):\n print(json.dumps(results))", "def print_list(list_, format_=None):\n\n format_ = format_ or DEFAULT\n\n if format_ == TEXT:\n for item in list_:\n print(item)\n elif format_ == JSON:\n print(json.dumps(list_))", "def fprint(content):\n print json_dumps(content,indent=1)", "def fprint(content):\n print json_dumps(content,indent=1)", "def format_json(the_list):\n result = \"{\"\n index = 0\n size = len(the_list)\n while index < size:\n result += format_json_key(the_list[index][0], the_list[index][1])\n if index != size - 1:\n result += \", \"\n index += 1\n result += \"}\"\n return result", "def print_json(\n *args: Union[Iterable[Any], Iterable[Tuple[Any]]], indent: int = 4, **kwargs\n) -> None:\n if not kwargs.get(\"default\"):\n kwargs[\"default\"] = str\n print(\"*\" * 72)\n for i, arg in enumerate(args, 1):\n if isinstance(arg, Tuple) and len(arg) == 2:\n title, data = arg\n else:\n title, data = f\"Argument {i}\", arg\n print(title)\n print(json.dumps(data, indent=indent, **kwargs))\n print(\"*\" * 72)", "def print_friendly_JSON_object(JSON_object):\n formatted_string = json.dumps(JSON_object, sort_keys=True, indent=4)\n print(formatted_string)", "def print_json(results):\r\n import json\r\n stats = calc_stats(results)\r\n print(json.dumps(stats._asdict()))", "def print_object_values(\n object_values_list, metric_group_definition, resource_classes,\n output_format, transposed):\n if output_format in TABLE_FORMATS:\n if output_format == 'table':\n output_format = 'psql'\n print_object_values_as_table(\n object_values_list, metric_group_definition, resource_classes,\n output_format, transposed)\n elif output_format == 'json':\n print_object_values_as_json(\n object_values_list, metric_group_definition, resource_classes)\n else:\n raise InvalidOutputFormatError(output_format)", "def format(self, obj):\n return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))", "def write(self):\n out = json.dumps({\"items\": self.items})\n sys.stdout.write(out)", "def _print_json_list(self, resource_wrappers):\n is_empty_list = True\n for i, resource_wrapper in enumerate(resource_wrappers):\n is_empty_list = False\n if i == 0:\n # Start of JSON list for long long listing.\n print('[')\n print(resource_wrapper, end='')\n else:\n # Print resource without newline at end to allow list formatting for\n # unknown number of items in generator.\n print(',\\n{}'.format(resource_wrapper), end='')\n\n # New line because we were removing it from previous prints to give us\n # the ability to do a trailing comma for JSON list printing.\n print()\n if not is_empty_list:\n # Close long long listing JSON list. 
Prints nothing if no items.\n print(']')", "def write(obj, hdr=False):\n\n if hdr:\n print(json.dumps(obj))\n sys.stdout.flush()\n return\n\n if obj is None:\n print(\"[\")\n print(\"[]\")\n sys.stdout.flush()\n return\n\n print(\",\" + json.dumps(obj))\n sys.stdout.flush()", "def pprint(json_data):\n\n print(json.dumps(json_data, indent=4, separators=(' , ', ' : ')))", "def pprint(self):\n import json\n return json.dumps(OrderedDict(self.items()), indent=4)", "def formatall(obj):\n result = \"\"\n if isinstance(obj, list):\n# i = 0\n for obj in obj:\n #printf(\">>> [%d] >>> \", i)\n result += format(obj)\n result += \"\\n\"\n# i += 1\n return result\n if isinstance(obj, dict):\n for key, value in obj.items():\n result += \"%-15s : \" % key\n result += format(value)\n result += \"\\n\"\n return result\n return format(obj)", "def pprint(self, *args, **kwargs):\n kw = dict(self.pprint_args)\n kw.update(kwargs)\n return self.to_json(*args, **kw)", "def print_list(self):\r\n pass", "def jq(o: object, return_only: bool = False, indent: int = 1) -> str:\n\n formatted = json.dumps(o, indent=indent, default=str)\n\n if return_only:\n return formatted\n\n print(formatted)\n\n return None", "def printAsFormattedJSON(jsonObject):\n print(json.dumps(jsonObject, indent=2)[0:1000])", "def jsonprint(obj):\n try:\n print(json.dumps(obj, sort_keys=True, indent=4))\n except TypeError:\n obj_copy = obj.copy()\n del obj_copy['_id']\n print(json.dumps(obj_copy, sort_keys=True, indent=4))", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def print_json(res, ctx):\n\n return json.dumps(res)", "def view_values(self, attr_list): # DONE\n values = {}\n for attr in attr_list:\n values[attr] = list(self.data[attr].values)\n return json.dumps(values)", "def print(*objects, sep=None, end=None):\n if sep is None:\n sep = ' '\n if end is None:\n end = '\\n'\n array = map(str, objects)\n __PyOutputHelper__.print(sep.join(array)+end)", "def pprint_json(data):\n from IPython.display import JSON, display # pylint: disable=C0415\n\n display(JSON(loads(DisplayEcoder().encode(data))))", "def print_json_tweet_pair(tweet_pair):\n print(json.dumps(tweet_pair, indent=4))", "def print_json(results, number, concurrency):\n import json\n stats = calc_stats(results, number, concurrency)\n print(json.dumps(stats))", "def json_out(self):\n temp_json = json.dumps(self.ecat_info, indent=4)\n print(temp_json)", "def output_jsonl(filename: str, data: List):\n with open(filename, \"w\") as outfile:\n for x in data:\n print(json.dumps(x))\n json.dump(x, outfile)\n outfile.write(\"\\n\")", "def print_json_stdout(results):\n for json in results:\n print(\"\\n########## Result for IP {} ##########\".format(json['ip']))\n pprint.pprint(json)\n print('######################################')\n print()", "def gen_json(self, show_headers=True, show_tags=True, use_objects=False):\n is_first = True\n yield \"[\\n\"\n if use_objects:\n for row in self:\n if is_first:\n is_first = False\n yield json.dumps(row.dictionary, sort_keys=True, indent=2)\n else:\n yield \",\\n\" + json.dumps(row.dictionary, sort_keys=True, indent=2)\n else:\n for raw in self.gen_raw(show_headers, show_tags):\n if is_first:\n is_first = False\n yield json.dumps(raw)\n else:\n yield \",\\n\" + json.dumps(raw)\n yield \"\\n]\\n\"", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n return(\"[]\")\n json_string = json.dumps(list_dictionaries)\n return(json_string)", "def uprint(self, *objects, sep=' ', end='\\n'):\n if self._utf8_safe is 
None:\n from sys import stdout\n self._utf8_safe = (stdout.encoding.lower() == 'utf-8')\n if self._utf8_safe:\n encoded = objects\n else:\n encoded = [str(obj).encode('utf8') for obj in objects]\n print(*encoded, sep=sep, end=end)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n list_dictionaries = []\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n list_dictionaries = []\n return json.dumps(list_dictionaries)", "def list_to_json(items):\n return json.dumps(to_dict_list(items))", "def json_encode_pretty(data):\n # type: (Union[Dict,List]) -> str\n return json.dumps(data, indent=2, separators=(\",\", \": \"))", "def json_dumps(self, obj: object) -> str:\n return json.dumps(obj, sort_keys=self.beautify, indent=4)", "def printjson(toprint, exception_conversions={}):\n\n def serialize(x):\n conversion = exception_conversions.get(type(x))\n if conversion is None:\n try:\n return json.dumps(x)\n except Exception:\n return str(x)\n else:\n return conversion(x)\n\n print(json.dumps(toprint, indent=4, default=serialize))", "def print_list(things_to_print, prefix=\"\\t\", stream=sys.stdout):\n for item in things_to_print:\n print(f\"{prefix}{item}\", file=stream)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def pretty_print(js):\n try:\n return json.dumps(js, indent=4, sort_keys=True, separators=(\",\", \":\"))\n except Exception as e:\n return \"%s\" % js", "def pprint(self):\r\n for i in self.items():\r\n print '%s => %r'%i", "def pretty_print(dictionary: dict):\n return json.dumps(dictionary, indent=4)", "def my_pprint(obj, intend = 0):\n if isinstance(obj, dict):\n for key, value in obj.items():\n print(intend*\" \"+str(key)+\" : \")\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, list):\n for value in obj:\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, bytes):\n print(\"<binary data>\")\n \n else:\n try:\n print(intend*\" \"+str(obj))\n except UnicodeDecodeError:\n print(intend*\" \"\"<?>\")", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n return \"[]\"\n else:\n string = json.dumps(list_dictionaries)\n return string", "def print_formatted_json(data, fd=sys.stdout):\n _pp = pprint.PrettyPrinter(width=80, compact=False, stream=fd)\n _pp.pprint(data)", "def print_raw_json(raw):\n # type: (dict) -> None\n print(json.dumps(raw, ensure_ascii=False, indent=2, sort_keys=True))", "def to_json_string(list_dictionaries):\n if list_dictionaries and list_dictionaries is not None:\n return json.dumps(list_dictionaries)\n else:\n return \"[]\"", "def devicelist_to_json(self):\n devices_json = json.dumps(self.device_list)\n print(devices_json)", "def print_json(result_json):\n\n if (result_json):\n for element in result_json:\n json_str = json.dumps(element, indent=4)\n click.echo(json_str)\n else:\n click.echo(\"No starships found with given criteria. 
Try again!\")", "def to_json_string(list_dictionaries):\n if list_dictionaries is None or len(list_dictionaries) < 1:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None or list_dictionaries == []:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def json_pretty_print(dictionary):\n return json.dumps(dictionary, sort_keys=True,\n indent=2, separators=(',', ': '))", "def to_json_string(list_dictionaries):\n if not list_dictionaries or list_dictionaries is None:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def print_all(cls):\n [print('{0} = \"{1}\"'.format(k, v)) for (k, v) in cls.all()]", "def pretty_print(data, indent=4):\n if type(data) == dict:\n print(json.dumps(data, indent=indent, sort_keys=True))\n else:\n print(data)", "def pretty_print(output: list):\n for movie in output:\n for item in movie.items():\n print(item[0]+\":\", item[1])\n print()", "def to_json_string(list_dictionaries):\n if list_dictionaries is None or len(list_dictionaries) is 0:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def pretty_print(js):\n try:\n return json.dumps(js, indent=4, separators=(\",\", \":\"))\n except Exception as e:\n return \"%s\" % js", "def dumps(obj):\n return json.dumps(obj, indent=4, sort_keys=True, cls=CustomEncoder)", "def to_json_string(list_dictionaries):\n\n if list_dictionaries is None or len(list_dictionaries) <= 0:\n return '[]'\n\n return json.dumps(list_dictionaries)", "def pretty (value, stream=sys.stdout, starting_indent=0, indent_additive=4) :\r\n indentOut_(stream, starting_indent)\r\n pretty_print = 1\r\n specialStream_(value, stream, starting_indent-indent_additive, pretty_print, indent_additive)\r\n if type(value) in [list, dict, OrderedDict] :\r\n stream.write('\\n')", "def json_out(db, options):\n stats = {\"stats\": basic_stats(db)}\n stats['logins_per_rp'] = db['rp']\n if options.quiet:\n print(dumps(stats, separators=(',', ':')))\n else:\n print(dumps(stats, indent=2, separators=(',', ': ')))", "def to_json_string(list_dictionaries):\n if list_dictionaries is None or len(list_dictionaries) == 0:\n return (\"[]\")\n else:\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None or list_dictionaries == []:\n return \"[]\"\n else:\n return json.dumps(list_dictionaries)", "def pretty(data, sort=True, indent=4):\n return json.dumps(data, sort_keys=sort, indent=indent,\n separators=(',', ': '))", "def json_compact(obj) -> str:\n return json.dumps(obj, separators=(\",\", \":\"))", "def dump_json(request, obj):\n return obj", "def to_json_string(list_dictionaries):\n if list_dictionaries is None or not list_dictionaries:\n return \"[]\"\n else:\n return json.dumps(list_dictionaries)", "def __repr__(self):\n return json.dumps(self, sort_keys=True, indent=2)", "def print_array(self):\n for item in self.items:\n print(item)", "def show(self):\n if self.is_empty():\n print('[]')\n return\n line = '['\n for item in self._data:\n line += '(' + str(item._key) + ', ' + str(item._value) + '), '\n line = line[:-2] + ']'\n print(line)", "def render_json(self, obj):\n self.response.content_type = \"application/json\"\n self.response.out.write(json.encode(obj))", "def __str__(self):\r\n self.vals.sort()\r\n return '{' + ','.join([str(e) for e in self.vals]) + '}'", "def __str__(self):\n self.vals.sort()\n return '{' + ','.join([str(e) for e in self.vals]) + '}'", "def print_Products_List(list_of_product_objects):\r\n 
print(\"\"\"\r\n ********************************************\r\n Your current product data is:\r\n --------------------------------------------\r\n \"\"\")\r\n try:\r\n for row in list_of_product_objects:\r\n print(\"\\t\\t\" + row[0] + \", $\" + row[1])\r\n except IOError as e:\r\n raise Exception(\"Problem with print statement\" + str(e))\r\n print(\"\"\"\r\n ********************************************\r\n \"\"\")", "def encode_objects(objects, encode):\n\tints = [encode(obj) for obj in objects]\n\treturn json.dumps(sorted(ints))", "def jsonify_list(data):\n assert data is not None\n indent = None\n if (current_app.config['JSONIFY_PRETTYPRINT_REGULAR'] and\n not request.is_xhr):\n indent = 2\n data = dumps(data, indent=indent, sort_keys=False)\n\n mimetype = 'application/json; charset=utf-8'\n response = current_app.response_class(data, mimetype=mimetype)\n\n return response", "def pprint(obj):\n for argname in sorted([x for x in dir(obj) if not x.startswith('__')]):\n # Skip callables\n if hasattr(getattr(obj, argname), '__call__'):\n continue\n print(\"{} : {}\".format(argname, getattr(obj, argname)))", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent = 2, sort_keys = True) + \"\\n\"", "def to_json_string(list_dictionaries):\n if not list_dictionaries:\n return \"[]\"\n if (type(list_dictionaries) != list or\n not all(type(x) == dict for x in list_dictionaries)):\n raise TypeError(\"list_dictionaries must be a list of dictionaries\")\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n\n if list_dictionaries is None or len(list_dictionaries) == 0:\n return \"[]\"\n else:\n return json.dumps(list_dictionaries)", "def print_formatted_values(**kwargs):\n string = ', '.join([f'{k}: {format_number(kwargs[k])}' for k in kwargs])\n print(string)", "def to_json_string(list_dictionaries):\n\n if list_dictionaries is None or list_dictionaries == []:\n return (\"[]\")\n\n else:\n return (json.dumps(list_dictionaries))", "def print_contents(self):\n print self.values", "def print_dict(data):\n print data", "def print_objects_created(objects_created):\n for object_dict in objects_created:\n key = object_dict.keys()[0]\n print '%s:' % key\n if type(object_dict[key]) is not list:\n object_dict[key] = [object_dict[key]]\n for obj in object_dict[key]:\n print '\\t %s' % obj['id']\n print", "def _print_all_objects(self):\n if self._json_to_dict():\n print(json.dumps(self.db_data, indent=3))\n return True\n else:\n print(\"Unable to Print all JSON Objects\")\n return False", "def pretty_json_repr(data):\n return json.dumps(data, sort_keys=True, indent=2)" ]
[ "0.75082326", "0.7412574", "0.7307961", "0.7016454", "0.6846915", "0.6846915", "0.6722339", "0.6652273", "0.6616749", "0.660433", "0.65436924", "0.65436924", "0.65428746", "0.65419024", "0.64811295", "0.64317226", "0.64314044", "0.64107096", "0.6371735", "0.6338294", "0.6291181", "0.6277293", "0.62590086", "0.62533826", "0.6244215", "0.62440187", "0.6241442", "0.6223934", "0.6217691", "0.62042415", "0.61657447", "0.6128815", "0.611849", "0.60970867", "0.6092767", "0.6088239", "0.6086384", "0.6048244", "0.60404146", "0.6024763", "0.6018472", "0.5997564", "0.59793085", "0.59793085", "0.59623283", "0.59482294", "0.5947762", "0.59137774", "0.5905483", "0.5875957", "0.5875957", "0.5850608", "0.58447766", "0.58412194", "0.5837899", "0.5836954", "0.58199894", "0.5817765", "0.58163863", "0.58152246", "0.58137816", "0.58088344", "0.5784073", "0.57827157", "0.578071", "0.5778079", "0.5775785", "0.57717973", "0.57689786", "0.57671046", "0.57648546", "0.5760642", "0.5756461", "0.5752702", "0.57318074", "0.5729372", "0.57285476", "0.5706016", "0.5705191", "0.56895953", "0.5685387", "0.56852734", "0.5685262", "0.5684698", "0.5679822", "0.56797206", "0.5678331", "0.5676752", "0.5667458", "0.56633794", "0.56615424", "0.566093", "0.56581986", "0.56530386", "0.5653001", "0.56469744", "0.564654", "0.56419057", "0.5630165", "0.56226456" ]
0.6860965
4
Retrieve and print metric groups.
def print_metric_groups(cmd_ctx, client, metric_groups, resource_filter):
    if not isinstance(metric_groups, (list, tuple)):
        metric_groups = [metric_groups]

    properties = {
        'anticipated-frequency-seconds': MIN_ANTICIPATED_FREQUENCY,
        'metric-groups': metric_groups,
    }
    mc = client.metrics_contexts.create(properties)

    mg_values = wait_for_metrics(mc, metric_groups)

    filtered_object_values = list()  # of MetricObjectValues

    if not mg_values:
        mg_name = metric_groups[0]  # just pick any
        res_class = zhmcclient._metrics._resource_class_from_group(mg_name)
        mg_def = zhmcclient.MetricGroupDefinition(
            name=mg_name, resource_class=res_class, metric_definitions=[])
    else:
        mg_def = mc.metric_group_definitions[mg_values.name]

        filter_cpc = None
        filter_partition = None
        filter_lpar = None
        filter_adapter = None
        filter_nic = None
        for r_class, r_name in resource_filter:
            if r_class == 'cpc' and r_name:
                filter_cpc = client.cpcs.find(name=r_name)
            elif r_class == 'partition' and r_name:
                assert filter_cpc
                filter_partition = filter_cpc.partitions.find(name=r_name)
            elif r_class == 'logical-partition' and r_name:
                assert filter_cpc
                filter_lpar = filter_cpc.lpars.find(name=r_name)
            elif r_class == 'adapter' and r_name:
                assert filter_cpc
                filter_adapter = filter_cpc.adapters.find(name=r_name)
            elif r_class == 'nic' and r_name:
                assert filter_partition
                filter_nic = filter_partition.nics.find(name=r_name)

        resource_class = mg_def.resource_class
        for ov in mg_values.object_values:
            included = False
            if resource_class == 'cpc':
                if not filter_cpc:
                    included = True
                elif ov.resource_uri == filter_cpc.uri:
                    included = True
            elif resource_class == 'partition':
                if not filter_cpc:
                    included = True
                elif ov.resource.manager.cpc.uri == filter_cpc.uri:
                    if not filter_partition:
                        included = True
                    elif ov.resource_uri == filter_partition.uri:
                        included = True
            elif resource_class == 'logical-partition':
                if not filter_cpc:
                    included = True
                elif ov.resource.manager.cpc.uri == filter_cpc.uri:
                    if not filter_lpar:
                        included = True
                    elif ov.resource_uri == filter_lpar.uri:
                        included = True
            elif resource_class == 'adapter':
                if not filter_cpc:
                    included = True
                elif ov.resource.manager.cpc.uri == filter_cpc.uri:
                    if not filter_adapter:
                        included = True
                    elif ov.resource_uri == filter_adapter.uri:
                        included = True
            elif resource_class == 'nic':
                if not filter_cpc:
                    included = True
                elif ov.resource.manager.partition.manager.cpc.uri == \
                        filter_cpc.uri:
                    if not filter_partition:
                        included = True
                    elif ov.resource.manager.partition.uri == \
                            filter_partition.uri:
                        if not filter_nic:
                            included = True
                        elif ov.resource_uri == filter_nic.uri:
                            included = True
            else:
                raise ValueError(
                    "Invalid resource class: {}".format(resource_class))
            if included:
                filtered_object_values.append(ov)

    resource_classes = [f[0] for f in resource_filter]
    cmd_ctx.spinner.stop()
    print_object_values(filtered_object_values, mg_def, resource_classes,
                        cmd_ctx.output_format, cmd_ctx.transpose)

    mc.delete()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_groups(self):\n\n for counter, label in enumerate(self.exp_labels_list):\n print('Key {}: {} \\n'.format(str(counter), label))", "def metrics_group():", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))", "def list_groups(args):\n\n for group in get_groups(args):\n print(group)", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def groups(self):\n groups_text = '\\n'\n for group in self.exercise_numbers:\n txt = ' %s:\\t' % group[0]\n for exercise in group[1:]:\n if isinstance(exercise, int):\n txt += '%d. ' % exercise\n else:\n txt += '\\n\\t%s\\n\\t' % exercise\n groups_text += txt + '\\n'\n return groups_text", "def get_metrics(metric_groups):\n return sorted(m for g in metric_groups for m in INSTANCE_METRIC_GROUP_MAP[g])", "def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)", "def get_group_names(self):\r\n return self.groups.keys()", "def get_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n group = client.get_group(group_id)\n\n group_readable, group_outputs = parse_outputs(group)\n human_readable = tableToMarkdown(name=\"Groups:\", t=group_readable,\n headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',\n 'Security Enabled', 'Visibility'],\n removeNull=True)\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(obj.ID === {group_id})': group_outputs}\n return human_readable, entry_context, group", "def test_get_groups(self):\n pass", "def test_get_groups(self):\n pass", "def gather_groups_memory(output_mem):\n groups = get_memory_cgroups()\n p_table = prettytable.PrettyTable(\n ['Group',\n 'Resident Set Size (MiB)'\n ], caching=False)\n p_table.align = 'l'\n p_table.align['Resident Set Size (MiB)'] = 'r'\n\n # Get overall memory summary per group\n total_rss = 0.0\n for group in groups:\n for line in output_mem.split(\"\\n\"):\n if group + \"/memory.stat\" in line:\n total_rss += float(line.split()[1])\n rss_mem = mem_to_mebibytes(line.split()[1])\n MEMORY['cgroups'][group] = rss_mem\n p_table.add_row(\n [group,\n rss_mem or '-',\n ])\n break\n\n # Add overall rss memory\n MEMORY['cgroups']['total_rss'] = mem_to_mebibytes(total_rss)\n p_table.add_row(\n [\"Total cgroup-rss\",\n MEMORY['cgroups']['total_rss'] or '-',\n ])\n return p_table", "def groups(self):\n return self.get_data(\"groups\")", "def printUsersInGroup(group) -> None:\n click.echo(tabulate(listUsersInDict(group), headers=\"keys\", tablefmt=\"grid\"))", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def cli(ctx, group_id):\n return ctx.gi.groups.show_group(group_id)", "def print_list(self):\r\n print(\"Displaying each metric:\")\r\n print(\"======\")\r\n for metric in self.metrics:\r\n metric.whoami()\r\n print(\"======\")\r\n print(self.metrics)\r\n print(\"END\")\r\n print()", "def get_psample_list_groups(dut):\n return st.show(dut, \"sudo psample --list-groups\", skip_tmpl=True)", "def __repr__(self):\n return str(self.group)", "def info_materials_groups_get():\n session = info_map.Session()\n\n mat = aliased(info_map.Material)\n grp = aliased(info_map.Group)\n\n q = session.query(mat.group_id,grp.name).join(grp).distinct()\n groups = [Group(group=row.group_id,name=row.name) for row in q.all()]\n return groups, 200", "def 
getGroups():\r\n return Group.getGroups()", "def output_groups(self) -> List[str]:\n return self._output_groups", "def get_pingroups(self):\n return self.groups[:]", "def __show_all_metrics(self):\n for obj in self.metrics_list:\n self.__print_metrics_info(obj.get_name())\n print()", "def get_cloudwatch_log_groups(global_vars):\n resp_data = {'status': False, 'log_groups':[], 'error_message': ''}\n client = boto3.client('logs')\n try:\n # Lets get all the logs\n resp = client.describe_log_groups( limit = 50 )\n resp_data['log_groups'].extend( resp.get('logGroups') )\n # Check if the results are paginated\n if resp.get('nextToken'):\n while True:\n resp = client.describe_log_groups( nextToken = resp.get('nextToken'), limit = 50 )\n resp_data['log_groups'].extend( resp.get('logGroups') )\n # Check & Break, if the results are no longer paginated\n if not resp.get('nextToken'):\n break\n resp_data['status'] = True\n except Exception as e:\n resp_data['error_message'] = str(e)\n return resp_data", "def test_grouped(self):\n gfile = grades.writers.GradesFile(self.fname)\n gfile.table.compute_grouped_mean('Group')\n gfile.table_format = 'org'\n self.check_output(self.output_str2, gfile)", "def test_020_query_groups(self):\n\n testflow.step(\"Querying for groups\")\n assert self.query_cli.run(\n what='group'\n )[0], \"Failed to search for groups\"", "def stats(self) -> Sequence['outputs.GetSystemGroupsGroupStatResult']:\n return pulumi.get(self, \"stats\")", "def get(self, *args):\n return _libsbml.ListOfGroups_get(self, *args)", "def test_get_device_groups(self):\n pass", "def get_mobility_group(session, to_cvs=False):\n send_cmd = \"show mobility summary\"\n output_raw = session.get_command_output(send_cmd)\n\n # TextFSM template for parsing \"show ap summary\" output\n template_file = session.script.get_template(\"cisco_aireos_show_mobility_summary.template\")\n output = utilities.textfsm_parse_to_list(output_raw, template_file, add_header=True)\n\n if to_cvs:\n output_filename = session.create_output_filename(\"mobility-group\", ext=\".csv\")\n utilities.list_of_lists_to_csv(output, output_filename)\n\n return output", "def group_describe(self, group):\n mapped = self.map_vects(datanorm)\n mappednp= np.array(mapped)\n \n groups= mappednp[:,0]\n data['Group'] = pd.Series(groups, index=data.index)\n print(data[data['Group']==group].describe())", "def getGroup(self, *args):\n return _libsbml.GroupsModelPlugin_getGroup(self, *args)", "def print_item(group):\n print(\"\\tName: {}\".format(group.name))\n print(\"\\tId: {}\".format(group.id))\n if hasattr(group, 'location'):\n print(\"\\tLocation: {}\".format(group.location))\n print_properties(getattr(group, 'properties', None))", "def get_list_groups(self):\n list_response = requests.get(self.groups_url, headers=self.headers)\n return list_response.json()[\"groups\"]", "def output_metrics(self):\n print('')\n for key in sorted(self.metrics):\n print('{}:'.format(key), end='')\n for k, v in self.metrics[key].items():\n if type(v[-1]) is list:\n print('\\t' + k + ': ' + ''.join('{:5.3f} '.format(vs) for vs in v[-1]), end='')\n else:\n print('\\t{}: {:5.3f}'.format(k, v[-1]), end='')\n print('\\n', end='')", "def list_groups(self, **params):\n url = 'groups'\n if params:\n url += '?%s' % urllib.urlencode(params)\n resp, body = self.get(url)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def ListGroupStats(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n 
context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def print_item(group):\n print(\"\\tName: {}\".format(group.name))\n print(\"\\tId: {}\".format(group.id))\n print(\"\\tLocation: {}\".format(group.location))\n print(\"\\tTags: {}\".format(group.tags))\n if hasattr(group, 'properties'):\n print_properties(group.properties)", "def test_api_v1_groups_get(self):\n pass", "def get_groups(self):\n response = self._get(\"groups\")\n\n return response.json()", "def get_groups_details(self, groups):\n assert isinstance(groups, list)\n # It may be require we request the API by splitting the names list\n # If the list is too long to be handled by the Gerrit server (URI)\n query_args = \"?%s\" % \"&\".join([\"q=%s\" % g for g in groups])\n query_args += \"&o=MEMBERS\" if groups else \"o=MEMBERS\"\n\n try:\n ret = self.g.get('groups/%s' % query_args)\n except HTTPError as e:\n return self._manage_errors(e)\n\n return ret", "def get_device_groups(self):\n\t\tresult = {\n\t\t\t'success': False,\n\t\t\t'response': '',\n\t\t\t'error': '',\n\t\t}\n\n\t\tself.ise.headers.update({'Accept': 'application/vnd.com.cisco.ise.network.networkdevicegroup.1.0+xml'})\n\n\t\tresp = self.ise.get('{0}/config/networkdevicegroup'.format(self.url_base))\n\n\t\tif resp.status_code == 200:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = [(i['@name'], i['@id'])\n\t\t\t\t\t\t\t\t for i in ERS._to_json(resp.text)['ns3:searchResult']['ns3:resources']['ns5:resource']]\n\t\t\treturn result\n\t\telse:\n\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\tresult['error'] = resp.status_code\n\t\t\treturn result", "def list_groups(self):\n return self.get_admin(\"groups\")", "def groups():\n access_token = session['access_token']\n return \"%s\" % list_groups(access_token)", "def test_get_groups(self):\n response = self.client.get_groups()\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"GET\")\n self.assertEqual(uri, \"/admin/v1/groups\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def test_api_v1_groups_names_get(self):\n pass", "def print_group_summary(self, groups, group_names=None, detailed=False, tablefmt='jira'):\n output = self.generate_group_summary_table(groups, group_names)\n\n \"\"\"Group patches table\"\"\"\n group_patches = output['group_patches']\n headers = [\"Group Patches\",] + group_patches.columns.tolist()\n print()\n print(tabulate(group_patches, headers=headers, tablefmt=tablefmt))\n print()\n\n \"\"\"Group slides table\"\"\"\n group_slides = output['group_slides']\n headers = [\"Group Slides\",] + group_slides.columns.tolist()\n print()\n print(tabulate(group_slides, headers=headers, tablefmt=tablefmt))\n print()\n\n \"\"\"Group patients table\"\"\"\n group_patients = output['group_patients']\n headers = [\"Group Patients\",] + group_patients.columns.tolist()\n print()\n print(tabulate(group_patients, headers=headers, tablefmt=tablefmt))\n print()\n\n if detailed:\n print(\"Patient Patches\")\n for group_name, tally in output['patient_patches'].items():\n headers = [group_name,] + tally.columns.tolist()\n print()\n print(tabulate(tally, headers=headers, tablefmt=tablefmt))\n print()\n \n print(\"Patient Slides\")\n for group_name, tally in output['patient_slides'].items():\n headers = [group_name,] + tally.columns.tolist()\n print()\n print(tabulate(tally, headers=headers, tablefmt=tablefmt))\n print()\n 
\n print(\"Slide Patches\")\n for group_name, tally in output['slide_patches'].items():\n headers = [group_name,] + tally.columns.tolist()\n print()\n print(tabulate(tally, headers=headers, tablefmt=tablefmt))\n print()\n return output", "def get_all_groups(self):\n self.cursor.execute(\"select * from groups\")\n self.connection.commit()\n return self.cursor.fetchall()", "def test_get_device_groups1(self):\n pass", "def get_groups(self):\n return Client._get(self)", "def print_item(group):\n print(\"\\tName: {}\".format(group.name))\n print(\"\\tId: {}\".format(group.id))\n print(\"\\tLocation: {}\".format(group.location))\n print(\"\\tTags: {}\".format(group.tags))\n if hasattr(group, 'status'):\n print(\"\\tStatus: {}\".format(group.status))\n if hasattr(group, 'state'): # Site\n print(\"\\tStatus: {}\".format(group.state))\n if hasattr(group, 'properties'):\n print_properties(group.properties)\n print(\"\\n\\n\")", "def get(self):\n status = ErrorCode.SUCCESS\n try:\n res = []\n cid = self.get_argument('cid', None)\n if not (cid is None):\n res = QueryHelper.get_groups_by_cid(cid, self.db)\n self.write_ret(status,\n dict_=DotDict(res=res))\n except Exception as e:\n logging.exception(\"[UWEB] Get groups failed. Exception: %s\",\n e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)", "def show_all_groups(self, account_name=None, account_id=None, path=None,\n group_name=None, group_id=None, search=False, print_table=True):\n pt = PrettyTable(['ACCOUNT:', 'GROUPNAME:', 'GROUP_ID:'])\n pt.hrules = 1\n pt.align = 'l'\n list = self.get_all_groups(account_name=account_name, account_id=account_id,\n path=path, group_name=group_name, group_id=group_id,\n search=search)\n for group in list:\n pt.add_row([group['account_name'], group['group_name'], group['group_id']])\n if print_table:\n self.log.info(\"\\n\" + str(pt) + \"\\n\")\n else:\n return pt", "def test_groups_get(self):\n pass", "def test_groups_get(self):\n pass", "def getGroups(self):\n return [g[0] for g in grp.getgrall()]", "def groups(self):\n return []", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups", "def get_all_groups():\n return jsonify(admin.get_all_groups(current_app.scoped_session()))", "def getGroupInfo(groupId):\n url = f\"https://groups.roblox.com/v1/groups/{groupId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j", "def display(self):\n print \"\\n\\n***********************\\n\"\n print \"Info about group %s, name=%s, path=%s\" % (self.sdef['id'], \n self.name, self.path)\n print \"sdef=\"\n pp.pprint(self.sdef)\n print \"expanded_def=\"\n pp.pprint (self.expanded_def)\n print \"includes=\"\n pp.pprint (self.includes)\n print \"parent_attributes=\"\n pp.pprint (self.parent_attributes)\n print \"attributes=\"\n pp.pprint (self.attributes)\n print \"mstats=\"\n pp.pprint (self.mstats)", "def iter_groups(self):\n\t\treturn iter(self._groups)", "def test_get_group(self):\n pass", "def print_metrics(self):\n output = \"\"\n metrics = self.get_all_metrics()\n for k, v in metrics.items():\n # Print the help line\n output += \"\\n# HELP {name} {help}\\n\".format(name=v['name'],\n help=v['help'])\n # and the type line\n output += \"# TYPE {name} {type}\\n\".format(name=v['name'],\n type=v['type'])\n for sample in v['values']:\n labels = json.loads(sample, object_pairs_hook=OrderedDict)\n if v['type'] == 'histogram' and labels.get('le') == '_sum':\n labels.pop('le', None)\n mname = '{name}_sum'.format(name=v['name'])\n elif 
v['type'] == 'histogram' and labels.get('le') == '+Inf':\n labels.pop('le', None)\n mname = '{name}_count'.format(name=v['name'])\n elif v['type'] == 'histogram':\n mname = '{name}_bucket'.format(name=v['name'])\n else:\n mname = v['name']\n output += \"{name}{labels} {value}\\n\".format(name=mname,\n labels=self.format_labels(labels),\n value=self.format_value(v['values'][sample]))\n return output", "def get_groups(self):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_get_groups_query+\" ORDER BY $groupname_field$\",{'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: get_groups: %s\" % (query,))\n\n cursor.execute(query)\n desc=[i[0] for i in cursor.description]\n for row in cursor:\n dictrow=dict(zip(desc,row))\n yield dictrow[self.sql_groupname_field]", "def get_all_groups(self):\n return self.groups + ['all']", "def get_group(tkn: Token = Depends(from_authotization_header_nondyn),):\n assert_has_clearance(tkn.owner, \"sni.read_group\")\n return [\n GetGroupShortOut(group_id=str(grp.pk), group_name=grp.group_name)\n for grp in Group.objects().order_by(\"group_name\")\n ]", "def test_get_tag_group_by(self):\n url = \"?\"\n query_params = self.mocked_query_params(url, OCPTagView)\n handler = OCPTagQueryHandler(query_params)\n tag_keys = handler.get_tag_keys(filters=False)\n\n group_by_key = tag_keys[0]\n group_by_value = \"group_by\"\n url = f\"?group_by[tag:{group_by_key}]={group_by_value}\"\n query_params = self.mocked_query_params(url, OCPCpuView)\n handler = OCPReportQueryHandler(query_params)\n group_by = handler._get_tag_group_by()\n group = group_by[0]\n expected = \"pod_labels__\" + group_by_key\n self.assertEqual(len(group_by), 1)\n self.assertEqual(group[0], expected)", "def get_group(self):\n\t\treturn self.variables.get('group')", "def display_count_grouped_by_genre():\n dict_of_genre = reports.count_grouped_by_genre(filename)\n print(\"Game grouped by genre:\")\n for genre, value in dict_of_genre.items():\n print(\"{}: {}\".format(genre, value))\n print()", "def list_metrics(self):\n results = []\n if self.r.exists(self.metrics_key):\n keys = self.r.smembers(self.metrics_key)\n for k in keys:\n # metric_key, metric_type, metric_name, metric_help = keys.split(\" \", 3)\n results.append(k.split(\" \", 3))\n return results", "def _compute_group_stats():\n group_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # TODO: move this to property of evaluation group or add dedicated data model.\n # GOAL: should be configurable from within the Django admin backend.\n #\n # MINIMAL: move to local_settings.py?\n #\n # The following dictionary defines the number of HITs each group should\n # have completed during the WMT16 evaluation campaign.\n \n for group in groups:\n _name = group.name\n \n _group_stats = HIT.compute_status_for_group(group)\n _total = _group_stats[0]\n \n if _total > 0 and not _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = 0\n elif _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = GROUP_HIT_REQUIREMENTS[_name]\n _delta = _total - _required\n _data = (_total, _required, _delta)\n \n if _data[0] > 0:\n group_stats.append((_name, _data))\n \n # Sort by number of remaining HITs.\n 
group_stats.sort(key=lambda x: x[1][2])\n \n # Add totals at the bottom.\n global_total = sum([x[1][0] for x in group_stats])\n global_required = sum([x[1][1] for x in group_stats])\n global_delta = global_total - global_required\n global_data = (global_total, global_required, global_delta)\n group_stats.append((\"Totals\", global_data))\n \n return group_stats", "def wait_for_metrics(metric_context, metric_groups):\n retries = 0\n got_data = False\n while not got_data:\n mr_str = metric_context.get_metrics()\n mr = zhmcclient.MetricsResponse(metric_context, mr_str)\n for mg_values in mr.metric_group_values:\n if mg_values.name in metric_groups:\n got_data = True\n if DEBUG_METRICS_RESPONSE:\n print(\"Debug: MetricsResponse:\")\n print(mr_str)\n break\n if not got_data:\n if retries > GET_METRICS_MAX_RETRIES:\n return None\n time.sleep(GET_METRICS_RETRY_TIME) # avoid hot spin loop\n retries += 1\n return mg_values", "def list_groups_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n order_by = args.get('order_by')\n next_link = args.get('next_link')\n top = args.get('top')\n filter_ = args.get('filter')\n groups = client.list_groups(order_by, next_link, top, filter_)\n\n groups_readable, groups_outputs = parse_outputs(groups['value'])\n\n next_link_response = ''\n if '@odata.nextLink' in groups:\n next_link_response = groups['@odata.nextLink']\n\n if next_link_response:\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}NextLink': {'GroupsNextLink': next_link_response},\n f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': groups_outputs}\n title = 'Groups (Note that there are more results. Please use the next_link argument to see them. The value ' \\\n 'can be found in the context under MSGraphGroupsNextLink.GroupsNextLink): '\n else:\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': groups_outputs}\n title = 'Groups:'\n\n human_readable = tableToMarkdown(name=title, t=groups_readable,\n headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail'],\n removeNull=True)\n\n return human_readable, entry_context, groups", "def group_nodes(self, group, namespace=None):\n source = self._source(namespace)\n return self._list(source, 'map', group)", "def getListOfGroups(self, *args):\n return _libsbml.GroupsModelPlugin_getListOfGroups(self, *args)", "def group_info(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return 1\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n for group in config_json[\"groups\"]:\n if group[\"name\"] == args.group:\n print(json.dumps(group, indent=4))\n return 0\n\n print(\"No group matching {} found\".format(args.group))\n return 1", "def metrics(_):\r\n collector = BuildsCollector()\r\n build_metrics, headers = collector.get_metrics_table()\r\n print(tabulate(build_metrics, headers=headers))", "def groups(self):\r\n return resources.Groups(self)", "def get_group_info(groupname):\n return jsonify(admin.get_group_info(current_app.scoped_session(), groupname))", "def list_metrics(self):\n pass", "def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups", "def test_function(self):\n 
self.ms_client.http_request(method='GET', url_suffix='groups', params={'$orderby': 'displayName'})\n demisto.results('ok')", "def get_groups(self) -> dict:\n return dict(self._groups)", "def getGrpStats(group):\n return {'min': group.min(), 'max': group.max(),\n 'count': group.count(), 'mean': group.mean(), 'sum':group.sum()}", "def get_identity_groups(self):\n\t\tresult = {\n\t\t\t'success': False,\n\t\t\t'response': '',\n\t\t\t'error': '',\n\t\t}\n\n\t\tself.ise.headers.update({'Accept': 'application/vnd.com.cisco.ise.identity.identitygroup.1.0+xml'})\n\n\t\tresp = self.ise.get('{0}/config/identitygroup'.format(self.url_base))\n\n\t\tif resp.status_code == 200:\n\t\t\tresult['success'] = True\n\t\t\t###\n\t\t\tx = ERS._to_json(resp.text)['ns3:searchResult']['ns3:resources']['ns5:resource']\n\t\t\tprint (\"x\", len(x))\n\t\t\tprint (x[0])\n\t\t\tfor element in x[0]:\n\t\t\t\tprint (element,x[0][element])\n\t\t\t###\n\t\t\tresult['response'] = [(i['@name'], i['@id'], i['@description'],i['link']['@href'])\n\t\t\t\t\t\t\t\t for i in ERS._to_json(resp.text)['ns3:searchResult']['ns3:resources']['ns5:resource']]\n\t\t\treturn result\n\t\telse:\n\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\tresult['error'] = resp.status_code\n\t\t\treturn result", "def list_groups(request):\n groups = models.UserGroup.all().order('name')\n return utility.respond(request, 'admin/list_groups', {'groups': groups})", "def get_groups(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return []\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n groups = []\n for group in config_json[\"groups\"]:\n groups.append(group[\"name\"])\n return groups", "def show_signatories_by_group_name(name):\n print('Searching for groups... 
', end='', flush=True)\n\n groups = list_groups(name=name)\n\n if not groups:\n print()\n print(red('No student groups found.'))\n return\n\n plural_case = 'entry' if len(groups) == 1 else 'entries'\n\n print('Found {} {}.'.format(len(groups), plural_case))\n\n print('Searching for signatories...')\n\n for (oid, attrs) in groups.items():\n print()\n show_signatories_for_group(oid)", "def describe_group(args, topic):\n global bootstrap\n out = ()\n\n consumer = KafkaConsumer(\n bootstrap_servers=bootstrap,\n group_id=\"backbeat-replication-group-{0}\".format(args.destination),\n enable_auto_commit=False,\n )\n topics = consumer.topics()\n if not topic in topics:\n return False\n\n for part in consumer.partitions_for_topic(topic):\n tp = TopicPartition(topic, part)\n consumer.assign([tp])\n committed = consumer.committed(tp)\n consumer.seek_to_end(tp)\n last_offset = consumer.position(tp)\n try:\n out += (\n {\n \"topic\": topic,\n \"partition\": part,\n \"committed\": committed,\n \"last_offset\": last_offset,\n \"lag\": (last_offset - committed),\n },\n )\n except TypeError:\n sys.stderr.write(\"bad/missing info on consumer group (doesn't exist?)\\n\")\n sys.exit(1)\n\n consumer.close(autocommit=False)\n return out", "def groups(self):\n\n return ('train', 'dev', 'eval')", "def list(self):\n METHOD = 'GET'\n API_PATH = '/groups/list'\n\n # Make REST call\n resp = self._rest_call[METHOD](API_PATH)\n\n if resp.status_code == 200:\n return resp.json().get('group_names')\n\n elif resp.status_code == 403:\n raise AuthorizationError(\"User is not authorized or token is incorrect.\")\n\n else:\n if resp.json().get(\"error_code\") in ERROR_CODES:\n raise ERROR_CODES[resp.json().get('error_code')](resp.json().get('message'))\n else:\n raise APIError(\"Response code {0}: {1} {2}\".format(resp.status_code,\n resp.json().get('error_code'),\n resp.json().get('message')))", "def describe_target_groups(ctx):\n data = self.get_target_groups_info()\n ctx.info('Target groups details for load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)", "def print_group(self, k, n):\n self.votation_k = k\n self.votation_n = n\n print_group(k, n, self.data.votation_clusters)", "def group(group_name, members):\r\n pymol_out = \"\"\r\n for member in members:\r\n pymol_out += f'\\ncmd.group(\"{group_name}\", members=\"{member}\")'\r\n return pymol_out", "def list_group(self, groupname):\n return self.get_admin(\"groups/{}\".format(groupname))" ]
[ "0.72279793", "0.70860595", "0.674111", "0.66355723", "0.64703333", "0.6281449", "0.62582123", "0.62421024", "0.62346387", "0.6213498", "0.61584836", "0.61584836", "0.6125847", "0.6110818", "0.61033535", "0.60848945", "0.6041761", "0.6026084", "0.599124", "0.5982218", "0.59800136", "0.59650046", "0.5954368", "0.59489745", "0.59417105", "0.5909944", "0.5909551", "0.5903865", "0.5896795", "0.5892987", "0.5889954", "0.58797073", "0.58791655", "0.5870438", "0.5862079", "0.5858032", "0.5853092", "0.58500797", "0.584564", "0.5837679", "0.5820826", "0.5816614", "0.58100176", "0.58047324", "0.58038986", "0.58021075", "0.5780234", "0.57746476", "0.5774412", "0.5771016", "0.57677066", "0.5751746", "0.5741148", "0.5733749", "0.57311463", "0.5725686", "0.5725686", "0.57088536", "0.57014364", "0.5692728", "0.5692728", "0.5692728", "0.5683019", "0.56824666", "0.56592005", "0.56514883", "0.56413776", "0.5637853", "0.5637823", "0.56356657", "0.5629036", "0.56179625", "0.5612782", "0.5591441", "0.5577721", "0.55702275", "0.5565176", "0.5559895", "0.55560255", "0.5552245", "0.55341524", "0.5524757", "0.55246764", "0.5520475", "0.5518775", "0.55186343", "0.55140585", "0.551082", "0.55036634", "0.55007696", "0.548851", "0.548683", "0.54862463", "0.54841125", "0.5482523", "0.5479356", "0.54764676", "0.5474957", "0.5467964", "0.5467471" ]
0.73456234
0
Command group for reporting metrics. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_group():
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmd_help(args):", "def help_opt(self):\n print(OPTIONS)", "def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))", "async def send_group_help(self, group: commands.Group):\n embed = discord.Embed(\n title=f\"Help for {group.name}.\",\n description=group.help or \"No help for this command.\",\n colour=0x9B2335,\n )\n group_commands = \", \".join(f\"`{command.name}`\" for command in group.commands)\n embed.add_field(name=f\"{group.name}'s subcommands\", value=group_commands)\n\n if group.aliases:\n embed.add_field(name=\"Aliases\", value=\", \".join(group.aliases))\n\n embed.set_footer(\n text=f\"Type {self.clean_prefix}{group.name} \"\n \"<command> to see info on each subcommand\"\n )\n await self.context.send(embed=embed)", "def help_help(self):\n print(\"List commands or print details about a command\")", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def help_option(args, run):\n pass", "def getHelp(self):\r\n help_str =\\\r\n \"\"\"##########################################################################################\r\n#\r\n# Required:\r\n#\r\n# --query_NAST multi-fasta file containing query sequences in alignment format\r\n#\r\n# Common opts:\r\n#\r\n# --db_NAST db in NAST format\r\n# --db_FASTA db in fasta format (megablast formatted)\r\n#\r\n#\r\n# -n number of top matching database sequences to compare to (default 15)\r\n# -R min divergence ratio default: 1.007\r\n# -P min percent identity among matching sequences (default: 90)\r\n#\r\n# ## parameters to tune ChimeraParentSelector:\r\n#\r\n# Scoring parameters:\r\n# -M match score (default: +5)\r\n# -N mismatch penalty (default: -4)\r\n# -Q min query coverage by matching database sequence (default: 70)\r\n# -T maximum traverses of the multiple alignment (default: 1)\r\n\r\n#\r\n# ## parameters to tune ChimeraPhyloChecker:\r\n#\r\n#\r\n# --windowSize default 50\r\n# --windowStep default 5\r\n# --minBS minimum bootstrap support for calling chimera (default: 90)\r\n# -S percent of SNPs to sample on each side of breakpoint for computing bootstrap support (default: 10)\r\n# --num_parents_test number of potential parents to test for chimeras (default: 3)\r\n# --MAX_CHIMERA_PARENT_PER_ID Chimera/Parent alignments with perID above this are considered non-chimeras (default 100; turned off)\r\n#\r\n# ## misc opts\r\n#\r\n# --printFinalAlignments shows alignment between query sequence and pair of candidate chimera parents\r\n# --printCSalignments print ChimeraSlayer alignments in ChimeraSlayer output\r\n# --exec_dir chdir to here before running\r\n#\r\n#########################################################################################\r\n \"\"\"\r\n return help_str", "async def module_command_help(self, ctx, parsed):\n\n def _create_commandhelp(request):\n usage, desc = request.format_help().split(\"\\n\\n\")[:2]\n usage = usage.partition(\" \")[2]\n desc = desc.rstrip()\n args, opts, subcmds, aliases = {}, {}, {}, []\n prev_arg = ()\n for arg in 
request._get_positional_actions():\n name = arg.metavar or arg.dest\n if isinstance(arg, _SubParsersAction):\n args[name] = (arg.help, True)\n prev_sub = ()\n for subname, subparser in arg.choices.items():\n # Aliases follow the canonical name\n if prev_sub and subparser is prev_sub[1]:\n subcmds[prev_sub[0]].aliases.append(subname)\n else:\n subcmds[subname] = _create_commandhelp(subparser)\n # Don't include parent command in subcommand name\n subcmds[subname].name = subname\n prev_sub = (subname, subparser)\n else:\n # Aliases follow the canonical name\n if prev_arg and arg is prev_arg[1]:\n args[prev_arg[0]].aliases.append(name)\n else:\n args[name] = (arg.help, False)\n prev_arg = (name, arg)\n for opt in request._get_optional_actions():\n names = tuple(opt.option_strings)\n if opt.nargs == 0 or opt.const:\n # Don't make it seem like flag options take a value\n metavar = None\n else:\n metavar = opt.metavar or opt.dest\n opts[names] = (metavar, opt.help)\n return CommandHelp(\n HelpType.CMD,\n request.name,\n desc,\n usage,\n aliases=aliases,\n args=args,\n opts=opts,\n subcmds=subcmds,\n )\n\n if parsed.args[\"command\"]:\n help_args = parsed.args[\"command\"]\n if len(help_args) > 1 and help_args[0:2] == [\"help\"] * 2:\n await ctx.reply_command_result(parsed, \"I'm afraid that you're far beyond any help...\")\n return\n try:\n request = self._commands[help_args[0]]\n except KeyError:\n cmd_help = CommandHelp(HelpType.NO_SUCH_CMD, help_args[0])\n else:\n cmd_help = _create_commandhelp(request)\n help_args.pop(0)\n subcmd = cmd_help\n for sub_request in help_args:\n try:\n parent = subcmd\n subcmd = cmd_help.get_subcmd(sub_request)\n except KeyError:\n cmd_help = CommandHelp(HelpType.NO_SUCH_SUBCMD, sub_request, parent=parent)\n break\n else:\n cmd_help = subcmd\n elif parsed.args[\"module\"]:\n mod_id = parsed.args[\"module\"]\n if mod_id not in self._features and mod_id != \"core\":\n cmd_help = CommandHelp(HelpType.NO_SUCH_MOD, mod_id)\n else:\n try:\n parsers = [parser for parser in self._commands.iter_by_module(mod_id)]\n except KeyError:\n parsers = []\n desc = parsers[0].module.description\n cmds = {}\n for parser in parsers:\n mod = cmds.setdefault(mod_id, {})\n mod[parser.name] = parser.description\n cmd_help = CommandHelp(HelpType.MOD, mod_id, desc, cmds=cmds)\n else:\n cmds = {}\n for mod_id, parsers in self._commands.pairs():\n for parser in parsers:\n mod = cmds.setdefault(mod_id, {})\n mod[parser.name] = parser.description\n cmd_help = CommandHelp(HelpType.ALL, cmds=cmds)\n await ctx.core_command_help(parsed, cmd_help)", "def help(self, args):\n print('No commands available for this consumer')", "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def help(self):\n msg = \"`%s' performs the computational aspects of genotyping-by-sequencing.\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Usage: %s [OPTIONS] ...\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Options:\\n\"\n msg += \" -h, --help\\tdisplay the help and exit\\n\"\n msg += \" -V, --version\\toutput version information and exit\\n\"\n msg += \" -v, --verbose\\tverbosity level (0/default=1/2/3)\\n\"\n msg += \" --proj1\\tname of the project used for steps 1 to 4\\n\"\n msg += \"\\t\\tmention a reference genome only if all samples belong to\\n\"\n msg += \"\\t\\t the same species, and will be mapped to the same ref genome\\n\"\n msg += \" --proj2\\tname of the project used for steps 4 to 8\\n\"\n msg += 
\"\\t\\tcan be the same as --proj1, or can be different\\n\"\n msg +=\"\\t\\t notably when samples come from different species\\n\"\n msg += \"\\t\\t or if one wants to align reads to different ref genomes\\n\"\n msg += \" --schdlr\\tname of the cluster scheduler (default=SGE)\\n\"\n msg += \" --queue\\tname of the cluster queue (default=normal.q)\\n\"\n msg += \" --resou\\tcluster resources (e.g. 'test' for 'qsub -l test')\\n\"\n msg += \" --rmvb\\tremove bash scripts for jobs launched in parallel\\n\"\n msg += \" --step\\tstep to perform (1/2/3/.../9)\\n\"\n msg += \"\\t\\t1: raw read quality per lane (with FastQC v >= 0.11.2)\\n\"\n msg += \"\\t\\t2: demultiplexing per lane (with demultiplex.py v >= 1.14.0)\\n\"\n msg += \"\\t\\t3: cleaning per sample (with CutAdapt v >= 1.8)\\n\"\n msg += \"\\t\\t4: alignment per sample (with BWA MEM v >= 0.7.12, Samtools v >= 1.3, Picard and R v >= 3)\\n\"\n msg += \"\\t\\t5: local realignment per sample (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t6: local realignment per genotype (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t7: variant and genotype calling per genotype (with GATK HaplotypeCaller v >= 3.5)\\n\"\n msg += \"\\t\\t8: variant and genotype calling jointly across genotypes (with GATK GenotypeGVCFs v >= 3.5)\\n\"\n msg += \"\\t\\t9: variant and genotype filtering (with GATK v >= 3.5)\\n\"\n msg += \" --samples\\tpath to the 'samples' file\\n\"\n msg += \"\\t\\tcompulsory for all steps, but can differ between steps\\n\"\n msg += \"\\t\\t e.g. if samples come from different species or are aligned\\n\"\n msg += \"\\t\\t on different ref genomes, different samples file should\\n\"\n msg += \"\\t\\t be used for steps 4-9, representing different subsets of\\n\"\n msg += \"\\t\\t the file used for steps 1-3\\n\"\n msg += \"\\t\\tthe file should be encoded in ASCII\\n\"\n msg += \"\\t\\tthe first row should be a header with column names\\n\"\n msg += \"\\t\\teach 'sample' (see details below) should have one and only one row\\n\"\n msg += \"\\t\\tany two columns should be separated with one tabulation\\n\"\n msg += \"\\t\\tcolumns can be in any order\\n\"\n msg += \"\\t\\trows starting by '#' are skipped\\n\"\n msg += \"\\t\\t12 columns are compulsory (but there can be more):\\n\"\n msg += \"\\t\\t genotype (see details below, e.g. 'Col-0', but use neither underscore '_' nor space ' ' nor dot '.', use dash '-' instead)\\n\"\n msg += \"\\t\\t ref_genome (identifier of the reference genome used for alignment, e.g. 'Atha_v2', but use neither space ' ' nor dot '.'; the full species name, e.g. 'Arabidopsis thaliana', will be present in the file given to --dict)\\n\"\n msg += \"\\t\\t library (e.g. can be the same as 'genotype')\\n\"\n msg += \"\\t\\t barcode (e.g. 'ATGG')\\n\"\n msg += \"\\t\\t seq_center (e.g. 'Broad Institute', 'GenoToul', etc)\\n\"\n msg += \"\\t\\t seq_platform (e.g. 'ILLUMINA', see SAM format specification)\\n\"\n msg += \"\\t\\t seq_platform_model (e.g. 'HiSeq 2000')\\n\"\n msg += \"\\t\\t flowcell (e.g. 'C5YMDACXX')\\n\"\n msg += \"\\t\\t lane (e.g. '3', can be '31' if a first demultiplexing was done per index)\\n\"\n msg += \"\\t\\t date (e.g. '2015-01-15', see SAM format specification)\\n\"\n msg += \"\\t\\t fastq_file_R1 (filename, one per lane, gzip-compressed)\\n\"\n msg += \"\\t\\t fastq_file_R2 (filename, one per lane, gzip-compressed)\\n\"\n msg += \" --fcln\\tidentifier of a flowcell and lane number\\n\"\n msg += \"\\t\\tformat as <flowcell>_<lane-number>, e.g. 
'C5YMDACXX_1'\\n\"\n msg += \"\\t\\tif set, only the samples from this lane will be analyzed\\n\"\n msg += \" --pird\\tpath to the input reads directory\\n\"\n msg += \"\\t\\tcompulsory for steps 1 and 2\\n\"\n msg += \"\\t\\twill be added to the columns 'fastq_file_R*' from the sample file\\n\"\n msg += \"\\t\\tif not set, input read files should be in current directory\\n\"\n msg += \" --enz\\tname of the restriction enzyme\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=ApeKI\\n\"\n msg += \" --dmxmet\\tmethod used to demultiplex\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=4c (see the help of demultiplex.py to know more)\\n\"\n msg += \" --subst\\tnumber of substitutions allowed during demultiplexing\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=2\\n\"\n msg += \" --ensubst\\tenforce the nb of substitutions allowed\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=lenient/strict\\n\"\n msg += \" --adp\\tpath to the file containing the adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tsame format as FastQC: name<tab>sequence\\n\"\n msg += \"\\t\\tname: at least 'adpR1' (also 'adpR2' if paired-end)\\n\"\n msg += \"\\t\\tsequence: from 5' (left) to 3' (right)\\n\"\n msg += \" --errtol\\terror tolerance to find adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --minovl\\tminimum overlap length between reads and adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=3 (in bases)\\n\"\n msg += \" --minrl\\tminimum length to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=35 (in bases)\\n\"\n msg += \" --minq\\tminimum quality to trim a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=20 (used for both reads if paired-end)\\n\"\n msg += \" --maxNp\\tmaximum percentage of N to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --ref\\tpath to the prefix of files for the reference genome\\n\"\n msg += \"\\t\\tcompulsory for steps 4, 5, 6, 7, 8, 9\\n\"\n msg += \"\\t\\tshould correspond to the 'ref_genome' column in --samples\\n\"\n msg += \"\\t\\te.g. '/data/Atha_v2' for '/data/Atha_v2.fa', '/data/Atha_v2.bwt', etc\\n\"\n msg += \"\\t\\tthese files are produced via 'bwa index ...'\\n\"\n msg += \" --dict\\tpath to the 'dict' file (SAM header with @SQ tags)\\n\"\n msg += \"\\t\\tcompulsory for step 4\\n\"\n msg += \"\\t\\tsee 'CreateSequenceDictionary' in the Picard software\\n\"\n msg += \" --jgid\\tcohort identifier to use for joint genotyping\\n\"\n msg += \"\\t\\tcompulsory for steps 8, 9\\n\"\n msg += \"\\t\\tuseful to launch several, different cohorts in parallel\\n\"\n msg += \" --rat\\trestrict alleles to be of a particular allelicity\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdefault=ALL/BIALLELIC/MULTIALLELIC\\n\"\n msg += \"\\t\\tsee '--restrictAllelesTo' in GATK's SelectVariant\\n\"\n msg += \" --mdp\\tminimum value for DP (read depth; e.g. 10)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mgq\\tminimum value for GQ (genotype quality; e.g. 
20)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mnfg\\tmaximum number of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mffg\\tmaximum fraction of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFractionFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mnnc\\tmaximum number of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \" --mfnc\\tmaximum fraction of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxNOCALLfraction' in GATK's SelectVariants\\n\"\n msg += \" --fam\\tpath to the file containing pedigree information\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdiscard variants with Mendelian violations (see Semler et al, 2012)\\n\"\n msg += \"\\t\\tshould be in the 'fam' format specified by PLINK\\n\"\n msg += \"\\t\\tvalidation strictness (GATK '-pedValidationType') is set at 'SILENT'\\n\"\n msg += \"\\t\\t allowing some samples to be absent from the pedigree\\n\"\n msg += \" --mvq\\tminimum GQ for each trio member to accept a variant as a Mendelian violation\\n\"\n msg += \"\\t\\tused in step 9 if '--fam' is specified\\n\"\n msg += \"\\t\\tdefault=0\\n\"\n msg += \" --xlssf\\tpath to the file with genotypes to exclude\\n\"\n msg += \"\\t\\tused in step 9 (can be especially useful if '--fam' is specified)\\n\"\n msg += \" --tmpd\\tpath to a temporary directory on child nodes (default=.)\\n\"\n msg += \"\\t\\te.g. it can be /tmp or /scratch\\n\"\n msg += \"\\t\\tused in step 4 for 'samtools sort'\\n\"\n msg += \"\\t\\tused in step 7 for 'GATK HaplotypeCaller'\\n\"\n msg += \" --jvmXms\\tinitial memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=512m (can also be specified as 1024k, 1g, etc)\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --jvmXmx\\tmaximum memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=4g\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --queue2\\tname of the second cluster queue (default=bigmem.q)\\n\"\n msg += \"\\t\\tused in step 4 for Picard to collect insert sizes\\n\"\n msg += \" --knowni\\tpath to a VCF file with known indels (for local realignment)\\n\"\n msg += \" --known\\tpath to a VCF file with known variants (e.g. from dbSNP)\\n\"\n msg += \" --force\\tforce to re-run step(s)\\n\"\n msg += \"\\t\\tthis removes without warning the step directory if it exists\\n\"\n msg += \"\\n\"\n msg += \"Examples:\\n\"\n msg += \" %s --step 1 --samples samples.txt\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Details:\\n\"\n msg += \"This program aims at genotyping a set of 'genotypes' using data from\\n\"\n msg += \"a restriction-assisted DNA sequencing (RAD-seq) experiment, also known\\n\"\n msg += \"as a genotyping-by-sequencing (GBS) experiment.\\n\"\n msg += \"Here, by 'genotype', we mean the entity which is the focus of the\\n\"\n msg += \"study. 
For instance, it can be a plant variety (or a human being), or\\n\"\n msg += \"the specific clone of a given plant variety (or a specific tumor of a\\n\"\n msg += \"given human being), etc.\\n\"\n msg += \"Importantly, note that the content of the 'genotype' column will\\n\"\n msg += \"be used to set the 'SM' (sample) tag of the 'RG' (read group) header\\n\"\n msg += \"record type of the SAM format (see http://www.htslib.org/). However,\\n\"\n msg += \"internal to this program, the term 'sample' corresponds to the unique\\n\"\n msg += \"quadruplet (genotype,flowcell,lane,barcode) for steps 1 and 2, and to\\n\"\n msg += \"the unique triplet (genotype,flowcell,lane) for the others.\\n\"\n msg += \"Jobs are executed in parallel (--schdlr). Their return status is\\n\"\n msg += \"recorded in a SQLite database which is removed at the end. If a job\\n\"\n msg += \"fails, the whole script stops with an error.\\n\"\n msg += \"\\n\"\n msg += \"Dependencies:\\n\"\n msg += \"Python >= 2.7; Biopython; pyutilstimflutre >= 0.5\\n\"\n msg += \"\\n\"\n msg += \"Report bugs to <[email protected]>.\"\n print(msg); sys.stdout.flush()", "def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging", "def optionHelp(self):\n return {}", "def command_help(args):\n\tprint_usage()\n\treturn 0", "def command_short():\n pass", "def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory 
complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. \r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text", "def help(self):\r\n self._short_help(None, None, None, None)", "def help_command(server, output, conf):\n server.tell(output.name, 'Available commands:')\n for key in COMMANDS.keys():\n cmd_func = COMMANDS[key]\n if cmd_func.__doc__:\n server.tell(output.name, '%s: %s' % (key[1:], cmd_func.__doc__))\n else:\n server.tell(output.name, key[1:])\n return", "def command_help(self, command):\n self.commands[command].command_help()", "def command(self, *args, **kwargs):\n help_group = kwargs.pop(\"group\", None)\n decorator = super(GroupedGroup, self).command(*args, **kwargs)\n\n def wrapper(f):\n cmd = decorator(f)\n cmd.help_group = help_group\n return cmd\n\n return wrapper", "def handle_program_options():\n parser = argparse.ArgumentParser(description=\"Gather numeric information \\\n about the processed sequence data in an \\\n MG-RAST project.\")\n parser.add_argument('project_id',\n help=\"The project identifier (MG-RAST ID)\")\n parser.add_argument('-a', '--auth_key',\n help=\"An MG-RAST API authorization key. This is \\\n necessary to access projects marked as private.\")\n parser.add_argument('-g', '--group_by', action='append',\n help=\"A string that matches some part of the \\\n 'Metagenome Name' field. All matching project \\\n metagenomes will be grouped by this identifier \\\n and their stats will be summed. This option can \\\n be specified multiple times to create multiple \\\n groups. All non-matching metagenomes will \\\n appear separately in the table. NOTE: \\\n Strings will be matched longest first. This \\\n allows for matching names that might be a \\\n substring of another match. For example: -g S \\\n -g NS. The name field will first be matched \\\n against the longest string (NS) first and then \\\n each smaller string in order.\")\n parser.add_argument('-o', '--output_filename', default='meta_stats.txt',\n help=\"The name of the file the project summary \\\n information will be written to.\")\n\n# parser.add_argument('-v', '--verbose', action='store_true')\n\n return parser.parse_args()", "def cli():\n pass # do nothing here, it just defines the name for other subcommands", "def test_cli_help(self):\n output = self.update_command('-h')", "async def send_group_help(self, group):\n self.add_command_formatting(group)\n\n filtered = await self.filter_commands(group.commands, sort=self.sort_commands)\n if filtered:\n note = await self.get_opening_note()\n if note:\n self.paginator.add_line(note, empty=True)\n\n self.paginator.add_line('**%s**' % self.commands_heading)\n for command in filtered:\n self.add_subcommand_formatting(command)\n\n note = self.get_ending_note()\n if note:\n self.paginator.add_line()\n self.paginator.add_line(note)\n\n await self.send_pages()", "def get_command_help(self, module_name, command_name):\r\n command = self.env.get_command(module_name, command_name)\r\n\r\n default_format = 'raw'\r\n if sys.stdout.isatty():\r\n default_format = 'table'\r\n\r\n arg_doc = command.__doc__\r\n\r\n if 'confirm' in command.options:\r\n arg_doc += \"\"\"\r\nPrompt Options:\r\n -y, --really Confirm all prompt actions\r\n\"\"\"\r\n\r\n if '[options]' in arg_doc:\r\n arg_doc += \"\"\"\r\nStandard Options:\r\n --format=ARG Output format. [Options: table, raw] [Default: %s]\r\n -C FILE --config=FILE Config file location. 
[Default: ~/.softlayer]\r\n --debug=LEVEL Specifies the debug noise level\r\n 1=warn, 2=info, 3=debug\r\n --timings Time each API call and display after results\r\n --proxy=PROTO:PROXY_URL HTTP[s] proxy to be use to make API calls\r\n -h --help Show this screen\r\n\"\"\" % default_format\r\n return arg_doc.strip()", "def options_argument_group(parser):\n group = parser.add_argument_group(\n \"GLOBAL OPTIONS\",\n \"Options are available for all\" \"arguments within the scope of this command.\",\n )\n\n group.add_argument(\n \"--controller\",\n dest=\"controller\",\n help=\"Use this flag to select the corresponding controller \"\n \"using either the slot number or index.\\nexample: --controller=Slot 0 OR \"\n \"--controller=1\",\n default=None,\n )", "def command_help(self, bot, update):\n\n messages = [\n 'Available commands:',\n '/who - Who is Myles?',\n '/where - Where is Myles?',\n '/tweet - What was the last tweet Myles sent?',\n '/photo - What was the last Instagram photo Myles took?',\n '/web - Where can I find Myles on the interwebs?',\n ]\n\n self.send_messages(bot, update, messages)", "def _format_help_console(self):\n formatter = self._get_formatter()\n formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups)\n formatter.add_text(self.description)\n for action_group in self._sorted_groups():\n title = ' '.join(x[0].upper() + x[1:] for x in action_group.title.split())\n formatter.start_section(title)\n formatter.add_text(action_group.description)\n formatter.add_arguments(sorted(action_group._group_actions, key=attrgetter('option_strings')))\n formatter.end_section()\n formatter.add_text(self.epilog)\n return formatter.format_help()", "def AddCommandArgGroup(parser):\n command_group = parser.add_argument_group(\n help='These arguments are used to run commands using SSH.'\n )\n command_group.add_argument(\n '--command',\n required=True,\n help=\"\"\"\\\n Command to run on the Cloud TPU VM.\n\n Runs the command on the target Cloud TPU Queued Resource's nodes and then exits.\n\n Note: in the case of a TPU Pod, it will only run the command in the\n workers specified with the `--worker` flag (defaults to worker all if not\n set).\n \"\"\",\n )\n command_group.add_argument(\n '--output-directory',\n help=\"\"\"\\\n Path to the directory to output the logs of the commands.\n\n The path can be relative or absolute. The directory must already exist.\n\n If not specified, standard output will be used.\n\n The logs will be written in files named {WORKER_ID}.log. 
For example:\n \"2.log\".\n \"\"\",\n )", "def add_usage(self, usage, actions, groups, prefix=''):\n #if prefix is None:\n # prefix = ''\n return super(SubcommandHelpFormatter, self).add_usage(usage, actions, groups, prefix='')", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "async def send_group_help(self, group):\n self.add_command_formatting(group)\n\n filtered = await self.filter_commands(group.commands, sort=self.sort_commands)\n if filtered:\n note = await self.get_opening_note()\n if note:\n self.paginator.add_line(note, empty=True)\n\n self.paginator.add_line('**%s**' % self.commands_heading)\n for command in filtered:\n await asyncio.sleep(0)\n self.add_subcommand_formatting(command)\n\n note = self.get_ending_note()\n if note:\n self.paginator.add_line()\n self.paginator.add_line(note)\n\n await self.send_pages()", "def _help(self):\n self.onecmd('help')", "def command_help(self):\n print(\"Command \", self)\n print(\"\\t\\thelp (Get help for command)\")\n\n params = self.params.copy()\n del params[\"help\"]\n\n if len(params) == 0:\n print(\"This command has no parameters\")\n return\n\n print(\"Parameters:\")\n for info in params.values():\n print(\" %s\" % info.get_basic_info())\n description = info.get_desc()\n if description != \"\":\n print(textwrap.fill(description,\n initial_indent=\" \",\n subsequent_indent=\" \",\n width=70))", "def do_command_help(self, command):\n summary = self.base.commands[command].get_summary()\n usage = self.get_command_usage(command)\n description = self.base.commands[command].get_description()\n sys.stdout.write('%s\\n%s' % (summary, usage))\n if description != None:\n sys.stdout.write('Arguments Description:\\n%s\\n' %\n (description, ))", "async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)", "def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()", "def show_help(argv=None):\n if argv:\n if \"list_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"list_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ls\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"search_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"search_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm search <keyword>\")\n 
sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"show_mounts\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"show_mounts\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ps\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mount <dataset_name> [<mount_path>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mmount <dataset_name> [<dataset_name> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"unmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"unmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm unmount <mount_id> [<cleanup_flag>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"munmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"munmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm munmount <mount_id> [<mount_id> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"clean\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"clean\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm clean\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n else:\n sdm_util.print_message(\"Unrecognized command\")\n return 1\n else:\n sdm_util.print_message(\"command : sdm <COMMAND> [<COMMAND_SPECIFIC_ARGS> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(\"Available Commands\")\n\n tbl = PrettyTable()\n tbl.field_names = [\"COMMAND\", \"DESCRIPTION\"]\n for cmd in COMMANDS:\n command, _, desc = cmd\n command_str = \" | \".join(command)\n tbl.add_row([command_str, desc])\n\n sdm_util.print_message(tbl)\n sdm_util.print_message(\"\")\n return 0", "def help(cls, extra_args=None):\n if (_is_text_interface()):\n return _create_text_help_str(cls, cls._TEXT_USAGE)\n else:\n return cls._GRAPHICAL_USAGE", "async def greeter(self, ctx):\n await util.command_group_help(ctx)", "def print_help_info(self, global_options):\r\n usage = ['',\"Type '%s help <subcommand>' for help on a specific subcommand.\" % self.prog_name,'']\r\n usage.append('Available subcommands:')\r\n commands = self.get_commands(global_options).keys()\r\n commands.sort()\r\n for cmd in commands:\r\n usage.append(' %s' % cmd)\r\n return '\\n'.join(usage)", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"", "def cmd_help(self, commands=None, usage=False):\n if commands:\n usage = True\n commands = {self.approx.decmd(c.lower()) for c in commands}\n rejects = commands - self.approx.keys()\n for reject in rejects:\n self.put_pretty(\"No command named %r\" % reject)\n continue\n commands -= rejects\n if self.debug:\n assert not any(self.approx.encmd(r) in self.mod_commands for\n r in rejects)\n assert 
all(self.approx.encmd(c) in self.mod_commands for\n c in commands)\n if not commands:\n return\n requested = zip(commands, (self.approx[c] for c in commands))\n else:\n requested = self.approx.items()\n help = znc.CTable()\n help.AddColumn(\"Command\")\n help.AddColumn(\"Usage\" if usage else \"Description\")\n from itertools import zip_longest\n #\n for command, parser in requested:\n if usage:\n upre = \"usage: %s\" % command\n rest = (parser.format_usage()\n .replace(upre, \"\", 1)\n .replace(\"[-h] \", \"\", 1))\n desc = [l.strip() for l in rest.split(\"\\n\") if l.strip()]\n else:\n desc = [parser.description]\n for line, comm in zip_longest(desc, (command,), fillvalue=\"\"):\n help.AddRow()\n help.SetCell(\"Command\", comm)\n help.SetCell(\"Usage\" if usage else \"Description\", line)\n #\n s_line = znc.String()\n strung = []\n while help.GetLine(len(strung), s_line):\n strung.append(s_line.s)\n also = \" (<command> [-h] for details)\"\n strung[1] = strung[1].replace(len(also) * \" \", also, 1)\n self.put_pretty(\"\\n\".join(strung))", "def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m", "def main():\n parser = argparse.ArgumentParser(description='investigate code health and random statistics')\n sub_parsers = parser.add_subparsers(dest='command_name', title='Commands', help='', metavar='<command>')\n\n sub = sub_parsers.add_parser('line-count', help='list line counts')\n sub.add_argument('files', nargs='+', help='files or folders to look in')\n sub.add_argument('--each', type=int, default=1)\n sub.add_argument('--show', action='store_true')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_line_count)\n\n sub = sub_parsers.add_parser('include-list', help='list headers from files')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--print', dest='print_files', action='store_true')\n sub.add_argument('--print-stats', dest='print_stats', action='store_true')\n sub.add_argument('--print-max', dest='print_max', action='store_true')\n sub.add_argument('--no-list', dest='print_list', action='store_false')\n sub.add_argument('--count', default=2, type=int, help=\"only print includes that are more or equal to <count>\")\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.set_defaults(func=handle_list)\n\n sub = sub_parsers.add_parser('include-gv', help='generate a graphviz of the includes')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.add_argument('--group', action='store_true', help=\"group output\")\n sub.add_argument('--cluster', action='store_true', help=\"group output into clusters\")\n sub.set_defaults(func=handle_gv)\n\n sub = sub_parsers.add_parser('list-indents', help='list the files with the maximum indents')\n sub.add_argument('files', nargs='+')\n sub.add_argument('--each', type=int, default=1, help='group counts')\n sub.add_argument('--show', action='store_true', help='include files in list')\n sub.add_argument('--hist', action='store_true', help='show simple histogram')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_list_indents)\n\n sub = 
sub_parsers.add_parser('missing-pragma-once', help='find headers with missing include guards')\n sub.add_argument('files', nargs='+')\n sub.set_defaults(func=handle_missing_include_guards)\n\n sub = sub_parsers.add_parser('missing-in-cmake', help='find files that existis on disk but missing in cmake')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_missing_in_cmake)\n\n sub = sub_parsers.add_parser('list-no-project-folders', help='find projects that have not set the solution folder')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_list_no_project_folder)\n\n sub = sub_parsers.add_parser('check-files', help=\"find files that doesn't match the name style\")\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_check_files)\n\n args = parser.parse_args()\n if args.command_name is not None:\n args.func(args)\n else:\n parser.print_help()", "def make_cli_parser(self):\n super(McmcArgParser, self).make_cli_parser()\n self.cli_parser.add_option('--burn-in', type='int',\n default=mcmc.defaults.BURN_IN,\n help=(\"the number of steps to take before recording states \"\n \"in the Markov chain [default: %default]\")\n )\n self.cli_parser.add_option('--steps', type='int',\n default=mcmc.defaults.NUM_STEPS,\n help=(\"the number of steps through the Markov chain to \"\n \"observe [default: %default]\")\n )\n self.cli_parser.add_option('--activity-threshold',\n type='float',\n default=mcmc.defaults.ACTIVITY_THRESHOLD,\n help=(\"set the (differential) expression threshold at \"\n \"which a gene is considered active [default: \"\n \"%default=-log10(0.05)]\")\n )\n self.cli_parser.add_option('--transition-ratio', type='float',\n default=mcmc.defaults.TRANSITION_TYPE_RATIO,\n help=(\"The target ratio of proposed link transitions \"\n \"to proposed parameter transitions [default: \"\n \"%default]\"\n )\n )\n self.cli_parser.add_option('--link-false-pos', type='float',\n help=(\"designate the starting false-positive rate \"\n \"for links\")\n )\n self.cli_parser.add_option('--link-false-neg', type='float',\n help=(\"designate the starting false-negative rate \"\n \"for links\")\n )\n self.cli_parser.add_option('--link-prior', type='float',\n help=(\"designate the starting prior probability \"\n \"for adding a link\")\n )\n self.cli_parser.add_option('--term-false-pos', type='float',\n help=(\"designate the starting false-positive rate \"\n \"for terms [NOTE: applies only when used in \"\n \"conjunction with '--genes-based']\"\n )\n )\n self.cli_parser.add_option('--term-false-neg', type='float',\n help=(\"designate the starting false-negative rate \"\n \"for terms [NOTE: applies only when used in \"\n \"conjunction with '--genes-based']\"\n )\n )\n self.cli_parser.add_option('--term-prior', type='float',\n help=(\"designate the starting prior probability \"\n \"for adding a term [NOTE: applies only when used \"\n \"in conjunction with '--terms-based'\"\n )\n )\n self.cli_parser.add_option('--seed-terms',\n help=(\"A file containing terms, one per line to \"\n \"use as a seed when initializing the Markov chain.\")\n )\n self.cli_parser.add_option('--seed-links',\n help=(\"A two-column CSV-formatted file containing \"\n \"pairs of terms to use as seed for links when \"\n \"initializing the Markov chain.\")\n )\n self.cli_parser.add_option('--relaxed-coannotations',\n dest='stringent_coannotations', action='store_false',\n default=True,\n help=(\"use a more relaxed definition of 
co-annotation \"\n \"both genes may be annotated by both terms\")\n )\n self.cli_parser.add_option('--fixed-distributions',\n action='store_true',\n help=(\"use fixed distributions for link (and term) \"\n \"prior [implies --free-parameters]\")\n )\n self.cli_parser.add_option('--free-parameters',\n action='store_true',\n help=(\"parameters will be adjusted randomly, rather \"\n \"than incrementally\")\n )\n self.cli_parser.add_option('--disable-swaps', action='store_true',\n help=(\"disables swapping links as an option for \"\n \"transitions\")\n )\n self.cli_parser.add_option('--terms-based', action='store_true',\n help=(\"uses terms-based, rather than links-based model \"\n \"[implies ``--disable-swaps``]\")\n )\n self.cli_parser.add_option('--intraterms', action='store_true',\n help=(\"consider also intraterm interactions [NOTE: \"\n \"only available in conjunction with \"\n \"'--terms-based']\")\n )\n self.cli_parser.add_option('--independent-terms',\n action='store_true',\n help=(\"allow terms to be selected indpendently from \"\n \"links [NOTE: only available in conjunction with \"\n \"--terms-based']\"\n )\n )\n self.cli_parser.add_option('--genes-based', action='store_true',\n help=\"overlap of terms is based on genes\")\n self.cli_parser.add_option('--parameters-outfile',\n default=mcmc.defaults.PARAMETERS_OUTFILE,\n help=(\"the file to which the parameters results should \"\n \"be written [default: %default]\")\n )\n self.cli_parser.add_option('--terms-outfile',\n default=mcmc.defaults.TERMS_OUTFILE,\n help=(\"the file to which the terms results should \"\n \"be written [default: %default]\")\n )\n self.cli_parser.add_option('--transitions-outfile',\n default=mcmc.defaults.TRANSITIONS_OUTTFILE,\n help=(\"the file to which the transitions data should \"\n \"be written [default: %default]\")\n )\n self.cli_parser.add_option('--detailed-transitions',\n action='store_true',\n help=(\"Transitions file includes full information about \"\n \"each step's state.\")\n )\n self.cli_parser.add_option('--bzip2', action='store_true',\n help=\"compress transitions file using bzip2\"\n )\n self.cli_parser.add_option('--record-frequencies',\n action='store_true',\n help=\"record the frequency of each state\"\n )\n self.cli_parser.add_option('--frequencies-outfile',\n default=mcmc.defaults.FREQUENCIES_OUTFILE,\n help=(\"the file to which frequency information \"\n \"should be written [default: %default]\")\n )", "async def help_forum_group(self, ctx: commands.Context) -> None:\n if not ctx.invoked_subcommand:\n await ctx.send_help(ctx.command)", "def help_command(update: Update, _: CallbackContext) -> None:\n update.message.reply_text(\"These are the list of commands supported. \\n\\n /deadlines. \\n\\n \"\n \"Hey! 
I am still being enhanced, more features to come...!\")", "async def adventofcode_group(self, ctx: commands.Context) -> None:\n if not ctx.invoked_subcommand:\n await ctx.send_help(ctx.command)", "async def adventofcode_group(self, ctx: commands.Context) -> None:\n if not ctx.invoked_subcommand:\n await ctx.send_help(ctx.command)", "def _get_metrics_options(metrics):\n metrics_options = []\n if metrics is None:\n metrics = []\n for static_metric in metrics:\n metrics_options += [\n \"-m\",\n static_metric.metric.mp_metric_name,\n str(static_metric.value),\n ]\n return metrics_options", "def help(*args):\n console_script = ConsoleScript.singleton\n\n if not args:\n # show documentation for parsed group\n yield console_script.parser.group.doc\n else:\n # show command documentation if possible\n if args[0] in console_script:\n yield console_script[args[0]].doc\n else:\n importable = Importable.factory(args[0])\n if importable.target and not importable.is_module:\n yield importable.doc\n elif importable.module:\n if not importable.target:\n yield f'{RED}Cannot import {args[0]}{RESET}'\n yield ' '.join([\n YELLOW,\n 'Showing help for',\n importable.module.__name__ + RESET\n ])\n yield BaseGroup.factory(importable.module.__name__).doc", "def _format_help_markdown(self):\n formatter = self._get_formatter()\n formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups)\n for action_group in self._sorted_groups():\n title = ' '.join(x[0].upper() + x[1:] for x in action_group.title.split())\n formatter.start_section(title)\n formatter.add_arguments(sorted(action_group._group_actions, key=attrgetter('option_strings')))\n formatter.end_section()\n formatter.add_text(self.epilog)\n return formatter.format_help()", "def help(self, msg=None):\n\n # Print the message if given.\n if not msg == None:\n print str(msg) + \"\\n\"\n\n # Display the list of commands, in the alphabetical order.\n print \"Use one of the following commands:\"\n for action in sorted(self.actions.keys()):\n info = self.actions[action]\n joined_oblig = ' '.join(info['required'])\n if len(info['additional']) > 0:\n add = [\"<%s>\" % x for x in info['additional']]\n joined_add = '[' + ' '.join(add) + ']'\n else:\n joined_add = ''\n print \"\\t* %s %s %s\" % (action, joined_oblig, joined_add)", "def help(self):\n pass", "def help(self):\n pass", "def main():\n\n parser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n parser.add_argument('com', type=str, help='plot requests file name', nargs='?', default=\"/tmp/foo.c\")\n parser.add_argument('comout', type=str, help='COMOUT file name', nargs='?', default=\"zoom\")\n\n args = parser.parse_args()\n\n cmd = {} # dictionary of commands\n\n bas = False\n plane = False\n \n with open(args.com) as f:\n for line in f.readlines():\n words = line.strip().split()\n if len(words) is 0:\n continue\n\n for i,w in enumerate(words):\n if re.search(\"^bas\", w):\n cmd['bas'] = list(map(float, words[i+1:i+7]))\n if plane is False: bas = True # basis was before plane cuts\n elif re.search(\"^or\", w):\n cmd['or'] = list(map(float, words[i+1:i+4]))\n elif re.search(\"^ex\", w):\n try: # both x and y scales are given\n cmd['ex'] = list(map(float, words[i+1:i+3]))\n continue\n except ValueError: # just 1 scale is given\n cmd['ex'] = list(map(float, words[i+1:i+2]))\n elif re.search(\"^lab\", w):\n cmd['label'] = list(map(int, map(float, words[i+1:i+3]))) #+ [words[i+3]]\n 
elif re.search(\"^p[xyz]\", w):\n cmd[w] = [float(words[i+1])]\n if bas is False: plane = True # plane cuts were before basis\n elif re.search(\"^legend\", w):\n cmd[w] = [words[i+1]]\n elif w == \"scale\":\n print(w)\n if int(words[i+1]): # no need to put 'scale 0'\n cmd[w] = [words[i+1]]\n elif w in (\"mesh\"):\n if int(words[i+1])==1: # no need to put 'mesh 1'\n cmd[w] = [words[i+1]]\n\n print(bas, plane)\n\n if plane: # bas was first\n keys = ('bas', 'or', 'ex', 'px', 'py', 'pz', 'label', 'mesh', 'legend', 'scale')\n elif bas:\n keys = ('or', 'ex', 'px', 'py', 'pz', 'bas', 'label', 'mesh', 'legend', 'scale')\n else:\n keys = {'or', 'ex', 'label', 'mesh', 'legend', 'scale'}\n \n with open(args.comout, 'w') as f:\n for key in keys:\n if key in cmd:\n # newline required by mcplot:\n if key in ('mesh', 'legend', 'scale', 'label'):\n f.write(\"\\n\")\n f.write(\"%s %s \" % (key,\" \".join(str(e) for e in cmd[key]),))\n f.write(\"\\n\")", "def help():\n \n pass", "def parse_opts_help():\n print(u'geneteka.py [-d] [-l <limit>] [-o <outputfile>] <lastname>')", "def _add_cmn_options(a_parser):\n a_parser.add_argument(\"-m\", \"--model\",\n help=\"path to the (stored or to be stored) model\",\n type=str)\n a_parser.add_argument(\"files\", help=\"input files in TSV format\",\n type=argparse.FileType('r'),\n nargs='*', default=[sys.stdin])", "def help():", "def help(command = None):\n if command is None:\n option = \" {:12} {}\"\n help_text = [\n \"Usage: tracker <command> [<args>]\",\n \"\",\n \"Available commands:\",\n option.format(\"help\", \"display this dialog\"),\n option.format(\"update\", \"save data to tracker\"),\n option.format(\"list\", \"list available trackers\"),\n option.format(\"show\", \"display raw tracker data\"),\n option.format(\"rename\", \"rename tracker\"),\n option.format(\"delete\", \"remove tracker\"),\n option.format(\"stats\", \"show statistics\"),\n option.format(\"plot\", \"show graph\"),\n \"\",\n \"Use 'tracker help <command>' for a command's detailed usage.\"\n ]\n print(\"\\n\".join(help_text))\n else:\n # commands = [\"update\", \"list\", \"show\", \"rename\", \"delete\", \"stats\", \"plot\"]\n usage = \" {}\"\n desc = \" {}\"\n if command == \"update\":\n help_text = [\n \"Update: command which adds (numerical) data to a tracker.\",\n \"\",\n \"Usage:\",\n usage.format(\"tracker update <tracker> <data>\"),\n usage.format(\"tracker update <tracker>\"),\n \"\",\n \"Description:\",\n usage.format(\"tracker update <tracker> <data>\"),\n desc.format(\"This form is shorthand for saving <data> to \" +\n \"<tracker> for today's date.\"),\n \"\",\n usage.format(\"tracker update <tracker>\"),\n desc.format(\"This form is used to set the value for an \" +\n \"arbitrary date for <tracker>.\"),\n desc.format(\"The date must be in the format YYYY-MM-DD.\"),\n \"\",\n \"Options:\",\n usage.format(\"<tracker>\"),\n desc.format(\"The name of the tracker to update, converted to lowercase.\"),\n desc.format(\"If <tracker> does not exist, you will be prompted to create it.\"),\n \"\",\n usage.format(\"<data>\"),\n desc.format(\"The value to save to the tracker to update, must be numerical.\")\n ]\n elif command == \"list\":\n help_text = [\n \"List: displays a list of trackers which have been created\",\n \"\",\n \"Usage:\",\n usage.format(\"tracker list\")\n ]\n elif command == \"show\":\n help_text = [\n \"Show: displays raw dates and values for a tracker\",\n \"\",\n \"Usage:\",\n usage.format(\"tracker show <tracker>\"),\n \"\",\n \"Description:\",\n usage.format(\"tracker 
show <tracker>\"),\n desc.format(\"Displays all data for <tracker> in the form '<date> | <value>'.\"),\n desc.format(\"Note: <date> is formatted as 'YYYY-MM-DD'.\"),\n \"\",\n \"Options:\",\n usage.format(\"<tracker>\"),\n desc.format(\"The name of the tracker to show, converted to lowercase.\"),\n ]\n elif command == \"rename\":\n help_text = [\n \"Rename: change name of a tracker\",\n \"\",\n \"Usage:\",\n usage.format(\"tracker rename <tracker> <new_tracker>\"),\n \"\",\n \"Description:\",\n usage.format(\"tracker rename <tracker> <new_tracker>\"),\n desc.format(\"All <tracker> entries will not be <new_tracker> entries.\"),\n \"\",\n \"Options:\",\n usage.format(\"<tracker>\"),\n desc.format(\"The name of the existing tracker to change, converted to lowercase.\"),\n \"\",\n usage.format(\"<new_tracker>\"),\n desc.format(\"The name of the new tracker (must not already exist), converted to lowercase.\")\n ]\n elif command == \"delete\":\n help_text = [\n \"Delete: permanently removes all data entries for a given tracker\",\n \"\",\n \"Usage:\",\n usage.format(\"tracker delete <tracker>\"),\n \"\",\n \"Description:\",\n usage.format(\"tracker delete <tracker>\"),\n desc.format(\"All sqlite entries associated with <tracker> are deleted.\"),\n \"\",\n \"Options:\",\n usage.format(\"<tracker>\"),\n desc.format(\"The name of the tracker to delete, converted to lowercase.\")\n ]\n elif command == \"stats\":\n help_text = [\n \"Stats: show statistics for tracker(s)\",\n \"\",\n \"Usage:\",\n usage.format(\"tracker stats <tracker> <tracker>\"),\n usage.format(\"tracker stats <tracker>\"),\n usage.format(\"tracker stats\"),\n \"\",\n \"Description:\",\n usage.format(\"tracker stats <tracker> <tracker>\"),\n desc.format(\"Show correlation coefficient between two trackers.\"),\n \"\",\n usage.format(\"tracker stats <tracker>\"),\n desc.format(\"Display information for each weekday and entire time period.\"),\n desc.format(\"Stats included: total, mean, min, max.\"),\n \"\",\n usage.format(\"tracker stats\"),\n desc.format(\"Displays information about all trackers.\"),\n desc.format(\"Stats included: total entries, entries per tracker.\"),\n \"\",\n \"Options:\",\n usage.format(\"<tracker>\"),\n desc.format(\"The name of the tracker to show stats for, converted to lowercase.\")\n ]\n elif command == \"plot\":\n help_text = [\n \"Plot: show graph for tracker\",\n \"\",\n \"Usage:\",\n usage.format(\"tracker plot <tracker>\"),\n \"\",\n \"Description:\",\n usage.format(\"tracker stats <tracker>\"),\n desc.format(\"Displays graph for <tracker> from first entry to last entry.\"),\n \"\",\n \"Options:\",\n usage.format(\"<tracker>\"),\n desc.format(\"The name of the tracker to graph, converted to lowercase.\")\n ]\n else:\n error(\"Invalid command: '{}'\".format(command))\n print(\"\\n\".join(help_text))\n sys.exit(1)", "def do_help(self, args): \n if args.command:\n if len(args.command) > 1:\n command = args.command[0] +\"-\" + args.command[1]\n else:\n command = args.command[0]\n if command in self.subcommands:\n self.subcommands[command].print_help()\n else:\n print((\"'%s' is not a valid subcommand\") %\n args.command)\n else:\n self.parser.print_help()", "def help(update, context):\n msg = \"\"\n msg += \"\\n/covid 7-Day-Incident per Million\"\n msg += \"\\n/daylio What did I do a year ago today?\"\n msg += \"\\n/f1last Results of the last race\"\n msg += \"\\n/f1stand Driver standings\"\n msg += \"\\n/f1next Time and place of the next race\"\n msg += \"\\n/fuel prices and consump. 
(args: Xeur Ykm)\"\n msg += \"\\n/ip Outside ip address\"\n msg += \"\\n/rate Exchange rates (args: Xeur/Yhuf)\"\n msg += \"\\n/rss check rss feeds for new content\"\n msg += \"\\n/sun Time of sunrise and sunset\"\n msg += \"\\n/xkcd Sends last comic image and alt\"\n msg.rstrip()\n update.message.reply_text(msg)", "def help(self, irc, msg, args, command):\n command = map(callbacks.canonicalName, command)\n (maxL, cbs) = irc.findCallbacksForArgs(command)\n if maxL == command:\n if len(cbs) > 1:\n names = sorted([cb.name() for cb in cbs])\n irc.error(format('That command exists in the %L plugins. '\n 'Please specify exactly which plugin command '\n 'you want help with.', names))\n else:\n assert cbs, 'Odd, maxL == command, but no cbs.'\n irc.reply(cbs[0].getCommandHelp(command, False))\n else:\n irc.error(format('There is no command %q.',\n callbacks.formatCommand(command)))", "def commands_description(package_name=COMMANDS_PACKAGE_NAME):\n usage_fmt = USAGE_FORMAT.lower()\n groups = {}\n commands = sorted([i for i in _get_commands(package_name).items() if i[0] != '__module__'])\n for cmd, topcmd in commands:\n module = topcmd['__module__']\n try:\n command_obj = module.COMMAND\n except AttributeError:\n continue\n descr = command_obj.summary.split('\\n')[0]\n group = command_obj.group\n if usage_fmt == 'console':\n line = ' {}{}'.format(util.color_text(f'{cmd:<14}', 'green'), descr)\n elif usage_fmt == 'markdown':\n line = ' {} | {}'.format(f'{cmd:<28}', descr)\n groups.setdefault(group, []).append(line)\n parts = []\n for group, members in groups.items():\n title = group.title() + ' Subcommands' if group else 'Subcommands'\n if usage_fmt == 'console':\n parts.append(util.color_text(title+':', attrs=['bold']))\n elif usage_fmt == 'markdown':\n parts.extend(['', ' ', f'{title:<30}' + ' | Description',\n '{}:| {}'.format('-'*30, '-'*len('Description'))])\n parts.extend(members)\n parts.append('')\n return '\\n'.join(parts)", "def help(update, context):\n update.message.reply_text(\"\"\"usage \n /bus <bus name> or /bus <bus name> <stop name>\n /addstop <stop name> <stop code>\n /delstop <stop name>\n /showstops\n /help\n \"\"\")\n\n # log info\n logger.info(\"help used username:{0}\".format(update.message.from_user.username))", "async def help(message, *args, **kwargs):\r\n\r\n if not args:\r\n docs = {name: f.__doc__ for name, f in usercommands.items()}\r\n docs = {k:v.split(\"\\n\",1)[0] for k,v in docs.items() if v and k.startswith(prefix)}\r\n output = dictstr(docs) + f\"\\nType \\\"{prefix}help func\\\" for detailed information about func\"\r\n output += \"\\n\\nFunction arguments are words separated by spaces\\n ex. 
$sort enemydata HP\"\r\n output += \"\\nKeyword arguments have the format: key=value\\n no spaces, unless value is in quotes\"\r\n output += \"\\n\\nDataTables:\"\r\n for name in DataTables:\r\n output += \"\\n \" + name\r\n if name == \"enemydata-h\": output += \" (hard-mode stats)\"\r\n await reply(message, f\"```{output}```\")\r\n return\r\n await reply(message, f\"```{inspect.getdoc(usercommands['$'+args[0]])}```\")", "def help():\n print(UI.HELP)", "def makecmd(self, options):", "def help(bot, sender, sendmsg, label, args):\n\n clist = commands.commands\n csort = sorted(clist.values(), key=lambda c: c.__name__.lower())\n\n if len(args) > 0:\n page = int(args[0]) - 1\n else:\n page = 0\n\n pages = len(clist) // 10 + 1\n\n sendmsg(\"-- Help (Page {} of {}) --\".format(page + 1, pages))\n for i in range(10):\n if i >= len(csort):\n break\n\n command = csort[i + (page * 10)]\n sendmsg(\"{}: {}\".format(command.__name__, command.__doc__))", "def cli_help(self):\n self._generate_cli_version()\n self._generate_cli_help()\n sys.exit(0)", "def help_usage(self):\n\t\thelptext = \"\"\"\nUSAGE\n==========\n1.) connect to server:\n\tWhen starting p22p, you dont automatically connect to a server.\n\tTo do this, use the 'connect'-command.\n\tWithout additional arguements, p22p will connect to {default}.\n\tIf you want to connect to a other server, use the following syntax:\n\t\tconnect PROTO://SERVER:PORT\n\twhere PROTO is either 'ws' or 'wss'. 'wss' is a SSL/TLS connection, ws a insecure connection.\n\tNote that the communication between to clients is always CBC-encrypted (additionaly to other encryption methods.)\n\tThe CBC-password will never be sent to the server.\n\tThe Server only receives a hash of the password.\n\n2.) join or create a Group\n\tp22p is using Group as Network-Namespaces.\n\tEach Groupmember has a unique CID. However, the CID is only unique in the Group and only unique during that clients connection.\n\tTo create a new Group, use the 'create'-command:\n\t\tcreate NAME PASSWORD [KEYFILE]\n\tThe server only receives a hash of the PASSWORD.\n\tNote that groupnames starting with a \"#\" are reserved (You cant create them except if you have the key).\n\tIf you want to create a reserved group, pass the path to the keyfile.\n\tWhen creating a Group, you will automatically join that Group.\n\t\n\tTo join a Group, use the 'join'-command:\n\t\tjoin NAME PSWD\n\tThe Server only reveives a hash of the Password.\n\n3.) relay a Port\n\tTo relay a port from your Device to a target device, use the 'relay'-command:\n\t\trelay PEER [LOCAL] REMOTE\n\tIf LOCAL is 0 or ommited, a free port is choosen.\n\tThis Command will create a socket listening to Port LOCAL on your DEVICE.\n\tOnce a connection is made to that Port, P22P will send a message to PEER, telling him to create a connection to Port REMOTE.\n\tAll data sent trough this connection will be encrypted with the Group's Password.\n\tThe Server only knows the hash of the password, meaning only Groupmembers know how to decrypt the Message.\n\tThe Server knows who should receive this message and sends it to only that Client.\n\n4.) Leaving a Group\n\tOnce you are finished, you can leave the Group.\n\tThis will close all connections to peers and free your CID.\n\tAll Groupmembers will receive a message that you left the Group.\n\tto leave a Group, use thr 'leave'-command.\n\n5.) 
Disconnecting\n\tIf you want to disconnect from the Server, use the 'disconnect'-command.\n\tThis will close all connections and also auto-leaves the Group (see 4.)\n\n6.) Exiting\n\tTo close this script, use the 'exit'-command.\n\tIf required, the 'disconnect'-command is invoked.\n\n7.) Additional commands\n\tTo get a list of all aviable commands, use the 'help'-command.\n\tTo get a description about a command, use the gollowing syntax:\n\t\thelp COMMAND\n\tHere are some useful commands:\n\t\tping PEER: pings a peer (not the Server.)\n\t\tlist: shows a list of all connections and relayed ports. also shows some information.\n\t\tcid: shows your current CID.\n\"\"\".format(default=DEFAULT_SERVER)\n\t\tself.stdout.write(helptext)", "def _cli_options(self, parser, defaults):\n config_group = parser.add_argument_group(title=\"Config Group\", description=self._help['config_group'])\n config_group.add_argument('-f', '--herringfile', metavar='FILESPEC',\n default='herringfile', help=self._help['herringfile'])\n config_group.add_argument('--herringlib', metavar='DIRECTORY', nargs='*', default=self._herringlib_path,\n help=self._help['herringlib'].format(dirs=self._herringlib_path))\n\n task_group = parser.add_argument_group(title='Task Commands', description=self._help['task_group'])\n task_group.add_argument('-T', '--tasks', dest='list_tasks',\n action=\"store_true\", help=self._help['list_tasks'])\n task_group.add_argument('-U', '--usage', dest='list_task_usages',\n action=\"store_true\", help=self._help['list_task_usages'])\n task_group.add_argument('-D', '--depends', dest='list_dependencies',\n action=\"store_true\", help=self._help['list_dependencies'])\n task_group.add_argument('tasks', nargs='*', help=self._help['tasks'])\n\n task_options_group = parser.add_argument_group(title='Task Options',\n description=self._help['task_options_group'])\n task_options_group.add_argument('-a', '--all', dest='list_all_tasks',\n action='store_true', help=self._help['list_all_tasks'])\n task_options_group.add_argument('-i', '--interactive', dest='interactive', action='store_true',\n default=False, help=self._help['interactive'])\n\n output_group = parser.add_argument_group(title='Output Options', description=self._help['output_group'])\n output_group.add_argument('-q', '--quiet', dest='quiet', action='store_true',\n help=self._help['quiet'])\n output_group.add_argument('-d', '--debug', dest='debug',\n action='store_true', help=self._help['debug'])\n output_group.add_argument('--herring_debug', dest='herring_debug',\n action='store_true', help=self._help['herring_debug'])\n output_group.add_argument('--leave_union_dir', action='store_true', help=self._help['leave_union_dir'])\n output_group.add_argument('-j', '--json', dest='json', action='store_true',\n help=self._help['json'])\n\n info_group = parser.add_argument_group(title='Informational Commands', description=self._help['info_group'])\n info_group.add_argument('-v', '--version', dest='version',\n action='store_true', help=self._help['version'])\n info_group.add_argument('-l', '--longhelp', dest='longhelp', action='store_true',\n help=self._help['longhelp'])\n info_group.add_argument('--environment', action='store_true', help=self._help['environment'])", "def show_commands(self):\n print(\n ''\n '\\n\\t' + bc.OKBLUE + 'COMMANDS:' + bc.ENDC +\n '\\n\\t' + '---------' +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'run', 'Run the script')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'runcom', 'Run program with specific arguments <runcom [ARGS]>')) +\n '\\n\\t' + ('%-*s 
->\\t%s' % (9, 'info', 'Information')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'help', 'Help')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'so', 'Show options')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'sa', 'Show module info')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'set', 'Set options, <set [PARAMETER] [VALUE]>')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'invoke', 'Invoke module')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'exit', 'Exit')) +\n '\\n'\n )", "def commandline_options():\n parser = argparse.ArgumentParser(\n description='ocn_diags_generator: CESM wrapper python program for Ocean Diagnostics packages.')\n\n parser.add_argument('--backtrace', action='store_true',\n help='show exception backtraces as extra debugging '\n 'output')\n\n parser.add_argument('--debug', action='store_true',\n help='extra debugging output')\n\n #parser.add_argument('--config', nargs=1, required=True, help='path to config file')\n\n options = parser.parse_args()\n return options", "def help_v2():\n return {\"commands\" : ['report', 'help', 'alternatives']}", "def cmd_help(ctx):\n echo(ctx.parent.get_help())", "def help(self):", "def help(self):", "def display_help(self):\n pass", "def do_help(self, line):\n Cmd.do_help(self, line)", "def help(bin_name='windmill'):\n bin_name = 'windmill'\n module = sys.modules[__name__]\n from windmill.conf import global_settings\n all_option_names = []\n options_string = []\n for option in [getattr(module, x) for x in dir(module) if (\n hasattr(getattr(module, x), 'option_names')) and (\n getattr(module, x).__doc__ is not None ) ]:\n all_option_names.append(option.option_names)\n if hasattr(option, 'setting'):\n if getattr(global_settings, option.setting, None) is not None:\n default = ' Defaults to %s' % str(getattr(global_settings, option.setting, None))\n else:\n default = ''\n else:\n default = ''\n if option.option_names[0] is None:\n if not issubclass(option, GeneralBool):\n options_string.append(' '+''.join([str(option.option_names[1])+'='+' :: ', \n option.__doc__]) + default)\n else:\n options_string.append(' '+''.join([str(option.option_names[1])+' :: ', \n option.__doc__]) + default)\n else:\n if not issubclass(option, GeneralBool):\n options_string.append(' '+''.join([\n '-'+str(option.option_names[0])+', '\n +str(option.option_names[1])+'='+' :: ',\n option.__doc__]) + default)\n else:\n options_string.append(' '+''.join([\n '-'+str(option.option_names[0])+', '\n +str(option.option_names[1])+' :: ',\n option.__doc__]) + default)\n\n preamble = \"\"\"windmill web test automation system.\n %s [-%s] action [option=value] [firefox|ie|safari] [http://www.example.com]\n \nAvailable Actions:\n shell Enter the windmilll shell environment (modified python shell). \n Uses ipython if installed. Exit using ^d\n run_service Run the windmill service in foreground. 
Kill using ^c.\n \nAvailable Options:\"\"\" % ( bin_name,\n ''.join([ o[0] for o in all_option_names if o[0] is not None ]) \n )\n print preamble\n print '\\n'.join(options_string)", "def cli(ctx):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"cluster subcommand called from cli\")", "def add_options(self, parser):\n parser.add_option(\"-a\", \"--addons\",\n dest=\"addons\",\n action=\"append\",\n metavar=\"ADDONS\",\n help=\"add-ons to be installed\")\n parser.add_option(\"--application\",\n dest=\"application\",\n default=\"firefox\",\n choices=[\"firefox\", \"thunderbird\"],\n metavar=\"APPLICATION\",\n help=\"application name [default: %default]\")\n parser.add_option(\"--junit\",\n dest=\"junit_file\",\n metavar=\"PATH\",\n help=\"JUnit XML style report file\")\n parser.add_option(\"--report\",\n dest=\"report_url\",\n metavar=\"URL\",\n help=\"send results to the report server\")\n parser.add_option(\"--repository\",\n dest=\"repository_url\",\n metavar=\"URL\",\n help=\"URL of a custom repository\")\n parser.add_option(\"--restart\",\n dest=\"restart\",\n default=False,\n action=\"store_true\",\n help=\"restart the application between tests\")\n parser.add_option(\"--screenshot-path\",\n dest=\"screenshot_path\",\n metavar=\"PATH\",\n help=\"path to use for screenshots\")\n parser.add_option(\"--tag\",\n dest=\"tags\",\n action=\"append\",\n metavar=\"TAG\",\n help=\"Tag to apply to the report\")\n\n mozmill = optparse.OptionGroup(parser, \"Mozmill options\")\n mozmill.add_option(\"-l\", \"--logfile\",\n dest=\"logfile\",\n metavar=\"PATH\",\n help=\"path to log file\")\n mozmill.add_option('-p', \"--profile\",\n dest=\"profile\",\n metavar=\"PATH\",\n help=\"path to the profile\")\n parser.add_option_group(mozmill)", "def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n printer('%s: Incorrect usage; details below.' 
% show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)", "def do_help(self, args):\n ## The only reason to define this method is for the help text in the doc string\n cmd.Cmd.do_help(self, args)", "def do_help(self, args):\n ## The only reason to define this method is for the help text in the doc string\n cmd.Cmd.do_help(self, args)", "def HelpForCmd(self, name):\n canonical_name = self._cmd_alias_list.get(name)\n if not canonical_name:\n raise CmdNotFoundError('Command not found: \"%s\"' % name)\n cmd = self._cmd_list[canonical_name]\n if cmd.__doc__.strip():\n flags_help = ''\n cmd_flags = self._flag_values_by_cmd[canonical_name]\n if cmd_flags.RegisteredFlags():\n prefix = ' '\n flags_help += '%s\\nFlags for %s:\\n' % (prefix, name)\n flags_help += cmd_flags.GetHelp(prefix + ' ')\n flags_help = _DeleteSpecialFlagHelp(flags_help)\n flags_help += '\\n\\n'\n return cmd.__doc__ + flags_help\n else:\n raise AssertionError('No class docstring found for command %s' % name)", "def help(self):\n\t\treturn", "async def help_command(self, ctx, *, cmd_name: str=None):\n bot_prefix = '@Randy '\n # Get command object\n cmd_obj = self.cmd(cmd_name)\n\n # Handle no command found\n if cmd_obj is None:\n return await ctx.error(f'Command {cmd_name} not found')\n em = discord.Embed(title=cmd_obj.name, description=cmd_obj.help, color=self.color)\n\n # Input aliases and parameters to embed\n if cmd_obj.aliases:\n em.add_field(name='Aliases', value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.aliases]))\n if cmd_obj.clean_params:\n em.add_field(name='Parameters', value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.clean_params]))\n\n # Handle group commands\n if isinstance(cmd_obj, commands.core.Group):\n em.add_field(name='Group commands',\n value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.commands]),\n inline=False)\n\n # Add usage last\n em.add_field(name='Usage',\n value=f'```{bot_prefix}\\u200b{cmd_name} '\n f'{\" \".join([f\"<{x}>\" for x in cmd_obj.clean_params])}```',\n inline=False)\n\n await ctx.send(embed=em)", "def format_options(self, ctx: Context, formatter: DocsCommandHelpTextFormatter): # type:ignore\n DocsBaseCommand.format_description(formatter)\n self.format_sub_commands(formatter)", "async def help(self, args):\n if not args:\n maxw = max([len(x) for x in self.commands]) + 1\n commands = list(self.commands)\n commands.sort()\n message = '\\n'.join(['`{name:{width}}|` {desc}'.format(\n name=command, width=maxw,\n desc=(self.commands[command].__doc__ or 'No description.').splitlines()[0]\n ) for command in commands])\n await self.send(\"Unlisted commands are forwarded to the Minecraft server.\\n\" + message)\n elif args.lower() not in self.commands:\n await self.send_error(\"Unknown command: {command}. 
This might be a Minecraft command.\".format(command=args))\n else:\n args = args.lower()\n await self.send(\"**`{name}`** - {doc}\".format(name=args, doc=self.commands[args].__doc__ or 'No description.'))", "def _describe_command(self, command, **options):\n command.get_synopsis(True)\n command.get_synopsis(False)\n command.merge_application_definition(False)\n\n self._write_text('<comment>Usage:</comment>', **options)\n for usage in [command.get_synopsis(True)] + command.get_aliases() + command.get_usages():\n self._write_text('\\n')\n self._write_text(' %s' % usage, **options)\n\n self._write_text('\\n')\n\n definition = command.get_native_definition()\n if definition.get_options() or definition.get_arguments():\n self._write_text('\\n')\n self._describe_input_definition(definition, **options)\n self._write_text('\\n')\n\n help = command.get_processed_help()\n if help:\n self._write_text('\\n')\n self._write_text('<comment>Help:</comment>', **options)\n self._write_text('\\n')\n self._write_text(' %s' % help.replace('\\n', '\\n '), **options)\n self._write_text('\\n')", "async def eventstats(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send_help(ctx.command)", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def get_help(self) -> None: \n print(messages.get_help())", "def cli():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"tic\", help=\"TIC number\")\n parser.add_argument(\"-L\", \"--LIST\", help=\"Only fit the LC\", action=\"store_true\")\n parser.add_argument(\"-S\", \"--SAVEGAIA\", help=\"Save Gaia sources\", action=\"store_true\")\n parser.add_argument(\"-C\", \"--COORD\", help=\"Use coordinates\", default=False)\n parser.add_argument(\"-n\", \"--name\", help=\"Target name to be plotted in title\", default=False)\n parser.add_argument(\"-D2\", \"--DR2\", help=\"Use Gaia DR2 catalog instead of DR3\", action=\"store_true\")\n parser.add_argument(\"-PM\", \"--PM\", help=\"Add proper motion direction arrows in the plot\", action=\"store_true\")\n parser.add_argument(\"--maglim\", default=5., help=\"Maximum magnitude contrast respect to TIC\")\n parser.add_argument(\"--sector\", default=None, help=\"Select Sector if more than one\")\n parser.add_argument(\"--gid\", default=None, help=\"Gaia ID\")\n parser.add_argument(\"--gmag\", default=None, help=\"Gaia mag\")\n parser.add_argument(\"--sradius\", default=10., type=float, help=\"Search radius (in arcsec) for the get_gaia_data function\")\n parser.add_argument(\"--legend\", default='best', help=\"Legend location\")\n args = parser.parse_args()\n return args", "def CommandHelp(paser):\n\n\tprint \"\\n===============Commands List===============\\n\"\n\t\t\n\tprint \"NewProject - {}\".format(NewProject.__doc__)\n\tprint \"DelProject - {}\".format(DelProject.__doc__)\n\tprint \"ShareProject - {}\".format(ShareProject.__doc__)\n\tprint \"StopProject - {}\".format(StopProject.__doc__)\n\tprint \"Help - {}\".format(CommandHelp.__doc__)\n\tprint \"Exit - Finaliza la sesion en la terminal.\"" ]
[ "0.589855", "0.58264685", "0.58036107", "0.57175773", "0.56941164", "0.56822515", "0.56753105", "0.567416", "0.56666476", "0.5660978", "0.5652861", "0.56472456", "0.56321657", "0.56308264", "0.55924976", "0.55864775", "0.5582454", "0.5558841", "0.55458933", "0.55455136", "0.5545258", "0.554449", "0.5516721", "0.5515871", "0.5515587", "0.55119604", "0.5484775", "0.5461708", "0.54581046", "0.54567105", "0.54554224", "0.5452041", "0.5420012", "0.5409557", "0.5393858", "0.5382833", "0.53748655", "0.5372349", "0.53709525", "0.53619194", "0.5347445", "0.53316313", "0.53244704", "0.53174615", "0.531214", "0.5305342", "0.52985317", "0.5282207", "0.52764636", "0.52724", "0.52690536", "0.52690536", "0.5265495", "0.5265093", "0.5259557", "0.52489555", "0.5245225", "0.5245225", "0.5240673", "0.52386373", "0.52258235", "0.52234834", "0.5222497", "0.5222283", "0.52194196", "0.5218213", "0.5215482", "0.52067417", "0.51985747", "0.51962537", "0.51959693", "0.51946723", "0.51931775", "0.5188584", "0.51835984", "0.51805675", "0.517935", "0.51752776", "0.5171131", "0.5164416", "0.5162963", "0.5162963", "0.51611876", "0.5156441", "0.5155901", "0.5155296", "0.5147805", "0.5145239", "0.5145098", "0.5145098", "0.5140515", "0.513914", "0.5137366", "0.51329386", "0.5131805", "0.51279825", "0.5127008", "0.51251835", "0.5124948", "0.5124909", "0.5123281" ]
0.0
-1
Report usage overview metrics for CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_cpc(cmd_ctx, cpc, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. 
\r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)", "def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()", "def help(cls, extra_args=None):\n if (_is_text_interface()):\n return _create_text_help_str(cls, cls._TEXT_USAGE)\n else:\n return cls._GRAPHICAL_USAGE", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m", "def cmd_help(args):", "def help(self, args):\n print('No commands available for this consumer')", "def help(self):\n msg = \"`%s' performs the computational aspects of genotyping-by-sequencing.\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Usage: %s [OPTIONS] ...\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Options:\\n\"\n msg += \" -h, --help\\tdisplay the help and exit\\n\"\n msg += \" -V, --version\\toutput version information and exit\\n\"\n msg += \" -v, --verbose\\tverbosity level (0/default=1/2/3)\\n\"\n msg += \" --proj1\\tname of the project used for steps 1 to 4\\n\"\n msg += \"\\t\\tmention a reference genome only if all samples belong to\\n\"\n msg += \"\\t\\t the same species, and will be mapped to the same ref genome\\n\"\n msg += \" --proj2\\tname of the project used for steps 4 to 8\\n\"\n msg += \"\\t\\tcan be the same as --proj1, or can be different\\n\"\n msg +=\"\\t\\t notably when samples come from different species\\n\"\n msg += \"\\t\\t or if one wants to align reads to different ref genomes\\n\"\n msg += \" --schdlr\\tname of the cluster scheduler (default=SGE)\\n\"\n msg += \" --queue\\tname of the cluster queue (default=normal.q)\\n\"\n msg += \" --resou\\tcluster resources (e.g. 
'test' for 'qsub -l test')\\n\"\n msg += \" --rmvb\\tremove bash scripts for jobs launched in parallel\\n\"\n msg += \" --step\\tstep to perform (1/2/3/.../9)\\n\"\n msg += \"\\t\\t1: raw read quality per lane (with FastQC v >= 0.11.2)\\n\"\n msg += \"\\t\\t2: demultiplexing per lane (with demultiplex.py v >= 1.14.0)\\n\"\n msg += \"\\t\\t3: cleaning per sample (with CutAdapt v >= 1.8)\\n\"\n msg += \"\\t\\t4: alignment per sample (with BWA MEM v >= 0.7.12, Samtools v >= 1.3, Picard and R v >= 3)\\n\"\n msg += \"\\t\\t5: local realignment per sample (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t6: local realignment per genotype (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t7: variant and genotype calling per genotype (with GATK HaplotypeCaller v >= 3.5)\\n\"\n msg += \"\\t\\t8: variant and genotype calling jointly across genotypes (with GATK GenotypeGVCFs v >= 3.5)\\n\"\n msg += \"\\t\\t9: variant and genotype filtering (with GATK v >= 3.5)\\n\"\n msg += \" --samples\\tpath to the 'samples' file\\n\"\n msg += \"\\t\\tcompulsory for all steps, but can differ between steps\\n\"\n msg += \"\\t\\t e.g. if samples come from different species or are aligned\\n\"\n msg += \"\\t\\t on different ref genomes, different samples file should\\n\"\n msg += \"\\t\\t be used for steps 4-9, representing different subsets of\\n\"\n msg += \"\\t\\t the file used for steps 1-3\\n\"\n msg += \"\\t\\tthe file should be encoded in ASCII\\n\"\n msg += \"\\t\\tthe first row should be a header with column names\\n\"\n msg += \"\\t\\teach 'sample' (see details below) should have one and only one row\\n\"\n msg += \"\\t\\tany two columns should be separated with one tabulation\\n\"\n msg += \"\\t\\tcolumns can be in any order\\n\"\n msg += \"\\t\\trows starting by '#' are skipped\\n\"\n msg += \"\\t\\t12 columns are compulsory (but there can be more):\\n\"\n msg += \"\\t\\t genotype (see details below, e.g. 'Col-0', but use neither underscore '_' nor space ' ' nor dot '.', use dash '-' instead)\\n\"\n msg += \"\\t\\t ref_genome (identifier of the reference genome used for alignment, e.g. 'Atha_v2', but use neither space ' ' nor dot '.'; the full species name, e.g. 'Arabidopsis thaliana', will be present in the file given to --dict)\\n\"\n msg += \"\\t\\t library (e.g. can be the same as 'genotype')\\n\"\n msg += \"\\t\\t barcode (e.g. 'ATGG')\\n\"\n msg += \"\\t\\t seq_center (e.g. 'Broad Institute', 'GenoToul', etc)\\n\"\n msg += \"\\t\\t seq_platform (e.g. 'ILLUMINA', see SAM format specification)\\n\"\n msg += \"\\t\\t seq_platform_model (e.g. 'HiSeq 2000')\\n\"\n msg += \"\\t\\t flowcell (e.g. 'C5YMDACXX')\\n\"\n msg += \"\\t\\t lane (e.g. '3', can be '31' if a first demultiplexing was done per index)\\n\"\n msg += \"\\t\\t date (e.g. '2015-01-15', see SAM format specification)\\n\"\n msg += \"\\t\\t fastq_file_R1 (filename, one per lane, gzip-compressed)\\n\"\n msg += \"\\t\\t fastq_file_R2 (filename, one per lane, gzip-compressed)\\n\"\n msg += \" --fcln\\tidentifier of a flowcell and lane number\\n\"\n msg += \"\\t\\tformat as <flowcell>_<lane-number>, e.g. 
'C5YMDACXX_1'\\n\"\n msg += \"\\t\\tif set, only the samples from this lane will be analyzed\\n\"\n msg += \" --pird\\tpath to the input reads directory\\n\"\n msg += \"\\t\\tcompulsory for steps 1 and 2\\n\"\n msg += \"\\t\\twill be added to the columns 'fastq_file_R*' from the sample file\\n\"\n msg += \"\\t\\tif not set, input read files should be in current directory\\n\"\n msg += \" --enz\\tname of the restriction enzyme\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=ApeKI\\n\"\n msg += \" --dmxmet\\tmethod used to demultiplex\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=4c (see the help of demultiplex.py to know more)\\n\"\n msg += \" --subst\\tnumber of substitutions allowed during demultiplexing\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=2\\n\"\n msg += \" --ensubst\\tenforce the nb of substitutions allowed\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=lenient/strict\\n\"\n msg += \" --adp\\tpath to the file containing the adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tsame format as FastQC: name<tab>sequence\\n\"\n msg += \"\\t\\tname: at least 'adpR1' (also 'adpR2' if paired-end)\\n\"\n msg += \"\\t\\tsequence: from 5' (left) to 3' (right)\\n\"\n msg += \" --errtol\\terror tolerance to find adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --minovl\\tminimum overlap length between reads and adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=3 (in bases)\\n\"\n msg += \" --minrl\\tminimum length to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=35 (in bases)\\n\"\n msg += \" --minq\\tminimum quality to trim a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=20 (used for both reads if paired-end)\\n\"\n msg += \" --maxNp\\tmaximum percentage of N to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --ref\\tpath to the prefix of files for the reference genome\\n\"\n msg += \"\\t\\tcompulsory for steps 4, 5, 6, 7, 8, 9\\n\"\n msg += \"\\t\\tshould correspond to the 'ref_genome' column in --samples\\n\"\n msg += \"\\t\\te.g. '/data/Atha_v2' for '/data/Atha_v2.fa', '/data/Atha_v2.bwt', etc\\n\"\n msg += \"\\t\\tthese files are produced via 'bwa index ...'\\n\"\n msg += \" --dict\\tpath to the 'dict' file (SAM header with @SQ tags)\\n\"\n msg += \"\\t\\tcompulsory for step 4\\n\"\n msg += \"\\t\\tsee 'CreateSequenceDictionary' in the Picard software\\n\"\n msg += \" --jgid\\tcohort identifier to use for joint genotyping\\n\"\n msg += \"\\t\\tcompulsory for steps 8, 9\\n\"\n msg += \"\\t\\tuseful to launch several, different cohorts in parallel\\n\"\n msg += \" --rat\\trestrict alleles to be of a particular allelicity\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdefault=ALL/BIALLELIC/MULTIALLELIC\\n\"\n msg += \"\\t\\tsee '--restrictAllelesTo' in GATK's SelectVariant\\n\"\n msg += \" --mdp\\tminimum value for DP (read depth; e.g. 10)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mgq\\tminimum value for GQ (genotype quality; e.g. 
20)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mnfg\\tmaximum number of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mffg\\tmaximum fraction of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFractionFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mnnc\\tmaximum number of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \" --mfnc\\tmaximum fraction of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxNOCALLfraction' in GATK's SelectVariants\\n\"\n msg += \" --fam\\tpath to the file containing pedigree information\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdiscard variants with Mendelian violations (see Semler et al, 2012)\\n\"\n msg += \"\\t\\tshould be in the 'fam' format specified by PLINK\\n\"\n msg += \"\\t\\tvalidation strictness (GATK '-pedValidationType') is set at 'SILENT'\\n\"\n msg += \"\\t\\t allowing some samples to be absent from the pedigree\\n\"\n msg += \" --mvq\\tminimum GQ for each trio member to accept a variant as a Mendelian violation\\n\"\n msg += \"\\t\\tused in step 9 if '--fam' is specified\\n\"\n msg += \"\\t\\tdefault=0\\n\"\n msg += \" --xlssf\\tpath to the file with genotypes to exclude\\n\"\n msg += \"\\t\\tused in step 9 (can be especially useful if '--fam' is specified)\\n\"\n msg += \" --tmpd\\tpath to a temporary directory on child nodes (default=.)\\n\"\n msg += \"\\t\\te.g. it can be /tmp or /scratch\\n\"\n msg += \"\\t\\tused in step 4 for 'samtools sort'\\n\"\n msg += \"\\t\\tused in step 7 for 'GATK HaplotypeCaller'\\n\"\n msg += \" --jvmXms\\tinitial memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=512m (can also be specified as 1024k, 1g, etc)\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --jvmXmx\\tmaximum memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=4g\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --queue2\\tname of the second cluster queue (default=bigmem.q)\\n\"\n msg += \"\\t\\tused in step 4 for Picard to collect insert sizes\\n\"\n msg += \" --knowni\\tpath to a VCF file with known indels (for local realignment)\\n\"\n msg += \" --known\\tpath to a VCF file with known variants (e.g. from dbSNP)\\n\"\n msg += \" --force\\tforce to re-run step(s)\\n\"\n msg += \"\\t\\tthis removes without warning the step directory if it exists\\n\"\n msg += \"\\n\"\n msg += \"Examples:\\n\"\n msg += \" %s --step 1 --samples samples.txt\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Details:\\n\"\n msg += \"This program aims at genotyping a set of 'genotypes' using data from\\n\"\n msg += \"a restriction-assisted DNA sequencing (RAD-seq) experiment, also known\\n\"\n msg += \"as a genotyping-by-sequencing (GBS) experiment.\\n\"\n msg += \"Here, by 'genotype', we mean the entity which is the focus of the\\n\"\n msg += \"study. 
For instance, it can be a plant variety (or a human being), or\\n\"\n msg += \"the specific clone of a given plant variety (or a specific tumor of a\\n\"\n msg += \"given human being), etc.\\n\"\n msg += \"Importantly, note that the content of the 'genotype' column will\\n\"\n msg += \"be used to set the 'SM' (sample) tag of the 'RG' (read group) header\\n\"\n msg += \"record type of the SAM format (see http://www.htslib.org/). However,\\n\"\n msg += \"internal to this program, the term 'sample' corresponds to the unique\\n\"\n msg += \"quadruplet (genotype,flowcell,lane,barcode) for steps 1 and 2, and to\\n\"\n msg += \"the unique triplet (genotype,flowcell,lane) for the others.\\n\"\n msg += \"Jobs are executed in parallel (--schdlr). Their return status is\\n\"\n msg += \"recorded in a SQLite database which is removed at the end. If a job\\n\"\n msg += \"fails, the whole script stops with an error.\\n\"\n msg += \"\\n\"\n msg += \"Dependencies:\\n\"\n msg += \"Python >= 2.7; Biopython; pyutilstimflutre >= 0.5\\n\"\n msg += \"\\n\"\n msg += \"Report bugs to <[email protected]>.\"\n print(msg); sys.stdout.flush()", "def usage():\n print(\"[1] Getting help from a cipher \")\n print(\" ---> ./cryptogra.py caesar -h \")\n print(\"\")", "def command_help(args):\n\tprint_usage()\n\treturn 0", "def print_usage_command(self):\n print self.get_usage_command()", "def print_usage_command(self):\n print self.get_usage_command()", "def help_opt(self):\n print(OPTIONS)", "def usage():", "def usage():", "def help_help(self):\n print(\"List commands or print details about a command\")", "def _usage_options_example(self):\n pass", "def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging", "def help(self):\r\n self._short_help(None, None, None, None)", "def usage(self):\n\n # header\n self.usage_header()\n\n print _(\"\"\"Screen: %(screen)s\nDescription: %(description)s\n\nUsage: %(app_name)s %(screen)s 
[options]\"\"\") % {\n 'app_name': constants.App.NAME,\n 'screen': self.name,\n 'description': self.description,\n }\n # any additional info in between (see other classes for reference)\n self._usage_options_example()\n\n #footer\n self.usage_footer()", "def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "def do_overview(self):\n summaries = []\n for name, cmd in self.base.commands.iteritems():\n summaries.append(' %-14s %s\\n' % (name, cmd.get_summary()))\n summaries.sort()\n sys.stdout.write('Usage: %s COMMAND ARGUMENTS...\\n\\n' \\\n 'Available commands:\\n' % (self.base.scriptname, ))\n for line in summaries:\n sys.stdout.write(line)", "def print_help_info(self, global_options):\r\n usage = ['',\"Type '%s help <subcommand>' for help on a specific subcommand.\" % self.prog_name,'']\r\n usage.append('Available subcommands:')\r\n commands = self.get_commands(global_options).keys()\r\n commands.sort()\r\n for cmd in commands:\r\n usage.append(' %s' % cmd)\r\n return '\\n'.join(usage)", "def usage():\n \n print '-b <bench> the bench to show.'\n print '-c <config> the config to show (GPU, 8888, 565, etc).'\n print '-d <dir> a directory containing bench_r<revision>_<scalar> files.'\n print '-e <file> file containing expected bench values/ranges.'\n print ' Will raise exception if actual bench values are out of range.'\n print ' See bench_expectations.txt for data format and examples.'\n print '-f <revision>[:<revision>] the revisions to use for fitting.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-i <time> the time to ignore (w, c, g, etc).'\n print ' The flag is ignored when -t is set; otherwise we plot all the'\n print ' times except the one specified here.'\n print '-l <title> title to use for the output graph'\n print '-m <representation> representation of bench value.'\n print ' See _ListAlgorithm class in bench_util.py.'\n print '-o <path> path to which to write output; writes to stdout if not specified'\n print '-r <revision>[:<revision>] the revisions to show.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).'\n print '-t <time> the time to show (w, c, g, etc).'\n print '-x <int> the desired width of the svg.'\n print '-y <int> the desired height of the svg.'\n print '--default-setting <setting>[=<value>] setting for those without.'", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"", "def getHelp(self):\r\n help_str =\\\r\n \"\"\"##########################################################################################\r\n#\r\n# Required:\r\n#\r\n# --query_NAST multi-fasta file containing query sequences in alignment format\r\n#\r\n# Common 
opts:\r\n#\r\n# --db_NAST db in NAST format\r\n# --db_FASTA db in fasta format (megablast formatted)\r\n#\r\n#\r\n# -n number of top matching database sequences to compare to (default 15)\r\n# -R min divergence ratio default: 1.007\r\n# -P min percent identity among matching sequences (default: 90)\r\n#\r\n# ## parameters to tune ChimeraParentSelector:\r\n#\r\n# Scoring parameters:\r\n# -M match score (default: +5)\r\n# -N mismatch penalty (default: -4)\r\n# -Q min query coverage by matching database sequence (default: 70)\r\n# -T maximum traverses of the multiple alignment (default: 1)\r\n\r\n#\r\n# ## parameters to tune ChimeraPhyloChecker:\r\n#\r\n#\r\n# --windowSize default 50\r\n# --windowStep default 5\r\n# --minBS minimum bootstrap support for calling chimera (default: 90)\r\n# -S percent of SNPs to sample on each side of breakpoint for computing bootstrap support (default: 10)\r\n# --num_parents_test number of potential parents to test for chimeras (default: 3)\r\n# --MAX_CHIMERA_PARENT_PER_ID Chimera/Parent alignments with perID above this are considered non-chimeras (default 100; turned off)\r\n#\r\n# ## misc opts\r\n#\r\n# --printFinalAlignments shows alignment between query sequence and pair of candidate chimera parents\r\n# --printCSalignments print ChimeraSlayer alignments in ChimeraSlayer output\r\n# --exec_dir chdir to here before running\r\n#\r\n#########################################################################################\r\n \"\"\"\r\n return help_str", "def help_command(server, output, conf):\n server.tell(output.name, 'Available commands:')\n for key in COMMANDS.keys():\n cmd_func = COMMANDS[key]\n if cmd_func.__doc__:\n server.tell(output.name, '%s: %s' % (key[1:], cmd_func.__doc__))\n else:\n server.tell(output.name, key[1:])\n return", "def help(update, context):\n update.message.reply_text(\"\"\"usage \n /bus <bus name> or /bus <bus name> <stop name>\n /addstop <stop name> <stop code>\n /delstop <stop name>\n /showstops\n /help\n \"\"\")\n\n # log info\n logger.info(\"help used username:{0}\".format(update.message.from_user.username))", "def show_usage():\n\n usage_screen = \"\\nUsage:\\n\" \\\n f\" {basename(argv[0])} <mock_1> [<mock_2> ...]\\n\" \\\n \"\\nOptions:\\n\" \\\n \" mock-departments Send HTTP requests to create some mock departments in the backend.\\n\" \\\n \" mock-employees Send HTTP requests to create some mock employees in the backend.\\n\" \\\n \" help Show this help page.\\n\" \\\n \"\" \\\n \" verbose Enables detailed request logging for the remaining options.\\n\"\n print(usage_screen)", "def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: users[user] += count\n except KeyError: users[user] = count\n else: \n try: commands[name] += count\n except KeyError: commands[name] = count\n\n try: channels[user] += count\n except KeyError: channels[user] = count\n\n comrank = sorted([(b, a) for (a, b) in commands.iteritems()], reverse=True)\n userank = sorted([(b, a) for (a, b) in users.iteritems()], reverse=True)\n charank = sorted([(b, a) for (a, b) in channels.iteritems()], reverse=True)\n\n # most heavily used commands\n creply = 'most used commands: '\n for count, command in comrank[:10]: \n creply += '%s (%s), ' % (command, count)\n caesar.say(creply.rstrip(', '))\n\n # most heavy users\n reply = 'power users: '\n 
for count, user in userank[:10]: \n reply += '%s (%s), ' % (user, count)\n caesar.say(reply.rstrip(', '))\n\n # most heavy channels\n chreply = 'power channels: '\n for count, channel in charank[:3]: \n chreply += '%s (%s), ' % (channel, count)\n caesar.say(chreply.rstrip(', '))", "def help(update, context):\n msg = \"\"\n msg += \"\\n/covid 7-Day-Incident per Million\"\n msg += \"\\n/daylio What did I do a year ago today?\"\n msg += \"\\n/f1last Results of the last race\"\n msg += \"\\n/f1stand Driver standings\"\n msg += \"\\n/f1next Time and place of the next race\"\n msg += \"\\n/fuel prices and consump. (args: Xeur Ykm)\"\n msg += \"\\n/ip Outside ip address\"\n msg += \"\\n/rate Exchange rates (args: Xeur/Yhuf)\"\n msg += \"\\n/rss check rss feeds for new content\"\n msg += \"\\n/sun Time of sunrise and sunset\"\n msg += \"\\n/xkcd Sends last comic image and alt\"\n msg.rstrip()\n update.message.reply_text(msg)", "def test_explicit_usage_message(self):\n assert 'Usage:' in main('coloredlogs', '--help', capture=True)", "def info():\n f = Figlet(font='standard')\n click.echo(f.renderText('covtool'))\n click.secho(\n \"covtool: a simple CLI for fetching covid data\", fg='cyan')\n click.echo(\n \"Data Sources: https://www.worldometers.info/coronavirus\\nJohn Hopkins [https://github.com/CSSEGISandData/COVID-19] \")\n click.secho(\"Author: Amayo II <[email protected]>\", fg='magenta')", "def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n printer('%s: Incorrect usage; details below.' % show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)", "def test_cli_help(self):\n output = self.update_command('-h')", "def usage() :\n\n print usage.__doc__", "def cmd_help(self, commands=None, usage=False):\n if commands:\n usage = True\n commands = {self.approx.decmd(c.lower()) for c in commands}\n rejects = commands - self.approx.keys()\n for reject in rejects:\n self.put_pretty(\"No command named %r\" % reject)\n continue\n commands -= rejects\n if self.debug:\n assert not any(self.approx.encmd(r) in self.mod_commands for\n r in rejects)\n assert all(self.approx.encmd(c) in self.mod_commands for\n c in commands)\n if not commands:\n return\n requested = zip(commands, (self.approx[c] for c in commands))\n else:\n requested = self.approx.items()\n help = znc.CTable()\n help.AddColumn(\"Command\")\n help.AddColumn(\"Usage\" if usage else \"Description\")\n from itertools import zip_longest\n #\n for command, parser in requested:\n if usage:\n upre = \"usage: %s\" % command\n rest = (parser.format_usage()\n .replace(upre, \"\", 1)\n .replace(\"[-h] \", \"\", 1))\n desc = [l.strip() for l in rest.split(\"\\n\") if l.strip()]\n else:\n desc = [parser.description]\n for line, comm in zip_longest(desc, (command,), fillvalue=\"\"):\n help.AddRow()\n 
help.SetCell(\"Command\", comm)\n help.SetCell(\"Usage\" if usage else \"Description\", line)\n #\n s_line = znc.String()\n strung = []\n while help.GetLine(len(strung), s_line):\n strung.append(s_line.s)\n also = \" (<command> [-h] for details)\"\n strung[1] = strung[1].replace(len(also) * \" \", also, 1)\n self.put_pretty(\"\\n\".join(strung))", "def help():", "def help():\n print(UI.HELP)", "def usage():\n pass", "def usage(progname):\n \n sys.stderr.write(\"Usage: \" +progname + \" [-cmnv] [-z score] \"\n \" <outdir>\\n\")\n sys.stderr.write(' -c class level not fold level evaluation\\n')\n sys.stderr.write(' -m read multiquery file on stdin\\n')\n sys.stderr.write(' -n negate scores (so that most -ve is best)\\n')\n sys.stderr.write(' -v verbose messages to stderr\\n')\n sys.stderr.write(' -z score : assign identifiers not present in the output a score of score\\n')\n sys.exit(1)", "def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))", "def usage(msg):\n ap.print_usage()\n print \"-\"*40\n print msg\n exit(1)", "def test_usage(self):\n # Make sure the usage message is shown when no arguments\n # are given and when the -h or --help option is given.\n for options in [], ['-h'], ['--help']:\n exit_code, output = run_cli(*options)\n assert \"Usage:\" in output", "def display_help(self):\n pass", "def _help(self):\n self.onecmd('help')", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def help(self, *args):\n for _, v in self.useage.items():\n print v.__doc__", "def usage(self):\n self._usage1()\n print 'folder COOL_channel COOL_tag ROOT_file'\n self._usage2()", "def help():\n \n pass", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def _show_help(self):\r\n info = {\"/contexts/<context>/[orgs/[<org_name>]]/[spaces/[<space_name>]]\": \"reports\",\r\n \"/contexts/<context>/orgs_metadata/[<org_name>]\": \"metadata\",\r\n \"/contexts/<context>/orgs/<org_name>/director\": \"org/director mapping\",\r\n \"/reader_status\": \"status of Bitbucket reader cache\"}\r\n if self._cache_refresh:\r\n info['/refresh'] = \"force cache refresh from BitBucket\"\r\n return info", "def usage(msgarg):\n if msgarg:\n sys.stderr.write(\"error: %s\\n\" % msgarg)\n print(\"\"\"\\\n usage: %s [options]\n\n options:\n -d increase debug msg verbosity level\n -c N emit N classes (def: 500) per instances\n -I N emit N instances\n\n \"\"\" % os.path.basename(sys.argv[0]))\n sys.exit(1)", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "def help_message():\n print('')\n print('Usage: python make_cutout.py [infile] [ra_cent] [dec_cent] '\n '[imsize] [outfile]')\n print('')\n print('Example: python make_cutout.py bigim.fits 272.345 62.5432'\n '4.5 cutout.fits')\n print('')\n print('Inputs:')\n print(' 1. infile - input fits file with WCS information')\n print(' 2. ra_cent - requested central RA for cutout, in decimal degrees')\n print(' 3. dec_cent - requested central Dec for cutout, in decimal '\n 'degrees')\n print(' 4. imsize - size of cutout, in arcsec')\n print(' 5. 
outfile - name of output fits file')\n print('')", "def do_command_help(self, command):\n summary = self.base.commands[command].get_summary()\n usage = self.get_command_usage(command)\n description = self.base.commands[command].get_description()\n sys.stdout.write('%s\\n%s' % (summary, usage))\n if description != None:\n sys.stdout.write('Arguments Description:\\n%s\\n' %\n (description, ))", "def help(self):\n pass", "def help(self):\n pass", "def explainerdashboard_cli(ctx):", "def help(self, msg=None):\n\n # Print the message if given.\n if not msg == None:\n print str(msg) + \"\\n\"\n\n # Display the list of commands, in the alphabetical order.\n print \"Use one of the following commands:\"\n for action in sorted(self.actions.keys()):\n info = self.actions[action]\n joined_oblig = ' '.join(info['required'])\n if len(info['additional']) > 0:\n add = [\"<%s>\" % x for x in info['additional']]\n joined_add = '[' + ' '.join(add) + ']'\n else:\n joined_add = ''\n print \"\\t* %s %s %s\" % (action, joined_oblig, joined_add)", "def usage(self, host):", "def command_help(self):\n print(\"Command \", self)\n print(\"\\t\\thelp (Get help for command)\")\n\n params = self.params.copy()\n del params[\"help\"]\n\n if len(params) == 0:\n print(\"This command has no parameters\")\n return\n\n print(\"Parameters:\")\n for info in params.values():\n print(\" %s\" % info.get_basic_info())\n description = info.get_desc()\n if description != \"\":\n print(textwrap.fill(description,\n initial_indent=\" \",\n subsequent_indent=\" \",\n width=70))", "def help(self):\n res = \"\"", "def usage(self, subcommand):\r\n if len(self.option_list) > 0:\r\n usage = '%%prog %s [options] %s' % (subcommand, self.args)\r\n else:\r\n usage = '%%prog %s %s' % (subcommand, self.args)\r\n if self.help:\r\n return '%s\\n\\n%s' % (usage, self.help)\r\n else:\r\n return usage", "def usage(self, subcommand):\n usage = '%%prog %s [options] %s' % (subcommand, self.args)\n if self.help:\n return '%s\\n\\n%s' % (usage, self.help)\n else:\n return usage", "def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not 
len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break", "def help_text(command):\n\n courses_list = ('ENPM611', 'ENPM613', 'ENPM631', 'ENPM687',\\\n 'ENPM691', 'ENPM693', 'ENPM694', 'ENPM696',\\\n 'ENPM809J','ENPM809R', 'ENPM809W')\n\n response = 'I have course descriptions for: '\n for course_name in courses_list:\n response = response + course_name + ' '\n\n response = response + '\\nTo get the course description, execute command: about ENPM<course_number>'\n\n return response", "def print_usage():\n usage_msg = \"\"\"\n%s.py -H <host or group> -P <path> -M <mode>\n\nUsage:\n -h, --help\n Print detailed help screen\n -H, --hostname=STRING\n Host name or group of hosts\n -V, --version\n Print version information\n -P, --path=STRING\n Path to rancid var directory. Usually the dir contains a logs dirs and hostgroup dirs\n Example : /usr/local/rancid/var\n -M, --mod=STRING\n Plugin mod. Must be one of the following : ping, hash, config, cards, filter, qos\n *ping:\n Check if all host in the hostgroup are up from the rancid point of view.\n It uses the .up file to determine the lists of host to look for\n *hash:\n Check if the firmware hash is different from the ref one (or from the previous one)\n *config:\n Check if the configuration has changed for the host / group (notify diff)\n *cards:\n Specific to 8600 models. Check the hardware cards plugged to the host (notify diff).\n *filter:\n Specific to ES-470. Check the filters (notify diff)\n *qos:\n Specific to ES-470. 
Check the qos values (notify diff)\n -u, --url=URL\n URL to submit passive results to Shinken Receiver with HTTP\n Need a host and service to send result.\n -a, --passive-host=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n -b, --passive-service=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n\"\"\" % PLUGIN_NAME\n print usage_msg", "def cli_help(self):\n self._generate_cli_version()\n self._generate_cli_help()\n sys.exit(0)", "def _show_help(self):\n puts(e(\"Usage: %s [options] <file/folder>\" % sys.argv[0]))\n\n puts(p(\"--help \") + \"Show the help system\")\n puts(p(\"--lossy \") + \"Apply a lossy optimization on the image(s)\")\n puts(p(\"--lossless \") + \"Apply a lossless optimization on the image(s)\")\n puts(p(\"--watch \") + \"Watch a folder and apply optimizations straight up\")\n\n sys.exit(0)", "def showUsage():\n None", "def add_usage(self, usage, actions, groups, prefix=''):\n #if prefix is None:\n # prefix = ''\n return super(SubcommandHelpFormatter, self).add_usage(usage, actions, groups, prefix='')", "def print_usage():\n print 'USAGE: %s [options]' % os.path.abspath(__file__)\n print 'EXAMPLE1: %s # FOR DEFAULTS' % os.path.abspath(__file__)\n print 'EXAMPLE2: %s 121f03=tweek hirap=towelie details=False # TWO SMALL SETS' % os.path.abspath(__file__)\n print 'EXAMPLE3: %s 121f03=tweek details=True # ONE DETAILED SET' % os.path.abspath(__file__)\n print 'EXAMPLE4: %s details=True # SHOWS MAX INFO' % os.path.abspath(__file__)", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -t, --transaction\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -f, --from-file <filename>\n --not-error-tolerant\n \"\"\"", "def usage():\n\n # Local constants\n\n # Local variables\n\n #****** start usage() ******#\n print()\n print(\" Usage: python TCGCardTracker.py <arguement below> <optional-argument-1>\")\n print(\"\\tadd (Optional): Add a card to your collection. Requires TCGPlayer URL.\")\n print(\"\\tdelete (Optional): Delete a card from your collection. Requires TCGPlayer URL.\")\n print(\"\\tupdate (Optional): Updates pricing data for every card in your collection.\")\n print(\"\\ttop25 (Optional): Outputs the 25 most valuable cards from your collection.\")\n print(\"\\texport (Optional): Exports a list of TCGPlayer URLs to a text file.\")\n print(\"\\texport_collection (Optional): Exports your collection to a .csv including most recent price data.\")\n print(\"\\timport (Optional): Imports a text file of TCGPlayer URLs to bulk import cards into your collection. Requires text file.\")\n print(\"\\tworth (Optional): Ouputs how much your collection is worth using latest price data.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tticker (Optional): Displays a ticker grid of the change in value over a given time. 
If run without the days back parameter it will default to 7 days.\")\n sys.exit()", "def help(inp, *, command, elemental):\n if elemental:\n return\n url = 'http://scp-stats.wikidot.com/jarvis'\n return url if not command else url + '#' + command.replace(' ', '-')", "def command_help(self, bot, update):\n\n messages = [\n 'Available commands:',\n '/who - Who is Myles?',\n '/where - Where is Myles?',\n '/tweet - What was the last tweet Myles sent?',\n '/photo - What was the last Instagram photo Myles took?',\n '/web - Where can I find Myles on the interwebs?',\n ]\n\n self.send_messages(bot, update, messages)", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-t\", \"--test\", dest=\"test\", type=\"string\",\n help=\"supply help\")\n\n parser.add_option(\"--method\", dest=\"method\", type=\"choice\",\n choices=(\"metrics\", \"summary\", \"module_summary\"),\n help=\"method to summarise clustering\")\n\n parser.add_option(\"--ref-gtf-files\", dest=\"ref_gtf\", type=\"string\",\n help=\"comma separated list of reference gtf files\")\n\n # add common options (-h/--help, ...) and parse command line\n (options, args) = E.Start(parser, argv=argv)\n\n if options.method == \"metrics\":\n infile = argv[-1]\n E.info(\"loading input file: %s\" % infile)\n assert infile\n\n df = pd.read_table(infile,\n sep=\"\\t\",\n header=None,\n index_col=0)\n\n df = df.ix[:, :50]\n cluster_combs = (x for x in itertools.combinations(df.columns,\n 2))\n genes = df.index\n results_dict = {}\n all_clusts = {}\n\n E.info(\"setting up cluster containers\")\n for i in df.columns:\n clusters = set(df[i].values.tolist())\n cluster_dict = {}\n for clust in clusters:\n cluster_dict[clust] = []\n for gene in genes:\n cluster_dict[df[i][gene]].append(gene)\n\n for col in clusters:\n col_set = set()\n clust_col = cluster_dict[col]\n gene_members = itertools.combinations(clust_col,\n 2)\n col_set.update(gene_members)\n cluster_dict[col] = col_set\n all_clusts[i] = cluster_dict\n E.info(\"generating all pair-wise cluster comparisons\")\n E.info(\"calculating adjusted mutual information\")\n for k in cluster_combs:\n clusters1 = all_clusts[k[0]]\n clusters2 = all_clusts[k[1]]\n metric_dict = {}\n metric_dict['AMI'] = TS.adjustedMutualInformation(clusters1,\n clusters2)\n results_dict[k] = metric_dict\n\n res_frame = pd.DataFrame(results_dict).T\n res_frame = res_frame.reset_index()\n res_frame.drop(['level_0'], inplace=True, axis=1)\n res_frame.drop(['level_1'], inplace=True, axis=1)\n\n # flatten rand indices and add to output dataframe\n rand_arrays = TS.randIndexes(df)\n flat_adj_rand = TS.unravel_arrays(rand_arrays[0])\n flat_rand = TS.unravel_arrays(rand_arrays[1])\n res_frame['Rand_Index'] = flat_rand\n res_frame['Adjusted_Rand_Index'] = flat_adj_rand\n E.info(\"aggregating results\")\n\n res_frame.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"summary\":\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n file_dict = {}\n for fle in list_of_files:\n fname = fle.split(\"/\")[-1]\n condition = fname.split(\"-\")[0]\n ref = fname.split(\"-\")[1]\n df_ = pd.read_table(fle,\n sep=\"\\t\",\n header=0,\n index_col=0)\n df_.columns = ['gene_id', 'cluster']\n clust_dict = {}\n for idx in df_.index:\n cluster = df_.loc[idx]['cluster']\n gene = df_.loc[idx]['gene_id']\n try:\n clust_dict[cluster] += 1\n except KeyError:\n 
clust_dict[cluster] = 1\n med_size = np.median(clust_dict.values())\n file_dict[fname] = {'condition': condition,\n 'reference': ref,\n 'median_cluster_size': med_size}\n\n outframe = pd.DataFrame(file_dict).T\n outframe.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"module_summary\":\n # get lncRNA/gene lengths from reference gtfs\n ref_gtfs = options.ref_gtf.split(\",\")\n length_dict = {}\n for ref in ref_gtfs:\n oref = IOTools.openFile(ref, \"rb\")\n git = GTF.transcript_iterator(GTF.iterator(oref))\n for gene in git:\n for trans in gene:\n length = trans.end - trans.start\n try:\n length_dict[trans.gene_id] += length\n except KeyError:\n length_dict[trans.gene_id] = length\n oref.close()\n\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n fdfs = []\n for fle in list_of_files:\n cond = fle.split(\"/\")[-1].split(\"-\")[0]\n refer = fle.split(\"/\")[-1].split(\"-\")[1]\n _df = pd.read_table(fle, sep=\"\\t\",\n header=0, index_col=0)\n _df.columns = ['gene_id', 'cluster']\n clusters = set(_df['cluster'])\n c_dict = {}\n # summarize over each cluster\n for clust in clusters:\n lengths = []\n c_df = _df[_df['cluster'] == clust]\n for lid in c_df['gene_id']:\n lengths.append(length_dict[lid])\n c_dict[clust] = {'cluster_size': len(c_df['gene_id']),\n 'mean_length': np.mean(lengths),\n 'index': (cond, refer),\n 'module': clust}\n cdf = pd.DataFrame(c_dict).T\n # use a multindex for hierarchical indexing\n midx = pd.MultiIndex.from_tuples(cdf['index'])\n cdf.index = midx\n cdf.drop(['index'], inplace=True, axis=1)\n fdfs.append(cdf)\n\n # generate a single output df\n s_df = fdfs[0]\n fdfs.pop(0)\n for df in fdfs:\n s_df = s_df.append(df)\n\n s_df.to_csv(options.stdout,\n index_label=(\"condition\", \"reference\"),\n sep=\"\\t\")\n\n # write footer and output benchmark information.\n E.Stop()", "def usage(app_name):\n global version\n print '\\npython {0} -a MediaLive_ARN -n Dashboard_Name [Optional parameters]\\n'.format(app_name)\n print 'Version:', version\n print '\\nThis script creates a CloudWatch Dashboard for a MediaLive/MediaPackage workflow.'\n print \"It uses the MediaLive Channel Arn as input and determines the MediaPackage instances from the \"\n print \"MediaLive channel configuration. It then creates the CloudWatch Dashboard that contains info on the\"\n print \"MediaLive channel, the two MediaPackage channels, and all of the MediaPackage endpoints.\"\n print \"\\nRequired parameters:\"\n print \"-a, --arn: MediaLive Channel ARN\"\n print \"-n, --name: Name for the CloudWatch Dashboard. \"\n print \"\"\n print \"Optional parameters\"\n print \"-l, --list: Filename of a file that contains a list of MediaLive Channel ARNs, 1 ARN per line. \"\n print \" All MediaLive channels and their corresponding MediaPackage channels will be included in \"\n print \" the CloudWatch Dashboard.\"\n print \" Note: This parameter is ignored if a channel ARN is provided via the '-a/--arn' option\"\n print \" Note: All ARNs in the list must be for channels in the same region. 
All ARNs not in the same\"\n print \" region as the first ARN in the list will be ignored.\"\n print '-h, --help: Print this help and exit.'\n print \"\"\n print 'Examples:'\n print \"\"\n print 'Using MediaLive ARN arn:aws:medialive:us-west-2:0123456789:channel:123456 and create a CloudWatch ' \\\n 'Dashboard called \"My TV Dashboard\"'\n print 'python {0} -a arn:aws:medialive:us-west-2:0123456789:channel:123456 ' \\\n '-n \"My TV Dashboard\" '.format(app_name)\n print \"\"\n print 'Using the MediaLive Channel ARN list defined in the text file \"My EML arns.txt\" create a CloudWatch' \\\n 'Dashboard called \"Primary Bouquet\".'\n print 'python {0} -l \"My EML arns.txt\" -n \"Primary Bouquet\"\\n'.format(app_name)", "async def eventstats(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send_help(ctx.command)", "def add_usage(self, usage, actions, groups, prefix=None):\n if prefix is None:\n prefix = colored('Usage: \\n ', 'cyan')\n return super(ColoredHelpFormatter, self).add_usage(\n usage, actions, groups, prefix)", "def usage(self):\n\n usage_string = \"\"\"\n usage: %prog [options] -H SERVER -j JOB -w WARNING -c CRITICAL\n\n Make sure the last job is successful\n OR the current is not stuck (LastBuild)\n Warning and Critical are defined in minutes\n\n Ex :\n\n check_jenkins.py -H ci.jenkins-ci.org -j infa_release.rss -w 10 -c 42\n will check if the the job infa_release.rss is successful\n or not stuck for more than 10 (warn) 42 minutes (critical alert)\n\n \"\"\"\n\n return usage_string", "def help(self):\n\t\treturn", "def usage():\n return _usage", "def __generate_usage_string(self):\n usage = \"{} <command> [<args>]\\n\\n\" \\\n \"Available commands:\\n\".format(self.name)\n max_name_length = len(max(self.cmd_list, key=lambda x: len(x[0]))[0])\n for name, desc in self.cmd_list:\n name_spacing = \" \" * (max_name_length - len(name)) + \" \" * 5\n usage += \" {}{}{}\\n\".format(name, name_spacing, desc)\n\n return usage", "def help(self) -> str:\n\t\treturn None", "def help_display(self):\r\n cmdString = 'pyuic5 -h' \r\n # execute command and return stdout string\r\n output2 = subprocess.getoutput(cmdString) \r\n # show stdout \r\n self.plainTextEdit.insertPlainText( output2 )", "def help(cmd, cmdArgs):\n global commandDict\n retInfo = []\n if len(cmdArgs) > 0:\n #return help on a single function\n if cmdArgs[0] in commandDict.keys():\n return commandDict[cmdArgs[0]].__doc__\n\n #else, return general info\n retInfo = ['pypeople: A command line tool for vCard management',\n 'Version:' + __version__,\n 'Available Commands:']\n #fill in more info here\n for cmdName in commandDict.keys():\n cmdFunc = commandDict[cmdName]\n cmdDoc = str(cmdName) + ': ' + str(cmdFunc.__doc__) if cmdFunc.__doc__ is not None else 'Undocumented Function'\n retInfo.append('\\t' + cmdDoc)\n\n return '\\n'.join(retInfo)", "def test_implicit_usage_message(self):\n assert 'Usage:' in main('coloredlogs', capture=True)", "def help(bot, sender, sendmsg, label, args):\n\n clist = commands.commands\n csort = sorted(clist.values(), key=lambda c: c.__name__.lower())\n\n if len(args) > 0:\n page = int(args[0]) - 1\n else:\n page = 0\n\n pages = len(clist) // 10 + 1\n\n sendmsg(\"-- Help (Page {} of {}) --\".format(page + 1, pages))\n for i in range(10):\n if i >= len(csort):\n break\n\n command = csort[i + (page * 10)]\n sendmsg(\"{}: {}\".format(command.__name__, command.__doc__))", "def help(self):", "def help(self):", "def help_option(args, run):\n pass" ]
[ "0.6095599", "0.59995025", "0.5967684", "0.58918285", "0.5879187", "0.58760214", "0.58532023", "0.58369166", "0.58359265", "0.5832341", "0.58278644", "0.58267254", "0.582368", "0.57165456", "0.5707957", "0.56925315", "0.5681756", "0.5681756", "0.5672487", "0.5671114", "0.5671114", "0.56505", "0.56297034", "0.56186634", "0.5612054", "0.5602668", "0.5588876", "0.5586467", "0.5581898", "0.5570037", "0.5568709", "0.5553763", "0.5544499", "0.5499408", "0.5498082", "0.5487744", "0.5483736", "0.5483529", "0.54806995", "0.5476519", "0.54674447", "0.5466753", "0.5464972", "0.5456349", "0.5453582", "0.5453055", "0.54464304", "0.5440669", "0.5429928", "0.5426443", "0.5425971", "0.54167664", "0.5416014", "0.5407925", "0.5399284", "0.5387605", "0.53824514", "0.5381444", "0.537407", "0.5367269", "0.5365627", "0.53647107", "0.5363898", "0.5363038", "0.5363038", "0.535527", "0.53550345", "0.5350881", "0.5348667", "0.5346462", "0.5344052", "0.5341422", "0.53367144", "0.5332763", "0.5324792", "0.5323842", "0.5320636", "0.5319792", "0.5317385", "0.5313676", "0.53121775", "0.5297122", "0.529432", "0.5281836", "0.52814865", "0.52778953", "0.5275736", "0.5265325", "0.5263936", "0.5262892", "0.52520466", "0.524681", "0.5242085", "0.52413875", "0.5236255", "0.52360415", "0.523505", "0.5231926", "0.5231926", "0.5229235" ]
0.60818744
1
Report usage metrics for active partitions of CPCs in DPM mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_partition(cmd_ctx, cpc, partition, **options):
    cmd_ctx.execute_cmd(
        lambda: cmd_metrics_partition(cmd_ctx, cpc, partition, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = 
clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def metrics_flash(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_flash(cmd_ctx, cpc, options))", "def qsub_cmmd(self):\n temp = 'qsub -l mem=1G,time=:5: -cwd -j y -o {log} {job}'.format(**self.dict)\n return temp.format(fnum=self.fnum)", "async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)", "def show_help(argv=None):\n if argv:\n if \"list_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"list_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ls\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"search_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"search_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm search <keyword>\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"show_mounts\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"show_mounts\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ps\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mount <dataset_name> [<mount_path>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mmount <dataset_name> [<dataset_name> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"unmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"unmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm unmount <mount_id> [<cleanup_flag>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"munmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"munmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm munmount <mount_id> [<mount_id> ...]\")\n 
sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"clean\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"clean\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm clean\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n else:\n sdm_util.print_message(\"Unrecognized command\")\n return 1\n else:\n sdm_util.print_message(\"command : sdm <COMMAND> [<COMMAND_SPECIFIC_ARGS> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(\"Available Commands\")\n\n tbl = PrettyTable()\n tbl.field_names = [\"COMMAND\", \"DESCRIPTION\"]\n for cmd in COMMANDS:\n command, _, desc = cmd\n command_str = \" | \".join(command)\n tbl.add_row([command_str, desc])\n\n sdm_util.print_message(tbl)\n sdm_util.print_message(\"\")\n return 0", "def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. \r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text", "def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))", "def printOptions(opts,subject_ids,session_ids,task_list, run_list, acq, rec):\n uname = os.popen('uname -s -n -r').read()\n print \"\\n\"\n print \"* Pipeline started at \"+time.strftime(\"%c\")+\"on \"+uname\n print \"* Command line is : \\n \"+str(sys.argv)+\"\\n\"\n print \"* The source directory is : \"+opts.sourceDir\n print \"* The target directory is : \"+opts.targetDir+\"\\n\"\n print \"* Data-set Subject ID(s) is/are : \"+str(', '.join(subject_ids))+\"\\n\"\n # print \"* PET conditions : \"+ ','.join(opts.condiList)+\"\\n\"\n print \"* Sessions : \", session_ids, \"\\n\"\n print \"* Tasks : \" , task_list , \"\\n\"\n 
print \"* Runs : \" , run_list , \"\\n\"\n print \"* Acquisition : \" , acq , \"\\n\"\n print \"* Reconstruction : \" , rec , \"\\n\"", "def stat_cuda(msg: str) -> None:\n print(f'-- {msg:<35} allocated: %dM, max allocated: %dM, cached: %dM, max cached: %dM' % (\n torch.cuda.memory_allocated() / 1024 / 1024,\n torch.cuda.max_memory_allocated() / 1024 / 1024,\n torch.cuda.memory_cached() / 1024 / 1024,\n torch.cuda.max_memory_cached() / 1024 / 1024\n ))", "def capacitygroup_show(cmd_ctx, cpc, capacitygroup):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_show(cmd_ctx, cpc, capacitygroup))", "def help(self, args):\n print('No commands available for this consumer')", "def phast_cmmd(self):\n temp = '{prog} -R {rho} -C {ecov} -E {elen} -N {chrom} -i MAF {maf} {model} > {wig}\\n'.format(**self.dict)\n return temp.format(fnum=self.fnum)", "def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n printer('%s: Incorrect usage; details below.' % show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')", "def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))", "def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = 
(int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break", "def procs_calculate_axyzc(molecules, n_cores=-1, show_progress=True, scr=None, cmd=XTB_CMD):\n results = None\n return results", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level 
console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging", "def Usage():\n print \"\"\"\n To plot the result using the iter number of the x axis:\n\n plot_sdcard.py -i /tmp/data.txt\n\n To plot the result using time for the x axis:\n\n plot_sdcard.py -t /tmp/data.txt\n\n To plot the result from the profiler:\n\n profile_sdcard.sh\n plot_sdcard.py -p\n\n \"\"\"\n sys.exit(2)", "def _get_metrics_options(metrics):\n metrics_options = []\n if metrics is None:\n metrics = []\n for static_metric in metrics:\n metrics_options += [\n \"-m\",\n static_metric.metric.mp_metric_name,\n str(static_metric.value),\n ]\n return metrics_options", "def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)", "def measure(self,command_exe, command_args, measure_out):\n pass", "def _cmd_segmetrics(args):\n if not 0.0 < args.alpha <= 1.0:\n raise RuntimeError(\"alpha must be between 0 and 1.\")\n\n if not any((args.location_stats, args.spread_stats, args.interval_stats)):\n logging.info(\"No stats specified\")\n return\n\n # Calculate all metrics\n cnarr = read_cna(args.cnarray)\n segarr = read_cna(args.segments)\n segarr = do_segmetrics(\n cnarr,\n segarr,\n args.location_stats,\n args.spread_stats,\n args.interval_stats,\n args.alpha,\n args.bootstrap,\n args.smooth_bootstrap,\n skip_low=args.drop_low_coverage,\n )\n tabio.write(segarr, args.output or segarr.sample_id + \".segmetrics.cns\")", "def main():\n\n parser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n parser.add_argument('com', type=str, help='plot requests file name', nargs='?', default=\"/tmp/foo.c\")\n parser.add_argument('comout', type=str, help='COMOUT file name', nargs='?', default=\"zoom\")\n\n args = parser.parse_args()\n\n cmd = {} # dictionary of commands\n\n bas = False\n plane = False\n \n with open(args.com) as f:\n for line in f.readlines():\n words = line.strip().split()\n if len(words) is 0:\n continue\n\n for i,w in enumerate(words):\n if re.search(\"^bas\", w):\n cmd['bas'] = list(map(float, words[i+1:i+7]))\n if plane is False: bas = True # basis was before plane cuts\n elif re.search(\"^or\", w):\n cmd['or'] = list(map(float, words[i+1:i+4]))\n elif re.search(\"^ex\", w):\n try: # both x and y scales are given\n cmd['ex'] = list(map(float, words[i+1:i+3]))\n continue\n except ValueError: # just 1 scale is given\n cmd['ex'] = list(map(float, words[i+1:i+2]))\n elif re.search(\"^lab\", w):\n cmd['label'] = list(map(int, map(float, words[i+1:i+3]))) #+ [words[i+3]]\n elif re.search(\"^p[xyz]\", w):\n cmd[w] = [float(words[i+1])]\n if bas is False: plane = True # plane cuts were before basis\n elif 
re.search(\"^legend\", w):\n cmd[w] = [words[i+1]]\n elif w == \"scale\":\n print(w)\n if int(words[i+1]): # no need to put 'scale 0'\n cmd[w] = [words[i+1]]\n elif w in (\"mesh\"):\n if int(words[i+1])==1: # no need to put 'mesh 1'\n cmd[w] = [words[i+1]]\n\n print(bas, plane)\n\n if plane: # bas was first\n keys = ('bas', 'or', 'ex', 'px', 'py', 'pz', 'label', 'mesh', 'legend', 'scale')\n elif bas:\n keys = ('or', 'ex', 'px', 'py', 'pz', 'bas', 'label', 'mesh', 'legend', 'scale')\n else:\n keys = {'or', 'ex', 'label', 'mesh', 'legend', 'scale'}\n \n with open(args.comout, 'w') as f:\n for key in keys:\n if key in cmd:\n # newline required by mcplot:\n if key in ('mesh', 'legend', 'scale', 'label'):\n f.write(\"\\n\")\n f.write(\"%s %s \" % (key,\" \".join(str(e) for e in cmd[key]),))\n f.write(\"\\n\")", "def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])", "def main():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n add_common_snmp_args(parser)\n parser.add_argument(\n \"-w\",\n \"--warning\",\n type=int,\n default=70,\n help=\"Warning memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-c\",\n \"--critical\",\n type=int,\n default=90,\n help=\"Critical memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-f\",\n \"--family\",\n required=True,\n help=\"Switch family. 
Supported families: 1910, 1920, 1920S\",\n )\n\n config = vars(parser.parse_args())\n check_snmp_args(config)\n check_thresholds(config)\n\n dataset = {}\n\n if config[\"family\"] == \"1920S\":\n cpu = ObjectType(\n ObjectIdentity(\n \"HP-SWITCHING-MIB\", \"agentSwitchCpuProcessTotalUtilization\", 0\n )\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n cpu = ObjectType(\n ObjectIdentity(\"HH3C-ENTITY-EXT-MIB\", \"hh3cEntityExtCpuUsage\", 8)\n )\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n try:\n raw_data = get_snmp_data(config, cpu)\n except ValueError as err:\n unknown_exit(SERVICE, err)\n add_vars_to_dataset(dataset, raw_data)\n\n if config[\"family\"] == \"1920S\":\n dataset[\"cpu_usage\"] = get_hp_cpu_usage(\n dataset[\"agentSwitchCpuProcessTotalUtilization\"]\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n dataset[\"cpu_usage\"] = int(dataset[\"hh3cEntityExtCpuUsage\"])\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n state, message = generate_output(config, dataset)\n report(state, message)", "def show_mem(cmd, cnt, args):\n if cpu is None:\n log(\"Load program first\") \n return\n elif len(cpu.memory) == 0:\n log(\"Load program first\") \n return \n chunk = 0\n chunk_count = len(cpu.memory)\n while chunk < chunk_count: \n chunk_start = cpu.memory[chunk][MEMADDR]\n chunk_end = chunk_start + cpu.memory[chunk][MEMSIZE] \n log(\"{:d} {:#x}..{:#x}\".format(chunk, chunk_start, chunk_end)) \n chunk += 1\n if machine == \"ARM\":\n if len(cpu.high_memory) != 0:\n log(\"High memory\")\n for addr in sorted(cpu.high_memory):\n log(\"{:#x}\".format(addr))", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def print_usage():\n usage_msg = \"\"\"\n%s.py -H <host or group> -P <path> -M <mode>\n\nUsage:\n -h, --help\n Print detailed help screen\n -H, --hostname=STRING\n Host name or group of hosts\n -V, --version\n Print version information\n -P, --path=STRING\n Path to rancid var directory. Usually the dir contains a logs dirs and hostgroup dirs\n Example : /usr/local/rancid/var\n -M, --mod=STRING\n Plugin mod. Must be one of the following : ping, hash, config, cards, filter, qos\n *ping:\n Check if all host in the hostgroup are up from the rancid point of view.\n It uses the .up file to determine the lists of host to look for\n *hash:\n Check if the firmware hash is different from the ref one (or from the previous one)\n *config:\n Check if the configuration has changed for the host / group (notify diff)\n *cards:\n Specific to 8600 models. Check the hardware cards plugged to the host (notify diff).\n *filter:\n Specific to ES-470. Check the filters (notify diff)\n *qos:\n Specific to ES-470. 
Check the qos values (notify diff)\n -u, --url=URL\n URL to submit passive results to Shinken Receiver with HTTP\n Need a host and service to send result.\n -a, --passive-host=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n -b, --passive-service=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n\"\"\" % PLUGIN_NAME\n print usage_msg", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-t\", \"--test\", dest=\"test\", type=\"string\",\n help=\"supply help\")\n\n parser.add_option(\"--method\", dest=\"method\", type=\"choice\",\n choices=(\"metrics\", \"summary\", \"module_summary\"),\n help=\"method to summarise clustering\")\n\n parser.add_option(\"--ref-gtf-files\", dest=\"ref_gtf\", type=\"string\",\n help=\"comma separated list of reference gtf files\")\n\n # add common options (-h/--help, ...) and parse command line\n (options, args) = E.Start(parser, argv=argv)\n\n if options.method == \"metrics\":\n infile = argv[-1]\n E.info(\"loading input file: %s\" % infile)\n assert infile\n\n df = pd.read_table(infile,\n sep=\"\\t\",\n header=None,\n index_col=0)\n\n df = df.ix[:, :50]\n cluster_combs = (x for x in itertools.combinations(df.columns,\n 2))\n genes = df.index\n results_dict = {}\n all_clusts = {}\n\n E.info(\"setting up cluster containers\")\n for i in df.columns:\n clusters = set(df[i].values.tolist())\n cluster_dict = {}\n for clust in clusters:\n cluster_dict[clust] = []\n for gene in genes:\n cluster_dict[df[i][gene]].append(gene)\n\n for col in clusters:\n col_set = set()\n clust_col = cluster_dict[col]\n gene_members = itertools.combinations(clust_col,\n 2)\n col_set.update(gene_members)\n cluster_dict[col] = col_set\n all_clusts[i] = cluster_dict\n E.info(\"generating all pair-wise cluster comparisons\")\n E.info(\"calculating adjusted mutual information\")\n for k in cluster_combs:\n clusters1 = all_clusts[k[0]]\n clusters2 = all_clusts[k[1]]\n metric_dict = {}\n metric_dict['AMI'] = TS.adjustedMutualInformation(clusters1,\n clusters2)\n results_dict[k] = metric_dict\n\n res_frame = pd.DataFrame(results_dict).T\n res_frame = res_frame.reset_index()\n res_frame.drop(['level_0'], inplace=True, axis=1)\n res_frame.drop(['level_1'], inplace=True, axis=1)\n\n # flatten rand indices and add to output dataframe\n rand_arrays = TS.randIndexes(df)\n flat_adj_rand = TS.unravel_arrays(rand_arrays[0])\n flat_rand = TS.unravel_arrays(rand_arrays[1])\n res_frame['Rand_Index'] = flat_rand\n res_frame['Adjusted_Rand_Index'] = flat_adj_rand\n E.info(\"aggregating results\")\n\n res_frame.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"summary\":\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n file_dict = {}\n for fle in list_of_files:\n fname = fle.split(\"/\")[-1]\n condition = fname.split(\"-\")[0]\n ref = fname.split(\"-\")[1]\n df_ = pd.read_table(fle,\n sep=\"\\t\",\n header=0,\n index_col=0)\n df_.columns = ['gene_id', 'cluster']\n clust_dict = {}\n for idx in df_.index:\n cluster = df_.loc[idx]['cluster']\n gene = df_.loc[idx]['gene_id']\n try:\n clust_dict[cluster] += 1\n except KeyError:\n clust_dict[cluster] = 1\n med_size = np.median(clust_dict.values())\n file_dict[fname] = {'condition': condition,\n 'reference': ref,\n 'median_cluster_size': med_size}\n\n outframe = pd.DataFrame(file_dict).T\n 
outframe.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"module_summary\":\n # get lncRNA/gene lengths from reference gtfs\n ref_gtfs = options.ref_gtf.split(\",\")\n length_dict = {}\n for ref in ref_gtfs:\n oref = IOTools.openFile(ref, \"rb\")\n git = GTF.transcript_iterator(GTF.iterator(oref))\n for gene in git:\n for trans in gene:\n length = trans.end - trans.start\n try:\n length_dict[trans.gene_id] += length\n except KeyError:\n length_dict[trans.gene_id] = length\n oref.close()\n\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n fdfs = []\n for fle in list_of_files:\n cond = fle.split(\"/\")[-1].split(\"-\")[0]\n refer = fle.split(\"/\")[-1].split(\"-\")[1]\n _df = pd.read_table(fle, sep=\"\\t\",\n header=0, index_col=0)\n _df.columns = ['gene_id', 'cluster']\n clusters = set(_df['cluster'])\n c_dict = {}\n # summarize over each cluster\n for clust in clusters:\n lengths = []\n c_df = _df[_df['cluster'] == clust]\n for lid in c_df['gene_id']:\n lengths.append(length_dict[lid])\n c_dict[clust] = {'cluster_size': len(c_df['gene_id']),\n 'mean_length': np.mean(lengths),\n 'index': (cond, refer),\n 'module': clust}\n cdf = pd.DataFrame(c_dict).T\n # use a multindex for hierarchical indexing\n midx = pd.MultiIndex.from_tuples(cdf['index'])\n cdf.index = midx\n cdf.drop(['index'], inplace=True, axis=1)\n fdfs.append(cdf)\n\n # generate a single output df\n s_df = fdfs[0]\n fdfs.pop(0)\n for df in fdfs:\n s_df = s_df.append(df)\n\n s_df.to_csv(options.stdout,\n index_label=(\"condition\", \"reference\"),\n sep=\"\\t\")\n\n # write footer and output benchmark information.\n E.Stop()", "def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. 
GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")", "def metrics_nic(cmd_ctx, cpc, partition, nic, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_nic(cmd_ctx, cpc, partition, nic, options))", "def usage(progname):\n \n sys.stderr.write(\"Usage: \" +progname + \" [-cmnv] [-z score] \"\n \" <outdir>\\n\")\n sys.stderr.write(' -c class level not fold level evaluation\\n')\n sys.stderr.write(' -m read multiquery file on stdin\\n')\n sys.stderr.write(' -n negate scores (so that most -ve is best)\\n')\n sys.stderr.write(' -v verbose messages to stderr\\n')\n sys.stderr.write(' -z score : assign identifiers not present in the output a score of score\\n')\n sys.exit(1)", "def usage(msg):\n ap.print_usage()\n print \"-\"*40\n print msg\n exit(1)", "def help_opt(self):\n print(OPTIONS)", "def usage(msgarg):\n if msgarg:\n sys.stderr.write(\"error: %s\\n\" % msgarg)\n print(\"\"\"\\\n usage: %s [options]\n\n options:\n -d increase debug msg verbosity level\n -c N emit N classes (def: 500) per instances\n -I N emit N instances\n\n \"\"\" % os.path.basename(sys.argv[0]))\n sys.exit(1)", "def gather_info_and_display():\n # Obtain total rss displayed in memory.stat for each group,\n # container and service.\n try:\n output_mem = pipe_command(GREP_CMD, AWK_CMD, cwd=MEMPATH)\n LOG.debug(\n 'command: %s\\n%s',\n \"grep -rs total_rss '/sys/fs/cgroup/memory/' \"\n \"| awk '$2>0{print$0}' \",\n output_mem)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n mem_info = get_meminfo()\n pt_groups = gather_groups_memory(output_mem)\n pt_cont = gather_containers_memory(output_mem)\n pt_serv = sys_service_memory()\n\n # Dump the tables out\n print('\\nPer groups memory usage:')\n\n # Get string to be printed and create list of elements separated by \\n\n list_of_table_lines = pt_groups.get_string().split('\\n')\n\n # Use the first line (+---+-- ...) 
as horizontal rule to insert later\n horizontal_line = list_of_table_lines[0]\n\n # Print the table, except last two lines ( \"Total\" row + final separator).\n print(\"\\n\".join(list_of_table_lines[:-2]))\n # Print separator, and finally the \"Total\" row.\n print(horizontal_line)\n print(\"\\n\".join(list_of_table_lines[-2:]))\n\n pt_namespc = prettytable.PrettyTable(\n ['Namespace',\n 'Resident Set Size (MiB)',\n ], caching=False)\n pt_namespc.align = 'l'\n pt_namespc.align['Resident Set Size (MiB)'] = 'r'\n\n print('\\nPer namespace memory usage:')\n for n_s in MEMORY['namespaces']:\n pt_namespc.add_row(\n [n_s,\n MEMORY['namespaces'][n_s],\n ])\n print(pt_namespc)\n\n print('\\nPer container memory usage:')\n print(pt_cont)\n\n print('\\nPer service memory usage:')\n print(pt_serv)\n\n base_mebib = 0.0\n k8s_system = 0.0\n k8s_addon = 0.0\n platform_memory_percent = 0.0\n\n # Calculate base memory usage (i.e., normal memory, exclude K8S and VMs)\n # e.g., docker, system.slice, user.slice\n for group in MEMORY['cgroups']:\n if group in BASE_GROUPS:\n base_mebib += float(MEMORY['cgroups'][group])\n\n # K8S platform system usage (essential) and addons usage (non-essential)\n for n_s in MEMORY['namespaces']:\n if n_s in K8S_NAMESPACE_SYSTEM:\n k8s_system += MEMORY['namespaces'][n_s]\n elif n_s in K8S_NAMESPACE_ADDON:\n k8s_addon += MEMORY['namespaces'][n_s]\n\n # Calculate platform memory usage\n platform_mebib = base_mebib + k8s_system\n\n anon_mebib = float(mem_to_mebibytes(\n mem_info['Active(anon)'] + mem_info['Inactive(anon)'])) * KBYTE\n avail_mebib = float(mem_to_mebibytes(\n mem_info['MemAvailable'])) * KBYTE\n total_mebib = float(anon_mebib + avail_mebib)\n\n anon_percent = py2_round(100 * anon_mebib / total_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n reserved_mebib = get_platform_reserved_memory()\n # Calculate platform memory in terms of percent reserved\n if reserved_mebib > 0.0:\n platform_memory_percent = py2_round(\n 100 * platform_mebib / reserved_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n pt_platf = prettytable.PrettyTable(\n ['Reserved',\n 'Platform',\n 'Base',\n 'K8s Platform system',\n 'k8s-addon'\n ], caching=False)\n pt_platf.align = 'l'\n\n pt_platf.add_row(\n [reserved_mebib,\n '{} ({}%)'.format(platform_mebib, platform_memory_percent),\n base_mebib,\n k8s_system,\n k8s_addon\n ])\n print('\\nPlatform memory usage in MiB:')\n print(pt_platf)\n\n pt_4k = prettytable.PrettyTable(\n ['Anon',\n 'Cgroup-rss',\n 'Available',\n 'Total'\n ], caching=False)\n pt_4k.align = 'l'\n\n pt_4k.add_row(\n ['{} ({}%)'.format(anon_mebib, anon_percent),\n MEMORY['cgroups']['total_rss'],\n avail_mebib,\n total_mebib\n ])\n\n print('\\n4K memory usage in MiB:')\n print(pt_4k)\n\n return 0", "def usage():\n \n print '-b <bench> the bench to show.'\n print '-c <config> the config to show (GPU, 8888, 565, etc).'\n print '-d <dir> a directory containing bench_r<revision>_<scalar> files.'\n print '-e <file> file containing expected bench values/ranges.'\n print ' Will raise exception if actual bench values are out of range.'\n print ' See bench_expectations.txt for data format and examples.'\n print '-f <revision>[:<revision>] the revisions to use for fitting.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-i <time> the time to ignore (w, c, g, etc).'\n print ' The flag is ignored when -t is set; otherwise we plot all the'\n print ' times except the one specified here.'\n print '-l <title> title to use for the output graph'\n print '-m 
<representation> representation of bench value.'\n print ' See _ListAlgorithm class in bench_util.py.'\n print '-o <path> path to which to write output; writes to stdout if not specified'\n print '-r <revision>[:<revision>] the revisions to show.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).'\n print '-t <time> the time to show (w, c, g, etc).'\n print '-x <int> the desired width of the svg.'\n print '-y <int> the desired height of the svg.'\n print '--default-setting <setting>[=<value>] setting for those without.'", "def metrics_lpar(cmd_ctx, cpc, lpar, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_lpar(cmd_ctx, cpc, lpar, options))", "def explainerdashboard_cli(ctx):", "def collect_usage_pieces(self, ctx):\n pieces = super(ProfilingCommand, self).collect_usage_pieces(ctx)\n assert pieces[-1] == '[ARGV]...'\n pieces.insert(-1, '[--]')\n return pieces", "def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m", "def _GenAppcommandsUsage(cmd, printer):\n # pylint: disable=too-many-arguments,unused-argument\n def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n \"\"\"A replacement for app.usage.\"\"\"\n printer('%s: Incorrect usage; details below.' % show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)\n\n return Usage", "def capacitygroup_list(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_capacitygroup_list(cmd_ctx, cpc, options))", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def dicom_cli():", "def print_help_info(self, global_options):\r\n usage = ['',\"Type '%s help <subcommand>' for help on a specific subcommand.\" % self.prog_name,'']\r\n usage.append('Available subcommands:')\r\n commands = self.get_commands(global_options).keys()\r\n commands.sort()\r\n for cmd in commands:\r\n usage.append(' %s' % cmd)\r\n return '\\n'.join(usage)", "def help(self):\n msg = \"`%s' performs the computational aspects of genotyping-by-sequencing.\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Usage: %s [OPTIONS] ...\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Options:\\n\"\n msg += \" -h, --help\\tdisplay the help and exit\\n\"\n msg += \" -V, --version\\toutput version information and exit\\n\"\n msg += \" -v, --verbose\\tverbosity level (0/default=1/2/3)\\n\"\n msg += \" 
--proj1\\tname of the project used for steps 1 to 4\\n\"\n msg += \"\\t\\tmention a reference genome only if all samples belong to\\n\"\n msg += \"\\t\\t the same species, and will be mapped to the same ref genome\\n\"\n msg += \" --proj2\\tname of the project used for steps 4 to 8\\n\"\n msg += \"\\t\\tcan be the same as --proj1, or can be different\\n\"\n msg +=\"\\t\\t notably when samples come from different species\\n\"\n msg += \"\\t\\t or if one wants to align reads to different ref genomes\\n\"\n msg += \" --schdlr\\tname of the cluster scheduler (default=SGE)\\n\"\n msg += \" --queue\\tname of the cluster queue (default=normal.q)\\n\"\n msg += \" --resou\\tcluster resources (e.g. 'test' for 'qsub -l test')\\n\"\n msg += \" --rmvb\\tremove bash scripts for jobs launched in parallel\\n\"\n msg += \" --step\\tstep to perform (1/2/3/.../9)\\n\"\n msg += \"\\t\\t1: raw read quality per lane (with FastQC v >= 0.11.2)\\n\"\n msg += \"\\t\\t2: demultiplexing per lane (with demultiplex.py v >= 1.14.0)\\n\"\n msg += \"\\t\\t3: cleaning per sample (with CutAdapt v >= 1.8)\\n\"\n msg += \"\\t\\t4: alignment per sample (with BWA MEM v >= 0.7.12, Samtools v >= 1.3, Picard and R v >= 3)\\n\"\n msg += \"\\t\\t5: local realignment per sample (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t6: local realignment per genotype (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t7: variant and genotype calling per genotype (with GATK HaplotypeCaller v >= 3.5)\\n\"\n msg += \"\\t\\t8: variant and genotype calling jointly across genotypes (with GATK GenotypeGVCFs v >= 3.5)\\n\"\n msg += \"\\t\\t9: variant and genotype filtering (with GATK v >= 3.5)\\n\"\n msg += \" --samples\\tpath to the 'samples' file\\n\"\n msg += \"\\t\\tcompulsory for all steps, but can differ between steps\\n\"\n msg += \"\\t\\t e.g. if samples come from different species or are aligned\\n\"\n msg += \"\\t\\t on different ref genomes, different samples file should\\n\"\n msg += \"\\t\\t be used for steps 4-9, representing different subsets of\\n\"\n msg += \"\\t\\t the file used for steps 1-3\\n\"\n msg += \"\\t\\tthe file should be encoded in ASCII\\n\"\n msg += \"\\t\\tthe first row should be a header with column names\\n\"\n msg += \"\\t\\teach 'sample' (see details below) should have one and only one row\\n\"\n msg += \"\\t\\tany two columns should be separated with one tabulation\\n\"\n msg += \"\\t\\tcolumns can be in any order\\n\"\n msg += \"\\t\\trows starting by '#' are skipped\\n\"\n msg += \"\\t\\t12 columns are compulsory (but there can be more):\\n\"\n msg += \"\\t\\t genotype (see details below, e.g. 'Col-0', but use neither underscore '_' nor space ' ' nor dot '.', use dash '-' instead)\\n\"\n msg += \"\\t\\t ref_genome (identifier of the reference genome used for alignment, e.g. 'Atha_v2', but use neither space ' ' nor dot '.'; the full species name, e.g. 'Arabidopsis thaliana', will be present in the file given to --dict)\\n\"\n msg += \"\\t\\t library (e.g. can be the same as 'genotype')\\n\"\n msg += \"\\t\\t barcode (e.g. 'ATGG')\\n\"\n msg += \"\\t\\t seq_center (e.g. 'Broad Institute', 'GenoToul', etc)\\n\"\n msg += \"\\t\\t seq_platform (e.g. 'ILLUMINA', see SAM format specification)\\n\"\n msg += \"\\t\\t seq_platform_model (e.g. 'HiSeq 2000')\\n\"\n msg += \"\\t\\t flowcell (e.g. 'C5YMDACXX')\\n\"\n msg += \"\\t\\t lane (e.g. '3', can be '31' if a first demultiplexing was done per index)\\n\"\n msg += \"\\t\\t date (e.g. 
'2015-01-15', see SAM format specification)\\n\"\n msg += \"\\t\\t fastq_file_R1 (filename, one per lane, gzip-compressed)\\n\"\n msg += \"\\t\\t fastq_file_R2 (filename, one per lane, gzip-compressed)\\n\"\n msg += \" --fcln\\tidentifier of a flowcell and lane number\\n\"\n msg += \"\\t\\tformat as <flowcell>_<lane-number>, e.g. 'C5YMDACXX_1'\\n\"\n msg += \"\\t\\tif set, only the samples from this lane will be analyzed\\n\"\n msg += \" --pird\\tpath to the input reads directory\\n\"\n msg += \"\\t\\tcompulsory for steps 1 and 2\\n\"\n msg += \"\\t\\twill be added to the columns 'fastq_file_R*' from the sample file\\n\"\n msg += \"\\t\\tif not set, input read files should be in current directory\\n\"\n msg += \" --enz\\tname of the restriction enzyme\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=ApeKI\\n\"\n msg += \" --dmxmet\\tmethod used to demultiplex\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=4c (see the help of demultiplex.py to know more)\\n\"\n msg += \" --subst\\tnumber of substitutions allowed during demultiplexing\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=2\\n\"\n msg += \" --ensubst\\tenforce the nb of substitutions allowed\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=lenient/strict\\n\"\n msg += \" --adp\\tpath to the file containing the adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tsame format as FastQC: name<tab>sequence\\n\"\n msg += \"\\t\\tname: at least 'adpR1' (also 'adpR2' if paired-end)\\n\"\n msg += \"\\t\\tsequence: from 5' (left) to 3' (right)\\n\"\n msg += \" --errtol\\terror tolerance to find adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --minovl\\tminimum overlap length between reads and adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=3 (in bases)\\n\"\n msg += \" --minrl\\tminimum length to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=35 (in bases)\\n\"\n msg += \" --minq\\tminimum quality to trim a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=20 (used for both reads if paired-end)\\n\"\n msg += \" --maxNp\\tmaximum percentage of N to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --ref\\tpath to the prefix of files for the reference genome\\n\"\n msg += \"\\t\\tcompulsory for steps 4, 5, 6, 7, 8, 9\\n\"\n msg += \"\\t\\tshould correspond to the 'ref_genome' column in --samples\\n\"\n msg += \"\\t\\te.g. '/data/Atha_v2' for '/data/Atha_v2.fa', '/data/Atha_v2.bwt', etc\\n\"\n msg += \"\\t\\tthese files are produced via 'bwa index ...'\\n\"\n msg += \" --dict\\tpath to the 'dict' file (SAM header with @SQ tags)\\n\"\n msg += \"\\t\\tcompulsory for step 4\\n\"\n msg += \"\\t\\tsee 'CreateSequenceDictionary' in the Picard software\\n\"\n msg += \" --jgid\\tcohort identifier to use for joint genotyping\\n\"\n msg += \"\\t\\tcompulsory for steps 8, 9\\n\"\n msg += \"\\t\\tuseful to launch several, different cohorts in parallel\\n\"\n msg += \" --rat\\trestrict alleles to be of a particular allelicity\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdefault=ALL/BIALLELIC/MULTIALLELIC\\n\"\n msg += \"\\t\\tsee '--restrictAllelesTo' in GATK's SelectVariant\\n\"\n msg += \" --mdp\\tminimum value for DP (read depth; e.g. 
10)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mgq\\tminimum value for GQ (genotype quality; e.g. 20)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mnfg\\tmaximum number of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mffg\\tmaximum fraction of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFractionFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mnnc\\tmaximum number of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \" --mfnc\\tmaximum fraction of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxNOCALLfraction' in GATK's SelectVariants\\n\"\n msg += \" --fam\\tpath to the file containing pedigree information\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdiscard variants with Mendelian violations (see Semler et al, 2012)\\n\"\n msg += \"\\t\\tshould be in the 'fam' format specified by PLINK\\n\"\n msg += \"\\t\\tvalidation strictness (GATK '-pedValidationType') is set at 'SILENT'\\n\"\n msg += \"\\t\\t allowing some samples to be absent from the pedigree\\n\"\n msg += \" --mvq\\tminimum GQ for each trio member to accept a variant as a Mendelian violation\\n\"\n msg += \"\\t\\tused in step 9 if '--fam' is specified\\n\"\n msg += \"\\t\\tdefault=0\\n\"\n msg += \" --xlssf\\tpath to the file with genotypes to exclude\\n\"\n msg += \"\\t\\tused in step 9 (can be especially useful if '--fam' is specified)\\n\"\n msg += \" --tmpd\\tpath to a temporary directory on child nodes (default=.)\\n\"\n msg += \"\\t\\te.g. it can be /tmp or /scratch\\n\"\n msg += \"\\t\\tused in step 4 for 'samtools sort'\\n\"\n msg += \"\\t\\tused in step 7 for 'GATK HaplotypeCaller'\\n\"\n msg += \" --jvmXms\\tinitial memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=512m (can also be specified as 1024k, 1g, etc)\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --jvmXmx\\tmaximum memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=4g\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --queue2\\tname of the second cluster queue (default=bigmem.q)\\n\"\n msg += \"\\t\\tused in step 4 for Picard to collect insert sizes\\n\"\n msg += \" --knowni\\tpath to a VCF file with known indels (for local realignment)\\n\"\n msg += \" --known\\tpath to a VCF file with known variants (e.g. from dbSNP)\\n\"\n msg += \" --force\\tforce to re-run step(s)\\n\"\n msg += \"\\t\\tthis removes without warning the step directory if it exists\\n\"\n msg += \"\\n\"\n msg += \"Examples:\\n\"\n msg += \" %s --step 1 --samples samples.txt\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Details:\\n\"\n msg += \"This program aims at genotyping a set of 'genotypes' using data from\\n\"\n msg += \"a restriction-assisted DNA sequencing (RAD-seq) experiment, also known\\n\"\n msg += \"as a genotyping-by-sequencing (GBS) experiment.\\n\"\n msg += \"Here, by 'genotype', we mean the entity which is the focus of the\\n\"\n msg += \"study. 
For instance, it can be a plant variety (or a human being), or\\n\"\n msg += \"the specific clone of a given plant variety (or a specific tumor of a\\n\"\n msg += \"given human being), etc.\\n\"\n msg += \"Importantly, note that the content of the 'genotype' column will\\n\"\n msg += \"be used to set the 'SM' (sample) tag of the 'RG' (read group) header\\n\"\n msg += \"record type of the SAM format (see http://www.htslib.org/). However,\\n\"\n msg += \"internal to this program, the term 'sample' corresponds to the unique\\n\"\n msg += \"quadruplet (genotype,flowcell,lane,barcode) for steps 1 and 2, and to\\n\"\n msg += \"the unique triplet (genotype,flowcell,lane) for the others.\\n\"\n msg += \"Jobs are executed in parallel (--schdlr). Their return status is\\n\"\n msg += \"recorded in a SQLite database which is removed at the end. If a job\\n\"\n msg += \"fails, the whole script stops with an error.\\n\"\n msg += \"\\n\"\n msg += \"Dependencies:\\n\"\n msg += \"Python >= 2.7; Biopython; pyutilstimflutre >= 0.5\\n\"\n msg += \"\\n\"\n msg += \"Report bugs to <[email protected]>.\"\n print(msg); sys.stdout.flush()", "def HMC_Help():\n os.system(\"cls\")\n while True:\n print((\"\\n\\n\",\"Help\".center(50)))\n print_list = [\"ManagedSystem\",\"LogicalPartition\",\"VirtualIOServer\",\"Cluster\",\"Performance Capaity Monitoring\",\"Return to Main Menu\"]\n choice = int(print_obj.print_on_screen(print_list))\n directory = os.path.dirname(os.path.dirname(__file__))\n if choice == 1:\n path = directory+\"/help/ManagedSystem\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 2:\n path = directory+\"/help/LogicalPartition\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 3:\n path = directory+\"/help/VirtualIOServer\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 4:\n print((open(directory+\"/help/Cluster.txt\").read()))\n elif choice == 5:\n print((open(directory+\"/help/PerformanceCapacityMonitoring.txt\").read()))\n elif choice == 6:\n os.system(\"cls\")\n return\n else:\n print(\"\\nTry using Valid option\")\n back_to_menu()", "def fusion_generate_mmmc_script(x: hammer_vlsi.HammerTool) -> str:\n mmmc_output = [] # type: List[str]\n\n def append_mmmc(cmd: str) -> None:\n x.verbose_tcl_append(cmd, mmmc_output)\n\n # Create an Innovus constraint mode.\n constraint_mode = \"my_constraint_mode\"\n sdc_files = [] # type: List[str]\n\n # Generate constraints\n clock_constraints_fragment = os.path.join(x.run_dir, \"clock_constraints_fragment.sdc\")\n with open(clock_constraints_fragment, \"w\") as f:\n f.write(x.sdc_clock_constraints)\n sdc_files.append(clock_constraints_fragment)\n\n # Generate port constraints.\n pin_constraints_fragment = os.path.join(x.run_dir, \"pin_constraints_fragment.sdc\")\n with open(pin_constraints_fragment, \"w\") as f:\n f.write(x.sdc_pin_constraints)\n sdc_files.append(pin_constraints_fragment)\n\n # Add the post-synthesis SDC, if present.\n post_synth_sdc = x.post_synth_sdc\n if post_synth_sdc is not None:\n sdc_files.append(post_synth_sdc)\n\n # TODO: add floorplanning SDC\n if len(sdc_files) > 0:\n sdc_files_arg = \"-sdc_files [list {sdc_files}]\".format(\n sdc_files=\" \".join(sdc_files)\n )\n else:\n blank_sdc = os.path.join(x.run_dir, 
\"blank.sdc\")\n x.run_executable([\"touch\", blank_sdc])\n sdc_files_arg = \"-sdc_files {{ {} }}\".format(blank_sdc)\n append_mmmc(\"create_constraint_mode -name {name} {sdc_files_arg}\".format(\n name=constraint_mode,\n sdc_files_arg=sdc_files_arg\n ))\n\n corners = x.get_mmmc_corners() # type: List[MMMCCorner]\n # In parallel, create the delay corners\n if corners:\n setup_corner = corners[0] # type: MMMCCorner\n hold_corner = corners[0] # type: MMMCCorner\n pwr_corner = corners[0] # type: MMMCCorner\n # TODO(colins): handle more than one corner and do something with extra corners\n for corner in corners:\n if corner.type is MMMCCornerType.Setup:\n setup_corner = corner\n if corner.type is MMMCCornerType.Hold:\n hold_corner = corner\n if corner.type is MMMCCornerType.Extra:\n pwr_corner = corner\n\n # First, create Innovus library sets\n append_mmmc(\"create_library_set -name {name} -timing [list {list}]\".format(\n name=\"{n}.setup_set\".format(n=setup_corner.name),\n list=x.get_timing_libs(setup_corner)\n ))\n append_mmmc(\"create_library_set -name {name} -timing [list {list}]\".format(\n name=\"{n}.hold_set\".format(n=hold_corner.name),\n list=x.get_timing_libs(hold_corner)\n ))\n append_mmmc(\"create_library_set -name {name} -timing [list {list}]\".format(\n name=\"{n}.pwr_set\".format(n=pwr_corner.name),\n list=x.get_timing_libs(pwr_corner)\n ))\n # Skip opconds for now\n # Next, create Innovus timing conditions\n append_mmmc(\"create_timing_condition -name {name} -library_sets [list {list}]\".format(\n name=\"{n}.setup_cond\".format(n=setup_corner.name),\n list=\"{n}.setup_set\".format(n=setup_corner.name)\n ))\n append_mmmc(\"create_timing_condition -name {name} -library_sets [list {list}]\".format(\n name=\"{n}.hold_cond\".format(n=hold_corner.name),\n list=\"{n}.hold_set\".format(n=hold_corner.name)\n ))\n append_mmmc(\"create_timing_condition -name {name} -library_sets [list {list}]\".format(\n name=\"{n}.pwr_cond\".format(n=pwr_corner.name),\n list=\"{n}.pwr_set\".format(n=pwr_corner.name)\n ))\n # Next, create Innovus rc corners from qrc tech files\n append_mmmc(\"create_rc_corner -name {name} -temperature {tempInCelsius} {qrc}\".format(\n name=\"{n}.setup_rc\".format(n=setup_corner.name),\n tempInCelsius=str(setup_corner.temp.value),\n qrc=\"-qrc_tech {}\".format(x.get_mmmc_qrc(setup_corner)) if x.get_mmmc_qrc(setup_corner) != '' else ''\n ))\n append_mmmc(\"create_rc_corner -name {name} -temperature {tempInCelsius} {qrc}\".format(\n name=\"{n}.hold_rc\".format(n=hold_corner.name),\n tempInCelsius=str(hold_corner.temp.value),\n qrc=\"-qrc_tech {}\".format(x.get_mmmc_qrc(hold_corner)) if x.get_mmmc_qrc(hold_corner) != '' else ''\n ))\n append_mmmc(\"create_rc_corner -name {name} -temperature {tempInCelsius} {qrc}\".format(\n name=\"{n}.pwr_rc\".format(n=pwr_corner.name),\n tempInCelsius=str(pwr_corner.temp.value),\n qrc=\"-qrc_tech {}\".format(x.get_mmmc_qrc(pwr_corner)) if x.get_mmmc_qrc(pwr_corner) != '' else ''\n ))\n # Next, create an Innovus delay corner.\n append_mmmc(\n \"create_delay_corner -name {name}_delay -timing_condition {name}_cond -rc_corner {name}_rc\".format(\n name=\"{n}.setup\".format(n=setup_corner.name)\n ))\n append_mmmc(\n \"create_delay_corner -name {name}_delay -timing_condition {name}_cond -rc_corner {name}_rc\".format(\n name=\"{n}.hold\".format(n=hold_corner.name)\n ))\n append_mmmc(\n \"create_delay_corner -name {name}_delay -timing_condition {name}_cond -rc_corner {name}_rc\".format(\n name=\"{n}.pwr\".format(n=pwr_corner.name)\n ))\n # Next, 
create the analysis views\n append_mmmc(\"create_analysis_view -name {name}_view -delay_corner {name}_delay -constraint_mode {constraint}\".format(\n name=\"{n}.setup\".format(n=setup_corner.name), constraint=constraint_mode))\n append_mmmc(\"create_analysis_view -name {name}_view -delay_corner {name}_delay -constraint_mode {constraint}\".format(\n name=\"{n}.hold\".format(n=hold_corner.name), constraint=constraint_mode))\n append_mmmc(\"create_analysis_view -name {name}_view -delay_corner {name}_delay -constraint_mode {constraint}\".format(\n name=\"{n}.pwr\".format(n=pwr_corner.name), constraint=constraint_mode))\n # Finally, apply the analysis view.\n append_mmmc(\"set_analysis_view -setup {{ {setup_view} }} -hold {{ {hold_view} }} -leakage {{ {pwr_view} }} -dynamic {{ {pwr_view} }}\".format(\n setup_view=\"{n}.setup_view\".format(n=setup_corner.name),\n hold_view=\"{n}.hold_view\".format(n=hold_corner.name),\n pwr_view=\"{n}.pwr_view\".format(n=pwr_corner.name)\n ))\n else:\n # First, create an Innovus library set.\n library_set_name = \"my_lib_set\"\n append_mmmc(\"create_library_set -name {name} -timing [list {list}]\".format(\n name=library_set_name,\n list=x.get_timing_libs()\n ))\n # Next, create an Innovus timing condition.\n timing_condition_name = \"my_timing_condition\"\n append_mmmc(\"create_timing_condition -name {name} -library_sets [list {list}]\".format(\n name=timing_condition_name,\n list=library_set_name\n ))\n # extra junk: -opcond ...\n rc_corner_name = \"rc_cond\"\n append_mmmc(\"create_rc_corner -name {name} -temperature {tempInCelsius} {qrc}\".format(\n name=rc_corner_name,\n tempInCelsius=120, # TODO: this should come from tech config\n qrc=\"-qrc_tech {}\".format(x.get_qrc_tech()) if x.get_qrc_tech() != '' else ''\n ))\n # Next, create an Innovus delay corner.\n delay_corner_name = \"my_delay_corner\"\n append_mmmc(\n \"create_delay_corner -name {name} -timing_condition {timing_cond} -rc_corner {rc}\".format(\n name=delay_corner_name,\n timing_cond=timing_condition_name,\n rc=rc_corner_name\n ))\n # extra junk: -rc_corner my_rc_corner_maybe_worst\n # Next, create an Innovus analysis view.\n analysis_view_name = \"my_view\"\n append_mmmc(\"create_analysis_view -name {name} -delay_corner {corner} -constraint_mode {constraint}\".format(\n name=analysis_view_name, corner=delay_corner_name, constraint=constraint_mode))\n # Finally, apply the analysis view.\n # TODO: introduce different views of setup/hold and true multi-corner\n append_mmmc(\"set_analysis_view -setup {{ {setup_view} }} -hold {{ {hold_view} }}\".format(\n setup_view=analysis_view_name,\n hold_view=analysis_view_name\n ))\n\n return \"\\n\".join(mmmc_output)", "def cmd_help(args):", "def print_usage_command(self):\n print self.get_usage_command()", "def print_usage_command(self):\n print self.get_usage_command()", "def runMCMC(df, cents, show=False):\n if type(cents) is not list:\n cents = [cents]\n numCents = len(cents)\n p = None\n \n # Tau = the precision of the normal distribution (of the above peaks)\n taus = 1. 
/ pm.Uniform('stds', 0, 100, size=numCents)**2 # tau = 1/sigma**2\n centers = pm.Normal('centers', cents, [0.0025 for i in cents],\n size=numCents)\n \n if numCents == 2: # Assignment probability\n p = pm.Uniform('p', 0, 1)\n assignment = pm.Categorical('asisgnment', [p, 1-p],\n size=len(df.intervals))\n @pm.deterministic\n def center_i(assignment=assignment, centers=centers):\n return centers[assignment]\n @pm.deterministic\n def tau_i(assignment=assignment, taus=taus):\n return taus[assignment]\n observations = pm.Normal('obs', center_i, tau_i, value=df.intervals,\n observed=True)\n # Create the model 2 peaks\n mcmc = pm.MCMC([p, assignment, observations, taus, centers])\n \n else:\n observations = pm.Normal('obs', value=df.intervals, observed=True)\n mcmc = pm.MCMC([observations, taus, centers]) # Create model, 1 peak\n \n # Run the model\n mcmc.sample(50000)\n center_trace = mcmc.trace(\"centers\")[:]\n try:\n clusts = [center_trace[:,i] for i in range(numCents)]\n except:\n clusts = [center_trace]\n \n if show:\n for i in range(numCents):\n plt.hist(center_trace[:,i], bins=50, histtype='stepfilled',\n color=['blue', 'red'][i], alpha=0.7)\n plt.show()\n \n print('Evolved clusters at:')\n print([np.mean(c) for c in clusts])\n return clusts", "def config_pbc_md(self):\n\n self._config_md()\n self.title = \"PBC MD Simulation\"\n self.cntrl[\"cut\"] = 8.0\n self.cntrl[\"igb\"] = 0\n self.cntrl[\"iwrap\"] = 1\n self.cntrl[\"ntp\"] = 1\n self.cntrl[\"barostat\"] = 2", "def help(self, msg=None):\n\n # Print the message if given.\n if not msg == None:\n print str(msg) + \"\\n\"\n\n # Display the list of commands, in the alphabetical order.\n print \"Use one of the following commands:\"\n for action in sorted(self.actions.keys()):\n info = self.actions[action]\n joined_oblig = ' '.join(info['required'])\n if len(info['additional']) > 0:\n add = [\"<%s>\" % x for x in info['additional']]\n joined_add = '[' + ' '.join(add) + ']'\n else:\n joined_add = ''\n print \"\\t* %s %s %s\" % (action, joined_oblig, joined_add)", "def help_help(self):\n print(\"List commands or print details about a command\")", "def _usage_options_example(self):\n pass", "def process_meter_message(self, d):\n dpid = int(d.get(\"dpid\", 0))\n dp = self.dpset.get(dpid)\n if not dp:\n return \"Datapath does not exist!\"\n\n ofproto = dp.ofproto\n parser = dp.ofproto_parser\n\n command = {\n 'add': ofproto.OFPMC_ADD,\n 'mod': ofproto.OFPMC_MODIFY,\n 'del': ofproto.OFPMC_DELETE,\n }\n cmd = command.get(d[\"operation\"], ofproto.OFPMC_ADD)\n\n meter_id = d[\"meter_id\"]\n\n flags = 0\n bands = []\n if \"flags\" in d: # Ryu's format\n print(d['flags'])\n for f in d['flags']:\n flags += 0x01 if f == 'KBPS' else 0\n flags += 0x02 if f == 'PKTPS' else 0\n flags += 0x04 if f == 'BURST' else 0\n flags += 0x08 if f == 'STATS' else 0\n\n for band in d[\"bands\"]:\n if band['type'] == 'DROP':\n bands += [parser.OFPMeterBandDrop(rate=band['rate'],\n burst_size=band['burst_size'])]\n elif band['type'] == 'DSCP_REMARK':\n bands += [parser.OFPMeterBandDscpRemark(rate=band['rate'],\n burst_size=band['burst_size'], prec_level=band['prec_level'])]\n\n else: # FlowManager's format\n flags += 0x01 if d['OFPMF_KBPS'] else 0\n flags += 0x02 if d['OFPMF_PKTPS'] else 0\n flags += 0x04 if d['OFPMF_BURST'] else 0\n flags += 0x08 if d['OFPMF_STATS'] else 0\n\n # Flags must have KBPS or PKTPS\n flags = flags if (flags & 0x03) else (flags | 0x01)\n\n for band in d[\"bands\"]:\n #mtype = type_convert.get(band[0])\n if band[0] == 'DROP':\n bands += 
[parser.OFPMeterBandDrop(rate=band[1],\n burst_size=band[2])]\n elif band[0] == 'DSCP_REMARK':\n bands += [parser.OFPMeterBandDscpRemark(rate=band[1],\n burst_size=band[2], prec_level=band[3])]\n\n # TODO: catch some errors\n meter_mod = parser.OFPMeterMod(dp, cmd, flags, meter_id, bands)\n try:\n dp.send_msg(meter_mod)\n except KeyError as e:\n return e.__repr__()\n except Exception as e:\n return e.__repr__()\n\n return \"Message sent successfully.\"", "def cli(ctx):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"cluster subcommand called from cli\")", "def com_measure(state, sel=None):\n if not isinstance(state, Qobj):\n raise TypeError(\"Input must be a Qobj\")\n\n if state.type == \"ket\" or state.type == \"oper\":\n if isinstance(sel,Iterable):\n N = len(sel)\n subsystem = sel\n else:\n N = len(state.dims[0])\n subsystem = list(range(N))\n qc = QubitCircuit(N,num_cbits=N)\n for i in range(N):\n qc.add_measurement('M{}'.format(i),[i],classical_store=i)\n\n sim = CircuitSimulator(qc)\n return sim.run_statistics(state.ptrace(subsystem)).get_probabilities()\n else:\n raise ValueError(\"Invalid input state.\")", "def main():\n parser = argparse.ArgumentParser(description='show disk usage and clean directories')\n \n parser.add_argument(\"dir_to_check\")\n parser.add_argument(\"-1\",\"--pass1\", action=\"store_true\", \\\n help=\"Run pass1, that computes .du in all subdirs\")\n parser.add_argument(\"-b\", \"--both_passes\", action=\"store_true\",\\\n help=\"Run pass1, that computes .du in all subdirs,and then pass2 that is interactive\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\\\n help=\"increase output verbosity\")\n args = parser.parse_args()\n \n my_dir = TopLevelDir(args.dir_to_check)\n \n my_dir.verb = False\n if args.verbose:\n my_dir.verb = True\n if args.pass1:\n my_dir.pass1(my_dir.verb)\n if args.both_passes:\n my_dir.pass1(my_dir.verb)\n my_dir.pass2()", "def usage(self, host):", "def main():\n options = docopt(__doc__)\n\n # In case the user asked for verbose logging, increase\n # the log level to debug.\n if options[\"--verbose\"] > 0:\n logging.basicConfig(level=logging.DEBUG)\n LOGGER.setLevel(logging.DEBUG)\n\n LOGGER.debug(\n \"Received options: %s\",\n options,\n )\n\n billing_account_id = _get_billing_account_id()\n member_accounts = _get_member_accounts(\n billing_account_id=billing_account_id,\n options=options,\n )\n _flush_out(accounts=member_accounts, options=options)\n\n return 0", "def print_current_mem_usage():\n mem = get_current_mem_usage()\n output = \"# Mem usage = {} MiB #\".format(mem)\n print(\"\\n\" + \"-\" * len(output))\n print(output)\n print(\"-\" * len(output) + \"\\n\")", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def mem_info(self):\n\t\t\tavailable, total = cuda.mem_get_info() #Note: pycuda._driver.LogicError: cuMemGetInfo failed: context is destroyed\n\t\t\tprint(\"Available: %.2f GB\\nTotal: %.2f GB\"%(available/1e9, total/1e9))", "def help_command(server, output, conf):\n server.tell(output.name, 'Available commands:')\n 
for key in COMMANDS.keys():\n cmd_func = COMMANDS[key]\n if cmd_func.__doc__:\n server.tell(output.name, '%s: %s' % (key[1:], cmd_func.__doc__))\n else:\n server.tell(output.name, key[1:])\n return", "def command_help(args):\n\tprint_usage()\n\treturn 0", "def cmd_help(self, commands=None, usage=False):\n if commands:\n usage = True\n commands = {self.approx.decmd(c.lower()) for c in commands}\n rejects = commands - self.approx.keys()\n for reject in rejects:\n self.put_pretty(\"No command named %r\" % reject)\n continue\n commands -= rejects\n if self.debug:\n assert not any(self.approx.encmd(r) in self.mod_commands for\n r in rejects)\n assert all(self.approx.encmd(c) in self.mod_commands for\n c in commands)\n if not commands:\n return\n requested = zip(commands, (self.approx[c] for c in commands))\n else:\n requested = self.approx.items()\n help = znc.CTable()\n help.AddColumn(\"Command\")\n help.AddColumn(\"Usage\" if usage else \"Description\")\n from itertools import zip_longest\n #\n for command, parser in requested:\n if usage:\n upre = \"usage: %s\" % command\n rest = (parser.format_usage()\n .replace(upre, \"\", 1)\n .replace(\"[-h] \", \"\", 1))\n desc = [l.strip() for l in rest.split(\"\\n\") if l.strip()]\n else:\n desc = [parser.description]\n for line, comm in zip_longest(desc, (command,), fillvalue=\"\"):\n help.AddRow()\n help.SetCell(\"Command\", comm)\n help.SetCell(\"Usage\" if usage else \"Description\", line)\n #\n s_line = znc.String()\n strung = []\n while help.GetLine(len(strung), s_line):\n strung.append(s_line.s)\n also = \" (<command> [-h] for details)\"\n strung[1] = strung[1].replace(len(also) * \" \", also, 1)\n self.put_pretty(\"\\n\".join(strung))", "def main(argv=None):\n parser = ArgParser(\n description=\"Calculate percentiled data over a given coordinate by \"\n \"collapsing that coordinate. Typically used to convert realization \"\n \"data into percentiled data, but may calculate over any \"\n \"dimension coordinate. Alternatively, calling this CLI with a dataset\"\n \" containing probabilities will convert those to percentiles using \"\n \"the ensemble copula coupling plugin. If no particular percentiles \"\n \"are given at which to calculate values and no 'number of percentiles'\"\n \" to calculate are specified, the following defaults will be used: \"\n \"[0, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 100]\")\n parser.add_argument(\"input_filepath\", metavar=\"INPUT_FILE\",\n help=\"A path to an input NetCDF file to be processed\")\n parser.add_argument(\"output_filepath\", metavar=\"OUTPUT_FILE\",\n help=\"The output path for the processed NetCDF\")\n parser.add_argument(\"--coordinates\", metavar=\"COORDINATES_TO_COLLAPSE\",\n nargs=\"+\",\n help=\"Coordinate or coordinates over which to collapse\"\n \" data and calculate percentiles; e.g. \"\n \"'realization' or 'latitude longitude'. This argument \"\n \"must be provided when collapsing a coordinate or \"\n \"coordinates to create percentiles, but is redundant \"\n \"when converting probabilities to percentiles and may \"\n \"be omitted. 
This coordinate(s) will be removed \"\n \"and replaced by a percentile coordinate.\")\n parser.add_argument('--ecc_bounds_warning', default=False,\n action='store_true',\n help='If True, where calculated percentiles are '\n 'outside the ECC bounds range, raise a warning '\n 'rather than an exception.')\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument(\"--percentiles\", metavar=\"PERCENTILES\",\n nargs=\"+\", default=None, type=float,\n help=\"Optional definition of percentiles at which to \"\n \"calculate data, e.g. --percentiles 0 33.3 66.6 100\")\n group.add_argument('--no-of-percentiles', default=None, type=int,\n metavar='NUMBER_OF_PERCENTILES',\n help=\"Optional definition of the number of percentiles \"\n \"to be generated, these distributed regularly with the \"\n \"aim of dividing into blocks of equal probability.\")\n\n args = parser.parse_args(args=argv)\n\n # Load Cube\n cube = load_cube(args.input_filepath)\n\n # Process Cube\n result = process(cube, args.coordinates, args.ecc_bounds_warning,\n args.percentiles, args.no_of_percentiles)\n\n # Save Cube\n save_netcdf(result, args.output_filepath)", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"", "def main():\n known_args, unknown_args = parse_known_args()\n if not unknown_args:\n # return an error message if no command is provided\n sys.exit(\"Please provide a command to benchmark: $ humann_benchmark COMMAND\")\n try:\n process = subprocess.Popen(\" \".join(unknown_args),shell=True)\n except (EnvironmentError, subprocess.CalledProcessError):\n sys.exit(\"Unable to execute command: \" + \" \".join(unknown_args))\n pid=str(process.pid)\n start=time.time()\n max_memory=0\n while process.poll() is None:\n time.sleep(1)\n # while the process is running check on the memory use\n # get the pids of the main process and all children (and their children)\n pids=get_pids(pid)\n stdout=subprocess.check_output([\"ps\",\"--pid\",\",\".join(pids),\"-o\",\"pid,rss,command\"]).decode(\"utf-8\")\n print(\"\\n\"+stdout+\"\\n\")\n # remove the header from the process output\n status=[i.split() for i in filter(lambda x: x, stdout.split(\"\\n\")[1:])]\n # memory is the sum of all rss\n memory=sum(int(i[1]) for i in status)\n if memory > max_memory:\n max_memory=memory\n \n end=time.time()\n print(\"Time: {:.0f} minutes\".format((end-start)/60))\n print(\"Max Memory (RSS): {:.1f} GB\".format(max_memory*1.0/1024**2))", "def main(self, names, options) :\n names = self.sanitizeNames(options, names)\n suffix = (options[\"groups\"] and \"Group\") or \"User\" \n printernames = options[\"printer\"].split(\",\")\n \n if not options[\"list\"] :\n percent = Percent(self)\n percent.display(\"%s...\" % _(\"Extracting datas\"))\n printers = self.storage.getMatchingPrinters(options[\"printer\"])\n entries = getattr(self.storage, \"getMatching%ss\" % suffix)(\",\".join(names))\n if not options[\"list\"] :\n percent.setSize(len(printers) * len(entries))\n \n if options[\"list\"] :\n for printer in printers :\n for entry in entries :\n pqentry = getattr(self.storage, \"get%sPQuota\" % suffix)(entry, printer)\n if pqentry.Exists :\n print \"%s@%s\" % (entry.Name, printer.Name)\n print \" %s\" % (_(\"Page counter : %s\") % pqentry.PageCounter)\n print \" %s\" % (_(\"Lifetime page counter : %s\") % 
pqentry.LifePageCounter)\n print \" %s\" % (_(\"Soft limit : %s\") % pqentry.SoftLimit)\n print \" %s\" % (_(\"Hard limit : %s\") % pqentry.HardLimit)\n print \" %s\" % (_(\"Date limit : %s\") % pqentry.DateLimit)\n print \" %s (Not supported yet)\" % (_(\"Maximum job size : %s\") % ((pqentry.MaxJobSize and (_(\"%s pages\") % pqentry.MaxJobSize)) or _(\"Unlimited\")))\n if hasattr(pqentry, \"WarnCount\") :\n print \" %s\" % (_(\"Warning banners printed : %s\") % pqentry.WarnCount)\n print\n elif options[\"delete\"] : \n percent.display(\"\\n%s...\" % _(\"Deletion\"))\n getattr(self.storage, \"deleteMany%sPQuotas\" % suffix)(printers, entries)\n percent.display(\"\\n\")\n else :\n skipexisting = options[\"skipexisting\"]\n used = options[\"used\"]\n if used :\n used = used.strip()\n try :\n int(used)\n except ValueError :\n raise CPSCommandLineError, _(\"Invalid used value %s.\") % used\n \n increase = options[\"increase\"]\n if increase :\n try :\n increase = int(increase.strip())\n except ValueError :\n raise CPSCommandLineError, _(\"Invalid increase value %s.\") % increase\n \n noquota = options[\"noquota\"]\n reset = options[\"reset\"] \n hardreset = options[\"hardreset\"]\n softlimit = hardlimit = None\n if not noquota :\n if options[\"softlimit\"] :\n try :\n softlimit = int(options[\"softlimit\"].strip())\n if softlimit < 0 :\n raise ValueError\n except ValueError : \n raise CPSCommandLineError, _(\"Invalid softlimit value %s.\") % options[\"softlimit\"]\n if options[\"hardlimit\"] :\n try :\n hardlimit = int(options[\"hardlimit\"].strip())\n if hardlimit < 0 :\n raise ValueError\n except ValueError : \n raise CPSCommandLineError, _(\"Invalid hardlimit value %s.\") % options[\"hardlimit\"]\n if (softlimit is not None) and (hardlimit is not None) and (hardlimit < softlimit) : \n # error, exchange them\n self.printInfo(_(\"Hard limit %i is less than soft limit %i, values will be exchanged.\") % (hardlimit, softlimit))\n (softlimit, hardlimit) = (hardlimit, softlimit)\n if hardlimit is None : \n hardlimit = softlimit\n if hardlimit is not None :\n self.printInfo(_(\"Undefined hard limit set to soft limit (%s).\") % str(hardlimit))\n if softlimit is None : \n softlimit = hardlimit\n if softlimit is not None :\n self.printInfo(_(\"Undefined soft limit set to hard limit (%s).\") % str(softlimit))\n \n self.storage.beginTransaction() \n try :\n if options[\"add\"] :\n percent.display(\"\\n%s...\\n\" % _(\"Creation\"))\n if not entries : \n self.printInfo(_(\"No entry matches %s. 
Please use pkusers to create them first.\") % (\" \".join(names)), \"warn\")\n \n factory = globals()[\"Storage%sPQuota\" % suffix]\n for printer in printers :\n pname = printer.Name\n for entry in entries :\n ename = entry.Name\n pqkey = \"%s@%s\" % (ename, pname)\n pqentry = factory(self.storage, entry, printer)\n self.modifyPQEntry(pqkey, pqentry, noquota, \\\n softlimit, hardlimit, \\\n increase, reset, \\\n hardreset, suffix, used)\n oldpqentry = getattr(self.storage, \"add%sPQuota\" % suffix)(pqentry)\n if oldpqentry is not None : \n if skipexisting :\n self.logdebug(\"%s print quota entry %s@%s already exists, skipping.\" % (suffix, ename, pname))\n else : \n self.logdebug(\"%s print quota entry %s@%s already exists, will be modified.\" % (suffix, ename, pname))\n self.modifyPQEntry(pqkey, oldpqentry, noquota, \\\n softlimit, hardlimit, \\\n increase, reset, \\\n hardreset, suffix, used)\n oldpqentry.save() \n percent.oneMore()\n else : \n percent.display(\"\\n%s...\\n\" % _(\"Modification\"))\n for printer in printers :\n for entry in entries :\n pqkey = \"%s@%s\" % (entry.Name, printer.Name)\n pqentry = getattr(self.storage, \"get%sPQuota\" % suffix)(entry, printer)\n if pqentry.Exists : \n self.modifyPQEntry(pqkey, pqentry, noquota, \\\n softlimit, hardlimit, \\\n increase, reset, \\\n hardreset, suffix, used)\n pqentry.save() \n percent.oneMore()\n except : \n self.storage.rollbackTransaction()\n raise\n else : \n self.storage.commitTransaction()\n \n if not options[\"list\"] :\n percent.done()", "def metrics(self, account_id):\n from pureport_client.commands.accounts.metrics import Command\n return Command(self.client, account_id)", "def run(self, line):\n LOGGER.info(\"Scalable PMEM: {}\".format(self.name))\n try:\n (options, _) = self._parse_arglist(line)\n except:\n if (\"-h\" in line) or (\"--help\" in line):\n return ReturnCodes.SUCCESS\n else:\n raise InvalidCommandLineErrorOPTS(\"\")\n\n if len(args):\n InvalidCommandLineError(\"This command takes no parameters.\")\n\n LOGGER.info(\"Options: {}\".format(options))\n\n if not self._chif_lib:\n self._helpers.failNoChifLibrary()\n\n enable = True\n if options.enableFeature is False:\n enable = False\n\n self.enableOrDisableFeature(enable)\n\n #Return code\n return ReturnCodes.SUCCESS", "def getHelp(self):\r\n help_str =\\\r\n \"\"\"##########################################################################################\r\n#\r\n# Required:\r\n#\r\n# --query_NAST multi-fasta file containing query sequences in alignment format\r\n#\r\n# Common opts:\r\n#\r\n# --db_NAST db in NAST format\r\n# --db_FASTA db in fasta format (megablast formatted)\r\n#\r\n#\r\n# -n number of top matching database sequences to compare to (default 15)\r\n# -R min divergence ratio default: 1.007\r\n# -P min percent identity among matching sequences (default: 90)\r\n#\r\n# ## parameters to tune ChimeraParentSelector:\r\n#\r\n# Scoring parameters:\r\n# -M match score (default: +5)\r\n# -N mismatch penalty (default: -4)\r\n# -Q min query coverage by matching database sequence (default: 70)\r\n# -T maximum traverses of the multiple alignment (default: 1)\r\n\r\n#\r\n# ## parameters to tune ChimeraPhyloChecker:\r\n#\r\n#\r\n# --windowSize default 50\r\n# --windowStep default 5\r\n# --minBS minimum bootstrap support for calling chimera (default: 90)\r\n# -S percent of SNPs to sample on each side of breakpoint for computing bootstrap support (default: 10)\r\n# --num_parents_test number of potential parents to test for chimeras (default: 3)\r\n# 
--MAX_CHIMERA_PARENT_PER_ID Chimera/Parent alignments with perID above this are considered non-chimeras (default 100; turned off)\r\n#\r\n# ## misc opts\r\n#\r\n# --printFinalAlignments shows alignment between query sequence and pair of candidate chimera parents\r\n# --printCSalignments print ChimeraSlayer alignments in ChimeraSlayer output\r\n# --exec_dir chdir to here before running\r\n#\r\n#########################################################################################\r\n \"\"\"\r\n return help_str", "def usage(app_name):\n global version\n print '\\npython {0} -a MediaLive_ARN -n Dashboard_Name [Optional parameters]\\n'.format(app_name)\n print 'Version:', version\n print '\\nThis script creates a CloudWatch Dashboard for a MediaLive/MediaPackage workflow.'\n print \"It uses the MediaLive Channel Arn as input and determines the MediaPackage instances from the \"\n print \"MediaLive channel configuration. It then creates the CloudWatch Dashboard that contains info on the\"\n print \"MediaLive channel, the two MediaPackage channels, and all of the MediaPackage endpoints.\"\n print \"\\nRequired parameters:\"\n print \"-a, --arn: MediaLive Channel ARN\"\n print \"-n, --name: Name for the CloudWatch Dashboard. \"\n print \"\"\n print \"Optional parameters\"\n print \"-l, --list: Filename of a file that contains a list of MediaLive Channel ARNs, 1 ARN per line. \"\n print \" All MediaLive channels and their corresponding MediaPackage channels will be included in \"\n print \" the CloudWatch Dashboard.\"\n print \" Note: This parameter is ignored if a channel ARN is provided via the '-a/--arn' option\"\n print \" Note: All ARNs in the list must be for channels in the same region. All ARNs not in the same\"\n print \" region as the first ARN in the list will be ignored.\"\n print '-h, --help: Print this help and exit.'\n print \"\"\n print 'Examples:'\n print \"\"\n print 'Using MediaLive ARN arn:aws:medialive:us-west-2:0123456789:channel:123456 and create a CloudWatch ' \\\n 'Dashboard called \"My TV Dashboard\"'\n print 'python {0} -a arn:aws:medialive:us-west-2:0123456789:channel:123456 ' \\\n '-n \"My TV Dashboard\" '.format(app_name)\n print \"\"\n print 'Using the MediaLive Channel ARN list defined in the text file \"My EML arns.txt\" create a CloudWatch' \\\n 'Dashboard called \"Primary Bouquet\".'\n print 'python {0} -l \"My EML arns.txt\" -n \"Primary Bouquet\"\\n'.format(app_name)", "def main( argv = None ):\n\n if argv == None: argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser( version = \"%prog version: $Id$\", \n usage = globals()[\"__doc__\"] )\n\n parser.add_option(\"--category\", dest=\"category\", type=\"choice\",\n choices = (\"B\", \"C\"), help=\"supply help\" )\n\n ## add common options (-h/--help, ...) 
and parse command line \n (options, args) = E.Start( parser, argv = argv )\n\n data = getData(options.stdin)\n if options.category == \"B\":\n options.stdout.write(\"Category B pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in b2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n\n elif options.category == \"C\":\n options.stdout.write(\"Category C pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in c2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n else:\n raise ValueError(\"must specify the category of pathway\")\n\n\n ## write footer and output benchmark information.\n E.Stop()", "def print_usage():\n print(\"usage: MILP.py -p <parameter file> -i <payoff file> -o <output file>\")\n print(\"-p, --params\\t sets the parameter file\")\n print(\"-i, --payoff\\t sets the payoff file\")\n print(\"-o, --output\\t sets the output file. Defaults to out.csv\")\n print(\"-d, --delimiter\\t sets the delimiter of ALL files. Defaults to csv\")", "def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))", "def diagnostics(self,\n *opts, # type: DiagnosticsOptions\n **kwargs # type: Dict[str, Any]\n ) -> DiagnosticsResult:\n\n return super().diagnostics(*opts, **kwargs)", "def process_info(process):\n\thelp(process)", "def mc(self, *args) -> None:\n env = os.environ.copy()\n env['MC_HOST_minio'] = self.auth_url\n # --config-dir is set just to prevent any config set by the user\n # from interfering with the test.\n try:\n subprocess.run(\n [\n 'mc', '--quiet', '--no-color', f'--config-dir={self.path}',\n *args\n ],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.PIPE,\n env=env,\n encoding='utf-8',\n errors='replace',\n check=True\n )\n except OSError as exc:\n raise MissingProgram(f'mc could not be run: {exc}') from exc\n except subprocess.CalledProcessError as exc:\n raise ProgramFailed(exc.stderr) from exc", "async def eventstats(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send_help(ctx.command)", "def help(update, context):\n msg = \"\"\n msg += \"\\n/covid 7-Day-Incident per Million\"\n msg += \"\\n/daylio What did I do a year ago today?\"\n msg += \"\\n/f1last Results of the last race\"\n msg += \"\\n/f1stand Driver standings\"\n msg += \"\\n/f1next Time and place of the next race\"\n msg += \"\\n/fuel prices and consump. 
(args: Xeur Ykm)\"\n msg += \"\\n/ip Outside ip address\"\n msg += \"\\n/rate Exchange rates (args: Xeur/Yhuf)\"\n msg += \"\\n/rss check rss feeds for new content\"\n msg += \"\\n/sun Time of sunrise and sunset\"\n msg += \"\\n/xkcd Sends last comic image and alt\"\n msg.rstrip()\n update.message.reply_text(msg)", "def usage():", "def usage():", "def ShowMQueue(cmd_args=None, cmd_options={}):\n if not cmd_args:\n print \"Please specify the address of the ipc_mqueue whose details you want to print\"\n print ShowMQueue.__doc__\n return\n space = 0\n if \"-S\" in cmd_options:\n space = kern.GetValueFromAddress(cmd_options[\"-S\"], 'struct ipc_space *')\n mqueue = kern.GetValueFromAddress(cmd_args[0], 'struct ipc_mqueue *')\n wq_type = mqueue.data.pset.setq.wqset_q.waitq_type\n if int(wq_type) == 3:\n psetoff = getfieldoffset('struct ipc_pset', 'ips_messages')\n pset = unsigned(ArgumentStringToInt(cmd_args[0])) - unsigned(psetoff)\n print PrintPortSetSummary.header\n PrintPortSetSummary(kern.GetValueFromAddress(pset, 'struct ipc_pset *'), space)\n elif int(wq_type) == 2:\n portoff = getfieldoffset('struct ipc_port', 'ip_messages')\n port = unsigned(ArgumentStringToInt(cmd_args[0])) - unsigned(portoff)\n print PrintPortSummary.header\n PrintPortSummary(kern.GetValueFromAddress(port, 'struct ipc_port *'))\n else:\n print \"Invalid mqueue? (waitq type {:d} is invalid)\".format(int(wq_type))", "def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: users[user] += count\n except KeyError: users[user] = count\n else: \n try: commands[name] += count\n except KeyError: commands[name] = count\n\n try: channels[user] += count\n except KeyError: channels[user] = count\n\n comrank = sorted([(b, a) for (a, b) in commands.iteritems()], reverse=True)\n userank = sorted([(b, a) for (a, b) in users.iteritems()], reverse=True)\n charank = sorted([(b, a) for (a, b) in channels.iteritems()], reverse=True)\n\n # most heavily used commands\n creply = 'most used commands: '\n for count, command in comrank[:10]: \n creply += '%s (%s), ' % (command, count)\n caesar.say(creply.rstrip(', '))\n\n # most heavy users\n reply = 'power users: '\n for count, user in userank[:10]: \n reply += '%s (%s), ' % (user, count)\n caesar.say(reply.rstrip(', '))\n\n # most heavy channels\n chreply = 'power channels: '\n for count, channel in charank[:3]: \n chreply += '%s (%s), ' % (channel, count)\n caesar.say(chreply.rstrip(', '))" ]
[ "0.59879315", "0.59793746", "0.5929683", "0.5513063", "0.5448064", "0.54282194", "0.54047555", "0.5269779", "0.5266438", "0.5178241", "0.5156107", "0.5139908", "0.5117616", "0.51164675", "0.5078646", "0.50614786", "0.5020368", "0.5004413", "0.5001407", "0.4972005", "0.4944734", "0.4910308", "0.4910103", "0.48951426", "0.4874687", "0.48275676", "0.4813321", "0.48064005", "0.48051345", "0.48032242", "0.47986543", "0.47911313", "0.47624928", "0.4731794", "0.47241566", "0.4722797", "0.47224617", "0.47222456", "0.47190836", "0.47132087", "0.47093666", "0.4706905", "0.47062838", "0.47033933", "0.47007436", "0.4699971", "0.4697163", "0.4693018", "0.46920824", "0.46876243", "0.46850437", "0.46763372", "0.46756563", "0.46696624", "0.4658716", "0.46549788", "0.46518508", "0.46490717", "0.463917", "0.4626967", "0.46219748", "0.46110052", "0.46110052", "0.46106642", "0.46096477", "0.46071655", "0.4604083", "0.45882815", "0.45856723", "0.45846894", "0.45818603", "0.45803452", "0.45776942", "0.4576603", "0.45752308", "0.45730877", "0.4568339", "0.4566195", "0.45593685", "0.45552793", "0.45496267", "0.45469728", "0.45458445", "0.4540675", "0.45346835", "0.4531416", "0.45298645", "0.45259723", "0.4525173", "0.45174825", "0.45169201", "0.45106348", "0.4506213", "0.45052838", "0.4498942", "0.4498433", "0.4497871", "0.4497871", "0.44876477", "0.4485999" ]
0.5978439
2
Report usage metrics for active LPARs of CPCs in classic mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_lpar(cmd_ctx, cpc, lpar, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_lpar(cmd_ctx, cpc, lpar, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. \r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text", "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def process_cl_args():\n\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('commands', nargs='*')\n parser.add_argument('--help', '-h', action='store_true')\n parser.add_argument('--version', '-v', action='store_true')\n parser.add_argument('--debug', '-d', action='store_true')\n parser.add_argument('--logging', '-l', action='store_true')\n parser.add_argument('--no-autosize', action='store_true')\n parser.add_argument('--no-preload', action='store_true')\n args = parser.parse_args()\n\n if args.version:\n xprint(get_version_info())\n xprint(\"\")\n sys.exit()\n\n elif args.help:\n for x in helptext():\n xprint(x[2])\n sys.exit()\n\n if args.debug or os.environ.get(\"mpsytdebug\") == \"1\":\n xprint(get_version_info())\n g.debug_mode = True\n g.no_clear_screen = True\n logfile = os.path.join(tempfile.gettempdir(), \"mpsyt.log\")\n logging.basicConfig(level=logging.DEBUG, filename=logfile)\n logging.getLogger(\"pafy\").setLevel(logging.DEBUG)\n\n elif args.logging or os.environ.get(\"mpsytlog\") == \"1\":\n logfile = os.path.join(tempfile.gettempdir(), \"mpsyt.log\")\n logging.basicConfig(level=logging.DEBUG, filename=logfile)\n logging.getLogger(\"pafy\").setLevel(logging.DEBUG)\n\n if args.no_autosize:\n g.detectable_size = False\n\n g.command_line = \"playurl\" in args.commands or \"dlurl\" in args.commands\n if g.command_line:\n g.no_clear_screen = True\n\n if args.no_preload:\n g.preload_disabled = True\n\n g.argument_commands = args.commands", "async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def main_CL():\r\n version=1.0\r\n st = time.time()\r\n parser = OptionParser(usage=usage(), version='%s'%version)\r\n parser.add_option(\"-n\", \"--days\", dest=\"days\", default=\"30\", help=\"Days ago, defaults to 30 days\")\r\n parser.add_option(\"-s\", \"--stream\", dest=\"stream\", default=\"all\", help=\"Code Stream, defaults to all\")\r\n parser.add_option(\"-u\", \"--usage\", dest=\"usage\", default=\"\", help=\"Show usage information\")\r\n parser.add_option(\"-d\", \"--debug\", dest='debug', action=\"count\", help=\"The debug level, use multiple to get more.\")\r\n (options, args) = parser.parse_args()\r\n\r\n if options.debug > 1:\r\n print ' days %s' %(options.days)\r\n print ' args: 
%s' %args\r\n else:\r\n options.debug = 0\r\n \r\n if options.usage:\r\n print usage()\r\n else:\r\n obj=ListCRs()\r\n obj.setUp()\r\n since = options.days \r\n \r\n #stream = str(stream).strip() \r\n obj.listCRsCL(since, options, st) \r\n \r\n print '\\nTook a total of %3.2f secs -^' %(time.time()-st)", "def help(self):\n msg = \"`%s' performs the computational aspects of genotyping-by-sequencing.\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Usage: %s [OPTIONS] ...\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Options:\\n\"\n msg += \" -h, --help\\tdisplay the help and exit\\n\"\n msg += \" -V, --version\\toutput version information and exit\\n\"\n msg += \" -v, --verbose\\tverbosity level (0/default=1/2/3)\\n\"\n msg += \" --proj1\\tname of the project used for steps 1 to 4\\n\"\n msg += \"\\t\\tmention a reference genome only if all samples belong to\\n\"\n msg += \"\\t\\t the same species, and will be mapped to the same ref genome\\n\"\n msg += \" --proj2\\tname of the project used for steps 4 to 8\\n\"\n msg += \"\\t\\tcan be the same as --proj1, or can be different\\n\"\n msg +=\"\\t\\t notably when samples come from different species\\n\"\n msg += \"\\t\\t or if one wants to align reads to different ref genomes\\n\"\n msg += \" --schdlr\\tname of the cluster scheduler (default=SGE)\\n\"\n msg += \" --queue\\tname of the cluster queue (default=normal.q)\\n\"\n msg += \" --resou\\tcluster resources (e.g. 'test' for 'qsub -l test')\\n\"\n msg += \" --rmvb\\tremove bash scripts for jobs launched in parallel\\n\"\n msg += \" --step\\tstep to perform (1/2/3/.../9)\\n\"\n msg += \"\\t\\t1: raw read quality per lane (with FastQC v >= 0.11.2)\\n\"\n msg += \"\\t\\t2: demultiplexing per lane (with demultiplex.py v >= 1.14.0)\\n\"\n msg += \"\\t\\t3: cleaning per sample (with CutAdapt v >= 1.8)\\n\"\n msg += \"\\t\\t4: alignment per sample (with BWA MEM v >= 0.7.12, Samtools v >= 1.3, Picard and R v >= 3)\\n\"\n msg += \"\\t\\t5: local realignment per sample (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t6: local realignment per genotype (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t7: variant and genotype calling per genotype (with GATK HaplotypeCaller v >= 3.5)\\n\"\n msg += \"\\t\\t8: variant and genotype calling jointly across genotypes (with GATK GenotypeGVCFs v >= 3.5)\\n\"\n msg += \"\\t\\t9: variant and genotype filtering (with GATK v >= 3.5)\\n\"\n msg += \" --samples\\tpath to the 'samples' file\\n\"\n msg += \"\\t\\tcompulsory for all steps, but can differ between steps\\n\"\n msg += \"\\t\\t e.g. if samples come from different species or are aligned\\n\"\n msg += \"\\t\\t on different ref genomes, different samples file should\\n\"\n msg += \"\\t\\t be used for steps 4-9, representing different subsets of\\n\"\n msg += \"\\t\\t the file used for steps 1-3\\n\"\n msg += \"\\t\\tthe file should be encoded in ASCII\\n\"\n msg += \"\\t\\tthe first row should be a header with column names\\n\"\n msg += \"\\t\\teach 'sample' (see details below) should have one and only one row\\n\"\n msg += \"\\t\\tany two columns should be separated with one tabulation\\n\"\n msg += \"\\t\\tcolumns can be in any order\\n\"\n msg += \"\\t\\trows starting by '#' are skipped\\n\"\n msg += \"\\t\\t12 columns are compulsory (but there can be more):\\n\"\n msg += \"\\t\\t genotype (see details below, e.g. 
'Col-0', but use neither underscore '_' nor space ' ' nor dot '.', use dash '-' instead)\\n\"\n msg += \"\\t\\t ref_genome (identifier of the reference genome used for alignment, e.g. 'Atha_v2', but use neither space ' ' nor dot '.'; the full species name, e.g. 'Arabidopsis thaliana', will be present in the file given to --dict)\\n\"\n msg += \"\\t\\t library (e.g. can be the same as 'genotype')\\n\"\n msg += \"\\t\\t barcode (e.g. 'ATGG')\\n\"\n msg += \"\\t\\t seq_center (e.g. 'Broad Institute', 'GenoToul', etc)\\n\"\n msg += \"\\t\\t seq_platform (e.g. 'ILLUMINA', see SAM format specification)\\n\"\n msg += \"\\t\\t seq_platform_model (e.g. 'HiSeq 2000')\\n\"\n msg += \"\\t\\t flowcell (e.g. 'C5YMDACXX')\\n\"\n msg += \"\\t\\t lane (e.g. '3', can be '31' if a first demultiplexing was done per index)\\n\"\n msg += \"\\t\\t date (e.g. '2015-01-15', see SAM format specification)\\n\"\n msg += \"\\t\\t fastq_file_R1 (filename, one per lane, gzip-compressed)\\n\"\n msg += \"\\t\\t fastq_file_R2 (filename, one per lane, gzip-compressed)\\n\"\n msg += \" --fcln\\tidentifier of a flowcell and lane number\\n\"\n msg += \"\\t\\tformat as <flowcell>_<lane-number>, e.g. 'C5YMDACXX_1'\\n\"\n msg += \"\\t\\tif set, only the samples from this lane will be analyzed\\n\"\n msg += \" --pird\\tpath to the input reads directory\\n\"\n msg += \"\\t\\tcompulsory for steps 1 and 2\\n\"\n msg += \"\\t\\twill be added to the columns 'fastq_file_R*' from the sample file\\n\"\n msg += \"\\t\\tif not set, input read files should be in current directory\\n\"\n msg += \" --enz\\tname of the restriction enzyme\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=ApeKI\\n\"\n msg += \" --dmxmet\\tmethod used to demultiplex\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=4c (see the help of demultiplex.py to know more)\\n\"\n msg += \" --subst\\tnumber of substitutions allowed during demultiplexing\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=2\\n\"\n msg += \" --ensubst\\tenforce the nb of substitutions allowed\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=lenient/strict\\n\"\n msg += \" --adp\\tpath to the file containing the adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tsame format as FastQC: name<tab>sequence\\n\"\n msg += \"\\t\\tname: at least 'adpR1' (also 'adpR2' if paired-end)\\n\"\n msg += \"\\t\\tsequence: from 5' (left) to 3' (right)\\n\"\n msg += \" --errtol\\terror tolerance to find adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --minovl\\tminimum overlap length between reads and adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=3 (in bases)\\n\"\n msg += \" --minrl\\tminimum length to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=35 (in bases)\\n\"\n msg += \" --minq\\tminimum quality to trim a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=20 (used for both reads if paired-end)\\n\"\n msg += \" --maxNp\\tmaximum percentage of N to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --ref\\tpath to the prefix of files for the reference genome\\n\"\n msg += \"\\t\\tcompulsory for steps 4, 5, 6, 7, 8, 9\\n\"\n msg += \"\\t\\tshould correspond to the 'ref_genome' column in --samples\\n\"\n msg += \"\\t\\te.g. 
'/data/Atha_v2' for '/data/Atha_v2.fa', '/data/Atha_v2.bwt', etc\\n\"\n msg += \"\\t\\tthese files are produced via 'bwa index ...'\\n\"\n msg += \" --dict\\tpath to the 'dict' file (SAM header with @SQ tags)\\n\"\n msg += \"\\t\\tcompulsory for step 4\\n\"\n msg += \"\\t\\tsee 'CreateSequenceDictionary' in the Picard software\\n\"\n msg += \" --jgid\\tcohort identifier to use for joint genotyping\\n\"\n msg += \"\\t\\tcompulsory for steps 8, 9\\n\"\n msg += \"\\t\\tuseful to launch several, different cohorts in parallel\\n\"\n msg += \" --rat\\trestrict alleles to be of a particular allelicity\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdefault=ALL/BIALLELIC/MULTIALLELIC\\n\"\n msg += \"\\t\\tsee '--restrictAllelesTo' in GATK's SelectVariant\\n\"\n msg += \" --mdp\\tminimum value for DP (read depth; e.g. 10)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mgq\\tminimum value for GQ (genotype quality; e.g. 20)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mnfg\\tmaximum number of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mffg\\tmaximum fraction of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFractionFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mnnc\\tmaximum number of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \" --mfnc\\tmaximum fraction of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxNOCALLfraction' in GATK's SelectVariants\\n\"\n msg += \" --fam\\tpath to the file containing pedigree information\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdiscard variants with Mendelian violations (see Semler et al, 2012)\\n\"\n msg += \"\\t\\tshould be in the 'fam' format specified by PLINK\\n\"\n msg += \"\\t\\tvalidation strictness (GATK '-pedValidationType') is set at 'SILENT'\\n\"\n msg += \"\\t\\t allowing some samples to be absent from the pedigree\\n\"\n msg += \" --mvq\\tminimum GQ for each trio member to accept a variant as a Mendelian violation\\n\"\n msg += \"\\t\\tused in step 9 if '--fam' is specified\\n\"\n msg += \"\\t\\tdefault=0\\n\"\n msg += \" --xlssf\\tpath to the file with genotypes to exclude\\n\"\n msg += \"\\t\\tused in step 9 (can be especially useful if '--fam' is specified)\\n\"\n msg += \" --tmpd\\tpath to a temporary directory on child nodes (default=.)\\n\"\n msg += \"\\t\\te.g. 
it can be /tmp or /scratch\\n\"\n msg += \"\\t\\tused in step 4 for 'samtools sort'\\n\"\n msg += \"\\t\\tused in step 7 for 'GATK HaplotypeCaller'\\n\"\n msg += \" --jvmXms\\tinitial memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=512m (can also be specified as 1024k, 1g, etc)\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --jvmXmx\\tmaximum memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=4g\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --queue2\\tname of the second cluster queue (default=bigmem.q)\\n\"\n msg += \"\\t\\tused in step 4 for Picard to collect insert sizes\\n\"\n msg += \" --knowni\\tpath to a VCF file with known indels (for local realignment)\\n\"\n msg += \" --known\\tpath to a VCF file with known variants (e.g. from dbSNP)\\n\"\n msg += \" --force\\tforce to re-run step(s)\\n\"\n msg += \"\\t\\tthis removes without warning the step directory if it exists\\n\"\n msg += \"\\n\"\n msg += \"Examples:\\n\"\n msg += \" %s --step 1 --samples samples.txt\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Details:\\n\"\n msg += \"This program aims at genotyping a set of 'genotypes' using data from\\n\"\n msg += \"a restriction-assisted DNA sequencing (RAD-seq) experiment, also known\\n\"\n msg += \"as a genotyping-by-sequencing (GBS) experiment.\\n\"\n msg += \"Here, by 'genotype', we mean the entity which is the focus of the\\n\"\n msg += \"study. For instance, it can be a plant variety (or a human being), or\\n\"\n msg += \"the specific clone of a given plant variety (or a specific tumor of a\\n\"\n msg += \"given human being), etc.\\n\"\n msg += \"Importantly, note that the content of the 'genotype' column will\\n\"\n msg += \"be used to set the 'SM' (sample) tag of the 'RG' (read group) header\\n\"\n msg += \"record type of the SAM format (see http://www.htslib.org/). However,\\n\"\n msg += \"internal to this program, the term 'sample' corresponds to the unique\\n\"\n msg += \"quadruplet (genotype,flowcell,lane,barcode) for steps 1 and 2, and to\\n\"\n msg += \"the unique triplet (genotype,flowcell,lane) for the others.\\n\"\n msg += \"Jobs are executed in parallel (--schdlr). Their return status is\\n\"\n msg += \"recorded in a SQLite database which is removed at the end. 
If a job\\n\"\n msg += \"fails, the whole script stops with an error.\\n\"\n msg += \"\\n\"\n msg += \"Dependencies:\\n\"\n msg += \"Python >= 2.7; Biopython; pyutilstimflutre >= 0.5\\n\"\n msg += \"\\n\"\n msg += \"Report bugs to <[email protected]>.\"\n print(msg); sys.stdout.flush()", "def help(self, args):\n print('No commands available for this consumer')", "def help_opt(self):\n print(OPTIONS)", "def getHelp(self):\r\n help_str =\\\r\n \"\"\"##########################################################################################\r\n#\r\n# Required:\r\n#\r\n# --query_NAST multi-fasta file containing query sequences in alignment format\r\n#\r\n# Common opts:\r\n#\r\n# --db_NAST db in NAST format\r\n# --db_FASTA db in fasta format (megablast formatted)\r\n#\r\n#\r\n# -n number of top matching database sequences to compare to (default 15)\r\n# -R min divergence ratio default: 1.007\r\n# -P min percent identity among matching sequences (default: 90)\r\n#\r\n# ## parameters to tune ChimeraParentSelector:\r\n#\r\n# Scoring parameters:\r\n# -M match score (default: +5)\r\n# -N mismatch penalty (default: -4)\r\n# -Q min query coverage by matching database sequence (default: 70)\r\n# -T maximum traverses of the multiple alignment (default: 1)\r\n\r\n#\r\n# ## parameters to tune ChimeraPhyloChecker:\r\n#\r\n#\r\n# --windowSize default 50\r\n# --windowStep default 5\r\n# --minBS minimum bootstrap support for calling chimera (default: 90)\r\n# -S percent of SNPs to sample on each side of breakpoint for computing bootstrap support (default: 10)\r\n# --num_parents_test number of potential parents to test for chimeras (default: 3)\r\n# --MAX_CHIMERA_PARENT_PER_ID Chimera/Parent alignments with perID above this are considered non-chimeras (default 100; turned off)\r\n#\r\n# ## misc opts\r\n#\r\n# --printFinalAlignments shows alignment between query sequence and pair of candidate chimera parents\r\n# --printCSalignments print ChimeraSlayer alignments in ChimeraSlayer output\r\n# --exec_dir chdir to here before running\r\n#\r\n#########################################################################################\r\n \"\"\"\r\n return help_str", "def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()", "def cmd_help(args):", "def procs_calculate_axyzc(molecules, n_cores=-1, show_progress=True, scr=None, cmd=XTB_CMD):\n results = None\n return results", "def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases 
closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m", "def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: users[user] += count\n except KeyError: users[user] = count\n else: \n try: commands[name] += count\n except KeyError: commands[name] = count\n\n try: channels[user] += count\n except KeyError: channels[user] = count\n\n comrank = sorted([(b, a) for (a, b) in commands.iteritems()], reverse=True)\n userank = sorted([(b, a) for (a, b) in users.iteritems()], reverse=True)\n charank = sorted([(b, a) for (a, b) in channels.iteritems()], reverse=True)\n\n # most heavily used commands\n creply = 'most used commands: '\n for count, command in comrank[:10]: \n creply += '%s (%s), ' % (command, count)\n caesar.say(creply.rstrip(', '))\n\n # most heavy users\n reply = 'power users: '\n for count, user in userank[:10]: \n reply += '%s (%s), ' % (user, count)\n caesar.say(reply.rstrip(', '))\n\n # most heavy channels\n chreply = 'power channels: '\n for count, channel in charank[:3]: \n chreply += '%s (%s), ' % (channel, count)\n caesar.say(chreply.rstrip(', '))", "def help_help(self):\n print(\"List commands or print details about a command\")", "def command_help(args):\n\tprint_usage()\n\treturn 0", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in 
memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def help_text(command):\n\n courses_list = ('ENPM611', 'ENPM613', 'ENPM631', 'ENPM687',\\\n 'ENPM691', 'ENPM693', 'ENPM694', 'ENPM696',\\\n 'ENPM809J','ENPM809R', 'ENPM809W')\n\n response = 'I have course descriptions for: '\n for course_name in courses_list:\n response = response + course_name + ' '\n\n response = response + '\\nTo get the course description, execute command: about ENPM<course_number>'\n\n return response", "def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "def print_help_info(self, global_options):\r\n usage = ['',\"Type '%s help <subcommand>' for help on a specific subcommand.\" % self.prog_name,'']\r\n usage.append('Available subcommands:')\r\n commands = self.get_commands(global_options).keys()\r\n commands.sort()\r\n for cmd in commands:\r\n usage.append(' %s' % cmd)\r\n return '\\n'.join(usage)", "def usage():\n print(\"[1] Getting help from a cipher \")\n print(\" ---> ./cryptogra.py caesar -h \")\n print(\"\")", "def usage(progname):\n \n sys.stderr.write(\"Usage: \" +progname + \" [-cmnv] [-z score] \"\n \" <outdir>\\n\")\n sys.stderr.write(' -c class level not fold level evaluation\\n')\n sys.stderr.write(' -m read multiquery file on stdin\\n')\n sys.stderr.write(' -n negate scores (so that most -ve is best)\\n')\n sys.stderr.write(' -v verbose messages to stderr\\n')\n sys.stderr.write(' -z score : assign identifiers not present in the 
output a score of score\\n')\n sys.exit(1)", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"", "def cl_args(parser, show_beta=True, dbeta=0.01, show_from_scratch=False,\n show_multi_head=False, show_cl_scenario=False,\n show_split_head_cl3=True, dcl_scenario=1,\n show_num_tasks=False, dnum_tasks=1):\n ### Continual learning options.\n agroup = parser.add_argument_group('Continual learning options')\n\n if show_beta:\n agroup.add_argument('--beta', type=float, default=dbeta,\n help='Trade-off for the CL regularizer. ' +\n 'Default: %(default)s.')\n\n if show_from_scratch:\n agroup.add_argument('--train_from_scratch', action='store_true',\n help='If set, all networks are recreated after ' +\n 'training on each task. Hence, training starts ' +\n 'from scratch.')\n\n if show_multi_head:\n agroup.add_argument('--multi_head', action='store_true',\n help='Use a multihead setting, where each task has ' +\n 'its own output head.')\n\n if show_cl_scenario:\n agroup.add_argument('--cl_scenario', type=int, default=dcl_scenario,\n help='Continual learning scenarios according to ' +\n 'https://arxiv.org/pdf/1809.10635.pdf. ' +\n '\"1\" - Task-incremental learning; ' +\n '\"2\" - Domain-incremental learning; ' +\n '\"3\" - Class-incremental learning. ' +\n 'Default: %(default)s.',\n choices=[1, 2, 3])\n\n if show_cl_scenario and show_split_head_cl3:\n agroup.add_argument('--split_head_cl3', action='store_true',\n help='CL scenario 3 (CL3, cmp. \"cl_scenario\") ' +\n 'originally requires to compute the softmax ' +\n 'across all output neurons. Though, if a ' +\n 'task-conditioned hypernetwork is used, the ' +\n 'task identity had to be inferred a priori. ' +\n 'Hence, in CL2 and CL3 we always know the ' +\n 'task identity, which is why we can also ' +\n 'compute the softmax over single output ' +\n 'heads in CL3 using this option.')\n\n if show_num_tasks:\n agroup.add_argument('--num_tasks', type=int, metavar='N',\n default=dnum_tasks,\n help='Number of tasks. 
Default: %(default)s.')\n\n return agroup", "def cli(ctx):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"cluster subcommand called from cli\")", "def display_help(self):\n pass", "def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def help(cls, extra_args=None):\n if (_is_text_interface()):\n return _create_text_help_str(cls, cls._TEXT_USAGE)\n else:\n return cls._GRAPHICAL_USAGE", "def command_help(self, bot, update):\n\n messages = [\n 'Available commands:',\n '/who - Who is Myles?',\n '/where - Where is Myles?',\n '/tweet - What was the last tweet Myles sent?',\n '/photo - What was the last Instagram photo Myles took?',\n '/web - Where can I find Myles on the interwebs?',\n ]\n\n self.send_messages(bot, update, messages)", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-t\", \"--test\", dest=\"test\", type=\"string\",\n help=\"supply help\")\n\n parser.add_option(\"--method\", dest=\"method\", type=\"choice\",\n choices=(\"metrics\", \"summary\", \"module_summary\"),\n help=\"method to summarise clustering\")\n\n parser.add_option(\"--ref-gtf-files\", dest=\"ref_gtf\", type=\"string\",\n help=\"comma separated list of reference gtf files\")\n\n # add common options (-h/--help, ...) 
and parse command line\n (options, args) = E.Start(parser, argv=argv)\n\n if options.method == \"metrics\":\n infile = argv[-1]\n E.info(\"loading input file: %s\" % infile)\n assert infile\n\n df = pd.read_table(infile,\n sep=\"\\t\",\n header=None,\n index_col=0)\n\n df = df.ix[:, :50]\n cluster_combs = (x for x in itertools.combinations(df.columns,\n 2))\n genes = df.index\n results_dict = {}\n all_clusts = {}\n\n E.info(\"setting up cluster containers\")\n for i in df.columns:\n clusters = set(df[i].values.tolist())\n cluster_dict = {}\n for clust in clusters:\n cluster_dict[clust] = []\n for gene in genes:\n cluster_dict[df[i][gene]].append(gene)\n\n for col in clusters:\n col_set = set()\n clust_col = cluster_dict[col]\n gene_members = itertools.combinations(clust_col,\n 2)\n col_set.update(gene_members)\n cluster_dict[col] = col_set\n all_clusts[i] = cluster_dict\n E.info(\"generating all pair-wise cluster comparisons\")\n E.info(\"calculating adjusted mutual information\")\n for k in cluster_combs:\n clusters1 = all_clusts[k[0]]\n clusters2 = all_clusts[k[1]]\n metric_dict = {}\n metric_dict['AMI'] = TS.adjustedMutualInformation(clusters1,\n clusters2)\n results_dict[k] = metric_dict\n\n res_frame = pd.DataFrame(results_dict).T\n res_frame = res_frame.reset_index()\n res_frame.drop(['level_0'], inplace=True, axis=1)\n res_frame.drop(['level_1'], inplace=True, axis=1)\n\n # flatten rand indices and add to output dataframe\n rand_arrays = TS.randIndexes(df)\n flat_adj_rand = TS.unravel_arrays(rand_arrays[0])\n flat_rand = TS.unravel_arrays(rand_arrays[1])\n res_frame['Rand_Index'] = flat_rand\n res_frame['Adjusted_Rand_Index'] = flat_adj_rand\n E.info(\"aggregating results\")\n\n res_frame.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"summary\":\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n file_dict = {}\n for fle in list_of_files:\n fname = fle.split(\"/\")[-1]\n condition = fname.split(\"-\")[0]\n ref = fname.split(\"-\")[1]\n df_ = pd.read_table(fle,\n sep=\"\\t\",\n header=0,\n index_col=0)\n df_.columns = ['gene_id', 'cluster']\n clust_dict = {}\n for idx in df_.index:\n cluster = df_.loc[idx]['cluster']\n gene = df_.loc[idx]['gene_id']\n try:\n clust_dict[cluster] += 1\n except KeyError:\n clust_dict[cluster] = 1\n med_size = np.median(clust_dict.values())\n file_dict[fname] = {'condition': condition,\n 'reference': ref,\n 'median_cluster_size': med_size}\n\n outframe = pd.DataFrame(file_dict).T\n outframe.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"module_summary\":\n # get lncRNA/gene lengths from reference gtfs\n ref_gtfs = options.ref_gtf.split(\",\")\n length_dict = {}\n for ref in ref_gtfs:\n oref = IOTools.openFile(ref, \"rb\")\n git = GTF.transcript_iterator(GTF.iterator(oref))\n for gene in git:\n for trans in gene:\n length = trans.end - trans.start\n try:\n length_dict[trans.gene_id] += length\n except KeyError:\n length_dict[trans.gene_id] = length\n oref.close()\n\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n fdfs = []\n for fle in list_of_files:\n cond = fle.split(\"/\")[-1].split(\"-\")[0]\n refer = fle.split(\"/\")[-1].split(\"-\")[1]\n _df = pd.read_table(fle, sep=\"\\t\",\n header=0, index_col=0)\n _df.columns = ['gene_id', 'cluster']\n clusters = set(_df['cluster'])\n c_dict = {}\n # summarize over each cluster\n for clust in clusters:\n lengths = []\n c_df = _df[_df['cluster'] == clust]\n for lid in c_df['gene_id']:\n 
lengths.append(length_dict[lid])\n c_dict[clust] = {'cluster_size': len(c_df['gene_id']),\n 'mean_length': np.mean(lengths),\n 'index': (cond, refer),\n 'module': clust}\n cdf = pd.DataFrame(c_dict).T\n # use a multindex for hierarchical indexing\n midx = pd.MultiIndex.from_tuples(cdf['index'])\n cdf.index = midx\n cdf.drop(['index'], inplace=True, axis=1)\n fdfs.append(cdf)\n\n # generate a single output df\n s_df = fdfs[0]\n fdfs.pop(0)\n for df in fdfs:\n s_df = s_df.append(df)\n\n s_df.to_csv(options.stdout,\n index_label=(\"condition\", \"reference\"),\n sep=\"\\t\")\n\n # write footer and output benchmark information.\n E.Stop()", "def help(self):\r\n self._short_help(None, None, None, None)", "def HMC_Help():\n os.system(\"cls\")\n while True:\n print((\"\\n\\n\",\"Help\".center(50)))\n print_list = [\"ManagedSystem\",\"LogicalPartition\",\"VirtualIOServer\",\"Cluster\",\"Performance Capaity Monitoring\",\"Return to Main Menu\"]\n choice = int(print_obj.print_on_screen(print_list))\n directory = os.path.dirname(os.path.dirname(__file__))\n if choice == 1:\n path = directory+\"/help/ManagedSystem\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 2:\n path = directory+\"/help/LogicalPartition\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 3:\n path = directory+\"/help/VirtualIOServer\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 4:\n print((open(directory+\"/help/Cluster.txt\").read()))\n elif choice == 5:\n print((open(directory+\"/help/PerformanceCapacityMonitoring.txt\").read()))\n elif choice == 6:\n os.system(\"cls\")\n return\n else:\n print(\"\\nTry using Valid option\")\n back_to_menu()", "def _help(self):\n self.onecmd('help')", "def EnableCNML(mlu_id=0):\n global option\n option['device'] = 'CNML'\n option['device_id'] = mlu_id", "def cmd_help(ctx):\n echo(ctx.parent.get_help())", "def _usage_options_example(self):\n pass", "def usage():\n print 'LisfloodPy - Lisflood using pcraster Python framework'\n print 'Authors: ', __authors__\n print 'Version: ', __version__\n print 'Date: ', __date__\n print 'Status: ', __status__\n print \"\"\"\n Arguments list:\n settings.xml settings file\n\n -q --quiet output progression given as .\n -v --veryquiet no output progression is given\n -l --loud output progression given as time step, date and discharge\n -c --check input maps and stack maps are checked, output for each input map BUT no model run\n -h --noheader .tss file have no header and start immediately with the time series\n -t --printtime the computation time for hydrological modules are printed\n \"\"\"\n sys.exit(1)", "async def help(self, ctx):\n self.log_command_call(\"help\", ctx.message)\n await ctx.send(HELP_TEXT)\n embed_output = create_embed(description=MORE_INFO_TEXT)\n await ctx.send(embed=embed_output)", "def help():\n \n pass", "def print_usage():\n usage_msg = \"\"\"\n%s.py -H <host or group> -P <path> -M <mode>\n\nUsage:\n -h, --help\n Print detailed help screen\n -H, --hostname=STRING\n Host name or group of hosts\n -V, --version\n Print version information\n -P, --path=STRING\n Path to rancid var directory. Usually the dir contains a logs dirs and hostgroup dirs\n Example : /usr/local/rancid/var\n -M, --mod=STRING\n Plugin mod. 
Must be one of the following : ping, hash, config, cards, filter, qos\n *ping:\n Check if all host in the hostgroup are up from the rancid point of view.\n It uses the .up file to determine the lists of host to look for\n *hash:\n Check if the firmware hash is different from the ref one (or from the previous one)\n *config:\n Check if the configuration has changed for the host / group (notify diff)\n *cards:\n Specific to 8600 models. Check the hardware cards plugged to the host (notify diff).\n *filter:\n Specific to ES-470. Check the filters (notify diff)\n *qos:\n Specific to ES-470. Check the qos values (notify diff)\n -u, --url=URL\n URL to submit passive results to Shinken Receiver with HTTP\n Need a host and service to send result.\n -a, --passive-host=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n -b, --passive-service=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n\"\"\" % PLUGIN_NAME\n print usage_msg", "def command_help(self):\n print(\"Command \", self)\n print(\"\\t\\thelp (Get help for command)\")\n\n params = self.params.copy()\n del params[\"help\"]\n\n if len(params) == 0:\n print(\"This command has no parameters\")\n return\n\n print(\"Parameters:\")\n for info in params.values():\n print(\" %s\" % info.get_basic_info())\n description = info.get_desc()\n if description != \"\":\n print(textwrap.fill(description,\n initial_indent=\" \",\n subsequent_indent=\" \",\n width=70))", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "def do_config():\n\n tracking = get_tracking()\n for unit in (\"ppm\", \"sec\"):\n\ttunit = unit\n\tif unit == \"sec\":\n\t tunit = \"seconds\"\n\tprint \"multigraph chrony_%s\" % unit\n\tprint \"graph_title NTP (Chrony) Statistics (%s)\" % unit\n\tprint \"graph_vlabel %s\" % unit\n\tprint \"graph_args --base 1000\"\n\tprint \"graph_category time\"\n\tprint \"graph_info NTP (Chrony) tracking statistics (the ones measured in %s)\" % tunit\n\tfor key in tracking[tunit]:\n\t item = tracking[tunit][key]\n\t print \"\"\"%s.label %s\n%s.draw LINE2\n%s.info %s\"\"\" % (key, item[\"label\"], key, key, item[\"label\"])\n\tprint\n return 0", "def cmd_help(self, commands=None, usage=False):\n if commands:\n usage = True\n commands = {self.approx.decmd(c.lower()) for c in commands}\n rejects = commands - self.approx.keys()\n for reject in rejects:\n self.put_pretty(\"No command named %r\" % reject)\n continue\n commands -= rejects\n if self.debug:\n assert not any(self.approx.encmd(r) in self.mod_commands for\n r in rejects)\n assert all(self.approx.encmd(c) in self.mod_commands for\n c in commands)\n if not commands:\n return\n requested = zip(commands, (self.approx[c] for c in commands))\n else:\n requested = self.approx.items()\n help = znc.CTable()\n help.AddColumn(\"Command\")\n help.AddColumn(\"Usage\" if usage else \"Description\")\n from itertools import zip_longest\n #\n for command, parser in requested:\n if usage:\n upre = \"usage: %s\" % command\n rest = (parser.format_usage()\n .replace(upre, \"\", 1)\n .replace(\"[-h] \", \"\", 1))\n desc = [l.strip() for l in rest.split(\"\\n\") if l.strip()]\n else:\n desc = [parser.description]\n for line, comm in zip_longest(desc, (command,), fillvalue=\"\"):\n help.AddRow()\n help.SetCell(\"Command\", comm)\n help.SetCell(\"Usage\" if usage else \"Description\", line)\n #\n s_line = znc.String()\n strung = []\n while help.GetLine(len(strung), s_line):\n strung.append(s_line.s)\n also = \" 
(<command> [-h] for details)\"\n strung[1] = strung[1].replace(len(also) * \" \", also, 1)\n self.put_pretty(\"\\n\".join(strung))", "def get_help(self) -> None: \n print(messages.get_help())", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def explainerdashboard_cli(ctx):", "def parallel_cl(desc, additional_cmds=None):\n desc = desc or 'Run jobs and view their statuses.'\n parser = argparse.ArgumentParser(description=desc)\n\n parser.add_argument(\n '--pdb', action='store_true',\n help=\"If supplied, enter post-mortem debugging on error.\")\n\n subparsers = parser.add_subparsers()\n\n run_parser = subparsers.add_parser('run', help='Run a job.')\n run_parser.add_argument('path', type=str)\n run_parser.add_argument('pattern', type=str)\n run_parser.add_argument('indices', nargs='*', type=int)\n run_parser.add_argument('--idx-in-node', type=int, default=-1)\n run_parser.add_argument('--tasks-per-node', type=int, default=-1)\n run_parser.add_argument('--gpu-set', type=str, default=\"\")\n run_parser.add_argument('--ignore-gpu', action=\"store_true\")\n run_parser.add_argument(\n '--force', action='store_true',\n help=\"If supplied, run the selected operators even \"\n \"if they've already been completed.\")\n run_parser.add_argument(\n '--output-to-files', action='store_true',\n help=\"If supplied, output is stored in files rather than being printed.\")\n run_parser.add_argument(\n '-v', '--verbose', action='count', default=0, help=\"Increase verbosity.\")\n\n run_parser.set_defaults(func=run_command)\n\n view_parser = subparsers.add_parser('view', help='View status of a job.')\n view_parser.add_argument('path', type=str)\n view_parser.add_argument(\n '-v', '--verbose', action='count', default=0, help=\"Increase verbosity.\")\n\n view_parser.set_defaults(func=view_command)\n\n subparser_names = ['run', 'view']\n\n additional_cmds = additional_cmds or []\n for sub_command in additional_cmds:\n subparser_names.append(sub_command.name)\n cmd_parser = subparsers.add_parser(sub_command.name, help=sub_command.help)\n for args, kwargs in sub_command.arguments:\n cmd_parser.add_argument(*args, **kwargs)\n cmd_parser.set_defaults(func=sub_command.function)\n\n args, _ = parser.parse_known_args()\n\n try:\n func = args.func\n except AttributeError:\n raise ValueError(\n \"Missing ``command`` argument to script. Should be one of \"\n \"{}.\".format(subparser_names))\n\n pdb = args.pdb\n del args.pdb\n del args.func\n print(vars(args))\n\n if pdb:\n with ipdb_postmortem():\n func(**vars(args))\n else:\n func(**vars(args))", "def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)", "def usage(app_name):\n global version\n print '\\npython {0} -a MediaLive_ARN -n Dashboard_Name [Optional parameters]\\n'.format(app_name)\n print 'Version:', version\n print '\\nThis script creates a CloudWatch Dashboard for a MediaLive/MediaPackage workflow.'\n print \"It uses the MediaLive Channel Arn as input and determines the MediaPackage instances from the \"\n print \"MediaLive channel configuration. 
It then creates the CloudWatch Dashboard that contains info on the\"\n print \"MediaLive channel, the two MediaPackage channels, and all of the MediaPackage endpoints.\"\n print \"\\nRequired parameters:\"\n print \"-a, --arn: MediaLive Channel ARN\"\n print \"-n, --name: Name for the CloudWatch Dashboard. \"\n print \"\"\n print \"Optional parameters\"\n print \"-l, --list: Filename of a file that contains a list of MediaLive Channel ARNs, 1 ARN per line. \"\n print \" All MediaLive channels and their corresponding MediaPackage channels will be included in \"\n print \" the CloudWatch Dashboard.\"\n print \" Note: This parameter is ignored if a channel ARN is provided via the '-a/--arn' option\"\n print \" Note: All ARNs in the list must be for channels in the same region. All ARNs not in the same\"\n print \" region as the first ARN in the list will be ignored.\"\n print '-h, --help: Print this help and exit.'\n print \"\"\n print 'Examples:'\n print \"\"\n print 'Using MediaLive ARN arn:aws:medialive:us-west-2:0123456789:channel:123456 and create a CloudWatch ' \\\n 'Dashboard called \"My TV Dashboard\"'\n print 'python {0} -a arn:aws:medialive:us-west-2:0123456789:channel:123456 ' \\\n '-n \"My TV Dashboard\" '.format(app_name)\n print \"\"\n print 'Using the MediaLive Channel ARN list defined in the text file \"My EML arns.txt\" create a CloudWatch' \\\n 'Dashboard called \"Primary Bouquet\".'\n print 'python {0} -l \"My EML arns.txt\" -n \"Primary Bouquet\"\\n'.format(app_name)", "def usage():", "def usage():", "def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload 
autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc", "def test_cli_help(self):\n output = self.update_command('-h')", "def usage(self):\n self._usage1()\n print 'folder COOL_channel COOL_tag ROOT_file'\n self._usage2()", "def main(argv):\n version = \"0.1.2\"\n interval = 1\n max_run_time = 0\n finished = 0\n first_time = 1\n output_file = 0\n output_file_enabled = 0\n output_path = 0\n header_row = 1\n\n #*** Get the hostname for use in filenames etc:\n hostname = socket.gethostname()\n\n #*** Start by parsing command line parameters:\n try:\n opts, args = getopt.getopt(argv, \"hu:m:ni:w:Wb:jv\",\n [\"help\",\n \"url=\",\n \"max-run-time=\",\n \"no-keepalive\",\n \"interval=\",\n \"output-file=\",\n \"output-path=\",\n \"no-header-row\",\n \"version\"])\n except getopt.GetoptError as err:\n print \"mosp: Error with options:\", err\n print_help()\n sys.exit(2)\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n print_help()\n sys.exit()\n elif opt in (\"-v\", \"--version\"):\n print 'mosp.py version', version\n sys.exit()\n elif opt in (\"-m\", \"--max-run-time\"):\n max_run_time = float(arg)\n elif opt in (\"-i\", \"--interval\"):\n interval = float(arg)\n elif opt in (\"-w\", \"--output-file\"):\n output_file = arg\n output_file_enabled = 1\n elif opt == \"-W\":\n output_file = \"mosp-\" + hostname + \"-\" + \\\n time.strftime(\"%Y%m%d-%H%M%S.csv\")\n output_file_enabled = 1\n elif opt in (\"-b\", \"--output-path\"):\n output_path = arg\n elif opt in (\"-j\", \"--no-header-row\"):\n header_row = 0\n\n print \"\\nMeasure Operating System Performance (mosp) version\", \\\n version\n\n #*** Display output filename:\n if output_file_enabled:\n if output_path:\n output_file = os.path.join(output_path, output_file)\n print \"Results filename is\", output_file\n else:\n print \"Not outputing results to file, as option not selected\"\n\n if not header_row:\n print \"Not writing a header row to CSV\"\n\n #*** Use this if max_run_time is set:\n initial_time = time.time()\n\n #*** Instantiate classes:\n cpus = CPUs()\n swap = Swap()\n nics = NICs()\n\n #*** Start the loop:\n while not finished:\n timenow = datetime.datetime.now()\n timestamp = timenow.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]\n start_time = time.time()\n\n #*** Update CPU measurements:\n cpus.update()\n\n #*** Update swap measurements:\n swap.update()\n\n #*** Update network measurements:\n nics.update()\n\n #*** Put the stats into a nice string for printing and\n #*** writing to file:\n result_csv = str(timestamp) + \",\" \\\n + cpus.csv() \\\n + swap.csv() \\\n + nics.csv() \\\n + \"\\n\"\n result_kvp = str(timestamp) + \" \" \\\n + cpus.kvp() \\\n + swap.kvp() \\\n + nics.kvp()\n print result_kvp\n if output_file_enabled:\n #*** Header row in CSV:\n if first_time and header_row:\n #*** Write a header row to CSV:\n header_csv = \"time,\" + cpus.csv_header(hostname) + \\\n swap.csv_header(hostname) + \\\n nics.csv_header(hostname) + \\\n \"\\n\"\n first_time = 0\n with open(output_file, 'a') as the_file:\n the_file.write(header_csv)\n\n #*** Write a data row to CSV:\n with open(output_file, 'a') as the_file:\n the_file.write(result_csv)\n\n if max_run_time:\n if (start_time - initial_time) > max_run_time:\n break\n\n #*** Sleep for interval seconds:\n time.sleep(interval)", "def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n 
exitcode=None, show_cmd=None, show_global_flags=False):\n printer('%s: Incorrect usage; details below.' % show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)", "def help(self, *args):\n for _, v in self.useage.items():\n print v.__doc__", "def help():", "def metrics_flash(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_flash(cmd_ctx, cpc, options))", "def help_option(args, run):\n pass", "def help(bot, sender, sendmsg, label, args):\n\n clist = commands.commands\n csort = sorted(clist.values(), key=lambda c: c.__name__.lower())\n\n if len(args) > 0:\n page = int(args[0]) - 1\n else:\n page = 0\n\n pages = len(clist) // 10 + 1\n\n sendmsg(\"-- Help (Page {} of {}) --\".format(page + 1, pages))\n for i in range(10):\n if i >= len(csort):\n break\n\n command = csort[i + (page * 10)]\n sendmsg(\"{}: {}\".format(command.__name__, command.__doc__))", "def help_command(update: Update, _: CallbackContext) -> None:\n update.message.reply_text(\"These are the list of commands supported. \\n\\n /deadlines. \\n\\n \"\n \"Hey! I am still being enhanced, more features to come...!\")", "def main():\n parser = argparse.ArgumentParser(description='investigate code health and random statistics')\n sub_parsers = parser.add_subparsers(dest='command_name', title='Commands', help='', metavar='<command>')\n\n sub = sub_parsers.add_parser('line-count', help='list line counts')\n sub.add_argument('files', nargs='+', help='files or folders to look in')\n sub.add_argument('--each', type=int, default=1)\n sub.add_argument('--show', action='store_true')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_line_count)\n\n sub = sub_parsers.add_parser('include-list', help='list headers from files')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--print', dest='print_files', action='store_true')\n sub.add_argument('--print-stats', dest='print_stats', action='store_true')\n sub.add_argument('--print-max', dest='print_max', action='store_true')\n sub.add_argument('--no-list', dest='print_list', action='store_false')\n sub.add_argument('--count', default=2, type=int, help=\"only print includes that are more or equal to <count>\")\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.set_defaults(func=handle_list)\n\n sub = sub_parsers.add_parser('include-gv', help='generate a graphviz of the includes')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.add_argument('--group', action='store_true', help=\"group output\")\n sub.add_argument('--cluster', action='store_true', help=\"group output into clusters\")\n sub.set_defaults(func=handle_gv)\n\n sub = 
sub_parsers.add_parser('list-indents', help='list the files with the maximum indents')\n sub.add_argument('files', nargs='+')\n sub.add_argument('--each', type=int, default=1, help='group counts')\n sub.add_argument('--show', action='store_true', help='include files in list')\n sub.add_argument('--hist', action='store_true', help='show simple histogram')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_list_indents)\n\n sub = sub_parsers.add_parser('missing-pragma-once', help='find headers with missing include guards')\n sub.add_argument('files', nargs='+')\n sub.set_defaults(func=handle_missing_include_guards)\n\n sub = sub_parsers.add_parser('missing-in-cmake', help='find files that existis on disk but missing in cmake')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_missing_in_cmake)\n\n sub = sub_parsers.add_parser('list-no-project-folders', help='find projects that have not set the solution folder')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_list_no_project_folder)\n\n sub = sub_parsers.add_parser('check-files', help=\"find files that doesn't match the name style\")\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_check_files)\n\n args = parser.parse_args()\n if args.command_name is not None:\n args.func(args)\n else:\n parser.print_help()", "def help():\n print(UI.HELP)", "def cli_help(self):\n self._generate_cli_version()\n self._generate_cli_help()\n sys.exit(0)", "def _show_help(self):\r\n info = {\"/contexts/<context>/[orgs/[<org_name>]]/[spaces/[<space_name>]]\": \"reports\",\r\n \"/contexts/<context>/orgs_metadata/[<org_name>]\": \"metadata\",\r\n \"/contexts/<context>/orgs/<org_name>/director\": \"org/director mapping\",\r\n \"/reader_status\": \"status of Bitbucket reader cache\"}\r\n if self._cache_refresh:\r\n info['/refresh'] = \"force cache refresh from BitBucket\"\r\n return info", "def cmd_HELP(self, line):\r\n configs = [UserOptions(self.terminal), ContainerOptions(self.terminal),\r\n NodeOptions(self.terminal), ParameterOptions(self.terminal),\r\n InterfaceOptions(self.terminal),\r\n ConnectionOptions(self.terminal),\r\n RobotOptions(self.terminal), MachineOptions(self.terminal)]\r\n\r\n for config in configs:\r\n self.terminal.nextLine()\r\n config.opt_help()", "def help_usage(self):\n\t\thelptext = \"\"\"\nUSAGE\n==========\n1.) connect to server:\n\tWhen starting p22p, you dont automatically connect to a server.\n\tTo do this, use the 'connect'-command.\n\tWithout additional arguements, p22p will connect to {default}.\n\tIf you want to connect to a other server, use the following syntax:\n\t\tconnect PROTO://SERVER:PORT\n\twhere PROTO is either 'ws' or 'wss'. 'wss' is a SSL/TLS connection, ws a insecure connection.\n\tNote that the communication between to clients is always CBC-encrypted (additionaly to other encryption methods.)\n\tThe CBC-password will never be sent to the server.\n\tThe Server only receives a hash of the password.\n\n2.) join or create a Group\n\tp22p is using Group as Network-Namespaces.\n\tEach Groupmember has a unique CID. 
However, the CID is only unique in the Group and only unique during that clients connection.\n\tTo create a new Group, use the 'create'-command:\n\t\tcreate NAME PASSWORD [KEYFILE]\n\tThe server only receives a hash of the PASSWORD.\n\tNote that groupnames starting with a \"#\" are reserved (You cant create them except if you have the key).\n\tIf you want to create a reserved group, pass the path to the keyfile.\n\tWhen creating a Group, you will automatically join that Group.\n\t\n\tTo join a Group, use the 'join'-command:\n\t\tjoin NAME PSWD\n\tThe Server only reveives a hash of the Password.\n\n3.) relay a Port\n\tTo relay a port from your Device to a target device, use the 'relay'-command:\n\t\trelay PEER [LOCAL] REMOTE\n\tIf LOCAL is 0 or ommited, a free port is choosen.\n\tThis Command will create a socket listening to Port LOCAL on your DEVICE.\n\tOnce a connection is made to that Port, P22P will send a message to PEER, telling him to create a connection to Port REMOTE.\n\tAll data sent trough this connection will be encrypted with the Group's Password.\n\tThe Server only knows the hash of the password, meaning only Groupmembers know how to decrypt the Message.\n\tThe Server knows who should receive this message and sends it to only that Client.\n\n4.) Leaving a Group\n\tOnce you are finished, you can leave the Group.\n\tThis will close all connections to peers and free your CID.\n\tAll Groupmembers will receive a message that you left the Group.\n\tto leave a Group, use thr 'leave'-command.\n\n5.) Disconnecting\n\tIf you want to disconnect from the Server, use the 'disconnect'-command.\n\tThis will close all connections and also auto-leaves the Group (see 4.)\n\n6.) Exiting\n\tTo close this script, use the 'exit'-command.\n\tIf required, the 'disconnect'-command is invoked.\n\n7.) Additional commands\n\tTo get a list of all aviable commands, use the 'help'-command.\n\tTo get a description about a command, use the gollowing syntax:\n\t\thelp COMMAND\n\tHere are some useful commands:\n\t\tping PEER: pings a peer (not the Server.)\n\t\tlist: shows a list of all connections and relayed ports. 
also shows some information.\n\t\tcid: shows your current CID.\n\"\"\".format(default=DEFAULT_SERVER)\n\t\tself.stdout.write(helptext)", "def help_command(server, output, conf):\n server.tell(output.name, 'Available commands:')\n for key in COMMANDS.keys():\n cmd_func = COMMANDS[key]\n if cmd_func.__doc__:\n server.tell(output.name, '%s: %s' % (key[1:], cmd_func.__doc__))\n else:\n server.tell(output.name, key[1:])\n return", "async def help(self, ctx, *cog):\n try:\n if not cog:\n halp = discord.Embed(title='Useless\\' Commands',\n description='Use `!help *category*` to find out more about the commands in them!')\n cogs_desc = ''\n for x in self.bot.cogs:\n cogs_desc = f'{x}'\n cmds = ''\n for cmd in self.bot.get_cog(x).get_commands():\n if not cmd.hidden:\n cmds += f'`{cmd.name}`, '\n if cmds != '':\n halp.add_field(name= cogs_desc,\n value=f'{cmds[0:-2]}',\n inline=False)\n cmds_desc = ''\n for y in self.bot.walk_commands():\n if not y.cog_name and not y.hidden:\n cmds_desc += ('`{}` - {}'.format(y.name, y.help) + '\\n')\n if cmds_desc != '':\n halp.add_field(name='Uncatergorized Commands',\n value=cmds_desc[0:len(cmds_desc) - 1],\n inline=False)\n await ctx.send(embed=halp)\n else:\n if len(cog) > 1:\n halp = discord.Embed(title='Error!',\n description='I can only help with 1 category!',\n color=discord.Color.red())\n await ctx.send(embed=halp)\n else:\n found = False\n for x in self.bot.cogs:\n for y in cog:\n if x == y:\n halp = discord.Embed(\n title=cog[0] + ' Command Listing',\n description=self.bot.cogs[cog[0]].__doc__)\n for c in self.bot.get_cog(y).get_commands():\n if not c.hidden:\n halp.add_field(name=c.name,\n value=c.help,\n inline=False)\n found = True\n if not found:\n halp = discord.Embed(title='Error!',\n description='How do you even use \"' +\n cog[0] + '\"?',\n color=discord.Color.red())\n await ctx.send('', embed=halp)\n\n except:\n print('Pass')\n pass", "def help(self):\n pass", "def help(self):\n pass", "def printCLIHelp():\n \n cmd = os.path.basename(sys.argv[0])\n print \"\"\"\n - quickCurve - \n\nPerform a liklihood analysis on Fermi LAT data. You can use the\ncommand line functions listed below or run this module from within\npython. For full documentation on this module execute 'pydoc\nquickCurve'.\n \n%s (-h|--help) ... This help text.\n \n%s (-i|--initialize) ... Generate a default config file called\n example.cfg. Edit this file and rename it <basename>.cfg for use\n in the quickLike module.\n\n%s (-a|--analyze) (-n |--basename=)<basename> ... Perform an analysis\n on <basename>. 
<basename> is the prefix used for this analysis.\n You must already have a configuration file if using the command\n line interface.\n\n\"\"\" %(cmd,cmd,cmd)", "def cli(argv):\r\n argv.append(\"--exhaust-materials\")\r\n cltestbench.cli(argv)", "def help(self):\n\t\treturn", "def measure(self,command_exe, command_args, measure_out):\n pass", "def about( cls, ):\n url = r\"http://www.opencircuits.com/Python_Smart_Terminal\"\n __, mem_msg = cls.show_process_memory( )\n msg = ( f\"{cls.controller.app_name} version:{cls.controller.version} \\nmode: {cls.parameters.mode}\"\n f\"\\n by Russ Hensel\"\n f\"\\nMemory in use {mem_msg} \\nCheck <Help> or \\n{url} \\nfor more info.\" )\n messagebox.showinfo( \"About\", msg )", "def metrics_roce(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_roce(cmd_ctx, cpc, options))", "def run(self, line):\n LOGGER.info(\"Scalable PMEM: {}\".format(self.name))\n try:\n (options, _) = self._parse_arglist(line)\n except:\n if (\"-h\" in line) or (\"--help\" in line):\n return ReturnCodes.SUCCESS\n else:\n raise InvalidCommandLineErrorOPTS(\"\")\n\n if len(args):\n InvalidCommandLineError(\"This command takes no parameters.\")\n\n LOGGER.info(\"Options: {}\".format(options))\n\n if not self._chif_lib:\n self._helpers.failNoChifLibrary()\n\n enable = True\n if options.enableFeature is False:\n enable = False\n\n self.enableOrDisableFeature(enable)\n\n #Return code\n return ReturnCodes.SUCCESS", "def help(update, context):\n msg = \"\"\n msg += \"\\n/covid 7-Day-Incident per Million\"\n msg += \"\\n/daylio What did I do a year ago today?\"\n msg += \"\\n/f1last Results of the last race\"\n msg += \"\\n/f1stand Driver standings\"\n msg += \"\\n/f1next Time and place of the next race\"\n msg += \"\\n/fuel prices and consump. 
(args: Xeur Ykm)\"\n msg += \"\\n/ip Outside ip address\"\n msg += \"\\n/rate Exchange rates (args: Xeur/Yhuf)\"\n msg += \"\\n/rss check rss feeds for new content\"\n msg += \"\\n/sun Time of sunrise and sunset\"\n msg += \"\\n/xkcd Sends last comic image and alt\"\n msg.rstrip()\n update.message.reply_text(msg)", "def cli():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"tic\", help=\"TIC number\")\n parser.add_argument(\"-L\", \"--LIST\", help=\"Only fit the LC\", action=\"store_true\")\n parser.add_argument(\"-S\", \"--SAVEGAIA\", help=\"Save Gaia sources\", action=\"store_true\")\n parser.add_argument(\"-C\", \"--COORD\", help=\"Use coordinates\", default=False)\n parser.add_argument(\"-n\", \"--name\", help=\"Target name to be plotted in title\", default=False)\n parser.add_argument(\"-D2\", \"--DR2\", help=\"Use Gaia DR2 catalog instead of DR3\", action=\"store_true\")\n parser.add_argument(\"-PM\", \"--PM\", help=\"Add proper motion direction arrows in the plot\", action=\"store_true\")\n parser.add_argument(\"--maglim\", default=5., help=\"Maximum magnitude contrast respect to TIC\")\n parser.add_argument(\"--sector\", default=None, help=\"Select Sector if more than one\")\n parser.add_argument(\"--gid\", default=None, help=\"Gaia ID\")\n parser.add_argument(\"--gmag\", default=None, help=\"Gaia mag\")\n parser.add_argument(\"--sradius\", default=10., type=float, help=\"Search radius (in arcsec) for the get_gaia_data function\")\n parser.add_argument(\"--legend\", default='best', help=\"Legend location\")\n args = parser.parse_args()\n return args", "def usage(msg):\n ap.print_usage()\n print \"-\"*40\n print msg\n exit(1)", "def parsing_arguments(args=None):\n description = ''\n parser = argparse.ArgumentParser(\n prog='hatchet plot-cn',\n description=description,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n parser.add_argument('INPUT', help='One or more space-separated files in CN_BBC format')\n parser.add_argument(\n '-n',\n '--patientnames',\n required=False,\n default=config.plot_cn.patientnames,\n type=str,\n help='One or more space-separated patient names (default: inferred from filenames)',\n )\n parser.add_argument(\n '-u',\n '--minu',\n required=False,\n default=config.plot_cn.minu,\n type=float,\n help='Minimum proportion of a CNA to be considered subclonal (default: 0.2)\"',\n )\n parser.add_argument(\n '-x',\n '--rundir',\n required=False,\n default=config.plot_cn.rundir,\n type=str,\n help='Running directory (default: current directory)',\n )\n parser.add_argument(\n '-b',\n '--baseCN',\n required=False,\n default=config.plot_cn.basecn,\n type=int,\n help='Base copy number (default: inferred from tumor ploidy)',\n )\n parser.add_argument(\n '-sC',\n '--figsizeclones',\n required=False,\n default=config.plot_cn.figsizeclones,\n type=str,\n help='Size of clone plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-sP',\n '--figsizecn',\n required=False,\n default=config.plot_cn.figsizecn,\n type=str,\n help='Size of CN plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-sG',\n '--figsizegrid',\n required=False,\n default=config.plot_cn.figsizegrid,\n type=str,\n help='Size of grid plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-rC',\n '--resolutionclones',\n required=False,\n default=config.plot_cn.resolutionclones,\n type=int,\n help='Number of bins to merge together for plotting clone profiles (default: 100)\"',\n )\n parser.add_argument(\n '-rP',\n '--resolutioncn',\n 
required=False,\n default=config.plot_cn.resolutioncn,\n type=int,\n help='Number of bins to merge together for plotting proportions (default: 500)\"',\n )\n parser.add_argument(\n '-rG',\n '--resolutiongrid',\n required=False,\n default=config.plot_cn.resolutiongrid,\n type=int,\n help='Number of bins to merge together in grids (default: 100)\"',\n )\n parser.add_argument(\n '-e',\n '--threshold',\n required=False,\n default=config.plot_cn.threshold,\n type=float,\n help='Threshold used to classify a tumor into either diploid or tetraploid (default: 3.0)\"',\n )\n parser.add_argument(\n '--ymax',\n required=False,\n default=config.plot_cn.ymax,\n type=int,\n help='Maximum values in y-axis (default: automatically inferred)\"',\n )\n parser.add_argument(\n '--ymin',\n required=False,\n default=config.plot_cn.ymin,\n type=int,\n help='Minimum values in y-axis (default: automatically inferred)\"',\n )\n parser.add_argument(\n '--clonepalette',\n required=False,\n default=config.plot_cn.clonepalette,\n type=str,\n help='Palette for coloring the clones among Set1, Set2, Set3, Paired (default: Set1)\"',\n )\n parser.add_argument(\n '--linkage',\n required=False,\n default=config.plot_cn.linkage,\n type=str,\n help=(\n 'Linkage method used for clustering (default: single, available (single, complete, average, weighted, '\n 'centroid, median, ward) from SciPy)\"'\n ),\n )\n parser.add_argument('-V', '--version', action='version', version=f'%(prog)s {__version__}')\n args = parser.parse_args(args)\n\n if len(args.INPUT.split()) == 0:\n raise ValueError(error('Please specify at least one sample as input!'))\n if args.patientnames is None:\n patientnames = {fil: os.path.basename(fil) for fil in args.INPUT.split()}\n else:\n patientnames = {f: n for f, n in zip(args.INPUT.split(), args.patientnames.split())}\n if len(args.INPUT.split()) != len(set(patientnames.values())):\n raise ValueError(error('Multiple patients have the same name but they should unique!'))\n if args.figsizeclones is not None:\n figsizeclones = to_tuple(args.figsizeclones, error_message='Wrong format of figsizeclones!')\n if args.figsizecn is not None:\n figsizecn = to_tuple(args.figsizecn, error_message='Wrong format of figsizecn!')\n if args.figsizegrid is not None:\n figsizegrid = to_tuple(args.figsizegrid, error_message='Wrong format of figsizegrid!')\n\n if not os.path.isdir(args.rundir):\n raise ValueError(error('Running directory does not exist!'))\n if not 0.0 <= args.minu <= 1.0:\n raise ValueError(error('The minimum proportion for subclonal CNAs must be in [0, 1]!'))\n if args.baseCN is not None and args.baseCN < 2:\n raise ValueError(error('Base CN must be greater or equal than 2!'))\n if args.resolutionclones is not None and args.resolutionclones < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.resolutioncn is not None and args.resolutioncn < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.resolutiongrid is not None and args.resolutiongrid < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.threshold < 0:\n raise ValueError(error('Threshold must be positive!'))\n if args.linkage not in {\n 'single',\n 'complete',\n 'average',\n 'weighted',\n 'centroid',\n 'median',\n 'ward',\n }:\n raise ValueError(error('Unknown linkage method!'))\n\n if args.clonepalette == 'Set1':\n pal = plt.cm.Set1\n elif args.clonepalette == 'Set2':\n pal = plt.cm.Set2\n elif args.clonepalette == 'Set3':\n pal = plt.cm.Set3\n elif args.clonepalette == 'Paired':\n 
pal = plt.cm.Paired\n else:\n raise ValueError(error('Unknown clone palette!'))\n\n return {\n 'input': args.INPUT.split(),\n 'names': patientnames,\n 'rundir': args.rundir,\n 'minu': args.minu,\n 'base': args.baseCN,\n 'clonefigsize': figsizeclones,\n 'propsfigsize': figsizecn,\n 'clusterfigsize': figsizegrid,\n 'profileres': args.resolutionclones,\n 'cnres': args.resolutioncn,\n 'clusterres': args.resolutiongrid,\n 'threshold': args.threshold,\n 'linkage': args.linkage,\n 'ymax': args.ymax,\n 'ymin': args.ymin,\n 'clonepalette': pal,\n }", "def help(self, msg=None):\n\n # Print the message if given.\n if not msg == None:\n print str(msg) + \"\\n\"\n\n # Display the list of commands, in the alphabetical order.\n print \"Use one of the following commands:\"\n for action in sorted(self.actions.keys()):\n info = self.actions[action]\n joined_oblig = ' '.join(info['required'])\n if len(info['additional']) > 0:\n add = [\"<%s>\" % x for x in info['additional']]\n joined_add = '[' + ' '.join(add) + ']'\n else:\n joined_add = ''\n print \"\\t* %s %s %s\" % (action, joined_oblig, joined_add)", "def main():\n\n parser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n parser.add_argument('com', type=str, help='plot requests file name', nargs='?', default=\"/tmp/foo.c\")\n parser.add_argument('comout', type=str, help='COMOUT file name', nargs='?', default=\"zoom\")\n\n args = parser.parse_args()\n\n cmd = {} # dictionary of commands\n\n bas = False\n plane = False\n \n with open(args.com) as f:\n for line in f.readlines():\n words = line.strip().split()\n if len(words) is 0:\n continue\n\n for i,w in enumerate(words):\n if re.search(\"^bas\", w):\n cmd['bas'] = list(map(float, words[i+1:i+7]))\n if plane is False: bas = True # basis was before plane cuts\n elif re.search(\"^or\", w):\n cmd['or'] = list(map(float, words[i+1:i+4]))\n elif re.search(\"^ex\", w):\n try: # both x and y scales are given\n cmd['ex'] = list(map(float, words[i+1:i+3]))\n continue\n except ValueError: # just 1 scale is given\n cmd['ex'] = list(map(float, words[i+1:i+2]))\n elif re.search(\"^lab\", w):\n cmd['label'] = list(map(int, map(float, words[i+1:i+3]))) #+ [words[i+3]]\n elif re.search(\"^p[xyz]\", w):\n cmd[w] = [float(words[i+1])]\n if bas is False: plane = True # plane cuts were before basis\n elif re.search(\"^legend\", w):\n cmd[w] = [words[i+1]]\n elif w == \"scale\":\n print(w)\n if int(words[i+1]): # no need to put 'scale 0'\n cmd[w] = [words[i+1]]\n elif w in (\"mesh\"):\n if int(words[i+1])==1: # no need to put 'mesh 1'\n cmd[w] = [words[i+1]]\n\n print(bas, plane)\n\n if plane: # bas was first\n keys = ('bas', 'or', 'ex', 'px', 'py', 'pz', 'label', 'mesh', 'legend', 'scale')\n elif bas:\n keys = ('or', 'ex', 'px', 'py', 'pz', 'bas', 'label', 'mesh', 'legend', 'scale')\n else:\n keys = {'or', 'ex', 'label', 'mesh', 'legend', 'scale'}\n \n with open(args.comout, 'w') as f:\n for key in keys:\n if key in cmd:\n # newline required by mcplot:\n if key in ('mesh', 'legend', 'scale', 'label'):\n f.write(\"\\n\")\n f.write(\"%s %s \" % (key,\" \".join(str(e) for e in cmd[key]),))\n f.write(\"\\n\")", "def help(self):\n res = \"\"", "def print_usage_command(self):\n print self.get_usage_command()", "def print_usage_command(self):\n print self.get_usage_command()", "def main(args = sys.argv):\n\n parser = parser_setup()\n poptions = parser.parse_args()\n\n if poptions.quiet:\n 
logging.basicConfig(level=logging.WARNING, format=log_format)\n elif poptions.debug:\n logging.basicConfig(level=logging.DEBUG, format=log_format)\n else:\n # Set up the default logging levels\n logging.basicConfig(level=logging.INFO, format=log_format)\n # Make this a little less noisy by default\n requests_log = logging.getLogger(\"requests.packages.urllib3.connectionpool\")\n requests_log.setLevel(logging.WARN)\n\n if not poptions.base_api_url and \"LIMS_API_URL\" in os.environ:\n api_url = os.environ[\"LIMS_API_URL\"]\n log.debug(\"Using LIMS API endpoint: %s from environment\" % api_url)\n elif poptions.base_api_url:\n api_url = poptions.base_api_url\n log.debug(\"Using LIMS API endpoint: %s from options\" % api_url)\n else:\n sys.stderr.write(\"Could not find LIMS API URL.\\n\")\n sys.exit(1)\n\n\n if not poptions.token and \"LIMS_API_TOKEN\" in os.environ:\n token = os.environ[\"LIMS_API_TOKEN\"]\n elif poptions.token:\n token = poptions.token\n else:\n sys.stderr.write(\"Could not find LIMS API TOKEN.\\n\")\n sys.exit(1)\n\n monitor = ClusterMonitor(api_url, token, cluster_type=poptions.cluster)\n\n monitor.run()", "def do_help(self, line):\n Cmd.do_help(self, line)", "def print_performance_info(self):\n pass", "def help_analyze(self):\n print(ANALYZE)", "def printOptions(opts,subject_ids,session_ids,task_list, run_list, acq, rec):\n uname = os.popen('uname -s -n -r').read()\n print \"\\n\"\n print \"* Pipeline started at \"+time.strftime(\"%c\")+\"on \"+uname\n print \"* Command line is : \\n \"+str(sys.argv)+\"\\n\"\n print \"* The source directory is : \"+opts.sourceDir\n print \"* The target directory is : \"+opts.targetDir+\"\\n\"\n print \"* Data-set Subject ID(s) is/are : \"+str(', '.join(subject_ids))+\"\\n\"\n # print \"* PET conditions : \"+ ','.join(opts.condiList)+\"\\n\"\n print \"* Sessions : \", session_ids, \"\\n\"\n print \"* Tasks : \" , task_list , \"\\n\"\n print \"* Runs : \" , run_list , \"\\n\"\n print \"* Acquisition : \" , acq , \"\\n\"\n print \"* Reconstruction : \" , rec , \"\\n\"" ]
[ "0.56626374", "0.5658966", "0.5620566", "0.55107105", "0.5491458", "0.5334808", "0.53031045", "0.52814096", "0.5276708", "0.52741003", "0.52695084", "0.5258748", "0.5241154", "0.5233588", "0.51980907", "0.5171323", "0.5130707", "0.5097048", "0.5062551", "0.504902", "0.50489867", "0.50389296", "0.5026058", "0.5023283", "0.50151366", "0.50089407", "0.4967782", "0.4966398", "0.49616927", "0.49592665", "0.49482015", "0.49457487", "0.49408635", "0.49200994", "0.49174708", "0.49139494", "0.49062967", "0.4885377", "0.48789614", "0.48788804", "0.48737803", "0.4869046", "0.48665345", "0.48639932", "0.48572642", "0.48491445", "0.48463985", "0.48456246", "0.48423645", "0.48383087", "0.4832461", "0.48245895", "0.4821778", "0.4821547", "0.48197225", "0.480741", "0.48033103", "0.48033103", "0.48004842", "0.4797685", "0.4796005", "0.47922143", "0.479138", "0.47881064", "0.4787109", "0.47729892", "0.47701097", "0.47668794", "0.47649616", "0.47566026", "0.47535592", "0.47503737", "0.47494498", "0.47455868", "0.4744282", "0.4738984", "0.47380048", "0.47347307", "0.47347307", "0.47342587", "0.47325122", "0.4730875", "0.47304004", "0.472999", "0.4724864", "0.4717411", "0.4715871", "0.47149444", "0.47125491", "0.47018862", "0.4696979", "0.46961728", "0.4694499", "0.46934602", "0.46934602", "0.4682476", "0.46796164", "0.46793437", "0.467515", "0.4672467" ]
0.54307204
5
Report usage metrics for active adapters of CPCs in DPM mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_adapter(cmd_ctx, cpc, adapter, **options):
    cmd_ctx.execute_cmd(
        lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))", "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 
2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()", "def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n 
sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break", "async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)", "def show_help(argv=None):\n if argv:\n if \"list_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"list_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ls\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"search_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"search_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm search <keyword>\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"show_mounts\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"show_mounts\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ps\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mount <dataset_name> [<mount_path>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mmount <dataset_name> [<dataset_name> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"unmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"unmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm unmount <mount_id> [<cleanup_flag>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"munmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"munmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm munmount <mount_id> [<mount_id> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"clean\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"clean\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm clean\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n else:\n sdm_util.print_message(\"Unrecognized command\")\n return 1\n else:\n sdm_util.print_message(\"command : sdm <COMMAND> [<COMMAND_SPECIFIC_ARGS> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(\"Available Commands\")\n\n tbl = PrettyTable()\n tbl.field_names = [\"COMMAND\", \"DESCRIPTION\"]\n for cmd in COMMANDS:\n command, _, desc = cmd\n command_str = \" | \".join(command)\n tbl.add_row([command_str, desc])\n\n sdm_util.print_message(tbl)\n sdm_util.print_message(\"\")\n return 0", "def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')", "def metrics(self, account_id):\n from pureport_client.commands.accounts.metrics import Command\n return Command(self.client, 
account_id)", "def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"", "def usage():\n \n print '-b <bench> the bench to show.'\n print '-c <config> the config to show (GPU, 8888, 565, etc).'\n print '-d <dir> a directory containing bench_r<revision>_<scalar> files.'\n print '-e <file> file containing expected bench values/ranges.'\n print ' Will raise exception if actual bench values are out of range.'\n print ' See bench_expectations.txt for data format and examples.'\n print '-f <revision>[:<revision>] the revisions to use for fitting.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-i <time> the time to ignore (w, c, g, etc).'\n print ' The flag is ignored when -t is set; otherwise we plot all the'\n print ' times except the one specified here.'\n print '-l <title> title to use for the output graph'\n print '-m <representation> representation of bench value.'\n print ' See _ListAlgorithm class in bench_util.py.'\n print '-o <path> path to which to write output; writes to stdout if not specified'\n print '-r <revision>[:<revision>] the revisions to show.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).'\n print '-t <time> the time to show (w, c, g, etc).'\n print '-x <int> the desired width of the svg.'\n print '-y <int> the desired height of the svg.'\n print '--default-setting <setting>[=<value>] setting for those without.'", "def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = 
parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging", "def metrics_flash(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_flash(cmd_ctx, cpc, options))", "def _get_metrics_options(metrics):\n metrics_options = []\n if metrics is None:\n metrics = []\n for static_metric in metrics:\n metrics_options += [\n \"-m\",\n static_metric.metric.mp_metric_name,\n str(static_metric.value),\n ]\n return metrics_options", "def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n printer('%s: Incorrect usage; details below.' % show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)", "def usage(self, host):", "def _GenAppcommandsUsage(cmd, printer):\n # pylint: disable=too-many-arguments,unused-argument\n def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n \"\"\"A replacement for app.usage.\"\"\"\n printer('%s: Incorrect usage; details below.' % show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)\n\n return Usage", "def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. 
\r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()", "def help(self, args):\n print('No commands available for this consumer')", "def print_usage_command(self):\n print self.get_usage_command()", "def print_usage_command(self):\n print self.get_usage_command()", "def help_opt(self):\n print(OPTIONS)", "def usage(msgarg):\n if msgarg:\n sys.stderr.write(\"error: %s\\n\" % msgarg)\n print(\"\"\"\\\n usage: %s [options]\n\n options:\n -d increase debug msg verbosity level\n -c N emit N classes (def: 500) per instances\n -I N emit N instances\n\n \"\"\" % os.path.basename(sys.argv[0]))\n sys.exit(1)", "def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)", "def _usage_options_example(self):\n pass", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "def stat_cuda(msg: str) -> None:\n print(f'-- {msg:<35} allocated: %dM, max allocated: %dM, cached: %dM, max cached: %dM' % (\n torch.cuda.memory_allocated() / 1024 / 1024,\n torch.cuda.max_memory_allocated() / 1024 / 1024,\n torch.cuda.memory_cached() / 1024 / 1024,\n torch.cuda.max_memory_cached() / 1024 / 1024\n ))", "def measure(self,command_exe, command_args, measure_out):\n pass", "def help_help(self):\n print(\"List commands or print details about a command\")", "def usage(app_name):\n global version\n print '\\npython {0} -a MediaLive_ARN -n Dashboard_Name [Optional parameters]\\n'.format(app_name)\n print 'Version:', version\n print '\\nThis script creates a CloudWatch Dashboard for a MediaLive/MediaPackage workflow.'\n print \"It uses the MediaLive Channel Arn as input and determines the MediaPackage instances from the \"\n print \"MediaLive channel configuration. 
It then creates the CloudWatch Dashboard that contains info on the\"\n print \"MediaLive channel, the two MediaPackage channels, and all of the MediaPackage endpoints.\"\n print \"\\nRequired parameters:\"\n print \"-a, --arn: MediaLive Channel ARN\"\n print \"-n, --name: Name for the CloudWatch Dashboard. \"\n print \"\"\n print \"Optional parameters\"\n print \"-l, --list: Filename of a file that contains a list of MediaLive Channel ARNs, 1 ARN per line. \"\n print \" All MediaLive channels and their corresponding MediaPackage channels will be included in \"\n print \" the CloudWatch Dashboard.\"\n print \" Note: This parameter is ignored if a channel ARN is provided via the '-a/--arn' option\"\n print \" Note: All ARNs in the list must be for channels in the same region. All ARNs not in the same\"\n print \" region as the first ARN in the list will be ignored.\"\n print '-h, --help: Print this help and exit.'\n print \"\"\n print 'Examples:'\n print \"\"\n print 'Using MediaLive ARN arn:aws:medialive:us-west-2:0123456789:channel:123456 and create a CloudWatch ' \\\n 'Dashboard called \"My TV Dashboard\"'\n print 'python {0} -a arn:aws:medialive:us-west-2:0123456789:channel:123456 ' \\\n '-n \"My TV Dashboard\" '.format(app_name)\n print \"\"\n print 'Using the MediaLive Channel ARN list defined in the text file \"My EML arns.txt\" create a CloudWatch' \\\n 'Dashboard called \"Primary Bouquet\".'\n print 'python {0} -l \"My EML arns.txt\" -n \"Primary Bouquet\"\\n'.format(app_name)", "def usage(msg):\n ap.print_usage()\n print \"-\"*40\n print msg\n exit(1)", "def explainerdashboard_cli(ctx):", "def Usage():\n print \"\"\"\n To plot the result using the iter number of the x axis:\n\n plot_sdcard.py -i /tmp/data.txt\n\n To plot the result using time for the x axis:\n\n plot_sdcard.py -t /tmp/data.txt\n\n To plot the result from the profiler:\n\n profile_sdcard.sh\n plot_sdcard.py -p\n\n \"\"\"\n sys.exit(2)", "def getHelp(self):\r\n help_str =\\\r\n \"\"\"##########################################################################################\r\n#\r\n# Required:\r\n#\r\n# --query_NAST multi-fasta file containing query sequences in alignment format\r\n#\r\n# Common opts:\r\n#\r\n# --db_NAST db in NAST format\r\n# --db_FASTA db in fasta format (megablast formatted)\r\n#\r\n#\r\n# -n number of top matching database sequences to compare to (default 15)\r\n# -R min divergence ratio default: 1.007\r\n# -P min percent identity among matching sequences (default: 90)\r\n#\r\n# ## parameters to tune ChimeraParentSelector:\r\n#\r\n# Scoring parameters:\r\n# -M match score (default: +5)\r\n# -N mismatch penalty (default: -4)\r\n# -Q min query coverage by matching database sequence (default: 70)\r\n# -T maximum traverses of the multiple alignment (default: 1)\r\n\r\n#\r\n# ## parameters to tune ChimeraPhyloChecker:\r\n#\r\n#\r\n# --windowSize default 50\r\n# --windowStep default 5\r\n# --minBS minimum bootstrap support for calling chimera (default: 90)\r\n# -S percent of SNPs to sample on each side of breakpoint for computing bootstrap support (default: 10)\r\n# --num_parents_test number of potential parents to test for chimeras (default: 3)\r\n# --MAX_CHIMERA_PARENT_PER_ID Chimera/Parent alignments with perID above this are considered non-chimeras (default 100; turned off)\r\n#\r\n# ## misc opts\r\n#\r\n# --printFinalAlignments shows alignment between query sequence and pair of candidate chimera parents\r\n# --printCSalignments print ChimeraSlayer alignments in ChimeraSlayer output\r\n# --exec_dir 
chdir to here before running\r\n#\r\n#########################################################################################\r\n \"\"\"\r\n return help_str", "def printOptions(opts,subject_ids,session_ids,task_list, run_list, acq, rec):\n uname = os.popen('uname -s -n -r').read()\n print \"\\n\"\n print \"* Pipeline started at \"+time.strftime(\"%c\")+\"on \"+uname\n print \"* Command line is : \\n \"+str(sys.argv)+\"\\n\"\n print \"* The source directory is : \"+opts.sourceDir\n print \"* The target directory is : \"+opts.targetDir+\"\\n\"\n print \"* Data-set Subject ID(s) is/are : \"+str(', '.join(subject_ids))+\"\\n\"\n # print \"* PET conditions : \"+ ','.join(opts.condiList)+\"\\n\"\n print \"* Sessions : \", session_ids, \"\\n\"\n print \"* Tasks : \" , task_list , \"\\n\"\n print \"* Runs : \" , run_list , \"\\n\"\n print \"* Acquisition : \" , acq , \"\\n\"\n print \"* Reconstruction : \" , rec , \"\\n\"", "def diagnostics(self,\n *opts, # type: DiagnosticsOptions\n **kwargs # type: Dict[str, Any]\n ) -> DiagnosticsResult:\n\n return super().diagnostics(*opts, **kwargs)", "def usage():", "def usage():", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def main():\n options = docopt(__doc__)\n\n # In case the user asked for verbose logging, increase\n # the log level to debug.\n if options[\"--verbose\"] > 0:\n logging.basicConfig(level=logging.DEBUG)\n LOGGER.setLevel(logging.DEBUG)\n\n LOGGER.debug(\n \"Received options: %s\",\n options,\n )\n\n billing_account_id = _get_billing_account_id()\n member_accounts = _get_member_accounts(\n billing_account_id=billing_account_id,\n options=options,\n )\n _flush_out(accounts=member_accounts, options=options)\n\n return 0", "def show_usage():\n\n usage_screen = \"\\nUsage:\\n\" \\\n f\" {basename(argv[0])} <mock_1> [<mock_2> ...]\\n\" \\\n \"\\nOptions:\\n\" \\\n \" mock-departments Send HTTP requests to create some mock departments in the backend.\\n\" \\\n \" mock-employees Send HTTP requests to create some mock employees in the backend.\\n\" \\\n \" help Show this help page.\\n\" \\\n \"\" \\\n \" verbose Enables detailed request logging for the remaining options.\\n\"\n print(usage_screen)", "def dicom_cli():", "def print_usage():\n usage_msg = \"\"\"\n%s.py -H <host or group> -P <path> -M <mode>\n\nUsage:\n -h, --help\n Print detailed help screen\n -H, --hostname=STRING\n Host name or group of hosts\n -V, --version\n Print version information\n -P, --path=STRING\n Path to rancid var directory. Usually the dir contains a logs dirs and hostgroup dirs\n Example : /usr/local/rancid/var\n -M, --mod=STRING\n Plugin mod. Must be one of the following : ping, hash, config, cards, filter, qos\n *ping:\n Check if all host in the hostgroup are up from the rancid point of view.\n It uses the .up file to determine the lists of host to look for\n *hash:\n Check if the firmware hash is different from the ref one (or from the previous one)\n *config:\n Check if the configuration has changed for the host / group (notify diff)\n *cards:\n Specific to 8600 models. Check the hardware cards plugged to the host (notify diff).\n *filter:\n Specific to ES-470. Check the filters (notify diff)\n *qos:\n Specific to ES-470. 
Check the qos values (notify diff)\n -u, --url=URL\n URL to submit passive results to Shinken Receiver with HTTP\n Need a host and service to send result.\n -a, --passive-host=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n -b, --passive-service=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n\"\"\" % PLUGIN_NAME\n print usage_msg", "async def view_stats(self, ctx):\n app_info = await self.bot.application_info()\n total_ram = (psutil.virtual_memory().total >> 30) + 1\n embed = discord.Embed(\n title=\"Bot Stats\",\n description=f\"Running on a dedicated server with {total_ram}GB RAM \\n provided by RandomGhost#0666.\",\n )\n\n embed.add_field(name=\"**__General Info__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"Latency\", value=f\"{self.bot.latency*1000:.03f}ms\")\n embed.add_field(name=\"Guild Count\", value=f\"{len(self.bot.guilds):,}\")\n embed.add_field(name=\"User Count\", value=f\"{len(self.bot.users):,}\")\n\n embed.add_field(name=\"**__Technical Info__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"System CPU Usage\", value=f\"{psutil.cpu_percent():.02f}%\")\n embed.add_field(name=\"System RAM Usage\", value=f\"{psutil.virtual_memory().used/1048576:.02f} MB\")\n embed.add_field(name=\"System Uptime\", value=f\"{timedelta(seconds=int(time.time() - psutil.boot_time()))}\")\n embed.add_field(name=\"Bot CPU Usage\", value=f\"{process.cpu_percent():.02f}%\")\n embed.add_field(name=\"Bot RAM Usage\", value=f\"{process.memory_info().rss / 1048576:.02f} MB\")\n embed.add_field(name=\"Bot Uptime\", value=f\"{timedelta(seconds=int(time.time() - process.create_time()))}\")\n\n embed.add_field(name=\"**__Links__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"Support Server\", value=\"[https://discord.swaglyrics.dev](https://discord.swaglyrics.dev)\")\n embed.add_field(name=\"Invite\", value=\"[https://invite.swaglyrics.dev](https://invite.swaglyrics.dev)\")\n embed.add_field(\n name=\"Source\",\n value=\"[https://swaglyrics.dev/SwagLyrics-Discord-Bot]\" \"(https://swaglyrics.dev/SwagLyrics-discord-bot)\",\n )\n\n embed.set_footer(\n text=f\"Made by {app_info.owner} • {self.bot.get_user(512708394994368548)}\",\n icon_url=[\n app_info.owner.avatar_url_as(size=128),\n self.bot.get_user(512708394994368548).avatar_url_as(size=128),\n ][getrandbits(1)],\n ) # randomize clash or flabbet avatar\n\n await ctx.send(embed=embed)", "def command_help(args):\n\tprint_usage()\n\treturn 0", "def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))", "def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity 
remaining: %.1f%%' % (capacity * 100,))", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -t, --transaction\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -f, --from-file <filename>\n --not-error-tolerant\n \"\"\"", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "def process_meter_message(self, d):\n dpid = int(d.get(\"dpid\", 0))\n dp = self.dpset.get(dpid)\n if not dp:\n return \"Datapath does not exist!\"\n\n ofproto = dp.ofproto\n parser = dp.ofproto_parser\n\n command = {\n 'add': ofproto.OFPMC_ADD,\n 'mod': ofproto.OFPMC_MODIFY,\n 'del': ofproto.OFPMC_DELETE,\n }\n cmd = command.get(d[\"operation\"], ofproto.OFPMC_ADD)\n\n meter_id = d[\"meter_id\"]\n\n flags = 0\n bands = []\n if \"flags\" in d: # Ryu's format\n print(d['flags'])\n for f in d['flags']:\n flags += 0x01 if f == 'KBPS' else 0\n flags += 0x02 if f == 'PKTPS' else 0\n flags += 0x04 if f == 'BURST' else 0\n flags += 0x08 if f == 'STATS' else 0\n\n for band in d[\"bands\"]:\n if band['type'] == 'DROP':\n bands += [parser.OFPMeterBandDrop(rate=band['rate'],\n burst_size=band['burst_size'])]\n elif band['type'] == 'DSCP_REMARK':\n bands += [parser.OFPMeterBandDscpRemark(rate=band['rate'],\n burst_size=band['burst_size'], prec_level=band['prec_level'])]\n\n else: # FlowManager's format\n flags += 0x01 if d['OFPMF_KBPS'] else 0\n flags += 0x02 if d['OFPMF_PKTPS'] else 0\n flags += 0x04 if d['OFPMF_BURST'] else 0\n flags += 0x08 if d['OFPMF_STATS'] else 0\n\n # Flags must have KBPS or PKTPS\n flags = flags if (flags & 0x03) else (flags | 0x01)\n\n for band in d[\"bands\"]:\n #mtype = type_convert.get(band[0])\n if band[0] == 'DROP':\n bands += [parser.OFPMeterBandDrop(rate=band[1],\n burst_size=band[2])]\n elif band[0] == 'DSCP_REMARK':\n bands += [parser.OFPMeterBandDscpRemark(rate=band[1],\n burst_size=band[2], prec_level=band[3])]\n\n # TODO: catch some errors\n meter_mod = parser.OFPMeterMod(dp, cmd, flags, meter_id, bands)\n try:\n dp.send_msg(meter_mod)\n except KeyError as e:\n return e.__repr__()\n except Exception as e:\n return e.__repr__()\n\n return \"Message sent successfully.\"", "def cmd_help(args):", "def print_current_mem_usage():\n mem = get_current_mem_usage()\n output = \"# Mem usage = {} MiB #\".format(mem)\n print(\"\\n\" + \"-\" * len(output))\n print(output)\n print(\"-\" * len(output) + \"\\n\")", "def showUsage():\n None", "def usage(self):\n\n # header\n self.usage_header()\n\n print _(\"\"\"Screen: %(screen)s\nDescription: %(description)s\n\nUsage: %(app_name)s %(screen)s [options]\"\"\") % {\n 'app_name': constants.App.NAME,\n 'screen': self.name,\n 'description': self.description,\n }\n # any additional info in between (see other classes for reference)\n self._usage_options_example()\n\n #footer\n self.usage_footer()", "def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is 
allowed per device. This may prevent some openmmtools features from working. GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")", "def phast_cmmd(self):\n temp = '{prog} -R {rho} -C {ecov} -E {elen} -N {chrom} -i MAF {maf} {model} > {wig}\\n'.format(**self.dict)\n return temp.format(fnum=self.fnum)", "def cmd_help(self, commands=None, usage=False):\n if commands:\n usage = True\n commands = {self.approx.decmd(c.lower()) for c in commands}\n rejects = commands - self.approx.keys()\n for reject in rejects:\n self.put_pretty(\"No command named %r\" % reject)\n continue\n commands -= rejects\n if self.debug:\n assert not any(self.approx.encmd(r) in self.mod_commands for\n r in rejects)\n assert all(self.approx.encmd(c) in self.mod_commands for\n c in commands)\n if not commands:\n return\n requested = zip(commands, (self.approx[c] for c in commands))\n else:\n requested = self.approx.items()\n help = znc.CTable()\n help.AddColumn(\"Command\")\n help.AddColumn(\"Usage\" if usage else \"Description\")\n from itertools import zip_longest\n #\n for command, parser in requested:\n if usage:\n upre = \"usage: %s\" % command\n rest = (parser.format_usage()\n .replace(upre, \"\", 1)\n .replace(\"[-h] \", \"\", 1))\n desc = [l.strip() for l in rest.split(\"\\n\") if l.strip()]\n else:\n desc = [parser.description]\n for line, comm in zip_longest(desc, (command,), fillvalue=\"\"):\n help.AddRow()\n help.SetCell(\"Command\", comm)\n help.SetCell(\"Usage\" if usage else \"Description\", line)\n #\n s_line = znc.String()\n strung = []\n while help.GetLine(len(strung), s_line):\n strung.append(s_line.s)\n also = \" (<command> [-h] for details)\"\n strung[1] = strung[1].replace(len(also) * \" \", also, 1)\n self.put_pretty(\"\\n\".join(strung))", "def help(update, context):\n msg = \"\"\n msg += \"\\n/covid 7-Day-Incident per Million\"\n msg += \"\\n/daylio What did I do a year ago today?\"\n msg += \"\\n/f1last Results of the last race\"\n msg += \"\\n/f1stand Driver standings\"\n msg += \"\\n/f1next Time and place of the next race\"\n msg += \"\\n/fuel prices and consump. 
(args: Xeur Ykm)\"\n msg += \"\\n/ip Outside ip address\"\n msg += \"\\n/rate Exchange rates (args: Xeur/Yhuf)\"\n msg += \"\\n/rss check rss feeds for new content\"\n msg += \"\\n/sun Time of sunrise and sunset\"\n msg += \"\\n/xkcd Sends last comic image and alt\"\n msg.rstrip()\n update.message.reply_text(msg)", "def help_command(server, output, conf):\n server.tell(output.name, 'Available commands:')\n for key in COMMANDS.keys():\n cmd_func = COMMANDS[key]\n if cmd_func.__doc__:\n server.tell(output.name, '%s: %s' % (key[1:], cmd_func.__doc__))\n else:\n server.tell(output.name, key[1:])\n return", "def show_command_multiple(self, command, arglist, vdc=None, parser=None, optdict={}):\n self.logger.debug(\"run multiple show commands {} {}\".format(command, str(arglist)))\n output = \"\"\n if isinstance(arglist, str):\n arglist = [arglist]\n for vdcname in vdc:\n self.switchto_vdc(vdcname)\n if len(vdc) > 1:\n output = output + \"\\nvdc {}: \\n\".format(self.get_current_vdc())\n for a in arglist:\n self.logger.debug(\"run show commands {} {} in vdc {}\".format(command, a, vdcname))\n if parser is not None:\n scratch = parser(self._send_xml_cli_show(\"{} {}\".format(command, a)), **optdict)\n if scratch is None:\n output = output + \"Command '{} {}' returned no output\\n\".format(command, a)\n else:\n output = output + scratch\n else:\n output = output + self._send_xml_cli_show(\"{} {}\".format(command, a))\n self.logger.debug(\"multiple show commands output {}\".format(output))\n return output", "def main( argv = None ):\n\n if argv == None: argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser( version = \"%prog version: $Id$\", \n usage = globals()[\"__doc__\"] )\n\n parser.add_option(\"--category\", dest=\"category\", type=\"choice\",\n choices = (\"B\", \"C\"), help=\"supply help\" )\n\n ## add common options (-h/--help, ...) 
and parse command line \n (options, args) = E.Start( parser, argv = argv )\n\n data = getData(options.stdin)\n if options.category == \"B\":\n options.stdout.write(\"Category B pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in b2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n\n elif options.category == \"C\":\n options.stdout.write(\"Category C pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in c2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n else:\n raise ValueError(\"must specify the category of pathway\")\n\n\n ## write footer and output benchmark information.\n E.Stop()", "def do_config():\n\n tracking = get_tracking()\n for unit in (\"ppm\", \"sec\"):\n\ttunit = unit\n\tif unit == \"sec\":\n\t tunit = \"seconds\"\n\tprint \"multigraph chrony_%s\" % unit\n\tprint \"graph_title NTP (Chrony) Statistics (%s)\" % unit\n\tprint \"graph_vlabel %s\" % unit\n\tprint \"graph_args --base 1000\"\n\tprint \"graph_category time\"\n\tprint \"graph_info NTP (Chrony) tracking statistics (the ones measured in %s)\" % tunit\n\tfor key in tracking[tunit]:\n\t item = tracking[tunit][key]\n\t print \"\"\"%s.label %s\n%s.draw LINE2\n%s.info %s\"\"\" % (key, item[\"label\"], key, key, item[\"label\"])\n\tprint\n return 0", "def help(update, context):\n update.message.reply_text(\"\"\"usage \n /bus <bus name> or /bus <bus name> <stop name>\n /addstop <stop name> <stop code>\n /delstop <stop name>\n /showstops\n /help\n \"\"\")\n\n # log info\n logger.info(\"help used username:{0}\".format(update.message.from_user.username))", "def metrics_partition(cmd_ctx, cpc, partition, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_partition(cmd_ctx, cpc, partition, options))", "def main():\r\n\r\n try:\r\n argv = flags.parse_args(sys.argv)\r\n logging.setup(\"traffic\")\r\n except cfg.ConfigFilesNotFoundError:\r\n cfgfile = FLAGS.config_file[-1] if FLAGS.config_file else None\r\n if cfgfile and not os.access(cfgfile, os.R_OK):\r\n st = os.stat(cfgfile)\r\n print _(\"Could not read %s. 
Re-running with sudo\") % cfgfile\r\n try:\r\n os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)\r\n except Exception:\r\n print _('sudo failed, continuing as if nothing happened')\r\n\r\n print _('Please re-run traffic-manage as root.')\r\n sys.exit(2)\r\n\r\n script_name = argv.pop(0)\r\n if len(argv) < 1:\r\n print (_(\"\\nOpenStack Traffic version: %(version)s (%(vcs)s)\\n\") %\r\n {'version': version.version_string(),\r\n 'vcs': version.version_string_with_vcs()})\r\n print script_name + \" category action [<args>]\"\r\n print _(\"Available categories:\")\r\n for k, _v in CATEGORIES:\r\n print \"\\t%s\" % k\r\n sys.exit(2)\r\n category = argv.pop(0)\r\n if category == \"bash-completion\":\r\n if len(argv) < 1:\r\n print \" \".join([k for (k, v) in CATEGORIES])\r\n else:\r\n query_category = argv.pop(0)\r\n matches = lazy_match(query_category, CATEGORIES)\r\n # instantiate the command group object\r\n category, fn = matches[0]\r\n command_object = fn()\r\n actions = methods_of(command_object)\r\n print \" \".join([k for (k, v) in actions])\r\n sys.exit(0)\r\n matches = lazy_match(category, CATEGORIES)\r\n # instantiate the command group object\r\n category, fn = matches[0]\r\n command_object = fn()\r\n actions = methods_of(command_object)\r\n if len(argv) < 1:\r\n if hasattr(command_object, '__call__'):\r\n action = ''\r\n fn = command_object.__call__\r\n else:\r\n print script_name + \" category action [<args>]\"\r\n print _(\"Available actions for %s category:\") % category\r\n for k, _v in actions:\r\n print \"\\t%s\" % k\r\n sys.exit(2)\r\n else:\r\n action = argv.pop(0)\r\n matches = lazy_match(action, actions)\r\n action, fn = matches[0]\r\n\r\n # For not decorated methods\r\n options = getattr(fn, 'options', [])\r\n\r\n usage = \"%%prog %s %s <args> [options]\" % (category, action)\r\n parser = optparse.OptionParser(usage=usage)\r\n for ar, kw in options:\r\n parser.add_option(*ar, **kw)\r\n (opts, fn_args) = parser.parse_args(argv)\r\n fn_kwargs = vars(opts)\r\n\r\n for k, v in fn_kwargs.items():\r\n if v is None:\r\n del fn_kwargs[k]\r\n elif isinstance(v, basestring):\r\n fn_kwargs[k] = v.decode('utf-8')\r\n else:\r\n fn_kwargs[k] = v\r\n\r\n fn_args = [arg.decode('utf-8') for arg in fn_args]\r\n\r\n # call the action with the remaining arguments\r\n try:\r\n fn(*fn_args, **fn_kwargs)\r\n rpc.cleanup()\r\n sys.exit(0)\r\n except TypeError:\r\n print _(\"Possible wrong number of arguments supplied\")\r\n print fn.__doc__\r\n parser.print_help()\r\n raise\r\n except Exception:\r\n print _(\"Command failed, please check log for more info\")\r\n raise", "def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: users[user] += count\n except KeyError: users[user] = count\n else: \n try: commands[name] += count\n except KeyError: commands[name] = count\n\n try: channels[user] += count\n except KeyError: channels[user] = count\n\n comrank = sorted([(b, a) for (a, b) in commands.iteritems()], reverse=True)\n userank = sorted([(b, a) for (a, b) in users.iteritems()], reverse=True)\n charank = sorted([(b, a) for (a, b) in channels.iteritems()], reverse=True)\n\n # most heavily used commands\n creply = 'most used commands: '\n for count, command in comrank[:10]: \n creply += '%s (%s), ' % (command, count)\n caesar.say(creply.rstrip(', '))\n\n # most heavy 
users\n reply = 'power users: '\n for count, user in userank[:10]: \n reply += '%s (%s), ' % (user, count)\n caesar.say(reply.rstrip(', '))\n\n # most heavy channels\n chreply = 'power channels: '\n for count, channel in charank[:3]: \n chreply += '%s (%s), ' % (channel, count)\n caesar.say(chreply.rstrip(', '))", "def command_short():\n pass", "def usage(msgarg):\n if msgarg:\n sys.stderr.write(\"error: %s\\n\" % msgarg)\n print(\"\"\"\\\n usage: %s [options] XXYYZZ\n\n where XXYYZZ is the serial number of a connected Android device.\n\n options:\n -d increase debug msg verbosity level\n\n \"\"\" % os.path.basename(sys.argv[0]))\n sys.exit(1)", "def help(cls, extra_args=None):\n if (_is_text_interface()):\n return _create_text_help_str(cls, cls._TEXT_USAGE)\n else:\n return cls._GRAPHICAL_USAGE", "def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])", "def show_mem(cmd, cnt, args):\n if cpu is None:\n log(\"Load program first\") \n return\n elif len(cpu.memory) == 0:\n log(\"Load program first\") \n return \n chunk = 0\n chunk_count = len(cpu.memory)\n while chunk < chunk_count: \n chunk_start = cpu.memory[chunk][MEMADDR]\n chunk_end = chunk_start + cpu.memory[chunk][MEMSIZE] \n log(\"{:d} {:#x}..{:#x}\".format(chunk, chunk_start, chunk_end)) \n chunk += 1\n if machine == \"ARM\":\n if len(cpu.high_memory) != 0:\n log(\"High memory\")\n for addr in sorted(cpu.high_memory):\n log(\"{:#x}\".format(addr))", "def main():\n known_args, unknown_args = parse_known_args()\n if not unknown_args:\n # return an error message if no command is provided\n sys.exit(\"Please provide a command to benchmark: $ humann_benchmark COMMAND\")\n try:\n process = subprocess.Popen(\" \".join(unknown_args),shell=True)\n except (EnvironmentError, subprocess.CalledProcessError):\n sys.exit(\"Unable to execute command: \" + \" \".join(unknown_args))\n pid=str(process.pid)\n start=time.time()\n max_memory=0\n while process.poll() is None:\n time.sleep(1)\n # while the process is running check on the memory use\n # get the pids of the main process and all children (and their children)\n pids=get_pids(pid)\n stdout=subprocess.check_output([\"ps\",\"--pid\",\",\".join(pids),\"-o\",\"pid,rss,command\"]).decode(\"utf-8\")\n print(\"\\n\"+stdout+\"\\n\")\n # remove the header from the process output\n status=[i.split() for i in filter(lambda x: x, stdout.split(\"\\n\")[1:])]\n # memory is the sum of all rss\n memory=sum(int(i[1]) for i in status)\n if memory > max_memory:\n max_memory=memory\n \n end=time.time()\n print(\"Time: {:.0f} minutes\".format((end-start)/60))\n print(\"Max Memory (RSS): {:.1f} GB\".format(max_memory*1.0/1024**2))", "def help(self, msg=None):\n\n # Print the message if given.\n if not msg == None:\n print str(msg) + \"\\n\"\n\n # Display the list of commands, in the alphabetical order.\n print \"Use one of the following commands:\"\n for action in sorted(self.actions.keys()):\n info = self.actions[action]\n 
joined_oblig = ' '.join(info['required'])\n if len(info['additional']) > 0:\n add = [\"<%s>\" % x for x in info['additional']]\n joined_add = '[' + ' '.join(add) + ']'\n else:\n joined_add = ''\n print \"\\t* %s %s %s\" % (action, joined_oblig, joined_add)", "def treatCmdOpts(argv):\n helpTxt = os.path.basename(__file__) + 'creates plots with time values until the jamming is detected'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n parser.add_argument('-f', '--file', help='Name of CSV file', required=True)\n parser.add_argument('-d', '--dir', help='Directory of CSV file, defaults to current', required=False, default='.')\n args = parser.parse_args()\n\n return args.file, args.dir", "def gather_info_and_display():\n # Obtain total rss displayed in memory.stat for each group,\n # container and service.\n try:\n output_mem = pipe_command(GREP_CMD, AWK_CMD, cwd=MEMPATH)\n LOG.debug(\n 'command: %s\\n%s',\n \"grep -rs total_rss '/sys/fs/cgroup/memory/' \"\n \"| awk '$2>0{print$0}' \",\n output_mem)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n mem_info = get_meminfo()\n pt_groups = gather_groups_memory(output_mem)\n pt_cont = gather_containers_memory(output_mem)\n pt_serv = sys_service_memory()\n\n # Dump the tables out\n print('\\nPer groups memory usage:')\n\n # Get string to be printed and create list of elements separated by \\n\n list_of_table_lines = pt_groups.get_string().split('\\n')\n\n # Use the first line (+---+-- ...) as horizontal rule to insert later\n horizontal_line = list_of_table_lines[0]\n\n # Print the table, except last two lines ( \"Total\" row + final separator).\n print(\"\\n\".join(list_of_table_lines[:-2]))\n # Print separator, and finally the \"Total\" row.\n print(horizontal_line)\n print(\"\\n\".join(list_of_table_lines[-2:]))\n\n pt_namespc = prettytable.PrettyTable(\n ['Namespace',\n 'Resident Set Size (MiB)',\n ], caching=False)\n pt_namespc.align = 'l'\n pt_namespc.align['Resident Set Size (MiB)'] = 'r'\n\n print('\\nPer namespace memory usage:')\n for n_s in MEMORY['namespaces']:\n pt_namespc.add_row(\n [n_s,\n MEMORY['namespaces'][n_s],\n ])\n print(pt_namespc)\n\n print('\\nPer container memory usage:')\n print(pt_cont)\n\n print('\\nPer service memory usage:')\n print(pt_serv)\n\n base_mebib = 0.0\n k8s_system = 0.0\n k8s_addon = 0.0\n platform_memory_percent = 0.0\n\n # Calculate base memory usage (i.e., normal memory, exclude K8S and VMs)\n # e.g., docker, system.slice, user.slice\n for group in MEMORY['cgroups']:\n if group in BASE_GROUPS:\n base_mebib += float(MEMORY['cgroups'][group])\n\n # K8S platform system usage (essential) and addons usage (non-essential)\n for n_s in MEMORY['namespaces']:\n if n_s in K8S_NAMESPACE_SYSTEM:\n k8s_system += MEMORY['namespaces'][n_s]\n elif n_s in K8S_NAMESPACE_ADDON:\n k8s_addon += MEMORY['namespaces'][n_s]\n\n # Calculate platform memory usage\n platform_mebib = base_mebib + k8s_system\n\n anon_mebib = float(mem_to_mebibytes(\n mem_info['Active(anon)'] + mem_info['Inactive(anon)'])) * KBYTE\n avail_mebib = float(mem_to_mebibytes(\n mem_info['MemAvailable'])) * KBYTE\n total_mebib = float(anon_mebib + avail_mebib)\n\n anon_percent = py2_round(100 * anon_mebib / total_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n reserved_mebib = get_platform_reserved_memory()\n # Calculate platform memory in terms of percent reserved\n if reserved_mebib > 0.0:\n platform_memory_percent = 
py2_round(\n 100 * platform_mebib / reserved_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n pt_platf = prettytable.PrettyTable(\n ['Reserved',\n 'Platform',\n 'Base',\n 'K8s Platform system',\n 'k8s-addon'\n ], caching=False)\n pt_platf.align = 'l'\n\n pt_platf.add_row(\n [reserved_mebib,\n '{} ({}%)'.format(platform_mebib, platform_memory_percent),\n base_mebib,\n k8s_system,\n k8s_addon\n ])\n print('\\nPlatform memory usage in MiB:')\n print(pt_platf)\n\n pt_4k = prettytable.PrettyTable(\n ['Anon',\n 'Cgroup-rss',\n 'Available',\n 'Total'\n ], caching=False)\n pt_4k.align = 'l'\n\n pt_4k.add_row(\n ['{} ({}%)'.format(anon_mebib, anon_percent),\n MEMORY['cgroups']['total_rss'],\n avail_mebib,\n total_mebib\n ])\n\n print('\\n4K memory usage in MiB:')\n print(pt_4k)\n\n return 0", "def main():\n cli = DhcpClientCLI()\n\n parser = argparse.ArgumentParser(\n description='Management CLI for Mobility DHCP Client',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n # Add sub commands\n subparsers = parser.add_subparsers(title='subcommands', dest='cmd')\n\n # List\n subparser = subparsers.add_parser(\n 'list_dhcp_records',\n help='Lists all records from Redis',\n )\n subparser.set_defaults(func=cli.list_all_record)\n\n # Add\n subparser = subparsers.add_parser(\n 'add_rec',\n help='Add ip allocation record',\n )\n subparser.add_argument(\n 'mac', help='Mac address, e.g. \"8a:00:00:00:0b:11\"',\n type=str,\n )\n subparser.add_argument(\n 'ip', help='IP address, e.g. \"1.1.1.1\"',\n type=ip_address,\n )\n\n subparser.add_argument(\n 'state',\n help='DHCP protocol state 1 to 7, e.g. \"1\"',\n type=int,\n )\n subparser.add_argument(\n 'subnet',\n help='IP address subnet, e.g. \"1.1.1.0/24\"',\n type=ipaddress.ip_network,\n )\n\n subparser.add_argument('dhcp', help='DHCP IP address, e.g. \"1.1.1.100\"')\n subparser.add_argument('lease', help='Lease time in seconds, e.g. \"100\"')\n subparser.set_defaults(func=cli.add_record)\n\n # del\n subparser = subparsers.add_parser(\n 'del_rec',\n help='Add ip allocation record',\n )\n subparser.add_argument('mac', help='Mac address, e.g. \"8a:00:00:00:0b:11\"')\n subparser.set_defaults(func=cli.del_record)\n\n # set default gw\n subparser = subparsers.add_parser(\n 'set_default_gw',\n help='Set default GW',\n )\n subparser.add_argument('ip', help='IP address, e.g. \"1.1.1.1\"')\n\n subparser.set_defaults(func=cli.set_deafult_gw)\n\n # set gw mac\n subparser = subparsers.add_parser(\n 'set_gw_mac',\n help='Set GW Mac address',\n )\n subparser.add_argument('mac', help='Mac address, e.g. \"8a:00:00:00:0b:11\"')\n\n subparser.set_defaults(func=cli.set_deafult_gw)\n\n # Parse the args\n args = parser.parse_args()\n if not args.cmd:\n parser.print_usage()\n sys.exit(1)\n\n # Execute the sub-command function\n args.func(args)", "def main():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n add_common_snmp_args(parser)\n parser.add_argument(\n \"-w\",\n \"--warning\",\n type=int,\n default=70,\n help=\"Warning memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-c\",\n \"--critical\",\n type=int,\n default=90,\n help=\"Critical memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-f\",\n \"--family\",\n required=True,\n help=\"Switch family. 
Supported families: 1910, 1920, 1920S\",\n )\n\n config = vars(parser.parse_args())\n check_snmp_args(config)\n check_thresholds(config)\n\n dataset = {}\n\n if config[\"family\"] == \"1920S\":\n cpu = ObjectType(\n ObjectIdentity(\n \"HP-SWITCHING-MIB\", \"agentSwitchCpuProcessTotalUtilization\", 0\n )\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n cpu = ObjectType(\n ObjectIdentity(\"HH3C-ENTITY-EXT-MIB\", \"hh3cEntityExtCpuUsage\", 8)\n )\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n try:\n raw_data = get_snmp_data(config, cpu)\n except ValueError as err:\n unknown_exit(SERVICE, err)\n add_vars_to_dataset(dataset, raw_data)\n\n if config[\"family\"] == \"1920S\":\n dataset[\"cpu_usage\"] = get_hp_cpu_usage(\n dataset[\"agentSwitchCpuProcessTotalUtilization\"]\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n dataset[\"cpu_usage\"] = int(dataset[\"hh3cEntityExtCpuUsage\"])\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n state, message = generate_output(config, dataset)\n report(state, message)", "def capacitygroup_show(cmd_ctx, cpc, capacitygroup):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_show(cmd_ctx, cpc, capacitygroup))", "def test_usage(self):\n # Make sure the usage message is shown when no arguments\n # are given and when the -h or --help option is given.\n for options in [], ['-h'], ['--help']:\n exit_code, output = run_cli(*options)\n assert \"Usage:\" in output", "def usage():\n pass", "def usage():\n\n # Local constants\n\n # Local variables\n\n #****** start usage() ******#\n print()\n print(\" Usage: python TCGCardTracker.py <arguement below> <optional-argument-1>\")\n print(\"\\tadd (Optional): Add a card to your collection. Requires TCGPlayer URL.\")\n print(\"\\tdelete (Optional): Delete a card from your collection. Requires TCGPlayer URL.\")\n print(\"\\tupdate (Optional): Updates pricing data for every card in your collection.\")\n print(\"\\ttop25 (Optional): Outputs the 25 most valuable cards from your collection.\")\n print(\"\\texport (Optional): Exports a list of TCGPlayer URLs to a text file.\")\n print(\"\\texport_collection (Optional): Exports your collection to a .csv including most recent price data.\")\n print(\"\\timport (Optional): Imports a text file of TCGPlayer URLs to bulk import cards into your collection. Requires text file.\")\n print(\"\\tworth (Optional): Ouputs how much your collection is worth using latest price data.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tticker (Optional): Displays a ticker grid of the change in value over a given time. 
If run without the days back parameter it will default to 7 days.\")\n sys.exit()", "def print_help_info(self, global_options):\r\n usage = ['',\"Type '%s help <subcommand>' for help on a specific subcommand.\" % self.prog_name,'']\r\n usage.append('Available subcommands:')\r\n commands = self.get_commands(global_options).keys()\r\n commands.sort()\r\n for cmd in commands:\r\n usage.append(' %s' % cmd)\r\n return '\\n'.join(usage)", "def do_command_help(self, command):\n summary = self.base.commands[command].get_summary()\n usage = self.get_command_usage(command)\n description = self.base.commands[command].get_description()\n sys.stdout.write('%s\\n%s' % (summary, usage))\n if description != None:\n sys.stdout.write('Arguments Description:\\n%s\\n' %\n (description, ))", "def usage(progname):\n \n sys.stderr.write(\"Usage: \" +progname + \" [-cmnv] [-z score] \"\n \" <outdir>\\n\")\n sys.stderr.write(' -c class level not fold level evaluation\\n')\n sys.stderr.write(' -m read multiquery file on stdin\\n')\n sys.stderr.write(' -n negate scores (so that most -ve is best)\\n')\n sys.stderr.write(' -v verbose messages to stderr\\n')\n sys.stderr.write(' -z score : assign identifiers not present in the output a score of score\\n')\n sys.exit(1)", "def reports_cli():", "def metric_options(self):\n return Optimizer.list_method_options(self.metric_creator.method_dict)", "def test_cli_help(self):\n output = self.update_command('-h')", "def handle_program_options():\n parser = argparse.ArgumentParser(description=\"Gather numeric information \\\n about the processed sequence data in an \\\n MG-RAST project.\")\n parser.add_argument('project_id',\n help=\"The project identifier (MG-RAST ID)\")\n parser.add_argument('-a', '--auth_key',\n help=\"An MG-RAST API authorization key. This is \\\n necessary to access projects marked as private.\")\n parser.add_argument('-g', '--group_by', action='append',\n help=\"A string that matches some part of the \\\n 'Metagenome Name' field. All matching project \\\n metagenomes will be grouped by this identifier \\\n and their stats will be summed. This option can \\\n be specified multiple times to create multiple \\\n groups. All non-matching metagenomes will \\\n appear separately in the table. NOTE: \\\n Strings will be matched longest first. This \\\n allows for matching names that might be a \\\n substring of another match. For example: -g S \\\n -g NS. The name field will first be matched \\\n against the longest string (NS) first and then \\\n each smaller string in order.\")\n parser.add_argument('-o', '--output_filename', default='meta_stats.txt',\n help=\"The name of the file the project summary \\\n information will be written to.\")\n\n# parser.add_argument('-v', '--verbose', action='store_true')\n\n return parser.parse_args()", "async def eventstats(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send_help(ctx.command)", "def print_usage():\n print(\"usage: MILP.py -p <parameter file> -i <payoff file> -o <output file>\")\n print(\"-p, --params\\t sets the parameter file\")\n print(\"-i, --payoff\\t sets the payoff file\")\n print(\"-o, --output\\t sets the output file. Defaults to out.csv\")\n print(\"-d, --delimiter\\t sets the delimiter of ALL files. 
Defaults to csv\")", "def run_pydca():\n parser = ArgumentParser()\n\n #Create subparsers\n subparsers = parser.add_subparsers(dest = CmdArgs.subcommand_name)\n\n parser_dca_visualizer_contact_map = subparsers.add_parser('plot_contact_map',\n help = 'Provides a quick contact map comparison of DCA computation results.'\n ' For a given PDB chain ID, the PDB contacts are extracted from PDB'\n ' file. Then top N ranked DCA pairs are used for the contact map'\n ' comparison. Use --help for more information.'\n )\n add_args_to_subparser(parser_dca_visualizer_contact_map, 'plot_contact_map')\n parser_dca_visualizer_tp_rate = subparsers.add_parser('plot_tp_rate',\n help = ' Plots the true positive rate per rank of a DCA computation'\n ' result. The DCA file should contain ranked pairs (i, j) such that'\n ' i < j. If the biomolecule is RNA, a secondary structure file should'\n ' be provided if one is interested on tertiary contacts. Use --help'\n ' for more information.'\n\n )\n add_args_to_subparser(parser_dca_visualizer_tp_rate, 'plot_tp_rate')\n\n parser_pdb_content = subparsers.add_parser('pdb_content',\n help = 'Displays information about the contents of a PDB file.'\n ' Use --verbose optional argument to display the PDB summary'\n ' on the terminal.',\n )\n add_args_to_subparser(parser_pdb_content, 'pdb_content')\n\n parser_trim_by_refseq = subparsers.add_parser('trim_by_refseq',\n help='Removes MSA columns containing fraction of gaps more than'\n ' the value specified by {} (default 0.5) if these columns'\n ' do not correspond to residues of the sequence in MSA that matches'\n ' with the reference. Setting max_gap to zero removes all columns'\n ' except those corresponding to the residues of the matching sequence'\n ' to the reference.'.format(CmdArgs.max_gap_optional)\n )\n add_args_to_subparser(parser_trim_by_refseq, 'trim_by_refseq')\n\n parser_trim_by_gap_size = subparsers.add_parser('trim_by_gap_size',\n help = 'Removes MSA columns containing gap fraction more than the value'\n ' specified by {} (default 0.5)'.format(CmdArgs.max_gap_optional)\n )\n add_args_to_subparser(parser_trim_by_gap_size, 'trim_by_gap_size')\n\n #display help if no argument is passed\n args = parser.parse_args(args=None if sys.argv[1:] else ['--help'])\n args_dict = vars(args)\n # Do the computations based on the arguments passed from the command line\n execute_from_command_line(\n biomolecule = args_dict.get('biomolecule'),\n msa_file = args_dict.get('msa_file'),\n refseq_file = args_dict.get('refseq_file'),\n the_command = args_dict.get('subcommand_name'),\n force_seq_type=args_dict.get('force_seq_type'),\n verbose = args_dict.get('verbose'),\n output_dir = args_dict.get('output_dir'),\n pdb_file = args_dict.get('pdb_file'),\n pdb_chain_id = args_dict.get('pdb_chain_id'),\n dca_file = args_dict.get('dca_file'),\n rna_secstruct_file = args_dict.get('rna_secstruct_file'),\n linear_dist = args_dict.get('linear_dist'),\n contact_dist = args_dict.get('contact_dist'),\n num_dca_contacts = args_dict.get('num_dca_contacts'),\n wc_neighbor_dist = args_dict.get('wc_neighbor_dist'),\n pdb_id = args_dict.get('pdb_id'),\n max_gap = args_dict.get('max_gap'),\n remove_all_gaps = args_dict.get('remove_all_gaps'),\n )\n logger.info('\\n\\tDONE')\n return None", "def usage():\n print(\"[1] Getting help from a cipher \")\n print(\" ---> ./cryptogra.py caesar -h \")\n print(\"\")", "def commandline_options():\n parser = argparse.ArgumentParser(\n description='ocn_diags_generator: CESM wrapper python program for Ocean Diagnostics 
packages.')\n\n parser.add_argument('--backtrace', action='store_true',\n help='show exception backtraces as extra debugging '\n 'output')\n\n parser.add_argument('--debug', action='store_true',\n help='extra debugging output')\n\n #parser.add_argument('--config', nargs=1, required=True, help='path to config file')\n\n options = parser.parse_args()\n return options" ]
[ "0.6317614", "0.59251046", "0.58999896", "0.58430314", "0.57148993", "0.56526655", "0.547031", "0.53368515", "0.5278745", "0.52745694", "0.52628577", "0.51370335", "0.5132186", "0.51290196", "0.512005", "0.5110605", "0.51074606", "0.5080317", "0.5074927", "0.50636065", "0.5045238", "0.50326115", "0.5021564", "0.50138205", "0.49624792", "0.49544725", "0.49428305", "0.49428305", "0.4932547", "0.49250507", "0.48911735", "0.48746535", "0.48668554", "0.4857556", "0.4854782", "0.48441866", "0.4841546", "0.48356408", "0.4832537", "0.48319176", "0.48140767", "0.48107842", "0.47930133", "0.47853255", "0.47853255", "0.47829592", "0.47786003", "0.47663677", "0.47659582", "0.4744153", "0.47339025", "0.4731227", "0.47309107", "0.4718063", "0.47163436", "0.47050616", "0.46889624", "0.46817568", "0.4677733", "0.46774387", "0.4668844", "0.46671963", "0.46565443", "0.46560454", "0.46553153", "0.4653297", "0.46363705", "0.46345383", "0.4633751", "0.46313944", "0.4630755", "0.46288836", "0.46250033", "0.46192405", "0.4618046", "0.46134958", "0.460794", "0.4605971", "0.4601797", "0.4595157", "0.4594818", "0.4592658", "0.4592104", "0.45888177", "0.45829865", "0.4577831", "0.45771435", "0.457691", "0.4576566", "0.45755786", "0.4572319", "0.4571672", "0.45712003", "0.45694444", "0.45656064", "0.4561756", "0.456026", "0.45602337", "0.45578074", "0.45462516" ]
0.643978
0
Report usage metrics for all channels of CPCs in classic mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_channel(cmd_ctx, cpc, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: users[user] += count\n except KeyError: users[user] = count\n else: \n try: commands[name] += count\n except KeyError: commands[name] = count\n\n try: channels[user] += count\n except KeyError: channels[user] = count\n\n comrank = sorted([(b, a) for (a, b) in commands.iteritems()], reverse=True)\n userank = sorted([(b, a) for (a, b) in users.iteritems()], reverse=True)\n charank = sorted([(b, a) for (a, b) in channels.iteritems()], reverse=True)\n\n # most heavily used commands\n creply = 'most used commands: '\n for count, command in comrank[:10]: \n creply += '%s (%s), ' % (command, count)\n caesar.say(creply.rstrip(', '))\n\n # most heavy users\n reply = 'power users: '\n for count, user in userank[:10]: \n reply += '%s (%s), ' % (user, count)\n caesar.say(reply.rstrip(', '))\n\n # most heavy channels\n chreply = 'power channels: '\n for count, channel in charank[:3]: \n chreply += '%s (%s), ' % (channel, count)\n caesar.say(chreply.rstrip(', '))", "def channel_help(message):\n message.reply(Strings['HELP'].format(config.HELP_URL))", "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "async def channel_stats(self, ctx, channel: discord.TextChannel = None):\n channel = channel or ctx.channel\n embed = discord.Embed(\n title=f\"Stats for **{channel.name}**\",\n description=f\"{'Category: {}'.format(channel.category.name) if channel.category else 'This channel is not in a category'}\",\n color=discord.Color.blurple(),\n )\n embed.add_field(name=\"Channel Guild\",\n value=ctx.guild.name, inline=False)\n embed.add_field(name=\"Channel Id\", value=channel.id, inline=False)\n embed.add_field(\n name=\"Channel Topic\",\n value=f\"{channel.topic if channel.topic else 'No topic.'}\",\n inline=False,\n )\n embed.add_field(name=\"Channel Position\",\n value=channel.position, inline=False)\n embed.add_field(\n name=\"Channel Slowmode Delay\", value=channel.slowmode_delay, inline=False\n )\n embed.add_field(name=\"Channel is nsfw?\",\n value=channel.is_nsfw(), inline=False)\n embed.add_field(name=\"Channel is news?\",\n value=channel.is_news(), inline=False)\n embed.add_field(\n name=\"Channel Creation Time\", value=channel.created_at, inline=False\n )\n embed.add_field(\n name=\"Channel Permissions Synced\",\n value=channel.permissions_synced,\n inline=False,\n )\n embed.add_field(name=\"Channel Hash\", value=hash(channel), inline=False)\n\n await ctx.message.delete()\n await ctx.send(embed=embed)", "def help(self, args):\n print('No commands available for this consumer')", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)", "def command_help(self, bot, update):\n\n messages = [\n 'Available commands:',\n '/who - Who is Myles?',\n '/where - Where is Myles?',\n '/tweet - What was the last tweet Myles sent?',\n '/photo - What was the last Instagram photo Myles took?',\n '/web - Where can I find Myles on the interwebs?',\n ]\n\n self.send_messages(bot, update, messages)", "async def 
serverchart(self, ctx: commands.Context, messages: int = 1000):\n if messages < 5:\n return await ctx.send(\"Don't be silly.\")\n channel_list = []\n blacklisted_channels = await self.config.guild(ctx.guild).channel_deny()\n for channel in ctx.guild.text_channels:\n channel: discord.TextChannel\n if channel.id in blacklisted_channels:\n continue\n if channel.permissions_for(ctx.message.author).read_messages is False:\n continue\n if channel.permissions_for(ctx.guild.me).read_messages is False:\n continue\n channel_list.append(channel)\n\n if len(channel_list) == 0:\n return await ctx.send(\"There are no channels to read... This should theoretically never happen.\")\n\n embed = discord.Embed(\n description=\"Fetching messages from the entire server this **will** take a while.\",\n colour=await self.bot.get_embed_colour(location=ctx.channel),\n )\n global_fetch_message = await ctx.send(embed=embed)\n global_history = []\n\n for channel in channel_list:\n embed = discord.Embed(\n title=f\"Fetching messages from #{channel.name}\",\n description=\"This might take a while...\",\n colour=await self.bot.get_embed_colour(location=channel)\n )\n loading_message = await ctx.send(embed=embed)\n try:\n history = await self.fetch_channel_history(channel, loading_message, messages)\n global_history += history\n await loading_message.delete()\n except discord.errors.Forbidden:\n try:\n await loading_message.delete()\n except discord.NotFound:\n continue\n except discord.NotFound:\n try:\n await loading_message.delete()\n except discord.NotFound:\n continue \n\n msg_data = self.calculate_member_perc(global_history)\n # If no members are found.\n if len(msg_data[\"users\"]) == 0:\n try:\n await global_fetch_message.delete()\n except discord.NotFound:\n pass\n return await ctx.send(f\"Only bots have sent messages in this server... 
Wauw...\")\n\n top_twenty, others = self.calculate_top(msg_data)\n chart = await self.create_chart(top_twenty, others, ctx.guild)\n\n try:\n await global_fetch_message.delete()\n except discord.NotFound:\n pass\n await ctx.send(file=discord.File(chart, \"chart.png\"))", "async def help(self, context):\n prefix = config.BOT_PREFIX\n user=context.message.author\n if not isinstance(prefix, str):\n prefix = prefix[0]\n embed = discord.Embed(title=\"Help\", description=\"List of available commands:\", color=0x00FF00)\n for i in self.bot.cogs:\n cog = self.bot.get_cog(i.lower())\n commands = cog.get_commands()\n command_list = [command.name for command in commands if not command.hidden or context.message.author.id in config.OWNERS]\n command_description = [command.help for command in commands if not command.hidden or context.message.author.id in config.OWNERS]\n help_text = '\\n'.join(f'{prefix}{n} - {h}' for n, h in zip(command_list, command_description))\n embed = discord.Embed(title=f\"Commands in {i.capitalize()} Cog\", description=f'```{help_text}```', color=0x00FF00)\n await user.send(embed=embed)\n if not isinstance(context.message.channel, discord.channel.DMChannel):\n await context.send(f\"DM sent to {user.mention}\")\n await context.message.delete()", "async def help(self, ctx, *cog):\n try:\n if not cog:\n halp = discord.Embed(title='Useless\\' Commands',\n description='Use `!help *category*` to find out more about the commands in them!')\n cogs_desc = ''\n for x in self.bot.cogs:\n cogs_desc = f'{x}'\n cmds = ''\n for cmd in self.bot.get_cog(x).get_commands():\n if not cmd.hidden:\n cmds += f'`{cmd.name}`, '\n if cmds != '':\n halp.add_field(name= cogs_desc,\n value=f'{cmds[0:-2]}',\n inline=False)\n cmds_desc = ''\n for y in self.bot.walk_commands():\n if not y.cog_name and not y.hidden:\n cmds_desc += ('`{}` - {}'.format(y.name, y.help) + '\\n')\n if cmds_desc != '':\n halp.add_field(name='Uncatergorized Commands',\n value=cmds_desc[0:len(cmds_desc) - 1],\n inline=False)\n await ctx.send(embed=halp)\n else:\n if len(cog) > 1:\n halp = discord.Embed(title='Error!',\n description='I can only help with 1 category!',\n color=discord.Color.red())\n await ctx.send(embed=halp)\n else:\n found = False\n for x in self.bot.cogs:\n for y in cog:\n if x == y:\n halp = discord.Embed(\n title=cog[0] + ' Command Listing',\n description=self.bot.cogs[cog[0]].__doc__)\n for c in self.bot.get_cog(y).get_commands():\n if not c.hidden:\n halp.add_field(name=c.name,\n value=c.help,\n inline=False)\n found = True\n if not found:\n halp = discord.Embed(title='Error!',\n description='How do you even use \"' +\n cog[0] + '\"?',\n color=discord.Color.red())\n await ctx.send('', embed=halp)\n\n except:\n print('Pass')\n pass", "async def managechannels(self, ctx:commands.Context):", "async def help(self, channel_id, user_infos, user_id, team_id):\n helpMessage = \"Bienvenue dans l'aide du bot MovieQuizz ! \\n\" \\\n \"Ce bot va tester vos connaissances cinématographiques ! \\n\" \\\n \"Les commandes disponibles sont les suivantes : \\n\" \\\n \" - ask : vous questionne à propos d'un film \\n\" \\\n \" - rank : affiche votre position et score \\n\" \\\n \" - ranking : affiche les 10 meilleures joueurs \\n\" \\\n \" - help : Vous connaissez déjà celle-là. 
\\n\" \\\n \"Amusez-vous bien les lapins !\"\n return await self.sendText(helpMessage, channel_id,user_infos, team_id)", "def help(bot, sender, sendmsg, label, args):\n\n clist = commands.commands\n csort = sorted(clist.values(), key=lambda c: c.__name__.lower())\n\n if len(args) > 0:\n page = int(args[0]) - 1\n else:\n page = 0\n\n pages = len(clist) // 10 + 1\n\n sendmsg(\"-- Help (Page {} of {}) --\".format(page + 1, pages))\n for i in range(10):\n if i >= len(csort):\n break\n\n command = csort[i + (page * 10)]\n sendmsg(\"{}: {}\".format(command.__name__, command.__doc__))", "def analyt(analytics):\n API_KEY = secrets.YT_KEY\n youtube = build('youtube', 'v3', developerKey=API_KEY)\n request = youtube.channels().list(\n part='statistics',\n forUsername=analytics\n )\n response = request.execute()\n print(response)", "def cmd_help(args):", "def usage(self):\n self._usage1()\n print 'folder COOL_channel COOL_tag ROOT_file'\n self._usage2()", "async def send_cog_help(self, cog):\n ctx = self.context\n title = cog.qualified_name\n embed = discord.Embed(\n title=title,\n description=cog.description,\n color=discord.Color.blue()\n )\n\n commands = cog.get_commands()\n\n if filtered_commands := await self.filter_commands(commands):\n for command in filtered_commands:\n embed.add_field(name=command, value=command.description or 'Without description')\n\n embed.set_footer(text=f'use {prefixo}help [command] for more information about commands')\n await ctx.reply(embed=embed)", "def do_config():\n\n tracking = get_tracking()\n for unit in (\"ppm\", \"sec\"):\n\ttunit = unit\n\tif unit == \"sec\":\n\t tunit = \"seconds\"\n\tprint \"multigraph chrony_%s\" % unit\n\tprint \"graph_title NTP (Chrony) Statistics (%s)\" % unit\n\tprint \"graph_vlabel %s\" % unit\n\tprint \"graph_args --base 1000\"\n\tprint \"graph_category time\"\n\tprint \"graph_info NTP (Chrony) tracking statistics (the ones measured in %s)\" % tunit\n\tfor key in tracking[tunit]:\n\t item = tracking[tunit][key]\n\t print \"\"\"%s.label %s\n%s.draw LINE2\n%s.info %s\"\"\" % (key, item[\"label\"], key, key, item[\"label\"])\n\tprint\n return 0", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n 
disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)", "def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. 
\r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text", "async def help(self, ctx):\n self.log_command_call(\"help\", ctx.message)\n await ctx.send(HELP_TEXT)\n embed_output = create_embed(description=MORE_INFO_TEXT)\n await ctx.send(embed=embed_output)", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def help(update, context):\n msg = \"\"\n msg += \"\\n/covid 7-Day-Incident per Million\"\n msg += \"\\n/daylio What did I do a year ago today?\"\n msg += \"\\n/f1last Results of the last race\"\n msg += \"\\n/f1stand Driver standings\"\n msg += \"\\n/f1next Time and place of the next race\"\n msg += \"\\n/fuel prices and consump. (args: Xeur Ykm)\"\n msg += \"\\n/ip Outside ip address\"\n msg += \"\\n/rate Exchange rates (args: Xeur/Yhuf)\"\n msg += \"\\n/rss check rss feeds for new content\"\n msg += \"\\n/sun Time of sunrise and sunset\"\n msg += \"\\n/xkcd Sends last comic image and alt\"\n msg.rstrip()\n update.message.reply_text(msg)", "def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()", "def _help_cog(cog: commands.Cog) -> str:\r\n # the underscore is there to differentiate from default attribute\r\n # default is class' docstring\r\n commands_list = '\\n' + (cog.description_ if hasattr(cog, 'description_') else '') + '\\n\\n'\r\n\r\n for command in cog.get_commands():\r\n # Add the cog's details to the embed.\r\n 
commands_list += f'**{command.name}**:' \\\r\n f'\\n- Usage: *{command.usage}*' \\\r\n f'\\n- Description: *{command.description}*' + \\\r\n (f'\\n- Aliases: *{\" \".join(command.aliases)}*' if command.aliases else '') + \\\r\n '\\n'\r\n # if there are no non-whitespace character (not description or commands) send \"nothing\"\r\n # in theory shouldn't happen outside dev environment\r\n return commands_list if commands_list.strip() else 'nothing'", "def command_help(args):\n\tprint_usage()\n\treturn 0", "async def view_stats(self, ctx):\n app_info = await self.bot.application_info()\n total_ram = (psutil.virtual_memory().total >> 30) + 1\n embed = discord.Embed(\n title=\"Bot Stats\",\n description=f\"Running on a dedicated server with {total_ram}GB RAM \\n provided by RandomGhost#0666.\",\n )\n\n embed.add_field(name=\"**__General Info__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"Latency\", value=f\"{self.bot.latency*1000:.03f}ms\")\n embed.add_field(name=\"Guild Count\", value=f\"{len(self.bot.guilds):,}\")\n embed.add_field(name=\"User Count\", value=f\"{len(self.bot.users):,}\")\n\n embed.add_field(name=\"**__Technical Info__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"System CPU Usage\", value=f\"{psutil.cpu_percent():.02f}%\")\n embed.add_field(name=\"System RAM Usage\", value=f\"{psutil.virtual_memory().used/1048576:.02f} MB\")\n embed.add_field(name=\"System Uptime\", value=f\"{timedelta(seconds=int(time.time() - psutil.boot_time()))}\")\n embed.add_field(name=\"Bot CPU Usage\", value=f\"{process.cpu_percent():.02f}%\")\n embed.add_field(name=\"Bot RAM Usage\", value=f\"{process.memory_info().rss / 1048576:.02f} MB\")\n embed.add_field(name=\"Bot Uptime\", value=f\"{timedelta(seconds=int(time.time() - process.create_time()))}\")\n\n embed.add_field(name=\"**__Links__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"Support Server\", value=\"[https://discord.swaglyrics.dev](https://discord.swaglyrics.dev)\")\n embed.add_field(name=\"Invite\", value=\"[https://invite.swaglyrics.dev](https://invite.swaglyrics.dev)\")\n embed.add_field(\n name=\"Source\",\n value=\"[https://swaglyrics.dev/SwagLyrics-Discord-Bot]\" \"(https://swaglyrics.dev/SwagLyrics-discord-bot)\",\n )\n\n embed.set_footer(\n text=f\"Made by {app_info.owner} • {self.bot.get_user(512708394994368548)}\",\n icon_url=[\n app_info.owner.avatar_url_as(size=128),\n self.bot.get_user(512708394994368548).avatar_url_as(size=128),\n ][getrandbits(1)],\n ) # randomize clash or flabbet avatar\n\n await ctx.send(embed=embed)", "def mc(self, *args) -> None:\n env = os.environ.copy()\n env['MC_HOST_minio'] = self.auth_url\n # --config-dir is set just to prevent any config set by the user\n # from interfering with the test.\n try:\n subprocess.run(\n [\n 'mc', '--quiet', '--no-color', f'--config-dir={self.path}',\n *args\n ],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.PIPE,\n env=env,\n encoding='utf-8',\n errors='replace',\n check=True\n )\n except OSError as exc:\n raise MissingProgram(f'mc could not be run: {exc}') from exc\n except subprocess.CalledProcessError as exc:\n raise ProgramFailed(exc.stderr) from exc", "def print_help_info(self, global_options):\r\n usage = ['',\"Type '%s help <subcommand>' for help on a specific subcommand.\" % self.prog_name,'']\r\n usage.append('Available subcommands:')\r\n commands = self.get_commands(global_options).keys()\r\n commands.sort()\r\n for cmd in commands:\r\n usage.append(' %s' % cmd)\r\n return '\\n'.join(usage)", "def 
procs_calculate_axyzc(molecules, n_cores=-1, show_progress=True, scr=None, cmd=XTB_CMD):\n results = None\n return results", "async def _c_list(self, ctx):\n command_list = self.database.get_guild_commands(ctx.guild.id)\n if len(command_list) == 0:\n await ctx.send(\"This server has no custom commands\")\n return\n out = \"```\\nServer Commands:\\n\"\n for command in command_list:\n out += f\"{command.name}: {command.text}\\n\"\n out += \"```\"\n await ctx.send(out)", "def list(sw, args):\n parser = argparse.ArgumentParser(\n prog='space channel list',\n description='List channels in spacewalk.'\n )\n parser.add_argument(\n 'type',\n choices=[\n 'all',\n 'user',\n 'popular',\n 'retired',\n 'shared',\n 'software',\n 'vendor'\n ],\n default='popular',\n help=\"Type of search you would like to perform\"\n )\n parser.add_argument(\n '--format',\n choices=[\n 'raw',\n 'json',\n 'pretty'\n ],\n default='pretty',\n required=False\n )\n parser.add_argument(\n '--popcount',\n default=None,\n help=('channels with at least this many systems ' +\n 'subscribed will be returned')\n )\n\n api_calls = {\n 'all': 'channel.listAllChannels',\n 'user': 'channel.listMyChannels',\n 'popular': 'channel.listPopularChannels',\n 'retired': 'channel.listRetiredChannels',\n 'shared': 'channel.listSharedChannels',\n 'software': 'channel.listSoftwareChannels',\n 'vendor': 'channel.listVendorChannels'\n }\n\n p = parser.parse_args(args)\n\n if p.type == 'popular' and not p.popcount:\n print(\"Popular requires popcount arg.\")\n parser.print_help()\n return False\n\n if p.popcount:\n popcount = int(p.popcount)\n results = sw.call(\n api_calls[p.type],\n popcount\n )\n else:\n results = sw.call(\n api_calls[p.type]\n )\n if results == []:\n print(\"Empty result set.\")\n\n channels = []\n for result in results:\n channels.append(result)\n\n if p.format == 'pretty':\n \"\"\"\n int \"id\"\n string \"label\"\n string \"name\"\n string \"provider_name\"\n int \"packages\"\n int \"systems\"\n string \"arch_name\"\n \"\"\"\n if p.type == \"software\":\n t = prettytable.PrettyTable([\n \"Label\",\n \"Name\",\n \"Parent Label\",\n \"End Of Life\",\n \"Arch\"\n ])\n t.align[\"Label\"] = \"l\"\n t.align[\"Name\"] = \"l\"\n t.align[\"Parent Label\"] = \"l\"\n t.padding_width = 1\n for c in results:\n\n t.add_row([\n c['label'],\n c['name'],\n c['parent_label'],\n c['end_of_life'],\n c['arch']\n ])\n else:\n t = prettytable.PrettyTable([\n \"Label\",\n \"Name\",\n \"Provider Name\",\n \"Packages\",\n \"Systems\",\n \"Arch Name\"\n ])\n t.align[\"Label\"] = \"l\"\n t.align[\"Name\"] = \"l\"\n t.align[\"Packages\"] = \"r\"\n t.align[\"Systems\"] = \"r\"\n t.align[\"Provider Name\"] = \"l\"\n t.padding_width = 1\n for c in results:\n\n t.add_row([\n c['label'],\n c['name'],\n c['provider_name'],\n c['packages'],\n c['systems'],\n c['arch_name']\n ])\n print(t)\n\n elif p.format == 'json':\n output = json.dumps(dict(channels=channels))\n print(output)\n else:\n for result in results:\n print(result)\n return results", "def help(update, context):\n update.message.reply_text(\n 'Use /convert to convert streaming music urls into other services. 
For example: \\n' +\n '/convert https://open.spotify.com/track/3nGWzFBJ5tMzHWAgs16fK6?si=dXVxz7D2RIya96S1jf-7VQ'\n )", "async def eventstats(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send_help(ctx.command)", "def help(self):\n msg = \"`%s' performs the computational aspects of genotyping-by-sequencing.\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Usage: %s [OPTIONS] ...\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Options:\\n\"\n msg += \" -h, --help\\tdisplay the help and exit\\n\"\n msg += \" -V, --version\\toutput version information and exit\\n\"\n msg += \" -v, --verbose\\tverbosity level (0/default=1/2/3)\\n\"\n msg += \" --proj1\\tname of the project used for steps 1 to 4\\n\"\n msg += \"\\t\\tmention a reference genome only if all samples belong to\\n\"\n msg += \"\\t\\t the same species, and will be mapped to the same ref genome\\n\"\n msg += \" --proj2\\tname of the project used for steps 4 to 8\\n\"\n msg += \"\\t\\tcan be the same as --proj1, or can be different\\n\"\n msg +=\"\\t\\t notably when samples come from different species\\n\"\n msg += \"\\t\\t or if one wants to align reads to different ref genomes\\n\"\n msg += \" --schdlr\\tname of the cluster scheduler (default=SGE)\\n\"\n msg += \" --queue\\tname of the cluster queue (default=normal.q)\\n\"\n msg += \" --resou\\tcluster resources (e.g. 'test' for 'qsub -l test')\\n\"\n msg += \" --rmvb\\tremove bash scripts for jobs launched in parallel\\n\"\n msg += \" --step\\tstep to perform (1/2/3/.../9)\\n\"\n msg += \"\\t\\t1: raw read quality per lane (with FastQC v >= 0.11.2)\\n\"\n msg += \"\\t\\t2: demultiplexing per lane (with demultiplex.py v >= 1.14.0)\\n\"\n msg += \"\\t\\t3: cleaning per sample (with CutAdapt v >= 1.8)\\n\"\n msg += \"\\t\\t4: alignment per sample (with BWA MEM v >= 0.7.12, Samtools v >= 1.3, Picard and R v >= 3)\\n\"\n msg += \"\\t\\t5: local realignment per sample (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t6: local realignment per genotype (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t7: variant and genotype calling per genotype (with GATK HaplotypeCaller v >= 3.5)\\n\"\n msg += \"\\t\\t8: variant and genotype calling jointly across genotypes (with GATK GenotypeGVCFs v >= 3.5)\\n\"\n msg += \"\\t\\t9: variant and genotype filtering (with GATK v >= 3.5)\\n\"\n msg += \" --samples\\tpath to the 'samples' file\\n\"\n msg += \"\\t\\tcompulsory for all steps, but can differ between steps\\n\"\n msg += \"\\t\\t e.g. if samples come from different species or are aligned\\n\"\n msg += \"\\t\\t on different ref genomes, different samples file should\\n\"\n msg += \"\\t\\t be used for steps 4-9, representing different subsets of\\n\"\n msg += \"\\t\\t the file used for steps 1-3\\n\"\n msg += \"\\t\\tthe file should be encoded in ASCII\\n\"\n msg += \"\\t\\tthe first row should be a header with column names\\n\"\n msg += \"\\t\\teach 'sample' (see details below) should have one and only one row\\n\"\n msg += \"\\t\\tany two columns should be separated with one tabulation\\n\"\n msg += \"\\t\\tcolumns can be in any order\\n\"\n msg += \"\\t\\trows starting by '#' are skipped\\n\"\n msg += \"\\t\\t12 columns are compulsory (but there can be more):\\n\"\n msg += \"\\t\\t genotype (see details below, e.g. 'Col-0', but use neither underscore '_' nor space ' ' nor dot '.', use dash '-' instead)\\n\"\n msg += \"\\t\\t ref_genome (identifier of the reference genome used for alignment, e.g. 
'Atha_v2', but use neither space ' ' nor dot '.'; the full species name, e.g. 'Arabidopsis thaliana', will be present in the file given to --dict)\\n\"\n msg += \"\\t\\t library (e.g. can be the same as 'genotype')\\n\"\n msg += \"\\t\\t barcode (e.g. 'ATGG')\\n\"\n msg += \"\\t\\t seq_center (e.g. 'Broad Institute', 'GenoToul', etc)\\n\"\n msg += \"\\t\\t seq_platform (e.g. 'ILLUMINA', see SAM format specification)\\n\"\n msg += \"\\t\\t seq_platform_model (e.g. 'HiSeq 2000')\\n\"\n msg += \"\\t\\t flowcell (e.g. 'C5YMDACXX')\\n\"\n msg += \"\\t\\t lane (e.g. '3', can be '31' if a first demultiplexing was done per index)\\n\"\n msg += \"\\t\\t date (e.g. '2015-01-15', see SAM format specification)\\n\"\n msg += \"\\t\\t fastq_file_R1 (filename, one per lane, gzip-compressed)\\n\"\n msg += \"\\t\\t fastq_file_R2 (filename, one per lane, gzip-compressed)\\n\"\n msg += \" --fcln\\tidentifier of a flowcell and lane number\\n\"\n msg += \"\\t\\tformat as <flowcell>_<lane-number>, e.g. 'C5YMDACXX_1'\\n\"\n msg += \"\\t\\tif set, only the samples from this lane will be analyzed\\n\"\n msg += \" --pird\\tpath to the input reads directory\\n\"\n msg += \"\\t\\tcompulsory for steps 1 and 2\\n\"\n msg += \"\\t\\twill be added to the columns 'fastq_file_R*' from the sample file\\n\"\n msg += \"\\t\\tif not set, input read files should be in current directory\\n\"\n msg += \" --enz\\tname of the restriction enzyme\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=ApeKI\\n\"\n msg += \" --dmxmet\\tmethod used to demultiplex\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=4c (see the help of demultiplex.py to know more)\\n\"\n msg += \" --subst\\tnumber of substitutions allowed during demultiplexing\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=2\\n\"\n msg += \" --ensubst\\tenforce the nb of substitutions allowed\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=lenient/strict\\n\"\n msg += \" --adp\\tpath to the file containing the adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tsame format as FastQC: name<tab>sequence\\n\"\n msg += \"\\t\\tname: at least 'adpR1' (also 'adpR2' if paired-end)\\n\"\n msg += \"\\t\\tsequence: from 5' (left) to 3' (right)\\n\"\n msg += \" --errtol\\terror tolerance to find adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --minovl\\tminimum overlap length between reads and adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=3 (in bases)\\n\"\n msg += \" --minrl\\tminimum length to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=35 (in bases)\\n\"\n msg += \" --minq\\tminimum quality to trim a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=20 (used for both reads if paired-end)\\n\"\n msg += \" --maxNp\\tmaximum percentage of N to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --ref\\tpath to the prefix of files for the reference genome\\n\"\n msg += \"\\t\\tcompulsory for steps 4, 5, 6, 7, 8, 9\\n\"\n msg += \"\\t\\tshould correspond to the 'ref_genome' column in --samples\\n\"\n msg += \"\\t\\te.g. 
'/data/Atha_v2' for '/data/Atha_v2.fa', '/data/Atha_v2.bwt', etc\\n\"\n msg += \"\\t\\tthese files are produced via 'bwa index ...'\\n\"\n msg += \" --dict\\tpath to the 'dict' file (SAM header with @SQ tags)\\n\"\n msg += \"\\t\\tcompulsory for step 4\\n\"\n msg += \"\\t\\tsee 'CreateSequenceDictionary' in the Picard software\\n\"\n msg += \" --jgid\\tcohort identifier to use for joint genotyping\\n\"\n msg += \"\\t\\tcompulsory for steps 8, 9\\n\"\n msg += \"\\t\\tuseful to launch several, different cohorts in parallel\\n\"\n msg += \" --rat\\trestrict alleles to be of a particular allelicity\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdefault=ALL/BIALLELIC/MULTIALLELIC\\n\"\n msg += \"\\t\\tsee '--restrictAllelesTo' in GATK's SelectVariant\\n\"\n msg += \" --mdp\\tminimum value for DP (read depth; e.g. 10)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mgq\\tminimum value for GQ (genotype quality; e.g. 20)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mnfg\\tmaximum number of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mffg\\tmaximum fraction of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFractionFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mnnc\\tmaximum number of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \" --mfnc\\tmaximum fraction of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxNOCALLfraction' in GATK's SelectVariants\\n\"\n msg += \" --fam\\tpath to the file containing pedigree information\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdiscard variants with Mendelian violations (see Semler et al, 2012)\\n\"\n msg += \"\\t\\tshould be in the 'fam' format specified by PLINK\\n\"\n msg += \"\\t\\tvalidation strictness (GATK '-pedValidationType') is set at 'SILENT'\\n\"\n msg += \"\\t\\t allowing some samples to be absent from the pedigree\\n\"\n msg += \" --mvq\\tminimum GQ for each trio member to accept a variant as a Mendelian violation\\n\"\n msg += \"\\t\\tused in step 9 if '--fam' is specified\\n\"\n msg += \"\\t\\tdefault=0\\n\"\n msg += \" --xlssf\\tpath to the file with genotypes to exclude\\n\"\n msg += \"\\t\\tused in step 9 (can be especially useful if '--fam' is specified)\\n\"\n msg += \" --tmpd\\tpath to a temporary directory on child nodes (default=.)\\n\"\n msg += \"\\t\\te.g. 
it can be /tmp or /scratch\\n\"\n msg += \"\\t\\tused in step 4 for 'samtools sort'\\n\"\n msg += \"\\t\\tused in step 7 for 'GATK HaplotypeCaller'\\n\"\n msg += \" --jvmXms\\tinitial memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=512m (can also be specified as 1024k, 1g, etc)\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --jvmXmx\\tmaximum memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=4g\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --queue2\\tname of the second cluster queue (default=bigmem.q)\\n\"\n msg += \"\\t\\tused in step 4 for Picard to collect insert sizes\\n\"\n msg += \" --knowni\\tpath to a VCF file with known indels (for local realignment)\\n\"\n msg += \" --known\\tpath to a VCF file with known variants (e.g. from dbSNP)\\n\"\n msg += \" --force\\tforce to re-run step(s)\\n\"\n msg += \"\\t\\tthis removes without warning the step directory if it exists\\n\"\n msg += \"\\n\"\n msg += \"Examples:\\n\"\n msg += \" %s --step 1 --samples samples.txt\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Details:\\n\"\n msg += \"This program aims at genotyping a set of 'genotypes' using data from\\n\"\n msg += \"a restriction-assisted DNA sequencing (RAD-seq) experiment, also known\\n\"\n msg += \"as a genotyping-by-sequencing (GBS) experiment.\\n\"\n msg += \"Here, by 'genotype', we mean the entity which is the focus of the\\n\"\n msg += \"study. For instance, it can be a plant variety (or a human being), or\\n\"\n msg += \"the specific clone of a given plant variety (or a specific tumor of a\\n\"\n msg += \"given human being), etc.\\n\"\n msg += \"Importantly, note that the content of the 'genotype' column will\\n\"\n msg += \"be used to set the 'SM' (sample) tag of the 'RG' (read group) header\\n\"\n msg += \"record type of the SAM format (see http://www.htslib.org/). However,\\n\"\n msg += \"internal to this program, the term 'sample' corresponds to the unique\\n\"\n msg += \"quadruplet (genotype,flowcell,lane,barcode) for steps 1 and 2, and to\\n\"\n msg += \"the unique triplet (genotype,flowcell,lane) for the others.\\n\"\n msg += \"Jobs are executed in parallel (--schdlr). Their return status is\\n\"\n msg += \"recorded in a SQLite database which is removed at the end. 
If a job\\n\"\n msg += \"fails, the whole script stops with an error.\\n\"\n msg += \"\\n\"\n msg += \"Dependencies:\\n\"\n msg += \"Python >= 2.7; Biopython; pyutilstimflutre >= 0.5\\n\"\n msg += \"\\n\"\n msg += \"Report bugs to <[email protected]>.\"\n print(msg); sys.stdout.flush()", "def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging", "async def channel_info(bot, message):\n if isinstance(CHANNELS, (int, str)):\n channels = [CHANNELS]\n elif isinstance(CHANNELS, list):\n channels = CHANNELS\n else:\n raise ValueError(\"Unexpected type of CHANNELS\")\n\n text = '📑 **Indexed channels/groups**\\n'\n for channel in channels:\n chat = await bot.get_chat(channel)\n if chat.username:\n text += '\\n@' + chat.username\n else:\n text += '\\n' + chat.title or chat.first_name\n\n text += f'\\n\\n**Total:** {len(CHANNELS)}'\n\n if len(text) < 4096:\n await message.reply(text)\n else:\n file = 'Indexed channels.txt'\n with open(file, 'w') as f:\n f.write(text)\n await message.reply_document(file)\n os.remove(file)", "def stats(self, d_raw_materials=None):\n cm_stats = 'sugar {0} tablespoons remaining\\n'.format(d_raw_materials['sugar'])\n cm_stats += 'butter {0} teaspoons remaining\\n'.format(d_raw_materials['butter'])\n cm_stats += 'dark chocolate {0} tablespoons remaining\\n'.format(d_raw_materials['dark chocolate'])\n cm_stats += 'mint chocolate {0} tablespoons remaining\\n'.format(d_raw_materials['mint chocolate'])\n cm_stats += 'milk chocolate {0} tablespoons remaining\\n'.format(d_raw_materials['milk chocolate'])\n cm_stats += 'light corn syrup {0} teaspoons remaining\\n'.format(d_raw_materials['light corn syrup'])\n cm_stats += 'sweetened condensed milk {0} teaspoons remaining\\n'.format(d_raw_materials[\n 'sweetened condensed milk'])\n cm_stats += 'vanilla extract {0} teaspoons remaining\\n'.format(d_raw_materials['vanilla extract'])\n cm_stats += 'Reese\\'s Pieces {0} tablespoons 
remaining\\n'.format(d_raw_materials['Reese\\'s Pieces'])\n cm_stats += super(ChocolateMachine, self).stats()\n return cm_stats", "def metrics_flash(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_flash(cmd_ctx, cpc, options))", "def getHelp(self):\r\n help_str =\\\r\n \"\"\"##########################################################################################\r\n#\r\n# Required:\r\n#\r\n# --query_NAST multi-fasta file containing query sequences in alignment format\r\n#\r\n# Common opts:\r\n#\r\n# --db_NAST db in NAST format\r\n# --db_FASTA db in fasta format (megablast formatted)\r\n#\r\n#\r\n# -n number of top matching database sequences to compare to (default 15)\r\n# -R min divergence ratio default: 1.007\r\n# -P min percent identity among matching sequences (default: 90)\r\n#\r\n# ## parameters to tune ChimeraParentSelector:\r\n#\r\n# Scoring parameters:\r\n# -M match score (default: +5)\r\n# -N mismatch penalty (default: -4)\r\n# -Q min query coverage by matching database sequence (default: 70)\r\n# -T maximum traverses of the multiple alignment (default: 1)\r\n\r\n#\r\n# ## parameters to tune ChimeraPhyloChecker:\r\n#\r\n#\r\n# --windowSize default 50\r\n# --windowStep default 5\r\n# --minBS minimum bootstrap support for calling chimera (default: 90)\r\n# -S percent of SNPs to sample on each side of breakpoint for computing bootstrap support (default: 10)\r\n# --num_parents_test number of potential parents to test for chimeras (default: 3)\r\n# --MAX_CHIMERA_PARENT_PER_ID Chimera/Parent alignments with perID above this are considered non-chimeras (default 100; turned off)\r\n#\r\n# ## misc opts\r\n#\r\n# --printFinalAlignments shows alignment between query sequence and pair of candidate chimera parents\r\n# --printCSalignments print ChimeraSlayer alignments in ChimeraSlayer output\r\n# --exec_dir chdir to here before running\r\n#\r\n#########################################################################################\r\n \"\"\"\r\n return help_str", "async def showHelp(client, message):\n\n\t\tvalue = message.content.replace(cmd.show_help[\"command\"], \"\").strip()\n\n\t\tif value == \"\" or message.content.find(cmd.show_help[\"command\"]) == -1:\n\t\t\t\thelpStr = [\"Welcome to The Chronicler Help!\\n\\nBelow is a list of the commands that The Chronicler can read and understand. 
If you wish to learn more about a specific command, type the help command, followed by the command name that you wish to know about (for example: \" + (cmd.show_help[\"command\"]) + \" \" + (cmd.create_channel[\"command_name\"]) + \").\\n\\nIf you wish to learn more about how to format text such as making text bold or italicized, you can find that information here: https://support.discordapp.com/hc/en-us/articles/210298617-Markdown-Text-101-Chat-Formatting-Bold-Italic-Underline-\\n\\n\"]\n\n\t\t\t\ti = 0\n\t\t\t\tcNum = 0\n\n\t\t\t\twhile cNum < len(cmd.command_list):\n\t\t\t\t\t\tif len(helpStr[i] + cmd.command_list[cNum][\"command_name\"]) >= 1975:\n\t\t\t\t\t\t\t\ti += 1\n\n\t\t\t\t\t\t\t\t#If the Last Command in List\n\t\t\t\t\t\t\t\tif cNum == len(cmd.command_list) - 1:\n\t\t\t\t\t\t\t\t\t\thelpStr.append(cmd.command_list[cNum][\"command_name\"])\n\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\thelpStr.append(cmd.command_list[cNum][\"command_name\"] + \", \")\n\t\t\t\t\t\t\t\t\t\tcNum += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t#If the Last Command in List\n\t\t\t\t\t\t\t\tif cNum == len(cmd.command_list) - 1:\n\t\t\t\t\t\t\t\t\t\thelpStr[i] += cmd.command_list[cNum][\"command_name\"]\n\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\thelpStr[i] += cmd.command_list[cNum][\"command_name\"] + \", \"\n\t\t\t\t\t\t\t\t\t\tcNum += 1\n\n\t\t\t\tk = 0\n\t\t\t\twhile k < len(helpStr):\n\t\t\t\t\t\tawait lib.message.send(client, message.channel, helpStr[k], delete=False)\n\t\t\t\t\t\tk += 1\n\n\n\t\telse:\n\t\t\t\thelpStr = \"\"\n\t\t\t\tfor command in cmd.command_list:\n\t\t\t\t\t\tif command[\"command_name\"] == value:\n\t\t\t\t\t\t\t\thelpStr = \"__**\" + command[\"name\"] + \"**__\\n\\n\"\n\t\t\t\t\t\t\t\thelpStr += \"__Command:__ \" + command[\"command\"] + \"\\n\\n\"\n\t\t\t\t\t\t\t\thelpStr += \"__Description:__ \" + command[\"description\"] + \"\\n\\n\"\n\t\t\t\t\t\t\t\thelpStr += \"__Can Be Posted In:__ \" + command[\"can_post_in\"] + \"\\n\\n\"\n\n\t\t\t\t\t\t\t\toptionStr = \"\"\n\n\t\t\t\t\t\t\t\tif command[\"options\"] != None and len(command[\"options\"]) > 0:\n\t\t\t\t\t\t\t\t\t\toptionStr += \"__Options:__\\n\"\n\t\t\t\t\t\t\t\t\t\tfor option in command[\"options\"]:\n\t\t\t\t\t\t\t\t\t\t\t\toptionStr += \"\\n\\t\" + \"* \" + option\n\t\t\t\t\t\t\t\t\t\toptionStr += \"\\n\\n\"\n\n\t\t\t\t\t\t\t\texampleStr = \"\"\n\n\t\t\t\t\t\t\t\tif command[\"examples\"] != None and len(command[\"examples\"]) > 0:\n\t\t\t\t\t\t\t\t\t\texampleStr += \"\\n\\n__Examples:__\\n\\n\"\n\t\t\t\t\t\t\t\t\t\tfor example in command[\"examples\"]:\n\t\t\t\t\t\t\t\t\t\t\t\texampleStr += \"\\t\" + example + \"\\n\"\n\n\t\t\t\t\t\t\t\tif len(helpStr + optionStr + exampleStr) <= 2000:\n\t\t\t\t\t\t\t\t\t\tawait lib.message.send(client, message.channel, helpStr + optionStr + exampleStr, ignoreStyle=True, delete=False)\n\t\t\t\t\t\t\t\telif len(helpStr + optionStr) <= 2000:\n\t\t\t\t\t\t\t\t\t\tawait lib.message.send(client, message.channel, helpStr + optionStr, ignoreStyle=True, delete=False)\n\t\t\t\t\t\t\t\t\t\tawait lib.message.send(client, message.channel, exampleStr, ignoreStyle=True, delete=False)\n\t\t\t\t\t\t\t\telif len(optionStr + exampleStr) <= 2000:\n\t\t\t\t\t\t\t\t\t\tawait lib.message.send(client, message.channel, helpStr, ignoreStyle=True, delete=False)\n\t\t\t\t\t\t\t\t\t\tawait lib.message.send(client, message.channel, optionStr + exampleStr, ignoreStyle=True, delete=False)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tawait lib.message.send(client, 
message.channel, helpStr, ignoreStyle=True, delete=False)\n\t\t\t\t\t\t\t\t\t\tawait lib.message.send(client, message.channel, optionStr, ignoreStyle=True, delete=False)\n\t\t\t\t\t\t\t\t\t\tawait lib.message.send(client, message.channel, exampleStr, ignoreStyle=True, delete=False)\n\n\t\t\t\t\t\t\t\treturn\n\n\t\t\t\t#Show Player That The Chronicler Was Unsuccessful\n\t\t\t\tawait lib.reaction.reactThumbsDown(client, message)\n\n\t\t\t\tawait lib.error.postError(client, message.channel, \"We could not find the command that you provided. Type '!c help' for a full list of available commands that The Chronicler can read.\")", "def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc", "async def info(self, ctx):\n\n uptime = func.time_(self.bot.launch_time)\n users = sum(1 for _ in self.bot.get_all_members())\n channels = sum(1 for _ in self.bot.get_all_channels())\n\n author = self.bot.get_user(299879858572492802)\n\n invite = 'https://discordapp.com/oauth2/authorize?client_id=347205176903335937&scope=bot&permissions=470150359'\n about = ('Infamous is a actively developed bot that gets updated daily.'\n f' It is written with passion by {author} using the Rewrite branch of the discord.py library.')\n\n links = (f'**[[Invite Bot]]({invite})** \\n'\n '**[[Fame Discord]](https://discord.gg/NY2MSA3)** \\n'\n 
'**[[Discord.py]](https://github.com/Rapptz/discord.py/tree/rewrite)** \\n'\n '**[[Support]](https://discord.gg/JyJTh4H)**')\n\n # From Modelmat\n cpu_usage = self.process.cpu_percent() / psutil.cpu_count()\n ram_usage = self.process.memory_full_info().uss / 1024 ** 2\n\n embed = discord.Embed(color=self.bot.embed_color)\n embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar_url)\n embed.description = 'A multi-purpose bot with image manipulation, wiki pages and it\\'s own rpg; originally a ' \\\n 'community bot for ★ Fame ★'\n embed.set_thumbnail(\n url=self.bot.user.avatar_url)\n\n embed.add_field(name='About', value=about, inline=False)\n\n embed.add_field(name='Statistics 📈',\n value=(f'**{len(self.bot.guilds)} guilds.**\\n'\n f'**{channels} channels.**\\n'\n f'**{users} users.** \\n'\n f'**{self.bot.lines} lines**'), inline=True)\n\n embed.add_field(name='Uptime ⏰', value=(f'**{uptime[0]} days.** \\n'\n f'**{uptime[1]} hours.** \\n'\n f'**{uptime[2]} minutes.** \\n'\n f'**{uptime[3]} seconds.**'), inline=True)\n\n embed.add_field(name='Developer 🕵', value=author)\n embed.add_field(name='Resources 💻', value='`CPU:` {:.2f}% \\n`MEM:` {:.2f}'.format(cpu_usage, ram_usage))\n embed.add_field(name='Links 🔗', value=links, inline=True)\n\n await ctx.send(embed=embed)", "def usage(app_name):\n global version\n print '\\npython {0} -a MediaLive_ARN -n Dashboard_Name [Optional parameters]\\n'.format(app_name)\n print 'Version:', version\n print '\\nThis script creates a CloudWatch Dashboard for a MediaLive/MediaPackage workflow.'\n print \"It uses the MediaLive Channel Arn as input and determines the MediaPackage instances from the \"\n print \"MediaLive channel configuration. It then creates the CloudWatch Dashboard that contains info on the\"\n print \"MediaLive channel, the two MediaPackage channels, and all of the MediaPackage endpoints.\"\n print \"\\nRequired parameters:\"\n print \"-a, --arn: MediaLive Channel ARN\"\n print \"-n, --name: Name for the CloudWatch Dashboard. \"\n print \"\"\n print \"Optional parameters\"\n print \"-l, --list: Filename of a file that contains a list of MediaLive Channel ARNs, 1 ARN per line. \"\n print \" All MediaLive channels and their corresponding MediaPackage channels will be included in \"\n print \" the CloudWatch Dashboard.\"\n print \" Note: This parameter is ignored if a channel ARN is provided via the '-a/--arn' option\"\n print \" Note: All ARNs in the list must be for channels in the same region. 
All ARNs not in the same\"\n print \" region as the first ARN in the list will be ignored.\"\n print '-h, --help: Print this help and exit.'\n print \"\"\n print 'Examples:'\n print \"\"\n print 'Using MediaLive ARN arn:aws:medialive:us-west-2:0123456789:channel:123456 and create a CloudWatch ' \\\n 'Dashboard called \"My TV Dashboard\"'\n print 'python {0} -a arn:aws:medialive:us-west-2:0123456789:channel:123456 ' \\\n '-n \"My TV Dashboard\" '.format(app_name)\n print \"\"\n print 'Using the MediaLive Channel ARN list defined in the text file \"My EML arns.txt\" create a CloudWatch' \\\n 'Dashboard called \"Primary Bouquet\".'\n print 'python {0} -l \"My EML arns.txt\" -n \"Primary Bouquet\"\\n'.format(app_name)", "async def chatchart(self, ctx, channel: Optional[discord.TextChannel] = None, messages:int = 5000):\n if channel is None:\n channel = ctx.channel\n\n # --- Early terminations\n if channel.permissions_for(ctx.message.author).read_messages is False:\n return await ctx.send(\"You're not allowed to access that channel.\")\n if channel.permissions_for(ctx.guild.me).read_messages is False:\n return await ctx.send(\"I cannot read the history of that channel.\")\n blacklisted_channels = await self.config.guild(ctx.guild).channel_deny()\n if channel.id in blacklisted_channels:\n return await ctx.send(f\"I am not allowed to create a chatchart of {channel.mention}.\")\n if messages < 5:\n return await ctx.send(\"Don't be silly.\")\n\n message_limit = await self.config.limit()\n if (message_limit != 0) and (messages > message_limit):\n messages = message_limit\n\n embed = discord.Embed(\n title=f\"Fetching messages from #{channel.name}\",\n description=\"This might take a while...\",\n colour=await self.bot.get_embed_colour(location=channel)\n )\n loading_message = await ctx.send(embed=embed)\n try:\n history = await self.fetch_channel_history(channel, loading_message, messages)\n except discord.errors.Forbidden:\n try:\n await loading_message.delete()\n except discord.NotFound:\n pass\n return await ctx.send(\"No permissions to read that channel.\")\n\n msg_data = self.calculate_member_perc(history)\n # If no members are found.\n if len(msg_data[\"users\"]) == 0:\n try:\n await loading_message.delete()\n except discord.NotFound:\n pass\n return await ctx.send(f\"Only bots have sent messages in {channel.mention} or I can't read message history.\")\n\n top_twenty, others = self.calculate_top(msg_data)\n chart = await self.create_chart(top_twenty, others, channel)\n\n try:\n await loading_message.delete()\n except discord.NotFound:\n pass\n await ctx.send(file=discord.File(chart, \"chart.png\"))", "def show_all(self):\n cmodules.showModuleData(\n Options.Author,\n Options.Name,\n Options.Call,\n Options.Category,\n Options.Type,\n Options.Version,\n Options.Description,\n Options.License,\n Options.Datecreation,\n Options.Lastmodified\n )\n self.show_commands()\n self.show_opt()", "async def help(self, args):\n if not args:\n maxw = max([len(x) for x in self.commands]) + 1\n commands = list(self.commands)\n commands.sort()\n message = '\\n'.join(['`{name:{width}}|` {desc}'.format(\n name=command, width=maxw,\n desc=(self.commands[command].__doc__ or 'No description.').splitlines()[0]\n ) for command in commands])\n await self.send(\"Unlisted commands are forwarded to the Minecraft server.\\n\" + message)\n elif args.lower() not in self.commands:\n await self.send_error(\"Unknown command: {command}. 
This might be a Minecraft command.\".format(command=args))\n else:\n args = args.lower()\n await self.send(\"**`{name}`** - {doc}\".format(name=args, doc=self.commands[args].__doc__ or 'No description.'))", "def help_help(self):\n print(\"List commands or print details about a command\")", "async def help(ctx):\n await ctx.message.author.send(\n \"The Nano Center Subscription Bot is a discord bot that allows you to \"\n \"receive recognition for contributing to The Nano Center initiatives!\\n\"\n f\"For {os.getenv('AMOUNT')} NANO, you will receive the {os.getenv('ROLE_NAME')} role for {os.getenv('PERIOD')} days, \"\n \"a special color reserved for donors to show your support and the unending gratitude of your Nano friends.\\n\\n\"\n \"Commands:\\n\"\n \"- !help - This command! A description of the bot and a list of commands\\n\"\n \"- !status (also !renew / !subscribe) - Gives a status of your subscription along with payment address and subscription cost.\"\n )", "def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))", "def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m", "async def help(self, msg, *args, intro=None):\n sudo = Guard.allow_sudo(msg)\n if intro is not None:\n intro = f'{intro.strip()} '\n else:\n intro = ''\n if msg.channel.type == discord.ChannelType.private:\n nick = '@DayRInfo'\n else:\n nick = f'@{msg.channel.guild.me.nick}'\n content = f'{intro}I understand the following commands (tag me at the start of the message):\\n'\n for command, (args, desc, enabled, delay) in Controller.commands.items():\n if not sudo and not enabled:\n continue\n if args:\n args = f' {args.strip()}'\n if desc:\n desc = f'\\n\\t{desc}'\n content = f'{content}`{Controller.HELP_KEY}{command}{args}`{desc}\\n'\n content = f'{content}----------\\n'\n content = f'{content}• Also, if you tag this bot ({nick}) on a message containing a link to the interactive Day R map 🗺️ with a location URL, I will send you a snapshot of the location.\\n'\n content = f'{content}• React with ❌ to any of my messages to delete it (if I still remember that it was my message). 
You can only delete my messages that are directed to you.'\n await msg.author.send(**{\n 'content': content,\n })\n await msg.channel.send(**{\n 'content': 'Command list sent via DM!',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })", "def help(self, msg=None):\n\n # Print the message if given.\n if not msg == None:\n print str(msg) + \"\\n\"\n\n # Display the list of commands, in the alphabetical order.\n print \"Use one of the following commands:\"\n for action in sorted(self.actions.keys()):\n info = self.actions[action]\n joined_oblig = ' '.join(info['required'])\n if len(info['additional']) > 0:\n add = [\"<%s>\" % x for x in info['additional']]\n joined_add = '[' + ' '.join(add) + ']'\n else:\n joined_add = ''\n print \"\\t* %s %s %s\" % (action, joined_oblig, joined_add)", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "async def help(self, ctx, *cog):\n try:\n halp = None\n if not cog:\n halp = discord.Embed(title='Cog Listing and Uncatergorized Commands',\n description='Use `!help *cog*` to find out more about them!\\n'\n '(BTW, the Cog Name Must Be in Title Case, Just Like this Sentence.)')\n cogs_desc = ''\n for x in self.bot.cogs:\n cogs_desc += ('{} - {}'.format(x, self.bot.cogs[x].__doc__)+'\\n')\n halp.add_field(name='Cogs', value=cogs_desc[0:len(cogs_desc)-1], inline=False)\n cmds_desc = 'None '\n for y in self.bot.walk_commands():\n if not y.cog_name and not y.hidden:\n cmds_desc += ('{} - {}'.format(y.name, y.help)+'\\n')\n halp.add_field(name='Uncatergorized Commands', value=cmds_desc[0:len(cmds_desc)-1], inline=False)\n await ctx.message.add_reaction(emoji='✉')\n await ctx.message.author.send('', embed=halp)\n else:\n if len(cog) > 1:\n halp = discord.Embed(title='Error!', description='That is way too many cogs!',\n color=discord.Color.red())\n await ctx.message.author.send('', embed=halp)\n else:\n found = False\n for x in self.bot.cogs:\n for y in cog:\n if x == y:\n halp = discord.Embed(title=cog[0]+' Command Listing',\n description=self.bot.cogs[cog[0]].__doc__)\n for c in self.bot.get_cog(y).get_commands():\n if not c.hidden:\n halp.add_field(name=c.name, value=c.help, inline=False)\n found = True\n if not found:\n halp = discord.Embed(title='Error!', description='How do you even use \"'+cog[0]+'\"?',\n color=discord.Color.red())\n else:\n await ctx.message.add_reaction(emoji='✉')\n await ctx.message.author.send('', embed=halp)\n except:\n pass", "def subcmd_info(word, word_eol):\n\tif len(word) == 1:\n\t\tchancount = len(chanlist.channels)\n\t\tservcount = 0\n\t\tusercount = 0\n\t\tfor chan in chanlist.channels:\n\t\t\tservcount += len(chanlist.channels[chan].servers)\n\t\t\tfor serv in chanlist.channels[chan].servers:\n\t\t\t\tusercount += len(chanlist.channels[chan].servers[serv].users)\n\t\tdoprint('info', 'Totals: %d Channels, %d Servers, %d Users' % (chancount, servcount, usercount))\n\telif (len(word) == 2) and ((word[1] == \"v\") or (word[1] == \"verbose\")):\n\t\tchancount = len(chanlist.channels)\n\t\tservcount = 0\n\t\tusercount = 0\n\t\tfor chan in chanlist.channels:\n\t\t\tdoprint('info', 'Channel %s:' % chanlist.channels[chan].name)\n\t\t\tservcount += len(chanlist.channels[chan].servers)\n\t\t\tfor serv in chanlist.channels[chan].servers:\n\t\t\t\tdoprint('info', ' Server %s:' % chanlist.channels[chan].servers[serv].name)\n\t\t\t\tusercount += 
len(chanlist.channels[chan].servers[serv].users)\n\t\t\t\tfor user in chanlist.channels[chan].servers[serv].users:\n\t\t\t\t\tdoprint('info', ' %s' % user)\n\t\tdoprint('info', 'Totals: %d Channels, %d Servers, %d Users' % (chancount, servcount, usercount))\n\telse:\n\t\tdoprint('info', 'Invalid usage. Try \"/mt_irc help info\"')", "def show_commands(self):\n print(\n ''\n '\\n\\t' + bc.OKBLUE + 'COMMANDS:' + bc.ENDC +\n '\\n\\t' + '---------' +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'run', 'Run the script')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'runcom', 'Run program with specific arguments <runcom [ARGS]>')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'info', 'Information')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'help', 'Help')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'so', 'Show options')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'sa', 'Show module info')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'set', 'Set options, <set [PARAMETER] [VALUE]>')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'invoke', 'Invoke module')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'exit', 'Exit')) +\n '\\n'\n )", "async def greeter(self, ctx):\n await util.command_group_help(ctx)", "def measure_program(self, channels: Iterable[str]) -> Dict[str, numpy.ndarray]:", "def help(update, context):\n update.message.reply_text(\"\"\"THESE ARE THE COMMANDS\n /hi\n /gokul\n /bala\n /competition\n /form\n \"\"\")", "def set_channel_attributes(self, chan_names, clims=None):\n\n rdefs = {'defaultT': 0,\n 'model': 'color',\n 'projection': 'normal',\n 'defaultZ': 0}\n\n multiscale_dict = [{'datasets': [{'path': ARRAY_NAME}],\n 'version': '0.1'}]\n dict_list = []\n\n if clims and len(chan_names) < len(clims):\n raise ValueError('Contrast Limits specified exceed the number of channels given')\n\n for i in range(len(chan_names)):\n if clims:\n if len(clims[i]) == 2:\n if 'float' in self.dtype.name:\n clim = (float(clims[i][0]), float(clims[i][1]), -1000, 1000)\n else:\n info = np.iinfo(self.dtype)\n clim = (float(clims[i][0]), float(clims[i][1]), info.min, info.max)\n elif len(clims[i]) == 4:\n clim = (float(clims[i][0]), float(clims[i][1]), float(clims[i][2]), float(clims[i][3]))\n else:\n raise ValueError('clim specification must a tuple of length 2 or 4')\n\n first_chan = True if i == 0 else False\n if not clims or i >= len(clims):\n dict_list.append(self.create_channel_dict(chan_names[i], first_chan=first_chan))\n else:\n dict_list.append(self.create_channel_dict(chan_names[i], clim, first_chan=first_chan))\n\n full_dict = {'multiscales': multiscale_dict,\n 'omero': {\n 'channels': dict_list,\n 'rdefs': rdefs,\n 'version': 0.1}\n }\n\n self.current_pos_group.attrs.put(full_dict)", "def HMC_Help():\n os.system(\"cls\")\n while True:\n print((\"\\n\\n\",\"Help\".center(50)))\n print_list = [\"ManagedSystem\",\"LogicalPartition\",\"VirtualIOServer\",\"Cluster\",\"Performance Capaity Monitoring\",\"Return to Main Menu\"]\n choice = int(print_obj.print_on_screen(print_list))\n directory = os.path.dirname(os.path.dirname(__file__))\n if choice == 1:\n path = directory+\"/help/ManagedSystem\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 2:\n path = directory+\"/help/LogicalPartition\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 3:\n path = directory+\"/help/VirtualIOServer\"\n files = [f for f in os.listdir(path)if 
os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 4:\n print((open(directory+\"/help/Cluster.txt\").read()))\n elif choice == 5:\n print((open(directory+\"/help/PerformanceCapacityMonitoring.txt\").read()))\n elif choice == 6:\n os.system(\"cls\")\n return\n else:\n print(\"\\nTry using Valid option\")\n back_to_menu()", "def help(bot, update):\n update.message.reply_text('INLINE Bot usage: \\n@cw_guildBot {itemName} {quantity} {\"w\" (optional, to withdraw)}. \\n\\nItem Name does not have to be full, 3 characters is enough.\\n\\\nSTANDARD Bot usage: \\nForward a list of items. Should support all inventories.\\n\\n\\\nRECIPE Bot usage: \\nForward the recipe text as received from CW.\\n\\n\\\nWARNING: Enchanted and unique items will NOT be processed\\n\\n\\\nPoke @acun1994 if you find something that isn\\'t handled yet')", "def runMCMC(df, cents, show=False):\n if type(cents) is not list:\n cents = [cents]\n numCents = len(cents)\n p = None\n \n # Tau = the precision of the normal distribution (of the above peaks)\n taus = 1. / pm.Uniform('stds', 0, 100, size=numCents)**2 # tau = 1/sigma**2\n centers = pm.Normal('centers', cents, [0.0025 for i in cents],\n size=numCents)\n \n if numCents == 2: # Assignment probability\n p = pm.Uniform('p', 0, 1)\n assignment = pm.Categorical('asisgnment', [p, 1-p],\n size=len(df.intervals))\n @pm.deterministic\n def center_i(assignment=assignment, centers=centers):\n return centers[assignment]\n @pm.deterministic\n def tau_i(assignment=assignment, taus=taus):\n return taus[assignment]\n observations = pm.Normal('obs', center_i, tau_i, value=df.intervals,\n observed=True)\n # Create the model 2 peaks\n mcmc = pm.MCMC([p, assignment, observations, taus, centers])\n \n else:\n observations = pm.Normal('obs', value=df.intervals, observed=True)\n mcmc = pm.MCMC([observations, taus, centers]) # Create model, 1 peak\n \n # Run the model\n mcmc.sample(50000)\n center_trace = mcmc.trace(\"centers\")[:]\n try:\n clusts = [center_trace[:,i] for i in range(numCents)]\n except:\n clusts = [center_trace]\n \n if show:\n for i in range(numCents):\n plt.hist(center_trace[:,i], bins=50, histtype='stepfilled',\n color=['blue', 'red'][i], alpha=0.7)\n plt.show()\n \n print('Evolved clusters at:')\n print([np.mean(c) for c in clusts])\n return clusts", "def parsing_arguments(args=None):\n description = ''\n parser = argparse.ArgumentParser(\n prog='hatchet plot-cn',\n description=description,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n parser.add_argument('INPUT', help='One or more space-separated files in CN_BBC format')\n parser.add_argument(\n '-n',\n '--patientnames',\n required=False,\n default=config.plot_cn.patientnames,\n type=str,\n help='One or more space-separated patient names (default: inferred from filenames)',\n )\n parser.add_argument(\n '-u',\n '--minu',\n required=False,\n default=config.plot_cn.minu,\n type=float,\n help='Minimum proportion of a CNA to be considered subclonal (default: 0.2)\"',\n )\n parser.add_argument(\n '-x',\n '--rundir',\n required=False,\n default=config.plot_cn.rundir,\n type=str,\n help='Running directory (default: current directory)',\n )\n parser.add_argument(\n '-b',\n '--baseCN',\n required=False,\n default=config.plot_cn.basecn,\n type=int,\n help='Base copy number (default: inferred from tumor ploidy)',\n )\n parser.add_argument(\n '-sC',\n '--figsizeclones',\n required=False,\n default=config.plot_cn.figsizeclones,\n type=str,\n help='Size of clone 
plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-sP',\n '--figsizecn',\n required=False,\n default=config.plot_cn.figsizecn,\n type=str,\n help='Size of CN plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-sG',\n '--figsizegrid',\n required=False,\n default=config.plot_cn.figsizegrid,\n type=str,\n help='Size of grid plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-rC',\n '--resolutionclones',\n required=False,\n default=config.plot_cn.resolutionclones,\n type=int,\n help='Number of bins to merge together for plotting clone profiles (default: 100)\"',\n )\n parser.add_argument(\n '-rP',\n '--resolutioncn',\n required=False,\n default=config.plot_cn.resolutioncn,\n type=int,\n help='Number of bins to merge together for plotting proportions (default: 500)\"',\n )\n parser.add_argument(\n '-rG',\n '--resolutiongrid',\n required=False,\n default=config.plot_cn.resolutiongrid,\n type=int,\n help='Number of bins to merge together in grids (default: 100)\"',\n )\n parser.add_argument(\n '-e',\n '--threshold',\n required=False,\n default=config.plot_cn.threshold,\n type=float,\n help='Threshold used to classify a tumor into either diploid or tetraploid (default: 3.0)\"',\n )\n parser.add_argument(\n '--ymax',\n required=False,\n default=config.plot_cn.ymax,\n type=int,\n help='Maximum values in y-axis (default: automatically inferred)\"',\n )\n parser.add_argument(\n '--ymin',\n required=False,\n default=config.plot_cn.ymin,\n type=int,\n help='Minimum values in y-axis (default: automatically inferred)\"',\n )\n parser.add_argument(\n '--clonepalette',\n required=False,\n default=config.plot_cn.clonepalette,\n type=str,\n help='Palette for coloring the clones among Set1, Set2, Set3, Paired (default: Set1)\"',\n )\n parser.add_argument(\n '--linkage',\n required=False,\n default=config.plot_cn.linkage,\n type=str,\n help=(\n 'Linkage method used for clustering (default: single, available (single, complete, average, weighted, '\n 'centroid, median, ward) from SciPy)\"'\n ),\n )\n parser.add_argument('-V', '--version', action='version', version=f'%(prog)s {__version__}')\n args = parser.parse_args(args)\n\n if len(args.INPUT.split()) == 0:\n raise ValueError(error('Please specify at least one sample as input!'))\n if args.patientnames is None:\n patientnames = {fil: os.path.basename(fil) for fil in args.INPUT.split()}\n else:\n patientnames = {f: n for f, n in zip(args.INPUT.split(), args.patientnames.split())}\n if len(args.INPUT.split()) != len(set(patientnames.values())):\n raise ValueError(error('Multiple patients have the same name but they should unique!'))\n if args.figsizeclones is not None:\n figsizeclones = to_tuple(args.figsizeclones, error_message='Wrong format of figsizeclones!')\n if args.figsizecn is not None:\n figsizecn = to_tuple(args.figsizecn, error_message='Wrong format of figsizecn!')\n if args.figsizegrid is not None:\n figsizegrid = to_tuple(args.figsizegrid, error_message='Wrong format of figsizegrid!')\n\n if not os.path.isdir(args.rundir):\n raise ValueError(error('Running directory does not exist!'))\n if not 0.0 <= args.minu <= 1.0:\n raise ValueError(error('The minimum proportion for subclonal CNAs must be in [0, 1]!'))\n if args.baseCN is not None and args.baseCN < 2:\n raise ValueError(error('Base CN must be greater or equal than 2!'))\n if args.resolutionclones is not None and args.resolutionclones < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.resolutioncn is not None 
and args.resolutioncn < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.resolutiongrid is not None and args.resolutiongrid < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.threshold < 0:\n raise ValueError(error('Threshold must be positive!'))\n if args.linkage not in {\n 'single',\n 'complete',\n 'average',\n 'weighted',\n 'centroid',\n 'median',\n 'ward',\n }:\n raise ValueError(error('Unknown linkage method!'))\n\n if args.clonepalette == 'Set1':\n pal = plt.cm.Set1\n elif args.clonepalette == 'Set2':\n pal = plt.cm.Set2\n elif args.clonepalette == 'Set3':\n pal = plt.cm.Set3\n elif args.clonepalette == 'Paired':\n pal = plt.cm.Paired\n else:\n raise ValueError(error('Unknown clone palette!'))\n\n return {\n 'input': args.INPUT.split(),\n 'names': patientnames,\n 'rundir': args.rundir,\n 'minu': args.minu,\n 'base': args.baseCN,\n 'clonefigsize': figsizeclones,\n 'propsfigsize': figsizecn,\n 'clusterfigsize': figsizegrid,\n 'profileres': args.resolutionclones,\n 'cnres': args.resolutioncn,\n 'clusterres': args.resolutiongrid,\n 'threshold': args.threshold,\n 'linkage': args.linkage,\n 'ymax': args.ymax,\n 'ymin': args.ymin,\n 'clonepalette': pal,\n }", "def explainerdashboard_cli(ctx):", "def counts(self, values, channels=None ):\n if not isinstance( values, list ):\n # only one value is given\n values = [ values ] * self.nchans\n for chan in self.get_channel_list(channels):\n command = chan << 8\n command += (values[chan] & 0xff)\n self.vals[chan] = values[chan]\n self.cmds[chan] = command\n self.ohms() # update the resistances\n # temporary, show combined\n # print( self.Rcombine()[0], self.vals )\n if self.verbose:\n self.status()", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-t\", \"--test\", dest=\"test\", type=\"string\",\n help=\"supply help\")\n\n parser.add_option(\"--method\", dest=\"method\", type=\"choice\",\n choices=(\"metrics\", \"summary\", \"module_summary\"),\n help=\"method to summarise clustering\")\n\n parser.add_option(\"--ref-gtf-files\", dest=\"ref_gtf\", type=\"string\",\n help=\"comma separated list of reference gtf files\")\n\n # add common options (-h/--help, ...) 
and parse command line\n (options, args) = E.Start(parser, argv=argv)\n\n if options.method == \"metrics\":\n infile = argv[-1]\n E.info(\"loading input file: %s\" % infile)\n assert infile\n\n df = pd.read_table(infile,\n sep=\"\\t\",\n header=None,\n index_col=0)\n\n df = df.ix[:, :50]\n cluster_combs = (x for x in itertools.combinations(df.columns,\n 2))\n genes = df.index\n results_dict = {}\n all_clusts = {}\n\n E.info(\"setting up cluster containers\")\n for i in df.columns:\n clusters = set(df[i].values.tolist())\n cluster_dict = {}\n for clust in clusters:\n cluster_dict[clust] = []\n for gene in genes:\n cluster_dict[df[i][gene]].append(gene)\n\n for col in clusters:\n col_set = set()\n clust_col = cluster_dict[col]\n gene_members = itertools.combinations(clust_col,\n 2)\n col_set.update(gene_members)\n cluster_dict[col] = col_set\n all_clusts[i] = cluster_dict\n E.info(\"generating all pair-wise cluster comparisons\")\n E.info(\"calculating adjusted mutual information\")\n for k in cluster_combs:\n clusters1 = all_clusts[k[0]]\n clusters2 = all_clusts[k[1]]\n metric_dict = {}\n metric_dict['AMI'] = TS.adjustedMutualInformation(clusters1,\n clusters2)\n results_dict[k] = metric_dict\n\n res_frame = pd.DataFrame(results_dict).T\n res_frame = res_frame.reset_index()\n res_frame.drop(['level_0'], inplace=True, axis=1)\n res_frame.drop(['level_1'], inplace=True, axis=1)\n\n # flatten rand indices and add to output dataframe\n rand_arrays = TS.randIndexes(df)\n flat_adj_rand = TS.unravel_arrays(rand_arrays[0])\n flat_rand = TS.unravel_arrays(rand_arrays[1])\n res_frame['Rand_Index'] = flat_rand\n res_frame['Adjusted_Rand_Index'] = flat_adj_rand\n E.info(\"aggregating results\")\n\n res_frame.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"summary\":\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n file_dict = {}\n for fle in list_of_files:\n fname = fle.split(\"/\")[-1]\n condition = fname.split(\"-\")[0]\n ref = fname.split(\"-\")[1]\n df_ = pd.read_table(fle,\n sep=\"\\t\",\n header=0,\n index_col=0)\n df_.columns = ['gene_id', 'cluster']\n clust_dict = {}\n for idx in df_.index:\n cluster = df_.loc[idx]['cluster']\n gene = df_.loc[idx]['gene_id']\n try:\n clust_dict[cluster] += 1\n except KeyError:\n clust_dict[cluster] = 1\n med_size = np.median(clust_dict.values())\n file_dict[fname] = {'condition': condition,\n 'reference': ref,\n 'median_cluster_size': med_size}\n\n outframe = pd.DataFrame(file_dict).T\n outframe.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"module_summary\":\n # get lncRNA/gene lengths from reference gtfs\n ref_gtfs = options.ref_gtf.split(\",\")\n length_dict = {}\n for ref in ref_gtfs:\n oref = IOTools.openFile(ref, \"rb\")\n git = GTF.transcript_iterator(GTF.iterator(oref))\n for gene in git:\n for trans in gene:\n length = trans.end - trans.start\n try:\n length_dict[trans.gene_id] += length\n except KeyError:\n length_dict[trans.gene_id] = length\n oref.close()\n\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n fdfs = []\n for fle in list_of_files:\n cond = fle.split(\"/\")[-1].split(\"-\")[0]\n refer = fle.split(\"/\")[-1].split(\"-\")[1]\n _df = pd.read_table(fle, sep=\"\\t\",\n header=0, index_col=0)\n _df.columns = ['gene_id', 'cluster']\n clusters = set(_df['cluster'])\n c_dict = {}\n # summarize over each cluster\n for clust in clusters:\n lengths = []\n c_df = _df[_df['cluster'] == clust]\n for lid in c_df['gene_id']:\n 
lengths.append(length_dict[lid])\n c_dict[clust] = {'cluster_size': len(c_df['gene_id']),\n 'mean_length': np.mean(lengths),\n 'index': (cond, refer),\n 'module': clust}\n cdf = pd.DataFrame(c_dict).T\n # use a multindex for hierarchical indexing\n midx = pd.MultiIndex.from_tuples(cdf['index'])\n cdf.index = midx\n cdf.drop(['index'], inplace=True, axis=1)\n fdfs.append(cdf)\n\n # generate a single output df\n s_df = fdfs[0]\n fdfs.pop(0)\n for df in fdfs:\n s_df = s_df.append(df)\n\n s_df.to_csv(options.stdout,\n index_label=(\"condition\", \"reference\"),\n sep=\"\\t\")\n\n # write footer and output benchmark information.\n E.Stop()", "def process_cl_args():\n\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('commands', nargs='*')\n parser.add_argument('--help', '-h', action='store_true')\n parser.add_argument('--version', '-v', action='store_true')\n parser.add_argument('--debug', '-d', action='store_true')\n parser.add_argument('--logging', '-l', action='store_true')\n parser.add_argument('--no-autosize', action='store_true')\n parser.add_argument('--no-preload', action='store_true')\n args = parser.parse_args()\n\n if args.version:\n xprint(get_version_info())\n xprint(\"\")\n sys.exit()\n\n elif args.help:\n for x in helptext():\n xprint(x[2])\n sys.exit()\n\n if args.debug or os.environ.get(\"mpsytdebug\") == \"1\":\n xprint(get_version_info())\n g.debug_mode = True\n g.no_clear_screen = True\n logfile = os.path.join(tempfile.gettempdir(), \"mpsyt.log\")\n logging.basicConfig(level=logging.DEBUG, filename=logfile)\n logging.getLogger(\"pafy\").setLevel(logging.DEBUG)\n\n elif args.logging or os.environ.get(\"mpsytlog\") == \"1\":\n logfile = os.path.join(tempfile.gettempdir(), \"mpsyt.log\")\n logging.basicConfig(level=logging.DEBUG, filename=logfile)\n logging.getLogger(\"pafy\").setLevel(logging.DEBUG)\n\n if args.no_autosize:\n g.detectable_size = False\n\n g.command_line = \"playurl\" in args.commands or \"dlurl\" in args.commands\n if g.command_line:\n g.no_clear_screen = True\n\n if args.no_preload:\n g.preload_disabled = True\n\n g.argument_commands = args.commands", "def cli(ctx):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"cluster subcommand called from cli\")", "def do_overview(self):\n summaries = []\n for name, cmd in self.base.commands.iteritems():\n summaries.append(' %-14s %s\\n' % (name, cmd.get_summary()))\n summaries.sort()\n sys.stdout.write('Usage: %s COMMAND ARGUMENTS...\\n\\n' \\\n 'Available commands:\\n' % (self.base.scriptname, ))\n for line in summaries:\n sys.stdout.write(line)", "def help(cls, extra_args=None):\n if (_is_text_interface()):\n return _create_text_help_str(cls, cls._TEXT_USAGE)\n else:\n return cls._GRAPHICAL_USAGE", "def help_opt(self):\n print(OPTIONS)", "def help_command(server, output, conf):\n server.tell(output.name, 'Available commands:')\n for key in COMMANDS.keys():\n cmd_func = COMMANDS[key]\n if cmd_func.__doc__:\n server.tell(output.name, '%s: %s' % (key[1:], cmd_func.__doc__))\n else:\n server.tell(output.name, key[1:])\n return", "async def stats(ctx):\n pythonVersion = platform.python_version()\n dpyVersion = discord.__version__\n serverCount = len(bot.guilds)\n memberCount = len(set(bot.get_all_members()))\n\n embed = discord.Embed(\n title=f\"{bot.user.name} Stats\",\n description=\"\\uFEFF\",\n colour=ctx.author.colour,\n timestamp=ctx.message.created_at,\n )\n\n embed.add_field(name=\"Bot Version:\", value=\"0.0.1\")\n 
embed.add_field(name=\"Python Version:\", value=pythonVersion)\n embed.add_field(name=\"Discord.Py Version\", value=dpyVersion)\n embed.add_field(name=\"Total Guilds:\", value=serverCount)\n embed.add_field(name=\"Total Users:\", value=memberCount)\n embed.add_field(name=\"Bot Developers:\", value=\"<@271612318947868673>\")\n\n embed.set_footer(text=f\"Carpe Noctem | {bot.user.name}\")\n embed.set_author(name=bot.user.name, icon_url=bot.user.avatar_url)\n\n await ctx.send(embed=embed)", "def help(self, nick, channel):\n\n def getCmdArguments(handler):\n return inspect.getargspec(handler)[0][3:]\n\n helpTuples = []\n rules = self.getRules()\n processedHandlers = set()\n for _, handler in rules:\n if handler in processedHandlers:\n continue\n processedHandlers.add(handler)\n helpTuples.append((handler.__name__ + ' ' +\n ' '.join([\"<%s>\" % arg\n for arg in getCmdArguments(handler)]),\n handler.__doc__\n ))\n if handler.__name__ == 'help':\n break\n\n padding = max([len(cmdpart) for cmdpart, _ in helpTuples])\n\n messages = [\"List of lockbot commands:\"]\n for cmdpart, description in helpTuples:\n message = \" %s:%s%s\" % (cmdpart,\n (padding - len(cmdpart) + 1) * ' ',\n description if description else 'N/A')\n messages.append(message)\n\n return [(channel, message) for message in messages]", "def EnableCNML(mlu_id=0):\n global option\n option['device'] = 'CNML'\n option['device_id'] = mlu_id", "def help(self, update, context):\n help_message = textwrap.dedent(\"\"\"\n 1. /subscribe - To subscribe to sixes scored in IPL to avail 60% off swiggy coupon (SWIGGY6)\n 2. /snooze - To snooze the notifications for sixes scored for the day.\n 3. /removeSnooze - To resume the notifications for the day.\n 4. /unsubscribe - To unsubscribe to the sixes scored notifications.\n 5. 
/swiggyOffer - To know more about the ongoing swiggy offer.\n \"\"\")\n self.bot.send_message(chat_id=update.message.chat_id, text=help_message, parse_mode='markdown')", "async def _help(ctx, *, command_name: str=None):\n if command_name:\n command = bot.get_command(command_name)\n if not command:\n return await ctx.send(\"No such command!\")\n return await ctx.send(f\"```\\n{ctx.prefix}{command.name} {command.signature}\\n\\n{command.help or 'Missing description'}```\")\n description = []\n for name, cog in bot.cogs.items():\n entries = [\" - \".join([cmd.name, cmd.short_doc or \"Missing description\"]) for cmd in cog.get_commands() if await _can_run(cmd, ctx) and not cmd.hidden]\n if entries:\n description.append(f\"**{name}**:\")\n description.append(\"• \" + \"\\n• \".join(entries))\n await ctx.send(embed=discord.Embed(description=\"\\n\".join(description), color=ctx.me.color))", "def help_text(command):\n\n courses_list = ('ENPM611', 'ENPM613', 'ENPM631', 'ENPM687',\\\n 'ENPM691', 'ENPM693', 'ENPM694', 'ENPM696',\\\n 'ENPM809J','ENPM809R', 'ENPM809W')\n\n response = 'I have course descriptions for: '\n for course_name in courses_list:\n response = response + course_name + ' '\n\n response = response + '\\nTo get the course description, execute command: about ENPM<course_number>'\n\n return response", "def main():\n parser = argparse.ArgumentParser(description='investigate code health and random statistics')\n sub_parsers = parser.add_subparsers(dest='command_name', title='Commands', help='', metavar='<command>')\n\n sub = sub_parsers.add_parser('line-count', help='list line counts')\n sub.add_argument('files', nargs='+', help='files or folders to look in')\n sub.add_argument('--each', type=int, default=1)\n sub.add_argument('--show', action='store_true')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_line_count)\n\n sub = sub_parsers.add_parser('include-list', help='list headers from files')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--print', dest='print_files', action='store_true')\n sub.add_argument('--print-stats', dest='print_stats', action='store_true')\n sub.add_argument('--print-max', dest='print_max', action='store_true')\n sub.add_argument('--no-list', dest='print_list', action='store_false')\n sub.add_argument('--count', default=2, type=int, help=\"only print includes that are more or equal to <count>\")\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.set_defaults(func=handle_list)\n\n sub = sub_parsers.add_parser('include-gv', help='generate a graphviz of the includes')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.add_argument('--group', action='store_true', help=\"group output\")\n sub.add_argument('--cluster', action='store_true', help=\"group output into clusters\")\n sub.set_defaults(func=handle_gv)\n\n sub = sub_parsers.add_parser('list-indents', help='list the files with the maximum indents')\n sub.add_argument('files', nargs='+')\n sub.add_argument('--each', type=int, default=1, help='group counts')\n sub.add_argument('--show', action='store_true', help='include files in list')\n sub.add_argument('--hist', action='store_true', help='show simple histogram')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_list_indents)\n\n sub = 
sub_parsers.add_parser('missing-pragma-once', help='find headers with missing include guards')\n sub.add_argument('files', nargs='+')\n sub.set_defaults(func=handle_missing_include_guards)\n\n sub = sub_parsers.add_parser('missing-in-cmake', help='find files that existis on disk but missing in cmake')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_missing_in_cmake)\n\n sub = sub_parsers.add_parser('list-no-project-folders', help='find projects that have not set the solution folder')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_list_no_project_folder)\n\n sub = sub_parsers.add_parser('check-files', help=\"find files that doesn't match the name style\")\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_check_files)\n\n args = parser.parse_args()\n if args.command_name is not None:\n args.func(args)\n else:\n parser.print_help()", "def usage():\n print(\"[1] Getting help from a cipher \")\n print(\" ---> ./cryptogra.py caesar -h \")\n print(\"\")", "def set(self, opts, popsize=None, ccovfac=1, verbose=True):\r\n\r\n alpha_cc = 1.0 # cc-correction for mueff, was zero before\r\n\r\n def cone(df, mu, N, alphacov=2.0):\r\n \"\"\"rank one update learning rate, ``df`` is disregarded and obsolete, reduce alphacov on noisy problems, say to 0.5\"\"\"\r\n return alphacov / ((N + 1.3)**2 + mu)\r\n\r\n def cmu(df, mu, alphamu=0.0, alphacov=2.0):\r\n \"\"\"rank mu learning rate, disregarding the constrant cmu <= 1 - cone\"\"\"\r\n c = alphacov * (alphamu + mu - 2 + 1/mu) / ((N + 2)**2 + alphacov * mu / 2)\r\n # c = alphacov * (alphamu + mu - 2 + 1/mu) / (2 * (N + 2)**1.5 + alphacov * mu / 2)\r\n # print 'cmu =', c\r\n return c\r\n\r\n def conedf(df, mu, N):\r\n \"\"\"used for computing separable learning rate\"\"\"\r\n return 1. / (df + 2.*sqrt(df) + float(mu)/N)\r\n\r\n def cmudf(df, mu, alphamu):\r\n \"\"\"used for computing separable learning rate\"\"\"\r\n return (alphamu + mu - 2. + 1./mu) / (df + 4.*sqrt(df) + mu/2.)\r\n\r\n sp = self\r\n N = sp.N\r\n if popsize:\r\n opts.evalall({'N':N, 'popsize':popsize})\r\n else:\r\n popsize = opts.evalall({'N':N})['popsize'] # the default popsize is computed in Options()\r\n sp.popsize = popsize\r\n if opts['CMA_mirrors'] < 0.5:\r\n sp.lam_mirr = int(0.5 + opts['CMA_mirrors'] * popsize)\r\n elif opts['CMA_mirrors'] > 1:\r\n sp.lam_mirr = int(0.5 + opts['CMA_mirrors'])\r\n else:\r\n sp.lam_mirr = int(0.5 + 0.16 * min((popsize, 2 * N + 2)) + 0.29) # 0.158650... 
* popsize is optimal\r\n # lam = arange(2,22)\r\n # mirr = 0.16 + 0.29/lam\r\n # print(lam); print([int(0.5 + l) for l in mirr*lam])\r\n # [ 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21]\r\n # [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4]\r\n\r\n sp.mu_f = sp.popsize / 2.0 # float value of mu\r\n if opts['CMA_mu'] is not None:\r\n sp.mu_f = opts['CMA_mu']\r\n sp.mu = int(sp.mu_f + 0.499999) # round down for x.5\r\n # in principle we have mu_opt = popsize/2 + lam_mirr/2,\r\n # which means in particular weights should only be negative for q > 0.5+mirr_frac/2\r\n if sp.mu > sp.popsize - 2 * sp.lam_mirr + 1:\r\n print(\"WARNING: pairwise selection is not implemented, therefore \" +\r\n \" mu = %d > %d = %d - 2*%d + 1 = popsize - 2*mirr + 1 can produce a bias\" % (\r\n sp.mu, sp.popsize - 2 * sp.lam_mirr + 1, sp.popsize, sp.lam_mirr))\r\n if sp.lam_mirr > sp.popsize // 2:\r\n raise _Error(\"fraction of mirrors in the population as read from option CMA_mirrors cannot be larger 0.5, \" +\r\n \"theoretically optimal is 0.159\")\r\n sp.weights = log(max([sp.mu, sp.popsize / 2.0]) + 0.5) - log(1 + np.arange(sp.mu))\r\n if 11 < 3: # equal recombination weights\r\n sp.mu = sp.popsize // 4\r\n sp.weights = np.ones(sp.mu)\r\n print(sp.weights[:10])\r\n sp.weights /= sum(sp.weights)\r\n sp.mueff = 1 / sum(sp.weights**2)\r\n sp.cs = (sp.mueff + 2) / (N + sp.mueff + 3)\r\n # TODO: clean up (here the cumulation constant is shorter if sigma_vec is used)\r\n sp.dampsvec = opts['CMA_dampsvec_fac'] * (N + 2) if opts['CMA_dampsvec_fac'] else np.Inf\r\n sp.dampsvec_fading = opts['CMA_dampsvec_fade']\r\n if np.isfinite(sp.dampsvec):\r\n sp.cs = ((sp.mueff + 2) / (N + sp.mueff + 3))**0.5\r\n # sp.cs = (sp.mueff + 2) / (N + 1.5*sp.mueff + 1)\r\n sp.cc = (4 + alpha_cc * sp.mueff / N) / (N + 4 + alpha_cc * 2 * sp.mueff / N)\r\n sp.cc_sep = (1 + 1/N + alpha_cc * sp.mueff / N) / (N**0.5 + 1/N + alpha_cc * 2 * sp.mueff / N) # \\not\\gg\\cc\r\n sp.rankmualpha = opts['CMA_rankmualpha']\r\n # sp.rankmualpha = _evalOption(opts['CMA_rankmualpha'], 0.3)\r\n sp.c1 = ccovfac * min(1, sp.popsize/6) * cone((N**2 + N) / 2, sp.mueff, N) # 2. 
/ ((N+1.3)**2 + sp.mucov)\r\n sp.c1_sep = ccovfac * conedf(N, sp.mueff, N)\r\n if 11 < 3:\r\n sp.c1 = 0.\r\n print('c1 is zero')\r\n if opts['CMA_rankmu'] != 0: # also empty\r\n sp.cmu = min(1 - sp.c1, ccovfac * cmu((N**2+N)/2, sp.mueff, sp.rankmualpha))\r\n sp.cmu_sep = min(1 - sp.c1_sep, ccovfac * cmudf(N, sp.mueff, sp.rankmualpha))\r\n else:\r\n sp.cmu = sp.cmu_sep = 0\r\n\r\n sp.neg = BlancClass()\r\n if opts['CMA_active']:\r\n # in principle we have mu_opt = popsize/2 + lam_mirr/2,\r\n # which means in particular weights should only be negative for q > 0.5+mirr_frac/2\r\n sp.neg.mu_f = popsize - (popsize + sp.lam_mirr) / 2 if popsize > 2 else 1\r\n sp.neg.weights = log(sp.mu_f + 0.5) - log(1 + np.arange(sp.popsize - int(sp.neg.mu_f), sp.popsize))\r\n sp.neg.mu = len(sp.neg.weights) # maybe never useful?\r\n sp.neg.weights /= sum(sp.neg.weights)\r\n sp.neg.mueff = 1 / sum(sp.neg.weights**2)\r\n sp.neg.cmuexp = opts['CMA_activefac'] * 0.25 * sp.neg.mueff / ((N+2)**1.5 + 2 * sp.neg.mueff)\r\n assert sp.neg.mu >= sp.lam_mirr # not really necessary\r\n # sp.neg.minresidualvariance = 0.66 # not it use, keep at least 0.66 in all directions, small popsize is most critical\r\n else:\r\n sp.neg.cmuexp = 0\r\n\r\n sp.CMA_on = sp.c1 + sp.cmu > 0\r\n # print(sp.c1_sep / sp.cc_sep)\r\n\r\n if not opts['CMA_on'] and opts['CMA_on'] not in (None,[],(),''):\r\n sp.CMA_on = False\r\n # sp.c1 = sp.cmu = sp.c1_sep = sp.cmu_sep = 0\r\n\r\n sp.damps = opts['CMA_dampfac'] * (0.5 +\r\n 0.5 * min([1, (sp.lam_mirr/(0.159*sp.popsize) - 1)**2])**1 +\r\n 2 * max([0, ((sp.mueff-1) / (N+1))**0.5 - 1]) + sp.cs\r\n )\r\n if 11 < 3:\r\n # this is worse than damps = 1 + sp.cs for the (1,10000)-ES on 40D parabolic ridge\r\n sp.damps = 0.3 + 2 * max([sp.mueff/sp.popsize, ((sp.mueff-1)/(N+1))**0.5 - 1]) + sp.cs\r\n if 11 < 3:\r\n # this does not work for lambda = 4*N^2 on the parabolic ridge\r\n sp.damps = opts['CMA_dampfac'] * (2 - 0*sp.lam_mirr/sp.popsize) * sp.mueff/sp.popsize + 0.3 + sp.cs # nicer future setting\r\n print('damps =', sp.damps)\r\n if 11 < 3:\r\n sp.damps = 10 * sp.damps # 1e99 # (1 + 2*max(0,sqrt((sp.mueff-1)/(N+1))-1)) + sp.cs;\r\n # sp.damps = 20 # 1. 
+ 20 * sp.cs**-1 # 1e99 # (1 + 2*max(0,sqrt((sp.mueff-1)/(N+1))-1)) + sp.cs;\r\n print('damps is %f' % (sp.damps))\r\n\r\n sp.cmean = float(opts['CMA_cmean'])\r\n # sp.kappa = 1 # 4-D, lam=16, rank1, kappa < 4 does not influence convergence rate\r\n # in larger dim it does, 15-D with defaults, kappa=8 factor 2\r\n if sp.cmean != 1:\r\n print(' cmean = %f' % (sp.cmean))\r\n\r\n if verbose:\r\n if not sp.CMA_on:\r\n print('covariance matrix adaptation turned off')\r\n if opts['CMA_mu'] != None:\r\n print('mu = %f' % (sp.mu_f))\r\n\r\n # return self # the constructor returns itself\r", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "def cbstats_test(self):\n cluster_len = RestConnection(self.master).get_cluster_size()\n if self.command == \"kvstore\":\n self.verify_cluster_stats()\n if self.command != \"key\":\n if \"tapagg\" in self.command and cluster_len == 1:\n self.log.info(\"This command only works with cluster with 2 nodes or more\")\n raise Exception(\"This command does not work with one node cluster\")\n else:\n # tapagg needs replica items to print out results\n if \"tapagg\" in self.command:\n for bucket in self.buckets:\n self.shell.execute_cbworkloadgen(self.couchbase_usrname, \\\n self.couchbase_password, self.num_items, \\\n self.set_get_ratio, bucket.name, \\\n self.item_size, self.command_options)\n self.sleep(5)\n for bucket in self.buckets:\n if \"allocator\" in self.command:\n output, error = self.shell.execute_mcstat(bucket,\"\",\n keyname=self.command, vbid=\"\", enable_ipv6=self.enable_ipv6)\n else:\n output, error = self.shell.execute_cbstats(bucket, self.command)\n self.verify_results(output, error)\n if self.command in [\"allocator\", \"kvtimings\", \"timings\"]:\n self.log.warning(\"We will not verify exact values for this stat\")\n else:\n self._verify_direct_client_stats(bucket, self.command, output)\n else:\n mc_conn = MemcachedClientHelper.direct_client(self.master, self.buckets[0].name, self.timeout)\n bucket_info = RestConnection(self.master).get_bucket(self.buckets[0])\n keys_map = {}\n for i in range(1, self.num_items + 1):\n vb_id = i - len(bucket_info.vbuckets) * int(i // len(bucket_info.vbuckets))\n try:\n mc_conn.set(\"test_docs-%s\" % i, 0, 0, json.dumps('{ \"test\" : \"test\"}').encode(\"ascii\", \"ignore\"), vb_id)\n except Exception:\n continue\n keys_map[\"test_docs-%s\" % i] = vb_id\n count = 0\n for key, vb_id in keys_map.items():\n output, error = self.shell.execute_cbstats(self.buckets[0], self.command, key, vb_id)\n self.verify_results(output, error)\n count += 1\n if self.master.ip.endswith(\"amazonaws.com\") and count == 10:\n self.log.info(\"check only 10 keys in aws \")\n break", "def metrics_roce(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_roce(cmd_ctx, cpc, options))", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"", "def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = 
Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "def _help(self):\n self.onecmd('help')", "def show_help(self):\n self.slack.reply('\\n\\n'.join(self.help_lines))", "def main():\n\n parser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n parser.add_argument('com', type=str, help='plot requests file name', nargs='?', default=\"/tmp/foo.c\")\n parser.add_argument('comout', type=str, help='COMOUT file name', nargs='?', default=\"zoom\")\n\n args = parser.parse_args()\n\n cmd = {} # dictionary of commands\n\n bas = False\n plane = False\n \n with open(args.com) as f:\n for line in f.readlines():\n words = line.strip().split()\n if len(words) is 0:\n continue\n\n for i,w in enumerate(words):\n if re.search(\"^bas\", w):\n cmd['bas'] = list(map(float, words[i+1:i+7]))\n if plane is False: bas = True # basis was before plane cuts\n elif re.search(\"^or\", w):\n cmd['or'] = list(map(float, words[i+1:i+4]))\n elif re.search(\"^ex\", w):\n try: # both x and y scales are given\n cmd['ex'] = list(map(float, words[i+1:i+3]))\n continue\n except ValueError: # just 1 scale is given\n 
cmd['ex'] = list(map(float, words[i+1:i+2]))\n elif re.search(\"^lab\", w):\n cmd['label'] = list(map(int, map(float, words[i+1:i+3]))) #+ [words[i+3]]\n elif re.search(\"^p[xyz]\", w):\n cmd[w] = [float(words[i+1])]\n if bas is False: plane = True # plane cuts were before basis\n elif re.search(\"^legend\", w):\n cmd[w] = [words[i+1]]\n elif w == \"scale\":\n print(w)\n if int(words[i+1]): # no need to put 'scale 0'\n cmd[w] = [words[i+1]]\n elif w in (\"mesh\"):\n if int(words[i+1])==1: # no need to put 'mesh 1'\n cmd[w] = [words[i+1]]\n\n print(bas, plane)\n\n if plane: # bas was first\n keys = ('bas', 'or', 'ex', 'px', 'py', 'pz', 'label', 'mesh', 'legend', 'scale')\n elif bas:\n keys = ('or', 'ex', 'px', 'py', 'pz', 'bas', 'label', 'mesh', 'legend', 'scale')\n else:\n keys = {'or', 'ex', 'label', 'mesh', 'legend', 'scale'}\n \n with open(args.comout, 'w') as f:\n for key in keys:\n if key in cmd:\n # newline required by mcplot:\n if key in ('mesh', 'legend', 'scale', 'label'):\n f.write(\"\\n\")\n f.write(\"%s %s \" % (key,\" \".join(str(e) for e in cmd[key]),))\n f.write(\"\\n\")", "def get_help(self) -> None: \n print(messages.get_help())", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")" ]
[ "0.59195566", "0.5586245", "0.5444794", "0.54191935", "0.53922415", "0.5380868", "0.53543603", "0.52876854", "0.52462244", "0.51891714", "0.5112807", "0.51073605", "0.50244516", "0.49929634", "0.49870348", "0.4977524", "0.49535966", "0.4952735", "0.4930333", "0.4928074", "0.48858336", "0.4872037", "0.4856927", "0.48532456", "0.48516858", "0.48491573", "0.484905", "0.48335287", "0.48152322", "0.47984397", "0.47978327", "0.47588268", "0.47519702", "0.47516325", "0.475059", "0.47494075", "0.47475576", "0.47366527", "0.4706545", "0.47041366", "0.46963525", "0.4694065", "0.4689688", "0.4687018", "0.46858668", "0.4683857", "0.46827915", "0.4679129", "0.46700305", "0.46672073", "0.46628106", "0.46602583", "0.4656443", "0.4655807", "0.46555868", "0.4645425", "0.46430716", "0.46399507", "0.4638259", "0.46340883", "0.46325195", "0.46320254", "0.46311247", "0.4627891", "0.46259654", "0.46225363", "0.46156046", "0.46109548", "0.46100485", "0.46073896", "0.46029687", "0.45984596", "0.45979375", "0.45969066", "0.45962533", "0.45881486", "0.45862833", "0.45857984", "0.458119", "0.4580543", "0.4579286", "0.45776227", "0.45727044", "0.45687884", "0.4552141", "0.455197", "0.45498073", "0.45470518", "0.45462748", "0.45456916", "0.45418176", "0.4539556", "0.45395306", "0.45359552", "0.45323652", "0.45265102", "0.45174748", "0.45146957", "0.45143402", "0.45141006" ]
0.6753259
0
Report environmental and power consumption metrics for CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_env(cmd_ctx, cpc, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()", "def metrics_flash(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_flash(cmd_ctx, cpc, options))", "def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. 
\r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text", "def phast_cmmd(self):\n temp = '{prog} -R {rho} -C {ecov} -E {elen} -N {chrom} -i MAF {maf} {model} > {wig}\\n'.format(**self.dict)\n return temp.format(fnum=self.fnum)", "def help_opt(self):\n print(OPTIONS)", "def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging", "async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)", "def commandline_options():\n parser = argparse.ArgumentParser(\n description='ocn_diags_generator: CESM wrapper python program for Ocean Diagnostics packages.')\n\n parser.add_argument('--backtrace', action='store_true',\n help='show exception backtraces as extra debugging '\n 'output')\n\n parser.add_argument('--debug', action='store_true',\n help='extra debugging output')\n\n #parser.add_argument('--config', nargs=1, required=True, help='path to config file')\n\n options = parser.parse_args()\n return options", "def mc(self, *args) -> None:\n env = os.environ.copy()\n env['MC_HOST_minio'] = self.auth_url\n # --config-dir is set just to prevent any config set by the user\n # from interfering with the test.\n try:\n subprocess.run(\n [\n 'mc', '--quiet', '--no-color', f'--config-dir={self.path}',\n *args\n ],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.PIPE,\n env=env,\n encoding='utf-8',\n errors='replace',\n check=True\n )\n except OSError as exc:\n raise MissingProgram(f'mc could not be run: {exc}') from exc\n except subprocess.CalledProcessError as exc:\n raise ProgramFailed(exc.stderr) from exc", "def main( argv = None ):\n\n if argv == None: argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser( version = \"%prog version: $Id$\", \n usage = globals()[\"__doc__\"] )\n\n parser.add_option(\"--category\", dest=\"category\", type=\"choice\",\n choices = (\"B\", \"C\"), help=\"supply help\" )\n\n ## add common options 
(-h/--help, ...) and parse command line \n (options, args) = E.Start( parser, argv = argv )\n\n data = getData(options.stdin)\n if options.category == \"B\":\n options.stdout.write(\"Category B pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in b2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n\n elif options.category == \"C\":\n options.stdout.write(\"Category C pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in c2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n else:\n raise ValueError(\"must specify the category of pathway\")\n\n\n ## write footer and output benchmark information.\n E.Stop()", "def measure(self,command_exe, command_args, measure_out):\n pass", "def main():\n test_cases = ast.literal_eval(sys.argv[1])\n results = str(my_info()) + '\\t\\t'\n for test_case in test_cases:\n mode = test_case[0]\n id_1 = int(test_case[1])\n id_2 = int(test_case[2])\n if mode == 'jc':\n results += str(Jaccard_Coefficient(id_1, id_2)) + '\\t\\t'\n elif mode == 'cc':\n results += str(Correlation_Coefficient(id_1, id_2)) + '\\t\\t'\n else:\n exit('bad command')\n print results + '\\n'", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = 
['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def report_on_config( args ):\n\n from khmer.utils import print_error\n\n if args.quiet: return\n\n print_error( \"\\nPARAMETERS:\" )\n print_error( \" - kmer size = {0} \\t\\t(-k)\".format( args.ksize ) )\n print_error( \" - n hashes = {0} \\t\\t(-N)\".format( args.n_hashes ) )\n print_error(\n \" - min hashsize = {0:5.2g} \\t(-x)\".format( args.min_hashsize )\n )\n print_error( \"\" )\n print_error(\n \"Estimated memory usage is {0:.2g} bytes \"\n \"(n_hashes x min_hashsize)\".format( args.n_hashes * args.min_hashsize )\n )\n print_error( \"-\" * 8 )\n\n if DEFAULT_MIN_HASHSIZE == args.min_hashsize:\n print_error(\n \"** WARNING: hashsize is default! \" \n \"You absodefly want to increase this!\\n** \"\n \"Please read the docs!\"\n )", "def main():\n\n parser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n parser.add_argument('com', type=str, help='plot requests file name', nargs='?', default=\"/tmp/foo.c\")\n parser.add_argument('comout', type=str, help='COMOUT file name', nargs='?', default=\"zoom\")\n\n args = parser.parse_args()\n\n cmd = {} # dictionary of commands\n\n bas = False\n plane = False\n \n with open(args.com) as f:\n for line in f.readlines():\n words = line.strip().split()\n if len(words) is 0:\n continue\n\n for i,w in enumerate(words):\n if re.search(\"^bas\", w):\n cmd['bas'] = list(map(float, words[i+1:i+7]))\n if plane is False: bas = True # basis was before plane cuts\n elif re.search(\"^or\", w):\n cmd['or'] = list(map(float, words[i+1:i+4]))\n elif re.search(\"^ex\", w):\n try: # both x and y scales are given\n cmd['ex'] = list(map(float, words[i+1:i+3]))\n continue\n except ValueError: # just 1 scale is given\n cmd['ex'] = list(map(float, words[i+1:i+2]))\n elif re.search(\"^lab\", w):\n cmd['label'] = list(map(int, map(float, words[i+1:i+3]))) #+ [words[i+3]]\n elif re.search(\"^p[xyz]\", w):\n cmd[w] = [float(words[i+1])]\n if bas is False: plane = True # plane cuts were before basis\n elif re.search(\"^legend\", w):\n cmd[w] = [words[i+1]]\n elif w == \"scale\":\n print(w)\n if int(words[i+1]): # no need to put 'scale 0'\n cmd[w] = [words[i+1]]\n elif w in (\"mesh\"):\n if int(words[i+1])==1: # no need to put 'mesh 1'\n cmd[w] = [words[i+1]]\n\n print(bas, plane)\n\n if plane: # bas was first\n keys = ('bas', 'or', 'ex', 'px', 'py', 'pz', 'label', 'mesh', 'legend', 'scale')\n elif bas:\n keys = ('or', 'ex', 'px', 'py', 'pz', 'bas', 'label', 'mesh', 'legend', 'scale')\n else:\n keys = {'or', 'ex', 'label', 'mesh', 'legend', 'scale'}\n \n with open(args.comout, 'w') as f:\n for key in keys:\n if key in cmd:\n # newline required by mcplot:\n if key in ('mesh', 'legend', 'scale', 'label'):\n f.write(\"\\n\")\n f.write(\"%s %s \" % (key,\" \".join(str(e) for e in cmd[key]),))\n f.write(\"\\n\")", "def metrics_roce(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_roce(cmd_ctx, cpc, options))", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"", "def main():\n parser = 
argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n add_common_snmp_args(parser)\n parser.add_argument(\n \"-w\",\n \"--warning\",\n type=int,\n default=70,\n help=\"Warning memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-c\",\n \"--critical\",\n type=int,\n default=90,\n help=\"Critical memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-f\",\n \"--family\",\n required=True,\n help=\"Switch family. Supported families: 1910, 1920, 1920S\",\n )\n\n config = vars(parser.parse_args())\n check_snmp_args(config)\n check_thresholds(config)\n\n dataset = {}\n\n if config[\"family\"] == \"1920S\":\n cpu = ObjectType(\n ObjectIdentity(\n \"HP-SWITCHING-MIB\", \"agentSwitchCpuProcessTotalUtilization\", 0\n )\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n cpu = ObjectType(\n ObjectIdentity(\"HH3C-ENTITY-EXT-MIB\", \"hh3cEntityExtCpuUsage\", 8)\n )\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n try:\n raw_data = get_snmp_data(config, cpu)\n except ValueError as err:\n unknown_exit(SERVICE, err)\n add_vars_to_dataset(dataset, raw_data)\n\n if config[\"family\"] == \"1920S\":\n dataset[\"cpu_usage\"] = get_hp_cpu_usage(\n dataset[\"agentSwitchCpuProcessTotalUtilization\"]\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n dataset[\"cpu_usage\"] = int(dataset[\"hh3cEntityExtCpuUsage\"])\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n state, message = generate_output(config, dataset)\n report(state, message)", "def help(self, args):\n print('No commands available for this consumer')", "def help(self):\n msg = \"`%s' performs the computational aspects of genotyping-by-sequencing.\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Usage: %s [OPTIONS] ...\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Options:\\n\"\n msg += \" -h, --help\\tdisplay the help and exit\\n\"\n msg += \" -V, --version\\toutput version information and exit\\n\"\n msg += \" -v, --verbose\\tverbosity level (0/default=1/2/3)\\n\"\n msg += \" --proj1\\tname of the project used for steps 1 to 4\\n\"\n msg += \"\\t\\tmention a reference genome only if all samples belong to\\n\"\n msg += \"\\t\\t the same species, and will be mapped to the same ref genome\\n\"\n msg += \" --proj2\\tname of the project used for steps 4 to 8\\n\"\n msg += \"\\t\\tcan be the same as --proj1, or can be different\\n\"\n msg +=\"\\t\\t notably when samples come from different species\\n\"\n msg += \"\\t\\t or if one wants to align reads to different ref genomes\\n\"\n msg += \" --schdlr\\tname of the cluster scheduler (default=SGE)\\n\"\n msg += \" --queue\\tname of the cluster queue (default=normal.q)\\n\"\n msg += \" --resou\\tcluster resources (e.g. 
'test' for 'qsub -l test')\\n\"\n msg += \" --rmvb\\tremove bash scripts for jobs launched in parallel\\n\"\n msg += \" --step\\tstep to perform (1/2/3/.../9)\\n\"\n msg += \"\\t\\t1: raw read quality per lane (with FastQC v >= 0.11.2)\\n\"\n msg += \"\\t\\t2: demultiplexing per lane (with demultiplex.py v >= 1.14.0)\\n\"\n msg += \"\\t\\t3: cleaning per sample (with CutAdapt v >= 1.8)\\n\"\n msg += \"\\t\\t4: alignment per sample (with BWA MEM v >= 0.7.12, Samtools v >= 1.3, Picard and R v >= 3)\\n\"\n msg += \"\\t\\t5: local realignment per sample (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t6: local realignment per genotype (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t7: variant and genotype calling per genotype (with GATK HaplotypeCaller v >= 3.5)\\n\"\n msg += \"\\t\\t8: variant and genotype calling jointly across genotypes (with GATK GenotypeGVCFs v >= 3.5)\\n\"\n msg += \"\\t\\t9: variant and genotype filtering (with GATK v >= 3.5)\\n\"\n msg += \" --samples\\tpath to the 'samples' file\\n\"\n msg += \"\\t\\tcompulsory for all steps, but can differ between steps\\n\"\n msg += \"\\t\\t e.g. if samples come from different species or are aligned\\n\"\n msg += \"\\t\\t on different ref genomes, different samples file should\\n\"\n msg += \"\\t\\t be used for steps 4-9, representing different subsets of\\n\"\n msg += \"\\t\\t the file used for steps 1-3\\n\"\n msg += \"\\t\\tthe file should be encoded in ASCII\\n\"\n msg += \"\\t\\tthe first row should be a header with column names\\n\"\n msg += \"\\t\\teach 'sample' (see details below) should have one and only one row\\n\"\n msg += \"\\t\\tany two columns should be separated with one tabulation\\n\"\n msg += \"\\t\\tcolumns can be in any order\\n\"\n msg += \"\\t\\trows starting by '#' are skipped\\n\"\n msg += \"\\t\\t12 columns are compulsory (but there can be more):\\n\"\n msg += \"\\t\\t genotype (see details below, e.g. 'Col-0', but use neither underscore '_' nor space ' ' nor dot '.', use dash '-' instead)\\n\"\n msg += \"\\t\\t ref_genome (identifier of the reference genome used for alignment, e.g. 'Atha_v2', but use neither space ' ' nor dot '.'; the full species name, e.g. 'Arabidopsis thaliana', will be present in the file given to --dict)\\n\"\n msg += \"\\t\\t library (e.g. can be the same as 'genotype')\\n\"\n msg += \"\\t\\t barcode (e.g. 'ATGG')\\n\"\n msg += \"\\t\\t seq_center (e.g. 'Broad Institute', 'GenoToul', etc)\\n\"\n msg += \"\\t\\t seq_platform (e.g. 'ILLUMINA', see SAM format specification)\\n\"\n msg += \"\\t\\t seq_platform_model (e.g. 'HiSeq 2000')\\n\"\n msg += \"\\t\\t flowcell (e.g. 'C5YMDACXX')\\n\"\n msg += \"\\t\\t lane (e.g. '3', can be '31' if a first demultiplexing was done per index)\\n\"\n msg += \"\\t\\t date (e.g. '2015-01-15', see SAM format specification)\\n\"\n msg += \"\\t\\t fastq_file_R1 (filename, one per lane, gzip-compressed)\\n\"\n msg += \"\\t\\t fastq_file_R2 (filename, one per lane, gzip-compressed)\\n\"\n msg += \" --fcln\\tidentifier of a flowcell and lane number\\n\"\n msg += \"\\t\\tformat as <flowcell>_<lane-number>, e.g. 
'C5YMDACXX_1'\\n\"\n msg += \"\\t\\tif set, only the samples from this lane will be analyzed\\n\"\n msg += \" --pird\\tpath to the input reads directory\\n\"\n msg += \"\\t\\tcompulsory for steps 1 and 2\\n\"\n msg += \"\\t\\twill be added to the columns 'fastq_file_R*' from the sample file\\n\"\n msg += \"\\t\\tif not set, input read files should be in current directory\\n\"\n msg += \" --enz\\tname of the restriction enzyme\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=ApeKI\\n\"\n msg += \" --dmxmet\\tmethod used to demultiplex\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=4c (see the help of demultiplex.py to know more)\\n\"\n msg += \" --subst\\tnumber of substitutions allowed during demultiplexing\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=2\\n\"\n msg += \" --ensubst\\tenforce the nb of substitutions allowed\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=lenient/strict\\n\"\n msg += \" --adp\\tpath to the file containing the adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tsame format as FastQC: name<tab>sequence\\n\"\n msg += \"\\t\\tname: at least 'adpR1' (also 'adpR2' if paired-end)\\n\"\n msg += \"\\t\\tsequence: from 5' (left) to 3' (right)\\n\"\n msg += \" --errtol\\terror tolerance to find adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --minovl\\tminimum overlap length between reads and adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=3 (in bases)\\n\"\n msg += \" --minrl\\tminimum length to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=35 (in bases)\\n\"\n msg += \" --minq\\tminimum quality to trim a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=20 (used for both reads if paired-end)\\n\"\n msg += \" --maxNp\\tmaximum percentage of N to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --ref\\tpath to the prefix of files for the reference genome\\n\"\n msg += \"\\t\\tcompulsory for steps 4, 5, 6, 7, 8, 9\\n\"\n msg += \"\\t\\tshould correspond to the 'ref_genome' column in --samples\\n\"\n msg += \"\\t\\te.g. '/data/Atha_v2' for '/data/Atha_v2.fa', '/data/Atha_v2.bwt', etc\\n\"\n msg += \"\\t\\tthese files are produced via 'bwa index ...'\\n\"\n msg += \" --dict\\tpath to the 'dict' file (SAM header with @SQ tags)\\n\"\n msg += \"\\t\\tcompulsory for step 4\\n\"\n msg += \"\\t\\tsee 'CreateSequenceDictionary' in the Picard software\\n\"\n msg += \" --jgid\\tcohort identifier to use for joint genotyping\\n\"\n msg += \"\\t\\tcompulsory for steps 8, 9\\n\"\n msg += \"\\t\\tuseful to launch several, different cohorts in parallel\\n\"\n msg += \" --rat\\trestrict alleles to be of a particular allelicity\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdefault=ALL/BIALLELIC/MULTIALLELIC\\n\"\n msg += \"\\t\\tsee '--restrictAllelesTo' in GATK's SelectVariant\\n\"\n msg += \" --mdp\\tminimum value for DP (read depth; e.g. 10)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mgq\\tminimum value for GQ (genotype quality; e.g. 
20)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mnfg\\tmaximum number of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mffg\\tmaximum fraction of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFractionFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mnnc\\tmaximum number of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \" --mfnc\\tmaximum fraction of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxNOCALLfraction' in GATK's SelectVariants\\n\"\n msg += \" --fam\\tpath to the file containing pedigree information\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdiscard variants with Mendelian violations (see Semler et al, 2012)\\n\"\n msg += \"\\t\\tshould be in the 'fam' format specified by PLINK\\n\"\n msg += \"\\t\\tvalidation strictness (GATK '-pedValidationType') is set at 'SILENT'\\n\"\n msg += \"\\t\\t allowing some samples to be absent from the pedigree\\n\"\n msg += \" --mvq\\tminimum GQ for each trio member to accept a variant as a Mendelian violation\\n\"\n msg += \"\\t\\tused in step 9 if '--fam' is specified\\n\"\n msg += \"\\t\\tdefault=0\\n\"\n msg += \" --xlssf\\tpath to the file with genotypes to exclude\\n\"\n msg += \"\\t\\tused in step 9 (can be especially useful if '--fam' is specified)\\n\"\n msg += \" --tmpd\\tpath to a temporary directory on child nodes (default=.)\\n\"\n msg += \"\\t\\te.g. it can be /tmp or /scratch\\n\"\n msg += \"\\t\\tused in step 4 for 'samtools sort'\\n\"\n msg += \"\\t\\tused in step 7 for 'GATK HaplotypeCaller'\\n\"\n msg += \" --jvmXms\\tinitial memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=512m (can also be specified as 1024k, 1g, etc)\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --jvmXmx\\tmaximum memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=4g\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --queue2\\tname of the second cluster queue (default=bigmem.q)\\n\"\n msg += \"\\t\\tused in step 4 for Picard to collect insert sizes\\n\"\n msg += \" --knowni\\tpath to a VCF file with known indels (for local realignment)\\n\"\n msg += \" --known\\tpath to a VCF file with known variants (e.g. from dbSNP)\\n\"\n msg += \" --force\\tforce to re-run step(s)\\n\"\n msg += \"\\t\\tthis removes without warning the step directory if it exists\\n\"\n msg += \"\\n\"\n msg += \"Examples:\\n\"\n msg += \" %s --step 1 --samples samples.txt\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Details:\\n\"\n msg += \"This program aims at genotyping a set of 'genotypes' using data from\\n\"\n msg += \"a restriction-assisted DNA sequencing (RAD-seq) experiment, also known\\n\"\n msg += \"as a genotyping-by-sequencing (GBS) experiment.\\n\"\n msg += \"Here, by 'genotype', we mean the entity which is the focus of the\\n\"\n msg += \"study. 
For instance, it can be a plant variety (or a human being), or\\n\"\n msg += \"the specific clone of a given plant variety (or a specific tumor of a\\n\"\n msg += \"given human being), etc.\\n\"\n msg += \"Importantly, note that the content of the 'genotype' column will\\n\"\n msg += \"be used to set the 'SM' (sample) tag of the 'RG' (read group) header\\n\"\n msg += \"record type of the SAM format (see http://www.htslib.org/). However,\\n\"\n msg += \"internal to this program, the term 'sample' corresponds to the unique\\n\"\n msg += \"quadruplet (genotype,flowcell,lane,barcode) for steps 1 and 2, and to\\n\"\n msg += \"the unique triplet (genotype,flowcell,lane) for the others.\\n\"\n msg += \"Jobs are executed in parallel (--schdlr). Their return status is\\n\"\n msg += \"recorded in a SQLite database which is removed at the end. If a job\\n\"\n msg += \"fails, the whole script stops with an error.\\n\"\n msg += \"\\n\"\n msg += \"Dependencies:\\n\"\n msg += \"Python >= 2.7; Biopython; pyutilstimflutre >= 0.5\\n\"\n msg += \"\\n\"\n msg += \"Report bugs to <[email protected]>.\"\n print(msg); sys.stdout.flush()", "def main(argv):\n version = \"0.1.2\"\n interval = 1\n max_run_time = 0\n finished = 0\n first_time = 1\n output_file = 0\n output_file_enabled = 0\n output_path = 0\n header_row = 1\n\n #*** Get the hostname for use in filenames etc:\n hostname = socket.gethostname()\n\n #*** Start by parsing command line parameters:\n try:\n opts, args = getopt.getopt(argv, \"hu:m:ni:w:Wb:jv\",\n [\"help\",\n \"url=\",\n \"max-run-time=\",\n \"no-keepalive\",\n \"interval=\",\n \"output-file=\",\n \"output-path=\",\n \"no-header-row\",\n \"version\"])\n except getopt.GetoptError as err:\n print \"mosp: Error with options:\", err\n print_help()\n sys.exit(2)\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n print_help()\n sys.exit()\n elif opt in (\"-v\", \"--version\"):\n print 'mosp.py version', version\n sys.exit()\n elif opt in (\"-m\", \"--max-run-time\"):\n max_run_time = float(arg)\n elif opt in (\"-i\", \"--interval\"):\n interval = float(arg)\n elif opt in (\"-w\", \"--output-file\"):\n output_file = arg\n output_file_enabled = 1\n elif opt == \"-W\":\n output_file = \"mosp-\" + hostname + \"-\" + \\\n time.strftime(\"%Y%m%d-%H%M%S.csv\")\n output_file_enabled = 1\n elif opt in (\"-b\", \"--output-path\"):\n output_path = arg\n elif opt in (\"-j\", \"--no-header-row\"):\n header_row = 0\n\n print \"\\nMeasure Operating System Performance (mosp) version\", \\\n version\n\n #*** Display output filename:\n if output_file_enabled:\n if output_path:\n output_file = os.path.join(output_path, output_file)\n print \"Results filename is\", output_file\n else:\n print \"Not outputing results to file, as option not selected\"\n\n if not header_row:\n print \"Not writing a header row to CSV\"\n\n #*** Use this if max_run_time is set:\n initial_time = time.time()\n\n #*** Instantiate classes:\n cpus = CPUs()\n swap = Swap()\n nics = NICs()\n\n #*** Start the loop:\n while not finished:\n timenow = datetime.datetime.now()\n timestamp = timenow.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]\n start_time = time.time()\n\n #*** Update CPU measurements:\n cpus.update()\n\n #*** Update swap measurements:\n swap.update()\n\n #*** Update network measurements:\n nics.update()\n\n #*** Put the stats into a nice string for printing and\n #*** writing to file:\n result_csv = str(timestamp) + \",\" \\\n + cpus.csv() \\\n + swap.csv() \\\n + nics.csv() \\\n + \"\\n\"\n result_kvp = str(timestamp) + \" \" \\\n 
+ cpus.kvp() \\\n + swap.kvp() \\\n + nics.kvp()\n print result_kvp\n if output_file_enabled:\n #*** Header row in CSV:\n if first_time and header_row:\n #*** Write a header row to CSV:\n header_csv = \"time,\" + cpus.csv_header(hostname) + \\\n swap.csv_header(hostname) + \\\n nics.csv_header(hostname) + \\\n \"\\n\"\n first_time = 0\n with open(output_file, 'a') as the_file:\n the_file.write(header_csv)\n\n #*** Write a data row to CSV:\n with open(output_file, 'a') as the_file:\n the_file.write(result_csv)\n\n if max_run_time:\n if (start_time - initial_time) > max_run_time:\n break\n\n #*** Sleep for interval seconds:\n time.sleep(interval)", "def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)", "def define_options(self):\n\n from clinica.engine.cmdparser import PIPELINE_CATEGORIES\n\n clinica_comp = self._args.add_argument_group(PIPELINE_CATEGORIES['CLINICA_COMPULSORY'])\n clinica_comp.add_argument(\"caps_directory\",\n help='Path to the CAPS directory.')\n clinica_comp.add_argument(\"list_bvalues\", type=str,\n help='String listing all the shells (i.e. the b-values) in the corrected DWI datasets comma separated (e.g, 0,300,700,2200)')\n # Optional arguments\n clinica_opt = self._args.add_argument_group(PIPELINE_CATEGORIES['CLINICA_OPTIONAL'])\n\n clinica_opt.add_argument(\"-wd\", \"--working_directory\",\n help='Temporary directory to store pipeline intermediate results')\n clinica_opt.add_argument(\"-np\", \"--n_procs\", type=int, default=4,\n help='Number of cores used to run in parallel')\n clinica_opt.add_argument(\"-tsv\", \"--subjects_sessions_tsv\",\n help='TSV file containing a list of subjects with their sessions.')", "def cmd_help(args):", "def reports_cli():", "def printOptions(opts,subject_ids,session_ids,task_list, run_list, acq, rec):\n uname = os.popen('uname -s -n -r').read()\n print \"\\n\"\n print \"* Pipeline started at \"+time.strftime(\"%c\")+\"on \"+uname\n print \"* Command line is : \\n \"+str(sys.argv)+\"\\n\"\n print \"* The source directory is : \"+opts.sourceDir\n print \"* The target directory is : \"+opts.targetDir+\"\\n\"\n print \"* Data-set Subject ID(s) is/are : \"+str(', '.join(subject_ids))+\"\\n\"\n # print \"* PET conditions : \"+ ','.join(opts.condiList)+\"\\n\"\n print \"* Sessions : \", session_ids, \"\\n\"\n print \"* Tasks : \" , task_list , \"\\n\"\n print \"* Runs : \" , run_list , \"\\n\"\n print \"* Acquisition : \" , acq , \"\\n\"\n print \"* Reconstruction : \" , rec , \"\\n\"", "def getHelp(self):\r\n help_str =\\\r\n \"\"\"##########################################################################################\r\n#\r\n# Required:\r\n#\r\n# --query_NAST multi-fasta file containing query sequences in alignment format\r\n#\r\n# Common opts:\r\n#\r\n# --db_NAST db in NAST format\r\n# --db_FASTA db in fasta format (megablast formatted)\r\n#\r\n#\r\n# -n number of top matching database sequences to compare to (default 15)\r\n# -R min divergence ratio default: 1.007\r\n# -P min percent identity among matching sequences (default: 90)\r\n#\r\n# ## parameters to tune 
ChimeraParentSelector:\r\n#\r\n# Scoring parameters:\r\n# -M match score (default: +5)\r\n# -N mismatch penalty (default: -4)\r\n# -Q min query coverage by matching database sequence (default: 70)\r\n# -T maximum traverses of the multiple alignment (default: 1)\r\n\r\n#\r\n# ## parameters to tune ChimeraPhyloChecker:\r\n#\r\n#\r\n# --windowSize default 50\r\n# --windowStep default 5\r\n# --minBS minimum bootstrap support for calling chimera (default: 90)\r\n# -S percent of SNPs to sample on each side of breakpoint for computing bootstrap support (default: 10)\r\n# --num_parents_test number of potential parents to test for chimeras (default: 3)\r\n# --MAX_CHIMERA_PARENT_PER_ID Chimera/Parent alignments with perID above this are considered non-chimeras (default 100; turned off)\r\n#\r\n# ## misc opts\r\n#\r\n# --printFinalAlignments shows alignment between query sequence and pair of candidate chimera parents\r\n# --printCSalignments print ChimeraSlayer alignments in ChimeraSlayer output\r\n# --exec_dir chdir to here before running\r\n#\r\n#########################################################################################\r\n \"\"\"\r\n return help_str", "def procs_calculate_axyzc(molecules, n_cores=-1, show_progress=True, scr=None, cmd=XTB_CMD):\n results = None\n return results", "def _get_metrics_options(metrics):\n metrics_options = []\n if metrics is None:\n metrics = []\n for static_metric in metrics:\n metrics_options += [\n \"-m\",\n static_metric.metric.mp_metric_name,\n str(static_metric.value),\n ]\n return metrics_options", "def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()", "def cvv_report(argv):\n p = optparse.OptionParser()\n p.add_option('-c', '--cfg',\n action='store', default='', dest='config',\n help='config file name')\n p.add_option('-d', '--debug',\n action='store_true', default=False, dest='debug',\n help='run the debugger')\n p.add_option('-p', '--prefix',\n action='store', default='', dest='prefix',\n help='table 
name prefix')\n p.add_option('-v', '--verbose',\n action='store_true', default=False, dest='verbose',\n help='pass verbose flag to HSI object')\n try:\n (o, a) = p.parse_args(argv)\n except SystemExit:\n return\n\n if o.debug:\n pdb.set_trace()\n\n if o.config != '':\n cfg = CrawlConfig.get_config(o.config)\n else:\n cfg = CrawlConfig.get_config()\n\n if o.prefix != '':\n cfg.set('dbi', 'tbl_prefix', o.prefix)\n\n dim = {}\n dim['cos'] = Dimension.get_dim('cos')\n dim['ttypes'] = Dimension.get_dim('ttypes')\n\n print dim['cos'].report()\n print dim['ttypes'].report()", "def _show_help(self):\r\n info = {\"/contexts/<context>/[orgs/[<org_name>]]/[spaces/[<space_name>]]\": \"reports\",\r\n \"/contexts/<context>/orgs_metadata/[<org_name>]\": \"metadata\",\r\n \"/contexts/<context>/orgs/<org_name>/director\": \"org/director mapping\",\r\n \"/reader_status\": \"status of Bitbucket reader cache\"}\r\n if self._cache_refresh:\r\n info['/refresh'] = \"force cache refresh from BitBucket\"\r\n return info", "def info():\n f = Figlet(font='standard')\n click.echo(f.renderText('covtool'))\n click.secho(\n \"covtool: a simple CLI for fetching covid data\", fg='cyan')\n click.echo(\n \"Data Sources: https://www.worldometers.info/coronavirus\\nJohn Hopkins [https://github.com/CSSEGISandData/COVID-19] \")\n click.secho(\"Author: Amayo II <[email protected]>\", fg='magenta')", "def cmdopt(request):\n return request.config.getoption(\"-c\")", "def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n 
sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "def do_config():\n\n tracking = get_tracking()\n for unit in (\"ppm\", \"sec\"):\n\ttunit = unit\n\tif unit == \"sec\":\n\t tunit = \"seconds\"\n\tprint \"multigraph chrony_%s\" % unit\n\tprint \"graph_title NTP (Chrony) Statistics (%s)\" % unit\n\tprint \"graph_vlabel %s\" % unit\n\tprint \"graph_args --base 1000\"\n\tprint \"graph_category time\"\n\tprint \"graph_info NTP (Chrony) tracking statistics (the ones measured in %s)\" % tunit\n\tfor key in tracking[tunit]:\n\t item = tracking[tunit][key]\n\t print \"\"\"%s.label %s\n%s.draw LINE2\n%s.info %s\"\"\" % (key, item[\"label\"], key, key, item[\"label\"])\n\tprint\n return 0", "def cli(argv):\r\n argv.append(\"--exhaust-materials\")\r\n cltestbench.cli(argv)", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')", "def help(update, context):\n msg = \"\"\n msg += \"\\n/covid 7-Day-Incident per Million\"\n msg += \"\\n/daylio What did I do a year ago today?\"\n msg += \"\\n/f1last Results of the last race\"\n msg += \"\\n/f1stand Driver standings\"\n msg += \"\\n/f1next Time and place of the next race\"\n msg += \"\\n/fuel prices and consump. 
(args: Xeur Ykm)\"\n msg += \"\\n/ip Outside ip address\"\n msg += \"\\n/rate Exchange rates (args: Xeur/Yhuf)\"\n msg += \"\\n/rss check rss feeds for new content\"\n msg += \"\\n/sun Time of sunrise and sunset\"\n msg += \"\\n/xkcd Sends last comic image and alt\"\n msg.rstrip()\n update.message.reply_text(msg)", "def show(self, options=None):\n\n # # IMPLEMENTATION NOTE: Stub for implementing options:\n # if options and self.InspectOptions.ALL_OUTPUT_LABELS in options:\n # pass\n\n print (\"\\n---------------------------------------------------------\")\n print (\"\\n{0}\".format(self.name))\n\n\n print (\"\\n\\tControl enabled: {0}\".format(self.enable_controller))\n print (\"\\n\\tProcesses:\")\n\n for process in self.processes:\n print (\"\\t\\t{} [learning enabled: {}]\".format(process.name, process._learning_enabled))\n\n\n # Print execution_sets (output of toposort)\n print (\"\\n\\tExecution sets: \".format(self.name))\n # Sort for consistency of output\n execution_sets_sorted = sorted(self.execution_sets)\n for i in range(len(execution_sets_sorted)):\n # for i in range(len(self.execution_sets)):\n print (\"\\t\\tSet {0}:\\n\\t\\t\\t\".format(i),end='')\n print(\"{ \",end='')\n sorted_mechs_names_in_set = sorted(list(mech_tuple.mechanism.name\n for mech_tuple in self.execution_sets[i]))\n for name in sorted_mechs_names_in_set:\n print(\"{0} \".format(name), end='')\n print(\"}\")\n\n # Print executionList sorted by phase and including EVC mechanism\n\n # Sort executionList by phase\n sorted_execution_list = self.executionList.copy()\n\n\n # Sort by phaseSpec and, within each phase, by mechanism name\n sorted_execution_list.sort(key=lambda mech_tuple: mech_tuple.phase)\n\n\n # Add controller to execution list for printing if enabled\n if self.enable_controller:\n sorted_execution_list.append(MechanismTuple(self.controller, None, self.controller.phaseSpec))\n\n\n mech_names_from_exec_list = list(mech_tuple.mechanism.name for mech_tuple in self.executionList)\n mech_names_from_sorted_exec_list = list(mech_tuple.mechanism.name for mech_tuple in sorted_execution_list)\n\n print (\"\\n\\tExecution list: \".format(self.name))\n phase = 0\n print(\"\\t\\tPhase {}:\".format(phase))\n for mech_tuple in sorted_execution_list:\n if mech_tuple.phase != phase:\n phase = mech_tuple.phase\n print(\"\\t\\tPhase {}:\".format(phase))\n print (\"\\t\\t\\t{}\".format(mech_tuple.mechanism.name))\n\n print (\"\\n\\tOrigin mechanisms: \".format(self.name))\n for mech_tuple in self.originMechanisms.mech_tuples_sorted:\n print(\"\\t\\t{0} (phase: {1})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n\n print (\"\\n\\tTerminal mechanisms: \".format(self.name))\n for mech_tuple in self.terminalMechanisms.mech_tuples_sorted:\n print(\"\\t\\t{0} (phase: {1})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n for output_state_name in mech_tuple.mechanism.outputStates:\n print(\"\\t\\t\\t{0}\".format(output_state_name))\n\n # if any(process.learning for process in self.processes):\n if self.learning:\n print (\"\\n\\tTarget mechanisms: \".format(self.name))\n for mech_tuple in self.targetMechanisms.mech_tuples:\n print(\"\\t\\t{0} (phase: {1})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n\n print (\"\\n---------------------------------------------------------\")", "def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 
\\n'\r\n return m", "def _cmd_segmetrics(args):\n if not 0.0 < args.alpha <= 1.0:\n raise RuntimeError(\"alpha must be between 0 and 1.\")\n\n if not any((args.location_stats, args.spread_stats, args.interval_stats)):\n logging.info(\"No stats specified\")\n return\n\n # Calculate all metrics\n cnarr = read_cna(args.cnarray)\n segarr = read_cna(args.segments)\n segarr = do_segmetrics(\n cnarr,\n segarr,\n args.location_stats,\n args.spread_stats,\n args.interval_stats,\n args.alpha,\n args.bootstrap,\n args.smooth_bootstrap,\n skip_low=args.drop_low_coverage,\n )\n tabio.write(segarr, args.output or segarr.sample_id + \".segmetrics.cns\")", "def help(bin_name='windmill'):\n bin_name = 'windmill'\n module = sys.modules[__name__]\n from windmill.conf import global_settings\n all_option_names = []\n options_string = []\n for option in [getattr(module, x) for x in dir(module) if (\n hasattr(getattr(module, x), 'option_names')) and (\n getattr(module, x).__doc__ is not None ) ]:\n all_option_names.append(option.option_names)\n if hasattr(option, 'setting'):\n if getattr(global_settings, option.setting, None) is not None:\n default = ' Defaults to %s' % str(getattr(global_settings, option.setting, None))\n else:\n default = ''\n else:\n default = ''\n if option.option_names[0] is None:\n if not issubclass(option, GeneralBool):\n options_string.append(' '+''.join([str(option.option_names[1])+'='+' :: ', \n option.__doc__]) + default)\n else:\n options_string.append(' '+''.join([str(option.option_names[1])+' :: ', \n option.__doc__]) + default)\n else:\n if not issubclass(option, GeneralBool):\n options_string.append(' '+''.join([\n '-'+str(option.option_names[0])+', '\n +str(option.option_names[1])+'='+' :: ',\n option.__doc__]) + default)\n else:\n options_string.append(' '+''.join([\n '-'+str(option.option_names[0])+', '\n +str(option.option_names[1])+' :: ',\n option.__doc__]) + default)\n\n preamble = \"\"\"windmill web test automation system.\n %s [-%s] action [option=value] [firefox|ie|safari] [http://www.example.com]\n \nAvailable Actions:\n shell Enter the windmilll shell environment (modified python shell). \n Uses ipython if installed. Exit using ^d\n run_service Run the windmill service in foreground. Kill using ^c.\n \nAvailable Options:\"\"\" % ( bin_name,\n ''.join([ o[0] for o in all_option_names if o[0] is not None ]) \n )\n print preamble\n print '\\n'.join(options_string)", "def build_cmdline():\n\tcmd=optparse.OptionParser(version=__version__)\n\tcmd.add_option('-c', '', dest='config_fname',type=\"string\", help='WHM/WHMCS configuration file', metavar=\"FILE\")\n\tcmd.add_option('-s', '', dest=\"whm_section\", type=\"string\", help=\"WHM server to use. Specify section name. 
eg: -s ds01\", metavar=\"SERVER\")\n\tcmd.add_option('','--search', action=\"store\", dest='search', type=\"string\", help=\"Search client by DNS domain name or cPanel username\", metavar=\"STRING\")\n\tcmd.add_option('-d', '', dest='whmcs_deptid', type=\"int\", help=\"WHMCS Department ID\", metavar=\"INT\") \n\tcmd.add_option('-m', '', dest='whmcs_ticketmsg_fname', type=\"string\", help=\"WHMCS abuse ticket template file\", metavar='FILE')\n\tcmd.add_option('-r', '', dest='whm_suspendmsg_fname', type=\"string\", help='cPanel account suspension reason template file', metavar='FILE')\n\tcmd.add_option('-f', '', dest='whmcs_proofmsg_fname', type=\"string\", help='Abuse proof file which will be appended to abuse ticket message', metavar='FILE')\n\tcmd.add_option('', '--subject', dest='whmcs_subject', type=\"string\", help='Specify abuse ticket subject title.', metavar=\"STRING\")\n\tcmd.add_option('-y', '--allyes', dest='allyes', action=\"store_true\", default=False, help='Assume yes as an answer to any question which would be asked')\n\treturn cmd", "def cmdline(self, executable, options, task, rlimits):\n data_model_param = get_data_model_from_task(task, {ILP32: \"-m32\", LP64: \"-m64\"})\n print(options)\n if data_model_param and not any(\n option.startswith(\"--clang-options=\") for option in options\n ):\n options += [\"--clang-options=\" + data_model_param]\n\n if task.property_file:\n options += [\"--svcomp-property\", task.property_file]\n else:\n raise UnsupportedFeatureException(\n \"SMACK can't execute without a property file.\"\n )\n\n options += [task.single_input_file]\n\n return [executable] + options", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "def execute(self, context):\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n logger.info(\"[ChainedTransfig] Collectd-Jmx w/ Common MBeans Generation...\")\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n\n #insert common to assemble collectd.genericjmx.common.conf in later step\n context[CTX_KEY_COMMON_COLLECTD_JMX_APP_PREFIX] = 'common'\n super().execute(context)\n\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n logger.info(\"[ChainedTransfig] Collectd-Jmx w/ Common MBeans Generation... 
COMPLETES\")\n logger.info(\"///////////////////////////////////////////////////////////////////////\")", "def ShowIPCVoucherAttributeControl(cmd_args=[], cmd_options={}):\n if not cmd_args:\n raise ArgumentError(\"Please provide correct arguments.\")\n ivac = kern.GetValueFromAddress(cmd_args[0], 'ipc_voucher_attr_control_t')\n print GetIPCVoucherAttrControlSummary.header\n print GetIPCVoucherAttrControlSummary(ivac)\n if config['verbosity'] > vHUMAN:\n cur_entry_index = 0\n last_entry_index = unsigned(ivac.ivac_table_size)\n print \"index \" + GetIPCVoucherAttributeEntrySummary.header\n while cur_entry_index < last_entry_index:\n print \"{: <5d} \".format(cur_entry_index) + GetIPCVoucherAttributeEntrySummary(addressof(ivac.ivac_table[cur_entry_index]))\n cur_entry_index += 1", "def host_report(args):\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n k.report()", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-t\", \"--test\", dest=\"test\", type=\"string\",\n help=\"supply help\")\n\n parser.add_option(\"--method\", dest=\"method\", type=\"choice\",\n choices=(\"metrics\", \"summary\", \"module_summary\"),\n help=\"method to summarise clustering\")\n\n parser.add_option(\"--ref-gtf-files\", dest=\"ref_gtf\", type=\"string\",\n help=\"comma separated list of reference gtf files\")\n\n # add common options (-h/--help, ...) and parse command line\n (options, args) = E.Start(parser, argv=argv)\n\n if options.method == \"metrics\":\n infile = argv[-1]\n E.info(\"loading input file: %s\" % infile)\n assert infile\n\n df = pd.read_table(infile,\n sep=\"\\t\",\n header=None,\n index_col=0)\n\n df = df.ix[:, :50]\n cluster_combs = (x for x in itertools.combinations(df.columns,\n 2))\n genes = df.index\n results_dict = {}\n all_clusts = {}\n\n E.info(\"setting up cluster containers\")\n for i in df.columns:\n clusters = set(df[i].values.tolist())\n cluster_dict = {}\n for clust in clusters:\n cluster_dict[clust] = []\n for gene in genes:\n cluster_dict[df[i][gene]].append(gene)\n\n for col in clusters:\n col_set = set()\n clust_col = cluster_dict[col]\n gene_members = itertools.combinations(clust_col,\n 2)\n col_set.update(gene_members)\n cluster_dict[col] = col_set\n all_clusts[i] = cluster_dict\n E.info(\"generating all pair-wise cluster comparisons\")\n E.info(\"calculating adjusted mutual information\")\n for k in cluster_combs:\n clusters1 = all_clusts[k[0]]\n clusters2 = all_clusts[k[1]]\n metric_dict = {}\n metric_dict['AMI'] = TS.adjustedMutualInformation(clusters1,\n clusters2)\n results_dict[k] = metric_dict\n\n res_frame = pd.DataFrame(results_dict).T\n res_frame = res_frame.reset_index()\n res_frame.drop(['level_0'], inplace=True, axis=1)\n res_frame.drop(['level_1'], inplace=True, axis=1)\n\n # flatten rand indices and add to output dataframe\n rand_arrays = TS.randIndexes(df)\n flat_adj_rand = TS.unravel_arrays(rand_arrays[0])\n flat_rand = TS.unravel_arrays(rand_arrays[1])\n res_frame['Rand_Index'] = flat_rand\n res_frame['Adjusted_Rand_Index'] = flat_adj_rand\n E.info(\"aggregating results\")\n\n res_frame.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"summary\":\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n file_dict = {}\n for fle in list_of_files:\n fname = 
fle.split(\"/\")[-1]\n condition = fname.split(\"-\")[0]\n ref = fname.split(\"-\")[1]\n df_ = pd.read_table(fle,\n sep=\"\\t\",\n header=0,\n index_col=0)\n df_.columns = ['gene_id', 'cluster']\n clust_dict = {}\n for idx in df_.index:\n cluster = df_.loc[idx]['cluster']\n gene = df_.loc[idx]['gene_id']\n try:\n clust_dict[cluster] += 1\n except KeyError:\n clust_dict[cluster] = 1\n med_size = np.median(clust_dict.values())\n file_dict[fname] = {'condition': condition,\n 'reference': ref,\n 'median_cluster_size': med_size}\n\n outframe = pd.DataFrame(file_dict).T\n outframe.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"module_summary\":\n # get lncRNA/gene lengths from reference gtfs\n ref_gtfs = options.ref_gtf.split(\",\")\n length_dict = {}\n for ref in ref_gtfs:\n oref = IOTools.openFile(ref, \"rb\")\n git = GTF.transcript_iterator(GTF.iterator(oref))\n for gene in git:\n for trans in gene:\n length = trans.end - trans.start\n try:\n length_dict[trans.gene_id] += length\n except KeyError:\n length_dict[trans.gene_id] = length\n oref.close()\n\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n fdfs = []\n for fle in list_of_files:\n cond = fle.split(\"/\")[-1].split(\"-\")[0]\n refer = fle.split(\"/\")[-1].split(\"-\")[1]\n _df = pd.read_table(fle, sep=\"\\t\",\n header=0, index_col=0)\n _df.columns = ['gene_id', 'cluster']\n clusters = set(_df['cluster'])\n c_dict = {}\n # summarize over each cluster\n for clust in clusters:\n lengths = []\n c_df = _df[_df['cluster'] == clust]\n for lid in c_df['gene_id']:\n lengths.append(length_dict[lid])\n c_dict[clust] = {'cluster_size': len(c_df['gene_id']),\n 'mean_length': np.mean(lengths),\n 'index': (cond, refer),\n 'module': clust}\n cdf = pd.DataFrame(c_dict).T\n # use a multindex for hierarchical indexing\n midx = pd.MultiIndex.from_tuples(cdf['index'])\n cdf.index = midx\n cdf.drop(['index'], inplace=True, axis=1)\n fdfs.append(cdf)\n\n # generate a single output df\n s_df = fdfs[0]\n fdfs.pop(0)\n for df in fdfs:\n s_df = s_df.append(df)\n\n s_df.to_csv(options.stdout,\n index_label=(\"condition\", \"reference\"),\n sep=\"\\t\")\n\n # write footer and output benchmark information.\n E.Stop()", "def about( cls, ):\n url = r\"http://www.opencircuits.com/Python_Smart_Terminal\"\n __, mem_msg = cls.show_process_memory( )\n msg = ( f\"{cls.controller.app_name} version:{cls.controller.version} \\nmode: {cls.parameters.mode}\"\n f\"\\n by Russ Hensel\"\n f\"\\nMemory in use {mem_msg} \\nCheck <Help> or \\n{url} \\nfor more info.\" )\n messagebox.showinfo( \"About\", msg )", "def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: users[user] += count\n except KeyError: users[user] = count\n else: \n try: commands[name] += count\n except KeyError: commands[name] = count\n\n try: channels[user] += count\n except KeyError: channels[user] = count\n\n comrank = sorted([(b, a) for (a, b) in commands.iteritems()], reverse=True)\n userank = sorted([(b, a) for (a, b) in users.iteritems()], reverse=True)\n charank = sorted([(b, a) for (a, b) in channels.iteritems()], reverse=True)\n\n # most heavily used commands\n creply = 'most used commands: '\n for count, command in comrank[:10]: \n creply += '%s (%s), ' % (command, count)\n caesar.say(creply.rstrip(', '))\n\n # most heavy 
users\n reply = 'power users: '\n for count, user in userank[:10]: \n reply += '%s (%s), ' % (user, count)\n caesar.say(reply.rstrip(', '))\n\n # most heavy channels\n chreply = 'power channels: '\n for count, channel in charank[:3]: \n chreply += '%s (%s), ' % (channel, count)\n caesar.say(chreply.rstrip(', '))", "def help_help(self):\n print(\"List commands or print details about a command\")", "def help(self):\n self._log.info('### Help for the class %s ###',\n self.__class__)\n self._log.info(self.__doc__)\n \n print ('## job property:',self._context_name, self.statusOn, self.StoredValue)\n print ('## allowed Values:',self.allowedValues)\n print ('## default Value:', self.__class__.StoredValue )\n print ('## allowed Types :',self.allowedTypes )\n self._log.info('### End of the help for the class %s ###',\n self.__class__)", "def cli_help(self):\n self._generate_cli_version()\n self._generate_cli_help()\n sys.exit(0)", "async def eventstats(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send_help(ctx.command)", "def command_help(args):\n\tprint_usage()\n\treturn 0", "def print_help():\n print(\"Archive generated report to a web server. e.g.\")\n print(\"rm -rf /cs-shared/contrail_code_coverage/test_coverage\")\n print(\"cp -a build/coverage/controller/test_coverage \" +\n \"/cs-shared/contrail_code_coverage/\")\n print(\"http://10.84.5.100/cs-shared/contrail_code_coverage/test_coverage\")", "def opc_calcs(df_param_indexed):\n \n df_param_indexed = df_param_indexed.copy()\n \n ''' commented 20180210 after Calmetrix update\n # Remove for cc1 data exported with cc2\n mix_start = datetime.strptime(\n df_param_indexed.loc['Mix Time', 1], \"%d-%b-%Y %H:%M:%S\")\n log_start = datetime.strptime(\n df_param_indexed.loc['Start Time', 1], \"%d-%b-%Y %H:%M:%S\")\n time_difference = (log_start - mix_start).total_seconds()\n '''\n # select values from DataFrame and calculate mass of binder in sample\n # may be worth checking if any of these values are 0 at some point in the future\n \n m_water = float(df_param_indexed.loc['Water Mass, g', 1])\n m_cem = float(df_param_indexed.loc['Cement Mass, g', 1])\n m_sample = float(df_param_indexed.loc['Sample Mass, g', 1])\n m_sample_cem = m_sample / (m_cem + m_water) * m_cem\n \n return m_sample_cem", "def config_pbc_md(self):\n\n self._config_md()\n self.title = \"PBC MD Simulation\"\n self.cntrl[\"cut\"] = 8.0\n self.cntrl[\"igb\"] = 0\n self.cntrl[\"iwrap\"] = 1\n self.cntrl[\"ntp\"] = 1\n self.cntrl[\"barostat\"] = 2", "def cli(argv):\r\n args = get_args(argv)\r\n verbosity = \"summary\"\r\n if args.verbose:\r\n verbosity = \"report\"\r\n report = evaluate(args.design, verbosity)\r\n print json.dumps(report, indent=4)", "def qsub_cmmd(self):\n temp = 'qsub -l mem=1G,time=:5: -cwd -j y -o {log} {job}'.format(**self.dict)\n return temp.format(fnum=self.fnum)", "def runMCMC(df, cents, show=False):\n if type(cents) is not list:\n cents = [cents]\n numCents = len(cents)\n p = None\n \n # Tau = the precision of the normal distribution (of the above peaks)\n taus = 1. 
/ pm.Uniform('stds', 0, 100, size=numCents)**2 # tau = 1/sigma**2\n centers = pm.Normal('centers', cents, [0.0025 for i in cents],\n size=numCents)\n \n if numCents == 2: # Assignment probability\n p = pm.Uniform('p', 0, 1)\n assignment = pm.Categorical('asisgnment', [p, 1-p],\n size=len(df.intervals))\n @pm.deterministic\n def center_i(assignment=assignment, centers=centers):\n return centers[assignment]\n @pm.deterministic\n def tau_i(assignment=assignment, taus=taus):\n return taus[assignment]\n observations = pm.Normal('obs', center_i, tau_i, value=df.intervals,\n observed=True)\n # Create the model 2 peaks\n mcmc = pm.MCMC([p, assignment, observations, taus, centers])\n \n else:\n observations = pm.Normal('obs', value=df.intervals, observed=True)\n mcmc = pm.MCMC([observations, taus, centers]) # Create model, 1 peak\n \n # Run the model\n mcmc.sample(50000)\n center_trace = mcmc.trace(\"centers\")[:]\n try:\n clusts = [center_trace[:,i] for i in range(numCents)]\n except:\n clusts = [center_trace]\n \n if show:\n for i in range(numCents):\n plt.hist(center_trace[:,i], bins=50, histtype='stepfilled',\n color=['blue', 'red'][i], alpha=0.7)\n plt.show()\n \n print('Evolved clusters at:')\n print([np.mean(c) for c in clusts])\n return clusts", "def test_cli_help(self):\n output = self.update_command('-h')", "def explainerdashboard_cli(ctx):", "def execute(self, context):\n app = context[CTX_KEY_COMMON_COLLECTD_JMX_APP_PREFIX]\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n logger.info(\"[ChainedTransfig] Collectd-Jmx w/ Application-wise [%s] ...\", app)\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n\n super().execute(context)\n\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n logger.info(\"[ChainedTransfig] Collectd-Jmx w/ Application-wise [%s] ... 
\"\n \"COMPLETES\", app)\n logger.info(\"///////////////////////////////////////////////////////////////////////\")", "def print_help_info(self, global_options):\r\n usage = ['',\"Type '%s help <subcommand>' for help on a specific subcommand.\" % self.prog_name,'']\r\n usage.append('Available subcommands:')\r\n commands = self.get_commands(global_options).keys()\r\n commands.sort()\r\n for cmd in commands:\r\n usage.append(' %s' % cmd)\r\n return '\\n'.join(usage)", "def print_performance_info(self):\n pass", "def help_text(command):\n\n courses_list = ('ENPM611', 'ENPM613', 'ENPM631', 'ENPM687',\\\n 'ENPM691', 'ENPM693', 'ENPM694', 'ENPM696',\\\n 'ENPM809J','ENPM809R', 'ENPM809W')\n\n response = 'I have course descriptions for: '\n for course_name in courses_list:\n response = response + course_name + ' '\n\n response = response + '\\nTo get the course description, execute command: about ENPM<course_number>'\n\n return response", "def HMC_Help():\n os.system(\"cls\")\n while True:\n print((\"\\n\\n\",\"Help\".center(50)))\n print_list = [\"ManagedSystem\",\"LogicalPartition\",\"VirtualIOServer\",\"Cluster\",\"Performance Capaity Monitoring\",\"Return to Main Menu\"]\n choice = int(print_obj.print_on_screen(print_list))\n directory = os.path.dirname(os.path.dirname(__file__))\n if choice == 1:\n path = directory+\"/help/ManagedSystem\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 2:\n path = directory+\"/help/LogicalPartition\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 3:\n path = directory+\"/help/VirtualIOServer\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 4:\n print((open(directory+\"/help/Cluster.txt\").read()))\n elif choice == 5:\n print((open(directory+\"/help/PerformanceCapacityMonitoring.txt\").read()))\n elif choice == 6:\n os.system(\"cls\")\n return\n else:\n print(\"\\nTry using Valid option\")\n back_to_menu()", "def stats(self, d_raw_materials=None):\n cm_stats = 'sugar {0} tablespoons remaining\\n'.format(d_raw_materials['sugar'])\n cm_stats += 'butter {0} teaspoons remaining\\n'.format(d_raw_materials['butter'])\n cm_stats += 'dark chocolate {0} tablespoons remaining\\n'.format(d_raw_materials['dark chocolate'])\n cm_stats += 'mint chocolate {0} tablespoons remaining\\n'.format(d_raw_materials['mint chocolate'])\n cm_stats += 'milk chocolate {0} tablespoons remaining\\n'.format(d_raw_materials['milk chocolate'])\n cm_stats += 'light corn syrup {0} teaspoons remaining\\n'.format(d_raw_materials['light corn syrup'])\n cm_stats += 'sweetened condensed milk {0} teaspoons remaining\\n'.format(d_raw_materials[\n 'sweetened condensed milk'])\n cm_stats += 'vanilla extract {0} teaspoons remaining\\n'.format(d_raw_materials['vanilla extract'])\n cm_stats += 'Reese\\'s Pieces {0} tablespoons remaining\\n'.format(d_raw_materials['Reese\\'s Pieces'])\n cm_stats += super(ChocolateMachine, self).stats()\n return cm_stats", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -t, --transaction\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -f, --from-file <filename>\n --not-error-tolerant\n \"\"\"", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 
and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def show_help(argv=None):\n if argv:\n if \"list_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"list_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ls\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"search_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"search_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm search <keyword>\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"show_mounts\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"show_mounts\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ps\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mount <dataset_name> [<mount_path>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mmount <dataset_name> [<dataset_name> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"unmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"unmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm unmount <mount_id> [<cleanup_flag>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"munmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"munmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm munmount <mount_id> [<mount_id> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"clean\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"clean\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm clean\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n else:\n sdm_util.print_message(\"Unrecognized command\")\n return 1\n else:\n sdm_util.print_message(\"command : sdm <COMMAND> [<COMMAND_SPECIFIC_ARGS> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(\"Available Commands\")\n\n tbl = PrettyTable()\n tbl.field_names = [\"COMMAND\", \"DESCRIPTION\"]\n for cmd in COMMANDS:\n command, _, desc = cmd\n command_str = \" | \".join(command)\n tbl.add_row([command_str, desc])\n\n sdm_util.print_message(tbl)\n sdm_util.print_message(\"\")\n return 0", "def printMCLStatus(self):\n print()\n print(\"====================\")\n print(\"Cycle\", self.countCycles)\n self._printHist()", "def get_hmcs(self):\n url = '%s/ibm-hmcs' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['hmcs']\n else:\n LOG.error('Get HMCs failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n 
shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])", "def main():\n configuration = {'resource-folder': 'resources',\n 'build-folder': 'build',\n 'log-folder': 'logfiles',\n 'use-preloaded': False,\n 'addi-metrics': 'addi-metrics.json',\n 'jenkins': {'dependency-filename': 'dependencies.txt',\n 'server': 'http://is.dbc.dk',\n 'repository-project': 'opensearch-3rd-party-dependencies'},\n 'log-zip-file':'logs.zip'}\n configuration.update(cli())\n setup_logger(configuration['verbose'])\n run_performance_test(configuration)", "def cli():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"tic\", help=\"TIC number\")\n parser.add_argument(\"-L\", \"--LIST\", help=\"Only fit the LC\", action=\"store_true\")\n parser.add_argument(\"-S\", \"--SAVEGAIA\", help=\"Save Gaia sources\", action=\"store_true\")\n parser.add_argument(\"-C\", \"--COORD\", help=\"Use coordinates\", default=False)\n parser.add_argument(\"-n\", \"--name\", help=\"Target name to be plotted in title\", default=False)\n parser.add_argument(\"-D2\", \"--DR2\", help=\"Use Gaia DR2 catalog instead of DR3\", action=\"store_true\")\n parser.add_argument(\"-PM\", \"--PM\", help=\"Add proper motion direction arrows in the plot\", action=\"store_true\")\n parser.add_argument(\"--maglim\", default=5., help=\"Maximum magnitude contrast respect to TIC\")\n parser.add_argument(\"--sector\", default=None, help=\"Select Sector if more than one\")\n parser.add_argument(\"--gid\", default=None, help=\"Gaia ID\")\n parser.add_argument(\"--gmag\", default=None, help=\"Gaia mag\")\n parser.add_argument(\"--sradius\", default=10., type=float, help=\"Search radius (in arcsec) for the get_gaia_data function\")\n parser.add_argument(\"--legend\", default='best', help=\"Legend location\")\n args = parser.parse_args()\n return args", "def cli(*args, **kwargs):\n logger.debug('Global options: %s %s', args, kwargs)", "def main():\n logging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n _makeEnvir()\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hv:f:r:\", [\"help\", \"vista=\", \"fmqlep=\", \"report=\", \"host=\", \"port=\", \"access=\", \"verify=\"])\n except getopt.GetoptError, err:\n print str(err)\n print __doc__\n sys.exit(2)\n # Default VistA to Caregraf Demo VistA\n vista = \"CGVISTA\"\n # Will match to CGVISTA's if no other VistA name is specified\n fmqlEP = \"\"\n host = \"\"\n port = -1\n access = \"\"\n verify = \"\"\n report = \"\"\n for o, a in opts:\n if o in [\"-v\", \"--vista\"]:\n vista = a\n elif o in [\"-f\", \"--fmqlep\"]:\n fmqlEP = a\n elif o in [\"--host\"]:\n host = a\n elif o in [\"--port\"]:\n port = a \n elif o in [\"--access\"]:\n access = a\n elif o in [\"--verify\"]:\n verify = a\n elif o in [\"-r\", \"--report\"]:\n report = a\n elif o in [\"-h\", \"--help\"]:\n print __doc__\n sys.exit()\n if not report:\n sys.exit()\n if vista == \"CGVISTA\":\n print \"Defaulting to Caregraf's demo VistA, 'CGVISTA'\"\n fmqlEP = \"http://vista.caregraf.org/fmqlEP\"\n print \"VDM - comparing %s against GOLD\" % vista\n goldCacher = FMQLCacher(\"Caches\")\n 
goldCacher.setVista(\"GOLD\")\n otherCacher = FMQLCacher(\"Caches\")\n otherCacher.setVista(vista, fmqlEP=fmqlEP, host=host, port=int(port), access=access, verify=verify)\n _runReport(report, goldCacher, otherCacher)", "def process_cl_args():\n\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('commands', nargs='*')\n parser.add_argument('--help', '-h', action='store_true')\n parser.add_argument('--version', '-v', action='store_true')\n parser.add_argument('--debug', '-d', action='store_true')\n parser.add_argument('--logging', '-l', action='store_true')\n parser.add_argument('--no-autosize', action='store_true')\n parser.add_argument('--no-preload', action='store_true')\n args = parser.parse_args()\n\n if args.version:\n xprint(get_version_info())\n xprint(\"\")\n sys.exit()\n\n elif args.help:\n for x in helptext():\n xprint(x[2])\n sys.exit()\n\n if args.debug or os.environ.get(\"mpsytdebug\") == \"1\":\n xprint(get_version_info())\n g.debug_mode = True\n g.no_clear_screen = True\n logfile = os.path.join(tempfile.gettempdir(), \"mpsyt.log\")\n logging.basicConfig(level=logging.DEBUG, filename=logfile)\n logging.getLogger(\"pafy\").setLevel(logging.DEBUG)\n\n elif args.logging or os.environ.get(\"mpsytlog\") == \"1\":\n logfile = os.path.join(tempfile.gettempdir(), \"mpsyt.log\")\n logging.basicConfig(level=logging.DEBUG, filename=logfile)\n logging.getLogger(\"pafy\").setLevel(logging.DEBUG)\n\n if args.no_autosize:\n g.detectable_size = False\n\n g.command_line = \"playurl\" in args.commands or \"dlurl\" in args.commands\n if g.command_line:\n g.no_clear_screen = True\n\n if args.no_preload:\n g.preload_disabled = True\n\n g.argument_commands = args.commands", "def get_cp_info(self):\n return self.get(COMMAND_CPM, 'GetCpInfo')", "def help(self):\r\n self._short_help(None, None, None, None)", "def test_cw_metrics(self):\n\n instances = set()\n result = self.cw_client.list_metrics(Namespace=\"CWAgent\", MetricName=\"cpu_usage_system\")\n for i in result[\"Metrics\"]:\n instances.add(i[\"Dimensions\"][0][\"Value\"])\n\n for key, value in self.cdk_output_map.items():\n if \"Instance\" in key:\n self.assertTrue(value in instances)", "def help():\n description = \"\"\"\n Get the observed and expected enrichment trend plot based on contact matrix.\n Example:\n getSigEnrich.py -d GM12878_Trac -o GM12878_Trac -cut 0 -p 10 \n \"\"\"\n parser = argparse.ArgumentParser(description=description,\n formatter_class=RawTextHelpFormatter)\n parser.add_argument(\"-d\",\n dest=\"dir\",\n required=True,\n type=str,\n help=\"Directory for cLoops2 pre generated.\")\n parser.add_argument(\"-o\",\n dest=\"output\",\n required=True,\n type=str,\n help=\"Output prefix.\")\n parser.add_argument(\n \"-c\",\n dest=\"chroms\",\n required=False,\n default=\"\",\n type=str,\n help=\n \"Whether to process limited chroms, specify it as chr1,chr2,chr3, default is not. 
Use this to save time for quite big data.\"\n )\n parser.add_argument(\n \"-bs\",\n dest=\"binSize\",\n required=False,\n default=5000,\n type=int,\n help=\n \"Bin size (bp) to generate the contact matrix for estimation, default is 5000 bp.\"\n )\n parser.add_argument(\n \"-cut\",\n dest=\"cut\",\n type=int,\n default=0,\n help=\"Distance cutoff for PETs to filter, default is 0.\")\n parser.add_argument('-p',\n dest=\"cpu\",\n required=False,\n default=1,\n type=int,\n help=\"Number of CPUs to run the job, default is 1.\")\n parser.add_argument(\n '-r',\n dest=\"repeats\",\n required=False,\n default=10,\n type=int,\n help=\n \"The reapet times to shuffle PETs to get the mean expected background,default is 10.\"\n )\n parser.add_argument('-plot',\n dest=\"plot\",\n required=False,\n action=\"store_true\",\n help=\"Set to plot the result.\")\n op = parser.parse_args()\n return op", "def metrics(_):\r\n collector = BuildsCollector()\r\n build_metrics, headers = collector.get_metrics_table()\r\n print(tabulate(build_metrics, headers=headers))", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "async def sysinfo(self, ctx: Context):\n\t\tstart = time.perf_counter()\n\t\tend = time.perf_counter()\n\t\tduration = (end - start) * 1000\n\t\tcpuavg = psutil.cpu_percent(interval=None)\n\t\tmem = psutil.virtual_memory()[2]\n\t\tdurround = round(duration, 3)\n\t\tosun = os.uname()\n\t\tawait self.send(f\"System Info | CPU: {cpuavg}% | RAM: {mem}% | Latency: {durround * 1000}ms | OS: {sys.platform}\", whisper=[ctx.author.id])", "def diagnostics(self,\n *opts, # type: DiagnosticsOptions\n **kwargs # type: Dict[str, Any]\n ) -> DiagnosticsResult:\n\n return super().diagnostics(*opts, **kwargs)", "def get_help(self) -> None: \n print(messages.get_help())", "def optionHelp(self):\n return {}", "def help():", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))" ]
[ "0.629929", "0.6273303", "0.60736", "0.5926187", "0.5641638", "0.55025315", "0.5396064", "0.53120846", "0.52493966", "0.5243705", "0.523837", "0.5222159", "0.51953936", "0.519269", "0.5182496", "0.5178438", "0.51700777", "0.5129585", "0.51175195", "0.51148605", "0.51081395", "0.5094479", "0.5088534", "0.50876766", "0.5083076", "0.50563556", "0.5043129", "0.5018782", "0.50118124", "0.5006939", "0.49899745", "0.49886495", "0.49876744", "0.49844867", "0.4984234", "0.49684325", "0.49650833", "0.49590605", "0.49570918", "0.4928168", "0.4922446", "0.4919366", "0.4916089", "0.4908699", "0.49069148", "0.49012366", "0.48999208", "0.4895324", "0.48884052", "0.48766488", "0.48741058", "0.4873641", "0.48608923", "0.48579937", "0.48553276", "0.48514852", "0.4843435", "0.48313886", "0.48263013", "0.4812996", "0.48095083", "0.48004547", "0.48003402", "0.47984856", "0.4797413", "0.4791654", "0.47847703", "0.47708428", "0.4768093", "0.47652674", "0.4755939", "0.4754236", "0.47494218", "0.47474626", "0.4732156", "0.47295058", "0.47291747", "0.47262752", "0.47110453", "0.47055694", "0.46991584", "0.4696264", "0.4693831", "0.46922427", "0.4688808", "0.4683552", "0.4682853", "0.46808046", "0.46804628", "0.46799797", "0.4673382", "0.46684214", "0.46664265", "0.46621153", "0.46611106", "0.4658123", "0.4655142", "0.46526244", "0.46523097", "0.46492645" ]
0.649805
0
Report processor usage metrics for CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_proc(cmd_ctx, cpc, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)", "def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, 
and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. \r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text", "def measure(self,command_exe, command_args, measure_out):\n pass", "def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))", "def metrics_roce(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_roce(cmd_ctx, cpc, options))", "def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: users[user] += count\n except KeyError: users[user] = count\n else: \n try: commands[name] += count\n except KeyError: commands[name] = count\n\n try: channels[user] += count\n except KeyError: channels[user] = count\n\n comrank = sorted([(b, a) for (a, b) in commands.iteritems()], reverse=True)\n userank = sorted([(b, a) for (a, b) in users.iteritems()], reverse=True)\n charank = sorted([(b, a) for (a, b) in channels.iteritems()], reverse=True)\n\n # most heavily used commands\n creply = 'most used commands: '\n for count, command in 
comrank[:10]: \n creply += '%s (%s), ' % (command, count)\n caesar.say(creply.rstrip(', '))\n\n # most heavy users\n reply = 'power users: '\n for count, user in userank[:10]: \n reply += '%s (%s), ' % (user, count)\n caesar.say(reply.rstrip(', '))\n\n # most heavy channels\n chreply = 'power channels: '\n for count, channel in charank[:3]: \n chreply += '%s (%s), ' % (channel, count)\n caesar.say(chreply.rstrip(', '))", "def main():\n processor.custom_config = parse_arguments()\n processor.process()\n logger.info(processor.statistics)\n logger.info(processor.custom_config)", "def procs_calculate_axyzc(molecules, n_cores=-1, show_progress=True, scr=None, cmd=XTB_CMD):\n results = None\n return results", "async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)", "def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()", "def process_info(process):\n\thelp(process)", "def metrics_flash(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_flash(cmd_ctx, cpc, options))", "def phast_cmmd(self):\n temp = '{prog} -R {rho} -C {ecov} -E {elen} -N {chrom} -i MAF {maf} {model} > {wig}\\n'.format(**self.dict)\n return temp.format(fnum=self.fnum)", "def help(self, args):\n print('No commands available for this consumer')", "def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n 
parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging", "def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')", "def help(self):\n msg = \"`%s' performs the computational aspects of genotyping-by-sequencing.\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Usage: %s [OPTIONS] ...\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Options:\\n\"\n msg += \" -h, --help\\tdisplay the help and exit\\n\"\n msg += \" -V, --version\\toutput version information and exit\\n\"\n msg += \" -v, --verbose\\tverbosity level (0/default=1/2/3)\\n\"\n msg += \" --proj1\\tname of the project used for steps 1 to 4\\n\"\n msg += \"\\t\\tmention a reference genome only if all samples belong to\\n\"\n msg += \"\\t\\t the same species, and will be mapped to the same ref genome\\n\"\n msg += \" --proj2\\tname of the project used for steps 4 to 8\\n\"\n msg += \"\\t\\tcan be the same as --proj1, or can be different\\n\"\n msg +=\"\\t\\t notably when samples come from different species\\n\"\n msg += \"\\t\\t or if one wants to align reads to different ref genomes\\n\"\n msg += \" --schdlr\\tname of the cluster scheduler (default=SGE)\\n\"\n msg += \" --queue\\tname of the cluster queue (default=normal.q)\\n\"\n msg += \" --resou\\tcluster resources (e.g. 'test' for 'qsub -l test')\\n\"\n msg += \" --rmvb\\tremove bash scripts for jobs launched in parallel\\n\"\n msg += \" --step\\tstep to perform (1/2/3/.../9)\\n\"\n msg += \"\\t\\t1: raw read quality per lane (with FastQC v >= 0.11.2)\\n\"\n msg += \"\\t\\t2: demultiplexing per lane (with demultiplex.py v >= 1.14.0)\\n\"\n msg += \"\\t\\t3: cleaning per sample (with CutAdapt v >= 1.8)\\n\"\n msg += \"\\t\\t4: alignment per sample (with BWA MEM v >= 0.7.12, Samtools v >= 1.3, Picard and R v >= 3)\\n\"\n msg += \"\\t\\t5: local realignment per sample (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t6: local realignment per genotype (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t7: variant and genotype calling per genotype (with GATK HaplotypeCaller v >= 3.5)\\n\"\n msg += \"\\t\\t8: variant and genotype calling jointly across genotypes (with GATK GenotypeGVCFs v >= 3.5)\\n\"\n msg += \"\\t\\t9: variant and genotype filtering (with GATK v >= 3.5)\\n\"\n msg += \" --samples\\tpath to the 'samples' file\\n\"\n msg += \"\\t\\tcompulsory for all steps, but can differ between steps\\n\"\n msg += \"\\t\\t e.g. 
if samples come from different species or are aligned\\n\"\n msg += \"\\t\\t on different ref genomes, different samples file should\\n\"\n msg += \"\\t\\t be used for steps 4-9, representing different subsets of\\n\"\n msg += \"\\t\\t the file used for steps 1-3\\n\"\n msg += \"\\t\\tthe file should be encoded in ASCII\\n\"\n msg += \"\\t\\tthe first row should be a header with column names\\n\"\n msg += \"\\t\\teach 'sample' (see details below) should have one and only one row\\n\"\n msg += \"\\t\\tany two columns should be separated with one tabulation\\n\"\n msg += \"\\t\\tcolumns can be in any order\\n\"\n msg += \"\\t\\trows starting by '#' are skipped\\n\"\n msg += \"\\t\\t12 columns are compulsory (but there can be more):\\n\"\n msg += \"\\t\\t genotype (see details below, e.g. 'Col-0', but use neither underscore '_' nor space ' ' nor dot '.', use dash '-' instead)\\n\"\n msg += \"\\t\\t ref_genome (identifier of the reference genome used for alignment, e.g. 'Atha_v2', but use neither space ' ' nor dot '.'; the full species name, e.g. 'Arabidopsis thaliana', will be present in the file given to --dict)\\n\"\n msg += \"\\t\\t library (e.g. can be the same as 'genotype')\\n\"\n msg += \"\\t\\t barcode (e.g. 'ATGG')\\n\"\n msg += \"\\t\\t seq_center (e.g. 'Broad Institute', 'GenoToul', etc)\\n\"\n msg += \"\\t\\t seq_platform (e.g. 'ILLUMINA', see SAM format specification)\\n\"\n msg += \"\\t\\t seq_platform_model (e.g. 'HiSeq 2000')\\n\"\n msg += \"\\t\\t flowcell (e.g. 'C5YMDACXX')\\n\"\n msg += \"\\t\\t lane (e.g. '3', can be '31' if a first demultiplexing was done per index)\\n\"\n msg += \"\\t\\t date (e.g. '2015-01-15', see SAM format specification)\\n\"\n msg += \"\\t\\t fastq_file_R1 (filename, one per lane, gzip-compressed)\\n\"\n msg += \"\\t\\t fastq_file_R2 (filename, one per lane, gzip-compressed)\\n\"\n msg += \" --fcln\\tidentifier of a flowcell and lane number\\n\"\n msg += \"\\t\\tformat as <flowcell>_<lane-number>, e.g. 
'C5YMDACXX_1'\\n\"\n msg += \"\\t\\tif set, only the samples from this lane will be analyzed\\n\"\n msg += \" --pird\\tpath to the input reads directory\\n\"\n msg += \"\\t\\tcompulsory for steps 1 and 2\\n\"\n msg += \"\\t\\twill be added to the columns 'fastq_file_R*' from the sample file\\n\"\n msg += \"\\t\\tif not set, input read files should be in current directory\\n\"\n msg += \" --enz\\tname of the restriction enzyme\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=ApeKI\\n\"\n msg += \" --dmxmet\\tmethod used to demultiplex\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=4c (see the help of demultiplex.py to know more)\\n\"\n msg += \" --subst\\tnumber of substitutions allowed during demultiplexing\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=2\\n\"\n msg += \" --ensubst\\tenforce the nb of substitutions allowed\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=lenient/strict\\n\"\n msg += \" --adp\\tpath to the file containing the adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tsame format as FastQC: name<tab>sequence\\n\"\n msg += \"\\t\\tname: at least 'adpR1' (also 'adpR2' if paired-end)\\n\"\n msg += \"\\t\\tsequence: from 5' (left) to 3' (right)\\n\"\n msg += \" --errtol\\terror tolerance to find adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --minovl\\tminimum overlap length between reads and adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=3 (in bases)\\n\"\n msg += \" --minrl\\tminimum length to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=35 (in bases)\\n\"\n msg += \" --minq\\tminimum quality to trim a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=20 (used for both reads if paired-end)\\n\"\n msg += \" --maxNp\\tmaximum percentage of N to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --ref\\tpath to the prefix of files for the reference genome\\n\"\n msg += \"\\t\\tcompulsory for steps 4, 5, 6, 7, 8, 9\\n\"\n msg += \"\\t\\tshould correspond to the 'ref_genome' column in --samples\\n\"\n msg += \"\\t\\te.g. '/data/Atha_v2' for '/data/Atha_v2.fa', '/data/Atha_v2.bwt', etc\\n\"\n msg += \"\\t\\tthese files are produced via 'bwa index ...'\\n\"\n msg += \" --dict\\tpath to the 'dict' file (SAM header with @SQ tags)\\n\"\n msg += \"\\t\\tcompulsory for step 4\\n\"\n msg += \"\\t\\tsee 'CreateSequenceDictionary' in the Picard software\\n\"\n msg += \" --jgid\\tcohort identifier to use for joint genotyping\\n\"\n msg += \"\\t\\tcompulsory for steps 8, 9\\n\"\n msg += \"\\t\\tuseful to launch several, different cohorts in parallel\\n\"\n msg += \" --rat\\trestrict alleles to be of a particular allelicity\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdefault=ALL/BIALLELIC/MULTIALLELIC\\n\"\n msg += \"\\t\\tsee '--restrictAllelesTo' in GATK's SelectVariant\\n\"\n msg += \" --mdp\\tminimum value for DP (read depth; e.g. 10)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mgq\\tminimum value for GQ (genotype quality; e.g. 
20)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mnfg\\tmaximum number of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mffg\\tmaximum fraction of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFractionFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mnnc\\tmaximum number of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \" --mfnc\\tmaximum fraction of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxNOCALLfraction' in GATK's SelectVariants\\n\"\n msg += \" --fam\\tpath to the file containing pedigree information\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdiscard variants with Mendelian violations (see Semler et al, 2012)\\n\"\n msg += \"\\t\\tshould be in the 'fam' format specified by PLINK\\n\"\n msg += \"\\t\\tvalidation strictness (GATK '-pedValidationType') is set at 'SILENT'\\n\"\n msg += \"\\t\\t allowing some samples to be absent from the pedigree\\n\"\n msg += \" --mvq\\tminimum GQ for each trio member to accept a variant as a Mendelian violation\\n\"\n msg += \"\\t\\tused in step 9 if '--fam' is specified\\n\"\n msg += \"\\t\\tdefault=0\\n\"\n msg += \" --xlssf\\tpath to the file with genotypes to exclude\\n\"\n msg += \"\\t\\tused in step 9 (can be especially useful if '--fam' is specified)\\n\"\n msg += \" --tmpd\\tpath to a temporary directory on child nodes (default=.)\\n\"\n msg += \"\\t\\te.g. it can be /tmp or /scratch\\n\"\n msg += \"\\t\\tused in step 4 for 'samtools sort'\\n\"\n msg += \"\\t\\tused in step 7 for 'GATK HaplotypeCaller'\\n\"\n msg += \" --jvmXms\\tinitial memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=512m (can also be specified as 1024k, 1g, etc)\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --jvmXmx\\tmaximum memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=4g\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --queue2\\tname of the second cluster queue (default=bigmem.q)\\n\"\n msg += \"\\t\\tused in step 4 for Picard to collect insert sizes\\n\"\n msg += \" --knowni\\tpath to a VCF file with known indels (for local realignment)\\n\"\n msg += \" --known\\tpath to a VCF file with known variants (e.g. from dbSNP)\\n\"\n msg += \" --force\\tforce to re-run step(s)\\n\"\n msg += \"\\t\\tthis removes without warning the step directory if it exists\\n\"\n msg += \"\\n\"\n msg += \"Examples:\\n\"\n msg += \" %s --step 1 --samples samples.txt\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Details:\\n\"\n msg += \"This program aims at genotyping a set of 'genotypes' using data from\\n\"\n msg += \"a restriction-assisted DNA sequencing (RAD-seq) experiment, also known\\n\"\n msg += \"as a genotyping-by-sequencing (GBS) experiment.\\n\"\n msg += \"Here, by 'genotype', we mean the entity which is the focus of the\\n\"\n msg += \"study. 
For instance, it can be a plant variety (or a human being), or\\n\"\n msg += \"the specific clone of a given plant variety (or a specific tumor of a\\n\"\n msg += \"given human being), etc.\\n\"\n msg += \"Importantly, note that the content of the 'genotype' column will\\n\"\n msg += \"be used to set the 'SM' (sample) tag of the 'RG' (read group) header\\n\"\n msg += \"record type of the SAM format (see http://www.htslib.org/). However,\\n\"\n msg += \"internal to this program, the term 'sample' corresponds to the unique\\n\"\n msg += \"quadruplet (genotype,flowcell,lane,barcode) for steps 1 and 2, and to\\n\"\n msg += \"the unique triplet (genotype,flowcell,lane) for the others.\\n\"\n msg += \"Jobs are executed in parallel (--schdlr). Their return status is\\n\"\n msg += \"recorded in a SQLite database which is removed at the end. If a job\\n\"\n msg += \"fails, the whole script stops with an error.\\n\"\n msg += \"\\n\"\n msg += \"Dependencies:\\n\"\n msg += \"Python >= 2.7; Biopython; pyutilstimflutre >= 0.5\\n\"\n msg += \"\\n\"\n msg += \"Report bugs to <[email protected]>.\"\n print(msg); sys.stdout.flush()", "def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])", "def usage(progname):\n \n sys.stderr.write(\"Usage: \" +progname + \" [-cmnv] [-z score] \"\n \" <outdir>\\n\")\n sys.stderr.write(' -c class level not fold level evaluation\\n')\n sys.stderr.write(' -m read multiquery file on stdin\\n')\n sys.stderr.write(' -n negate scores (so that most -ve is best)\\n')\n sys.stderr.write(' -v verbose messages to stderr\\n')\n sys.stderr.write(' -z score : assign identifiers not present in the output a score of score\\n')\n sys.exit(1)", "def _cmd_segmetrics(args):\n if not 0.0 < args.alpha <= 1.0:\n raise RuntimeError(\"alpha must be between 0 and 1.\")\n\n if not any((args.location_stats, args.spread_stats, args.interval_stats)):\n logging.info(\"No stats specified\")\n return\n\n # Calculate all metrics\n cnarr = read_cna(args.cnarray)\n segarr = read_cna(args.segments)\n segarr = do_segmetrics(\n cnarr,\n segarr,\n args.location_stats,\n args.spread_stats,\n args.interval_stats,\n args.alpha,\n args.bootstrap,\n args.smooth_bootstrap,\n skip_low=args.drop_low_coverage,\n )\n tabio.write(segarr, args.output or segarr.sample_id + \".segmetrics.cns\")", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-t\", \"--test\", dest=\"test\", type=\"string\",\n help=\"supply help\")\n\n parser.add_option(\"--method\", dest=\"method\", type=\"choice\",\n choices=(\"metrics\", \"summary\", \"module_summary\"),\n help=\"method to summarise clustering\")\n\n parser.add_option(\"--ref-gtf-files\", dest=\"ref_gtf\", type=\"string\",\n help=\"comma separated list of reference gtf files\")\n\n # add common options (-h/--help, ...) 
and parse command line\n (options, args) = E.Start(parser, argv=argv)\n\n if options.method == \"metrics\":\n infile = argv[-1]\n E.info(\"loading input file: %s\" % infile)\n assert infile\n\n df = pd.read_table(infile,\n sep=\"\\t\",\n header=None,\n index_col=0)\n\n df = df.ix[:, :50]\n cluster_combs = (x for x in itertools.combinations(df.columns,\n 2))\n genes = df.index\n results_dict = {}\n all_clusts = {}\n\n E.info(\"setting up cluster containers\")\n for i in df.columns:\n clusters = set(df[i].values.tolist())\n cluster_dict = {}\n for clust in clusters:\n cluster_dict[clust] = []\n for gene in genes:\n cluster_dict[df[i][gene]].append(gene)\n\n for col in clusters:\n col_set = set()\n clust_col = cluster_dict[col]\n gene_members = itertools.combinations(clust_col,\n 2)\n col_set.update(gene_members)\n cluster_dict[col] = col_set\n all_clusts[i] = cluster_dict\n E.info(\"generating all pair-wise cluster comparisons\")\n E.info(\"calculating adjusted mutual information\")\n for k in cluster_combs:\n clusters1 = all_clusts[k[0]]\n clusters2 = all_clusts[k[1]]\n metric_dict = {}\n metric_dict['AMI'] = TS.adjustedMutualInformation(clusters1,\n clusters2)\n results_dict[k] = metric_dict\n\n res_frame = pd.DataFrame(results_dict).T\n res_frame = res_frame.reset_index()\n res_frame.drop(['level_0'], inplace=True, axis=1)\n res_frame.drop(['level_1'], inplace=True, axis=1)\n\n # flatten rand indices and add to output dataframe\n rand_arrays = TS.randIndexes(df)\n flat_adj_rand = TS.unravel_arrays(rand_arrays[0])\n flat_rand = TS.unravel_arrays(rand_arrays[1])\n res_frame['Rand_Index'] = flat_rand\n res_frame['Adjusted_Rand_Index'] = flat_adj_rand\n E.info(\"aggregating results\")\n\n res_frame.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"summary\":\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n file_dict = {}\n for fle in list_of_files:\n fname = fle.split(\"/\")[-1]\n condition = fname.split(\"-\")[0]\n ref = fname.split(\"-\")[1]\n df_ = pd.read_table(fle,\n sep=\"\\t\",\n header=0,\n index_col=0)\n df_.columns = ['gene_id', 'cluster']\n clust_dict = {}\n for idx in df_.index:\n cluster = df_.loc[idx]['cluster']\n gene = df_.loc[idx]['gene_id']\n try:\n clust_dict[cluster] += 1\n except KeyError:\n clust_dict[cluster] = 1\n med_size = np.median(clust_dict.values())\n file_dict[fname] = {'condition': condition,\n 'reference': ref,\n 'median_cluster_size': med_size}\n\n outframe = pd.DataFrame(file_dict).T\n outframe.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"module_summary\":\n # get lncRNA/gene lengths from reference gtfs\n ref_gtfs = options.ref_gtf.split(\",\")\n length_dict = {}\n for ref in ref_gtfs:\n oref = IOTools.openFile(ref, \"rb\")\n git = GTF.transcript_iterator(GTF.iterator(oref))\n for gene in git:\n for trans in gene:\n length = trans.end - trans.start\n try:\n length_dict[trans.gene_id] += length\n except KeyError:\n length_dict[trans.gene_id] = length\n oref.close()\n\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n fdfs = []\n for fle in list_of_files:\n cond = fle.split(\"/\")[-1].split(\"-\")[0]\n refer = fle.split(\"/\")[-1].split(\"-\")[1]\n _df = pd.read_table(fle, sep=\"\\t\",\n header=0, index_col=0)\n _df.columns = ['gene_id', 'cluster']\n clusters = set(_df['cluster'])\n c_dict = {}\n # summarize over each cluster\n for clust in clusters:\n lengths = []\n c_df = _df[_df['cluster'] == clust]\n for lid in c_df['gene_id']:\n 
lengths.append(length_dict[lid])\n c_dict[clust] = {'cluster_size': len(c_df['gene_id']),\n 'mean_length': np.mean(lengths),\n 'index': (cond, refer),\n 'module': clust}\n cdf = pd.DataFrame(c_dict).T\n # use a multindex for hierarchical indexing\n midx = pd.MultiIndex.from_tuples(cdf['index'])\n cdf.index = midx\n cdf.drop(['index'], inplace=True, axis=1)\n fdfs.append(cdf)\n\n # generate a single output df\n s_df = fdfs[0]\n fdfs.pop(0)\n for df in fdfs:\n s_df = s_df.append(df)\n\n s_df.to_csv(options.stdout,\n index_label=(\"condition\", \"reference\"),\n sep=\"\\t\")\n\n # write footer and output benchmark information.\n E.Stop()", "def main():\n\n parser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n parser.add_argument('com', type=str, help='plot requests file name', nargs='?', default=\"/tmp/foo.c\")\n parser.add_argument('comout', type=str, help='COMOUT file name', nargs='?', default=\"zoom\")\n\n args = parser.parse_args()\n\n cmd = {} # dictionary of commands\n\n bas = False\n plane = False\n \n with open(args.com) as f:\n for line in f.readlines():\n words = line.strip().split()\n if len(words) is 0:\n continue\n\n for i,w in enumerate(words):\n if re.search(\"^bas\", w):\n cmd['bas'] = list(map(float, words[i+1:i+7]))\n if plane is False: bas = True # basis was before plane cuts\n elif re.search(\"^or\", w):\n cmd['or'] = list(map(float, words[i+1:i+4]))\n elif re.search(\"^ex\", w):\n try: # both x and y scales are given\n cmd['ex'] = list(map(float, words[i+1:i+3]))\n continue\n except ValueError: # just 1 scale is given\n cmd['ex'] = list(map(float, words[i+1:i+2]))\n elif re.search(\"^lab\", w):\n cmd['label'] = list(map(int, map(float, words[i+1:i+3]))) #+ [words[i+3]]\n elif re.search(\"^p[xyz]\", w):\n cmd[w] = [float(words[i+1])]\n if bas is False: plane = True # plane cuts were before basis\n elif re.search(\"^legend\", w):\n cmd[w] = [words[i+1]]\n elif w == \"scale\":\n print(w)\n if int(words[i+1]): # no need to put 'scale 0'\n cmd[w] = [words[i+1]]\n elif w in (\"mesh\"):\n if int(words[i+1])==1: # no need to put 'mesh 1'\n cmd[w] = [words[i+1]]\n\n print(bas, plane)\n\n if plane: # bas was first\n keys = ('bas', 'or', 'ex', 'px', 'py', 'pz', 'label', 'mesh', 'legend', 'scale')\n elif bas:\n keys = ('or', 'ex', 'px', 'py', 'pz', 'bas', 'label', 'mesh', 'legend', 'scale')\n else:\n keys = {'or', 'ex', 'label', 'mesh', 'legend', 'scale'}\n \n with open(args.comout, 'w') as f:\n for key in keys:\n if key in cmd:\n # newline required by mcplot:\n if key in ('mesh', 'legend', 'scale', 'label'):\n f.write(\"\\n\")\n f.write(\"%s %s \" % (key,\" \".join(str(e) for e in cmd[key]),))\n f.write(\"\\n\")", "def main():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n add_common_snmp_args(parser)\n parser.add_argument(\n \"-w\",\n \"--warning\",\n type=int,\n default=70,\n help=\"Warning memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-c\",\n \"--critical\",\n type=int,\n default=90,\n help=\"Critical memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-f\",\n \"--family\",\n required=True,\n help=\"Switch family. 
Supported families: 1910, 1920, 1920S\",\n )\n\n config = vars(parser.parse_args())\n check_snmp_args(config)\n check_thresholds(config)\n\n dataset = {}\n\n if config[\"family\"] == \"1920S\":\n cpu = ObjectType(\n ObjectIdentity(\n \"HP-SWITCHING-MIB\", \"agentSwitchCpuProcessTotalUtilization\", 0\n )\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n cpu = ObjectType(\n ObjectIdentity(\"HH3C-ENTITY-EXT-MIB\", \"hh3cEntityExtCpuUsage\", 8)\n )\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n try:\n raw_data = get_snmp_data(config, cpu)\n except ValueError as err:\n unknown_exit(SERVICE, err)\n add_vars_to_dataset(dataset, raw_data)\n\n if config[\"family\"] == \"1920S\":\n dataset[\"cpu_usage\"] = get_hp_cpu_usage(\n dataset[\"agentSwitchCpuProcessTotalUtilization\"]\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n dataset[\"cpu_usage\"] = int(dataset[\"hh3cEntityExtCpuUsage\"])\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n state, message = generate_output(config, dataset)\n report(state, message)", "def cmd_help(args):", "def eval_cpuset():\n\tnum_cpu = run('grep -c ^processor /proc/cpuinfo',quiet=True,warn_only=True)\n\tprint(red('Number of cpus : \\t'+num_cpu))", "def handle_program_options():\n parser = argparse.ArgumentParser(description=\"Gather numeric information \\\n about the processed sequence data in an \\\n MG-RAST project.\")\n parser.add_argument('project_id',\n help=\"The project identifier (MG-RAST ID)\")\n parser.add_argument('-a', '--auth_key',\n help=\"An MG-RAST API authorization key. This is \\\n necessary to access projects marked as private.\")\n parser.add_argument('-g', '--group_by', action='append',\n help=\"A string that matches some part of the \\\n 'Metagenome Name' field. All matching project \\\n metagenomes will be grouped by this identifier \\\n and their stats will be summed. This option can \\\n be specified multiple times to create multiple \\\n groups. All non-matching metagenomes will \\\n appear separately in the table. NOTE: \\\n Strings will be matched longest first. This \\\n allows for matching names that might be a \\\n substring of another match. For example: -g S \\\n -g NS. 
The name field will first be matched \\\n against the longest string (NS) first and then \\\n each smaller string in order.\")\n parser.add_argument('-o', '--output_filename', default='meta_stats.txt',\n help=\"The name of the file the project summary \\\n information will be written to.\")\n\n# parser.add_argument('-v', '--verbose', action='store_true')\n\n return parser.parse_args()", "def qsub_cmmd(self):\n temp = 'qsub -l mem=1G,time=:5: -cwd -j y -o {log} {job}'.format(**self.dict)\n return temp.format(fnum=self.fnum)", "def process_cl_args():\n\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('commands', nargs='*')\n parser.add_argument('--help', '-h', action='store_true')\n parser.add_argument('--version', '-v', action='store_true')\n parser.add_argument('--debug', '-d', action='store_true')\n parser.add_argument('--logging', '-l', action='store_true')\n parser.add_argument('--no-autosize', action='store_true')\n parser.add_argument('--no-preload', action='store_true')\n args = parser.parse_args()\n\n if args.version:\n xprint(get_version_info())\n xprint(\"\")\n sys.exit()\n\n elif args.help:\n for x in helptext():\n xprint(x[2])\n sys.exit()\n\n if args.debug or os.environ.get(\"mpsytdebug\") == \"1\":\n xprint(get_version_info())\n g.debug_mode = True\n g.no_clear_screen = True\n logfile = os.path.join(tempfile.gettempdir(), \"mpsyt.log\")\n logging.basicConfig(level=logging.DEBUG, filename=logfile)\n logging.getLogger(\"pafy\").setLevel(logging.DEBUG)\n\n elif args.logging or os.environ.get(\"mpsytlog\") == \"1\":\n logfile = os.path.join(tempfile.gettempdir(), \"mpsyt.log\")\n logging.basicConfig(level=logging.DEBUG, filename=logfile)\n logging.getLogger(\"pafy\").setLevel(logging.DEBUG)\n\n if args.no_autosize:\n g.detectable_size = False\n\n g.command_line = \"playurl\" in args.commands or \"dlurl\" in args.commands\n if g.command_line:\n g.no_clear_screen = True\n\n if args.no_preload:\n g.preload_disabled = True\n\n g.argument_commands = args.commands", "def main():\n parser = argparse.ArgumentParser(description='investigate code health and random statistics')\n sub_parsers = parser.add_subparsers(dest='command_name', title='Commands', help='', metavar='<command>')\n\n sub = sub_parsers.add_parser('line-count', help='list line counts')\n sub.add_argument('files', nargs='+', help='files or folders to look in')\n sub.add_argument('--each', type=int, default=1)\n sub.add_argument('--show', action='store_true')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_line_count)\n\n sub = sub_parsers.add_parser('include-list', help='list headers from files')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--print', dest='print_files', action='store_true')\n sub.add_argument('--print-stats', dest='print_stats', action='store_true')\n sub.add_argument('--print-max', dest='print_max', action='store_true')\n sub.add_argument('--no-list', dest='print_list', action='store_false')\n sub.add_argument('--count', default=2, type=int, help=\"only print includes that are more or equal to <count>\")\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.set_defaults(func=handle_list)\n\n sub = sub_parsers.add_parser('include-gv', help='generate a graphviz of the includes')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n 
sub.add_argument('--group', action='store_true', help=\"group output\")\n sub.add_argument('--cluster', action='store_true', help=\"group output into clusters\")\n sub.set_defaults(func=handle_gv)\n\n sub = sub_parsers.add_parser('list-indents', help='list the files with the maximum indents')\n sub.add_argument('files', nargs='+')\n sub.add_argument('--each', type=int, default=1, help='group counts')\n sub.add_argument('--show', action='store_true', help='include files in list')\n sub.add_argument('--hist', action='store_true', help='show simple histogram')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_list_indents)\n\n sub = sub_parsers.add_parser('missing-pragma-once', help='find headers with missing include guards')\n sub.add_argument('files', nargs='+')\n sub.set_defaults(func=handle_missing_include_guards)\n\n sub = sub_parsers.add_parser('missing-in-cmake', help='find files that existis on disk but missing in cmake')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_missing_in_cmake)\n\n sub = sub_parsers.add_parser('list-no-project-folders', help='find projects that have not set the solution folder')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_list_no_project_folder)\n\n sub = sub_parsers.add_parser('check-files', help=\"find files that doesn't match the name style\")\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_check_files)\n\n args = parser.parse_args()\n if args.command_name is not None:\n args.func(args)\n else:\n parser.print_help()", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "def command_help(args):\n\tprint_usage()\n\treturn 0", "def main():\n known_args, unknown_args = parse_known_args()\n if not unknown_args:\n # return an error message if no command is provided\n sys.exit(\"Please provide a command to benchmark: $ humann_benchmark COMMAND\")\n try:\n process = subprocess.Popen(\" \".join(unknown_args),shell=True)\n except (EnvironmentError, subprocess.CalledProcessError):\n sys.exit(\"Unable to execute command: \" + \" \".join(unknown_args))\n pid=str(process.pid)\n start=time.time()\n max_memory=0\n while process.poll() is None:\n time.sleep(1)\n # while the process is running check on the memory use\n # get the pids of the main process and all children (and their children)\n pids=get_pids(pid)\n stdout=subprocess.check_output([\"ps\",\"--pid\",\",\".join(pids),\"-o\",\"pid,rss,command\"]).decode(\"utf-8\")\n print(\"\\n\"+stdout+\"\\n\")\n # remove the header from the process output\n status=[i.split() for i in filter(lambda x: x, stdout.split(\"\\n\")[1:])]\n # memory is the sum of all rss\n memory=sum(int(i[1]) for i in status)\n if memory > max_memory:\n max_memory=memory\n \n end=time.time()\n print(\"Time: {:.0f} minutes\".format((end-start)/60))\n print(\"Max Memory (RSS): {:.1f} GB\".format(max_memory*1.0/1024**2))", "def usage():\n \n print '-b <bench> the bench to show.'\n print '-c <config> the config to show (GPU, 8888, 565, etc).'\n print '-d <dir> a directory containing bench_r<revision>_<scalar> files.'\n print '-e <file> file containing expected bench values/ranges.'\n print ' Will raise exception if actual bench 
values are out of range.'\n print ' See bench_expectations.txt for data format and examples.'\n print '-f <revision>[:<revision>] the revisions to use for fitting.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-i <time> the time to ignore (w, c, g, etc).'\n print ' The flag is ignored when -t is set; otherwise we plot all the'\n print ' times except the one specified here.'\n print '-l <title> title to use for the output graph'\n print '-m <representation> representation of bench value.'\n print ' See _ListAlgorithm class in bench_util.py.'\n print '-o <path> path to which to write output; writes to stdout if not specified'\n print '-r <revision>[:<revision>] the revisions to show.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).'\n print '-t <time> the time to show (w, c, g, etc).'\n print '-x <int> the desired width of the svg.'\n print '-y <int> the desired height of the svg.'\n print '--default-setting <setting>[=<value>] setting for those without.'", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "def _usage_options_example(self):\n pass", "def main(argv):\n version = \"0.1.2\"\n interval = 1\n max_run_time = 0\n finished = 0\n first_time = 1\n output_file = 0\n output_file_enabled = 0\n output_path = 0\n header_row = 1\n\n #*** Get the hostname for use in filenames etc:\n hostname = socket.gethostname()\n\n #*** Start by parsing command line parameters:\n try:\n opts, args = getopt.getopt(argv, \"hu:m:ni:w:Wb:jv\",\n [\"help\",\n \"url=\",\n \"max-run-time=\",\n \"no-keepalive\",\n \"interval=\",\n \"output-file=\",\n \"output-path=\",\n \"no-header-row\",\n \"version\"])\n except getopt.GetoptError as err:\n print \"mosp: Error with options:\", err\n print_help()\n sys.exit(2)\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n print_help()\n sys.exit()\n elif opt in (\"-v\", \"--version\"):\n print 'mosp.py version', version\n sys.exit()\n elif opt in (\"-m\", \"--max-run-time\"):\n max_run_time = float(arg)\n elif opt in (\"-i\", \"--interval\"):\n interval = float(arg)\n elif opt in (\"-w\", \"--output-file\"):\n output_file = arg\n output_file_enabled = 1\n elif opt == \"-W\":\n output_file = \"mosp-\" + hostname + \"-\" + \\\n time.strftime(\"%Y%m%d-%H%M%S.csv\")\n output_file_enabled = 1\n elif opt in (\"-b\", \"--output-path\"):\n output_path = arg\n elif opt in (\"-j\", \"--no-header-row\"):\n header_row = 0\n\n print \"\\nMeasure Operating System Performance (mosp) version\", \\\n version\n\n #*** Display output filename:\n if output_file_enabled:\n if output_path:\n output_file = os.path.join(output_path, output_file)\n print \"Results filename is\", output_file\n else:\n print \"Not outputing results to file, as option not selected\"\n\n if not header_row:\n print \"Not writing a header row to CSV\"\n\n #*** Use this if max_run_time is set:\n initial_time = time.time()\n\n #*** Instantiate classes:\n cpus = CPUs()\n swap = Swap()\n nics = NICs()\n\n #*** Start the loop:\n while not finished:\n timenow = datetime.datetime.now()\n timestamp = timenow.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]\n start_time = time.time()\n\n #*** Update CPU measurements:\n cpus.update()\n\n #*** Update swap measurements:\n swap.update()\n\n #*** Update network measurements:\n nics.update()\n\n #*** Put the stats into a nice string for printing and\n #*** writing to file:\n result_csv 
= str(timestamp) + \",\" \\\n + cpus.csv() \\\n + swap.csv() \\\n + nics.csv() \\\n + \"\\n\"\n result_kvp = str(timestamp) + \" \" \\\n + cpus.kvp() \\\n + swap.kvp() \\\n + nics.kvp()\n print result_kvp\n if output_file_enabled:\n #*** Header row in CSV:\n if first_time and header_row:\n #*** Write a header row to CSV:\n header_csv = \"time,\" + cpus.csv_header(hostname) + \\\n swap.csv_header(hostname) + \\\n nics.csv_header(hostname) + \\\n \"\\n\"\n first_time = 0\n with open(output_file, 'a') as the_file:\n the_file.write(header_csv)\n\n #*** Write a data row to CSV:\n with open(output_file, 'a') as the_file:\n the_file.write(result_csv)\n\n if max_run_time:\n if (start_time - initial_time) > max_run_time:\n break\n\n #*** Sleep for interval seconds:\n time.sleep(interval)", "def printOptions(opts,subject_ids,session_ids,task_list, run_list, acq, rec):\n uname = os.popen('uname -s -n -r').read()\n print \"\\n\"\n print \"* Pipeline started at \"+time.strftime(\"%c\")+\"on \"+uname\n print \"* Command line is : \\n \"+str(sys.argv)+\"\\n\"\n print \"* The source directory is : \"+opts.sourceDir\n print \"* The target directory is : \"+opts.targetDir+\"\\n\"\n print \"* Data-set Subject ID(s) is/are : \"+str(', '.join(subject_ids))+\"\\n\"\n # print \"* PET conditions : \"+ ','.join(opts.condiList)+\"\\n\"\n print \"* Sessions : \", session_ids, \"\\n\"\n print \"* Tasks : \" , task_list , \"\\n\"\n print \"* Runs : \" , run_list , \"\\n\"\n print \"* Acquisition : \" , acq , \"\\n\"\n print \"* Reconstruction : \" , rec , \"\\n\"", "def usage():", "def usage():", "def help(cls, extra_args=None):\n if (_is_text_interface()):\n return _create_text_help_str(cls, cls._TEXT_USAGE)\n else:\n return cls._GRAPHICAL_USAGE", "def metrics_lpar(cmd_ctx, cpc, lpar, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_lpar(cmd_ctx, cpc, lpar, options))", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def print_performance_info(self):\n pass", "def collect_usage_pieces(self, ctx):\n pieces = super(ProfilingCommand, self).collect_usage_pieces(ctx)\n assert pieces[-1] == '[ARGV]...'\n pieces.insert(-1, '[--]')\n return pieces", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))", "async def sysinfo(self, ctx: Context):\n\t\tstart = time.perf_counter()\n\t\tend = time.perf_counter()\n\t\tduration = (end - start) * 1000\n\t\tcpuavg = psutil.cpu_percent(interval=None)\n\t\tmem = psutil.virtual_memory()[2]\n\t\tdurround = round(duration, 3)\n\t\tosun = os.uname()\n\t\tawait self.send(f\"System Info | CPU: {cpuavg}% | RAM: {mem}% | Latency: {durround * 1000}ms | OS: {sys.platform}\", whisper=[ctx.author.id])", "def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m", "def cmdline(self, executable, options, task, rlimits):\n data_model_param = get_data_model_from_task(task, {ILP32: \"-m32\", LP64: \"-m64\"})\n print(options)\n if data_model_param and not any(\n option.startswith(\"--clang-options=\") for option in options\n ):\n options += [\"--clang-options=\" + data_model_param]\n\n if task.property_file:\n options += [\"--svcomp-property\", task.property_file]\n else:\n raise 
UnsupportedFeatureException(\n \"SMACK can't execute without a property file.\"\n )\n\n options += [task.single_input_file]\n\n return [executable] + options", "def usage():\n print(\"[1] Getting help from a cipher \")\n print(\" ---> ./cryptogra.py caesar -h \")\n print(\"\")", "def _get_metrics_options(metrics):\n metrics_options = []\n if metrics is None:\n metrics = []\n for static_metric in metrics:\n metrics_options += [\n \"-m\",\n static_metric.metric.mp_metric_name,\n str(static_metric.value),\n ]\n return metrics_options", "def getHelp(self):\r\n help_str =\\\r\n \"\"\"##########################################################################################\r\n#\r\n# Required:\r\n#\r\n# --query_NAST multi-fasta file containing query sequences in alignment format\r\n#\r\n# Common opts:\r\n#\r\n# --db_NAST db in NAST format\r\n# --db_FASTA db in fasta format (megablast formatted)\r\n#\r\n#\r\n# -n number of top matching database sequences to compare to (default 15)\r\n# -R min divergence ratio default: 1.007\r\n# -P min percent identity among matching sequences (default: 90)\r\n#\r\n# ## parameters to tune ChimeraParentSelector:\r\n#\r\n# Scoring parameters:\r\n# -M match score (default: +5)\r\n# -N mismatch penalty (default: -4)\r\n# -Q min query coverage by matching database sequence (default: 70)\r\n# -T maximum traverses of the multiple alignment (default: 1)\r\n\r\n#\r\n# ## parameters to tune ChimeraPhyloChecker:\r\n#\r\n#\r\n# --windowSize default 50\r\n# --windowStep default 5\r\n# --minBS minimum bootstrap support for calling chimera (default: 90)\r\n# -S percent of SNPs to sample on each side of breakpoint for computing bootstrap support (default: 10)\r\n# --num_parents_test number of potential parents to test for chimeras (default: 3)\r\n# --MAX_CHIMERA_PARENT_PER_ID Chimera/Parent alignments with perID above this are considered non-chimeras (default 100; turned off)\r\n#\r\n# ## misc opts\r\n#\r\n# --printFinalAlignments shows alignment between query sequence and pair of candidate chimera parents\r\n# --printCSalignments print ChimeraSlayer alignments in ChimeraSlayer output\r\n# --exec_dir chdir to here before running\r\n#\r\n#########################################################################################\r\n \"\"\"\r\n return help_str", "def usage(msgarg):\n if msgarg:\n sys.stderr.write(\"error: %s\\n\" % msgarg)\n print(\"\"\"\\\n usage: %s [options]\n\n options:\n -d increase debug msg verbosity level\n -c N emit N classes (def: 500) per instances\n -I N emit N instances\n\n \"\"\" % os.path.basename(sys.argv[0]))\n sys.exit(1)", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def run(self, line):\n LOGGER.info(\"Scalable PMEM: {}\".format(self.name))\n try:\n (options, _) = self._parse_arglist(line)\n except:\n if (\"-h\" in line) or (\"--help\" in line):\n return ReturnCodes.SUCCESS\n else:\n raise InvalidCommandLineErrorOPTS(\"\")\n\n if len(args):\n InvalidCommandLineError(\"This command takes no parameters.\")\n\n LOGGER.info(\"Options: {}\".format(options))\n\n if not self._chif_lib:\n self._helpers.failNoChifLibrary()\n\n enable = True\n if options.enableFeature is False:\n enable = False\n\n self.enableOrDisableFeature(enable)\n\n #Return code\n return ReturnCodes.SUCCESS", "def help_opt(self):\n print(OPTIONS)", "def usage(self, host):", 
"def print_usage_command(self):\n print self.get_usage_command()", "def print_usage_command(self):\n print self.get_usage_command()", "def main():\n DataClasses = [FamilyStats, SeqHdrStats, UniProtStats]\n CmdLineOps, Args = parse_command_line_options()\n ThreadManager(DataClasses)\n print_results(DataClasses, CmdLineOps.output_file)\n return", "def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n printer('%s: Incorrect usage; details below.' % show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)", "def parsing_arguments(args=None):\n description = ''\n parser = argparse.ArgumentParser(\n prog='hatchet plot-cn',\n description=description,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n parser.add_argument('INPUT', help='One or more space-separated files in CN_BBC format')\n parser.add_argument(\n '-n',\n '--patientnames',\n required=False,\n default=config.plot_cn.patientnames,\n type=str,\n help='One or more space-separated patient names (default: inferred from filenames)',\n )\n parser.add_argument(\n '-u',\n '--minu',\n required=False,\n default=config.plot_cn.minu,\n type=float,\n help='Minimum proportion of a CNA to be considered subclonal (default: 0.2)\"',\n )\n parser.add_argument(\n '-x',\n '--rundir',\n required=False,\n default=config.plot_cn.rundir,\n type=str,\n help='Running directory (default: current directory)',\n )\n parser.add_argument(\n '-b',\n '--baseCN',\n required=False,\n default=config.plot_cn.basecn,\n type=int,\n help='Base copy number (default: inferred from tumor ploidy)',\n )\n parser.add_argument(\n '-sC',\n '--figsizeclones',\n required=False,\n default=config.plot_cn.figsizeclones,\n type=str,\n help='Size of clone plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-sP',\n '--figsizecn',\n required=False,\n default=config.plot_cn.figsizecn,\n type=str,\n help='Size of CN plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-sG',\n '--figsizegrid',\n required=False,\n default=config.plot_cn.figsizegrid,\n type=str,\n help='Size of grid plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-rC',\n '--resolutionclones',\n required=False,\n default=config.plot_cn.resolutionclones,\n type=int,\n help='Number of bins to merge together for plotting clone profiles (default: 100)\"',\n )\n parser.add_argument(\n '-rP',\n '--resolutioncn',\n required=False,\n default=config.plot_cn.resolutioncn,\n type=int,\n help='Number of bins to merge together for plotting proportions (default: 500)\"',\n )\n parser.add_argument(\n '-rG',\n '--resolutiongrid',\n required=False,\n default=config.plot_cn.resolutiongrid,\n type=int,\n help='Number of bins to merge together in grids (default: 100)\"',\n )\n parser.add_argument(\n '-e',\n '--threshold',\n 
required=False,\n default=config.plot_cn.threshold,\n type=float,\n help='Threshold used to classify a tumor into either diploid or tetraploid (default: 3.0)\"',\n )\n parser.add_argument(\n '--ymax',\n required=False,\n default=config.plot_cn.ymax,\n type=int,\n help='Maximum values in y-axis (default: automatically inferred)\"',\n )\n parser.add_argument(\n '--ymin',\n required=False,\n default=config.plot_cn.ymin,\n type=int,\n help='Minimum values in y-axis (default: automatically inferred)\"',\n )\n parser.add_argument(\n '--clonepalette',\n required=False,\n default=config.plot_cn.clonepalette,\n type=str,\n help='Palette for coloring the clones among Set1, Set2, Set3, Paired (default: Set1)\"',\n )\n parser.add_argument(\n '--linkage',\n required=False,\n default=config.plot_cn.linkage,\n type=str,\n help=(\n 'Linkage method used for clustering (default: single, available (single, complete, average, weighted, '\n 'centroid, median, ward) from SciPy)\"'\n ),\n )\n parser.add_argument('-V', '--version', action='version', version=f'%(prog)s {__version__}')\n args = parser.parse_args(args)\n\n if len(args.INPUT.split()) == 0:\n raise ValueError(error('Please specify at least one sample as input!'))\n if args.patientnames is None:\n patientnames = {fil: os.path.basename(fil) for fil in args.INPUT.split()}\n else:\n patientnames = {f: n for f, n in zip(args.INPUT.split(), args.patientnames.split())}\n if len(args.INPUT.split()) != len(set(patientnames.values())):\n raise ValueError(error('Multiple patients have the same name but they should unique!'))\n if args.figsizeclones is not None:\n figsizeclones = to_tuple(args.figsizeclones, error_message='Wrong format of figsizeclones!')\n if args.figsizecn is not None:\n figsizecn = to_tuple(args.figsizecn, error_message='Wrong format of figsizecn!')\n if args.figsizegrid is not None:\n figsizegrid = to_tuple(args.figsizegrid, error_message='Wrong format of figsizegrid!')\n\n if not os.path.isdir(args.rundir):\n raise ValueError(error('Running directory does not exist!'))\n if not 0.0 <= args.minu <= 1.0:\n raise ValueError(error('The minimum proportion for subclonal CNAs must be in [0, 1]!'))\n if args.baseCN is not None and args.baseCN < 2:\n raise ValueError(error('Base CN must be greater or equal than 2!'))\n if args.resolutionclones is not None and args.resolutionclones < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.resolutioncn is not None and args.resolutioncn < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.resolutiongrid is not None and args.resolutiongrid < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.threshold < 0:\n raise ValueError(error('Threshold must be positive!'))\n if args.linkage not in {\n 'single',\n 'complete',\n 'average',\n 'weighted',\n 'centroid',\n 'median',\n 'ward',\n }:\n raise ValueError(error('Unknown linkage method!'))\n\n if args.clonepalette == 'Set1':\n pal = plt.cm.Set1\n elif args.clonepalette == 'Set2':\n pal = plt.cm.Set2\n elif args.clonepalette == 'Set3':\n pal = plt.cm.Set3\n elif args.clonepalette == 'Paired':\n pal = plt.cm.Paired\n else:\n raise ValueError(error('Unknown clone palette!'))\n\n return {\n 'input': args.INPUT.split(),\n 'names': patientnames,\n 'rundir': args.rundir,\n 'minu': args.minu,\n 'base': args.baseCN,\n 'clonefigsize': figsizeclones,\n 'propsfigsize': figsizecn,\n 'clusterfigsize': figsizegrid,\n 'profileres': args.resolutionclones,\n 'cnres': args.resolutioncn,\n 'clusterres': 
args.resolutiongrid,\n 'threshold': args.threshold,\n 'linkage': args.linkage,\n 'ymax': args.ymax,\n 'ymin': args.ymin,\n 'clonepalette': pal,\n }", "def usage(msg):\n ap.print_usage()\n print \"-\"*40\n print msg\n exit(1)", "def get_cp_info(self):\n return self.get(COMMAND_CPM, 'GetCpInfo')", "def _memtop_setup_parser(parser):\n parser.add_argument('file', nargs=1, help='Python script to check for memory usage.')\n parser.add_argument('-o', default=None, action='store', dest='outfile',\n help='Name of output file. By default, output goes to stdout.')\n parser.add_argument('-l', '--limit', action='store', type=int, default=20, dest='limit',\n help='Limit the number of lines in the output.')", "def metrics_partition(cmd_ctx, cpc, partition, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_partition(cmd_ctx, cpc, partition, options))", "def show_mem(cmd, cnt, args):\n if cpu is None:\n log(\"Load program first\") \n return\n elif len(cpu.memory) == 0:\n log(\"Load program first\") \n return \n chunk = 0\n chunk_count = len(cpu.memory)\n while chunk < chunk_count: \n chunk_start = cpu.memory[chunk][MEMADDR]\n chunk_end = chunk_start + cpu.memory[chunk][MEMSIZE] \n log(\"{:d} {:#x}..{:#x}\".format(chunk, chunk_start, chunk_end)) \n chunk += 1\n if machine == \"ARM\":\n if len(cpu.high_memory) != 0:\n log(\"High memory\")\n for addr in sorted(cpu.high_memory):\n log(\"{:#x}\".format(addr))", "def usage(self):\n\n # header\n self.usage_header()\n\n print _(\"\"\"Screen: %(screen)s\nDescription: %(description)s\n\nUsage: %(app_name)s %(screen)s [options]\"\"\") % {\n 'app_name': constants.App.NAME,\n 'screen': self.name,\n 'description': self.description,\n }\n # any additional info in between (see other classes for reference)\n self._usage_options_example()\n\n #footer\n self.usage_footer()", "def gather_info_and_display():\n # Obtain total rss displayed in memory.stat for each group,\n # container and service.\n try:\n output_mem = pipe_command(GREP_CMD, AWK_CMD, cwd=MEMPATH)\n LOG.debug(\n 'command: %s\\n%s',\n \"grep -rs total_rss '/sys/fs/cgroup/memory/' \"\n \"| awk '$2>0{print$0}' \",\n output_mem)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n mem_info = get_meminfo()\n pt_groups = gather_groups_memory(output_mem)\n pt_cont = gather_containers_memory(output_mem)\n pt_serv = sys_service_memory()\n\n # Dump the tables out\n print('\\nPer groups memory usage:')\n\n # Get string to be printed and create list of elements separated by \\n\n list_of_table_lines = pt_groups.get_string().split('\\n')\n\n # Use the first line (+---+-- ...) 
as horizontal rule to insert later\n horizontal_line = list_of_table_lines[0]\n\n # Print the table, except last two lines ( \"Total\" row + final separator).\n print(\"\\n\".join(list_of_table_lines[:-2]))\n # Print separator, and finally the \"Total\" row.\n print(horizontal_line)\n print(\"\\n\".join(list_of_table_lines[-2:]))\n\n pt_namespc = prettytable.PrettyTable(\n ['Namespace',\n 'Resident Set Size (MiB)',\n ], caching=False)\n pt_namespc.align = 'l'\n pt_namespc.align['Resident Set Size (MiB)'] = 'r'\n\n print('\\nPer namespace memory usage:')\n for n_s in MEMORY['namespaces']:\n pt_namespc.add_row(\n [n_s,\n MEMORY['namespaces'][n_s],\n ])\n print(pt_namespc)\n\n print('\\nPer container memory usage:')\n print(pt_cont)\n\n print('\\nPer service memory usage:')\n print(pt_serv)\n\n base_mebib = 0.0\n k8s_system = 0.0\n k8s_addon = 0.0\n platform_memory_percent = 0.0\n\n # Calculate base memory usage (i.e., normal memory, exclude K8S and VMs)\n # e.g., docker, system.slice, user.slice\n for group in MEMORY['cgroups']:\n if group in BASE_GROUPS:\n base_mebib += float(MEMORY['cgroups'][group])\n\n # K8S platform system usage (essential) and addons usage (non-essential)\n for n_s in MEMORY['namespaces']:\n if n_s in K8S_NAMESPACE_SYSTEM:\n k8s_system += MEMORY['namespaces'][n_s]\n elif n_s in K8S_NAMESPACE_ADDON:\n k8s_addon += MEMORY['namespaces'][n_s]\n\n # Calculate platform memory usage\n platform_mebib = base_mebib + k8s_system\n\n anon_mebib = float(mem_to_mebibytes(\n mem_info['Active(anon)'] + mem_info['Inactive(anon)'])) * KBYTE\n avail_mebib = float(mem_to_mebibytes(\n mem_info['MemAvailable'])) * KBYTE\n total_mebib = float(anon_mebib + avail_mebib)\n\n anon_percent = py2_round(100 * anon_mebib / total_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n reserved_mebib = get_platform_reserved_memory()\n # Calculate platform memory in terms of percent reserved\n if reserved_mebib > 0.0:\n platform_memory_percent = py2_round(\n 100 * platform_mebib / reserved_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n pt_platf = prettytable.PrettyTable(\n ['Reserved',\n 'Platform',\n 'Base',\n 'K8s Platform system',\n 'k8s-addon'\n ], caching=False)\n pt_platf.align = 'l'\n\n pt_platf.add_row(\n [reserved_mebib,\n '{} ({}%)'.format(platform_mebib, platform_memory_percent),\n base_mebib,\n k8s_system,\n k8s_addon\n ])\n print('\\nPlatform memory usage in MiB:')\n print(pt_platf)\n\n pt_4k = prettytable.PrettyTable(\n ['Anon',\n 'Cgroup-rss',\n 'Available',\n 'Total'\n ], caching=False)\n pt_4k.align = 'l'\n\n pt_4k.add_row(\n ['{} ({}%)'.format(anon_mebib, anon_percent),\n MEMORY['cgroups']['total_rss'],\n avail_mebib,\n total_mebib\n ])\n\n print('\\n4K memory usage in MiB:')\n print(pt_4k)\n\n return 0", "def usage():\n pass", "def main(argv):\n args = parse_command_line(argv)\n return show_scale_file_info(args.info_file) or 0", "def mc(self, *args) -> None:\n env = os.environ.copy()\n env['MC_HOST_minio'] = self.auth_url\n # --config-dir is set just to prevent any config set by the user\n # from interfering with the test.\n try:\n subprocess.run(\n [\n 'mc', '--quiet', '--no-color', f'--config-dir={self.path}',\n *args\n ],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.PIPE,\n env=env,\n encoding='utf-8',\n errors='replace',\n check=True\n )\n except OSError as exc:\n raise MissingProgram(f'mc could not be run: {exc}') from exc\n except subprocess.CalledProcessError as exc:\n raise ProgramFailed(exc.stderr) from exc", "def collect(self):\n if not self._btime:\n return\n\n try:\n 
with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def info(self):\n import string\n results = self.info_list()\n labels = \"%-8s %-9s %-4s %-8s %-8s %-4s\" % \\\n ('MACHINE','CPU','GHZ','MB TOTAL',\n 'MB FREE','LOAD')\n print labels\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f\" % \\\n (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \\\n res['cpu_speed'],res['mem_total'],res['mem_free'],\\\n res['load_1'])\n print s", "def report_on_config( args ):\n\n from khmer.utils import print_error\n\n if args.quiet: return\n\n print_error( \"\\nPARAMETERS:\" )\n print_error( \" - kmer size = {0} \\t\\t(-k)\".format( args.ksize ) )\n print_error( \" - n hashes = {0} \\t\\t(-N)\".format( args.n_hashes ) )\n print_error(\n \" - min hashsize = {0:5.2g} \\t(-x)\".format( args.min_hashsize )\n )\n print_error( \"\" )\n print_error(\n \"Estimated memory usage is {0:.2g} bytes \"\n \"(n_hashes x min_hashsize)\".format( args.n_hashes * args.min_hashsize )\n )\n print_error( \"-\" * 8 )\n\n if DEFAULT_MIN_HASHSIZE == args.min_hashsize:\n print_error(\n \"** WARNING: hashsize is default! 
\" \n \"You absodefly want to increase this!\\n** \"\n \"Please read the docs!\"\n )", "def execute(self):\n\n if not os.path.exists(self._source_file_name):\n logger.info(\"Did not find the aicpu profiling source file\")\n return\n\n with open(self._source_file_name, 'rb') as ai_cpu_data:\n content = ai_cpu_data.read()\n if content[0:2].hex().upper() == \"5A5A\":\n ai_cpu_total_time_summary, result_list = self.parser_binary_file(content)\n else:\n ai_cpu_total_time_summary, result_list = self.parser_txt_file(content)\n\n os.chmod(self._source_file_name, stat.S_IREAD)\n\n if result_list:\n ai_cpu_total_time = format(ai_cpu_total_time_summary, '.6f')\n result_list.append([\"AI CPU Total Time(ms):\", ai_cpu_total_time])\n fwrite_format(self._output_filename, \" \".join(self._dst_file_column_title), is_start=True, is_print=True)\n fwrite_format(self._output_filename, result_list, is_print=True)\n\n # For timeline display.\n self._result_list = result_list", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "async def run_mpc(self) -> Dict[str, Dict[Metric, int]]:\n pass", "def test_cw_metrics(self):\n\n instances = set()\n result = self.cw_client.list_metrics(Namespace=\"CWAgent\", MetricName=\"cpu_usage_system\")\n for i in result[\"Metrics\"]:\n instances.add(i[\"Dimensions\"][0][\"Value\"])\n\n for key, value in self.cdk_output_map.items():\n if \"Instance\" in key:\n self.assertTrue(value in instances)", "def mcmc(self, assign_step_methods=True, *args, **kwargs):\n\n self.mc = pm.MCMC(self.nodes_db.node.values, *args, **kwargs)\n\n self.pre_sample()\n\n return self.mc", "def main():\n test_cases = ast.literal_eval(sys.argv[1])\n results = str(my_info()) + '\\t\\t'\n for test_case in test_cases:\n mode = test_case[0]\n id_1 = int(test_case[1])\n id_2 = int(test_case[2])\n if mode == 'jc':\n results += str(Jaccard_Coefficient(id_1, id_2)) + '\\t\\t'\n elif mode == 'cc':\n results += str(Correlation_Coefficient(id_1, id_2)) + '\\t\\t'\n else:\n exit('bad command')\n print results + '\\n'", "def do_stats(cs, args):\n stats_info = cs.containers.stats(args.container)\n utils.print_dict(stats_info)", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"", "def usage():\n\n # Local constants\n\n # Local variables\n\n #****** start usage() ******#\n print()\n print(\" Usage: python TCGCardTracker.py <arguement below> <optional-argument-1>\")\n print(\"\\tadd (Optional): Add a card to your collection. Requires TCGPlayer URL.\")\n print(\"\\tdelete (Optional): Delete a card from your collection. Requires TCGPlayer URL.\")\n print(\"\\tupdate (Optional): Updates pricing data for every card in your collection.\")\n print(\"\\ttop25 (Optional): Outputs the 25 most valuable cards from your collection.\")\n print(\"\\texport (Optional): Exports a list of TCGPlayer URLs to a text file.\")\n print(\"\\texport_collection (Optional): Exports your collection to a .csv including most recent price data.\")\n print(\"\\timport (Optional): Imports a text file of TCGPlayer URLs to bulk import cards into your collection. 
Requires text file.\")\n print(\"\\tworth (Optional): Ouputs how much your collection is worth using latest price data.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tticker (Optional): Displays a ticker grid of the change in value over a given time. If run without the days back parameter it will default to 7 days.\")\n sys.exit()", "def stat_cuda(msg: str) -> None:\n print(f'-- {msg:<35} allocated: %dM, max allocated: %dM, cached: %dM, max cached: %dM' % (\n torch.cuda.memory_allocated() / 1024 / 1024,\n torch.cuda.max_memory_allocated() / 1024 / 1024,\n torch.cuda.memory_cached() / 1024 / 1024,\n torch.cuda.max_memory_cached() / 1024 / 1024\n ))", "def do_config():\n\n tracking = get_tracking()\n for unit in (\"ppm\", \"sec\"):\n\ttunit = unit\n\tif unit == \"sec\":\n\t tunit = \"seconds\"\n\tprint \"multigraph chrony_%s\" % unit\n\tprint \"graph_title NTP (Chrony) Statistics (%s)\" % unit\n\tprint \"graph_vlabel %s\" % unit\n\tprint \"graph_args --base 1000\"\n\tprint \"graph_category time\"\n\tprint \"graph_info NTP (Chrony) tracking statistics (the ones measured in %s)\" % tunit\n\tfor key in tracking[tunit]:\n\t item = tracking[tunit][key]\n\t print \"\"\"%s.label %s\n%s.draw LINE2\n%s.info %s\"\"\" % (key, item[\"label\"], key, key, item[\"label\"])\n\tprint\n return 0", "def help(update, context):\n msg = \"\"\n msg += \"\\n/covid 7-Day-Incident per Million\"\n msg += \"\\n/daylio What did I do a year ago today?\"\n msg += \"\\n/f1last Results of the last race\"\n msg += \"\\n/f1stand Driver standings\"\n msg += \"\\n/f1next Time and place of the next race\"\n msg += \"\\n/fuel prices and consump. 
(args: Xeur Ykm)\"\n msg += \"\\n/ip Outside ip address\"\n msg += \"\\n/rate Exchange rates (args: Xeur/Yhuf)\"\n msg += \"\\n/rss check rss feeds for new content\"\n msg += \"\\n/sun Time of sunrise and sunset\"\n msg += \"\\n/xkcd Sends last comic image and alt\"\n msg.rstrip()\n update.message.reply_text(msg)", "def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break", "def _cmd_genemetrics(args):\n cnarr = read_cna(args.filename)\n segarr = read_cna(args.segment) if args.segment else None\n is_sample_female = verify_sample_sex(cnarr, args.sample_sex, args.male_reference, args.diploid_parx_genome)\n # TODO use the stats args\n table = do_genemetrics(\n cnarr,\n segarr,\n args.threshold,\n args.min_probes,\n args.drop_low_coverage,\n args.male_reference,\n is_sample_female,\n args.diploid_parx_genome,\n )\n logging.info(\"Found %d gene-level gains and losses\", len(table))\n write_dataframe(args.output, table)", "def execute(self, context):\n 
logger.info(\"///////////////////////////////////////////////////////////////////////\")\n logger.info(\"[ChainedTransfig] Collectd-Jmx w/ Common MBeans Generation...\")\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n\n #insert common to assemble collectd.genericjmx.common.conf in later step\n context[CTX_KEY_COMMON_COLLECTD_JMX_APP_PREFIX] = 'common'\n super().execute(context)\n\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n logger.info(\"[ChainedTransfig] Collectd-Jmx w/ Common MBeans Generation... COMPLETES\")\n logger.info(\"///////////////////////////////////////////////////////////////////////\")", "def info():\n f = Figlet(font='standard')\n click.echo(f.renderText('covtool'))\n click.secho(\n \"covtool: a simple CLI for fetching covid data\", fg='cyan')\n click.echo(\n \"Data Sources: https://www.worldometers.info/coronavirus\\nJohn Hopkins [https://github.com/CSSEGISandData/COVID-19] \")\n click.secho(\"Author: Amayo II <[email protected]>\", fg='magenta')", "def help(bot, sender, sendmsg, label, args):\n\n clist = commands.commands\n csort = sorted(clist.values(), key=lambda c: c.__name__.lower())\n\n if len(args) > 0:\n page = int(args[0]) - 1\n else:\n page = 0\n\n pages = len(clist) // 10 + 1\n\n sendmsg(\"-- Help (Page {} of {}) --\".format(page + 1, pages))\n for i in range(10):\n if i >= len(csort):\n break\n\n command = csort[i + (page * 10)]\n sendmsg(\"{}: {}\".format(command.__name__, command.__doc__))", "def _cmd_coverage(args):\n pset = coverage.do_coverage(\n args.interval,\n args.bam_file,\n args.count,\n args.min_mapq,\n args.processes,\n args.fasta,\n )\n if not args.output:\n # Create an informative but unique name for the coverage output file\n bambase = core.fbase(args.bam_file)\n bedbase = core.fbase(args.interval)\n tgtbase = (\n \"antitargetcoverage\" if \"anti\" in bedbase.lower() else \"targetcoverage\"\n )\n args.output = f\"{bambase}.{tgtbase}.cnn\"\n if os.path.exists(args.output):\n args.output = f\"{bambase}.{bedbase}.cnn\"\n core.ensure_path(args.output)\n tabio.write(pset, args.output)", "def profiler(*args, addCategory: AnyStr=\"\", allCategories: bool=True, bufferSize: Union[int,\n bool]=0, categoryIndex: Union[int, bool]=0, categoryIndexToName: Union[int,\n bool]=0, categoryInfo: Union[AnyStr, bool]=\"\", categoryName: Union[AnyStr,\n bool]=\"\", categoryNameToIndex: Union[AnyStr, bool]=\"\", categoryRecording:\n bool=True, clearAllMelInstrumentation: bool=True, colorIndex: int=0, eventCPUId:\n bool=True, eventCategory: bool=True, eventColor: bool=True, eventCount: bool=True,\n eventDescription: bool=True, eventDuration: bool=True, eventIndex: Union[int,\n bool]=0, eventName: bool=True, eventStartTime: bool=True, eventThreadId: bool=True,\n instrumentMel: bool=True, load: Union[AnyStr, bool]=\"\", output: Union[AnyStr,\n bool]=\"\", procedureDescription: AnyStr=\"\", procedureName: AnyStr=\"\",\n removeCategory: AnyStr=\"\", reset: bool=True, sampling: bool=True, signalEvent:\n bool=True, signalMelEvent: bool=True, q=True, query=True, **kwargs)->Union[None,\n Any]:\n pass" ]
[ "0.65378803", "0.627548", "0.6020114", "0.5702169", "0.56018233", "0.55116755", "0.55000883", "0.5320139", "0.5315897", "0.5299815", "0.52958316", "0.5274619", "0.52392995", "0.52203125", "0.52038133", "0.5199431", "0.5187151", "0.5178949", "0.5170392", "0.5164013", "0.5105565", "0.5103818", "0.50898266", "0.5052539", "0.5035622", "0.502821", "0.501706", "0.5015949", "0.49987954", "0.4995435", "0.4994893", "0.49872065", "0.49857652", "0.49823377", "0.49802217", "0.49538845", "0.4953671", "0.49489754", "0.49483764", "0.49386388", "0.49346632", "0.4929749", "0.49271503", "0.4909886", "0.49063715", "0.49063715", "0.4888474", "0.4883137", "0.4876717", "0.48731402", "0.48707312", "0.48706523", "0.48664415", "0.48657164", "0.48556435", "0.48515126", "0.48501876", "0.4835232", "0.48343426", "0.48284125", "0.48110816", "0.4809616", "0.48017365", "0.4801053", "0.4801053", "0.47884846", "0.47855777", "0.47798613", "0.4779617", "0.47782972", "0.47773227", "0.47709617", "0.47623646", "0.47552183", "0.4751337", "0.47482225", "0.47429726", "0.47399902", "0.47312617", "0.47309047", "0.4730584", "0.47276497", "0.47267857", "0.47211823", "0.4715795", "0.47132322", "0.47124293", "0.47105163", "0.47093555", "0.47082168", "0.470178", "0.47009042", "0.46667597", "0.4663084", "0.46617326", "0.46565914", "0.46551764", "0.46529984", "0.46527383", "0.46503717" ]
0.66221523
0